import json
import os
import subprocess
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
IMAGE_URL = "http://images.cocodataset.org/zips/{split}{version}.zip"
# flake8: noqa
TEST_LABEL_URL = (
"http://images.cocodataset.org/annotations/image_info_test{version}.zip"
)
# flake8: noqa
TRAIN_VAL_LABEL_URL = (
"http://images.cocodataset.org/annotations/annotations_trainval{version}.zip"
)
@datasets.register()
class coco(DatasetBuilder):
VERSIONS = ["2014"]
info = DatasetInfo(
name="coco",
full_name="Common Objects in Context",
description="Image data sets for object class recognition.",
homepage="https://cocodataset.org/#home",
tags=["image", "object recognition"],
citation=None,
)
def build(self):
dfs = []
for split in ["train", "val"]:
dct = json.load(
open(
os.path.join(
self.dataset_dir, f"annotations/instances_{split}2014.json"
),
"rb",
)
)
df = mk.DataFrame(dct["images"])
df["split"] = split
dfs.append(df)
df = mk.concat(dfs, axis=0)
path = df["split"] + "2014/" + df["file_name"]
df["image"] = mk.files(path, base_dir=self.var_dataset_dir)
df.data.reorder(
["id", "image"] + [c for c in df.columns if c not in ["id", "image"]]
)
return df
def download(self):
for split in ["train", "val", "test"]:
downloaded_path = download_url(
IMAGE_URL.format(version=self.version, split=split), self.dataset_dir
)
extract(downloaded_path, self.dataset_dir)
downloaded_path = download_url(
TEST_LABEL_URL.format(version=self.version), self.dataset_dir
)
extract(downloaded_path, self.dataset_dir)
downloaded_path = download_url(
TRAIN_VAL_LABEL_URL.format(version=self.version), self.dataset_dir
)
extract(downloaded_path, self.dataset_dir)
def build_coco_2014_df(dataset_dir: str, download: bool = False):
if download:
curr_dir = os.getcwd()
os.makedirs(dataset_dir, exist_ok=True)
os.chdir(dataset_dir)
for split in ["train", "val", "test"]:
if not os.path.exists(f"{split}2014"):
                subprocess.run(
                    args=[
                        "wget",
                        f"http://images.cocodataset.org/zips/{split}2014.zip",
                    ],
                    check=True,
                )
subprocess.run(["unzip", f"{split}2014.zip"])
subprocess.run(["rm", f"{split}2014.zip"])
# download train and test annotations
if not os.path.exists("annotations/captions_train2014.json"):
subprocess.run(
args=[
"wget",
"http://images.cocodataset.org/annotations/annotations_trainval2014.zip", # noqa: E501
]
)
subprocess.run(["unzip", "annotations_trainval2014.zip"])
subprocess.run(["rm", "annotations_trainval2014.zip"])
# download test image info
if not os.path.exists("annotations/image_info_test2014.json"):
subprocess.run(
args=[
"wget",
"http://images.cocodataset.org/annotations/image_info_test2014.zip",
]
)
subprocess.run(["unzip", "image_info_test2014.zip"])
subprocess.run(["rm", "image_info_test2014.zip"])
os.chdir(curr_dir)
dfs = []
for split in ["train", "val"]:
dct = json.load(
open(
os.path.join(dataset_dir, f"annotations/instances_{split}2014.json"),
"rb",
)
)
df = mk.DataFrame(dct["images"])
df["split"] = split
dfs.append(df)
df = mk.concat(dfs, axis=0)
path = df["split"] + "2014/" + df["file_name"]
df["image"] = mk.files(path, base_dir=dataset_dir)
df.data.reorder(
["id", "image"] + [c for c in df.columns if c not in ["id", "image"]]
)
return df
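
# A minimal usage sketch for the builder above, assuming the registry
# exposes these builders through `mk.get` (as the deprecation notice on
# `get_yesno` elsewhere in this repo suggests). Images are loaded lazily,
# only when a row is materialized.
if __name__ == "__main__":
    df = mk.get("coco", version="2014")  # downloads and builds on first call
    print(df.columns)
    print(df["image"][0])  # lazily loads the first image from disk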

# === end of meerkat-main/meerkat/datasets/coco/__init__.py ===

import os
import numpy as np
import pandas as pd
import PIL
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
def concat_images(x: PIL.Image.Image, y: PIL.Image.Image):
return PIL.Image.fromarray(np.concatenate([np.array(x), np.array(y)], axis=1))
@datasets.register()
class rfw(DatasetBuilder):
VERSIONS = ["main"]
GROUPS = ["Caucasian", "African", "Asian", "Indian"]
info = DatasetInfo(
name="fer",
full_name="Racial Faces in-the-Wild",
# flake8: noqa
description="Racial Faces in-the-Wild (RFW) is a testing database for studying racial bias in face recognition. Four testing subsets, namely Caucasian, Asian, Indian and African, are constructed, and each contains about 3000 individuals with 6000 image pairs for face verification. They can be used to fairly evaluate and compare the recognition ability of the algorithm on different races.",
# flake8: noqa
homepage="http://www.whdeng.cn/RFW/testing.html",
tags=["image", "facial recognition", "algorithmic bias"],
)
def build(self):
dfs = []
for group in self.GROUPS:
df = pd.read_csv(
os.path.join(self.dataset_dir, f"test/txts/{group}/{group}_images.txt"),
delimiter="\t",
names=["filename", "count"],
)
df["ethnicity"] = group.lower()
df["identity"] = df["filename"].str.rsplit("_", n=1).str[0]
df["image_id"] = df["filename"].str.rsplit(".", n=1).str[0]
df["image_path"] = df.apply(
lambda x: f"test/data/{group}/{x['identity']}/{x['filename']}", axis=1
)
            df = df.drop(columns=["filename", "count"])
dfs.append(df)
df = pd.concat(dfs)
# drop duplicate rows with the same image_id
df = df.drop_duplicates(subset=["image_id"], keep=False)
df = mk.DataFrame.from_pandas(df)
df["image"] = mk.ImageColumn.from_filepaths(
df["image_path"], base_dir=self.dataset_dir
)
return df[["image_id", "identity", "ethnicity", "image"]]
def download(self):
raise ValueError(
"To download the RFW dataset, you must request access following the "
"instructions at http://www.whdeng.cn/RFW/testing.html."
"Once you've been granted access and downloaded the data, move it "
f"to the directory {self.dataset_dir} and extract it."
)
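
# A worked example of the filename parsing in `build` above, using a
# hypothetical RFW-style filename. The identity is everything before the
# final "_", and the image id is the filename without its extension.
if __name__ == "__main__":
    filenames = pd.Series(["m.0xyz_0001.jpg"])  # hypothetical filename
    print(filenames.str.rsplit("_", n=1).str[0][0])  # identity: "m.0xyz"
    print(filenames.str.rsplit(".", n=1).str[0][0])  # image_id: "m.0xyz_0001"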

# === end of meerkat-main/meerkat/datasets/rfw/__init__.py ===

import os
import re
import pandas as pd
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
_URL = "https://sid.erda.dk/public/archives/ff17dc924eba88d5d01a807357d6614c/FullIJCNN{version}.zip" # noqa: E501
@datasets.register()
class gtsdb(DatasetBuilder):
"""German Traffic Sign Detection Benchmark GTSDB."""
VERSIONS = ["2013"]
info = DatasetInfo(
name="gtsdb",
full_name="German Traffic Sign Detection Benchmark GTSDB",
description=("Image data set to detect street signs."),
homepage="https://sid.erda.dk/public/archives/ff17dc924eba88d5d01a807357d6614c/published-archive.html", # noqa: E501
tags=["image", "object recognition"],
citation=None,
)
def build(self):
"""Get the processed dataframe hosted on huggingface."""
folder = os.path.join(self.dataset_dir, f"FullIJCNN{self.version}")
gt_ann = pd.read_csv(os.path.join(folder, "gt.txt"), sep=";", header=None)
# Format categories
readme = os.path.join(folder, "ReadMe.txt")
with open(readme, "r") as f:
lines = [
x.strip() for x in f.readlines() if re.match("^[0-9]* = .*$", x.strip())
]
categories = []
for line in lines:
category_id, category_full_name = line.split(" = ")
category_id = int(category_id)
category_full_name = category_full_name.strip()
category_name, supercategory = category_full_name.rsplit(" ", 1)
category_name = category_name.strip()
supercategory = supercategory.strip().strip("(").strip(")")
categories.append(
{
"category_id": int(category_id),
"category": category_name,
"supercategory": supercategory,
}
)
categories = pd.DataFrame(categories)
# Format dataframe
df = gt_ann.rename(
{0: "filename", 1: "x1", 2: "y1", 3: "x2", 4: "y2", 5: "category_id"},
axis=1,
)
df = df.merge(categories, on="category_id")
# Split
images_files = sorted([x for x in os.listdir(folder) if x.endswith(".ppm")])
image_df = pd.DataFrame({"filename": images_files})
image_df["split"] = "train"
image_df.loc[600:, "split"] = "test"
df = df.merge(image_df, on="filename")
df = mk.DataFrame.from_pandas(df).drop("index")
df["image"] = mk.files(df["filename"], base_dir=folder, type="image")
df["image_crop"] = mk.defer(df, crop)
return df
def download(self):
downloaded_path = download_url(
_URL.format(version=self.version), self.dataset_dir
)
extract(downloaded_path, self.dataset_dir)
def is_downloaded(self):
return os.path.exists(self.dataset_dir) and os.path.exists(
os.path.join(self.dataset_dir, f"FullIJCNN{self.version}")
)
def crop(image, x1, y1, x2, y2):
out = image.crop((x1, y1, x2, y2))
return out
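
# A minimal usage sketch for the builder above, assuming `mk.get` resolves
# registered builders. `image_crop` is a deferred column, so the sign is
# only cropped out of the full frame when a row is materialized.
if __name__ == "__main__":
    df = mk.get("gtsdb", version="2013")
    row = df[0]
    print(row["category"], (row["x1"], row["y1"], row["x2"], row["y2"]))
    print(row["image_crop"].size)  # PIL crop of just the traffic sign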

# === end of meerkat-main/meerkat/datasets/gtsdb/__init__.py ===

import os
import subprocess
import pandas as pd
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
@datasets.register()
class celeba(DatasetBuilder):
VERSIONS = ["main"]
info = DatasetInfo(
name="celeba",
full_name="CelebFaces Attributes",
description=(
"CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes "
"dataset with more than 200K celebrity images, each with 40 attribute "
" annotations. The images in this dataset cover large pose variations and "
" background clutter."
),
homepage="https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html",
tags=["image", "face recognition"],
)
def build(self):
df = build_celeba_df(dataset_dir=self.dataset_dir)
df = mk.DataFrame.from_pandas(df)
df["image"] = mk.ImageColumn.from_filepaths(
filepaths=df["img_path"], base_dir=self.var_dataset_dir
)
return df
def download(self):
download_celeba(self.dataset_dir)
ATTRIBUTES = [
"5_o_clock_shadow",
"arched_eyebrows",
"attractive",
"bags_under_eyes",
"bald",
"bangs",
"big_lips",
"big_nose",
"black_hair",
"blond_hair",
"blurry",
"brown_hair",
"bushy_eyebrows",
"chubby",
"double_chin",
"eyeglasses",
"goatee",
"gray_hair",
"heavy_makeup",
"high_cheekbones",
"male",
"mouth_slightly_open",
"mustache",
"narrow_eyes",
"no_beard",
"oval_face",
"pale_skin",
"pointy_nose",
"receding_hairline",
"rosy_cheeks",
"sideburns",
"smiling",
"straight_hair",
"wavy_hair",
"wearing_earrings",
"wearing_hat",
"wearing_lipstick",
"wearing_necklace",
"wearing_necktie",
"young",
]
def get_celeba(dataset_dir: str, download: bool = False):
"""Build the dataframe by joining on the attribute, split and identity
CelebA CSVs."""
if download:
download_celeba(dataset_dir=dataset_dir)
df = build_celeba_df(dataset_dir=dataset_dir)
df = mk.DataFrame.from_pandas(df)
df["image"] = mk.ImageColumn.from_filepaths(
filepaths=df["img_path"], base_dir=dataset_dir
)
return df
def download_celeba(dataset_dir: str):
# if not os.path.exists(dataset_dir):
# CelebA(os.path.split(dataset_dir)[:-1][0], download=True)
if os.path.exists(dataset_dir):
return
curr_dir = os.getcwd()
os.makedirs(dataset_dir, exist_ok=True)
os.chdir(dataset_dir)
subprocess.run(
args=["kaggle datasets download " "-d jessicali9530/celeba-dataset"],
shell=True,
check=True,
)
subprocess.run(["unzip", "-q", "celeba-dataset.zip"])
os.chdir(curr_dir)
def build_celeba_df(dataset_dir: str):
"""Build the dataframe by joining on the attribute, split and identity
CelebA CSVs."""
# identity_df = pd.read_csv(
# os.path.join(dataset_dir, "identity_CelebA.txt"),
# delim_whitespace=True,
# header=None,
# names=["file", "identity"],
# )
attr_df = pd.read_csv(
os.path.join(dataset_dir, "list_attr_celeba.csv"),
index_col=0,
)
attr_df.columns = pd.Series(attr_df.columns).apply(lambda x: x.lower())
attr_df = ((attr_df + 1) // 2).rename_axis("file").reset_index()
celeb_df = attr_df # identity_df.merge(attr_df, on="file", validate="one_to_one")
celeb_df["img_path"] = celeb_df.file.apply(
lambda x: os.path.join("img_align_celeba", x)
)
split_df = pd.read_csv(os.path.join(dataset_dir, "list_eval_partition.csv"))
split_df["split"] = split_df["partition"].replace(
{0: "train", 1: "valid", 2: "test"}
)
celeb_df = celeb_df.merge(
split_df[["image_id", "split"]], left_on="file", right_on="image_id"
)
return celeb_df
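
# A worked example of the attribute re-encoding in `build_celeba_df` above:
# the Kaggle CSV stores each of the 40 attributes as -1/1, and
# `(x + 1) // 2` maps that to 0/1.
if __name__ == "__main__":
    demo = pd.DataFrame({"smiling": [-1, 1, 1], "male": [1, -1, 1]})
    print((demo + 1) // 2)
    #    smiling  male
    # 0        0     1
    # 1        1     0
    # 2        1     1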

# === end of meerkat-main/meerkat/datasets/celeba/__init__.py ===

import os
import subprocess
import numpy as np
import pandas as pd
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import extract
@datasets.register()
class mirflickr(DatasetBuilder):
VERSIONS = ["25k"]
VERSION_TO_URLS = {
"25k": [
"http://press.liacs.nl/mirflickr/mirflickr25k.v3b/mirflickr25k.zip",
# flake8: noqa
"http://press.liacs.nl/mirflickr/mirflickr25k.v3b/mirflickr25k_annotations_v080.zip",
]
}
info = DatasetInfo(
name="mirflickr",
full_name="PASCAL",
description=(
"The MIRFLICKR-25000 open evaluation project consists of 25000 images "
"downloaded from the social photography site Flickr through its public API "
" coupled with complete manual annotations, pre-computed descriptors and "
"software for bag-of-words based similarity and classification and a "
"matlab-like tool for exploring and classifying imagery."
),
homepage="https://press.liacs.nl/mirflickr/",
tags=["image", "retrieval"],
citation=(
"@inproceedings{huiskes08,"
" author = {Mark J. Huiskes and Michael S. Lew},"
" title = {The MIR Flickr Retrieval Evaluation},"
" booktitle = {MIR '08: Proceedings of the 2008 ACM International"
" Conference on Multimedia Information Retrieval},"
" year = {2008},"
" location = {Vancouver, Canada},"
" publisher = {ACM},"
" address = {New York, NY, USA},"
"}"
),
)
def download(self):
urls = self.VERSION_TO_URLS[self.version]
for url in urls:
downloaded_path = self.download_url(url)
extract(downloaded_path, self.dataset_dir)
def build(self) -> mk.DataFrame:
# get list of image ids
file_names = pd.Series(
[
f
for f in os.listdir(os.path.join(self.dataset_dir, "mirflickr"))
if f.endswith(".jpg")
]
)
# remove jpg extension
ids = file_names.str.replace(".jpg", "", regex=False)
df = mk.DataFrame({"id": ids, "file_name": file_names})
df["image"] = mk.ImageColumn.from_filepaths(
df["file_name"], base_dir=os.path.join(self.var_dataset_dir, "mirflickr")
)
for class_name in MIR_FLICKR_25K_CLASSES:
ids = (
"im"
+ pd.read_csv(
os.path.join(self.dataset_dir, class_name + ".txt"),
header=None,
names=["id"],
).astype(str)["id"]
)
df[class_name] = np.zeros(len(df))
df[class_name][df["id"].isin(ids)] = 1
return df
def build_mirflickr_25k_df(dataset_dir: str, download: bool = False):
if download:
subprocess.run(
[
"wget",
"http://press.liacs.nl/mirflickr/mirflickr25k.v3b/mirflickr25k.zip",
]
)
subprocess.run(["unzip", "mirflickr25k.zip"])
os.remove("mirflickr25k.zip")
subprocess.run(
[
"wget",
"http://press.liacs.nl/mirflickr/mirflickr25k.v3b/mirflickr25k_annotations_v080.zip", # noqa: E501
]
)
subprocess.run(["unzip", "mirflickr25k_annotations_v080.zip"])
os.remove("mirflickr25k_annotations_v080.zip")
# get list of image ids
file_names = pd.Series(
[
f
for f in os.listdir(os.path.join(dataset_dir, "mirflickr"))
if f.endswith(".jpg")
]
)
# remove jpg extension
ids = file_names.str.replace(".jpg", "", regex=False)
df = mk.DataFrame({"id": ids, "file_name": file_names})
df["image"] = mk.ImageColumn.from_filepaths(
df["file_name"], base_dir=os.path.join(dataset_dir, "mirflickr")
)
for class_name in MIR_FLICKR_25K_CLASSES:
ids = (
"im"
+ pd.read_csv(
os.path.join(dataset_dir, class_name + ".txt"),
header=None,
names=["id"],
).astype(str)["id"]
)
df[class_name] = np.zeros(len(df))
df[class_name][df["id"].isin(ids)] = 1
return df
MIR_FLICKR_25K_CLASSES = [
"animals",
"baby",
"bird",
"car",
"clouds",
"dog",
"female",
"flower",
"food",
"indoor",
"lake",
"male",
"night",
"people",
"plant_life",
"portrait",
"river",
"sea",
"sky",
"structures",
"sunset",
"transport",
"tree",
"water",
]
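
# A minimal usage sketch, assuming `mk.get` resolves registered builders.
# Each class column built above is a 0/1 indicator, so boolean masks give
# the per-class subsets.
if __name__ == "__main__":
    df = mk.get("mirflickr", version="25k")
    dogs = df[df["dog"] == 1]
    print(len(dogs), "images annotated with 'dog'")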

# === end of meerkat-main/meerkat/datasets/mirflickr/__init__.py ===

import os
import subprocess
from typing import Dict
import numpy as np
import pandas as pd
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
@datasets.register()
class imagenet(DatasetBuilder):
VERSIONS = ["ilsvrc2012"]
info = DatasetInfo(
name="imagenet",
full_name="ImageNet",
# flake8: noqa
description="ImageNet is an image database organized according to the WordNet hierarchy (currently only the nouns), in which each node of the hierarchy is depicted by hundreds and thousands of images..",
homepage="https://www.image-net.org/",
tags=["image", "classification"],
citation=(
"@inproceedings{imagenet_cvpr09,"
"AUTHOR = {Deng, J. and Dong, W. and Socher, R. and Li, L.-J. and Li, K. and Fei-Fei, L.},"
"TITLE = {{ImageNet: A Large-Scale Hierarchical Image Database}},"
"BOOKTITLE = {CVPR09},"
"YEAR = {2009},"
'BIBSOURCE = "http://www.image-net.org/papers/imagenet_cvpr09.bib"}'
),
)
def build(self):
paths = pd.read_csv(
os.path.join(self.dataset_dir, "ILSVRC/ImageSets/CLS-LOC/train_cls.txt"),
delimiter=" ",
names=["path", "idx"],
)["path"]
train_df = paths.str.extract(r"(?P<synset>.*)/(?P<image_id>.*)")
train_df["path"] = paths.apply(
lambda x: os.path.join(
self.dataset_dir, "ILSVRC/Data/CLS-LOC/train", f"{x}.JPEG"
)
)
train_df["split"] = "train"
# load validation data
valid_df = pd.read_csv(
os.path.join(self.dataset_dir, "LOC_val_solution.csv")
).rename(columns={"ImageId": "image_id"})
valid_df["synset"] = valid_df["PredictionString"].str.split(" ", expand=True)[0]
valid_df["path"] = valid_df["image_id"].apply(
lambda x: os.path.join(
self.dataset_dir, "ILSVRC/Data/CLS-LOC/val", f"{x}.JPEG"
)
)
valid_df["split"] = "valid"
df = mk.DataFrame.from_pandas(
pd.concat([train_df, valid_df.drop(columns="PredictionString")])
)
df["image"] = mk.ImageColumn.from_filepaths(df["path"])
# mapping from synset to english
with open(os.path.join(self.dataset_dir, "LOC_synset_mapping.txt")) as f:
lines = f.read().splitlines()
        mapping = (
            pd.Series(lines)
            .str.split(" ", expand=True, n=1)
            .rename(columns={0: "synset", 1: "name"})
        )
        mapping_df = mk.DataFrame.from_pandas(mapping)
        # torchvision models use class indices corresponding to the order of the
        # LOC_synset_mapping.txt file, which we confirmed using the mapping provided here
        # https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a
        mapping_df["class_idx"] = np.arange(len(mapping_df))
        # merge the synset mapping into the image dataframe built above
        df = df.merge(mapping_df, how="left", on="synset")
return df
def download(self):
curr_dir = os.getcwd()
os.makedirs(self.dataset_dir, exist_ok=True)
os.chdir(self.dataset_dir)
subprocess.run(
args=[
"kaggle competitions download "
"-c imagenet-object-localization-challenge",
],
shell=True,
check=True,
)
subprocess.run(["unzip", "imagenet-object-localization-challenge.zip"])
subprocess.run(
["tar", "-xzvf", "imagenet_object_localization_patched2019.tar.gz"]
)
os.chdir(curr_dir)
def build_imagenet_dfs(
dataset_dir: str, download: bool = False
) -> Dict[str, mk.DataFrame]:
if download:
curr_dir = os.getcwd()
os.makedirs(dataset_dir, exist_ok=True)
os.chdir(dataset_dir)
# subprocess.run(
# args=[
# "kaggle competitions download "
# "-c imagenet-object-localization-challenge",
# ],
# shell=True,
# check=True,
# )
# subprocess.run(["unzip", "imagenet-object-localization-challenge.zip"])
subprocess.run(
["tar", "-xzvf", "imagenet_object_localization_patched2019.tar.gz"]
)
os.chdir(curr_dir)
# load training data
paths = pd.read_csv(
os.path.join(dataset_dir, "ILSVRC/ImageSets/CLS-LOC/train_cls.txt"),
delimiter=" ",
names=["path", "idx"],
)["path"]
train_df = paths.str.extract(r"(?P<synset>.*)/(?P<image_id>.*)")
train_df["path"] = paths.apply(
lambda x: os.path.join(dataset_dir, "ILSVRC/Data/CLS-LOC/train", f"{x}.JPEG")
)
train_df["split"] = "train"
# load validation data
valid_df = pd.read_csv(os.path.join(dataset_dir, "LOC_val_solution.csv")).rename(
columns={"ImageId": "image_id"}
)
valid_df["synset"] = valid_df["PredictionString"].str.split(" ", expand=True)[0]
valid_df["path"] = valid_df["image_id"].apply(
lambda x: os.path.join(dataset_dir, "ILSVRC/Data/CLS-LOC/val", f"{x}.JPEG")
)
valid_df["split"] = "valid"
df = mk.DataFrame.from_pandas(
pd.concat([train_df, valid_df.drop(columns="PredictionString")])
)
df["image"] = mk.ImageColumn.from_filepaths(df["path"])
# mapping from synset to english
with open(os.path.join(dataset_dir, "LOC_synset_mapping.txt")) as f:
lines = f.read().splitlines()
    mapping = (
        pd.Series(lines)
        .str.split(" ", expand=True, n=1)
        .rename(columns={0: "synset", 1: "name"})
    )
    mapping_df = mk.DataFrame.from_pandas(mapping)
    # torchvision models use class indices corresponding to the order of the
    # LOC_synset_mapping.txt file, which we confirmed using the mapping provided here
    # https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a
    mapping_df["class_idx"] = np.arange(len(mapping_df))
    # merge the synset mapping into the image dataframe built above
    df = df.merge(mapping_df, how="left", on="synset")
return df
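
# A worked example of the synset-mapping parse above. Each line of
# LOC_synset_mapping.txt is "<synset> <english name>", so a single split
# (n=1) separates the two, and the row order defines the class index.
if __name__ == "__main__":
    lines = [
        "n01440764 tench, Tinca tinca",
        "n01443537 goldfish, Carassius auratus",
    ]
    mapping = (
        pd.Series(lines)
        .str.split(" ", expand=True, n=1)
        .rename(columns={0: "synset", 1: "name"})
    )
    mapping["class_idx"] = np.arange(len(mapping))
    print(mapping)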

# === end of meerkat-main/meerkat/datasets/imagenet/__init__.py ===

import random
from typing import TYPE_CHECKING, Optional, Tuple
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
class TemporalDownsampling(object):
"""Video transformation for performing temporal downsampling (i.e. reading
in every Nth frame only). This can be used in tandem with VideoCell by
passing it into the `transform` keyword in the constructor. Can be used
with Compose in torchvision.
When using with TemporalCrop, it is highly recommended to put TemporalDownsampling
first, with TemporalCrop second.
Arguments:
downsample_factor (int): the factor by which the input video should be
downsampled. Must be a strictly positive integer.
time_dim (int): the time dimension of the input video.
Examples:
# Create a VideoCell from "/path/to/video.mp4" with time in dimension one,
showing every other frame
>>> cell = VideoCell("path/to/video.mp4",
time_dim=1,
transform=TemporalDownsampling(2, time_dim=1)
)
    Note that time_dim in the TemporalDownsampling call must match the time_dim
in the VideoCell constructor!
"""
def __init__(self, downsample_factor: int, time_dim: Optional[int] = 1):
self.downsample_factor = downsample_factor
self.time_dim = time_dim
if self.downsample_factor != int(self.downsample_factor):
raise ValueError("Fractional downsampling not supported.")
if self.downsample_factor < 1:
raise ValueError(
"Downsampling must be by a factor of 1 (no upsampling) or greater."
)
def __call__(self, video: "torch.Tensor") -> "torch.Tensor":
video_length = video.size(self.time_dim)
downsampled_indices = torch.arange(
0, video_length, self.downsample_factor
).long()
frames = torch.index_select(video, self.time_dim, downsampled_indices)
return frames
class TemporalCrop(object):
"""Video transformation for performing "temporal cropping:" the sampling of
a pre-defined number of clips, each with pre-defined length, from a full
video. Can be used with Compose in torchvision.
When used with TemporalDownsampling, it is highly recommended to put
TemporalCrop after TemporalDownsampling. Since TemporalCrop can change the
number of dimensions in the output tensor, due to clip selection, it is in
fact recommended to put this transform at the end of a video transformation
pipeline.
Arguments:
n_clips (int): the number of clips that should be sampled.
clip_length (int): the length of each clip (in the number of frames)
time_dim (int): the index of the time dimension of the video
clip_spacing (Optional; default "equal"): how to choose starting locations
for sampling clips. Keyword "equal" means that clip starting locations
are sampled from each 1/n_clips segment of the video. The other option,
"anywhere", places no restrictions on where clip starting locations
can be sampled.
padding_mode: (Optional; default "loop"): behavior if a requested clip
length would result a clip exceeding the end of the video. Keyword
"loop" results in a wrap-around to the start of the video. The other
option, "freeze", repeats the final frame until the requested clip
length is achieved.
sample_starting_location: (Optional; default False): whether to sample a
starting location (usually used for training) for a clip. Can be used
in tandem with "equal" during training to sample clips with random
starting locations distributed across time. Redundant if `clip_spacing`
is "anywhere".
stack_clips: (Optional; default True): whether to stack clips in a new
dimension (used in 3D action recognition backbones), or stack clips by
concatenating across the time dimension (used in 2D action recognition
backbones). Output shape if True is (n_clips, *video_shape). If False,
the output shape has the same number of dimensions as the original
video, but the time dimension is extended by a factor of n_clips.
Examples:
# Create a VideoCell from "/path/to/video.mp4" with time in dimension one,
sampling 10 clips each of length 16, sampling clips equally across the video
>>> cell = VideoCell("/path/to/video.mp4",
time_dim=1,
transform=TemporalCrop(10, 16, time_dim=1)
)
# output shape: (10, n_channels, 16, H, W)
# Create a VideoCell from "/path/to/video.mp4" with time in dimension one,
sampling 8 clips each of length 8, sampling clips from arbitrary video
locations and freezing the last frame if a clip exceeds the video length
>>> cell = VideoCell("/path/to/video.mp4",
time_dim=1,
transform=TemporalCrop(8, 8, time_dim=1, clip_spacing="anywhere",
padding_mode="freeze")
)
# output shape: (8, n_channels, 8, H, W)
# Create a VideoCell from "/path/to/video.mp4" with time in dimension one,
sampling one frame from each third of the video, concatenating the frames
in the time dimension
>>> cell = VideoCell("/path/to/video.mp4",
time_dim=1,
transform=TemporalCrop(1, 3, time_dim=1, clip_spacing="equal",
sample_starting_location=True, stack_clips=False)
)
# output shape: (n_channels, 3, H, W)
    Note that time_dim in the TemporalCrop call must match the time_dim
in the VideoCell constructor!
"""
def __init__(
self,
n_clips: int,
clip_length: int,
time_dim: Optional[int] = 1,
clip_spacing: Optional[str] = "equal",
padding_mode: Optional[str] = "loop",
sample_starting_location: Optional[bool] = False,
stack_clips: Optional[bool] = True,
):
self.n_clips = n_clips
self.clip_length = clip_length
self.time_dim = time_dim
self.clip_spacing = clip_spacing
self.padding_mode = padding_mode
self.stack_clips = stack_clips
self.sample_starting_location = sample_starting_location
if clip_length != int(clip_length):
raise ValueError("Clip length (# of frames per clip) must be an integer")
if clip_length <= 0:
raise ValueError(
"Clip length (# of frames per clip) must be a positive integer."
)
if n_clips != int(n_clips):
raise ValueError("Number of clips is not an integer")
if n_clips <= 0:
raise ValueError("Number of clips must be a positive integer")
assert clip_spacing in ["equal", "anywhere"]
assert padding_mode in ["loop", "freeze"]
def _get_sampling_boundaries(
self, video_length: int, clip_number: int
) -> Tuple[int, int]:
if self.clip_spacing == "equal":
start = int(clip_number / self.n_clips * video_length)
end = int((clip_number + 1) / self.n_clips * video_length)
else: # self.clip_spacing == "anywhere"
start = 0
end = video_length
return start, end
def _build_indices(self, start: int, length: int) -> "torch.LongTensor":
vanilla_indices = torch.arange(start, start + self.clip_length)
if vanilla_indices.max().item() >= length:
if self.padding_mode == "loop":
vanilla_indices %= length
else: # self.padding_mode == "freeze":
vanilla_indices[vanilla_indices >= length] = length - 1
return vanilla_indices
def __call__(self, video: "torch.Tensor") -> "torch.Tensor":
video_length = video.size(self.time_dim)
clips = []
for clip_number in range(self.n_clips):
start, end = self._get_sampling_boundaries(video_length, clip_number)
if self.sample_starting_location:
first_frame = random.randint(start, end)
else:
first_frame = start
indices = self._build_indices(first_frame, video_length)
clip = torch.index_select(video, self.time_dim, indices)
clips.append(clip)
if self.stack_clips: # new dim for clips (n_clips, n_channels, duration, h, w)
all_clips = torch.stack(clips, dim=0)
else: # concat clips in time dimension (n_channels, n_clips * duration, h, w)
all_clips = torch.cat(clips, dim=self.time_dim)
return all_clips
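
# A minimal sketch of composing the two transforms above with torchvision's
# Compose, on a random (channels, time, height, width) tensor: downsample
# by 2x in time, then sample 4 clips of 8 frames each.
if __name__ == "__main__":
    import torch
    from torchvision import transforms

    video = torch.randn(3, 64, 32, 32)  # (channels, time, height, width)
    pipeline = transforms.Compose(
        [
            TemporalDownsampling(2, time_dim=1),
            TemporalCrop(n_clips=4, clip_length=8, time_dim=1),
        ]
    )
    print(pipeline(video).shape)  # torch.Size([4, 3, 8, 32, 32])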

# === end of meerkat-main/meerkat/datasets/video_corruptions/transforms.py ===

# === end of meerkat-main/meerkat/datasets/video_corruptions/__init__.py (empty file) ===

import os
class stderr_suppress(object):
"""A context manager for doing a "deep suppression" of stdout and stderr in
Python.
This is necessary when reading in a corrupted video, or else stderr
will emit 10000s of errors via ffmpeg. Great for decoding IRL, not
great for loading 100s of corrupted videos.
"""
def __init__(self):
# Open a pair of null files
self.null_fd = os.open(os.devnull, os.O_RDWR)
# Save stderr (2) file descriptor.
self.save_fd = os.dup(2)
def __enter__(self):
        # Redirect stderr (2) to the null file.
os.dup2(self.null_fd, 2)
def __exit__(self, *_):
        # Re-assign the real stderr back to (2)
os.dup2(self.save_fd, 2)
# Close all file descriptors
os.close(self.null_fd)
os.close(self.save_fd)
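
# A minimal usage sketch: anything written to file descriptor 2 inside the
# block is swallowed, which is the point when bulk-decoding corrupted
# videos through ffmpeg.
if __name__ == "__main__":
    import sys

    with stderr_suppress():
        print("this vanishes", file=sys.stderr)
    print("stderr is restored", file=sys.stderr)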

# === end of meerkat-main/meerkat/datasets/video_corruptions/utils.py ===

import os
import meerkat as mk
from meerkat.columns.deferred.file import Downloader
DATASET_URL = "https://www.radar-service.eu/radar/en/dataset/tJzxrsYUkvPklBOw"
def build_dew_df(dataset_dir: str, download: bool = True) -> mk.DataFrame:
    if not os.path.exists(dataset_dir):
print(
f"Please download the dataset from {DATASET_URL} and place it at "
f"{dataset_dir}."
)
df = mk.DataFrame.from_csv(
os.path.join(dataset_dir, "data/dataset/meta.csv"), parse_dates=["date_taken"]
)
df["image"] = mk.ImageColumn(
df["url"],
loader=Downloader(cache_dir=os.path.join(dataset_dir, "data/images")),
)
return df

# === end of meerkat-main/meerkat/datasets/dew/__init__.py ===

import os
import pandas as pd
import meerkat as mk
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.tools.utils import deprecated
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
torch = LazyLoader("torch")
torchaudio = LazyLoader("torchaudio")
@datasets.register()
class yesno(DatasetBuilder):
"""YESNO dataset.
Reference:
https://www.openslr.org/1/
"""
info = DatasetInfo(
name="yesno",
full_name="YesNo",
description=(
"This dataset contains 60 .wav files, sampled at 8 kHz. "
"All were recorded by the same male speaker, in Hebrew. "
"In each file, the individual says 8 words; each word is either the "
"Hebrew for 'yes' or 'no', so each file is a random sequence of 8 yes-es "
"or noes. There is no separate transcription provided; the sequence is "
"encoded in the filename, with 1 for yes and 0 for no."
),
homepage="https://www.openslr.org/1/",
tags=["audio", "classification"],
)
VERSIONS = ["release1"]
def download(self):
os.makedirs(self.dataset_dir, exist_ok=True)
torchaudio.datasets.YESNO(root=self.dataset_dir, download=True)
def is_downloaded(self) -> bool:
return super().is_downloaded() and os.path.exists(
os.path.join(self.dataset_dir, "waves_yesno")
)
def build(self):
dataset = torchaudio.datasets.YESNO(root=self.dataset_dir, download=False)
df = mk.DataFrame(
{
"id": dataset._walker,
"audio": mk.files(
pd.Series(dataset._walker) + ".wav", base_dir=dataset._path
),
"labels": torch.tensor(
[[int(c) for c in fileid.split("_")] for fileid in dataset._walker]
),
}
)
return df
@deprecated("mk.get('yesno')")
def get_yesno(dataset_dir: str, download: bool = True):
"""Load YESNO as a Meerkat DataFrame.
    Args:
        dataset_dir: directory where the dataset is (or will be) stored
        download: whether to download the dataset if it is not already there
    Returns:
        a DataFrame containing columns `id`, `audio` and `labels`
"""
    dataset = torchaudio.datasets.YESNO(root=dataset_dir, download=download)
df = mk.DataFrame(
{
"id": dataset._walker,
"audio": mk.AudioColumn(
pd.Series(dataset._walker) + ".wav", base_dir=dataset._path
),
"labels": torch.tensor(
[[int(c) for c in fileid.split("_")] for fileid in dataset._walker]
),
}
)
return df
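
# A worked example of the label decoding above: the eight yes/no words are
# encoded directly in each YESNO filename, with 1 for yes and 0 for no.
if __name__ == "__main__":
    fileid = "0_0_1_0_1_0_1_1"  # hypothetical file id
    print([int(c) for c in fileid.split("_")])  # [0, 0, 1, 0, 1, 0, 1, 1]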

# === end of meerkat-main/meerkat/datasets/torchaudio/__init__.py ===

import os
import subprocess
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import extract
@datasets.register()
class fer(DatasetBuilder):
VERSIONS = ["plus"]
info = DatasetInfo(
name="fer",
full_name="Facial Expression Recognition Challenge",
# flake8: noqa
description="ImageNet is an image database organized according to the WordNet hierarchy (currently only the nouns), in which each node of the hierarchy is depicted by hundreds and thousands of images..",
# flake8: noqa
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data?select=icml_face_data.csv",
tags=["image", "facial emotion recognition"],
)
def build(self):
return None
def download(self):
curr_dir = os.getcwd()
os.makedirs(self.dataset_dir, exist_ok=True)
os.chdir(self.dataset_dir)
subprocess.run(
args=[
"kaggle competitions download "
"-c challenges-in-representation-learning-facial-expression-recognition-challenge",
],
shell=True,
check=True,
)
# extract(
# os.path.join(
# self.dataset_dir,
# "challenges-in-representation-learning-facial-expression-recognition-challenge.zip",
# ),
# "fer2013"
# )
extract(
os.path.join(self.dataset_dir, "fer2013", "fer2013.tar.gz"),
os.path.join(self.dataset_dir, "fer2013", "images"),
)
os.chdir(curr_dir)

# === end of meerkat-main/meerkat/datasets/fer/__init__.py ===

import json
import os
from typing import Dict, Mapping
from tqdm import tqdm
import meerkat as mk
def crop_object(row: Mapping[str, object]):
img = row["image"]
length = max(row["h"], row["w"])
box = (
max(row["x"] - ((length - row["w"]) / 2), 0),
max(row["y"] - ((length - row["h"]) / 2), 0),
min(row["x"] + row["w"] + ((length - row["w"]) / 2), img.width),
min(row["y"] + row["h"] + ((length - row["h"]) / 2), img.height),
)
return img.crop(box)
def build_gqa_dfs(dataset_dir: str, write: bool = False) -> Dict[str, mk.DataFrame]:
objects = []
images = []
relations = []
attributes = []
for split in ["train", "val"]:
print(f"Loading {split} scene graphs...")
with open(os.path.join(dataset_dir, f"{split}_sceneGraphs.json")) as f:
graphs = json.load(f)
for image_id, graph in tqdm(graphs.items()):
image_id = int(image_id) # convert to int for faster filtering and joins
for object_id, obj in graph.pop("objects").items():
object_id = int(
object_id
) # convert to int for faster filtering and joins
for relation in obj.pop("relations"):
relations.append(
{
"subject_object_id": object_id,
"object_id": int(relation["object"]),
"name": relation["name"],
}
)
for attribute in obj.pop("attributes"):
attributes.append({"object_id": object_id, "attribute": attribute})
objects.append({"object_id": object_id, "image_id": image_id, **obj})
images.append({"image_id": image_id, **graph})
# prepare DataFrames
print("Preparing DataFrames...")
image_df = mk.DataFrame(images)
image_df["image"] = mk.ImageColumn(
image_df["image_id"].map(
lambda x: os.path.join(dataset_dir, "images", f"{x}.jpg")
)
)
object_df = mk.DataFrame(objects).merge(
image_df[["image_id", "image", "height", "width"]], on="image_id"
)
object_df["object_image"] = object_df.to_lambda(crop_object)
# filter out objects with no width or height
object_df = object_df[(object_df["h"] != 0) & (object_df["w"] != 0)]
# filter out objects whose bounding boxes are not contained within the image
object_df = object_df[
(object_df["x"] < object_df["width"]) & (object_df["y"] < object_df["height"])
]
dfs = {
"images": image_df,
"objects": object_df,
"relations": mk.DataFrame(relations),
"attributes": mk.DataFrame(attributes),
}
if write:
write_gqa_dfs(dfs=dfs, dataset_dir=dataset_dir)
return dfs
def read_gqa_dfs(dataset_dir: str) -> Dict[str, mk.DataFrame]:
return {
key: mk.DataFrame.read(os.path.join(dataset_dir, f"{key}.mk"))
for key in ["attributes", "relations", "objects", "images"]
}
def write_gqa_dfs(dfs: Mapping[str, mk.DataFrame], dataset_dir: str):
for key, df in dfs.items():
df.write(os.path.join(dataset_dir, f"{key}.mk"))
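
# A worked example of the square-crop math in `crop_object` above: a 20x40
# box is padded symmetrically to 40x40 (the longer side wins), clamped to
# the image bounds.
if __name__ == "__main__":
    import PIL.Image

    img = PIL.Image.new("RGB", (100, 100))
    row = {"image": img, "x": 10, "y": 30, "w": 20, "h": 40}
    # length = max(40, 20) = 40; horizontal padding = (40 - 20) / 2 = 10
    print(crop_object(row).size)  # (40, 40)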

# === end of meerkat-main/meerkat/datasets/gqa/__init__.py ===

import datetime
import os
import PIL
import requests
import meerkat as mk
from meerkat.columns.deferred.image import load_image
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
REPO = "https://github.com/NationalGalleryOfArt/opendata.git"
def _write_empty_image(dst):
img = PIL.Image.new("RGB", (32, 32), color="black")
img.save(dst, format="JPEG")
@datasets.register()
class wikipaintings(DatasetBuilder):
VERSIONS = ["main"]
info = DatasetInfo(
name="wikipaintings",
full_name="Paintings from WikiData",
# flake8: noqa
description="",
homepage="https://www.wikidata.org/wiki/Wikidata:Main_Page",
tags=["art"],
citation=None,
)
def build(self):
df = mk.read(os.path.join(self.dataset_dir, "data.mk"))
df = df[~df["qid"].duplicated()]
df = df[~df["title"].duplicated()]
return df
def download(self):
url = "https://query.wikidata.org/sparql"
query = """
        SELECT ?painting ?paintingLabel ?artistLabel ?image ?date
WHERE {
?painting wdt:P31 wd:Q3305213.
?painting wdt:P170 ?artist.
?painting wdt:P18 ?image.
?painting wdt:P571 ?date.
SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
}
"""
r = requests.get(url, params={"format": "json", "query": query})
data = r.json()
def extract_year(date: str):
try:
                return datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ").year
except ValueError:
return -1
df = mk.DataFrame(
[
{
"qid": row["painting"]["value"].split("/")[-1],
"title": row["paintingLabel"]["value"],
"image_url": row["image"]["value"],
"year": extract_year(row["date"]["value"]),
"artist": row["artistLabel"]["value"],
}
for row in data["results"]["bindings"]
]
)
df.write(os.path.join(self.dataset_dir, "data.mk"))
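
# A worked example of the `extract_year` helper above: Wikidata returns
# ISO 8601 timestamps, and unparseable dates fall back to -1.
if __name__ == "__main__":
    ts = "1503-06-01T00:00:00Z"  # hypothetical inception date for a painting
    print(datetime.datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ").year)  # 1503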

# === end of meerkat-main/meerkat/datasets/wikipaintings/__init__.py ===

import os
import pickle
import shutil
import subprocess
from glob import glob
import numpy as np
import pandas as pd
import PIL
from PIL import Image
from meerkat import column, env
from meerkat.cells.volume import MedicalVolumeCell
from meerkat.columns.deferred.file import FileColumn
from meerkat.dataframe import DataFrame
from meerkat.tools.lazy_loader import LazyLoader
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
transforms = LazyLoader("torchvision.transforms")
if env.package_available("pydicom"):
import pydicom
else:
pydicom = None
GAZE_DATA_URL = "https://raw.githubusercontent.com/robustness-gym/meerkat/dev/examples/03-med_img/cxr_gaze_data.json" # noqa: E501
@datasets.register()
class siim_cxr(DatasetBuilder):
"""The SIIM-CXR dataset from Kaggle.
Reference:
https://www.kaggle.com/competitions/siim-acr-pneumothorax-segmentation/data
"""
VERSIONS = [
"stage_2",
"stage_1",
]
info = DatasetInfo(
name="siim_cxr",
full_name="SSIM-ACR Pneumothorax Segmentation",
description=(
"SSIM CXR is a dataset of chest X-rays of patients with and without "
"pneumothorax. "
"This dataset consists of RLE encoded masks for the pneumothorax regions. "
),
homepage="https://www.kaggle.com/competitions/siim-acr-pneumothorax-segmentation/data", # noqa: E501
tags=["image", "classification", "segmentation"],
)
def download(self):
if self.version == "stage_1":
self._download_stage_1()
elif self.version == "stage_2":
self._download_stage_2()
def _download_stage_1(self):
tar_file = os.path.join(self.dataset_dir, "dicom-images-train.tar.gz")
dirpath = os.path.join(self.dataset_dir, "dicom-images-train")
if not os.path.exists(tar_file):
raise ValueError("Please download the stage 1 dataset tar file.")
if not os.path.exists(dirpath):
extract(tar_file, self.dataset_dir)
assert os.path.isdir(dirpath)
# Download the pneumothorax labels.
labels = os.path.join(self.dataset_dir, "train-rle.csv")
if not os.path.exists(labels):
raise ValueError("Please download the stage 1 labels.")
# Download the chest tube labels.
path = download_url(
"https://github.com/khaledsaab/spatial_specificity/raw/main/cxr_tube_dict.pkl", # noqa: E501
self.dataset_dir,
) # noqa: E501
shutil.move(path, os.path.join(self.dataset_dir, "cxr_tube_dict.pkl"))
def _download_stage_2(self):
"""Download the SIIM CXR dataset from kaggle."""
if not env.package_available("kaggle"):
raise ImportError("Please install kaggle using `pip install kaggle`")
# download and integrate gaze data
# os.environ["KAGGLE_USERNAME"] = self.kaggle_username
# os.environ["KAGGLE_KEY"] = self.kaggle_key
out = subprocess.run(
[
"kaggle",
"competitions",
"download",
"-c",
"siim-acr-pneumothorax-segmentation",
"-p",
self.dataset_dir,
]
)
if out.returncode != 0:
raise ValueError("Downloading the kaggle dataset failed.")
expected_zip_file = os.path.join(
self.dataset_dir, "siim-acr-pneumothorax-segmentation.zip"
)
if not os.path.exists(expected_zip_file):
raise ValueError("Downloaded dataset is not in the expected format.")
extract(expected_zip_file, self.dataset_dir)
def build(self):
if self.version == "stage_1":
return self._build_stage_1()
elif self.version == "stage_2":
return self._build_stage_2()
def _build_stage_1(self):
"""Build the SIIM CXR dataset (stage 1 version)."""
# Get filenames.
dcm_folder = os.path.join(self.dataset_dir, "dicom-images-train")
_files = _collect_all_dicoms(dcm_folder)
df = pd.DataFrame({"filename": column(_files)})
df["img_id"] = df["filename"].map(
lambda filename: os.path.splitext(os.path.basename(filename))[0]
)
# Get pneumothorax labels.
label_df = self._build_stage_1_labels()
        # important to perform a left join here, because there are some images in the
        # directory without labels in `label_df` and we only want those with labels
df = df.merge(label_df, how="left", on="img_id")
df = DataFrame.from_pandas(df, primary_key="img_id").drop("index")
# Load the data
df["img"] = FileColumn(
_files, type="image", loader=_load_siim_cxr, base_dir=dcm_folder
)
# df["img_tensor"] = df["img"].defer(cxr_transform)
# drop nan columns
df = df[~df["pmx"].isna()]
return df
def _build_stage_1_labels(self):
segment_df = pd.read_csv(os.path.join(self.dataset_dir, "train-rle.csv"))
segment_df = segment_df.rename(
columns={"ImageId": "img_id", " EncodedPixels": "encoded_pixels"}
)
        # there are some images that were segmented by multiple annotators,
        # we'll just take the first
segment_df = segment_df[~segment_df.img_id.duplicated(keep="first")]
# get binary labels for pneumothorax, any row with a "-1" for
# encoded pixels is considered a negative
segment_df["pmx"] = (segment_df.encoded_pixels != "-1").astype(int)
# Chest tube labels.
with open(os.path.join(self.dataset_dir, "cxr_tube_dict.pkl"), "rb") as f:
tube_dict = pickle.load(f)
img_id = tube_dict.keys()
values = [tube_dict[k] for k in img_id]
tube_df = pd.DataFrame({"img_id": img_id, "tube": values})
segment_df = segment_df.merge(tube_df, how="left", on="img_id")
return segment_df[["img_id", "pmx", "tube"]]
def _build_stage_2(self):
"""Build the SIIM CXR dataset."""
dcm_folder = os.path.join(self.dataset_dir, "stage_2_images")
_files = os.listdir(dcm_folder)
_files = [fname for fname in _files if fname.endswith(".dcm")]
df = DataFrame({"fname": column(_files)})
# Load the data
df["img"] = FileColumn(
_files, type="image", loader=_load_siim_cxr, base_dir=dcm_folder
)
df["img_tensor"] = df["img"].defer(cxr_transform)
return df
def _collect_all_dicoms(root_dir: str):
"""Return the relative paths for all dicoms in a directory."""
# TODO: make this work with windows
remove_str = root_dir
if remove_str[-1] != "/":
remove_str += "/"
relative_paths = []
for root, dirs, files in os.walk(root_dir):
for file in files:
if file.endswith(".dcm"):
file_path = os.path.join(root, file)
# Remove the root directory from the file path.
file_path = file_path.replace(remove_str, "")
relative_paths.append(file_path)
return relative_paths
def _load_siim_cxr(filepath) -> PIL.Image:
"""Load a single image from the SIIM-CXR dataset."""
    return PIL.Image.fromarray(pydicom.dcmread(filepath).pixel_array)
def download_siim_cxr(
dataset_dir: str,
kaggle_username: str,
kaggle_key: str,
download_gaze_data: bool = True,
include_mock_reports: bool = True,
):
"""Download the dataset from the SIIM-ACR Pneumothorax Segmentation
challenge. https://www.kaggle.com/c/siim-acr-pneumothorax-
segmentation/data.
Args:
dataset_dir (str): Path to directory where the dataset will be downloaded.
kaggle_username (str): Your kaggle username.
kaggle_key (str): A kaggle API key. In order to use the Kaggle’s public API, you
must first authenticate using an API token. From the site header, click on
your user profile picture, then on “My Account” from the dropdown menu. This
will take you to your account settings at https://www.kaggle.com/account.
Scroll down to the section of the page labelled API: To create a new token,
click on the “Create New API Token” button. This will download a json file
with a "username" and "key" field. Copy and paste the "key" field and pass
it in as `kaggle_key`.
Instructions copied from Kaggle API docs: https://www.kaggle.com/docs/api
download_gaze_data (str): Download a pkl file containing eye-tracking data
collected on a radiologist interpreting the xray.
"""
if not env.package_available("kaggle"):
raise ImportError("Please install kaggle using `pip install kaggle`")
# download and integrate gaze data
os.environ["KAGGLE_USERNAME"] = kaggle_username
os.environ["KAGGLE_KEY"] = kaggle_key
out = subprocess.run(
[
"kaggle",
"datasets",
"download",
"-d",
"seesee/siim-train-test",
"-p",
dataset_dir,
]
)
if out.returncode != 0:
raise ValueError("Downloading the kaggle dataset failed.")
if os.path.exists(os.path.join(dataset_dir, "siim-train-test.zip")):
subprocess.run(
[
"unzip",
"-q",
os.path.join(dataset_dir, "siim-train-test.zip"),
"-d",
dataset_dir,
]
)
os.remove(os.path.join(dataset_dir, "siim-train-test.zip"))
# get segment annotations
segment_df = pd.read_csv(os.path.join(dataset_dir, "siim", "train-rle.csv"))
segment_df = segment_df.rename(
columns={"ImageId": "image_id", " EncodedPixels": "encoded_pixels"}
)
    # there are some images that were segmented by multiple annotators, we'll just take
# the first
segment_df = segment_df[~segment_df.image_id.duplicated(keep="first")]
# get binary labels for pneumothorax, any row with a "-1" for encoded pixels is
# considered a negative
segment_df["pmx"] = (segment_df.encoded_pixels != "-1").astype(int)
# start building up a main dataframe with a few `merge` operations (i.e. join)
df = segment_df
# get filepaths for all images in the "dicom-images-train" directory
filepaths = sorted(
glob(os.path.join(dataset_dir, "siim", "dicom-images-train/*/*/*.dcm"))
)
filepath_df = pd.DataFrame(
[
{
"filepath": filepath,
"image_id": os.path.splitext(os.path.basename(filepath))[0],
}
for filepath in filepaths
]
)
# important to perform a left join here, because there are some images in the
    # directory without labels in `segment_df` and we only want those with labels
df = df.merge(filepath_df, how="left", on="image_id")
if download_gaze_data:
subprocess.run(
[
"curl",
GAZE_DATA_URL,
"--output",
os.path.join(dataset_dir, "cxr_gaze_data.json"),
]
)
if include_mock_reports:
df["report"] = (df["pmx"] == 1).apply(_get_mock_report)
df.to_csv(os.path.join(dataset_dir, "siim_cxr.csv"), index=False)
CXR_MEAN = 0.48865
CXR_STD = 0.24621
CXR_SIZE = 224
def cxr_transform_pil(volume: MedicalVolumeCell):
array = volume._volume.squeeze()
return Image.fromarray(np.uint8(array))
def cxr_transform(volume: MedicalVolumeCell):
if isinstance(volume, MedicalVolumeCell):
img = cxr_transform_pil(volume)
else:
img = volume
img = transforms.Compose(
[
transforms.Resize([CXR_SIZE, CXR_SIZE]),
transforms.ToTensor(),
transforms.Normalize(CXR_MEAN, CXR_STD),
]
)(img)
return img.repeat([3, 1, 1])
def _get_mock_report(pmx: bool):
state = (np.random.choice(["severe", "moderate"])) if pmx else "no"
return np.random.choice(
[
(
"Cardiac size cannot be evaluated. Large left pleural effusion is new. "
"Small right effusion is new. The upper lungs are clear. Right lower "
f" lobe opacities are better seen in prior CT. There is {state} "
" pneumothorax. There are mild degenerative changes in the thoracic "
"spine."
),
(
f"There is {state} pneumothorax. There are mild degenerative changes "
"in the thoracic spine. The upper lungs are clear. Right lower lobe "
"opacities are better seen in prior CT."
"There are mild degenerative changes in the thoracic spine."
),
(
"The upper lungs are clear. Right lower lobe opacities are better "
f"seen in prior CT. There is {state} pneumothorax. "
"There are mild degenerative changes in the thoracic spine."
),
]
)
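
# A worked example of the label binarization in `_build_stage_1_labels`
# above: any row whose RLE mask is the sentinel "-1" is a negative (no
# pneumothorax).
if __name__ == "__main__":
    demo = pd.DataFrame({"encoded_pixels": ["-1", "12 5 40 3", "-1"]})  # toy RLE strings
    demo["pmx"] = (demo.encoded_pixels != "-1").astype(int)
    print(demo["pmx"].tolist())  # [0, 1, 0]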

# === end of meerkat-main/meerkat/datasets/siim_cxr/__init__.py ===

from __future__ import annotations
import io
import os
from typing import Sequence
from google.cloud import storage
from meerkat import ImageColumn
from meerkat.columns.deferred.base import DeferredCell, DeferredColumn
from meerkat.columns.pandas_column import ScalarColumn
class GCSImageCell(DeferredCell):
def __init__(
self,
transform: callable = None,
loader: callable = None,
data: str = None,
):
self.loader = self.default_loader if loader is None else loader
self.transform = transform
self._data = data
def fn(self, filepath: str):
image = self.loader(filepath)
if self.transform is not None:
image = self.transform(image)
return image
def __eq__(self, other):
return (
(other.__class__ == self.__class__)
and (self.data == other.data)
and (self.transform == other.transform)
and (self.loader == other.loader)
)
def __repr__(self):
transform = getattr(self.transform, "__qualname__", repr(self.transform))
dirs = self.data.split("/")
short_path = ("" if len(dirs) <= 2 else ".../") + "/".join(dirs[-2:])
return f"ImageCell({short_path}, transform={transform})"
class GCSImageColumn(ImageColumn):
def __init__(
self,
blob_names: ScalarColumn = None,
bucket_name: str = None,
project: str = None,
transform: callable = None,
loader: callable = None,
writer: callable = None,
local_dir: str = None,
_skip_cache: bool = False,
*args,
**kwargs,
):
super(GCSImageColumn, self).__init__(
blob_names, transform, loader, *args, **kwargs
)
self.project = project
self.bucket_name = bucket_name
self._set_state()
storage_client = storage.Client(project=project)
self.bucket = storage_client.bucket(bucket_name, user_project=project)
self.loader = (lambda x: x) if loader is None else loader
self.writer = writer
self.local_dir = local_dir
self._skip_cache = _skip_cache
def _get_formatter(self) -> callable:
# downloading the images from gcp for every visualization is probably not
# what we want as it makes dataframe visualization very slow
return None
def _create_cell(self, data: object) -> DeferredCell:
# don't want to create a lambda
return DeferredColumn._create_cell(self, data)
def fn(self, blob_name: str):
if (
self.local_dir is not None
and os.path.exists(os.path.join(self.local_dir, str(blob_name)))
and not self._skip_cache
):
# fetch locally if it's been cached locally
return super(GCSImageColumn, self).fn(
os.path.join(self.local_dir, str(blob_name))
)
# otherwise pull from GCP
out = self.loader(
io.BytesIO(self.bucket.blob(str(blob_name)).download_as_bytes())
)
if self.writer is not None and self.local_dir is not None:
# cache locally if writer and local dir are both provided
path = os.path.join(self.local_dir, str(blob_name))
os.makedirs(os.path.dirname(path), exist_ok=True)
self.writer(path, out)
return out
@classmethod
def from_blob_names(
cls,
blob_names: Sequence[str],
loader: callable = None,
transform: callable = None,
*args,
**kwargs,
):
if not isinstance(blob_names, ScalarColumn):
blob_names = ScalarColumn(blob_names)
return cls(
blob_names=blob_names,
loader=loader,
transform=transform,
*args,
**kwargs,
)
@classmethod
def _state_keys(cls) -> set:
"""List of attributes that describe the state of the object."""
return super()._state_keys() | {
"bucket_name",
"project",
"local_dir",
"writer",
"_skip_cache",
}
@classmethod
def _clone_keys(cls) -> set:
# need to avoid reaccessing bucket on clone, too slow
return {"bucket"}
def _set_state(self, state: dict = None):
if state is not None:
state["base_dir"] = state.get("base_dir", None) # backwards compatibility
self.__dict__.update(state)
if state is None or "bucket" not in state:
storage_client = storage.Client(project=self.project)
self.bucket = storage_client.bucket(
self.bucket_name, user_project=self.project
)
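
# A minimal usage sketch for the column above, with hypothetical blob names
# and project id. MIMIC's buckets are requester-pays, so `project` is the
# GCP project billed for downloads; images are cached under `local_dir` on
# first access.
if __name__ == "__main__":
    from PIL import Image

    col = GCSImageColumn.from_blob_names(
        blob_names=["files/p10/p10000032/s50414267/example.jpg"],  # hypothetical
        bucket_name="mimic-cxr-jpg-2.0.0.physionet.org",
        project="my-gcp-project",  # hypothetical
        loader=Image.open,
        local_dir="/tmp/mimic_cache",  # hypothetical
    )
    img = col[0]()  # resolves the deferred cell: pulls from GCS or the cache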

# === end of meerkat-main/meerkat/datasets/mimic_iv/gcs.py ===

import logging
import os
import re
import subprocess
from typing import Iterable
import google.auth
import pandas as pd
from google.cloud import bigquery, bigquery_storage, storage
from PIL import Image
from pydicom.filereader import dcmread
import meerkat as mk
from .gcs import GCSImageColumn
from .modules import TABLES
from .reports import ReportColumn
logger = logging.getLogger(__name__)
def build_mimic_df(
dataset_dir: str,
gcp_project: str,
tables: Iterable[str] = None,
excluded_tables: Iterable[str] = None,
reports: bool = False,
cxr_dicom: bool = True,
cxr_jpg: bool = True,
split: bool = True,
download_jpg: bool = False,
download_resize: int = None,
write: bool = False,
) -> mk.DataFrame:
"""Builds a `DataFrame` for accessing data from the MIMIC-CXR database
    (https://physionet.org/content/mimic-cxr/2.0.0/). The MIMIC-CXR database
integrates chest X-ray imaging data with structured EHR data from Beth
Israel Deaconess Medical Center. The full database has an uncompressed size
of over 5 TB. This function quickly builds a `DataFrame` that can be used
to explore, slice and download the database. Building the DataFrame takes.
~1 minute (when not downloading the radiology reports). The large CXR DICOM
and JPEG files are not downloaded, but lazily pulled from Google Cloud
Storage (GCS) only when they are accessed. This makes it possible to
inspect and explore that data without downloading the full 5 TB.
Note: model training will likely bottleneck on the GCS downloads, so it is
    recommended that you cache the JPEG images locally before training. This can be
    accomplished by setting a `writer` and running a map over the data.
    ```
    df["jpg_img"].writer = lambda path, img: img.save(path)
    df["jpg_img"].map(lambda x: True)
```
The images will be saved in `dataset_dir`. This will take several hours for the full
dataset. You can also slice down to a subset of the dataset before running the map.
Each row corresponds to a single chest X-ray image (stored in both DICOM format and
JPEG in the MIMIC database). Each row is uniquely identified by the "dicom_id"
column. Note that a single chest X-ray study (identified by "study_id" column) may
consist of multiple images and a single patient (identified by "subject_id" column)
may have multiple studies in the database. The columns in the DataFrame can be
grouped into four categories:
1. (`PandasSeriesColumn`) Metadata and labels pulled from tables in the MIMIC-IV
EHR database (e.g."pneumonia", "ethnicity", "view_position", "gender"). For
more information on the tables see: https://mimic.mit.edu/docs/iv/modules/.
For more information on the CheXpert labels see:
https://physionet.org/content/mimic-cxr-jpg/2.0.0/
2. (`GCSImageColumn`) DICOM and JPEG image files of the chest xrays.These
columns do not actually hold the images, but instead lazily load from the
GCS when they're indexed.
3. (`ReportColumn`) The radiology reports for each exam are downloaded to disk,
and lazily loaded when accessed.
The arguments below can be used to specify which of the columns to include in the
DataFrame.
Args:
dataset_dir (str): A local directory in which downloaded data will be cached.
gcp_project (str): The Google Cloud Platform project that will be billed for
downloads from the database (MIMIC has requester-pays enabled). If you do
not have GCP project, see instructions for creating one here:
https://cloud.google.com/resource-manager/docs/creating-managing-projects
tables (Iterable[str], optional): A subset of ["patient", "admit", "labels",
"dicom_meta"] specifying which tables to include in the DataFrame.
Defaults to None, in which case all of the tables listed in
"meerkat.contrib.mimic.TABLES" will be included.
excluded_tables (Iterable[str], optional): A subset of ["patient", "admit",
"labels", "dicom_meta"] specifying which tables to exclude from the
DataFrame. Defaults to None, in which case none are excluded.
reports (bool, optional): Download reports if they aren't already downloaded
in `dataset_dir` and add a "report" column to the DataFrame. Defaults to
False.
        cxr_dicom (bool, optional): Add a `GCSImageColumn` called "cxr_dicom" to the
DataFrame for the DICOM files for each image. Defaults to True.
        cxr_jpg (bool, optional): Add a `GCSImageColumn` called "cxr_jpg" to the
DataFrame for the JPEG files for each image. Defaults to True.
split (bool, optional): Add a "split" column with "train", "validate" and "test"
splits. Defaults to True.
download_jpg (bool, optional): Download jpegs for all the scans in the dataset
to `dataset_dir`. Expect this to take several hours. Defaults to False.
download_resize (bool, optional): Resize the images before saving them to disk.
Defaults to None, in which case the images are not resized.
        write (bool, optional): Write the dataframe to the directory.
Returns:
        DataFrame: The MIMIC `DataFrame` with the requested columns.
"""
os.environ["GOOGLE_CLOUD_PROJECT"] = gcp_project
tables = set(TABLES.keys() if tables is None else tables)
if excluded_tables is not None:
tables -= set(excluded_tables)
# must include the cxr_records table
tables |= set(["cxr_records"])
fields = [
(
f"{table}.{field[0]} AS {field[1]}"
if isinstance(field, tuple)
else f"{table}.{field}"
)
for table in tables
for field in TABLES[table]["fields"]
if table != "admit"
]
query_str = f"""
SELECT {','.join(fields)}
FROM `physionet-data.mimic_cxr.record_list` cxr_records
LEFT JOIN `physionet-data.mimic_cxr.study_list` cxr_studies
ON cxr_records.study_id = cxr_studies.study_id
"""
if "labels" in tables:
query_str += """
LEFT JOIN `physionet-data.mimic_cxr.chexpert` labels
ON cxr_records.study_id = labels.study_id
"""
if "patients" in tables:
query_str += """
LEFT JOIN `physionet-data.mimic_core.patients` patients
ON cxr_records.subject_id = patients.subject_id
"""
if "dicom_meta" in tables:
query_str += """
LEFT JOIN `physionet-data.mimic_cxr.dicom_metadata_string` AS dicom_meta
ON cxr_records.dicom_id = dicom_meta.dicom
"""
elif "admit" in tables:
# need the StudyDate to include admission data
query_str += """
LEFT JOIN (SELECT StudyDate, dicom
FROM `physionet-data.mimic_cxr.dicom_metadata_string`) AS meta
ON cxr_records.dicom_id = meta.dicom
"""
print(f"Querying MIMIC database: `gcp_project`={gcp_project}, `tables`={tables}.")
df = query_mimic_db(query_str, gcp_project=gcp_project)
if "admit" in tables:
# joining in admissions data is more complicated because the study_list table
# does not include an index into the admissions table
# instead we match each study to the first admission for which the study date
# falls between the admission and discharge dates
fields = ["admit." + field for field in TABLES["admit"]["fields"]] + [
"subject_id"
]
admit_df = query_mimic_db(
query_str=f"""
SELECT {','.join(fields)}
FROM `physionet-data.mimic_core.admissions` admit
""",
gcp_project=gcp_project,
)
admit_df = df[["subject_id", "StudyDate", "study_id"]].merge(
admit_df, on="subject_id"
)
study_date = pd.to_datetime(admit_df["StudyDate"])
admit_df = admit_df[
(study_date >= admit_df["admittime"].dt.date)
& (study_date <= admit_df["dischtime"].dt.date)
]
df = df.merge(
admit_df.drop(columns=["StudyDate", "subject_id"]).drop_duplicates(
subset="study_id"
),
how="left",
on="study_id",
)
# convert dicom metadata from str to float
if "dicom_meta" in tables:
for field in TABLES["dicom_meta"]["fields"]:
try:
df[field] = df[field].astype(float)
except ValueError:
# if we can't convert, just keep the field as str
continue
# convert to snake case
df = df.rename(columns=lambda x: re.sub(r"(?<!^)(?<!_)(?=[A-Z])", "_", x).lower())
print("Preparing DataFrame...")
df = mk.DataFrame.from_pandas(df)
# add GCSImageColumn for the jpg version of the xrays
if cxr_jpg:
paths = pd.Series(df["dicom_path"].data)
df["jpg_path"] = paths.str.split(".").str[0] + ".jpg"
df["cxr_jpg"] = GCSImageColumn.from_blob_names(
blob_names=df["jpg_path"],
bucket_name="mimic-cxr-jpg-2.0.0.physionet.org",
project=gcp_project,
loader=Image.open,
local_dir=dataset_dir,
)
# add GCSImageColumn for the dicoms
if cxr_dicom:
df["cxr_dicom"] = GCSImageColumn.from_blob_names(
blob_names=df["dicom_path"],
bucket_name="mimic-cxr-2.0.0.physionet.org",
project=gcp_project,
loader=dcmread,
local_dir=dataset_dir,
)
if reports:
reports_dir = os.path.join(dataset_dir, "mimic-cxr-reports")
if not os.path.exists(reports_dir):
# download and unzip reports
print("Downloading reports...")
storage_client = storage.Client(project=gcp_project)
bucket = storage_client.bucket(
"mimic-cxr-2.0.0.physionet.org", user_project=gcp_project
)
filepath = os.path.join(dataset_dir, "mimic-cxr-reports.zip")
bucket.blob("mimic-cxr-reports.zip").download_to_filename(filepath)
subprocess.run(
[
"unzip",
filepath,
"-d",
os.path.join(dataset_dir, "mimic-cxr-reports"),
]
)
df["report"] = ReportColumn.from_filepaths(
reports_dir + "/" + df["report_path"]
)
if split:
print("Downloading splits...")
storage_client = storage.Client(project=gcp_project)
bucket = storage_client.bucket(
"mimic-cxr-jpg-2.0.0.physionet.org", user_project=gcp_project
)
filepath = os.path.join(dataset_dir, "mimic-cxr-2.0.0-split.csv.gz")
bucket.blob("mimic-cxr-2.0.0-split.csv.gz").download_to_filename(
filepath,
)
subprocess.run(["gunzip", filepath])
df = df.merge(
mk.DataFrame.from_csv(
os.path.join(dataset_dir, "mimic-cxr-2.0.0-split.csv")
)[["split", "dicom_id"]],
how="left",
on="dicom_id",
)
if download_jpg:
df = download_mimic_df(df, resize=download_resize)
if write:
df.write(os.path.join(dataset_dir, "mimic.mk"))
return df
def query_mimic_db(query_str: str, gcp_project: str) -> pd.DataFrame:
credentials, _ = google.auth.default(
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
# Make clients.
bqclient = bigquery.Client(
credentials=credentials,
project=gcp_project,
)
bqstorageclient = bigquery_storage.BigQueryReadClient(credentials=credentials)
df = (
bqclient.query(query_str)
.result()
.to_dataframe(bqstorage_client=bqstorageclient, progress_bar_type="tqdm")
)
return df
def download_mimic_df(mimic_df: mk.DataFrame, resize: int = None, **kwargs):
col = mimic_df["cxr_jpg"].view()
dataset_dir = col.local_dir
paths = mimic_df["jpg_path"]
if resize:
paths = paths.apply(
lambda x: os.path.join(
dataset_dir, os.path.splitext(x)[0] + f"_{resize}" + ".jpg"
)
)
def _write_resized(path, img):
if resize is not None:
img.thumbnail((resize, resize))
root, ext = os.path.splitext(path)
path = root + f"_{resize}" + ext
img.save(path)
col._skip_cache = True
col.writer = _write_resized
col.map(
lambda x: True,
num_workers=6,
pbar=True,
)
mimic_df[f"cxr_jpg_{resize}"] = mk.ImageColumn.from_filepaths(paths)
return mimic_df
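# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of pulling a few CXR records with
# `query_mimic_db`. Assumes GCP credentials are configured and that
# "my-gcp-project" (a placeholder) has access to the requester-pays
# `physionet-data` BigQuery tables.
def _example_query_mimic_db():
    query_str = """
        SELECT study_id, subject_id, dicom_id
        FROM `physionet-data.mimic_cxr.record_list`
        LIMIT 10
    """
    return query_mimic_db(query_str, gcp_project="my-gcp-project")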
|
meerkat-main
|
meerkat/datasets/mimic_iv/__init__.py
|
from __future__ import annotations
import logging
from typing import Collection, Sequence
from meerkat.columns.deferred.base import DeferredColumn
from meerkat.columns.pandas_column import ScalarColumn
logger = logging.getLogger(__name__)
class ReportColumn(DeferredColumn):
def __init__(
self,
data: Sequence[str] = None,
transform: callable = None,
loader: callable = None,
*args,
**kwargs,
):
super(ReportColumn, self).__init__(
ScalarColumn.from_data(data), *args, **kwargs
)
self.loader = self.default_loader if loader is None else loader
self.transform = transform
def fn(self, filepath: str):
image = self.loader(filepath)
if self.transform is not None:
image = self.transform(image)
return image
@classmethod
def from_filepaths(
cls,
filepaths: Sequence[str],
loader: callable = None,
transform: callable = None,
*args,
**kwargs,
):
return cls(
data=filepaths,
loader=loader,
transform=transform,
*args,
**kwargs,
)
@classmethod
def default_loader(cls, filepath):
with open(filepath) as f:
return f.read()
@classmethod
def _state_keys(cls) -> Collection:
return (super()._state_keys() | {"transform", "loader"}) - {"fn"}
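# --- Illustrative usage sketch (not part of the original file) ---
# `ReportColumn.from_filepaths` builds a column that lazily reads each
# report from disk when indexed; the paths below are placeholders and
# the transform is optional post-processing applied after loading.
def _example_report_column():
    col = ReportColumn.from_filepaths(
        ["reports/s001.txt", "reports/s002.txt"],
        transform=lambda text: text.lower(),
    )
    return col[0]  # reads reports/s001.txt, then lowercases it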
|
meerkat-main
|
meerkat/datasets/mimic_iv/reports.py
|
# we only include a subset of the fields in the dicom metadata
TABLES = {
"cxr_records": {
"table": "physionet-data.mimic_cxr.record_list",
"fields": [
"study_id",
"subject_id",
"dicom_id",
# use tuples to specify alias
("path", "dicom_path"),
],
},
"cxr_studies": {
"table": "physionet-data.mimic_cxr.study_list",
"fields": [
("path", "report_path"),
],
},
"labels": {
"table": "physionet-data.mimic_cxr.chexpert",
"fields": [
"Atelectasis",
"Cardiomegaly",
"Consolidation",
"Edema",
"Enlarged_Cardiomediastinum",
"Fracture",
"Lung_Lesion",
"Lung_Opacity",
"No_Finding",
"Pleural_Effusion",
"Pleural_Other",
"Pneumonia",
"Pneumothorax",
"Support_Devices",
],
},
"dicom_meta": {
"table": "physionet-data.mimic_cxr.dicom_metadata_string",
"fields": [
"dicom",
"StudyDate",
"ImageType",
"TableType",
"DistanceSourceToDetector",
"DistanceSourceToPatient",
"Exposure",
"ExposureTime",
"XRayTubeCurrent",
"FieldOfViewRotation",
"FieldOfViewOrigin",
"FieldOfViewHorizontalFlip",
"ViewPosition",
"PatientOrientation",
"BurnedInAnnotation",
"RequestingService",
"DetectorPrimaryAngle",
"DetectorElementPhysicalSize",
],
},
"patients": {
"table": "physionet-data.mimic_core.patients",
"fields": ["gender", "anchor_age", "anchor_year", "anchor_year_group", "dod"],
},
"admit": {
"table": "physionet-data.mimic_core.admissions",
"fields": [
"hadm_id",
"admittime",
"dischtime",
"deathtime",
"admission_type",
"admission_location",
"discharge_location",
"insurance",
"language",
"marital_status",
"ethnicity",
"edregtime",
"edouttime",
"hospital_expire_flag",
],
},
}
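# --- Illustrative sketch (not part of the original file) ---
# How the `fields` entries above expand into a SELECT clause: plain
# strings become `table.field`, and (field, alias) tuples become
# `table.field AS alias`, mirroring the comprehension in the builder.
def _example_select_fields(table: str = "cxr_records"):
    return [
        f"{table}.{f[0]} AS {f[1]}" if isinstance(f, tuple) else f"{table}.{f}"
        for f in TABLES[table]["fields"]
    ]
    # -> ["cxr_records.study_id", "cxr_records.subject_id",
    #     "cxr_records.dicom_id", "cxr_records.path AS dicom_path"]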
|
meerkat-main
|
meerkat/datasets/mimic_iv/modules.py
|
import os
import subprocess
import pandas as pd
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_google_drive
@datasets.register()
class expw(DatasetBuilder):
VERSION_TO_GDRIVE_ID = {"main": "19Eb_WiTsWelYv7Faff0L5Lmo1zv0vzwR"}
VERSIONS = ["main"]
info = DatasetInfo(
name="expw",
full_name="Expression in-the-Wild",
description=(
"The Expression in-the-Wild (ExpW) dataset contains face images "
"collected from the web, each annotated with one of seven basic "
"expression categories."
),
homepage="https://mmlab.ie.cuhk.edu.hk/projects/socialrelation/index.html",
tags=["image", "classification"],
)
def build(self):
df = pd.read_csv(
os.path.join(self.dataset_dir, "label/label.lst"),
delimiter=" ",
names=[
"image_name",
"face_id_in_image",
"face_box_top",
"face_box_left",
"face_box_right",
"face_box_bottom",
"face_box_cofidence",
"expression_label",
],
)
df = df.drop_duplicates()
df = mk.DataFrame.from_pandas(df)
# ensure that all the image files are downloaded
if (
not df["image_name"]
.apply(
lambda name: os.path.exists(
os.path.join(self.dataset_dir, "image/origin", name)
)
)
.all()
):
raise ValueError(
"Some images are not downloaded to expected directory: "
f"{os.path.join(self.dataset_dir, 'image/origin')}. Verify download."
)
# remove file extension and add the face_id
df["example_id"] = (
df["image_name"].str.replace(".jpg", "", regex=False)
+ "_"
+ df["face_id_in_image"].astype(str)
)
df["image"] = mk.ImageColumn.from_filepaths(
"image/origin/" + df["image_name"], base_dir=self.dataset_dir
)
df["face_image"] = df[
"image",
"face_box_top",
"face_box_left",
"face_box_right",
"face_box_bottom",
].defer(crop)
return df
def download(self):
gdrive_id = self.VERSION_TO_GDRIVE_ID[self.version]
download_google_drive(id=gdrive_id, dst=self.dataset_dir, is_folder=True)
os.makedirs(os.path.join(self.dataset_dir, "image"), exist_ok=True)
for file in os.listdir(os.path.join(self.dataset_dir, "image")):
# run 7za to extract the file using subprocess
subprocess.call(
[
"7za",
"x",
os.path.join(self.dataset_dir, "image", file),
"-o" + os.path.join(self.dataset_dir, "image"),
]
)
def crop(row: dict):
return row["image"].crop(
(
row["face_box_left"],
row["face_box_top"],
row["face_box_right"],
row["face_box_bottom"],
)
)
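# --- Illustrative usage sketch (not part of the original file) ---
# `crop` expects a row-like dict holding a PIL image and the four face
# box coordinates; here we fabricate one from a blank image.
def _example_crop():
    import PIL.Image

    row = {
        "image": PIL.Image.new("RGB", (100, 100)),
        "face_box_left": 10,
        "face_box_top": 10,
        "face_box_right": 60,
        "face_box_bottom": 60,
    }
    return crop(row)  # a 50x50 crop of the face box region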
|
meerkat-main
|
meerkat/datasets/expw/__init__.py
|
import email
import os
import subprocess
import pandas as pd
from tqdm import tqdm
import meerkat as mk
COLUMNS = [
"From",
"To",
"Message-ID",
"Subject",
"X-FileName",
"X-From",
"X-To",
"X-cc",
"X-bcc",
"X-Folder",
"Date",
]
def _parse_email(email_string: str):
e = email.message_from_string(email_string)
d = {col.lower(): e.get(col, "") for col in COLUMNS}
d["body"] = e.get_payload()
return d
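# --- Illustrative sketch (not part of the original file) ---
# `_parse_email` extracts the headers listed in COLUMNS (as lower-cased
# keys) plus the message body from a raw RFC 822 string.
def _example_parse_email():
    raw = (
        "From: alice@enron.com\n"
        "To: bob@enron.com\n"
        "Subject: lunch\n"
        "\n"
        "Meet at noon?"
    )
    d = _parse_email(raw)
    return d["from"], d["subject"], d["body"]
    # -> ("alice@enron.com", "lunch", "Meet at noon?")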
def build_enron_df(dataset_dir: str, download: bool = True) -> mk.DataFrame:
df_path = os.path.join(dataset_dir, "enron.mk")
if os.path.exists(df_path):
return mk.DataFrame.read(df_path)
downloaded = os.path.exists(os.path.join(dataset_dir, "emails.csv"))
if not downloaded and download:
print("Downloading data...")
curr_dir = os.getcwd()
os.makedirs(dataset_dir, exist_ok=True)
os.chdir(dataset_dir)
subprocess.run(
args=["kaggle datasets download -d wcukierski/enron-email-dataset"],
shell=True,
check=True,
)
subprocess.run(
args=["unzip enron-email-dataset.zip"],
shell=True,
check=True,
)
os.chdir(curr_dir)
# load training data
print("Parsing emails...")
df = mk.DataFrame.from_csv(os.path.join(dataset_dir, "emails.csv"))
df = mk.DataFrame([_parse_email(message) for message in tqdm(df["message"])])
print("Parsing dates...")
# need to remove timezone info to save and load with feather
# otherwise get UnknownTimeZoneError on read
df["date"] = pd.to_datetime(df["date"], utc=True)
df.write(df_path)
return df
|
meerkat-main
|
meerkat/datasets/enron/__init__.py
|
import logging
import os
import pickle
from functools import partial
from tqdm import tqdm
import meerkat as mk
from .data_utils import (
compute_file_tuples,
compute_slice_matrix,
compute_stanford_file_tuples,
get_sz_labels,
stanford_eeg_loader,
)
logger = logging.getLogger(__name__)
def build_eeg_df(
dataset_dir: str,
raw_dataset_dir: str,
splits=["train", "dev"],
clip_len: int = 60,
step_size: int = 1,
stride: int = 60,
):
"""Builds a `DataFrame` for accessing EEG data.
Currently only supports TUH dataset for seq-seq prediction.
Future TODO: integrating stanford dataset with weak seq-seq labels
Args:
dataset_dir (str): A local directory where the preprocessed
(h5) EEG data are stored
raw_dataset_dir (str): A local directory where the original
(edf) EEG data are stored
clip_len (int): Number of seconds in an EEG clip
step_size (int): Number of seconds in a single 'step'
stride (int): Number of seconds in the stride when extracting
clips from signals
"""
# retrieve paths of all edf files in the raw_dataset_dir
edf_files = []
for path, subdirs, files in os.walk(raw_dataset_dir):
for name in files:
if ".edf" in name:
edf_files.append(os.path.join(path, name))
data = []
for split in splits:
file_tuples = compute_file_tuples(
raw_dataset_dir, dataset_dir, split, clip_len, stride
)
for edf_fn, clip_idx, _ in tqdm(file_tuples, total=len(file_tuples)):
filepath = [file for file in edf_files if edf_fn in file]
filepath = filepath[0]
file_id = edf_fn.split(".edf")[0]
sequence_sz, binary_sz = get_sz_labels(
edf_fn=filepath,
clip_idx=int(clip_idx),
time_step_size=step_size,
clip_len=clip_len,
stride=stride,
)
row_df = {
"filepath": filepath,
"file_id": file_id,
"sequence_sz": sequence_sz,
"binary_sz": binary_sz,
"clip_idx": int(clip_idx),
"h5_fn": os.path.join(dataset_dir, edf_fn.split(".edf")[0] + ".h5"),
"split": split,
}
data.append(row_df)
df = mk.DataFrame(data)
eeg_loader = partial(
compute_slice_matrix, time_step_size=step_size, clip_len=clip_len, stride=stride
)
eeg_input_col = df[["clip_idx", "h5_fn"]].defer(fn=eeg_loader)
df.add_column(
"eeg_input",
eeg_input_col,
overwrite=True,
)
return df
def download_tusz(download_dir, version="1.5.2"):
"""Downloads the EEG Seizure TUH dataset (TUSZ)
REQUIRED:
1. Need to first register at
https://www.isip.piconepress.com/projects/tuh_eeg/html/downloads.shtml
2. run download_tusz from a python script or simply run the provided rsync
command below in your terminal
3. enter the provided password sent to your email after step (1)
Args:
download_dir (str): The directory path to save to.
version (str, optional): Which version to download
"""
src_pth = f"nedc@www.isip.piconepress.com:data/tuh_eeg_seizure/v{version}/"
rsync_command = f"rsync -auxvL {src_pth} {download_dir}"
print("Executing rsync command")
os.system(rsync_command)
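# --- Illustrative usage sketch (not part of the original file) ---
# The shell equivalent of what `download_tusz` runs (you will be
# prompted for the password emailed to you after registering):
#
#   rsync -auxvL \
#       nedc@www.isip.piconepress.com:data/tuh_eeg_seizure/v1.5.2/ \
#       /path/to/tusz_raw/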
def build_stanford_eeg_df(
stanford_dataset_dir: str,
lpch_dataset_dir: str,
file_marker_dir: str,
splits=["train", "dev"],
reports_pth=None,
clip_len: int = 60,
):
"""Builds a `DataFrame` for accessing EEG data.
This is for accessing private stanford data.
The stanford data is limited to specific researchers on IRB.
No public directions on how to download them yet.
Contact ksaab@stanford.edu for more information.
Args:
stanford_dataset_dir (str): A local dir where stanford EEG are stored
lpch_dataset_dir (str): A local dir where the lpch EEG are stored
file_marker_dir (str): A local dir where file markers are stored
splits (list[str]): List of splits to load
reports_pth (str): if not None, will load reports
clip_len (int): Number of seconds in an EEG clip
"""
# retrieve file tuples which is a list of
# (eeg filepath, location of sz or -1 if no sz, split)
file_tuples = compute_stanford_file_tuples(
stanford_dataset_dir, lpch_dataset_dir, file_marker_dir, splits
)
data = []
for filepath, sz_loc, split in file_tuples:
row_df = {
"filepath": filepath,
"file_id": filepath.split("/")[-1].split(".eeghdf")[0],
"binary_sz": sz_loc != -1,
"sz_start_index": sz_loc,
"split": split,
}
data.append(row_df)
df = mk.DataFrame(data)
eeg_input_col = df[["sz_start_index", "filepath", "split"]].defer(
fn=partial(stanford_eeg_loader, clip_len=clip_len)
)
df.add_column(
"eeg_input",
eeg_input_col,
overwrite=True,
)
if reports_pth:
raw_reports_pth = os.path.join(reports_pth, "reports_unique_for_hl_mm.txt")
raw_reports_df = mk.DataFrame.from_csv(raw_reports_pth, sep="\t")
parsed_reports_pth = os.path.join(reports_pth, "parsed_eeg_notes.dill")
with open(parsed_reports_pth, "rb") as dill_f:
parsed_reports = pickle.load(dill_f)
doc_data = []
for doc in parsed_reports:
uuid = doc.doc_id
mask_id = raw_reports_df["note_uuid"] == uuid
if mask_id.sum() == 1 and "findings" in doc.sections:
file_id = raw_reports_df[mask_id]["edf_file_name"][0].split(".edf")[0]
findings = doc.sections["findings"]["text"]
row_df = {"file_id": file_id, "findings": findings}
doc_data.append(row_df)
reports_df = mk.DataFrame(doc_data)
df = df.merge(reports_df, how="left", on="file_id")
return df
|
meerkat-main
|
meerkat/datasets/eeg/__init__.py
|
import os
# import eeghdf
import h5py
import numpy as np
# import pyedflib
from scipy.signal import resample
from tqdm import tqdm
FREQUENCY = 200
INCLUDED_CHANNELS = [
"EEG FP1",
"EEG FP2",
"EEG F3",
"EEG F4",
"EEG C3",
"EEG C4",
"EEG P3",
"EEG P4",
"EEG O1",
"EEG O2",
"EEG F7",
"EEG F8",
"EEG T3",
"EEG T4",
"EEG T5",
"EEG T6",
"EEG FZ",
"EEG CZ",
"EEG PZ",
]
STANFORD_INCLUDED_CHANNELS = [
"EEG Fp1",
"EEG Fp2",
"EEG F3",
"EEG F4",
"EEG C3",
"EEG C4",
"EEG P3",
"EEG P4",
"EEG O1",
"EEG O2",
"EEG F7",
"EEG F8",
"EEG T3",
"EEG T4",
"EEG T5",
"EEG T6",
"EEG Fz",
"EEG Cz",
"EEG Pz",
]
SEIZURE_STRINGS = ["sz", "seizure", "absence", "spasm"]
FILTER_SZ_STRINGS = ["@sz", "@seizure"]
def compute_file_tuples(raw_dataset_dir, dataset_dir, split, clip_len, stride):
"""
Args:
raw_dataset_dir (str): location where the original (edf) EEG files are
dataset_dir (str): location where resampled signals are
split (str): whether train, dev, test
clip_len (int): length of each clip in the input eeg segments
stride (int): how to sample clips from eeg signal
Returns:
(file_name, clip_idx, seizure_label) tuples for the given split, clip_len, and
stride. The clip_idx indicates which clip (i.e. segment of EEG signal with
clip_len seconds). The stride determines how to sample clips from the eeg signal
(e.g. if stride=clip_len we have no overlapping clips)
"""
# retrieve paths of all edf files in the dataset_dir for given split
edf_files = []
edf_fullfiles = []
for path, _, files in os.walk(os.path.join(raw_dataset_dir, split)):
for name in files:
if ".edf" in name:
edf_fullfiles.append(os.path.join(path, name))
edf_files.append(name)
resampled_files = os.listdir(dataset_dir)
file_tuples = []
for h5_fn in resampled_files:
edf_fn = h5_fn.split(".h5")[0] + ".edf"
if edf_fn not in edf_files:
continue
edf_fn_full = [file for file in edf_fullfiles if edf_fn in file]
if len(edf_fn_full) != 1:
print(f"{edf_fn} found {len(edf_fn_full)} times!")
print(edf_fn_full)
edf_fn_full = edf_fn_full[0]
seizure_times = get_seizure_times(edf_fn_full.split(".edf")[0])
h5_fn_full = os.path.join(dataset_dir, h5_fn)
with h5py.File(h5_fn_full, "r") as hf:
resampled_sig = hf["resampled_signal"][()]
num_clips = (resampled_sig.shape[-1] - clip_len * FREQUENCY) // (
stride * FREQUENCY
) + 1
for i in range(num_clips):
start_window = i * FREQUENCY * stride
end_window = np.minimum(
start_window + FREQUENCY * clip_len, resampled_sig.shape[-1]
)
is_seizure = 0
for t in seizure_times:
start_t = int(t[0] * FREQUENCY)
end_t = int(t[1] * FREQUENCY)
if not ((end_window < start_t) or (start_window > end_t)):
is_seizure = 1
break
file_tuples.append((edf_fn, i, is_seizure))
return file_tuples
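# --- Illustrative sketch (not part of the original file) ---
# The clip count above is the standard sliding-window formula. For a
# 10-minute recording at FREQUENCY=200 Hz with non-overlapping 60 s
# clips (stride == clip_len):
def _example_num_clips(num_seconds=600, clip_len=60, stride=60):
    num_samples = num_seconds * FREQUENCY
    return (num_samples - clip_len * FREQUENCY) // (stride * FREQUENCY) + 1
    # _example_num_clips() -> 10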
def get_sz_labels(
edf_fn,
clip_idx,
time_step_size=1,
clip_len=60,
stride=60,
):
"""
Convert entire EEG sequence into clips of length clip_len
Args:
edf_fn: edf/eeghdf file name, full path
clip_idx: index of current clip/sliding window, int
time_step_size: length of each time step, in seconds, int
clip_len: sliding window size or EEG clip length, in seconds, int
stride: stride size, by how many seconds the sliding window moves, int
Returns:
seizure_labels: per-time-step seizure labels
is_seizure: overall label, 1 if at least one seizure in clip
"""
physical_clip_len = int(FREQUENCY * clip_len)
start_window = clip_idx * FREQUENCY * stride
end_window = start_window + physical_clip_len
# get seizure times, take min_sz_len into account
if ".edf" in edf_fn:
seizure_times = get_seizure_times(edf_fn.split(".edf")[0])
else:
raise NotImplementedError
# get per-time-step seizure labels
num_time_steps = int(clip_len / time_step_size)
seizure_labels = np.zeros((num_time_steps)).astype(int)
is_seizure = 0
for t in seizure_times:
start_t = int(t[0] * FREQUENCY)
end_t = int(t[1] * FREQUENCY)
if not ((end_window < start_t) or (start_window > end_t)):
is_seizure = 1
start_t_sec = int(t[0]) # start of seizure in int seconds
end_t_sec = int(t[1]) # end of seizure in int seconds
# shift start_t_sec and end_t_sec so that they start at current clip
start_t_sec = np.maximum(0, start_t_sec - int(start_window / FREQUENCY))
end_t_sec = np.minimum(clip_len, end_t_sec - int(start_window / FREQUENCY))
# print("start_t_sec: {}; end_t_sec: {}".format(start_t_sec, end_t_sec))
# time step size may not be 1-sec
start_time_step = int(np.floor(start_t_sec / time_step_size))
end_time_step = int(np.ceil(end_t_sec / time_step_size))
seizure_labels[start_time_step:end_time_step] = 1
return seizure_labels, is_seizure
def compute_slice_matrix(
input_dict,
time_step_size=1,
clip_len=60,
stride=60,
):
"""
Convert entire EEG sequence into clips of length clip_len
Args:
input_dict: dict with keys "clip_idx" (index of current clip/sliding
window, int) and "h5_fn" (file name of resampled signal h5 file,
full path)
time_step_size: length of each time step, in seconds, int
clip_len: sliding window size or EEG clip length, in seconds, int
stride: stride size, by how many seconds the sliding window moves, int
Returns:
eeg_clip: EEG clip
"""
clip_idx = input_dict["clip_idx"]
h5_fn = input_dict["h5_fn"]
physical_clip_len = int(FREQUENCY * clip_len)
start_window = clip_idx * FREQUENCY * stride
with h5py.File(h5_fn, "r") as f:
signal_array = f["resampled_signal"][()]
resampled_freq = f["resample_freq"][()]
assert resampled_freq == FREQUENCY
# (num_channels, physical_clip_len)
end_window = np.minimum(signal_array.shape[-1], start_window + physical_clip_len)
curr_slc = signal_array[:, start_window:end_window] # (num_channels, FREQ*clip_len)
physical_time_step_size = int(FREQUENCY * time_step_size)
start_time_step = 0
time_steps = []
while start_time_step <= curr_slc.shape[1] - physical_time_step_size:
end_time_step = start_time_step + physical_time_step_size
# (num_channels, physical_time_step_size)
curr_time_step = curr_slc[:, start_time_step:end_time_step]
time_steps.append(curr_time_step)
start_time_step = end_time_step
eeg_clip = np.stack(time_steps, axis=0)
return eeg_clip
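# --- Illustrative sketch (not part of the original file) ---
# Builds a throwaway resampled-signal h5 file and slices one clip from
# it. With the defaults (time_step_size=1, clip_len=60), the clip has
# shape (clip_len / time_step_size, num_channels, FREQUENCY).
def _example_compute_slice_matrix():
    import tempfile

    num_channels, num_seconds = 19, 120
    h5_fn = os.path.join(tempfile.gettempdir(), "example_eeg.h5")
    with h5py.File(h5_fn, "w") as hf:
        hf.create_dataset(
            "resampled_signal",
            data=np.zeros((num_channels, num_seconds * FREQUENCY)),
        )
        hf.create_dataset("resample_freq", data=FREQUENCY)
    eeg_clip = compute_slice_matrix({"clip_idx": 1, "h5_fn": h5_fn})
    return eeg_clip.shape  # -> (60, 19, 200)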
def get_seizure_times(file_name):
"""
Args:
file_name: file name of .edf file etc.
Returns:
seizure_times: list of times of seizure onset in seconds
"""
tse_file = file_name.split(".edf")[0] + ".tse_bi"
seizure_times = []
with open(tse_file) as f:
for line in f.readlines():
if "seiz" in line: # if seizure
# seizure start and end time
seizure_times.append(
[
float(line.strip().split(" ")[0]),
float(line.strip().split(" ")[1]),
]
)
return seizure_times
def get_ordered_channels(
file_name,
labels_object,
channel_names=INCLUDED_CHANNELS,
verbose=False,
):
"""
Reads channel names and returns consistent ordering
Args:
file_name (str): name of edf file
labels_object: extracted from edf signal using f.getSignalLabels()
channel_names (List(str)): list of channel names
verbose (bool): whether to be verbose
Returns:
list of channel indices in ordered form
"""
labels = list(labels_object)
for i in range(len(labels)):
labels[i] = labels[i].split("-")[0]
ordered_channels = []
for ch in channel_names:
try:
ordered_channels.append(labels.index(ch))
except ValueError:  # list.index raises ValueError when ch is missing
if verbose:
print(file_name + " failed to get channel " + ch)
raise Exception("channel not match")
return ordered_channels
def get_edf_signals(edf):
"""
Get EEG signal in edf file
Args:
edf: edf object
Returns:
signals: shape (num_channels, num_data_points)
"""
n = edf.signals_in_file
samples = edf.getNSamples()[0]
signals = np.zeros((n, samples))
for i in range(n):
try:
signals[i, :] = edf.readSignal(i)
except IndexError:
pass
return signals
def resample_data(signals, to_freq=200, window_size=4):
"""
Resample signals from its original sampling freq to another freq
Args:
signals: EEG signal slice, (num_channels, num_data_points)
to_freq: Re-sampled frequency in Hz
window_size: time window in seconds
Returns:
resampled: (num_channels, resampled_data_points)
"""
num = int(to_freq * window_size)
resampled = resample(signals, num=num, axis=1)
return resampled
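# --- Illustrative sketch (not part of the original file) ---
# Resampling a 4 s, 250 Hz recording down to 200 Hz keeps the channel
# axis and changes only the number of time points.
def _example_resample_data():
    signals = np.random.randn(19, 250 * 4)  # (channels, 250 Hz * 4 s)
    resampled = resample_data(signals, to_freq=200, window_size=4)
    return resampled.shape  # -> (19, 800)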
def resample_files(raw_edf_dir, save_dir):
"""Resamples edf files to FREQUENCY and saves them in specified dir.
Args:
raw_edf_dir (str): location where original edf files are located
save_dir (str): location to save resampled signals
"""
edf_files = []
for path, subdirs, files in os.walk(raw_edf_dir):
for name in files:
if ".edf" in name:
edf_files.append(os.path.join(path, name))
failed_files = []
for idx in tqdm(range(len(edf_files))):
edf_fn = edf_files[idx]
save_fn = os.path.join(save_dir, edf_fn.split("/")[-1].split(".edf")[0] + ".h5")
if os.path.exists(save_fn):
continue
try:
f = None # pyedflib.EdfReader(edf_fn)
except BaseException:
failed_files.append(edf_fn)
continue  # skip files that could not be opened
orderedChannels = get_ordered_channels(edf_fn, f.getSignalLabels())
signals = get_edf_signals(f)
signal_array = np.array(signals[orderedChannels, :])
sample_freq = f.getSampleFrequency(0)
if sample_freq != FREQUENCY:
signal_array = resample_data(
signal_array,
to_freq=FREQUENCY,
window_size=int(signal_array.shape[1] / sample_freq),
)
with h5py.File(save_fn, "w") as hf:
hf.create_dataset("resampled_signal", data=signal_array)
hf.create_dataset("resample_freq", data=FREQUENCY)
print("DONE. {} files failed.".format(len(failed_files)))
def compute_stanford_file_tuples(
stanford_dataset_dir, lpch_dataset_dir, file_marker_dir, splits
):
"""Given the splits, processes file tuples form filemarkers file tuple:
(eeg filename, location of sz or -1 if no sz, split)
Args:
stanford_dataset_dir (str): data dir for stanford EEG files
lpch_dataset_dir (str): data dir for lpc EEG files
file_marker_dir (str): dir where file markers are stored
splits (List[str]): which splits to process
"""
file_tuples = []
for split in splits:
for hospital in ["lpch", "stanford"]:
data_dir = (
stanford_dataset_dir if hospital == "stanford" else lpch_dataset_dir
)
for sz_type in ["non_sz", "sz"]:
fm_dir = (
f"{file_marker_dir}/file_markers_{hospital}/{sz_type}_{split}.txt"
)
filemarker_contents = open(fm_dir, "r").readlines()
for fm in filemarker_contents:
fm_tuple = fm.strip("\n").split(",")
filepath = os.path.join(data_dir, fm_tuple[0])
fm_tuple = (filepath, float(fm_tuple[1]), split)
file_tuples.append(fm_tuple)
return file_tuples
def get_stanford_sz_times(eegf):
df = eegf.edf_annotations_df
seizure_df = df[df.text.str.contains("|".join(SEIZURE_STRINGS), case=False)]
seizure_df = seizure_df[
~seizure_df.text.str.contains("|".join(FILTER_SZ_STRINGS), case=False)
]
seizure_times = seizure_df["starts_sec"].tolist()
return seizure_times
def is_increasing(channel_indices):
"""Check if a list of indices is sorted in ascending order.
If not, we will have to convert it to a numpy array before slicing,
which is a rather expensive operation
Returns: bool
"""
last = channel_indices[0]
for i in range(1, len(channel_indices)):
if channel_indices[i] < last:
return False
last = channel_indices[i]
return True
def stanford_eeg_loader(input_dict, clip_len=60):
"""given filepath and sz_start, extracts EEG clip of length 60 sec."""
filepath = input_dict["filepath"]
sz_start_idx = input_dict["sz_start_index"]
split = input_dict["split"]
# load EEG signal
eegf = None # eeghdf.Eeghdf(filepath)
ordered_channels = get_ordered_channels(
filepath, eegf.electrode_labels, channel_names=STANFORD_INCLUDED_CHANNELS
)
phys_signals = eegf.phys_signals
# get seizure time
if sz_start_idx == -1 or split != "train":
sz_start = sz_start_idx
else:
sz_times = get_stanford_sz_times(eegf)
sz_start = sz_times[int(sz_start_idx)]
# extract clip
if sz_start == -1:
max_start = max(phys_signals.shape[1] - FREQUENCY * clip_len, 0)
sz_start = int(max_start / 2)
sz_start /= FREQUENCY
start_time = int(FREQUENCY * max(0, sz_start))
end_time = start_time + int(FREQUENCY * clip_len)
if not is_increasing(ordered_channels):
eeg_slice = phys_signals[:, start_time:end_time]
eeg_slice = eeg_slice[ordered_channels, :]
else:
eeg_slice = (
phys_signals.s2u[ordered_channels]
* phys_signals.data[ordered_channels, start_time:end_time].T
).T
diff = FREQUENCY * clip_len - eeg_slice.shape[1]
# padding zeros
if diff > 0:
zeros = np.zeros((eeg_slice.shape[0], diff))
eeg_slice = np.concatenate((eeg_slice, zeros), axis=1)
eeg_slice = eeg_slice.T
return eeg_slice
|
meerkat-main
|
meerkat/datasets/eeg/data_utils.py
|
meerkat-main
|
meerkat/cli/__init__.py
|
|
import os
import shutil
import subprocess
import time
from enum import Enum
import rich
import typer
from rich.progress import Progress, SpinnerColumn, TextColumn
from meerkat.constants import (
MEERKAT_DEMO_DIR,
MEERKAT_INTERNAL_APP_DIR,
MEERKAT_NPM_PACKAGE,
App,
MeerkatApp,
PackageManager,
PathHelper,
SystemHelper,
)
from meerkat.interactive.server import API_PORT, FRONTEND_PORT
from meerkat.interactive.startup import run_frontend, run_script
from meerkat.state import APIInfo, state
from meerkat.tools.collect_env import collect_env_info
cli = typer.Typer()
def _unwrap_enum(value):
"""Unwrap an Enum value if it's an Enum, otherwise return the value."""
return value.value if isinstance(value, Enum) else value
@cli.command()
def init(
name: str = typer.Option(
"meerkat_app",
help="Name of the app",
),
package_manager: PackageManager = typer.Option(
"npm",
show_choices=True,
help="Package manager to use",
),
):
"""Create a new Meerkat app. This will create a new folder called `app` in
the current directory and install all the necessary packages.
Internally, Meerkat uses SvelteKit to create the app, and adds all
the setup required by Meerkat to the app.
"""
# This is a no-op, but it's here for clarity.
os.chdir(PathHelper().rundir)
# Create a MeerkatApp object to represent the app
package_manager = _unwrap_enum(package_manager)
app = MeerkatApp(
appdir=PathHelper().appdir,
appname=name,
package_manager=package_manager,
)
if app.exists():
# Check if app exists, and tell the user to delete it if it does
rich.print(
f"[red]An app already exists at {app.appdir}. "
"Please delete it and rerun this command.[/red]"
)
raise typer.Exit(1)
rich.print(
f":seedling: Creating [purple]Meerkat[/purple] app: [green]{name}[/green]"
)
with Progress(
SpinnerColumn(spinner_name="material"),
TextColumn("[progress.description]{task.description}"),
transient=True,
) as progress:
# Install prerequisites: package manager
progress.add_task(description="Installing system prerequisites...", total=None)
try:
if package_manager == "bun":
# Install the bun package manager
SystemHelper().install_bun()
except subprocess.CalledProcessError as e:
rich.print(e.stdout.decode("utf-8"))
raise e
progress.add_task(description="Creating app...", total=None)
try:
# Create the Meerkat app.
app.create()
except subprocess.CalledProcessError as e:
rich.print(e.stdout.decode("utf-8"))
raise e
progress.add_task(description="Installing packages...", total=None)
try:
# Install packages in the new app.
app.setup_mk_dependencies()
app.setup_mk_build_command()
app.install()
except subprocess.CalledProcessError as e:
rich.print(e.stdout.decode("utf-8"))
raise e
progress.add_task(description="Getting tailwind...", total=None)
try:
# Install TailwindCSS.
app.install_tailwind()
except subprocess.CalledProcessError as e:
rich.print(e.stdout.decode("utf-8"))
raise e
# Final setup for the app.
app.setup()
# Print a message.
app.print_finish_message()
@cli.command()
def run(
script_path: str = typer.Argument(
..., help="Path to a Python script to run in Meerkat"
),
dev: bool = typer.Option(False, "--dev/--prod", help="Run in development mode"),
api_port: int = typer.Option(API_PORT, help="Meerkat API port"),
frontend_port: int = typer.Option(FRONTEND_PORT, help="Meerkat frontend port"),
host: str = typer.Option("127.0.0.1", help="Host to run on"),
target: str = typer.Option("page", help="Target to run in script"),
package_manager: PackageManager = typer.Option(
"npm", show_choices=True, help="Package manager to use"
),
shareable: bool = typer.Option(False, help="Run in public sharing mode"),
subdomain: str = typer.Option(
"app", help="Subdomain to use for public sharing mode"
),
debug: bool = typer.Option(False, help="Enable debug logging mode"),
skip_build: bool = typer.Option(True, help="Skip building the app."),
):
"""Launch a Meerkat app, given a path to a Python script."""
_run(
script_path=script_path,
dev=dev,
host=host,
api_port=api_port,
frontend_port=frontend_port,
target=target,
package_manager=_unwrap_enum(package_manager),
shareable=shareable,
subdomain=subdomain,
debug=debug,
skip_build=skip_build,
)
def _run(
script_path: str,
dev: bool = False,
host: str = "127.0.0.1",
api_port: int = API_PORT,
frontend_port: int = FRONTEND_PORT,
target: str = "page",
package_manager: PackageManager = PackageManager.npm,
shareable: bool = False,
subdomain: str = "app",
debug: bool = False,
skip_build: bool = True,
):
# Pretty print information to console
rich.print(f":rocket: Running [bold violet]{script_path}[/bold violet]")
if dev:
rich.print(
":wrench: Dev mode is [bold violet]on[/bold violet]\n"
":hammer: Live reload is [bold violet]enabled[/bold violet]"
)
else:
rich.print(":wrench: Production mode is [bold violet]on[/bold violet]")
rich.print(":x: To stop the app, press [bold violet]Ctrl+C[/bold violet]")
rich.print()
# Run the frontend
# TODO: make the dummy API info take in the actual hostname
dummy_api_info = APIInfo(api=None, port=api_port, name="127.0.0.1")
frontend_info = run_frontend(
package_manager=package_manager,
port=frontend_port,
dev=dev,
shareable=shareable,
subdomain=subdomain,
apiurl=dummy_api_info.url,
appdir=PathHelper().appdir,
skip_build=skip_build,
)
# Run the uvicorn server
# Set the logging level to debug if debug is enabled
api_info = run_script(
script_path,
server_name=host,
port=api_port,
dev=dev,
target=target,
frontend_url=frontend_info.url,
apiurl=dummy_api_info.url,
debug=debug,
)
# Put them into the global state so the exit handler can use them to clean up
# the processes when the user exits this script.
state.api_info = api_info
state.frontend_info = frontend_info
while (api_info.process.poll() is None) or (frontend_info.process.poll() is None):
# Exit on Ctrl+C
try:
time.sleep(1)
except KeyboardInterrupt:
rich.print()
break
@cli.command()
def update():
"""Update the Meerkat npm package to the latest version."""
# Check if there's an app/ folder in the current directory
if os.path.exists("app"):
# Run `npm i MEERKAT_NPM_PACKAGE` in the app/ folder
subprocess.run(["npm", "i", MEERKAT_NPM_PACKAGE], cwd="app")
rich.print(":tada: Updated Meerkat npm package to the latest version!")
else:
rich.print(
":x: Could not find [purple]app[/purple] folder in the current directory."
)
@cli.command()
def install(
package_manager: PackageManager = typer.Option(
"npm", show_choices=True, help="Package manager to use"
),
run_dev: bool = typer.Option(False, help="Run `npm run dev` after installation"),
):
"""Install npm and other dependencies for interactive Meerkat."""
SystemHelper().install_node()
package_manager = _unwrap_enum(package_manager)
app = App(appdir=MEERKAT_INTERNAL_APP_DIR, package_manager=package_manager)
app.install()
if run_dev:
app.run_dev()
@cli.command()
def build(
package_manager: PackageManager = typer.Option(
"npm", show_choices=True, help="Package manager to use"
)
):
"""Build app folder for both the internal meerkat folder and custom
project."""
from meerkat.interactive.svelte import SvelteWriter
writer = SvelteWriter()
if writer.appdir != MEERKAT_INTERNAL_APP_DIR:
# Build wrappers for internal meerkat folder.
mk_writer = SvelteWriter()
mk_writer.app = App(
appdir=MEERKAT_INTERNAL_APP_DIR, package_manager=package_manager
)
mk_writer.run()
mk_writer.app.run_build()
writer.app.run_build()
# Find all .py files in MEERKAT_DEMO_DIR, OR .py files
# in subdirectories of MEERKAT_DEMO_DIR that start with "main_"
demos = []
if MEERKAT_DEMO_DIR:
for root, dirs, files in os.walk(MEERKAT_DEMO_DIR):
for dir in dirs:
for file in os.listdir(os.path.join(root, dir)):
if file.startswith("main") and file.endswith(".py"):
demos.append(os.path.join(dir, file.replace(".py", "")))
DemoScript = Enum(
"DemoScript",
{
k: k
for k in [
x.split(".py")[0] for x in os.listdir(MEERKAT_DEMO_DIR) if x.endswith(".py")
]
+ demos
}
if MEERKAT_DEMO_DIR
else {},
)
@cli.command()
def demo(
script: DemoScript = typer.Argument(
..., show_choices=True, help="Demo script to run"
),
run: bool = typer.Option(True, help="Run the demo script"),
api_port: int = typer.Option(API_PORT, help="Meerkat API port"),
frontend_port: int = typer.Option(FRONTEND_PORT, help="Meerkat frontend port"),
dev: bool = typer.Option(False, "--dev/--prod", help="Run in development mode"),
copy: bool = typer.Option(
False, help="Copy the demo script to the current directory"
),
debug: bool = typer.Option(False, help="Enable debug logging mode"),
):
"""Run a demo script."""
# Get the path to the demo script
script = script.value
script_path = os.path.join(MEERKAT_DEMO_DIR, f"{script}.py")
# Optional: Copy the demo script to the current directory.
if copy:
shutil.copy(script_path, f"./{script}.py")
rich.print(f"Copied [purple]{script}.py[/purple] to the current directory.")
script_path = f"{script}.py"
# Optional: Run the demo script.
if run:
_run(
script_path=script_path,
api_port=api_port,
frontend_port=frontend_port,
dev=dev,
debug=debug,
)
@cli.command()
def collect_env():
print(collect_env_info())
if __name__ == "__main__":
cli()
|
meerkat-main
|
meerkat/cli/main.py
|
"""Startup script for interactive Meerkat.
Some code and design patterns are borrowed from Gradio and Pynecone.
"""
import atexit
import fnmatch
import logging
import os
import pathlib
import re
import socket
import subprocess
import time
from typing import Tuple
import requests
import rich
from uvicorn import Config
from meerkat.constants import (
MEERKAT_APP_DIR,
MEERKAT_BASE_DIR,
MEERKAT_INTERNAL_APP_BUILD_DIR,
MEERKAT_RUN_ID,
MEERKAT_RUN_PROCESS,
MEERKAT_RUN_SUBPROCESS,
PathHelper,
is_notebook,
write_file,
)
from meerkat.interactive.api import MeerkatAPI
from meerkat.interactive.server import (
API_PORT,
FRONTEND_PORT,
LOCALHOST_NAME,
TRY_NUM_PORTS,
Server,
)
from meerkat.interactive.tunneling import setup_tunnel
from meerkat.state import APIInfo, FrontendInfo, state
from meerkat.version import __version__ as mk_version
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
logger = logging.getLogger(__name__)
def file_find_replace(directory, find, replace, pattern):
for path, _, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, pattern):
filepath = os.path.join(path, filename)
with open(filepath) as f:
s = f.read()
# s = s.replace(find, replace)
s = re.sub(find, replace, s)
with open(filepath, "w") as f:
f.write(s)
def get_first_available_port(initial: int, final: int) -> int:
"""Gets the first open port in a specified range of port numbers. Taken
from https://github.com/gradio-app/gradio/blob/main/gradio/networking.py.
More reading:
https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open
Args:
initial: the initial value in the range of port numbers
final: final (exclusive) value in the range of port numbers,
should be greater than `initial`
Returns:
port: the first open port in the range
"""
# rich.print(f"Trying to find an open port in ({initial}, {final}). ", end="")
for port in range(initial, final):
try:
s = socket.socket(
socket.AF_INET, socket.SOCK_STREAM
) # create a socket object
result = s.bind((LOCALHOST_NAME, port)) # Bind to the port # noqa: F841
s.close()
# rich.print(f"Found open port: {port}")
return port
except OSError:
pass
raise OSError(
"All ports from {} to {} are in use. Please close a port.".format(
initial, final - 1
)
)
def snake_case_to_camel_case(snake_case: str) -> str:
"""Converts a snake case string to camel case.
Args:
snake_case (str): the snake case string to convert.
Returns:
str: the camel case string.
"""
substrings = snake_case.split("_")
return substrings[0] + "".join(x.title() for x in substrings[1:])
def to_py_module_name(script: str) -> str:
"""Converts a script name to a Python module name.
Args:
script (str): the script name to convert.
Returns:
str: the Python module name.
"""
# Make sure script is in module format.
if script.endswith(".py"):
# Strip the .py extension.
script = script[:-3]
# Replace all / with .
script = script.replace("/", ".")
return script
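# --- Illustrative sketch (not part of the original file) ---
# Round-tripping the two helpers above:
def _example_name_helpers():
    assert snake_case_to_camel_case("run_api_server") == "runApiServer"
    assert to_py_module_name("interactive/startup.py") == "interactive.startup"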
def run_script(
script: str,
server_name: str = LOCALHOST_NAME,
port: int = API_PORT,
dev: bool = True,
target: str = "page",
shareable: bool = False,
subdomain: str = "app",
frontend_url: str = None,
apiurl: str = None,
debug: bool = False,
) -> APIInfo:
"""Run a script with uvicorn.
Args:
script (str): the script to run.
server_name (str, optional): the name of the server to run the
script on. Defaults to LOCALHOST_NAME ("127.0.0.1").
port (int, optional): the port to run the script on. Defaults to
the default API port in Meerkat, which is 5000.
dev (bool, optional): whether to run the script in development
mode. Defaults to True.
target (str, optional): the target `Page` instance to run. Defaults to
"page".
"""
# Make sure script is in module format.
script = os.path.abspath(script) # to_py_module_name(script)
# Run the script with uvicorn. This will start the FastAPI server
# and serve the backend.
env = os.environ.copy()
if frontend_url is not None:
env["MEERKAT_FRONTEND_URL"] = frontend_url
if apiurl is not None:
env["MEERKAT_API_URL"] = apiurl
if debug:
env["MEERKAT_LOGGING_LEVEL"] = "DEBUG"
env["MEERKAT_RUN_SUBPROCESS"] = str(1)
env["MEERKAT_RUN_SCRIPT_PATH"] = script
env["MEERKAT_RUN_ID"] = MEERKAT_RUN_ID
# Create a file that will be used to count the number of times the script has been
# reloaded. This is used by `SvelteWriter` to decide when to write the Svelte
# wrappers (see that class for more details).
write_file(f"{PathHelper().appdir}/.{MEERKAT_RUN_ID}.reload", str(1))
process = subprocess.Popen(
[
"uvicorn",
f"{os.path.basename(script).rsplit('.')[0]}:{target}",
"--port",
str(port),
"--host",
server_name,
"--log-level",
"warning",
"--factory",
]
+ (["--reload"] if dev else [])
+ (["--reload-dir", os.path.dirname(script)] if dev else [])
+ (["--app-dir", os.path.dirname(script)]),
env=env,
stderr=subprocess.STDOUT,
)
# If shareable, start the tunnel
if shareable:
server_name = setup_tunnel(port, subdomain=f"{subdomain}server")
return APIInfo(
api=MeerkatAPI,
port=port,
name=server_name,
shared=shareable,
process=process,
)
def run_api_server(
server_name: str = LOCALHOST_NAME,
port: int = API_PORT,
dev: bool = True,
shareable: bool = False,
subdomain: str = "app",
) -> APIInfo:
# Move to the base directory at meerkat/
currdir = os.getcwd()
os.chdir(MEERKAT_BASE_DIR)
# Start the FastAPI server
# Note: it isn't possible to support live reloading
# via uvicorn with this method
server = Server(
Config(
"meerkat.interactive.api.main:app",
port=port,
host=server_name,
# log_level="info" if dev else "warning",
log_level="warning",
)
)
server.run_in_thread()
os.chdir(currdir)
# If shareable, start the tunnel
if shareable:
server_name = setup_tunnel(port, subdomain=f"{subdomain}server")
return APIInfo(
api=MeerkatAPI,
server=server,
port=port,
name=server_name,
shared=shareable,
)
def run_frontend_dev(
port: int,
package_manager: Literal["npm", "bun"] = "npm",
env: dict = {},
) -> subprocess.Popen:
# Run the npm server in dev mode
process = subprocess.Popen(
[
package_manager,
"run",
"dev",
"--",
"--port",
str(port),
"--strictPort",
"true",
"--logLevel",
"info",
],
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# Make a regex for
# `Local: http://127.0.0.1:8000/\n` and
# `Local: http://localhost:8000/\n`
regex_1 = re.compile(r"http://" + "127.0.0.1" + r":(\d+)/")
regex_2 = re.compile(r"http://" + "localhost" + r":(\d+)/")
def escape_ansi(line):
ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
return ansi_escape.sub("", line)
# Need to check if it started successfully
start_time = time.time()
while process.poll() is None:
out = process.stdout.readline().decode("utf-8")
out = escape_ansi(out)
match_1 = regex_1.search(out)
match_2 = regex_2.search(out)
if match_1 or match_2:
break
if time.time() - start_time > 10:
raise TimeoutError(
"""Could not start frontend dev server.
Here are the stderr logs (if they are empty, this is likely an
issue with how we recognize if the server started successfully, please
file an issue on GitHub):
"""
+ process.stderr.read().decode("utf-8")
)
return process
def run_frontend_build(
package_manager: Literal["npm", "bun"] = "npm",
env: dict = {},
):
env.update({"VITE_API_URL_PLACEHOLDER": "http://meerkat.dummy"})
build_process = subprocess.Popen(
[
package_manager,
"run",
"build",
],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
# Print a progress bar with rich, show the time elapsed
start_time = time.time()
while build_process.poll() is None:
output = build_process.stdout.readline().decode("utf-8").strip()
# Pad output to 100 characters
output = output.ljust(100)
if "node_modules/" in output or "unused" in output:
continue
# Remove any symbols that would mess up the progress bar
stmt = f"Building (may take up to a minute)... {time.time() - start_time:.2f}s"
if is_notebook():
print(stmt, end="\r", flush=True)
else:
rich.print(stmt, end="\r", flush=True)
# Put a sleep to make the progress smoother
time.sleep(0.1)
if 'Wrote site to "build"' in output:
rich.print(
f"Build completed in {time.time() - start_time:.2f}s." + " " * 140
)
break
def download_frontend_build(version: str = None):
"""Download the frontend build to the meerkat internal app folder.
This function is for users who downloaded the meerkat repo but do not
have npm/bun available to build the frontend themselves.
Args:
version: The meerkat version associated with the build.
Defaults to the current meerkat package version.
"""
from meerkat.datasets.utils import download_url, extract_tar_file
if version is None:
version = mk_version
# Download the build from huggingface.
# This is a tarfile that contains the build folder.
# We extract it to the meerkat internal app folder.
url = f"https://huggingface.co/datasets/meerkat-ml/component-static-builds/resolve/main/static-build-{version}.tar.gz" # noqa: E501
# Check if the url exists.
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f"Could not find a build for version {version}. "
"Please check the the url for a list of versions: "
"https://huggingface.co/datasets/meerkat-ml/component-static-builds"
)
build_file = download_url(
url, dataset_dir=os.path.join(pathlib.Path.home(), ".meerkat", "build")
)
extract_tar_file(build_file, download_dir=MEERKAT_INTERNAL_APP_BUILD_DIR)
def run_frontend_prod(
port: int,
api_url: str,
libpath: pathlib.Path,
package_manager: Literal["npm", "bun"] = "npm",
env: dict = {},
skip_build: bool = False,
) -> subprocess.Popen:
if not skip_build:
run_frontend_build(package_manager, env)
else:
logger.debug("Skipping build step.")
assert (libpath / "build").exists(), "libpath must exist if skip_build is True."
# File find replacement for the VITE_API_URL_PLACEHOLDER
# Replace VITE_API_URL||"http://some.url.here:port" with
# VITE_API_URL||"http://localhost:8000"
# using a regex
file_find_replace(
libpath / "build",
r"(VITE_API_URL\|\|\".*?\")",
f'VITE_API_URL||"{api_url}"',
"*js",
)
file_find_replace(
libpath / ".svelte-kit/output/client/_app/",
r"(VITE_API_URL\|\|\".*?\")",
f'VITE_API_URL||"{api_url}"',
"*js",
)
# Run the statically built app with preview mode
# This doesn't serve the build directory, but instead serves the
# .svelte-kit/output/client/_app directory.
# This works with [slug] routes, but it requires a Cmd + Shift + R to
# hard refresh the page when the API server port changes. We need to
# investigate this further, so we can use this to serve the [slug] routes.
# process = subprocess.Popen(
# [
# package_manager,
# "run",
# "preview",
# "--",
# "--port",
# str(port),
# ],
# env=env,
# stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,
# )
# Alternately we run the statically built app with a simple python server
# Note: this does not seem to work with [slug] routes, so we
# should use the preview mode instead (gives a 404 error).
# We can use this if we explicitly write routes for each
# page. We are using this to serve pages using /?id=page_id for now.
os.chdir(libpath / "build")
process = subprocess.Popen(
[
"python",
"-m",
"http.server",
str(port),
],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
os.chdir(libpath)
return process
def run_frontend(
package_manager: Literal["npm", "bun"] = "npm",
port: int = FRONTEND_PORT,
dev: bool = True,
shareable: bool = False,
subdomain: str = "app",
apiurl: str = None,
appdir: str = MEERKAT_APP_DIR,
skip_build: bool = False,
) -> FrontendInfo:
"""Run the frontend server.
Args:
package_manager (Literal["npm", "bun"], optional):
The package manager to use. Defaults to "npm".
port (int, optional): The port to run the frontend server on.
Defaults to FRONTEND_PORT.
dev (bool, optional): Whether to run the frontend in development mode.
Defaults to True.
shareable (bool, optional): Whether to create a shareable link.
Defaults to False.
subdomain (str, optional): The subdomain to use for the shareable link.
Defaults to "app".
apiurl (str, optional): The URL of the API server.
Defaults to None.
appdir (str, optional): The directory of the frontend app.
Defaults to MEERKAT_APP_DIR.
skip_build (bool, optional): Whether to skip the build step in production.
Defaults to False.
Returns:
FrontendInfo: A FrontendInfo object containing the port
and process of the frontend server.
"""
currdir = os.getcwd()
# Search for the first available port in the range
# [port, port + TRY_NUM_PORTS)
port = get_first_available_port(int(port), int(port) + TRY_NUM_PORTS)
# Enter the "app/" directory
libpath = pathlib.Path(appdir)
os.chdir(libpath)
env = os.environ.copy()
# Start the frontend server
if dev:
# Update the VITE_API_URL environment variable
env.update({"VITE_API_URL": apiurl})
process = run_frontend_dev(port, package_manager, env)
else:
# Download the build folder from HF
if not os.path.exists(MEERKAT_INTERNAL_APP_BUILD_DIR):
logger.debug(f"Downloading frontend build for meerkat v{mk_version}")
download_frontend_build(version=mk_version)
# Read the timestamp of the most recent file change, for the last build
if os.path.exists(".buildprint"):
with open(".buildprint", "r") as f:
last_buildprint = int(f.read())
else:
last_buildprint = 0
# Get the timestamp of the most recent file change
most_recent_file = max(
(
# Exclude the build and package folders
os.path.join(root, f)
for root, _, files in os.walk(os.getcwd())
for f in files
if ("build" not in root)
and ("/package/" not in root)
and ("node_modules" not in root)
and (".svelte-kit" not in root)
and ("src/lib/wrappers" not in root)
and (f != "ComponentContext.svelte")
and (f != ".buildprint")
),
key=os.path.getctime,
)
buildprint = int(os.path.getctime(most_recent_file))
logger.debug("Most recent file change: %s (%s)", most_recent_file, buildprint)
# Record the timestamp of the most recent file change, for the current build
with open(".buildprint", "w") as f:
f.write(str(buildprint))
# If the most recent file change is the same as the last build,
# skip the build step
skip_build = skip_build or (last_buildprint == buildprint)
if not (libpath / "build").exists():
skip_build = False
process = run_frontend_prod(
port, apiurl, libpath, package_manager, env, skip_build
)
if shareable:
domain = setup_tunnel(port, subdomain=subdomain)
os.chdir(currdir)
return FrontendInfo(
package_manager=package_manager,
port=port,
name="localhost" if not shareable else domain,
shared=shareable,
process=process,
)
def start(
package_manager: Literal["npm", "bun"] = "npm",
shareable: bool = False,
subdomain: str = "app",
api_server_name: str = LOCALHOST_NAME,
api_port: int = API_PORT,
frontend_port: int = FRONTEND_PORT,
dev: bool = False,
skip_build: bool = True,
) -> Tuple[APIInfo, FrontendInfo]:
"""Start a Meerkat interactive server.
Args:
package_manager (str): the frontend package_manager to use. Defaults to "npm".
shareable (bool): whether to share the interface at a publicly accessible link.
This feature works by establishing a reverse SSH tunnel to a Meerkat server.
Do not use this feature with private data. In order to use this feature, you
will need an SSH key for the server. If you already have one, add it to the
file at f"{config.system.ssh_identity_file}, or set the option
`mk.config.system.ssh_identity_file` to the file where they are stored. If
you don't yet have a key, you can request access by emailing
eyuboglu@stanford.edu. Remember to ensure after downloading it that the
identity file is read/write only by the user (e.g. with
`chmod 600 path/to/id_file`). See `subdomain` arg for controlling the
domain name of the shared link. Defaults to False.
subdomain (str): the subdomain to use for the shared link. For example, if
`subdomain="myinterface"`, then the shareable link will have the domain
`myinterface.meerkat.wiki`. Defaults to None, in which case a random
subdomain will be generated.
api_server_name (str): the name of the API server. Defaults to "localhost".
api_port (int): the port to use for the Meerkat API server. Defaults to 5000.
frontend_port (int): the port to use for the Meerkat Vite server.
Defaults to 8000.
dev (bool): whether to run in development mode. Defaults to False.
Returns:
Tuple[APIInfo, FrontendInfo]: A tuple containing the APIInfo and
FrontendInfo objects.
"""
if MEERKAT_RUN_SUBPROCESS:
rich.print(
"Calling `start` from a script run with `mk run` has no effect. "
"Continuing..."
)
return
# Run the API server
api_info = run_api_server(api_server_name, api_port, dev, shareable, subdomain)
# Run the frontend server
frontend_info = run_frontend(
package_manager,
frontend_port,
dev,
shareable,
subdomain,
api_info.url,
PathHelper().appdir,
skip_build=skip_build,
)
# Store in global state
state.frontend_info = frontend_info
state.api_info = api_info
return api_info, frontend_info
# TODO: @atexist.register doesn't work for notebooks, find a way to make it work
# there.
@atexit.register
def cleanup():
"""Clean up Meerkat processes and files when exiting."""
if MEERKAT_RUN_SUBPROCESS:
# Don't clean up anything if running from the `mk run` subprocess.
return
if MEERKAT_RUN_PROCESS:
try:
# Remove the reload counter file if running from the `mk run` process.
os.remove(f"{PathHelper().appdir}/.{MEERKAT_RUN_ID}.reload")
except FileNotFoundError:
pass
if state.frontend_info or state.api_info:
# Keep message inside if statement to avoid printing when not needed
# e.g. when running `mk run --help`
rich.print(
"\n:electric_plug: Cleaning up [violet]Meerkat[/violet].\n" ":wave: Bye!",
)
# Shut down servers
if state.frontend_info is not None:
if state.frontend_info.process:
state.frontend_info.process.terminate()
state.frontend_info.process.wait()
if state.api_info is not None:
if state.api_info.server:
state.api_info.server.close()
if state.api_info.process:
state.api_info.process.terminate()
state.api_info.process.wait()
# from meerkat.interactive.svelte import SvelteWriter
# TODO: this was causing issues earlier, but it seems to be working now.
# Investigate further and we can remove this comment.
# SvelteWriter().cleanup()
|
meerkat-main
|
meerkat/interactive/startup.py
|
# TODO: Think about whether we can move to
# from typing_extensions import Protocol
# in order to implement the EventInterface based type hints.
class EventInterface:
"""Defines the interface for an event.
Subclass this to define the interface for a new event type.
The class will specify the keyword arguments returned by an event from the
frontend to any endpoint that has subscribed to it.
All endpoints that are expected to receive an event of this type should
ensure they have a signature that matches the keyword arguments defined
in this class.
"""
pass
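
if __name__ == "__main__":
    # Hedged sketch: a hypothetical event interface for a cell-click event.
    # Subclasses declare the keyword arguments the frontend sends with the
    # event, so a subscribing endpoint should accept `row` and `column`.
    class OnCellClick(EventInterface):
        row: int
        column: str

    print(OnCellClick.__annotations__)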
|
meerkat-main
|
meerkat/interactive/event.py
|
import os
import threading
import time
import uvicorn
# By default, the local server will try to open on localhost, port 7860.
# If that is not available, then it will try 7861, 7862, ... 7959.
API_PORT = int(os.getenv("MK_API_PORT", "5000"))
FRONTEND_PORT = int(os.getenv("MK_FRONTEND_PORT", "8000"))
INITIAL_PORT_VALUE = int(os.getenv("MK_SERVER_PORT", "7860"))
TRY_NUM_PORTS = int(os.getenv("MK_NUM_PORTS", "100"))
LOCALHOST_NAME = os.getenv("MK_SERVER_NAME", "127.0.0.1")
MEERKAT_API_SERVER = "https://api.meerkat.app/v1/tunnel-request"
class Server(uvicorn.Server):
"""Taken from https://stackoverflow.com/questions/61577643/python-how-to-\
use-fastapi-and-uvicorn-run-without-blocking-the-thread and Gradio."""
def install_signal_handlers(self):
pass
def run_in_thread(self):
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
start_time = time.time()
while not self.started:
time.sleep(1e-3)
# Wait 3 seconds for the server to start, otherwise raise an error.
if time.time() - start_time > 3:
raise RuntimeError(
"Server failed to start. "
"This is likely due to a port conflict, "
"retry with another port."
)
def close(self):
self.should_exit = True
self.thread.join()
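
if __name__ == "__main__":
    # Minimal sketch: serve a throwaway FastAPI app in a background thread
    # and shut it down. The FastAPI app below is a stand-in, not Meerkat's
    # own API; host and port come from the environment-driven defaults above.
    from fastapi import FastAPI

    config = uvicorn.Config(FastAPI(), host=LOCALHOST_NAME, port=INITIAL_PORT_VALUE)
    server = Server(config=config)
    server.run_in_thread()
    print(f"Serving on http://{LOCALHOST_NAME}:{INITIAL_PORT_VALUE}")
    server.close()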
|
meerkat-main
|
meerkat/interactive/server.py
|
from abc import ABC
from typing import TYPE_CHECKING, Any, List
from pydantic import BaseModel
if TYPE_CHECKING:
from meerkat.interactive.node import Node
class Modification(BaseModel, ABC):
"""Base class for modifications.
Modifications are used to track changes to Reference and Store nodes
in the graph.
Attributes:
id (str): The id of the Reference or Store.
"""
id: str
@property
def node(self):
"""The Reference or Store node that this modification is for."""
raise NotImplementedError()
def add_to_queue(self):
"""Add this modification to the queue."""
# Get the queue
from meerkat.state import state
state.modification_queue.add(self)
# TODO: need to consolidate Modification
# associate them with NodeMixin (Nodeable objects)
class DataFrameModification(Modification):
scope: List[str]
type: str = "ref"
@property
def node(self) -> "Node":
from meerkat.state import state
try:
df = state.identifiables.get(group="dataframes", id=self.id)
return df.inode
except Exception:
return state.identifiables.get(group="nodes", id=self.id)
class StoreModification(Modification):
value: Any # : Storeable # TODO(karan): Storeable prevents
# us from storing objects in the store
type: str = "store"
@property
def backend_only(self) -> bool:
"""Whether this modification should not be sent to frontend."""
from meerkat.state import state
store = state.identifiables.get(group="stores", id=self.id)
return store._self_backend_only
@property
def node(self) -> "Node":
from meerkat.state import state
# FIXME: what's going on with this try-except here?
try:
store = state.identifiables.get(group="stores", id=self.id)
return store.inode
except Exception:
return state.identifiables.get(group="nodes", id=self.id)
|
meerkat-main
|
meerkat/interactive/modification.py
|
"""This file providdes remote forwarding."""
import os
import re
import subprocess
import time
from tempfile import mkstemp
from meerkat.config import config
PORT = "2222"
DOMAIN = "meerkat.wiki"
def setup_tunnel(local_port: int, subdomain: str) -> str:
    """Establish a reverse SSH tunnel so that `{subdomain}.meerkat.wiki`
    forwards to `localhost:{local_port}`, and return the domain that was
    actually assigned."""
if not os.path.exists(config.system.ssh_identity_file):
raise ConnectionError(
f"No SSH keys found at {config.system.ssh_identity_file}. "
"Request access to Meerkat's shareable link feature by emailing "
"eyuboglu@stanford.edu. "
"Once you have a key add it to the file at "
f"{config.system.ssh_identity_file}, or set the "
"`mk.config.system.ssh_identity_file` to the file where they are stored."
)
# open a temporary file to write the output of the npm process
out_file, out_path = mkstemp(suffix=".out")
err_file, err_path = mkstemp(suffix=".err")
subprocess.Popen(
[
"ssh",
# need to use the -T arg to avoid corruption of the users terminal
"-T",
"-p",
PORT,
# sish does not support ControlMaster as discussed in this issue
# https://github.com/antoniomika/sish/issues/252
"-o",
"ControlMaster=no",
"-i",
config.system.ssh_identity_file,
"-R",
f"{subdomain}:80:localhost:{local_port}",
DOMAIN,
],
stdout=out_file,
stderr=err_file,
)
MAX_WAIT = 10
for i in range(MAX_WAIT):
time.sleep(0.5)
# this checks whether or not the tunnel has successfully been established
# and the subdomain is printed to out
        match = re.search(rf"http://(.*)\.{DOMAIN}", open(out_path, "r").read())
if match is not None:
break
if match is None:
raise ValueError(
f"Failed to establish tunnel: \
out={open(out_path, 'r').read()} err={open(err_path, 'r').read()}"
)
actual_subdomain = match.group(1)
if actual_subdomain != subdomain:
# need to check because the requested subdomain may already be in use
print(
f"Subdomain {subdomain} is not available. "
f"Using {actual_subdomain} instead."
)
return f"{actual_subdomain}.{DOMAIN}"
|
meerkat-main
|
meerkat/interactive/tunneling.py
|
from abc import ABC, abstractmethod
from pydantic import BaseModel
class FrontendMixin(ABC):
"""A mixin for objects that can be sent to the frontend.
The purpose of this mixin is currently just to enable clean
`isinstance` checks when determining whether an object can be sent
to the frontend. Each subclass needs to implement frontend
themselves.
"""
@property
@abstractmethod
def frontend(self) -> BaseModel:
"""Returns a Pydantic model that can be should be sent to the frontend.
These models are typically named <something>Frontend (e.g.
ComponentFrontend, StoreFrontend).
"""
raise NotImplementedError()
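
if __name__ == "__main__":
    # Minimal sketch: a toy object satisfying FrontendMixin by returning a
    # small Pydantic model. All names here are illustrative.
    class CounterFrontend(BaseModel):
        value: int

    class Counter(FrontendMixin):
        def __init__(self, value: int):
            self.value = value

        @property
        def frontend(self) -> BaseModel:
            return CounterFrontend(value=self.value)

    print(Counter(3).frontend)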
|
meerkat-main
|
meerkat/interactive/frontend.py
|
import pathlib
from meerkat.interactive.app.src.lib.component import (
contrib,
core,
flowbite,
html,
plotly,
)
from meerkat.interactive.app.src.lib.component.abstract import BaseComponent, Component
from meerkat.interactive.app.src.lib.component.contrib.fm_filter import FMFilter
from meerkat.interactive.app.src.lib.component.contrib.mocha import ChangeList
from meerkat.interactive.app.src.lib.component.core import * # noqa: F401, F403
from meerkat.interactive.app.src.lib.shared.cell.website import Website
from meerkat.interactive.endpoint import Endpoint, endpoint, endpoints, make_endpoint
from meerkat.interactive.graph import ( # noqa: F401
Store,
is_unmarked_context,
magic,
mark,
reactive,
unmarked,
)
from meerkat.interactive.modification import DataFrameModification, Modification
from meerkat.interactive.page import Page, page
from meerkat.interactive.startup import start
from meerkat.interactive.state import State
from meerkat.interactive.utils import print
INTERACTIVE_LIB_PATH = pathlib.Path(__file__).parent.resolve()
__all__ = [
# <<<< Startup >>>>
"start",
# <<<< Core Library >>>>
# Component
"BaseComponent",
"Component",
# Page
"Page",
"page",
# Store
"Store",
# Endpoint
"Endpoint",
"endpoint",
"endpoints",
"make_endpoint",
# Reactivity
"reactive",
"unmarked",
"is_unmarked_context",
"mark",
"magic",
# Modification Types
"DataFrameModification",
"Modification",
# Add-ons
"State",
# <<<< Component Namespaces >>>>
"contrib",
"core",
"flowbite",
"html",
"plotly",
# <<<< Contrib Components >>>>
"ChangeList",
"FMFilter",
# <<<< Shared Components >>>>
"Website",
# <<<< Utilities >>>>
"print",
]
# Add core components to the top-level namespace.
__all__.extend(core.__all__)
|
meerkat-main
|
meerkat/interactive/__init__.py
|
import code
from functools import partial, wraps
from typing import Callable
import rich
from pydantic import BaseModel
from meerkat.constants import MEERKAT_RUN_SUBPROCESS, is_notebook
from meerkat.interactive import html
from meerkat.interactive.app.src.lib.component._internal.progress import Progress
from meerkat.interactive.app.src.lib.component.abstract import (
BaseComponent,
ComponentFrontend,
)
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.state import state
if is_notebook():
from IPython.display import IFrame
def page(fn: Callable):
@wraps(fn)
def wrapper(*args, **kwargs):
page = Page(
component=partial(fn, *args, **kwargs),
id=fn.__name__,
)
return page.launch()
return wrapper
class PageFrontend(BaseModel):
component: ComponentFrontend
name: str
class Page(IdentifiableMixin):
_self_identifiable_group: str = "pages"
def __init__(
self,
component: BaseComponent,
id: str,
name: str = "Page",
height: str = "100%",
width: str = "100%",
progress: bool = False,
):
super().__init__(id=id)
if progress:
component = html.flexcol(
slots=[
Progress(),
component,
],
classes="h-full",
)
self.component = component
self.name = name
self.height = height
self.width = width
def __call__(self):
"""Return the FastAPI object, this allows Page objects to be targeted
by uvicorn when running a script."""
from meerkat.interactive.api import MeerkatAPI
return MeerkatAPI
def launch(self, return_url: bool = False):
if state.frontend_info is None:
rich.print("Frontend is not initialized. Running `mk.gui.start()`.")
from .startup import start
start()
# TODO: restore the original route
# We had issues using the original route when serving [slug] pages
# in production mode, see `run_frontend_prod` in `startup.py`.
# url = f"{state.frontend_info.url}/{self.id}"
url = f"{state.frontend_info.url}/?id={self.id}"
if return_url:
return url
if is_notebook():
return IFrame(url, width=self.width, height=self.height)
else:
rich.print(
":scroll: "
f"Frontend [violet]{self.id}[/violet] "
f"is at [violet]{url}[/violet]"
)
rich.print(
":newspaper: "
f"API docs are at [violet]{state.api_info.docs_url}[/violet]"
)
rich.print()
# in_mk_run_subprocess = int(os.environ.get("MEERKAT_RUN", 0))
if not MEERKAT_RUN_SUBPROCESS:
# get locals of the main module when running in script.
import __main__
code.interact(local=__main__.__dict__)
@property
def frontend(self):
return PageFrontend(name=self.name, component=self.component.frontend)
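
if __name__ == "__main__":
    # Hedged sketch: build and launch a Page around a single component.
    # `mk.gui.Text` and its `data` prop are assumptions for illustration;
    # `launch()` will start the servers if they are not already running.
    import meerkat as mk

    hello_page = Page(component=mk.gui.Text(data="Hello, Meerkat!"), id="hello")
    hello_page.launch()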
|
meerkat-main
|
meerkat/interactive/page.py
|
from typing import TYPE_CHECKING, Dict, List, TypeVar, Union
from pydantic import StrictBool, StrictFloat, StrictInt, StrictStr
if TYPE_CHECKING:
from meerkat.dataframe import DataFrame
from meerkat.ops.sliceby.sliceby import SliceBy
Primitive = Union[StrictInt, StrictStr, StrictFloat, StrictBool]
Storeable = Union[
None,
Primitive,
List[Primitive],
Dict[Primitive, Primitive],
Dict[Primitive, List[Primitive]],
List[Dict[Primitive, Primitive]],
]
T = TypeVar("T", "DataFrame", "SliceBy")
|
meerkat-main
|
meerkat/interactive/types.py
|
from __future__ import annotations
import inspect
import logging
import typing
from functools import partial, wraps
from typing import Any, Callable, Generic, Union
from fastapi import APIRouter, Body
from pydantic import BaseModel, create_model
from meerkat.interactive.graph import Store, trigger, unmarked
from meerkat.interactive.graph.store import _unpack_stores_from_object
from meerkat.interactive.node import Node, NodeMixin
from meerkat.interactive.types import T
from meerkat.mixins.identifiable import IdentifiableMixin, is_meerkat_id
from meerkat.state import state
from meerkat.tools.utils import get_type_hint_args, get_type_hint_origin, has_var_args
logger = logging.getLogger(__name__)
# KG: must declare this dynamically defined model here,
# otherwise we get a FastAPI error
# when only declaring this inside the Endpoint class.
FnPydanticModel = None
class SingletonRouter(type):
"""A metaclass that ensures that only one instance of a router is created,
*for a given prefix*.
A prefix is a string that is used to identify a router. For example,
the prefix for the router that handles endpoints is "/endpoint". We
want to ensure that only one router is created for each prefix.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
prefix = kwargs["prefix"]
# Look up if this (cls, prefix) pair has been created before
if (cls, prefix) not in cls._instances:
# If not, we let a new instance be created
cls._instances[(cls, prefix)] = super(SingletonRouter, cls).__call__(
*args, **kwargs
)
return cls._instances[(cls, prefix)]
class SimpleRouter(IdentifiableMixin, APIRouter): # , metaclass=SingletonRouter):
# KG: using the SingletonRouter metaclass causes a bug.
# app.include_router() inside Endpoint is called multiple times
# for the same router. This causes an error because some
# endpoints are registered multiple times because the FastAPI
# class doesn't check if an endpoint is already registered.
# As a patch, we're generating one router per Endpoint object
# (this could generate multiple routers for the same prefix, but
# that's not been a problem).
"""A very simple FastAPI router.
This router allows you to pass in arbitrary keyword arguments that are
passed to the FastAPI router, and sets sensible defaults for the
prefix, tags, and responses.
Note that if you create two routers with the same prefix, they will
not be the same object.
Attributes:
prefix (str): The prefix for this router.
**kwargs: Arbitrary keyword arguments that are passed to the FastAPI
router.
"""
_self_identifiable_group: str = "routers"
def __init__(self, prefix: str, **kwargs):
super().__init__(
prefix=prefix,
tags=[prefix.strip("/").replace("/", "-")],
responses={404: {"description": "Not found"}},
id=self.prepend_meerkat_id_prefix(prefix),
**kwargs,
)
class EndpointFrontend(BaseModel):
"""A schema for sending an endpoint to the frontend."""
endpointId: Union[str, None]
# TODO: technically Endpoint doesn't need to be NodeMixin (probably)
class Endpoint(IdentifiableMixin, NodeMixin, Generic[T]):
EmbeddedBody = partial(Body, embed=True)
_self_identifiable_group: str = "endpoints"
def __init__(
self,
fn: Callable = None,
prefix: Union[str, APIRouter] = None,
route: str = None,
):
"""Create an endpoint from a function in Meerkat.
Typically, you will not need to call this class directly, but
instead use the `endpoint` decorator.
Attributes:
fn (Callable): The function to create an endpoint from.
prefix (str): The prefix for this endpoint.
route (str): The route for this endpoint.
Note:
All endpoints can be hit with a POST request at
/{endpoint_id}/dispatch/
The request needs a JSON body with the following keys:
- kwargs: a dictionary of keyword arguments to be
passed to the endpoint function `fn`
- payload: additional payload, if any
Optionally, the user can customize how endpoints are
organized by specifying a prefix and a route. The prefix
is a string that is used to identify a router. For example,
the prefix for the router that handles endpoints is "/endpoint".
The route is a string that is used to identify an endpoint
within a router. For example, the route for the endpoint
that handles the `get` function could be "/get".
If only a prefix is specified, then the route will be the
name of the function e.g. "my_endpoint". If both a prefix
and a route are specified, then the route will be the
specified route e.g. "/specific/route/".
Refer to the FastAPI documentation for more information
on how to create routers and endpoints.
"""
super().__init__()
if fn is None:
self.id = None
self.fn = fn
self._validate_fn()
if prefix is None:
# No prefix, no router
self.router = None
else:
# Make the router
if isinstance(prefix, APIRouter):
self.router = prefix
else:
self.router = SimpleRouter(prefix=prefix)
self.prefix = prefix
self.route = route
def __repr__(self) -> str:
if hasattr(self.fn, "__name__"):
name = self.fn.__name__
elif hasattr(self.fn, "func"):
name = self.fn.func.__name__
else:
name = None
return (
f"Endpoint(id={self.id}, name={name}, prefix={self.prefix}, "
f"route={self.route})"
)
def _validate_fn(self):
"""Validate the function `fn`."""
if not callable(self.fn):
raise TypeError(f"Endpoint function {self.fn} is not callable.")
# Disallow *args
if has_var_args(self.fn):
raise TypeError(
f"Endpoint function {self.fn} has a `*args` parameter."
" Please use keyword arguments instead."
)
# Do we allow lambdas?
@property
def frontend(self):
return EndpointFrontend(
endpointId=self.id,
)
def to_json(self):
return {"endpointId": self._self_id}
def run(self, *args, **kwargs) -> Any:
"""Actually run the endpoint function `fn`.
Args:
*args: Positional arguments to pass to `fn`.
**kwargs: Keyword arguments to pass to `fn`.
Returns:
The return value of `fn`.
"""
logger.debug(f"Running endpoint {self}.")
# Apply a partial function to ingest the additional arguments
# that are passed in
partial_fn = partial(self.fn, *args, **kwargs)
# Check if the partial_fn has any arguments left to be filled
spec = inspect.getfullargspec(partial_fn)
# Check if spec has no args: if it does have args,
# it means that we can't call the function without filling them in
no_args = len(spec.args) == 0
# Check if all the kwonlyargs are in the keywords: if yes, we've
# bound all the keyword arguments
no_kwonlyargs = all([arg in partial_fn.keywords for arg in spec.kwonlyargs])
# Get the signature
signature = inspect.signature(partial_fn)
# Check if any parameters are unfilled args
no_unfilled_args = all(
[
param.default is not param.empty
for param in signature.parameters.values()
]
)
if not (no_args and no_kwonlyargs and no_unfilled_args):
# Find the missing keyword arguments
missing_args = [
arg for arg in spec.kwonlyargs if arg not in partial_fn.keywords
] + [
param.name
for param in signature.parameters.values()
if param.default == param.empty
]
raise ValueError(
f"Endpoint {self.id} still has arguments left to be \
filled (args: {spec.args}, kwargs: {missing_args}). \
Ensure that all keyword arguments \
are passed in when calling `.run()` on this endpoint."
)
# Clear the modification queue before running the function
# This is an invariant: there should be no pending modifications
# when running an endpoint, so that only the modifications
# that are made by the endpoint are applied
state.modification_queue.clear()
# Ready the ModificationQueue so that it can be used to track
# modifications made by the endpoint
state.modification_queue.ready()
state.progress_queue.add(
self.fn.func.__name__ if isinstance(self.fn, partial) else self.fn.__name__
)
try:
# The function should not add any operations to the graph.
with unmarked():
result = partial_fn()
except Exception as e:
# Unready the modification queue
state.modification_queue.unready()
raise e
with unmarked():
modifications = trigger()
# End the progress bar
state.progress_queue.add(None)
return result, modifications
def partial(self, *args, **kwargs) -> Endpoint:
# Any NodeMixin objects that are passed in as arguments
# should have this Endpoint as a non-triggering child
if not self.has_inode():
node = self.create_inode()
self.attach_to_inode(node)
for arg in list(args) + list(kwargs.values()):
if isinstance(arg, NodeMixin):
if not arg.has_inode():
inode_id = None if not isinstance(arg, Store) else arg.id
node = arg.create_inode(inode_id=inode_id)
arg.attach_to_inode(node)
arg.inode.add_child(self.inode, triggers=False)
# TODO (sabri): make this work for derived dataframes
# There's a subtle issue with partial that we should figure out. I spent an
# hour or so on it, but am gonna table it til after the deadline tomorrow
# because I have a hacky workaround. Basically, if we create an endpoint
# partial passing a "derived" dataframe, when the endpoint is called, we
# should expect that the current value of the dataframe will be passed.
# Currently, the original value of the dataframe is passed. It makes sense to
# me why this is happening, but the right fix is eluding me.
# All NodeMixin objects need to be replaced by their node id.
# This ensures that we can resolve the correct object at runtime
# even if the object is a result of a reactive function
# (i.e. not a root of the graph).
def _get_node_id_or_arg(arg):
if isinstance(arg, NodeMixin):
assert arg.has_inode()
return arg.inode.id
return arg
args = [_get_node_id_or_arg(arg) for arg in args]
kwargs = {key: _get_node_id_or_arg(val) for key, val in kwargs.items()}
fn = partial(self.fn, *args, **kwargs)
fn.__name__ = self.fn.__name__
return Endpoint(
fn=fn,
prefix=None,
route=None,
)
def compose(self, fn: Union[Endpoint, Callable]) -> Endpoint:
"""Create a new Endpoint that applies `fn` to the return value of this
Endpoint. Effectively equivalent to `fn(self.fn(*args, **kwargs))`.
If the return value is None and `fn` doesn't take any inputs, then
`fn` will be called with no arguments.
Args:
fn (Endpoint, callable): An Endpoint or a callable function that accepts
a single argument of the same type as the return of this Endpoint
(i.e. self).
Return:
Endpoint: The new composed Endpoint.
"""
if not isinstance(fn, Endpoint):
fn = Endpoint(fn=fn)
# `fn` may not take any inputs.
# FIXME: Should this logic be in ``compose``? or some other function?
sig = get_signature(fn)
pipe_return = len(sig.parameters) > 0
@wraps(self.fn)
def composed(*args, **kwargs):
out = self.fn(*args, **kwargs)
return fn.fn(out) if pipe_return else fn.fn()
composed.__name__ = f"composed({str(self)} | {str(fn)})"
return Endpoint(
fn=composed,
prefix=self.prefix,
route=self.route,
)
def add_route(self, method: str = "POST") -> None:
"""Add a FastAPI route for this endpoint to the router. This function
will not do anything if the router is None (i.e. no prefix was
specified).
This function is called automatically when the endpoint is
created using the `endpoint` decorator.
"""
if self.router is None:
return
if self.route is None:
# The route will be postfixed with the fn name
self.route = f"/{self.fn.__name__}/"
# Analyze the function signature of `fn` to
# construct a dictionary, mapping argument names
# to their types and default values for creating a
# Pydantic model.
# During this we also
# - make sure that args are either type-hinted or
# annotated with a default value (can't create
# a Pydantic model without a type hint or default)
# - replace arguments that have type-hints which
# are subclasses of `IdentifiableMixin` with
# strings (i.e. the id of the Identifiable)
# (e.g. `Store` -> `str`)
signature = inspect.signature(self.fn)
pydantic_model_params = {}
for p in signature.parameters:
annot = signature.parameters[p].annotation
default = signature.parameters[p].default
has_default = default is not inspect._empty
if annot is inspect.Parameter.empty:
if p == "kwargs":
# Allow arbitrary keyword arguments
pydantic_model_params[p] = (dict, ...)
continue
if not has_default:
raise ValueError(
f"Parameter {p} must have a type annotation or "
"a default value."
)
elif isinstance(annot, type) and issubclass(annot, IdentifiableMixin):
# e.g. Stores must be referred to by str ids when
# passed into the API
pydantic_model_params[p] = (str, ...)
else:
pydantic_model_params[p] = (
(annot, default) if has_default else (annot, ...)
)
# Allow arbitrary types in the Pydantic model
class Config:
arbitrary_types_allowed = True
# Create the Pydantic model, named `{fn_name}Model`
global FnPydanticModel
FnPydanticModel = create_model(
f"{self.fn.__name__.capitalize()}{self.prefix.replace('/', '').capitalize()}Model", # noqa: E501
__config__=Config,
**pydantic_model_params,
)
# Create a wrapper function, with kwargs that conform to the
# Pydantic model, and a return annotation that matches `fn`
def _fn(
kwargs: FnPydanticModel = Endpoint.EmbeddedBody(),
): # -> signature.return_annotation:
return self.fn(**kwargs.dict())
# from inspect import Parameter, Signature
# params = []
# for p, (annot, default) in pydantic_model_params.items():
# params.append(
# Parameter(
# p,
# kind=Parameter.POSITIONAL_OR_KEYWORD,
# annotation=annot,
# default=default,
# )
# )
# _fn.__signature__ = Signature(params)
# Name the wrapper function the same as `fn`, so it looks nice
# in the docs
_fn.__name__ = self.fn.__name__
else:
# If the user specifies a route manually, then they're responsible for
# everything, including type-hints and default values.
signature = inspect.signature(self.fn)
for p in signature.parameters:
annot = signature.parameters[p].annotation
# If annot is a subclass of `IdentifiableMixin`, replace
# it with the `str` type (i.e. the id of the Identifiable)
# (e.g. `Store` -> `str`)
if isinstance(annot, type) and issubclass(annot, IdentifiableMixin):
self.fn.__annotations__[p] = str
_fn = self.fn
# Make FastAPI endpoint for POST requests
self.router.add_api_route(
self.route + "/" if not self.route.endswith("/") else self.route,
_fn,
methods=[method],
)
        # Must add the router to the app again, every time a new route is
        # added; otherwise, the new route does not show up in the docs.
from meerkat.interactive.api.main import app
app.include_router(self.router)
def __call__(self, *args, __fn_only=False, **kwargs):
"""Calling the endpoint will just call .run(...) by default.
If `__fn_only=True` is specified, it will call the raw function
underlying this endpoint.
"""
if __fn_only:
# FIXME(Sabri): This isn't working for some reason. The '__fn_only' arg
# is for some reason being put in the kwargs dict. Workaround is to just
# use self.fn directly.
return self.fn(*args, **kwargs)
return self.run(*args, **kwargs)
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if not isinstance(v, cls):
return make_endpoint(v)
return v
class EndpointProperty(Endpoint, Generic[T]):
pass
def make_endpoint(endpoint_or_fn: Union[Callable, Endpoint, None]) -> Endpoint:
"""Make an Endpoint."""
return (
endpoint_or_fn
if isinstance(endpoint_or_fn, Endpoint)
else Endpoint(endpoint_or_fn)
)
def endpoint(
fn: Callable = None,
prefix: Union[str, APIRouter] = None,
route: str = None,
method: str = "POST",
) -> Endpoint:
"""Decorator to mark a function as an endpoint.
An endpoint is a function that can be called to
- update the value of a Store (e.g. incrementing a counter)
- update a DataFrame (e.g. adding a new row)
- run a computation and return its result to the frontend
- run a function in response to a frontend event (e.g. button
click)
Endpoints differ from reactive functions in that they are not
automatically triggered by changes in their inputs. Instead,
they are triggered by explicit calls to the endpoint function.
The Store and DataFrame objects that are modified inside the endpoint
function will automatically trigger reactive functions that
depend on them.
.. code-block:: python
@endpoint
def increment(count: Store, step: int = 1):
count.set(count + step)
# ^ update the count Store, which will trigger operations
# that depend on it
# Create a button that calls the increment endpoint
counter = Store(0)
button = Button(on_click=increment(counter))
# ^ read this as: call the increment endpoint with the `counter`
# Store when the button is clicked
Args:
fn: The function to decorate.
prefix: The prefix to add to the route. If a string, it will be
prepended to the route. If an APIRouter, the route will be
added to the router.
route: The route to add to the endpoint. If not specified, the
route will be the name of the function.
method: The HTTP method to use for the endpoint. Defaults to
"POST".
Returns:
The decorated function, as an Endpoint object.
"""
if fn is None:
return partial(endpoint, prefix=prefix, route=route, method=method)
@wraps(fn)
def _endpoint(fn: Callable):
# Gather up
# 1. all the arguments that are hinted as Stores
# 2. the hinted arguments that subclass IdentifiableMixin
# e.g. Store, Endpoint, Page, etc.
stores = set()
identifiables = {}
for name, annot in inspect.getfullargspec(fn).annotations.items():
is_annotation_store = _is_annotation_store(annot)
if is_annotation_store:
stores.add(name)
# TODO: See if we can remove this in the future.
if is_annotation_store or (
isinstance(annot, type) and issubclass(annot, IdentifiableMixin)
):
# This will also include `Store`, so it will be a superset
# of `stores`
identifiables[name] = annot
@wraps(fn)
def wrapper(*args, **kwargs):
# Keep the arguments that were not annotated to be `Stores`
fn_signature = inspect.signature(fn)
fn_bound_arguments = fn_signature.bind(*args, **kwargs).arguments
# `Identifiables` that are passed into the function
# may be passed in as a string id, or as the object itself
# If they are passed in as a string id, we need to get the object
# from the registry
_args, _kwargs = [], {}
for k, v in fn_bound_arguments.items():
if k in identifiables:
# Dereference the argument if it was passed in as a string id
if not isinstance(v, str):
# Not a string id, so just use the object
_kwargs[k] = v
else:
if isinstance(v, IdentifiableMixin):
# v is a string, but it is also an IdentifiableMixin
# e.g. Store("foo"), so just use v as is
_kwargs[k] = v
else:
# v is a string id
try:
# Directly try to look up the string id in the
# registry of the annotated type
_kwargs[k] = identifiables[k].from_id(v)
except Exception:
# If that fails, try to look up the string id in
# the Node registry, and then get the object
# from the Node
try:
_kwargs[k] = Node.from_id(v).obj
except Exception as e:
# If that fails and the object is a non-id string,
# then just use the string as is.
# We have to do this check here rather than above
# because we want to make sure we check for all
# identifiable and nodes before checking if the
# string is just a string.
# this is required for compatibility with
# IdentifiableMixin objects that do not start with
# the meerkat id prefix.
if isinstance(v, str) and not is_meerkat_id(v):
_kwargs[k] = v
else:
raise e
else:
if k == "args":
# These are *args under the `args` key
# These are the only arguments that will be passed in as
# *args to the fn
v = [_resolve_id_to_obj(_value) for _value in v]
_args, _ = _unpack_stores_from_object(v)
elif k == "kwargs":
# These are **kwargs under the `kwargs` key
v = {_k: _resolve_id_to_obj(_value) for _k, _value in v.items()}
v, _ = _unpack_stores_from_object(v)
_kwargs = {**_kwargs, **v}
else:
# All other positional arguments that were not *args were
# bound, so they become kwargs
v, _ = _unpack_stores_from_object(_resolve_id_to_obj(v))
_kwargs[k] = v
try:
with unmarked():
# Run the function
result = fn(*_args, **_kwargs)
except Exception as e:
# If the function raises an exception, log it and return
# the exception
# In case the exception is about .set() being missing, add
# a more helpful error message
if "no attribute 'set'" in str(e):
# Get the name of the object that was passed in
# as a Store, but did not have a .set() method
obj_name = str(e).split("'")[1].strip("'")
# Update the error message to be more helpful
e = AttributeError(
f"Exception raised in endpoint `{fn.__name__}`. "
f"The object of type `{obj_name}` that you called to "
"update with `.set()` "
"is not a `Store`. You probably forgot to "
"annotate this object's typehint in the signature of "
f"`{fn.__name__}` as a `Store` i.e. \n\n"
"@endpoint\n"
f"def {fn.__name__}(..., parameter: Store, ...):\n\n"
"Remember that without this type annotation, the object "
"will be automatically unpacked by Meerkat inside the endpoint "
"if it is a `Store`."
)
logger.exception(e)
raise e
# Return the result of the function
return result
# Register the endpoint and return it
endpoint = Endpoint(
fn=wrapper,
prefix=prefix,
route=route,
)
endpoint.add_route(method)
return endpoint
return _endpoint(fn)
def endpoints(cls: type = None, prefix: str = None):
"""Decorator to mark a class as containing a collection of endpoints. All
instance methods in the marked class will be converted to endpoints.
This decorator is useful when you want to create a class that
contains some logical state variables (e.g. a Counter class), along
with methods to manipulate the values of those variables (e.g.
increment or decrement the counter).
"""
if cls is None:
return partial(endpoints, prefix=prefix)
_ids = {}
_max_ids = {}
if cls not in _ids:
_ids[cls] = {}
_max_ids[cls] = 1
def _endpoints(cls):
class EndpointClass:
def __init__(self, *args, **kwargs):
self.instance = cls(*args, **kwargs)
self.endpoints = {}
# Access all the user-defined attributes of the instance
# to create endpoints
for attrib in dir(self.instance):
if attrib.startswith("__"):
continue
obj = self.instance.__getattribute__(attrib)
if callable(obj):
if attrib not in self.endpoints:
self.endpoints[attrib] = endpoint(
obj, prefix=prefix + f"/{_ids[cls][self]}"
)
def __getattribute__(self, attrib):
if self not in _ids[cls]:
_ids[cls][self] = _max_ids[cls]
_max_ids[cls] += 1
try:
obj = super().__getattribute__(attrib)
return obj
except AttributeError:
pass
obj = self.instance.__getattribute__(attrib)
if callable(obj):
if attrib not in self.endpoints:
return obj
return self.endpoints[attrib]
else:
return obj
return EndpointClass
return _endpoints(cls)
def get_signature(fn: Union[Callable, Endpoint]) -> inspect.Signature:
"""Get the signature of a function or endpoint.
Args:
fn: The function or endpoint to get the signature of.
Returns:
The signature of the function or endpoint.
"""
if isinstance(fn, Endpoint):
fn = fn.fn
return inspect.signature(fn)
def _resolve_id_to_obj(value):
if isinstance(value, str) and is_meerkat_id(value):
# This is a string that corresponds to a meerkat id,
# so look it up.
return Node.from_id(value).obj
return value
def _is_annotation_store(type_hint) -> bool:
"""Check if a type hint is a Store or a Union of Stores.
Returns True if:
- The type hint is a Store
- The type hint is a Union of Store and other non-Store values.
- The type hint is a generic store Store[T] or Union[Store[T], ...]
"""
if isinstance(type_hint, type) and issubclass(type_hint, Store):
return True
if isinstance(type_hint, typing._GenericAlias):
origin = get_type_hint_origin(type_hint)
args = get_type_hint_args(type_hint)
if origin == typing.Union:
return any(_is_annotation_store(arg) for arg in args)
elif issubclass(origin, Store):
return True
return False
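
if __name__ == "__main__":
    # Hedged sketch of the `endpoints` class decorator: instance methods of
    # the decorated class become Endpoints under the given prefix. The
    # Counter logic is illustrative.
    @endpoints(prefix="/counter")
    class Counter:
        def __init__(self):
            self.count = Store(0)

        def increment(self, step: int = 1):
            self.count.set(self.count + step)

    counter = Counter()
    print(counter.increment)  # an Endpoint, dispatchable over the API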
|
meerkat-main
|
meerkat/interactive/endpoint.py
|
import importlib.util
import logging
import os
import shutil
import sys
from typing import TYPE_CHECKING, List, Set, Type
from tabulate import tabulate
from meerkat.constants import (
JINJA_ENV,
MEERKAT_INIT_PROCESS,
MEERKAT_NPM_PACKAGE,
MEERKAT_RUN_ID,
MEERKAT_RUN_PROCESS,
MEERKAT_RUN_RELOAD_COUNT,
MEERKAT_RUN_SUBPROCESS,
App,
PathHelper,
write_file,
)
from meerkat.interactive import BaseComponent
from meerkat.tools.filelock import FileLock
from meerkat.tools.singleton import Singleton
if TYPE_CHECKING:
from meerkat.interactive import Page
logger = logging.getLogger(__name__)
def get_subclasses_recursive(cls: type) -> List[type]:
"""Recursively find all subclasses of a class.
Args:
cls (type): the class to find subclasses of.
Returns:
List[type]: a list of all subclasses of cls.
"""
subclasses = []
for subclass in cls.__subclasses__():
subclasses.append(subclass)
subclasses.extend(get_subclasses_recursive(subclass))
return subclasses
def write_file_if_changed(path: str, content: str):
"""Write a file if the content has changed. Note this is not atomic.
Args:
path (str): the path to write to.
content (str): the content to write.
"""
if os.path.exists(path):
with open(path, "r") as f:
if f.read() == content:
return
write_file(path, content)
class SvelteWriter(metaclass=Singleton):
"""Class that handles writing Svelte components to a Meerkat app."""
def __init__(self):
self.app = App(appdir=PathHelper().appdir)
self._ran_import_app_components = False
self._components = None
self._frontend_components = None
@property
def appdir(self):
return self.app.appdir
def run(self):
"""Write component wrappers and context at the start of a run."""
self.import_app_components()
with FileLock(os.path.join(self.appdir, "svelte_writer")):
self.cleanup()
self.write_all_component_wrappers() # src/lib/wrappers/
self.write_component_context() # ComponentContext.svelte
def cleanup(self):
"""Cleanup the app."""
self.remove_all_component_wrappers()
self.remove_component_context()
logger.debug("Removed all component wrappers and ComponentContext.svelte.")
def get_all_components(
self,
exclude_classes: Set[str] = {"Component", "BaseComponent"},
) -> List[Type["BaseComponent"]]:
"""Get all subclasses of BaseComponent, excluding the ones in
`exclude_classes`.
Args:
exclude_classes (Set[str], optional): Set of classes
to exclude. Defaults to {"Component", "BaseComponent"}.
Returns:
List[Type["BaseComponent"]]: List of subclasses of BaseComponent.
"""
if self._components:
return self._components
# Recursively find all subclasses of Component
subclasses = get_subclasses_recursive(BaseComponent)
# Filter out the classes we don't want and sort
subclasses = [c for c in subclasses if c.__name__ not in exclude_classes]
subclasses = sorted(subclasses, key=lambda c: c.alias)
tabulated_subclasses = tabulate(
[[subclass.__module__, subclass.__name__] for subclass in subclasses]
)
logger.debug(f"Found {len(subclasses)} components.\n" f"{tabulated_subclasses}")
self._components = subclasses
return subclasses
def get_all_frontend_components(self) -> List[Type["BaseComponent"]]:
"""Get all subclasses of BaseComponent that have a unique
frontend_alias.
Returns:
List[Type["BaseComponent"]]: List of subclasses of BaseComponent.
"""
if self._frontend_components:
return self._frontend_components
# Create a `frontend_components` list that contains the
# components that have unique component.frontend_alias
components = self.get_all_components()
frontend_components = []
aliases = set()
for component in components:
if component.frontend_alias not in aliases:
frontend_components.append(component)
aliases.add(component.frontend_alias)
self._frontend_components = frontend_components
return frontend_components
def import_app_components(self):
"""Import all components inside the app/src/lib/components directory to
register custom user components from their app."""
if self._ran_import_app_components:
# Only run this once in a process
return
if self.app.is_user_app:
# Import all components inside the app/src/lib/components
# directory to register user components from the app
# Otherwise do nothing
logger.debug(
"The app being run is as a user app. "
f"Adding {self.appdir} to sys.path. "
"Importing app components from app/src/lib/components."
)
# StackOverflow:
# How can I import a module dynamically given the full path?
# https://stackoverflow.com/a/67692
# This module name can be anything
module_name = "user.app.src.lib.components"
spec = importlib.util.spec_from_file_location(
module_name,
f"{self.appdir}/src/lib/components/__init__.py",
)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
self._ran_import_app_components = True
def remove_all_component_wrappers(self):
"""Remove all component wrappers from the app."""
try:
shutil.rmtree(f"{self.appdir}/src/lib/wrappers")
except (FileNotFoundError, OSError):
pass
def remove_component_context(self):
"""Remove the ComponentContext.svelte file from the app."""
try:
os.remove(f"{self.appdir}/src/lib/ComponentContext.svelte")
except OSError:
pass
def render_component_context(self):
"""Render the ComponentContext.svelte file for the app."""
template = JINJA_ENV.get_template("ComponentContext.svelte")
components = self.get_all_components()
frontend_components = self.get_all_frontend_components()
# Get the libraries that the frontend components
# depend on
libraries = set([c.library for c in frontend_components])
# Filter to only include components and frontend components
# whose libraries are installed
installed_libraries = self.app.filter_installed_libraries(libraries) + ["html"]
components = [
c
for c in components
if c.library in installed_libraries
or c.library == MEERKAT_NPM_PACKAGE
and not self.app.is_user_app
]
frontend_components = [
c
for c in frontend_components
if c.library in installed_libraries
or c.library == MEERKAT_NPM_PACKAGE
and not self.app.is_user_app
]
# For the Meerkat npm package, check the components offered by the
# user's installed version, and filter out the ones that aren't available
if MEERKAT_NPM_PACKAGE in installed_libraries and self.app.is_user_app:
try:
mk_components = set(
[f"Meerkat{c}" for c in self.app.get_mk_package_info()]
)
components = [
c
for c in components
if (c.frontend_alias in mk_components and c.namespace == "meerkat")
or (c.library == MEERKAT_NPM_PACKAGE and c.namespace != "meerkat")
or c.library != MEERKAT_NPM_PACKAGE
]
frontend_components = [
c
for c in frontend_components
if (c.frontend_alias in mk_components and c.namespace == "meerkat")
or (c.library == MEERKAT_NPM_PACKAGE and c.namespace != "meerkat")
or c.library != MEERKAT_NPM_PACKAGE
]
except Exception as e:
logger.error(
"Error getting Meerkat package info. "
"Components from the Meerkat npm package may not be available."
)
logger.debug(e)
return template.render(
components=components,
frontend_components=frontend_components,
)
def render_component_wrapper(self, component: Type[BaseComponent]):
# TODO: fix line breaks in Wrapper.svelte
template = JINJA_ENV.get_template("Wrapper.svelte")
from meerkat.interactive.startup import snake_case_to_camel_case
prop_names_camel_case = [
snake_case_to_camel_case(prop_name) for prop_name in component.prop_names
]
return template.render(
import_style=component.wrapper_import_style,
component_name=component.component_name,
path=component.path,
prop_names=component.prop_names,
prop_names_camel_case=prop_names_camel_case,
event_names=component.event_names,
use_bindings=True,
prop_bindings=component.prop_bindings,
slottable=component.slottable,
zip=zip,
is_user_app=self.app.is_user_app,
)
def get_import_prefix(self):
if self.app.is_user_app:
# Use the MEERKAT_NPM_PACKAGE package instead of $lib
# in a Meerkat generated app
return MEERKAT_NPM_PACKAGE
return "$lib"
def render_route(self, page: "Page"):
template = JINJA_ENV.get_template("page.svelte.jinja")
# TODO: make this similar to render_root_route
# and use component.frontend_alias and component.alias
return template.render(
route=page.id,
title=page.name,
import_prefix=self.get_import_prefix(),
components=list(sorted(page.component.get_components())),
queryparam=False,
)
def render_root_route(self):
template = JINJA_ENV.get_template("page.root.svelte.jinja")
components = self.get_all_components()
frontend_components = self.get_all_frontend_components()
return template.render(
title="Meerkat",
import_prefix=self.get_import_prefix(),
components=components,
frontend_components=frontend_components,
)
def write_component_wrapper(self, component: Type[BaseComponent]):
cwd = f"{self.appdir}/src/lib/wrappers/__{component.namespace}"
os.makedirs(cwd, exist_ok=True)
write_file_if_changed(
f"{cwd}/{component.__name__}.svelte",
self.render_component_wrapper(component),
)
def write_all_component_wrappers(
self,
exclude_classes: Set[str] = {"Component", "BaseComponent"},
):
# Recursively find all subclasses of BaseComponent
subclasses = get_subclasses_recursive(BaseComponent)
for subclass in subclasses:
# Use subclass.__name__ as the component name, instead of
# subclass.component_name, because the latter is not guaranteed to be
# unique.
component_name = subclass.__name__
if component_name in exclude_classes:
continue
# Make a file for the component, inside a subdirectory for the namespace
# e.g. src/lib/wrappers/__meerkat/Component.svelte
self.write_component_wrapper(subclass)
def write_component_context(self):
write_file_if_changed(
f"{self.appdir}/src/lib/ComponentContext.svelte",
self.render_component_context(),
)
"""
Convert all Python component classes to Svelte component wrappers.
We only run the following code if
- a script importing `meerkat` is run directly with Python e.g. `python myscript.py`
- a notebook importing `meerkat` is run directly with Jupyter
- a script was run with `mk run` and we are in the `mk run` process
- a script was run with `mk run`, we are in its `uvicorn` subprocess
and this is a live reload run (i.e. not the first run of the subprocess)
"""
if (
(not MEERKAT_RUN_PROCESS and not MEERKAT_RUN_SUBPROCESS)
or MEERKAT_RUN_PROCESS
or (MEERKAT_RUN_SUBPROCESS and MEERKAT_RUN_RELOAD_COUNT > 1)
) and not MEERKAT_INIT_PROCESS:
logger.debug("Running SvelteWriter().run().")
SvelteWriter().run()
if MEERKAT_RUN_SUBPROCESS:
# Increment the MEERKAT_RUN_RELOAD_COUNT
# so that the `uvicorn` subprocess knows that it has been reloaded
# on a subsequent live reload run
write_file(
f"{PathHelper().appdir}/.{MEERKAT_RUN_ID}.reload",
str(MEERKAT_RUN_RELOAD_COUNT + 1),
)
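
if __name__ == "__main__":
    # Minimal sketch: list every registered component class, as SvelteWriter
    # does when deciding which Svelte wrappers to generate.
    for component_cls in SvelteWriter().get_all_components():
        print(component_cls.alias)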
|
meerkat-main
|
meerkat/interactive/svelte.py
|
from typing import Callable, Dict, Mapping, Type
import numpy as np
import pandas as pd
import rich
from meerkat.env import is_torch_available
from meerkat.interactive.graph.reactivity import reactive
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
print = reactive(rich.print)
def get_custom_json_encoder() -> Dict[Type, Callable]:
from meerkat.columns.abstract import Column
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.graph.store import Store
custom_encoder = {
np.ndarray: lambda v: v.tolist(),
pd.Series: lambda v: v.tolist(),
Column: lambda v: v.to_json(),
np.int64: lambda v: int(v),
np.float64: lambda v: float(v),
np.int32: lambda v: int(v),
np.bool_: lambda v: bool(v),
np.bool8: lambda v: bool(v),
Store: lambda v: v.to_json(),
Endpoint: lambda v: v.to_json(),
}
if is_torch_available():
custom_encoder[torch.Tensor] = lambda v: v.tolist()
return custom_encoder
def is_equal(a, b):
"""Recursively check equality of two objects.
This also verifies that the types of the objects are the same.
Args:
a: The first object.
b: The second object.
Returns:
True if the objects are equal, False otherwise.
"""
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        return isinstance(a, type(b)) and isinstance(b, type(a)) and np.all(a == b)
elif isinstance(a, pd.Series) or isinstance(b, pd.Series):
return isinstance(a, type(b)) and isinstance(b, type(a)) and np.all(a == b)
elif isinstance(a, (list, tuple)) or isinstance(b, (list, tuple)):
return (
isinstance(a, type(b))
and isinstance(b, type(a))
and len(a) == len(b)
and all(is_equal(_a, _b) for _a, _b in zip(a, b))
)
elif isinstance(a, Mapping) or isinstance(b, Mapping):
a_keys = a.keys()
b_keys = b.keys()
return (
isinstance(a, type(b))
and isinstance(b, type(a))
and len(a) == len(b)
and a_keys == b_keys
and all(is_equal(a[k], b[k]) for k in a_keys)
)
else:
return a == b
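
if __name__ == "__main__":
    # Minimal sketch: unlike plain `==`, `is_equal` also requires the types
    # to match exactly.
    print(is_equal([1, 2], [1, 2]))  # True
    print(is_equal([1, 2], (1, 2)))  # False: list vs. tuple
    print(is_equal({"a": 1}, {"a": 1}))  # True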
|
meerkat-main
|
meerkat/interactive/utils.py
|
from collections import defaultdict
from typing import Any, Dict, Iterable, List, Optional
from pydantic import BaseModel, ValidationError
from meerkat.interactive.frontend import FrontendMixin
from meerkat.mixins.identifiable import IdentifiableMixin
class NodeFrontendModel(BaseModel):
refId: str
type: str
is_store: bool = True
class Node(IdentifiableMixin, FrontendMixin):
_self_identifiable_group: str = "nodes"
def __init__(self, obj: Any, **kwargs):
"""A node in the computational graph. This could be an object or an
operation.
Args:
obj (Any): This could be any class that has NodeMixin (e.g. store,
Operation, DataFrame, Column).
"""
super().__init__(**kwargs)
self.obj = obj
self.children: Dict["Node", bool] = dict()
def add_child(self, child, triggers=True):
"""Adds a child to this node.
Args:
child: The child to add.
triggers: If True, this child is triggered
when this node is triggered.
"""
        # Never downgrade a trigger: once a child has been added with
        # triggers=True, a later call with triggers=False must not undo it.
        self.children[child] = triggers | self.children.get(child, False)
@property
def frontend(self):
return NodeFrontendModel(
refId=self.id,
type=self.obj.__class__.__name__,
)
@property
def trigger_children(self):
"""Returns the children that are triggered."""
return [child for child, triggers in self.children.items() if triggers]
def __repr__(self) -> str:
return f"Node({repr(self.obj)}, {len(self.children)} children)"
def __hash__(self):
"""Hash is based on the id of the node."""
return hash(id(self))
def __eq__(self, other):
"""Two nodes are equal if they have the same id."""
return self.id == other.id
def has_children(self):
"""Returns True if this node has children."""
return len(self.children) > 0
def has_trigger_children(self):
"""Returns True if this node has children that are triggered."""
return any(self.children.values())
class NodeMixin(FrontendMixin):
"""Mixin for Classes whose objects can be attached to a node in the
computation graph.
Add this mixin to any class whose objects should be nodes
in a graph.
This mixin is used in Reference, Store and Operation to make
them part of a computation graph.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# The children of this node: this is a dictionary
# mapping children to a boolean indicating whether
# the child is triggered when this node is triggered.
# self._self_children: Dict[Node, bool] = dict()
self._self_inode = None # Node(self)
# self._set_node_id()
def attach_to_inode(self, inode: Node):
"""Attach this object to a node."""
# The object should point to the node
self._self_inode = inode
# The node should point to the object
inode.obj = self
def detach_inode(self) -> Node:
"""Detach this object from its node."""
# Grab the node
inode = self._self_inode
# Point the node to None
inode.obj = None
# The object should point to nothing
self._self_inode = None
# Return the node
return inode
def create_inode(self, inode_id: str = None) -> Node:
"""Creates a node for this object.
Doesn't attach the node to the object yet.
"""
return Node(None, id=inode_id)
def has_inode(self):
"""Returns True if this object has a node."""
return self._self_inode is not None
@property
def inode(self) -> Optional[Node]:
"""The node for this object, if it exists."""
return self._self_inode
@property
def inode_id(self):
return self.inode.id if self.inode else None
def _set_inode(self):
"""Sets the node for this object."""
self._self_inode = None
@property
def frontend(self) -> BaseModel:
assert self.inode is not None, "Node not set."
return self.inode.frontend
@classmethod
def __get_validators__(cls):
        # Needed to ensure that NodeMixin objects can be used as pydantic fields.
yield cls.validate
@classmethod
def validate(cls, v):
if not isinstance(v, cls):
            raise TypeError(f"Expected {cls.__name__}, got {type(v).__name__}")
return v
def _topological_sort(root_nodes: List[NodeMixin]) -> Iterable[NodeMixin]:
"""
Perform a topological sort on a graph.
TODO: Add a check to ensure the graph is acyclic.
Args:
root_nodes (List[NodeMixin]): The root nodes of the graph.
Returns:
List[NodeMixin]: The topologically sorted nodes.
"""
# get a mapping from node to the children of each node
# only get the children that are triggered by the node
# i.e. ignore children that use the node as a dependency
# but are not triggered by the node
parents = defaultdict(set)
nodes = set()
# TODO (arjun): Add check for cycles.
while root_nodes:
node = root_nodes.pop(0)
for child in node.trigger_children:
parents[child].add(node)
nodes.add(node)
root_nodes.append(child)
current = [
node for node in nodes if not parents[node]
] # get a set of all the nodes without an incoming edge
while current:
node: Node = current.pop(0)
yield node
for child in node.trigger_children:
parents[child].remove(node)
if not parents[child]:
current.append(child)
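
if __name__ == "__main__":
    # Minimal sketch: a tiny trigger chain a -> b -> c, sorted topologically.
    # `_Thing` is an illustrative stand-in for a Store or DataFrame.
    class _Thing(NodeMixin):
        pass

    a, b, c = _Thing(), _Thing(), _Thing()
    for obj in (a, b, c):
        obj.attach_to_inode(obj.create_inode())
    a.inode.add_child(b.inode)
    b.inode.add_child(c.inode)
    print(list(_topological_sort([a.inode])))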
|
meerkat-main
|
meerkat/interactive/node.py
|
from typing import List
from IPython.display import IFrame
import meerkat as mk
class GUI:
pass
class DataFrameGUI(GUI):
def __init__(self, df: mk.DataFrame):
self.df = df
def table(
self,
**kwargs,
) -> IFrame:
return mk.gui.Table(
df=self.df,
classes="h-[550px]",
**kwargs,
)
def gallery(self, main_column: str = None, tag_columns: List[str] = None, **kwargs):
return mk.gui.Gallery(
df=self.df,
main_column=main_column,
tag_columns=tag_columns,
**kwargs,
)
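
if __name__ == "__main__":
    # Hedged sketch: open a table view over a toy DataFrame. Rendering the
    # returned component assumes a running frontend (e.g. `mk.gui.start()`).
    df = mk.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    DataFrameGUI(df).table()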
|
meerkat-main
|
meerkat/interactive/gui.py
|
from pydantic import BaseModel, Extra, validator
from meerkat.interactive.endpoint import endpoint
from meerkat.interactive.graph import Store
from meerkat.interactive.node import Node, NodeMixin
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.tools.utils import classproperty
class EndpointMixin:
def __init__(self, prefix=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._endpoints = {}
if prefix is None:
prefix = f"/{self.__class__.__name__.lower()}"
# Access all the user-defined attributes of the instance to create endpoints
# Here, we keep only methods that:
# - are defined in this subclass, but not in any of its superclasses
# (e.g. BaseComponent, IdentifiableMixin, EndpointMixin etc.)
# - don't begin with "_"
# - are callables
names = set(dir(self.__class__)) - set(
sum([dir(e) for e in self.__class__.mro()[1:]], [])
)
for attrib in names:
if attrib.startswith("_"):
continue
obj = self.__getattribute__(attrib)
if callable(obj):
if attrib not in self._endpoints:
self._endpoints[attrib] = endpoint(
obj, prefix=prefix + f"/{self.id}"
)
@property
def endpoints(self):
return self._endpoints
class State(EndpointMixin, IdentifiableMixin, BaseModel):
@classproperty
def identifiable_group(self):
# Ordinarily, we would create a new classproperty for this, like
# _self_identifiable_group: str = "states"
# However, this causes pydantic to show _self_identifiable_group in
# type hints when using the component in the IDE, which might
# be confusing to users.
# We just override the classproperty here directly as an alternative.
return "states"
@validator("*", pre=False)
def _check_inode(cls, value):
if isinstance(value, NodeMixin) and not isinstance(value, Store):
# Now value is a NodeMixin object
# We need to make sure that value points to a Node in the graph
# If it doesn't, we need to add it to the graph
if not value.has_inode():
value.attach_to_inode(value.create_inode())
# Now value is a NodeMixin object that points to a Node in the graph
return value.inode # this will exist
return value
def __getattribute__(self, name):
value = super().__getattribute__(name)
if isinstance(value, Node):
# because the validator above converts dataframes to nodes, when the
# dataframe is accessed we need to convert it back to the dataframe
return value.obj
if callable(value) and hasattr(self, "_endpoints"):
if name not in self._endpoints:
return value
return self._endpoints[name]
return value
class Config:
arbitrary_types_allowed = True
extra = Extra.allow
copy_on_model_validation = False
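
if __name__ == "__main__":
    # Hedged sketch: a State subclass holding a DataFrame. The validator
    # above wraps NodeMixin values in graph Nodes, and `__getattribute__`
    # unwraps them on access, so `s.df` returns the DataFrame itself.
    import meerkat as mk

    class AppState(State):
        df: mk.DataFrame

    s = AppState(df=mk.DataFrame({"a": [1, 2]}))
    print(type(s.df))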
|
meerkat-main
|
meerkat/interactive/state.py
|
meerkat-main
|
meerkat/interactive/app/__init__.py
|
|
meerkat-main
|
meerkat/interactive/app/src/__init__.py
|
|
meerkat-main
|
meerkat/interactive/app/src/lib/__init__.py
|
|
import collections
import inspect
import os
import typing
import uuid
import warnings
from typing import Dict, List, Set
from pydantic import BaseModel, Extra, root_validator
from meerkat.constants import MEERKAT_NPM_PACKAGE, PathHelper
from meerkat.dataframe import DataFrame
from meerkat.interactive.endpoint import Endpoint, EndpointProperty
from meerkat.interactive.event import EventInterface
from meerkat.interactive.frontend import FrontendMixin
from meerkat.interactive.graph import Store
from meerkat.interactive.node import Node, NodeMixin
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.tools.utils import (
classproperty,
get_type_hint_args,
get_type_hint_origin,
has_var_kwargs,
is_subclass,
nested_apply,
)
try:
collections_abc = collections.abc
except AttributeError:
collections_abc = collections
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
class ComponentFrontend(BaseModel):
name: str
props: Dict
slots: list
class WrappableMixin:
@classproperty
def wrapper_import_style(cls) -> Literal["default", "named", "none"]:
# TODO: this will create issues if users want to use plotly components
# in mk init apps. In general, we need to make the library / namespace
# distinction more explicit and this system more robust.
if cls.library == MEERKAT_NPM_PACKAGE and (
cls.namespace == "meerkat" or cls.namespace == "plotly"
):
# Meerkat components
if not PathHelper().is_user_app:
# In Meerkat package
# Use named import: import Something from "path/to/component";
return "named"
else:
# Use default import: import { Something } from MEERKAT_NPM_PACKAGE;
return "default"
elif cls.library == MEERKAT_NPM_PACKAGE:
# Custom user components
if PathHelper().is_user_app:
# Use named import: import Something from "path/to/component";
return "named"
else:
# This should never happen: we should never be wrapping a custom
# component directly in the Meerkat package + no components
# in Meerkat should have a namespace other than "meerkat"
raise ValueError(
f"Cannot use custom component {cls.component_name}, "
"please initialize a Meerkat app using `mk init` first."
)
elif cls.library == "html":
# No need to import HTML tags
return "none"
else:
return "default"
class PythonToSvelteMixin:
def get_components(self) -> Set[str]:
nested_components = set()
nested_components.add(self.component_name)
for s in self.slots:
nested_components.update(s.get_components())
if hasattr(self, "component"):
nested_components.update(self.component.get_components())
if hasattr(self, "components"):
for c in self.components:
nested_components.update(c.get_components())
return nested_components
class Slottable:
@classproperty
def slottable(cls) -> bool:
return True
def iterable(arg):
return isinstance(arg, collections_abc.Iterable) and not isinstance(arg, str)
class SlotsMixin:
    def __init__(self, slots: List["BaseComponent"] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if slots is None:
            # Avoid a shared mutable default: `append` below mutates this list.
            slots = []
        elif isinstance(slots, BaseComponent) or not iterable(slots):
            slots = [slots]
        self._slots = slots
@property
def slots(self) -> List["BaseComponent"]:
from meerkat.interactive.app.src.lib.component.core.put import Put
_slots = []
for slot in self._slots:
if not isinstance(slot, BaseComponent):
# Wrap it in a Put component
_slots.append(Put(data=slot))
else:
_slots.append(slot)
return _slots
def append(self, other):
# Allow users to append to slots
from meerkat.interactive.app.src.lib.component.core.put import Put
if isinstance(other, BaseComponent):
self._slots.append(other)
else:
self._slots.append(Put(data=other))
@classproperty
def slottable(cls) -> bool:
return False
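# Example of the slot-wrapping behavior above (illustrative; `SomeComponent`
# is hypothetical): raw values may be passed as slots, and they are wrapped
# in `Put` on access:
#
#     comp = SomeComponent(slots=["hello", 42])
#     [type(s).__name__ for s in comp.slots]  # -> ["Put", "Put"]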
class BaseComponent(
IdentifiableMixin,
FrontendMixin,
SlotsMixin,
WrappableMixin,
PythonToSvelteMixin,
BaseModel,
):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __getattribute__(self, name):
if name == "component_id":
# need to wrap in a Store so component_id is passed through the wrapper
return Store(self.id)
value = super().__getattribute__(name)
if isinstance(value, Node):
# because the validator converts dataframes to nodes, when the
# dataframe is accessed we need to convert it back to the dataframe
return value.obj
return value
@classproperty
def alias(cls):
"""Unique alias for this component that uses the namespace and the name
of the BaseComponent subclass.
This will give components with the same name from different
libraries different names e.g. `MeerkatButton` and
`CarbonButton`.
"""
return cls.namespace.title() + cls.__name__
@classproperty
def frontend_alias(cls):
"""Alias for this component that is used in the frontend.
This is not unique, and it is possible to have multiple
components with the same frontend alias. This is useful for
components that are just wrappers around other components, e.g.
a layout BaseComponent that subclasses a Grid BaseComponent will
still have the same frontend alias as the Grid BaseComponent.
"""
return cls.namespace.title() + cls.component_name
@classproperty
def component_name(cls):
# Inheriting an existing BaseComponent and modifying it on the Python side
# should not change the name of the component used on the frontend
if cls.__bases__[0] != BaseComponent and issubclass(
cls.__bases__[0], BaseComponent
):
return cls.__bases__[0].__name__
return cls.__name__
@classproperty
def event_names(cls) -> List[str]:
"""Returns a list of event names that this component emits."""
return [
k[3:]
for k in cls.__fields__
if k.startswith("on_")
and not issubclass(cls.__fields__[k].type_, EndpointProperty)
]
@classproperty
def events(cls) -> List[str]:
"""Returns a list of events that this component emits."""
return [
k
for k in cls.__fields__
if k.startswith("on_")
and not issubclass(cls.__fields__[k].type_, EndpointProperty)
]
@classproperty
def identifiable_group(self):
# Ordinarily, we would create a new classproperty for this, like
# _self_identifiable_group: str = "components"
# However, this causes pydantic to show _self_identifiable_group in
# type hints when using the component in the IDE, which might
# be confusing to users.
# We just override the classproperty here directly as an alternative.
return "components"
@classproperty
def library(cls):
return MEERKAT_NPM_PACKAGE
@classproperty
def namespace(cls):
return "meerkat"
@classproperty
def path(cls):
if not cls.library == MEERKAT_NPM_PACKAGE or (
cls.library == MEERKAT_NPM_PACKAGE
# KG: TODO: Temporary hack to be able to use multiple namespaces
# for components provided natively in the Meerkat library.
and (cls.namespace == "meerkat" or cls.namespace == "plotly")
and PathHelper().is_user_app
):
return cls.library
path = os.path.join(
os.path.dirname(inspect.getfile(cls)),
f"{cls.component_name}.svelte",
)
if os.path.exists(path):
return path
# Raise an error if the file doesn't exist
raise FileNotFoundError(
f"Could not find {path}. "
f"Check that the definition of this BaseComponent {cls} "
"is in the same folder as the Svelte file. "
"You might also be using a "
"component from a library, in which case set the `library` "
"property of the BaseComponent correctly."
)
@classproperty
def prop_names(cls):
return [
k for k in cls.__fields__ if not k.startswith("on_") and "_self_id" != k
] + [
k
for k in cls.__fields__
if k.startswith("on_")
and issubclass(cls.__fields__[k].type_, EndpointProperty)
]
@classproperty
def prop_bindings(cls):
if not issubclass(cls, Component):
# These props need to be bound with `bind:` in Svelte
types_to_bind = {Store, DataFrame}
return {
prop: cls.__fields__[prop].type_ in types_to_bind
for prop in cls.prop_names
}
else:
return {
prop: (cls.__fields__[prop].type_ != EndpointProperty)
for prop in cls.prop_names
}
@property
def frontend(self):
def _frontend(value):
if isinstance(value, FrontendMixin):
return value.frontend
return value
frontend_props = nested_apply(
self.virtual_props,
_frontend,
base_types=(Store),
)
return ComponentFrontend(
# component_id=self.id,
# path=os.path.join(
# os.path.dirname(inspect.getfile(self.__class__)),
# f"{self.component_name}.svelte",
# ),
# name=self.alias,
name=self.frontend_alias,
props=frontend_props,
slots=[slot.frontend for slot in self.slots],
# library=self.library,
)
@property
def props(self):
return {k: self.__getattribute__(k) for k in self.prop_names}
@property
def virtual_props(self):
"""Props, and all events (as_*) as props."""
vprop_names = [k for k in self.__fields__ if "_self_id" != k] + ["component_id"]
return {k: self.__getattribute__(k) for k in vprop_names}
@root_validator(pre=True)
def _init_cache(cls, values):
# This is a workaround because Pydantic automatically converts
# all Store objects to their underlying values when validating
# the class. We need to keep the Store objects around.
cls._cache = values.copy()
return values
@root_validator(pre=True)
def _endpoint_name_starts_with_on(cls, values):
"""Make sure that all `Endpoint` fields have a name that starts with
`on_`."""
# TODO: this shouldn't really be a validator, this needs to be run
# exactly once when the class is created.
# Iterate over all fields in the class
for k, v in cls.__fields__.items():
# TODO: revisit this. Here we only enforce the on_* naming convention for
# endpoints, not endpoint properties, but this should be reconsidered.
if is_subclass(v.type_, Endpoint) and not is_subclass(
v.type_, EndpointProperty
):
if not k.startswith("on_"):
raise ValueError(
f"Endpoint {k} must have a name that starts with `on_`"
)
return values
@staticmethod
def _get_event_interface_from_typehint(type_hint):
"""Recurse on type hints to find all the Endpoint[EventInterface]
types.
Only run this on the type hints of a Component, for fields that are
endpoints.
Returns:
EventInterface: The EventInterface that the endpoint expects. None if
the endpoint does not have a type hint for the EventInterface.
"""
if isinstance(type_hint, typing._GenericAlias):
origin = get_type_hint_origin(type_hint)
args = get_type_hint_args(type_hint)
if is_subclass(origin, Endpoint):
# Endpoint[XXX]
if len(args) != 1:
raise TypeError(
"Endpoint type hints should only have one EventInterface."
)
if not issubclass(args[0], EventInterface):
raise TypeError(
"Endpoint type hints should be of type EventInterface."
)
return args[0]
else:
# Alias[XXX]
for arg in args:
out = BaseComponent._get_event_interface_from_typehint(arg)
if out is not None:
return out
return None
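    # Example (illustrative): for a field annotated
    # `Optional[Endpoint[OnChangeSlider]]`, the recursion above unwraps the
    # `Optional` alias and returns `OnChangeSlider`; for a bare `Endpoint`
    # annotation it returns None, and `_endpoint_signature_matches` below
    # emits a warning instead of validating the signature.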
@root_validator(pre=True)
def _endpoint_signature_matches(cls, values):
"""Make sure that the signature of the Endpoint that is passed in
matches the parameter names and types that are sent from Svelte.
Procedurally, this validator:
- Gets the type hints for this BaseComponent subclass.
- Gets all fields that are endpoints.
- Gets the EventInterface from the Endpoint type hint.
- Gets the parameters from the EventInterface.
- Gets the function passed by the user.
- Gets the parameters from the function.
- Compares the two sets of parameters.
"""
type_hints = typing.get_type_hints(cls)
# Get all fields that pydantic tells us are endpoints.
for field, value in cls.__fields__.items():
if (
not is_subclass(value.type_, Endpoint)
or field not in values
or values[field] is None
):
continue
# Pull out the EventInterface from Endpoint.
event_interface = cls._get_event_interface_from_typehint(type_hints[field])
if event_interface is None:
warnings.warn(
f"Endpoint `{field}` does not have a type hint. "
"We recommend subclassing EventInterface to provide "
"an explicit type hint to users."
)
continue
# Get the parameters from the EventInterface.
event_interface_params = typing.get_type_hints(event_interface).keys()
# Get the endpoint passed by the user.
endpoint = values[field]
# Raise an error if it's not an Endpoint.
if not isinstance(endpoint, Endpoint):
raise TypeError(
f"Endpoint `{field}` should be of type Endpoint, "
f"but is of type {type(endpoint)}."
)
fn = endpoint.fn
fn_signature = inspect.signature(fn)
fn_params = fn_signature.parameters.keys()
# Make sure that the parameters passed by the user are a superset of
# the parameters expected by the EventInterface.
            # NOTE: if the function has a ** argument, it will absorb any extra
            # parameters passed by the Svelte dispatch call, so we do not need
            # to do the superset check.
remaining_params = event_interface_params - fn_params
if not has_var_kwargs(fn) and len(remaining_params) > 0:
raise TypeError(
f"Endpoint `{field}` will be called with parameters: "
f"{', '.join(f'`{param}`' for param in event_interface_params)}. "
"\n"
f"Function specified by the user is missing the "
"following parameters: "
f"{', '.join(f'`{param}`' for param in remaining_params)}. "
)
# Check that the frontend will provide all of the necessary arguments
# to call fn. i.e. fn should not have any remaining args once the
# frontend sends over the inputs.
# Do this by making a set of all fn parameters that don't have defaults.
# This set should be a subset of the EventInterface.parameters.
# Get all the parameters that don't have default values
required_fn_params = {
k: v
for k, v in fn_signature.parameters.items()
if v.default is v.empty
and v.kind not in (v.VAR_POSITIONAL, v.VAR_KEYWORD)
}
# Make sure that the EventInterface parameters are a super set of these
# required parameters.
if not set(required_fn_params).issubset(set(event_interface_params)):
raise TypeError(
f"Endpoint `{field}` will be called with parameters: "
f"{', '.join(f'`{param}`' for param in event_interface_params)}. "
f"Check the {event_interface.__name__} class to see what "
"parameters are expected to be passed in."
"\n"
f"The function `{fn}` expects the following parameters: "
f"{', '.join(f'`{param}`' for param in required_fn_params)}. "
f"Perhaps you forgot to fill out all of the parameters of {fn}?"
)
return values
@root_validator(pre=False)
def _update_cache(cls, values):
# `cls._cache` only contains the values that were passed in
# `values` contains all the values, including the ones that
# were not passed in
# Users might run validators on the class, which will
# update the `values` dict. We need to make sure that
# the values in `cls._cache` are updated as well.
for k, v in cls._cache.items():
if k in values:
if isinstance(v, Store):
v.set(values[k])
else:
cls._cache[k] = values[k]
# TODO: other types of objects that need to be updated
else:
# This has happened with a parameter that
# - had no default value
# - was annotated without `Optional[...]`
# - was passed in as a `None` value
# TODO: There may be other cases where this happens.
pass
return values
@root_validator(pre=False)
def _check_inode(cls, values):
"""Unwrap NodeMixin objects to their underlying Node (except
Stores)."""
values.update(cls._cache)
for name, value in values.items():
if isinstance(value, NodeMixin) and not isinstance(value, Store):
# Now value is a NodeMixin object
# We need to make sure that value points to a Node in the graph
# If it doesn't, we need to add it to the graph
if not value.has_inode():
value.attach_to_inode(value.create_inode())
# Now value is a NodeMixin object that points to a Node in the graph
# We replace `value` with `value.inode`, and will send
# this to the frontend
# Effectively, NodeMixin objects (except Store) are "by reference"
# and not "by value" (this is also why we explicitly exclude
# Store from this check, which is "by value")
values[name] = value.inode
else:
values[name] = value
cls._cache = values
return values
def _get_ipython_height(self) -> str:
"""Return the height of the viewport used to render this component in
the notebook. Value will be pased to IPython.display.IFrame as the
height argument.
TODO: Figure out how to do this dynamically.
"""
return "100%"
def _ipython_display_(self):
from IPython.display import display
from meerkat.interactive.page import Page
display(
Page(
component=self,
id=self.__class__.__name__ + str(uuid.uuid4()),
height=self._get_ipython_height(),
progress=False,
).launch()
)
class Config:
arbitrary_types_allowed = True
extra = Extra.allow
copy_on_model_validation = False
class Component(BaseComponent):
"""Component with simple defaults."""
@classproperty
def component_name(cls):
# Inheriting an existing Component and modifying it on the Python side
# should not change the name of the component used on the frontend
if cls.__bases__[0] != Component and issubclass(cls.__bases__[0], Component):
return cls.__bases__[0].component_name
return cls.__name__
@root_validator(pre=True)
def _init_cache(cls, values):
# This is a workaround because Pydantic automatically converts
# all Store objects to their underlying values when validating
# the class. We need to keep the Store objects around.
# Cache all the Store objects
cls._cache = values.copy()
# Convert all the Store objects to their underlying values
# and return the unwrapped values
for name, value in values.items():
if isinstance(value, Store):
values[name] = value.__wrapped__
else:
values[name] = value
return values
@root_validator(pre=False)
def _convert_fields(cls, values: dict):
values = cls._cache
cls._cache = None
for name, value in values.items():
# Wrap all the fields that are not NodeMixins in a Store
# (i.e. this will exclude DataFrame, Endpoint etc. as well as
# fields that are already Stores)
if (
name not in cls.__fields__
or cls.__fields__[name].type_ == Endpoint
or cls.__fields__[name].type_ == EndpointProperty
):
# Separately skip Endpoint fields by looking at the field type,
# since they are assigned None by default and would be missed
# by the condition below
continue
if not isinstance(value, NodeMixin) and not isinstance(value, Node):
value = values[name] = Store(value)
# Now make sure that all the `Store` objects have inodes
if hasattr(value, "has_inode") and not value.has_inode():
value.attach_to_inode(value.create_inode())
return values
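if __name__ == "__main__":
    # A quick sketch of the introspection helpers on a tiny component.
    # `Demo` is hypothetical and defined here only for illustration; the
    # printed values assume the defaults above (namespace "meerkat").
    class Demo(Component):
        value: int = 0
        on_change: Endpoint = None

    print(Demo.component_name)  # expected: "Demo"
    print(Demo.alias)           # expected: "MeerkatDemo"
    print(Demo.event_names)     # expected: ["change"]
    print(Demo.prop_names)      # expected: ["value"]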
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/abstract.py
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/__init__.py
|
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/_internal/__init__.py
|
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Progress(Component):
progress: float = 0.0
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/_internal/progress/__init__.py
|
from .button import Button
from .carousel import Carousel
from .chat import Chat
from .checkbox import Checkbox
from .code import Code
from .code_cell import CodeCell
from .copy_button import CopyButton
from .document import Document
from .editor import Editor
from .fileupload import FileUpload
from .filter import Filter
from .gallery import Gallery
from .icon import Icon
from .image import Image
from .image_annotator import ImageAnnotator
from .json import Json
from .markdown import Caption, Header, Markdown, Subheader, Title
from .match import Match
from .medimage import MedicalImage
from .multiselect import MultiSelect
from .number import Number
from .numberinput import NumberInput
from .pdf import PDF
from .put import Put
from .radio import Radio, RadioGroup
from .raw_html import RawHTML
from .select import Select
from .slicebycards import SliceByCards
from .slider import Slider
from .sort import Sort
from .stats import Stats
from .table import Table
from .tabs import Tabs
from .text import Text
from .textbox import Textbox
from .toggle import Toggle
from .vega import Vega
__all__ = [
"Button",
"Caption",
"Carousel",
"Chat",
"Checkbox",
"Code",
"CodeCell",
"CopyButton",
"Document",
"Editor",
"FileUpload",
"Filter",
"Gallery",
"Header",
"Image",
"ImageAnnotator",
"Icon",
"Json",
"Markdown",
"Match",
"MedicalImage",
"MultiSelect",
"PDF",
"Put",
"Radio",
"RadioGroup",
"RawHTML",
"Number",
"NumberInput",
"Select",
"SliceByCards",
"Slider",
"Sort",
"Stats",
"Subheader",
"Table",
"Tabs",
"Text",
"Textbox",
"Title",
"Toggle",
"Vega",
]
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/__init__.py
|
import meerkat as mk
from meerkat.interactive import Page
from meerkat.interactive.app.src.lib.component.core.tabs import Tabs
tabs = Tabs(
tabs={
"Tab 1": mk.gui.core.Text("Tab 1"),
"Tab 2": mk.gui.core.Text("Tab 2"),
}
)
page = Page(component=tabs, id="tabs")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/tabs/test_tabs.py
|
import uuid
from dataclasses import dataclass, field
from typing import Mapping, Sequence, Union
from meerkat.interactive.app.src.lib.component.abstract import BaseComponent
from meerkat.interactive.frontend import FrontendMixin
@dataclass
class Tab(FrontendMixin):
label: str
component: BaseComponent
id: str = field(default_factory=lambda: str(uuid.uuid4()))
@property
def frontend(self):
return {
"id": self.id,
"label": self.label,
"component": self.component.frontend,
}
class Tabs(BaseComponent):
# TODO: Add option for setting the default selected tab.
tabs: Union[Mapping[str, BaseComponent], Sequence[Tab]]
def __init__(self, **kwargs):
super().__init__(**kwargs)
if isinstance(self.tabs, Mapping):
self.tabs = [
Tab(label=label, component=component)
for label, component in self.tabs.items()
]
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/tabs/__init__.py
|
from typing import Optional
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnUploadFileUpload(EventInterface):
pass
class FileUpload(Component):
files: list = []
filenames: list = []
contents: list = []
classes: Optional[str] = None
webkitdirectory: bool = False
directory: bool = False
multiple: bool = False
on_upload: Endpoint[OnUploadFileUpload] = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/fileupload/__init__.py
|
import meerkat as mk
@mk.endpoint()
def on_change(index: int):
print("on_change", index)
radios = mk.gui.html.flex(
slots=[
mk.gui.core.Radio(
name="radio",
value="radio" + str(i),
disabled=i == 2,
slots=[mk.gui.html.div(slots=[f"Radio {i}"], classes="text-purple-500")],
on_change=on_change.partial(index=i),
)
for i in range(1, 4)
],
classes="bg-violet-50 p-0 rounded-lg w-fit text-center",
)
@mk.endpoint()
def on_change_group(index: int):
    print("on_change_group", index)
radio_group = mk.gui.core.RadioGroup(
values=["Radio 1", "Radio 2", "Radio 3"],
disabled=False,
horizontal=True,
    on_change=on_change_group,
)
component = mk.gui.html.div(
slots=[radios, radio_group], classes="flex flex-col space-y-4"
)
page = mk.gui.Page(component=component, id="radio")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/radio/test_radio.py
|
from typing import List, Optional
from meerkat.interactive.app.src.lib.component.abstract import Component, Slottable
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
class OnChangeRadio(EventInterface):
index: int
class Radio(Slottable, Component):
"""A single radio button.
If you just want a basic group of radio buttons, use the
RadioGroup component instead. Use this component only if
you want to customize the layout of a group of radio buttons.
For more advanced use cases, we recommend either using the
basic HTML radio button element and styling it yourself with
Tailwind, or using the Flowbite Radio component.
Args:
name (str): The name of this radio button. Assign the same
name to multiple radio buttons to group them together.
value (str): The value associated with this radio button.
disabled (bool): Whether this radio button is disabled.
color (Literal['blue', 'red', 'green', 'purple', 'teal', \
'yellow', 'orange']): The color of this radio button.
classes (str): The Tailwind classes to apply to the component.
on_change: The `Endpoint` to call when this radio button is selected. \
It must have the following signature:
`(index: int)`
with
index (int): The index of the selected radio button.
"""
name: str
value: str = ""
disabled: bool = False
color: Literal[
"blue", "red", "green", "purple", "teal", "yellow", "orange"
] = "purple"
classes: str = "bg-violet-50 p-2 rounded-lg w-fit"
on_change: Optional[Endpoint[OnChangeRadio]] = None
class OnChangeRadioGroup(EventInterface):
index: int
class RadioGroup(Component):
"""A basic group of radio buttons.
Args:
values (List[str]): The values associated with each radio button. \
The number of radio buttons will be the length of this list.
selected (Optional[int]): The index of the selected radio button (0-indexed). \
If None, no radio button will be preselected by default.
disabled (bool): Whether this radio group is disabled. If True, all \
radio buttons will be disabled and the user will not be able to \
select any of them.
horizontal (bool): Whether to display the radio buttons horizontally. \
Defaults to True.
color (Literal['blue', 'red', 'green', 'purple', 'teal', 'yellow', \
'orange']): The color of the radio buttons.
classes (str): The Tailwind classes to apply to the component.
on_change: The `Endpoint` to call when the selected radio button changes. \
It must have the following signature:
`(index: int)`
with
index (int): The index of the selected radio button.
"""
values: List[str]
selected: Optional[int] = None
disabled: bool = False
horizontal: bool = True
color: Literal[
"blue", "red", "green", "purple", "teal", "yellow", "orange"
] = "purple"
classes: str = "bg-violet-50 p-2 rounded-lg w-fit"
on_change: Optional[Endpoint[OnChangeRadioGroup]] = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/radio/__init__.py
|
from typing import Optional
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnSendChat(EventInterface):
message: str
class Chat(Component):
"""A chat component.
Args:
df (DataFrame): The dataframe to sync with the chat. \
It must have the following columns:
message (str): The message to display.
name (str): The name of the sender.
time (str): The time the message was sent.
sender (str): The sender of the message. \
Must be either "user" or "chatbot".
imgChatbot (str): The image to display for the chatbot, as a URL.
imgUser (str): The image to display for the user, as a URL.
on_send: The `Endpoint` to call when a message is sent. \
It must have the following signature:
`(message: str)`
with
message (str): The message sent by the user.
"""
# The dataframe to sync with the chat.
df: DataFrame
# The image to display for the chatbot.
img_chatbot: str = "http://placekitten.com/200/300"
# The image to display for the user.
img_user: str = "http://placekitten.com/200/300"
# Endpoint to call when a message is sent.
    # Endpoint should take a parameter called `message`, which is
# the message sent by the user.
# e.g. def on_send(message: str):
on_send: Optional[Endpoint[OnSendChat]] = None
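if __name__ == "__main__":
    # A minimal sketch of wiring the component (illustrative data; the
    # dataframe must carry the columns documented in the docstring above):
    import meerkat as mk

    df = mk.DataFrame(
        {
            "message": ["Hello! How can I help you?"],
            "name": ["Chatbot"],
            "time": ["9:00am"],
            "sender": ["chatbot"],
        }
    )

    @mk.endpoint()
    def on_send(message: str):
        print("user sent:", message)

    chat = Chat(df=df, on_send=on_send)
    mk.gui.Page(component=chat, id="chat").launch()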
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/chat/__init__.py
|
import uuid
from typing import Any, Dict, List, Sequence, Union
from pydantic import BaseModel
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.graph import reactive
class SortCriterion(BaseModel):
id: str
is_enabled: bool
column: str
ascending: bool
source: str = ""
def _format_criteria(
criteria: Sequence[Union[SortCriterion, Dict[str, Any]]]
) -> List[SortCriterion]:
# since the criteria can either be a list of dictionary or of SortCriterion
# we need to convert them to SortCriterion
return [
criterion
if isinstance(criterion, SortCriterion)
else SortCriterion(**criterion)
for criterion in criteria
]
@reactive()
def sort_by_criteria(
data: DataFrame,
criteria: Sequence[Union[SortCriterion, Dict[str, Any]]],
) -> DataFrame:
"""Wrapper around mk.sort that adds unpacking of store to the DAG."""
import meerkat as mk
# Since the criteria can either be a list of dictionary or of SortCriterion
# we need to convert them to SortCriterion
criteria = _format_criteria(criteria)
# Filter out criteria that are disabled.
criteria = [criterion for criterion in criteria if criterion.is_enabled]
if len(criteria) == 0:
return data.view()
sort_by = [criterion.column for criterion in criteria]
ascending = [criterion.ascending for criterion in criteria]
return mk.sort(data, by=sort_by, ascending=ascending)
class Sort(Component):
df: DataFrame
criteria: Union[List[SortCriterion], SortCriterion] = []
title: str = "Sort"
classes: str = ""
def __init__(
self,
df: DataFrame,
*,
criteria: List[SortCriterion] = [],
title: str = "Sort",
classes: str = "",
):
"""This component handles a sort_by list and a sort_order list.
Sorting criteria are maintained in a Store. On change of these
values, the dataframe is sorted.
        Calling this component returns a sorted view of the dataframe. The
        sort operation is out-of-place, so a new dataframe is returned as a
        result of the op.
Args:
df: The dataframe to sort.
criteria: The sorting criteria to use.
"""
super().__init__(df=df, criteria=criteria, title=title, classes=classes)
def __call__(self, df: DataFrame = None) -> DataFrame:
if df is None:
df = self.df
return sort_by_criteria(df, self.criteria)
@staticmethod
@reactive()
def create_criterion(column: str, ascending: bool, source: str = ""):
return SortCriterion(
id=str(uuid.uuid4()),
is_enabled=True,
column=column,
ascending=ascending,
source=source,
)
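if __name__ == "__main__":
    # A small demo of `sort_by_criteria` (a sketch; assumes reactive
    # functions execute eagerly outside an interactive context). Criteria
    # may be passed as plain dicts and are coerced to `SortCriterion`.
    import meerkat as mk

    df = mk.DataFrame({"a": [3, 1, 2], "b": ["x", "y", "z"]})
    out = sort_by_criteria(
        df,
        [{"id": "demo", "is_enabled": True, "column": "a", "ascending": True}],
    )
    print(out["a"])  # expected order: 1, 2, 3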
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/sort/__init__.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Vega(Component):
data: dict
spec: dict
options: dict = {}
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/vega/__init__.py
|
import meerkat as mk
from meerkat.interactive.app.src.lib.component.core.vega import Vega
vega = Vega(
spec={
"$schema": "https://vega.github.io/schema/vega/v5.json",
"width": 600,
"height": 300,
"padding": 5,
"signals": [
{
"name": "tooltip",
"value": {},
"on": [
{"events": "rect:mouseover", "update": "datum"},
{"events": "rect:mouseout", "update": "{}"},
],
}
],
"data": [{"name": "table"}],
"scales": [
{
"name": "xscale",
"type": "band",
"domain": {"data": "table", "field": "category"},
"range": "width",
"padding": 0.05,
"round": True,
},
{
"name": "yscale",
"domain": {"data": "table", "field": "amount"},
"nice": True,
"range": "height",
},
],
"axes": [
{"orient": "bottom", "scale": "xscale"},
{"orient": "left", "scale": "yscale"},
],
"marks": [
{
"type": "rect",
"from": {"data": "table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "category"},
"width": {"scale": "xscale", "band": 1},
"y": {"scale": "yscale", "field": "amount"},
"y2": {"scale": "yscale", "value": 0},
},
"update": {
"fill": {"value": "steelblue"},
},
"hover": {
"fill": {"value": "red"},
},
},
},
{
"type": "text",
"encode": {
"enter": {
"align": {"value": "center"},
"baseline": {"value": "bottom"},
"fill": {"value": "#333"},
},
"update": {
"x": {
"scale": "xscale",
"signal": "tooltip.category",
"band": 0.5,
},
"y": {
"scale": "yscale",
"signal": "tooltip.amount",
"offset": -2,
},
"text": {"signal": "tooltip.amount"},
"fillOpacity": [
{"test": "datum === tooltip", "value": 0},
{"value": 1},
],
},
},
},
],
},
data={
"table": [
{"category": "A", "amount": 28},
{"category": "B", "amount": 55},
{"category": "CC", "amount": 43},
{"category": "D", "amount": 91},
{"category": "E", "amount": 81},
{"category": "F", "amount": 53},
{"category": "G", "amount": 19},
{"category": "H", "amount": 87},
]
},
)
page = mk.gui.Page(component=vega, id="vega")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/vega/test_vega.py
|
from typing import Any
from meerkat.interactive.app.src.lib.component.abstract import Component
class Put(Component):
data: Any
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/put/__init__.py
|
import ast
from dataclasses import dataclass
from typing import TYPE_CHECKING, Union
import numpy as np
from fastapi import HTTPException
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint, EndpointProperty, endpoint
from meerkat.interactive.event import EventInterface
from meerkat.interactive.graph import Store, reactive
if TYPE_CHECKING:
from meerkat.ops.embed.encoder import Encoder
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
_SUPPORTED_BIN_OPS = {
"Add": lambda x, y: x + y,
"Sub": lambda x, y: x - y,
"Mult": lambda x, y: x * y,
"Div": lambda x, y: x / y,
"Pow": lambda x, y: x**y,
}
_SUPPORTED_CALLS = {
"concat": lambda *args: np.concatenate(args, axis=1),
}
def parse_query(query: str, encoder: Union[str, "Encoder"] = "clip"):
return _parse_query(ast.parse(query, mode="eval").body, encoder=encoder)
def _parse_query(node: ast.AST, encoder: Union[str, "Encoder"]):
import meerkat as mk
if isinstance(node, ast.BinOp):
return _SUPPORTED_BIN_OPS[node.op.__class__.__name__](
_parse_query(node.left, encoder=encoder),
_parse_query(node.right, encoder=encoder),
)
elif isinstance(node, ast.Call):
return _SUPPORTED_CALLS[node.func.id](
*[_parse_query(arg, encoder=encoder) for arg in node.args]
)
elif isinstance(node, ast.Constant):
return mk.embed(
data=mk.column([node.value]),
encoder=encoder,
num_workers=0,
pbar=False,
)
else:
node_repr = node.id if hasattr(node, "id") else node
if isinstance(node_repr, str):
node_repr = f"'{node_repr}'"
raise ValueError(f"Unsupported query {node_repr}")
@endpoint()
def get_match_schema(df: DataFrame):
import meerkat as mk
from meerkat.interactive.api.routers.dataframe import (
SchemaResponse,
_get_column_infos,
)
columns = [
k
for k, v in df.items()
if isinstance(v, mk.TensorColumn) and len(v.shape) == 2
# TODO: We should know the provenance of embeddings and where they came from,
# to explicitly check whether the encoder will match it in size.
]
return SchemaResponse(
id=df.id,
columns=_get_column_infos(df, columns),
nrows=len(df),
)
def _calc_image_query(df: DataFrame, locs: list, against: str):
"""Calculate the negative samples for a match."""
return df.loc[locs][against].mean(axis=0)
@endpoint()
def set_criterion(
df: DataFrame,
query: str,
against: str,
criterion: Store,
positives: list = None,
negatives: list = None,
encoder: Union[str, "Encoder"] = None,
):
"""Match a query string against a DataFrame column.
The `dataframe_id` remains the same as the original request.
"""
if not isinstance(df, DataFrame):
raise HTTPException(
status_code=400, detail="`match` expects a ref containing a dataframe"
)
try:
if not query and not negatives and not positives:
return criterion
query_embedding = 0.0
if query:
query_embedding = parse_query(query, encoder=encoder)
if negatives:
query_embedding = query_embedding - 0.25 * _calc_image_query(
df, negatives, against
)
if positives:
query_embedding = query_embedding + _calc_image_query(
df, positives, against
)
match_criterion = MatchCriterion(
query=query,
against=against,
query_embedding=query_embedding,
name=f"match({against}, {query})",
positives=positives,
negatives=negatives,
)
criterion.set(match_criterion)
if not (criterion.value is None or criterion.against is None):
data_embedding = df[criterion.against]
scores = (data_embedding @ criterion.query_embedding.T).squeeze()
df[criterion.name] = scores
df.set(df)
except Exception as e:
raise e
return criterion
@dataclass
class MatchCriterion:
against: str
query: str
name: str
query_embedding: np.ndarray = None
positives: list = None
negatives: list = None
class OnGetMatchSchemaMatch(EventInterface):
pass
class OnMatchMatch(EventInterface):
criterion: MatchCriterion
_get_match_schema = get_match_schema
class Match(Component):
df: DataFrame
against: str
text: str = ""
title: str = "Match"
enable_selection: bool = False
reset_criterion: bool = False
# TODO: Revisit this, how to deal with endpoint interfaces when there is composition
# and positional arguments
on_match: EndpointProperty[OnMatchMatch] = None
get_match_schema: EndpointProperty[OnGetMatchSchemaMatch] = None
on_clickminus: Endpoint = None
on_unclickminus: Endpoint = None
on_clickplus: Endpoint = None
on_unclickplus: Endpoint = None
on_reset: Endpoint = None
def __init__(
self,
df: DataFrame = None,
*,
against: str,
text: str = "",
encoder: Union[str, "Encoder"] = "clip",
title: str = "Match",
enable_selection: bool = False,
reset_criterion: bool = False,
on_match: EndpointProperty = None,
get_match_schema: EndpointProperty = None,
on_clickminus: Endpoint = None,
on_unclickminus: Endpoint = None,
on_clickplus: Endpoint = None,
on_unclickplus: Endpoint = None,
on_reset: Endpoint = None,
):
"""
Args:
df: The DataFrame.
against: The column to match against.
text: The query text.
encoder: The encoder to use.
title: The title of the component.
enable_selection: Whether to enable selection for image-based matching.
reset_criterion: Whether to reset the criterion when on_reset is called.
on_match: The endpoint to call when the match button is clicked.
This endpoint will be called after ``self.criterion`` is set.
"""
super().__init__(
df=df,
against=against,
text=text,
title=title,
enable_selection=enable_selection,
reset_criterion=reset_criterion,
on_match=on_match,
get_match_schema=get_match_schema,
on_clickminus=on_clickminus,
on_unclickminus=on_unclickminus,
on_clickplus=on_clickplus,
on_unclickplus=on_unclickplus,
on_reset=on_reset,
)
# we do not add the against or the query to the partial, because we don't
# want them to be maintained on the backend
# if they are maintained on the backend, then a store update dispatch will
# run on every key stroke
self.get_match_schema = _get_match_schema.partial(df=self.df)
self._criterion: MatchCriterion = Store(
MatchCriterion(against=None, query=None, name=None),
backend_only=True,
)
self.negative_selection = Store([], backend_only=True)
self.positive_selection = Store([], backend_only=True)
self._mode: Store[
Literal[
"set_negative_selection",
"set_positive_selection",
"",
]
] = Store("")
on_match = set_criterion.partial(
df=self.df,
encoder=encoder,
criterion=self._criterion,
positives=self.positive_selection,
negatives=self.negative_selection,
)
if self.on_match is not None:
on_match = on_match.compose(self.on_match)
self.on_match = on_match
def set_selection(self, selection: Store[list]):
self.external_selection = selection
self.enable_selection.set(True)
self._positive_selection = Store([], backend_only=True)
self._negative_selection = Store([], backend_only=True)
self.on_clickminus = self.on_set_negative_selection.partial(self)
self.on_clickplus = self.on_set_positive_selection.partial(self)
self.on_unclickminus = self.on_unset_negative_selection.partial(self)
self.on_unclickplus = self.on_unset_positive_selection.partial(self)
self.on_reset = self.on_reset_selection.partial(self)
self.on_external_selection_change(self.external_selection)
@endpoint()
def on_reset_selection(self):
"""Reset all the selections."""
self.negative_selection.set([])
self.positive_selection.set([])
self.external_selection.set([])
self._mode.set("")
self._positive_selection.set([])
self._negative_selection.set([])
if self.reset_criterion:
self._criterion.set(MatchCriterion(against=None, query=None, name=None))
@reactive()
def on_external_selection_change(self, external_selection):
if self._mode == "set_negative_selection":
self.negative_selection.set(external_selection)
elif self._mode == "set_positive_selection":
self.positive_selection.set(external_selection)
@endpoint()
def on_set_negative_selection(self):
if self._mode == "set_positive_selection":
self._positive_selection.set(self.external_selection.value)
self._mode.set("set_negative_selection")
self.external_selection.set(self._negative_selection.value)
@endpoint()
def on_unset_negative_selection(self):
self._negative_selection.set(self.external_selection.value)
self._mode.set("")
self.external_selection.set([])
@endpoint()
def on_set_positive_selection(self):
if self._mode == "set_negative_selection":
self._negative_selection.set(self.external_selection.value)
self._mode.set("set_positive_selection")
self.external_selection.set(self._positive_selection.value)
@endpoint()
def on_unset_positive_selection(self):
self._positive_selection.set(self.external_selection.value)
self._mode.set("")
self.external_selection.set([])
@property
def criterion(self) -> MatchCriterion:
return self._criterion
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/match/__init__.py
|
from typing import Optional
from pydantic import validator
from meerkat.interactive.app.src.lib.component.abstract import Component
class Markdown(Component):
body: str
classes: str = ""
base_url: Optional[str] = None
breaks: bool = True
gfm: bool = True
header_ids: bool = True
header_prefix: str = ""
lang_prefix: str = "language-"
mangle: bool = True
pedantic: bool = False
sanitize: bool = False
silent: bool = False
smartypants: bool = False
xhtml: bool = False
def __init__(
self,
body: str,
*,
classes: str = "",
base_url: Optional[str] = None,
breaks: bool = True,
gfm: bool = True,
header_ids: bool = True,
header_prefix: str = "",
lang_prefix: str = "language-",
mangle: bool = True,
pedantic: bool = False,
sanitize: bool = False,
silent: bool = False,
smartypants: bool = False,
xhtml: bool = False,
):
"""Render markdown with GitHub Flavored Markdown (GFM) syntax.
The implementation of this component uses the \
marked.js library (https://github.com/markedjs/marked).
Argument descriptions below are taken directly from the
marked.js documentation.
Args:
body: The markdown data to render.
classes: The Tailwind classes to apply to the component,
see @tailwindcss/typography for the classes that are
specifically available to style this component.
base_url: The base URL to use for relative links.
breaks: If true, add <br> on a single line break (copies
GitHub behavior on comments, but not on rendered markdown files).
            Requires gfm to be true.
gfm: If true, use approved GitHub Flavored Markdown (GFM) specification.
header_ids: If true, include an id attribute when emitting headings
(h1, h2, h3, etc).
header_prefix: A string to prefix the id attribute when emitting
headings (h1, h2, h3, etc).
lang_prefix: A string to prefix the className in a <code> block.
Useful for syntax highlighting.
        mangle: If true, autolinked email addresses are escaped with HTML
            character references.
pedantic: If true, conform to the original markdown.pl as much
as possible. Don't fix original markdown bugs or behavior.
Turns off and overrides gfm.
sanitize: If true, sanitize the HTML passed into markdownString
with the sanitizer function.
silent: If true, the parser does not throw any exception.
smartypants: If true, use "smart" typographic punctuation for
things like quotes and dashes.
xhtml: If true, emit self-closing HTML tags for void elements
(<br/>, <img/>, etc.) with a "/" as required by XHTML.
"""
super().__init__(
body=body,
classes=classes,
base_url=base_url,
breaks=breaks,
gfm=gfm,
header_ids=header_ids,
header_prefix=header_prefix,
lang_prefix=lang_prefix,
mangle=mangle,
pedantic=pedantic,
sanitize=sanitize,
silent=silent,
smartypants=smartypants,
xhtml=xhtml,
)
class Title(Markdown):
"""Display title text.
Use this component for the main title of a page. This will place the
text in an `<h1>` tag.
"""
def __init__(self, text: str, *, classes: str = "", **kwargs):
super().__init__(body=text, classes=classes, **kwargs)
@validator("body", pre=True)
def make_title(cls, v):
return f"# {v}"
class Header(Markdown):
"""Display header text.
Use this component for the main header of a section. This will place
the text in an `<h2>` tag.
"""
def __init__(self, text: str, *, classes: str = "", **kwargs):
super().__init__(body=text, classes=classes, **kwargs)
@validator("body", pre=True)
def make_header(cls, v):
return f"## {v}"
class Subheader(Markdown):
"""Display subheader text.
Use this component for the subheader of a section. This will place
the text in an `<h3>` tag.
"""
def __init__(self, text: str, *, classes: str = "", **kwargs):
super().__init__(body=text, classes=classes, **kwargs)
@validator("body", pre=True)
def make_subheader(cls, v):
return f"### {v}"
class Caption(Markdown):
"""Display caption text in a smaller, gray font size.
Use this component for explanatory text that is not the main body of a section.
This will place the text in a `<p>` tag.
Default Tailwind classes:
text-sm text-gray-400
"""
def __init__(self, text: str, *, classes: str = "", **kwargs):
super().__init__(body=text, classes=classes, **kwargs)
@validator("classes", pre=True, always=True)
def add_classes(cls, v):
classes = "text-sm text-gray-400"
return classes if v is None else f"{v} {classes}"
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/markdown/__init__.py
|
import meerkat as mk
title = mk.gui.core.Title("Title")
header = mk.gui.core.Header("Header")
subheader = mk.gui.core.Subheader("Subheader")
caption = mk.gui.core.Caption("Caption")
markdown = mk.gui.core.Markdown(
body="""
# Hello world
This is a markdown component.
We can show bold text like this: **bold**.
We can show italic text like this: *italic*.
We can show code like this: `code`.
We can show a link like this: [link](https://meerkat.ml).
We can show a list like this:
- item 1
- item 2
- item 3
We can show a table like this:
| Column 1 | Column 2 | Column 3 |
|----------|----------|----------|
| item 1 | item 2 | item 3 |
| item 3 | item 4 | item 5 |
Add a Python code block:
```python
import meerkat as mk
```
Add a JavaScript code block:
```javascript
console.log("Hello world");
```
""",
classes="prose prose-sm",
breaks=True,
)
component = mk.gui.html.div(slots=[
mk.gui.html.div(slots=[title, header, subheader, caption]),
mk.gui.html.div(slots=[markdown], classes="mt-8"),
])
page = mk.gui.Page(component=component, id="markdown")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/markdown/test_markdown.py
|
from typing import Callable, Dict, List, Union
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import BaseComponent
from meerkat.interactive.graph import make_store
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.ops.sliceby.sliceby import SliceBy
class Aggregation(IdentifiableMixin):
_self_identifiable_group: str = "aggregations"
def __init__(self, func: Callable[["DataFrame"], Union[int, float, str]]):
self.func = func
super().__init__()
def __call__(self, df: "DataFrame") -> Union[int, float, str]:
return self.func(df)
@property
def config(self):
return {
"id": self.id,
}
class SliceByCards(BaseComponent): # FIXME: update this component
def __init__(
self,
sliceby: SliceBy,
main_column: str,
tag_columns: List[str] = None,
aggregations: Dict[str, Callable[["DataFrame"], Union[int, float, str]]] = None,
df: DataFrame = None,
) -> None:
super().__init__()
self.sliceby = sliceby
if df is None:
df = self.sliceby.obj.data
else:
assert self.sliceby.obj.data is df
self.df = df
if aggregations is None:
aggregations = {}
self.aggregations = {k: Aggregation(v) for k, v in aggregations.items()}
self.main_column = make_store(main_column)
if tag_columns is None:
tag_columns = []
self.tag_columns = make_store(tag_columns)
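if __name__ == "__main__":
    # A sketch of the Aggregation wrapper on its own (hypothetical data):
    import meerkat as mk

    agg = Aggregation(lambda df: df["score"].mean())
    print(agg(mk.DataFrame({"score": [0.1, 0.9]})))  # expected: 0.5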
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/slicebycards/__init__.py
|
from typing import List
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.formatter.base import register_placeholder
class Gallery(Component):
df: DataFrame
main_column: str
tag_columns: List[str] = []
selected: List[str] = []
allow_selection: bool = False
cell_size: int = 24
per_page: int = 20
def __init__(
self,
df: DataFrame,
*,
main_column: str = None,
tag_columns: List[str] = None,
selected: List[int] = [],
allow_selection: bool = False,
cell_size: int = 24,
per_page: int = 20,
page: int = 0,
):
"""Gallery view of a DataFrame.
Args:
df (DataFrame): The DataFrame to display.
main_column (str): The column to display in the main gallery view.
tag_columns (List[str], optional): The columns to display as tags.
Defaults to [].
selected (List[int], optional): The indices of the rows selected in the
gallery. Useful for labeling and other tasks. Defaults to [].
allow_selection (bool, optional): Whether to allow the user to select
rows. Defaults to False.
per_page (int, optional): The number of rows to display per page.
"""
if main_column is None:
main_column = df.columns[0]
if tag_columns is None:
tag_columns = []
super().__init__(
df=df,
main_column=main_column,
tag_columns=tag_columns,
selected=selected,
allow_selection=allow_selection,
cell_size=cell_size,
per_page=per_page,
page=page,
)
def _get_ipython_height(self):
return "600px"
register_placeholder(
name="gallery",
fallbacks=["thumbnail"],
description="Formatter to be used in a gallery view.",
)
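if __name__ == "__main__":
    # A minimal sketch of constructing the component (hypothetical data;
    # a real app would use an image column with a formatter):
    import meerkat as mk

    df = mk.DataFrame({"image": ["a.png", "b.png"], "label": [0, 1]})
    gallery = Gallery(df, main_column="image", tag_columns=["label"])
    mk.gui.Page(component=gallery, id="gallery").launch()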
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/gallery/__init__.py
|
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint, EndpointProperty, endpoint
from meerkat.interactive.event import EventInterface
from meerkat.interactive.graph import Store, reactive
from meerkat.interactive.graph.marking import mark
@reactive()
def run_code_cell(df: DataFrame, code: str):
df = df.view() # this is needed to avoid cycles in simple df case
lines = code.split("\n")
_locals = locals()
exec("\n".join(lines[:-1]), {}, _locals)
return eval(lines[-1], {}, _locals)
@reactive()
def run_filter_code_cell(df: DataFrame, code: str):
df = df.view() # this is needed to avoid cycles in simple df case
_locals = locals()
exec(code, {}, _locals)
return eval("df[df.map(condition)]", {}, _locals)
@endpoint()
def base_on_run(code: Store[str], new_code: str):
# TODO: there is some checks we can do here,
# before setting (e.g. empty string, syntax checks)
code.set(new_code)
return code
class OnRunCodeCell(EventInterface):
new_code: str
class CodeCell(Component):
code: str = ""
on_run: EndpointProperty[OnRunCodeCell] = None
def __init__(self, code: str = "df", on_run: Endpoint = None):
code = mark(code)
if on_run is not None:
on_run = base_on_run.partial(code=code).compose(on_run)
else:
on_run = base_on_run.partial(code=code)
super().__init__(on_run=on_run, code=code)
def __call__(self, df: DataFrame):
out = run_code_cell(df, self.code)
if not isinstance(out, DataFrame):
raise ValueError("The code must return a DataFrame.")
return out
class FilterCodeCell(CodeCell):
def __init__(self, df: DataFrame, code: str = None, on_run: Endpoint = None):
code = f"def condition({', '.join(map(lambda x: x.value, df.columns[:4]))})"
":\n return True"
super().__init__(code=code, on_run=on_run)
def __call__(self, df: DataFrame):
out = run_filter_code_cell(df, self.code)
if not isinstance(out, DataFrame):
raise ValueError("The code must return a DataFrame.")
return out
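if __name__ == "__main__":
    # A demo of the code-cell semantics (a sketch; assumes reactive
    # functions execute eagerly outside an interactive context): every line
    # but the last is exec'd, and the last line is eval'd as the result.
    import meerkat as mk

    df = mk.DataFrame({"a": [1, -2, 3]})
    out = run_code_cell(df, "pos = df[df['a'] > 0]\npos")
    print(out)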
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/code_cell/__init__.py
|
from typing import Optional
from meerkat.interactive.app.src.lib.component.abstract import Component, Slottable
from meerkat.interactive.endpoint import Endpoint
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
class Checkbox(Slottable, Component):
checked: bool = False
disabled: bool = False
color: Literal[
"blue",
"red",
"green",
"purple",
"teal",
"yellow",
"orange",
] = "purple"
classes: str = ""
on_change: Optional[Endpoint] = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/checkbox/__init__.py
|
import meerkat as mk
@mk.endpoint()
def on_change(checked: bool):
print(checked, flush=True)
checkbox = mk.gui.core.Checkbox(
slots=[mk.gui.html.div(slots=["Checkbox"], classes="text-purple-500")],
checked=True,
classes="bg-violet-50 p-2 rounded-lg w-fit",
)
checkbox.on_change = on_change.partial(checked=checkbox.checked)
page = mk.gui.Page(component=checkbox, id="checkbox")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/checkbox/test_checkbox.py
|
import meerkat as mk
code_py = mk.gui.core.Code(
body="""\
from meerkat.interactive import Page, endpoint
from meerkat.interactive.app.src.lib.component.codedisplay import CodeDisplay
"""
)
page = mk.gui.Page(component=code_py, id="code")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/code/test_code.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Code(Component):
body: str
theme: str = "okaidia"
background: str = "bg-slate-800"
language: str = "python"
classes: str = ""
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/code/__init__.py
|
import meerkat as mk
from meerkat.interactive import Page
from meerkat.interactive.app.src.lib.component.core.pdf import PDF
pdf = PDF(data="https://arxiv.org/pdf/0704.0001.pdf")
component = mk.gui.html.div(
    slots=[
        pdf,
    ]
)
page = Page(component=component, id="pdf")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/pdf/test_pdf.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class PDF(Component):
data: str
classes: str = ""
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/pdf/__init__.py
|
import meerkat as mk
@mk.endpoint()
def on_change(value):
print("on_change", value)
slider = mk.gui.core.Slider(
min=-2.0,
max=2.0,
step=0.01,
on_change=on_change,
)
page = mk.gui.Page(component=slider, id="slider")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/slider/test_slider.py
|
from typing import Any, Optional, Union
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnChangeSlider(EventInterface):
value: Any
class Slider(Component):
"""A slider that allows the user to select a value from a range.
Args:
value: The current value of the slider.
min: The minimum value of the slider.
max: The maximum value of the slider.
step: The step size of the slider.
disabled: Whether the slider is disabled.
classes: The Tailwind classes to apply to the component.
"""
value: Union[float, int] = 0.0
min: Union[float, int] = 0.0
max: Union[float, int] = 100.0
step: Union[float, int] = 1.0
disabled: bool = False
classes: str = "bg-violet-50 px-4 py-1 rounded-lg"
on_change: Optional[Endpoint[OnChangeSlider]] = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/slider/__init__.py
|
from typing import Any, List, Optional
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import EndpointProperty
from meerkat.interactive.event import EventInterface
class OnFetchInterface(EventInterface):
"""The interface for the get_data endpoint."""
df: DataFrame
column: str
index: int
dim: Optional[int] = None
type: Optional[str] = None
class MedicalImage(Component):
"""A component for displaying medical images.
Args:
data: An array of base64 encoded images.
classes: A string of classes to apply to the component.
show_toolbar: Whether to show the toolbar.
on_fetch: An endpoint to call when the component needs to fetch data.
"""
data: List[str]
classes: str = ""
show_toolbar: bool = False
dim: int
segmentation_column: str = ""
# A function to call to encode the data.
# This should be a variant of the MedicalImage.encode method.
on_fetch: EndpointProperty[OnFetchInterface]
# We need to declare this here to enable the dynamic component
# wrapper forwarding.
# TODO: Add this to a generic CellComponent class.
cell_info: Any = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/medimage/__init__.py
|
from typing import Optional
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
class Document(Component):
df: DataFrame
text_column: str
paragraph_column: Optional[str] = None
label_column: Optional[str] = None
id_column: Optional[str] = None
@classmethod
def events(cls):
return ["label"]
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/document/__init__.py
|
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
class Carousel(Component):
df: DataFrame
main_column: str
def __init__(self, df: DataFrame, *, main_column: str):
super().__init__(df=df, main_column=main_column)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/carousel/__init__.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Image(Component):
data: str
classes: str = ""
enable_zoom: bool = False
enable_pan: bool = False
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/image/__init__.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class CopyButton(Component):
value: str
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/copy_button/__init__.py
|
from typing import Any
from meerkat.interactive.app.src.lib.component.abstract import Component
class Number(Component):
data: Any
dtype: str = "auto"
precision: int = 3
percentage: bool = False
classes: str = ""
editable: bool = False
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/number/__init__.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnChangeToggle(EventInterface):
value: bool
class Toggle(Component):
value: bool = False
on_change: Endpoint[OnChangeToggle] = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/toggle/__init__.py
|
from typing import Any
from meerkat.interactive import Page, endpoint, print
from meerkat.interactive.app.src.lib.component.core.toggle import Toggle
@endpoint
def on_change(value: Any):
# Must use flush=True to flush the stdout buffer
print(f"Toggled {value}", flush=True)
toggle = Toggle(
on_change=on_change,
)
print(toggle.value)
page = Page(component=toggle, id="toggle")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/toggle/test_toggle.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Audio(Component):
data: str
classes: str = ""
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/audio/__init__.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Json(Component):
"""Render a JSON object as a collapsible tree.
Args:
body (dict): The JSON object to render, as a Python dictionary.
padding (float): Left padding applied to each level of the tree.
classes (str): The Tailwind classes to apply to the component.
"""
body: dict
padding: float = 2.0
classes: str = ""
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/json/__init__.py
|
import json
import meerkat as mk
component = mk.gui.core.Json(
body=json.loads("""\
{
"name": "John Doe",
"age": 43,
"children": [
{
"name": "Sally",
"age": 13
},
{
"name": "Billy",
"age": 8
}
]
}
"""),
padding=2,
)
page = mk.gui.Page(component=component, id="json")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/json/test_json.py
|
from meerkat.interactive import Page, endpoint
from meerkat.interactive.app.src.lib.component.core.button import Button
@endpoint
def on_click():
# Must use flush=True to flush the stdout buffer
print("Button clicked", flush=True)
button = Button(
title="Button",
on_click=on_click,
)
page = Page(component=button, id="button")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/button/test_button.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import EndpointProperty
from meerkat.interactive.event import EventInterface
class OnClickButton(EventInterface):
pass
class Button(Component):
title: str
icon: str = None
classes: str = "bg-slate-100 py-1 rounded-md flex flex-col hover:bg-slate-200"
    on_click: EndpointProperty[OnClickButton] = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/button/__init__.py
|
from typing import Any, List, Union
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import EndpointProperty, endpoint
from meerkat.interactive.event import EventInterface
from meerkat.interactive.formatter.base import register_placeholder
class OnEditInterface(EventInterface):
"""Defines the interface for an event.
Subclass this to define the interface for a new event type.
The class will specify the keyword arguments returned by an event from the
frontend to any endpoint that has subscribed to it.
All endpoints that are expected to receive an event of this type should
ensure they have a signature that matches the keyword arguments defined
in this class.
"""
column: str
keyidx: Union[int, str]
posidx: int
value: Any
class OnSelectTable(EventInterface):
selected: List[Any]
@endpoint
def edit(df: DataFrame, column: str, keyidx: Union[int, str], posidx: int, value: Any):
df.loc[keyidx, column] = value
class Table(Component):
df: DataFrame
selected: List[str] = []
single_select: bool = False
classes: str = "h-fit"
on_edit: EndpointProperty[OnEditInterface] = None
on_select: EndpointProperty[OnSelectTable] = None
def __init__(
self,
df: DataFrame,
*,
selected: List[int] = [],
single_select: bool = False,
classes: str = "h-fit",
on_edit: EndpointProperty = None,
on_select: EndpointProperty = None
):
"""Table view of a DataFrame.
Args:
df (DataFrame): The DataFrame to display.
selected (List[int], optional): The indices of the rows selected in the \
gallery. Useful for labeling and other tasks. Defaults to [].
allow_selection (bool, optional): Whether to allow the user to select \
rows. Defaults to False.
single_select: Whether to allow the user to select only one row.
"""
if on_edit is None:
on_edit = edit.partial(df=df)
super().__init__(
df=df,
selected=selected,
single_select=single_select,
classes=classes,
on_edit=on_edit,
on_select=on_select,
)
def _get_ipython_height(self):
return "600px"
register_placeholder(
name="table",
fallbacks=["tiny"],
description="Formatter to be used in a gallery view.",
)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/table/__init__.py
|
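A minimal usage sketch for Table with a selection endpoint, following the pattern of the other test files in this dump (DataFrame contents are illustrative):
import meerkat as mk
from meerkat.interactive import Page, endpoint
from meerkat.interactive.app.src.lib.component.core.table import Table

df = mk.DataFrame({"id": [0, 1, 2], "label": ["cat", "dog", "bird"]})

@endpoint
def on_select(selected):
    # Receives the ``selected`` field defined by OnSelectTable.
    print("Selected rows:", selected, flush=True)

table = Table(df, on_select=on_select)
page = Page(component=table, id="table")
page.launch()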
import base64
import io
from typing import Any, Dict, Hashable, List, Optional, Sequence, Tuple, Union
import numpy as np
from PIL import Image as PILImage
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import EndpointProperty, endpoint
from meerkat.interactive.event import EventInterface
from meerkat.interactive.graph.reactivity import reactive
from meerkat.interactive.graph.store import Store
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
ColorCode = Union[str, Tuple[int, int, int]]
class SelectInterface(EventInterface):
x: float
y: float
click_type: Literal["single", "double", "right"]
class AddCategoryInterface(EventInterface):
category: str
class ColorChangeEvent(EventInterface):
category: Hashable
color: str # the hex code
class AddBoxInterface(EventInterface):
box: Any
class AddPointInterface(EventInterface):
point: Tuple[float, float]
Segmentation = Union[np.ndarray, str]
BoundingBox = Union[np.ndarray, Tuple[float, ...]]
class ImageAnnotator(Component):
data: Union[np.ndarray, PILImage.Image, str]
categories: Optional[Union[List, Dict[Hashable, ColorCode]]] = None
segmentations: Optional[Sequence[Tuple[Segmentation, str]]] = None
points: Optional[Sequence[Dict]] = None
boxes: Optional[Sequence[Dict]] = None
opacity: float = 0.85
selected_category: str = ""
# TODO: Parameters to add
# boxes: Bounding boxes to draw on the image.
# polygons: Polygons to draw on the image.
on_add_category: EndpointProperty[AddCategoryInterface] = None
on_add_box: EndpointProperty[AddBoxInterface] = None
on_add_point: EndpointProperty[AddPointInterface] = None
def __init__(
self,
data,
*,
categories=None,
segmentations=None,
points=None,
boxes=None,
opacity: float = 0.85,
selected_category: str = "",
on_add_category: EndpointProperty[AddCategoryInterface] = None,
on_add_box: EndpointProperty[AddBoxInterface] = None,
on_add_point: EndpointProperty[AddPointInterface] = None,
):
"""
Args:
data: The base image.
Strings must be base64 encoded or a filepath to the image.
categories: The categories in the image. These categories will be used
for all annotations. Can either be a list of category names, a
dictionary mapping category names to colors, or a DataFrame
with two columns ("name" and "color").
segmentations: A list of (mask, category) tuples.
opacity: The initial opacity of the segmentation masks.
on_select: An endpoint to call when the user clicks on the image.
"""
if points is None:
points = []
if boxes is None:
boxes = []
        if categories is None:
            # Use the local argument; ``self`` is not initialized yet at this point.
            categories = [category for _, category in segmentations]
if isinstance(categories, (tuple, list)):
categories = dict(zip(categories, generate_random_colors(len(categories))))
super().__init__(
data=data,
categories=categories,
segmentations=segmentations,
points=points,
boxes=boxes,
opacity=opacity,
selected_category=selected_category,
on_add_category=on_add_category,
on_add_box=on_add_box,
on_add_point=on_add_point,
)
self.data = self.prepare_data(self.data)
categories = self.prepare_categories(self.categories)
self.segmentations = colorize_segmentations(self.segmentations, categories)
# At some point get rid of this and see if we can pass colorized segmentations.
self.segmentations = encode_segmentations(self.segmentations)
# Initialize endpoints
self.on_add_category = self._add_category.partial(self)
# self.on_clear_annotations = self.clear_annotations.partial(self)
@reactive()
def prepare_data(self, data):
if isinstance(data, str):
return str(data)
from meerkat.interactive.formatter.image import ImageFormatter
# TODO: Intelligently pick what the mode should be.
return ImageFormatter().encode(data, mode="RGB")
@reactive()
def prepare_categories(self, categories):
# Convert hex colors (if necessary).
# This line also creates a shallow copy of the dictionary,
# which is necessary to avoid mutating the original dictionary
# (required for reactive functions).
categories = {
k: _from_hex(v) if isinstance(v, str) else v for k, v in categories.items()
}
# Make sure all colors are in RGBA format.
for k in categories:
if len(categories[k]) == 3:
categories[k] = np.asarray(tuple(categories[k]) + (255,))
return categories
@endpoint()
def _add_category(self, category):
if category not in self.categories:
self.categories[category] = generate_random_colors(1)[0]
self.categories.set(self.categories)
    def clear_annotations(self, annotation_type: Optional[str] = None):
        # TODO: use ``annotation_type`` to clear only one kind of annotation;
        # currently both points and segmentations are cleared.
        self.points.set([])
        self.segmentations.set([])
# @endpoint()
# def on_color_change(self, category: Hashable, color: ColorCode):
# self.categories[category] = _fromcolor
@reactive()
def colorize_segmentations(segmentations, categories: Dict[Hashable, np.ndarray]):
"""Colorize the segmentation masks.
We assume segmentations are in the form of (array, category) tuples.
``categories`` is a dictionary mapping categories to RGB colors.
Returns:
A list of RGBA numpy arrays - shape: (H, W, 4).
"""
if segmentations is None:
return None
return Store(
[
(_colorize_mask(segmentation, categories[name]), name)
for segmentation, name in segmentations
],
backend_only=True,
)
@reactive()
def encode_segmentations(segmentations):
"""Encode the segmentation masks as base64 strings.
We assume segmentations are in the form of (array, category) tuples.
Returns:
A list of (base64 string, category) tuples.
"""
if segmentations is None:
return None
return [(_encode_mask(segmentation), name) for segmentation, name in segmentations]
def _colorize_mask(mask, color):
# TODO: Add support for torch tensors.
color_mask = np.zeros(mask.shape + (4,), dtype=np.uint8)
if len(color) == 3:
color = np.asarray(tuple(color) + (255,))
if not isinstance(color, np.ndarray):
color = np.asarray(color)
color_mask[mask] = color
return color_mask
def _encode_mask(colored_mask):
"""Encode a colored mask as a base64 string."""
ftype = "png"
colored_mask = PILImage.fromarray(colored_mask, mode="RGBA")
with io.BytesIO() as buffer:
colored_mask.save(buffer, format=ftype)
return "data:image/{ftype};base64,{im_base_64}".format(
ftype=ftype, im_base_64=base64.b64encode(buffer.getvalue()).decode()
)
def _from_hex(color: str):
"""Convert a hex color to an RGB tuple."""
color = color.lstrip("#")
if len(color) % 2 != 0:
raise ValueError("Hex color must have an even number of digits.")
    return np.asarray(
        [int(color[i * 2 : (i + 1) * 2], 16) for i in range(len(color) // 2)]
    )
def generate_random_colors(n: int):
"""Generate ``n`` random colors.
Args:
n: The number of colors to generate.
Returns:
A list of ``n`` random uint8 colors in RGBA format.
"""
    out = np.random.randint(0, 256, (n, 3), dtype=np.uint8)  # high bound is exclusive
out = np.concatenate((out, np.full((n, 1), 255, dtype=np.uint8)), axis=1)
return out
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/image_annotator/__init__.py
|
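A minimal usage sketch for ImageAnnotator with one synthetic segmentation mask (the image and mask are illustrative; hex colors are converted internally by prepare_categories):
import numpy as np
from meerkat.interactive import Page
from meerkat.interactive.app.src.lib.component.core.image_annotator import (
    ImageAnnotator,
)

image = np.zeros((64, 64, 3), dtype=np.uint8)  # a blank RGB image
mask = np.zeros((64, 64), dtype=bool)
mask[16:48, 16:48] = True  # a square region to annotate

annotator = ImageAnnotator(
    image,
    segmentations=[(mask, "object")],
    categories={"object": "#ff0000"},
)
page = Page(component=annotator, id="image_annotator")
page.launch()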
from meerkat.interactive.app.src.lib.component.abstract import Component
class RawHTML(Component):
html: str
view: str = "full"
sanitize: bool = True
classes: bool = "rounded-md shadow-md"
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/raw_html/__init__.py
|
import functools
import meerkat as mk
from meerkat.interactive import Page
from meerkat.interactive.app.src.lib.component.core.raw_html import RawHTML
rawhtml = functools.partial(
RawHTML,
html="""\
<div>
<p>Some text</p>
<p>Some more text</p>
</div>
<div>
<p>Some text</p>
</div>
"""
)
rawhtml_full = rawhtml(view="full")
rawhtml_thumbnail = rawhtml(view="thumbnail")
rawhtml_logo = rawhtml(view="logo")
component = mk.gui.html.div(slots=[
rawhtml_full,
rawhtml_thumbnail,
rawhtml_logo,
])
page = Page(component=component, id="rawhtml")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/raw_html/test_rawhtml.py
|
from typing import Dict, List
from pydantic import BaseModel
from meerkat.interactive.app.src.lib.component.abstract import Component
class TensorInfo(BaseModel):
data: List
shape: Dict[str, int]
dtype: str
class Tensor(Component):
data: TensorInfo
dtype: str
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/tensor/__init__.py
|
from typing import Optional, Union
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnBlurNumberInput(EventInterface):
value: int
class OnKeyEnterNumberInput(EventInterface):
value: int
class NumberInput(Component):
value: Union[int, float]
placeholder: str = "Enter a number..."
debounceTimer: int = 150
classes: str = "grow h-10 px-3 rounded-md shadow-md my-1 border-gray-400"
on_blur: Optional[Endpoint[OnBlurNumberInput]] = None
on_keyenter: Optional[Endpoint[OnKeyEnterNumberInput]] = None
def __init__(
self,
value: Union[int, float],
*,
placeholder: str = "Enter a number...",
debounceTimer: int = 150,
classes: str = "grow h-10 px-3 rounded-md shadow-md my-1 border-gray-400",
on_blur: Optional[Endpoint[OnBlurNumberInput]] = None,
on_keyenter: Optional[Endpoint[OnKeyEnterNumberInput]] = None,
):
"""An input field that can be used to get a numeric input from the
user.
Attributes:
value: The value in the input field.
placeholder: The placeholder text.
debounce_timer: The debounce timer in milliseconds.
on_blur: The endpoint to call when the input field loses focus.
on_enter: The endpoint to call when the user presses enter.
"""
super().__init__(
value=value,
placeholder=placeholder,
debounceTimer=debounceTimer,
classes=classes,
on_blur=on_blur,
on_keyenter=on_keyenter,
)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/numberinput/__init__.py
|
from meerkat.interactive import Page, print
from meerkat.interactive.app.src.lib.component.core.numberinput import NumberInput
numberinput = NumberInput(value=1.3)
print(numberinput.value)
page = Page(component=numberinput, id="numberinput")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/numberinput/test_numberinput.py
|
from meerkat.interactive.app.src.lib.component.abstract import Component
class Text(Component):
data: str
classes: str = ""
editable: bool = False
def __init__(
self,
data: str,
*,
classes: str = "",
editable: bool = False,
):
"""Display text.
Args:
data: The text to display.
editable: Whether the text is editable.
"""
# "whitespace-nowrap text-ellipsis overflow-hidden text-right "
super().__init__(
classes=classes,
data=data,
editable=editable,
)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/text/__init__.py
|
import meerkat as mk
from meerkat.interactive import Page, print
from meerkat.interactive.app.src.lib.component.core.text import Text
text_1 = Text(
data="Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
"Sed euismod, nisl vitae ultricies lacinia, nisl nisl aliquam nisl, "
"vitae aliquam nisl nisl sit amet nisl. Nulla",
)
text_2 = Text(
data="Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
"Sed euismod, nisl vitae ultricies lacinia, nisl nisl aliquam nisl, "
"vitae aliquam nisl nisl sit amet nisl. Nulla",
editable=True,
)
text_3 = Text(
    data="Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
    "Sed euismod, nisl vitae ultricies lacinia, nisl nisl aliquam nisl, "
    "vitae aliquam nisl nisl sit amet nisl. Nulla",
    # Text has no ``view`` field; control wrapping with Tailwind classes instead.
    classes="whitespace-normal",
)
print(text_1.data)
print(text_2.data)
component = mk.gui.html.flexcol(
    slots=[text_1, text_2, text_3]
)
page = Page(component=component, id="text")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/text/test_text.py
|
import meerkat as mk
@mk.endpoint()
def on_change(value):
print("on_change", value, flush=True)
select.labels.set([1, 2, 3, 4, 5])
select = mk.gui.core.Select(
values=[1, 2, 3, 4, 5],
labels=["one", "two", "three", "four", "five"],
value=3,
on_change=on_change,
)
select_no_labels = mk.gui.core.Select(
values=[1, 2, 3, 4, 5],
value=3,
on_change=on_change,
)
component = mk.gui.html.div(
slots=[select, select_no_labels],
)
page = mk.gui.Page(component=component, id="select")
page.launch()
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/select/test_select.py
|
from typing import Any, List, Optional, Union
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnChangeSelect(EventInterface):
value: Union[str, int, float, bool, None]
class Select(Component):
"""A selection dropdown that can be used to select a single value from a
list of options.
Args:
values (List[Any]): A list of values to select from.
labels (List[str]): A list of labels to display for each value.
value (Any): The selected value.
disabled (bool): Whether the select is disabled.
classes (str): The Tailwind classes to apply to the select.
on_change: The `Endpoint` to call when the selected value changes. \
It must have the following signature:
`(value: Union[str, int, float, bool, None])`
with
value (Union[str, int, float, bool, None]): The value of the \
selected radio button.
"""
values: List[Any]
labels: List[str] = None
value: Any = None
disabled: bool = False
classes: str = ""
on_change: Optional[Endpoint[OnChangeSelect]] = None
def __init__(
self,
values: List[Any],
*,
labels: List[str] = None,
value: Any = None,
disabled: bool = False,
classes: str = "",
on_change: Optional[Endpoint[OnChangeSelect]] = None,
):
if labels is None:
labels = values
super().__init__(
values=values,
labels=labels,
value=value,
disabled=disabled,
classes=classes,
on_change=on_change,
)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/select/__init__.py
|
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
import pandas as pd
from pydantic import BaseModel, Field
from meerkat.columns.abstract import Column
from meerkat.columns.scalar import ScalarColumn
from meerkat.dataframe import DataFrame
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.graph import Store, reactive
def filter_by_operator(*args, **kwargs):
raise NotImplementedError()
def _in(column: Column, value):
if not isinstance(value, (tuple, list)):
value = [value]
if isinstance(column, Column):
column = column.data
if not isinstance(column, pd.Series):
        # ``column`` has already been unwrapped to raw data above.
        data = pd.Series(column)
else:
data = column
return data.isin(value)
def _contains(column: Column, value):
if not isinstance(column[0], str):
raise ValueError(
"contains operator only works on string columns. "
f"Got column {type(column)} with value {type(column[0])}."
)
if isinstance(value, (list, tuple)):
assert len(value) == 1
value = value[0]
return column.str.contains(str(value))
# Map a string operator to a function that takes a value and returns a boolean.
_operator_str_to_func = {
"==": lambda x, y: x == y, # equality
"!=": lambda x, y: x != y, # inequality
">": lambda x, y: x > y, # greater than
"<": lambda x, y: x < y, # less than
">=": lambda x, y: x >= y, # greater than or equal to
"<=": lambda x, y: x <= y, # less than or equal to
"in": _in,
"not in": lambda x, y: ~_in(x, y),
"contains": _contains,
}
class FilterCriterion(BaseModel):
is_enabled: bool
column: str
op: str
value: Any
source: Optional[str] = ""
is_fixed: bool = False
def parse_filter_criterion(criterion: str) -> Dict[str, Any]:
"""Parse the filter criterion from the string.
Args:
criterion: The string representation of the criterion.
Examples: "label == data"
Returns:
Dict[str, Any]: The column, op, and value dicts required
to construct the FilterCriterion.
"""
    # Parse the longer operator keys first.
    # This avoids splitting on a shorter key that is a substring of a longer key.
operators = sorted(_operator_str_to_func.keys(), key=lambda x: len(x), reverse=True)
column = None
value = None
for op in operators:
if op not in criterion:
continue
candidates = criterion.split(op)
if len(candidates) != 2:
raise ValueError(
"Expected format: <column> <op> <value> (e.g. 'label == car')."
)
column, value = tuple(candidates)
value = value.strip()
if "," in value:
value = [x.strip() for x in value.split(",")]
return dict(column=column.strip(), value=value, op=op)
return None
# raise ValueError(f"Could not find any operation in the string {criterion}")
def _format_criteria(
criteria: List[Union[FilterCriterion, Dict[str, Any]]]
) -> List[FilterCriterion]:
# since the criteria can either be a list of dictionary or of FilterCriterion
# we need to convert them to FilterCriterion
return [
criterion
if isinstance(criterion, FilterCriterion)
else FilterCriterion(**criterion)
for criterion in criteria
]
@reactive()
def filter(
data: Union["DataFrame", "Column"],
criteria: Sequence[Union[FilterCriterion, Dict[str, Any]]],
):
"""Filter data based on operations.
This operation adds q columns to the dataframe where q is the number of queries.
Note, if data is a dataframe, this operation is performed in-place.
TODO (arjundd): Filter numpy and pandas columns first because of speed.
Args:
data: A dataframe or column containing the data to embed.
query: A single or multiple query strings to match against.
input: If ``data`` is a dataframe, the name of the column
to embed. If ``data`` is a column, then the parameter is ignored.
Defaults to None.
input_modality: The input modality. If None, infer from the input column.
query_modality: The query modality. If None, infer from the query column.
return_column_names: Whether to return the names of columns added based
on match.
Returns:
mk.DataFrame: A view of ``data`` with a new column containing the embeddings.
This column will be named according to the ``out_col`` parameter.
"""
# since the criteria can either be a list of dictionary or of FilterCriterion
# we need to convert them to FilterCriterion
criteria = _format_criteria(criteria)
# Filter out criteria that are disabled.
criteria = [criterion for criterion in criteria if criterion.is_enabled]
if len(criteria) == 0:
# we view so that the result is a different dataframe than the input
return data.view()
    # Filter pandas series columns.
    # TODO (arjundd): Make this more efficient by filtering sequentially.
all_masks = []
for criterion in criteria:
col = data[criterion.column]
# values should be split by "," when using in/not-in operators.
if "in" in criterion.op:
if isinstance(criterion.value, str):
value = [x.strip() for x in criterion.value.split(",")]
elif isinstance(criterion.value, list):
value = criterion.value
else:
raise ValueError(
"Expected a list or comma-separated string "
f"for value {criterion.value}."
)
else:
if col.dtype in (np.bool_, bool):
value = criterion.value not in ("False", "false", "0")
else:
value = col.dtype.type(criterion.value)
# Remove trailing and leading "" if the value is a string.
if isinstance(value, str):
value = value.strip('"').strip("'")
# TODO: this logic will fail when the column is a boolean column
            # because all values will be rendered as strings. If the string
# is not empty, col.dtype will cast the string to True.
# e.g. np.asarray("False", dtype=np.bool) --> True
if isinstance(col, ScalarColumn):
value = np.asarray(value, dtype=col.dtype)
if "in" in criterion.op:
value = value.tolist()
# FIXME: Figure out why we cannot pass col for PandasSeriesColumn.
# the .data accessor is an interim solution.
mask = _operator_str_to_func[criterion.op](col.data, value)
all_masks.append(np.asarray(mask))
mask = np.stack(all_masks, axis=1).all(axis=1)
return data[mask]
class Filter(Component):
df: DataFrame = None
criteria: Store[List[FilterCriterion]] = Field(
default_factory=lambda: Store(list())
)
operations: Store[List[str]] = Field(
default_factory=lambda: Store(list(_operator_str_to_func.keys()))
)
title: str = "Filter"
classes: str = ""
def __init__(
self,
df: DataFrame = None,
*,
criteria: List[FilterCriterion] = [],
operations: List[str] = list(_operator_str_to_func.keys()),
title: str = "Filter",
classes: str = "",
):
"""Filter a dataframe based on a list of filter criteria.
Filtering criteria are maintained in a Store. On change of
values in the store, the dataframe is filtered.
"""
super().__init__(
df=df,
criteria=criteria,
operations=operations,
title=title,
classes=classes,
)
def __call__(self, df: DataFrame = None) -> DataFrame:
if df is None:
df = self.df
return filter(df, self.criteria)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/filter/__init__.py
|
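A minimal sketch of calling the module's ``filter`` function directly with FilterCriterion objects (the DataFrame contents are illustrative):
import meerkat as mk
from meerkat.interactive.app.src.lib.component.core.filter import (
    FilterCriterion,
    filter,
)

df = mk.DataFrame({"label": ["car", "cat", "car"], "score": [0.9, 0.2, 0.7]})
criteria = [
    FilterCriterion(is_enabled=True, column="label", op="==", value="car"),
    FilterCriterion(is_enabled=True, column="score", op=">", value=0.5),
]
filtered = filter(df, criteria)  # keeps rows where label == "car" and score > 0.5
print(filtered)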
from typing import Optional
from meerkat.interactive.app.src.lib.component.abstract import Component
class Icon(Component):
data: str = ""
name: str = "Globe2"
fill: Optional[str] = None
size: int = 16
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/icon/__init__.py
|
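A minimal usage sketch for Icon, reusing the default icon name from the class above:
import meerkat as mk

icon = mk.gui.core.Icon(name="Globe2", size=24)
page = mk.gui.Page(component=icon, id="icon")
page.launch()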
from pydantic import Field
from meerkat.interactive.app.src.lib.component.abstract import BaseComponent
from meerkat.interactive.graph import Store
class MultiSelect(BaseComponent):
choices: Store[list]
selected: Store[list] = Field(default_factory=lambda: Store(list()))
gui_type: str = "multiselect"
title: str = None
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/multiselect/__init__.py
|
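A minimal usage sketch for MultiSelect, assuming plain lists are coerced into Stores by the component machinery:
from meerkat.interactive import Page
from meerkat.interactive.app.src.lib.component.core.multiselect import MultiSelect

multiselect = MultiSelect(choices=["apple", "banana", "cherry"], title="Fruits")
page = Page(component=multiselect, id="multiselect")
page.launch()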
from typing import Optional, Union
from meerkat.interactive.app.src.lib.component.abstract import Component
from meerkat.interactive.endpoint import Endpoint
from meerkat.interactive.event import EventInterface
class OnBlurTextbox(EventInterface):
text: Union[str, int, float]
class OnKeyEnterTextbox(EventInterface):
text: Union[str, int, float]
class Textbox(Component):
text: str = ""
placeholder: str = "Write some text..."
debounce_timer: int = 150
classes: str = "grow h-10 px-3 rounded-md shadow-md my-1 border-gray-400"
on_blur: Optional[Endpoint[OnBlurTextbox]] = None
on_keyenter: Optional[Endpoint[OnKeyEnterTextbox]] = None
def __init__(
self,
text: str = "",
*,
placeholder: str = "Write some text...",
debounce_timer: int = 150,
classes: str = "grow h-10 px-3 rounded-md shadow-md my-1 border-gray-400",
on_blur: Optional[Endpoint[OnBlurTextbox]] = None,
on_keyenter: Optional[Endpoint[OnKeyEnterTextbox]] = None,
):
"""A textbox that can be used to get user input.
Attributes:
text: The text in the textbox.
placeholder: The placeholder text.
debounce_timer: The debounce timer in milliseconds.
on_blur: The endpoint to call when the textbox loses focus.
            on_keyenter: The endpoint to call when the user presses enter.
"""
super().__init__(
text=text,
placeholder=placeholder,
debounce_timer=debounce_timer,
classes=classes,
on_blur=on_blur,
on_keyenter=on_keyenter,
)
|
meerkat-main
|
meerkat/interactive/app/src/lib/component/core/textbox/__init__.py
|
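A minimal usage sketch for Textbox with a blur endpoint, mirroring the other test files in this dump:
from meerkat.interactive import Page, endpoint
from meerkat.interactive.app.src.lib.component.core.textbox import Textbox

@endpoint
def on_blur(text):
    # Receives the ``text`` field defined by OnBlurTextbox.
    print("Textbox contents:", text, flush=True)

textbox = Textbox(placeholder="Type something...", on_blur=on_blur)
page = Page(component=textbox, id="textbox")
page.launch()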