from itertools import combinations
from typing import Sequence, Tuple, Union
import cytoolz as tz
from meerkat import DataFrame
from meerkat.columns.abstract import Column
from meerkat.errors import ConcatError
from meerkat.interactive.graph.reactivity import reactive
from .decorators import check_primary_key
@reactive()
@check_primary_key
# @capture_provenance(capture_args=["axis"])
def concat(
objs: Union[Sequence[DataFrame], Sequence[Column]],
axis: Union[str, int] = "rows",
suffixes: Tuple[str] = None,
overwrite: bool = False,
) -> Union[DataFrame, Column]:
"""Concatenate a sequence of columns or a sequence of `DataFrame`s. If
sequence is empty, returns an empty `DataFrame`.
- If concatenating columns, all columns must be of the same type (e.g. all
`ListColumn`).
- If concatenating `DataFrame`s along axis 0 (rows), all `DataFrame`s must have the
same set of columns.
- If concatenating `DataFrame`s along axis 1 (columns), all `DataFrame`s must have
the same length and cannot have any of the same column names.
Args:
objs (Union[Sequence[DataFrame], Sequence[AbstractColumn]]): sequence of columns
or DataFrames.
axis (Union[str, int]): The axis along which to concatenate. Ignored if
concatenating columns.
Returns:
Union[DataFrame, AbstractColumn]: concatenated DataFrame or column
"""
if len(objs) == 0:
return DataFrame()
if not all([type(objs[0]) == type(obj) for obj in objs[1:]]):
_any_object_empty = any([len(obj) == 0 for obj in objs])
if _any_object_empty:
raise ConcatError(
"""All objects passed to concat must be of same type.
This error may be because you have empty `objs`.
Try running `<objs>.filter(lambda x: len(x) > 0)` before calling mk.concat."""
)
raise ConcatError("All objects passed to concat must be of same type.")
if isinstance(objs[0], DataFrame):
if axis == 0 or axis == "rows":
# append new rows
columns = objs[0].columns
if not all([set(df.columns) == set(columns) for df in objs]):
raise ConcatError(
"Can only concatenate DataFrames along axis 0 (rows) if they have "
" the same set of columns names."
)
return objs[0]._clone(
{column: concat([df[column] for df in objs]) for column in columns}
)
elif axis == 1 or axis == "columns":
# append new columns
length = len(objs[0])
if not all([len(df) == length for df in objs]):
raise ConcatError(
"Can only concatenate DataFrames along axis 1 (columns) if they "
"have the same length."
)
# get all column names that appear in more than one DataFrame
shared = set()
for df1, df2 in combinations(objs, 2):
shared |= set(df1.columns) & set(df2.columns)
if shared and not overwrite:
if suffixes is None:
raise ConcatError("Must pass `suffixes` if columns are shared.")
data = tz.merge(
{k + suffixes[idx] if k in shared else k: v for k, v in df.items()}
for idx, df in enumerate(objs)
)
else:
data = tz.merge(dict(df.items()) for df in objs)
return objs[0]._clone(data=data)
else:
raise ConcatError(f"Invalid axis `{axis}` passed to concat.")
elif isinstance(objs[0], Column):
# use the concat method of the column
return objs[0].concat(objs)
else:
raise ConcatError(
"Must pass a sequence of dataframes or a sequence of columns to concat."
)
meerkat-main | meerkat/ops/concat.py
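# A minimal usage sketch for the concat op above, assuming `mk.concat` is
# exported at the top level (as the error message in this file suggests).
import meerkat as mk

df_a = mk.DataFrame({"id": [0, 1], "text": ["a", "b"]})
df_b = mk.DataFrame({"id": [2, 3], "text": ["c", "d"]})

# Row-wise concat: all frames must share the same set of columns.
rows = mk.concat([df_a, df_b], axis="rows")

# Column-wise concat: frames must have equal length and disjoint column names.
df_c = mk.DataFrame({"score": [0.1, 0.9]})
cols = mk.concat([df_a, df_c], axis="columns")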
from typing import List, Union
import numpy as np
from meerkat import DataFrame
from meerkat.interactive.graph import reactive
@reactive()
def sort(
data: DataFrame,
by: Union[str, List[str]],
ascending: Union[bool, List[bool]] = True,
kind: str = "quicksort",
) -> DataFrame:
"""Sort a DataFrame or Column. If a DataFrame, sort by the values in the
specified columns. Similar to ``sort_values`` in pandas.
Args:
data (Union[DataFrame, AbstractColumn]): DataFrame or Column to sort.
by (Union[str, List[str]]): The columns to sort by. Ignored if data is a Column.
ascending (Union[bool, List[bool]]): Whether to sort in ascending or
descending order. If a list, must be the same length as `by`. Defaults
to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
DataFrame: A sorted view of DataFrame.
"""
# Use "==" because `by` can be a Store.
# Store(None) == None is True, but `Store(None) is None` evaluates to False.
if by is None or by == None: # noqa: E711
return data.view()
by = [by] if isinstance(by, str) else by
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError(
f"Length of `ascending` ({len(ascending)}) must be the same as "
f"length of `by` ({len(by)})."
)
df = data[by].to_pandas()
df["_sort_idx_"] = np.arange(len(df))
df = df.sort_values(by=by, ascending=ascending, kind=kind, inplace=False)
sorted_indices = df["_sort_idx_"]
return data[sorted_indices]
meerkat-main | meerkat/ops/sort.py
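# A minimal usage sketch for the sort op above, assuming the import path
# matches the file path (meerkat/ops/sort.py).
import meerkat as mk
from meerkat.ops.sort import sort

df = mk.DataFrame({"name": ["a", "b", "c"], "score": [0.3, 0.1, 0.9]})
# Returns a sorted view of the DataFrame; the original is left untouched.
sorted_df = sort(df, by="score", ascending=False)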
meerkat-main | meerkat/ops/__init__.py
from functools import wraps
from typing import Any, Iterable
from meerkat.interactive.graph import reactive
@reactive()
def cand(*args):
"""Overloaded ``and`` operator.
Use this when you want to use the ``and`` operator on reactive values (e.g. Store).
Args:
*args: The arguments to ``and`` together.
Returns:
The result of the ``and`` operation.
"""
x = args[0]
for y in args[1:]:
x = x and y
return x
@reactive()
def cor(*args):
"""Overloaded ``or`` operator.
Use this when you want to use the ``or`` operator on reactive values (e.g. Store)
Args:
*args: The arguments to ``or`` together.
Returns:
The result of the ``or`` operation.
"""
x = args[0]
for y in args[1:]:
x = x or y
return x
@reactive()
def cnot(x):
"""Overloaded ``not`` operator.
Use this when you want to use the ``not`` operator on reactive values (e.g. Store).
Args:
x: The argument to negate.
Returns:
The result of the ``not`` operation.
"""
return not x
@reactive()
@wraps(all)
def _all(__iterable: Iterable[object]) -> bool:
return all(__iterable)
@reactive()
@wraps(any)
def _any(__iterable) -> bool:
return any(__iterable)
@reactive()
def _bool(x) -> bool:
"""Overloaded ``bool`` operator.
Use this when you want to use the ``bool`` operator on reactive values (e.g. Store).
Args:
x: The argument to convert to a bool.
Returns:
Store[bool] | bool: The result of the bool operation.
"""
return bool(x)
@reactive()
def _complex(real: Any, imag: Any = 0.0) -> complex:
if isinstance(real, str):
return complex(real)
return complex(real, imag)
@reactive()
def _int(__x, base: int = None):
if base is None:
return int(__x)
return int(__x, base=base)
@reactive()
def _float(__x: Any) -> float:
return float(__x)
@reactive()
@wraps(len)
def _len(__obj):
return len(__obj)
@reactive()
@wraps(hex)
def _hex(__number: Any) -> str:
return hex(__number)
@reactive()
@wraps(oct)
def _oct(__number: Any) -> str:
return oct(__number)
@reactive()
def _str(__obj) -> str:
return str(__obj)
@reactive(nested_return=False)
def _list(__iterable) -> list:
return list(__iterable)
@reactive(nested_return=False)
def _tuple(__iterable) -> tuple:
return tuple(__iterable)
@reactive()
@wraps(sum)
def _sum(__iterable) -> float:
return sum(__iterable)
@reactive()
def _dict(**kwargs) -> dict:
return dict(**kwargs)
@reactive(nested_return=False)
def _set(__iterable) -> set:
return set(__iterable)
@reactive()
def _range(*args) -> range:
return range(*args)
@reactive()
@wraps(abs)
def _abs(__x) -> float:
return abs(__x)
@reactive()
def _max(__iterable, *, key=None) -> Any:
"""Overloaded ``max`` operator."""
return max(__iterable, key=key)
@reactive()
def _min(__iterable, *, key=None) -> Any:
"""Overloaded ``min`` operator."""
return min(__iterable, key=key)
@reactive()
def _slice(*args):
"""Overloaded ``slice`` class."""
return slice(*args)
meerkat-main | meerkat/ops/cond.py
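# A minimal usage sketch for the reactive boolean helpers above, assuming the
# import path matches the file path (meerkat/ops/cond.py). Outside a reactive
# context they behave like plain `and` / `or` / `not`; with Store inputs the
# operation is recorded on the reactive graph.
from meerkat.ops.cond import cand, cor, cnot

a, b = True, False
print(cand(a, b))  # False, like `a and b`
print(cor(a, b))   # True,  like `a or b`
print(cnot(b))     # True,  like `not b`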
from typing import Union
import numpy as np
from meerkat import Column, DataFrame
from meerkat.interactive.graph.reactivity import reactive
@reactive()
def shuffle(data: Union[DataFrame, Column], seed=None) -> Union[DataFrame, Column]:
"""Shuffle the rows of a DataFrame or Column.
Shuffling is done out-of-place and with numpy.
Args:
data (Union[DataFrame, Column]): DataFrame or Column to shuffle.
seed (int): Seed to use for shuffling.
Returns:
Union[DataFrame, Column]: Shuffled DataFrame or Column.
"""
idx = np.arange(len(data))
state = np.random.RandomState(seed) if seed is not None else np.random
state.shuffle(idx)
return data[idx]
meerkat-main | meerkat/ops/shuffle.py
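# A minimal usage sketch for the shuffle op above, assuming the import path
# matches the file path (meerkat/ops/shuffle.py).
import meerkat as mk
from meerkat.ops.shuffle import shuffle

df = mk.DataFrame({"x": list(range(10))})
# Out-of-place shuffle; pass a seed for reproducibility.
shuffled = shuffle(df, seed=42)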
import warnings
from inspect import signature
from typing import TYPE_CHECKING, Callable, Dict, Mapping, Sequence, Tuple, Type, Union
import meerkat.tools.docs as docs
from meerkat.block.abstract import BlockView
if TYPE_CHECKING:
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.base import DeferredColumn
from meerkat.dataframe import DataFrame
_SHARED_DOCS_ = {
"input_description": docs.DescriptionSection(
"""
*What gets passed to function?*
* If ${data} is a :class:`DataFrame` and ``outputs`` is not passed, then the \
function's signature is inspected to determine which columns to pass as \
keyword arguments to the function.
For example, if the function is
``lambda age, residence: age > 18 and residence == "NY"``, then
the columns ``age`` and ``residence`` will be passed to the function. If the
columns are not present in the DataFrame, then a `ValueError` will be raised.
* If ${data} is a :class:`DataFrame` and ``outputs`` is ``"single"``, then \
the mapping between columns and function arguments can be overridden by
passing the ``inputs`` argument.
* If ${data} is a :class:`Column` then values of the
column are passed as a single positional argument to the function. The
``inputs`` argument is ignored.
"""
),
"function": docs.Arg(
"""
function (Callable): The function that will be applied to the rows of
``${data}``.
"""
),
"is_batched_fn": docs.Arg(
"""
is_batched_fn (bool, optional): Whether the function must be applied on a
batch of rows. Defaults to False.
"""
),
"batch_size": docs.Arg(
"""
batch_size (int, optional): The size of the batch. Defaults to 1.
"""
),
"inputs": docs.Arg(
"""
inputs (Dict[str, str], optional): Dictionary mapping column names in
``${data}`` to keyword arguments of ``function``. Ignored if ``${data}`` is
a column. When calling ``function`` values from the columns will be fed to
the corresponding keyword arguments. Defaults to None, in which case it
inspects the signature of the function. It then finds the columns with the
same names in the DataFrame and passes the corresponding values to the
function. If the function takes a non-default argument that is not a
column in the DataFrame, the operation will raise a `ValueError`.
"""
),
"outputs": docs.Arg(
"""
outputs (Union[Dict[any, str], Tuple[str]], optional): Controls how the output
of ``function`` is mapped to the output of :func:`${name}`.
Defaults to ``None``.
* If ``None``: the output is inferred from the return type of the
function. See explanation above.
* If ``"single"``: a single :class:`DeferredColumn` is returned.
* If a ``Dict[any, str]``: then a :class:`DataFrame` containing
DeferredColumns is returned. This is useful when the output of
``function`` is a ``Dict``. ``outputs`` maps the outputs of ``function``
to column names in the resulting :class:`DataFrame`.
* If a ``Tuple[str]``: then a :class:`DataFrame` containing
:class:`DeferredColumn` objects is returned. This is useful when the output
of ``function`` is a ``Tuple``. ``outputs`` maps the outputs of
``function`` to column names in the resulting :class:`DataFrame`.
"""
),
"output_type": docs.Arg(
"""
output_type (Union[Dict[str, type], type], optional): Coerce the output
column(s) to this type. Defaults to None, in which case the type is inferred.
"""
),
"materialize": docs.Arg(
"""
materialize (bool, optional): Whether to materialize the input column(s).
Defaults to True.
"""
),
"use_ray": docs.Arg(
"""
use_ray (bool): Use Ray to parallelize the computation. Defaults to False.
"""
),
"num_blocks": docs.Arg(
"""
num_blocks (int): When using Ray, the number of blocks to split the data into.
Defaults to 100.
"""
),
"blocks_per_window": docs.Arg(
"""
blocks_per_window (int): When using Ray, the number of blocks to process in a
single Ray task. Defaults to 10.
"""
),
"pbar": docs.Arg(
"""
pbar (bool): Show a progress bar. Defaults to False.
"""
),
}
@docs.doc(source=_SHARED_DOCS_, data="data", name="defer")
def defer(
data: Union["DataFrame", "Column"],
function: Callable,
is_batched_fn: bool = False,
batch_size: int = 1,
inputs: Union[Mapping[str, str], Sequence[str]] = None,
outputs: Union[Mapping[any, str], Sequence[str]] = None,
output_type: Union[Mapping[str, Type["Column"]], Type["Column"]] = None,
materialize: bool = True,
) -> Union["DataFrame", "DeferredColumn"]:
"""Create one or more DeferredColumns that lazily applies a function to
each row in ${data}.
This function shares nearly the exact same signature
with :func:`map`; the difference is that :func:`~meerkat.defer` returns a column
that has not yet been computed. It is a placeholder for a column that will be
computed later.
Learn more in the user guide: :ref:`guide/dataframe/ops/mapping/deferred`.
{input_description}
*What gets returned by defer?*
* If ``function`` returns a single value, then ``defer``
will return a :class:`DeferredColumn` object.
* If ``function`` returns a dictionary, then ``defer`` will return a
:class:`DataFrame` containing :class:`DeferredColumn` objects. The keys of the
dictionary are used as column names. The ``outputs`` argument can be used to
override the column names.
* If ``function`` returns a tuple, then ``defer`` will return a :class:`DataFrame`
containing :class:`DeferredColumn` objects. The column names will be integers.
The column names can be overridden by passing a tuple to the ``outputs``
argument.
* If ``function`` returns a tuple or a dictionary, then passing ``"single"`` to
the ``outputs`` argument will cause ``defer`` to return a single
:class:`DeferredColumn` that materializes to a :class:`ObjectColumn`.
*How do you execute the deferred map?*
Depending on ``function`` and the ``outputs`` argument, returns either a
:class:`DeferredColumn` or a :class:`DataFrame`. Both are **callables**. To execute
the deferred map, simply call the returned object.
.. note::
This function is also available as a method of :class:`DataFrame` and
:class:`Column` under the name ``defer``.
Args:
${data} (DataFrame): The :class:`DataFrame` or :class:`Column` to which the
function will be applied.
${function}
${is_batched_fn}
${batch_size}
${inputs}
${outputs}
${output_type}
${materialize}
Returns:
Union[DataFrame, DeferredColumn]: A :class:`DeferredColumn` or a
:class:`DataFrame` containing :class:`DeferredColumn` representing the
deferred map.
Examples
---------
We start with a small DataFrame of voters with two columns: `birth_year`, which
contains the birth year of each person, and `residence`, which contains the state in
which each person lives.
.. ipython:: python
import datetime
import meerkat as mk
df = mk.DataFrame({
"birth_year": [1967, 1993, 2010, 1985, 2007, 1990, 1943],
"residence": ["MA", "LA", "NY", "NY", "MA", "MA", "LA"]
})
**Single input column.** Lazily map the column of birth years to a column of ages.
.. ipython:: python
df["age"] = df["birth_year"].defer(
lambda x: datetime.datetime.now().year - x
)
df["age"]
We can materialize the deferred map (*i.e.* run it) by calling the column.
.. ipython:: python
df["age"]()
**Multiple input columns.** Lazily combine the birth year and residence columns
to compute Massachusetts voting eligibility.
.. ipython:: python
df["ma_eligible"] = df.defer(
lambda age, residence: (residence == "MA") and (age >= 18)
)
df["ma_eligible"]()
"""
from meerkat import DeferredColumn
from meerkat.block.deferred_block import DeferredBlock, DeferredOp
from meerkat.columns.abstract import Column, infer_column_type
from meerkat.dataframe import DataFrame
base_function = function
# prepare arguments for LambdaOp
if isinstance(data, Column):
args = [data]
kwargs = {}
elif isinstance(data, DataFrame):
args, kwargs = None, None
if inputs == "row":
pass
elif isinstance(inputs, Mapping):
args = []
kwargs = {kw: data[col_name] for col_name, kw in inputs.items()}
elif isinstance(inputs, Sequence):
# TODO: make this work with a list
args = [data[col_name] for col_name in inputs]
kwargs = {}
elif inputs is None:
# infer mapping from function signature if possible otherwise pass full row
args = []
kwargs = {}
for name, param in signature(function).parameters.items():
if name in data:
kwargs[name] = data[name]
elif param.default is param.empty:
warnings.warn(
f"Non-default argument '{name}' does not have a corresponding "
"column in the DataFrame. If your function expects a full "
"DataFrame row, pass ``inputs='row'`` to ``map``. Otherwise, "
"please provide an `inputs` mapping "
"or pass a lambda function with a different signature. "
"See map documentation for more details.",
)
inputs = "row"
break
if inputs == "row":
args = []
kwargs = {col_name: col for col_name, col in data.items()}
def wrapper(*args, **kwargs):
# FIXME: this should use data._clone instead!
if is_batched_fn:
kwargs = DataFrame(kwargs)
return base_function(kwargs)
function = wrapper
if args is None or kwargs is None:
raise ValueError("``inputs`` must be Mapping, Sequence or 'row'")
op = DeferredOp(
fn=function,
args=args,
kwargs=kwargs,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
return_format=type(outputs) if outputs is not None else None,
materialize_inputs=materialize,
)
block = DeferredBlock.from_block_data(data=op)
first_row = op._get(0) if len(op) > 0 else None
if outputs is None and isinstance(first_row, Dict):
# support for splitting a dict into multiple columns without specifying outputs
outputs = {output_key: output_key for output_key in first_row}
op.return_format = type(outputs)
if outputs is None and isinstance(first_row, Tuple):
# support for splitting a tuple into multiple columns without specifying outputs
outputs = tuple([str(i) for i in range(len(first_row))])
op.return_format = type(outputs)
if outputs is None or outputs == "single":
# can only infer output type if the input columns are nonempty
if output_type is None and first_row is not None:
output_type = infer_column_type([first_row])
if not isinstance(output_type, Type):
raise ValueError(
"Must provide a single `output_type` if `outputs` is None."
)
col = DeferredColumn(
data=BlockView(block_index=None, block=block), output_type=output_type
)
if isinstance(data, Column):
col.formatters = data.formatters.defer()
return col
elif isinstance(outputs, Mapping):
if output_type is None:
output_type = {
outputs[output_key]: infer_column_type([col])
for output_key, col in first_row.items()
}
if not isinstance(output_type, Mapping):
raise ValueError(
"Must provide a `output_type` mapping if `outputs` is a mapping."
)
return DataFrame(
{
col: DeferredColumn(
data=BlockView(block_index=output_key, block=block),
output_type=output_type[outputs[output_key]],
)
for output_key, col in outputs.items()
}
)
elif isinstance(outputs, Sequence):
if output_type is None:
output_type = [type(col) for col in first_row]
if not isinstance(output_type, Sequence):
raise ValueError(
"Must provide a `output_type` sequence if `outputs` is a sequence."
)
return DataFrame(
{
col: DeferredColumn(
data=BlockView(block_index=output_key, block=block),
output_type=output_type[output_key],
)
for output_key, col in enumerate(outputs)
}
)
@docs.doc(source=_SHARED_DOCS_, data="data", name="map")
def map(
data: Union["DataFrame", "Column"],
function: Callable,
is_batched_fn: bool = False,
batch_size: int = 1,
inputs: Union[Mapping[str, str], Sequence[str]] = None,
outputs: Union[Mapping[any, str], Sequence[str]] = None,
output_type: Union[Mapping[str, Type["Column"]], Type["Column"]] = None,
materialize: bool = True,
use_ray: bool = False,
num_blocks: int = 100,
blocks_per_window: int = 10,
pbar: bool = False,
**kwargs,
):
"""Create a new :class:`Column` or :class:`DataFrame` by applying a
function to each row in ${data}.
This function shares nearly the exact same signature
with :func:`defer`; the difference is that :func:`~meerkat.defer` returns a column
that has not yet been computed, while :func:`map` eagerly computes and returns
the result.
Learn more in the user guide: :ref:`guide/dataframe/ops/mapping`.
{input_description}
*What gets returned by map?*
* If ``function`` returns a single value, then ``map`` will return a
:class:`Column` object.
* If ``function`` returns a dictionary, then ``map`` will return a
:class:`DataFrame`. The keys of the dictionary are used as column names. The
``outputs`` argument can be used to override the column names.
* If ``function`` returns a tuple, then ``map`` will return a :class:`DataFrame`.
The column names will be integers. The column names can be overridden by
passing a tuple to the ``outputs`` argument.
* If ``function`` returns a tuple or a dictionary, then passing ``"single"`` to
the ``outputs`` argument will cause ``map`` to return a single
:class:`ObjectColumn`.
.. note::
This function is also available as a method of :class:`DataFrame` and
:class:`Column` under the name ``map``.
Args:
${data} (DataFrame): The :class:`DataFrame` or :class:`Column` to which the
function will be applied.
${function}
${is_batched_fn}
${batch_size}
${inputs}
${outputs}
${output_type}
${materialize}
use_ray (bool): Use Ray to parallelize the computation. Defaults to False.
num_blocks (int): When using Ray, the number of blocks to split the data
into. Defaults to 100.
blocks_per_window (int): When using Ray, the number of blocks to process
in a single Ray task. Defaults to 10.
pbar (bool): Show a progress bar. Defaults to False.
Returns:
Union[DataFrame, Column]: A :class:`Column` or a :class:`DataFrame`.
Examples
---------
We start with a small DataFrame of voters with two columns: `birth_year`, which
contains the birth year of each person, and `residence`, which contains the state in
which each person lives.
.. ipython:: python
import datetime
import meerkat as mk
df = mk.DataFrame({
"birth_year": [1967, 1993, 2010, 1985, 2007, 1990, 1943],
"residence": ["MA", "LA", "NY", "NY", "MA", "MA", "LA"]
})
**Single input column.** Map the column of birth years to a column of ages.
.. ipython:: python
df["age"] = df["birth_year"].map(
lambda x: datetime.datetime.now().year - x
)
df["age"]
**Multiple input columns.** Combine the birth year and residence columns to
compute Massachusetts voting eligibility.
.. ipython:: python
df["ma_eligible"] = df.map(
lambda age, residence: (residence == "MA") and (age >= 18)
)
df["ma_eligible"]
"""
deferred = defer(
data=data,
function=function,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
inputs=inputs,
outputs=outputs,
output_type=output_type,
materialize=materialize,
)
return _materialize(
deferred,
batch_size=batch_size,
pbar=pbar,
use_ray=use_ray,
num_blocks=num_blocks,
blocks_per_window=blocks_per_window,
)
def _materialize(
data: Union["DataFrame", "Column"],
batch_size: int,
pbar: bool,
use_ray: bool,
num_blocks: int,
blocks_per_window: int,
):
if use_ray:
import logging
import numpy as np
import pandas as pd
import pyarrow as pa
import ray
import torch
import meerkat as mk
from meerkat.columns.abstract import column
ray.init(ignore_reinit_error=True, logging_level=logging.ERROR)
ray.data.set_progress_bars(enabled=pbar)
# Step 1: Walk through the DeferredColumns and build a list of functions
curr = data
fns = []
while isinstance(curr, mk.DeferredColumn):
fns.append(curr.data.fn)
# For linear pipelines, there will be either one elem in args or one key in
# kwargs
if curr.data.args:
if len(curr.data.args) > 1:
raise ValueError("Multiple args not supported with `use_ray=True`.")
curr = curr.data.args[0]
elif curr.data.kwargs:
if len(curr.data.kwargs) > 1:
raise ValueError(
"Multiple kwargs not supported with `use_ray=True`."
)
curr = curr.data.kwargs[next(iter(curr.data.kwargs))]
else:
raise ValueError("No args or kwargs.")
# Step 2: Create the ray dataset from the base column
# TODO (dean): test added_dim on other data types
added_dim = False
if isinstance(curr, mk.PandasScalarColumn):
ds = ray.data.from_pandas(pd.DataFrame({"0": curr})).repartition(num_blocks)
fns.append(lambda x: x["0"])
elif isinstance(curr, mk.ArrowScalarColumn):
ds = ray.data.from_pandas(pa.table({"0": curr.data})).repartition(
num_blocks
)
fns.append(lambda x: x["0"])
elif isinstance(curr, mk.NumPyTensorColumn):
ndarrays = curr.data
if ndarrays.ndim == 1:
added_dim = True
ndarrays = np.expand_dims(ndarrays, 1)
ds = ray.data.from_numpy(ndarrays).repartition(num_blocks)
elif isinstance(curr, mk.TorchTensorColumn):
ds = ray.data.from_torch(curr).repartition(num_blocks)
elif isinstance(curr, mk.ObjectColumn):
ds = ray.data.from_items(curr).repartition(num_blocks)
elif isinstance(curr, mk.DataFrame):
raise ValueError(
"Multiple outputs (fan-out) not supported with `use_ray=True`."
)
# TODO (dean): Support fan-out (would have to create multiple pipelines)
# ds = ray.data.from_pandas(curr.data._repr_pandas_()[0])
# fns.append(lambda row: row.values())
else:
raise ValueError(
f"Base column is of unsupported type {type(curr)} with `use_ray=True`."
)
# Step 3: Build the pipeline by walking backwards through fns
pipe: ray.data.DatasetPipeline = ds.window(blocks_per_window=blocks_per_window)
for fn in reversed(fns):
# TODO (dean): if batch_size > 1, then use map_batches
pipe = pipe.map(fn)
# Step 4: Collect the results
result_ds = iter(
pipe.rewindow(blocks_per_window=num_blocks).iter_datasets()
).__next__()
result = []
if data._output_type == mk.NumPyTensorColumn:
for partition in result_ds.to_numpy_refs():
res = ray.get(partition)
if len(res):
result.append(res[0][0] if added_dim else res[0])
if added_dim:
return mk.NumPyTensorColumn.from_array(result)
return column(np.stack(result))
elif data._output_type == mk.TorchTensorColumn:
for partition in result_ds.to_torch():
result.append(partition[0])
return column(torch.stack(result))
elif data._output_type == mk.PandasScalarColumn:
for partition in result_ds.to_pandas_refs():
result.append(ray.get(partition))
return column(pd.concat(result)["value"])
elif data._output_type == mk.ArrowScalarColumn:
for partition in result_ds.to_arrow_refs():
result.append(ray.get(partition)["value"].combine_chunks())
return column(pa.concat_arrays(result))
elif data._output_type == mk.ObjectColumn:
for partition in result_ds.iter_batches():
result.extend(partition)
return column(result)
else:
raise ValueError(
f"Unsupported output type {data._output_type} with `use_ray=True`."
)
else:
from tqdm import tqdm
from .concat import concat
result = []
for batch_start in tqdm(range(0, len(data), batch_size), disable=not pbar):
result.append(
data._get(
slice(batch_start, batch_start + batch_size, 1), materialize=True
)
)
return concat(result)
meerkat-main | meerkat/ops/map.py
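# A minimal sketch contrasting defer and map, reusing the voters example from
# the docstrings above (both ops are also available as DataFrame/Column
# methods, per the notes in their docstrings).
import datetime
import meerkat as mk

df = mk.DataFrame({
    "birth_year": [1967, 1993, 2010, 1985],
    "residence": ["MA", "LA", "NY", "NY"],
})

# defer: builds a DeferredColumn; nothing is computed until it is called.
deferred_age = df["birth_year"].defer(lambda x: datetime.datetime.now().year - x)
df["age"] = deferred_age()  # materialize the deferred map

# map: computes eagerly and returns a regular column.
df["is_adult"] = df["age"].map(lambda age: age >= 18)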
from typing import Union
import numpy as np
from meerkat import Column, DataFrame
from meerkat.interactive.graph.reactivity import reactive
@reactive
def sample(
data: Union[DataFrame, Column],
n: int = None,
frac: float = None,
replace: bool = False,
weights: Union[str, np.ndarray] = None,
random_state: Union[int, np.random.RandomState] = None,
) -> Union[DataFrame, Column]:
"""Select a random sample of rows from DataFrame or Column. Roughly
equivalent to ``sample`` in Pandas
https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sample.html.
Args:
data (Union[DataFrame, AbstractColumn]): DataFrame or Column to sample from.
n (int): Number of samples to draw. If `frac` is specified, this parameter
should not be passed. Defaults to 1 if `frac` is not passed.
frac (float): Fraction of rows to sample. If `n` is specified, this parameter
should not be passed.
replace (bool): Sample with or without replacement. Defaults to False.
weights (Union[str, np.ndarray]): Weights to use for sampling. If `None`
(default), the rows will be sampled uniformly. If a numpy array, the
sample will be weighted accordingly. If a string and `data` is a DataFrame,
the weights will be taken from the column with the specified name. If
weights do not sum to 1, they will be normalized to sum to 1.
random_state (Union[int, np.random.RandomState]): Random state or seed to use
for sampling.
Return:
Union[DataFrame, AbstractColumn]: A random sample of rows from DataFrame or
Column.
"""
import pandas.core.common as com
from pandas.core.sample import process_sampling_size
from pandas.core.sample import sample as _sample
if isinstance(weights, str):
if isinstance(data, Column):
raise ValueError(
"Weights passed to `sample` must be a numpy array if data is a Column."
)
weights = data[weights].to_numpy()
rs = com.random_state(random_state)
n = process_sampling_size(n=n, frac=frac, replace=replace)
if frac is not None:
n = round(frac * len(data))
sampled_indices = _sample(
obj_len=len(data),
size=n,
replace=replace,
weights=weights,
random_state=rs,
)
return data[sampled_indices]
meerkat-main | meerkat/ops/sample.py
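# A minimal usage sketch for the sample op above, assuming the import path
# matches the file path (meerkat/ops/sample.py).
import meerkat as mk
from meerkat.ops.sample import sample

df = mk.DataFrame({"x": list(range(100)), "w": [1.0] * 50 + [3.0] * 50})
# Draw 10 rows without replacement, weighting rows by the "w" column.
subset = sample(df, n=10, weights="w", random_state=0)
# Or sample a fraction of the rows instead of a fixed count.
half = sample(df, frac=0.5, random_state=0)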
from typing import TYPE_CHECKING
import numpy as np
from meerkat import DataFrame, NumPyTensorColumn, TensorColumn, TorchTensorColumn
from meerkat.env import is_torch_available
from meerkat.interactive.graph.reactivity import reactive
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
@reactive
def search(
data: DataFrame,
query: np.ndarray,
by: str = None,
k: int = None,
metric: str = "dot",
score_column: str = None,
) -> DataFrame:
"""Search by a query in a DataFrame.
Args:
data: The DataFrame to search.
query: The query to search with.
by: The column to compare the query against.
k: The number of results to return.
metric: The metric to use for comparison.
score_column: The name of the column to store the scores in.
If ``None``, the scores will not be stored.
Return:
DataFrame: A sorted view of DataFrame.
Examples
---------
df.search("asdjfkdfl", "")
"""
if len(data) <= 1:
raise ValueError("Dataframe must have at least 2 rows.")
by = data[by]
if not isinstance(by, TensorColumn):
raise ValueError("")
# convert query to same backend as by
if isinstance(by, TorchTensorColumn):
import torch
if not torch.is_tensor(query):
query = torch.tensor(query)
query = query.to(by.device)
fn = _torch_search
elif isinstance(by, NumPyTensorColumn):
if is_torch_available():
import torch
if torch.is_tensor(query):
query = query.detach().cpu().numpy()
elif not isinstance(query, np.ndarray):
query = np.array(query)
fn = _numpy_search
else:
raise ValueError("")
scores, indices = fn(query=query, by=by.data, metric=metric, k=k)
data = data[indices]
if score_column is not None:
data[score_column] = scores
return data
def _torch_search(
query: "torch.Tensor", by: "torch.Tensor", metric: str, k: int
) -> "torch.Tensor":
with torch.no_grad():
if len(query.shape) == 1:
query = query.unsqueeze(0)
if metric == "dot":
scores = (by @ query.T).squeeze()
else:
raise ValueError("")
scores, indices = torch.topk(scores, k=k)
return scores.to("cpu").numpy(), indices.to("cpu")
def _numpy_search(query: np.ndarray, by: np.ndarray, metric: str, k: int) -> np.ndarray:
if query.ndim == 1:
query = query[np.newaxis, ...]
if metric == "dot":
scores = np.squeeze(by @ query.T)
else:
raise ValueError("")
if k is not None:
indices = np.argpartition(scores, -k)[-k:]
indices = indices[np.argsort(-scores[indices])]
scores = scores[indices]
else:
indices = np.argsort(-scores)
scores = scores[indices]
return scores, indices
meerkat-main | meerkat/ops/search.py
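# A minimal usage sketch for the search op above, assuming the import path
# matches the file path (meerkat/ops/search.py) and that assigning a 2D numpy
# array to a DataFrame column yields a NumPyTensorColumn.
import numpy as np
import meerkat as mk
from meerkat.ops.search import search

df = mk.DataFrame({"text": ["a", "b", "c"]})
df["embedding"] = np.random.rand(3, 16).astype("float32")
query = np.random.rand(16).astype("float32")

# Top-2 rows by dot-product similarity, with scores stored in a "score" column.
top = search(df, query=query, by="embedding", k=2, score_column="score")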
import os
import re
import meerkat as mk
import meerkat.tools.docs as docs
from meerkat.ops.map import _SHARED_DOCS_
@docs.doc(source=_SHARED_DOCS_)
def complete(
df: mk.DataFrame,
prompt: str,
engine: str,
batch_size: int = 1,
use_ray: bool = False,
num_blocks: int = 100,
blocks_per_window: int = 10,
pbar: bool = False,
client_connection: str = None,
cache_connection: str = "~/.manifest/cache.sqlite",
) -> mk.ScalarColumn:
"""Apply a generative language model to each row in a DataFrame.
Args:
df (DataFrame): The :class:`DataFrame` to which the
function will be applied.
prompt (str): A format-string prompt; fields in curly braces (e.g. ``{text}``)
are filled in with the corresponding column values from each row.
engine (str): The language model engine, specified as ``"<client>/<engine>"``
(e.g. an OpenAI engine as ``"openai/..."``).
${batch_size}
${materialize}
${use_ray}
${num_blocks}
${blocks_per_window}
${pbar}
client_connection: The connection string for the client.
This is typically an API key (e.g. the OpenAI API key).
If it is not provided, it is read from the environment based on the engine
(e.g. ``OPENAI_API_KEY``).
cache_connection: The sqlite connection string for the cache.
Returns:
mk.ScalarColumn: A column containing the generated completions, one per row.
"""
from manifest import Manifest
input_engine = engine
client_name, engine = engine.split("/")
if client_connection is None:
if client_name == "openai":
client_connection = os.environ["OPENAI_API_KEY"]
else:
raise ValueError(
f"Cannot infer client connection from engine {input_engine}."
)
cache_connection = os.path.abspath(os.path.expanduser(cache_connection))
os.makedirs(os.path.dirname(cache_connection), exist_ok=True)
manifest = Manifest(
client_name=client_name,
client_connection=client_connection,
engine=engine,
temperature=0,
max_tokens=1,
cache_name="sqlite",
cache_connection=cache_connection,
)
def _run_manifest(rows: mk.DataFrame):
out = manifest.run([prompt.format(**row) for row in rows.iterrows()])
return out
keys = re.findall(r"{(.*?)}", prompt)
output = mk.map(
df[keys],
function=_run_manifest,
inputs="row",
is_batched_fn=True,
batch_size=batch_size,
pbar=pbar,
use_ray=use_ray,
num_blocks=num_blocks,
blocks_per_window=blocks_per_window,
)
return output
meerkat-main | meerkat/ops/complete.py
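# A minimal usage sketch for the complete op above. It assumes manifest is
# installed and OPENAI_API_KEY is set; the engine string is illustrative.
# Fields in curly braces in the prompt are filled from the matching columns.
import meerkat as mk
from meerkat.ops.complete import complete

df = mk.DataFrame({"review": ["Great movie!", "Terrible plot."]})
sentiments = complete(
    df,
    prompt="Is the sentiment of this review positive or negative? {review}",
    engine="openai/text-davinci-003",  # illustrative engine name
    batch_size=2,
)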
from typing import List, Optional, Tuple, Union
from meerkat import Column, DataFrame, ScalarColumn
from .embed import embed
def match(
data: Union[DataFrame, Column],
query: Union[str, List[str], Tuple[str], ScalarColumn, DataFrame],
against: Optional[str] = None,
against_modality: Optional[str] = None,
query_modality: Optional[str] = None,
encoder: str = "clip",
return_column_names: bool = False,
):
"""Match data to another column.
This operation adds q columns to the dataframe where q is the number of queries.
Note, if data is a dataframe, this operation is performed in-place.
Args:
data: A dataframe or column containing the data to embed.
query: A single or multiple query strings to match against.
against: If ``data`` is a dataframe, the name of the column
to embed. If ``data`` is a column, then the parameter is ignored.
Defaults to None.
against_modality: The modality of the data in the against column. If None,
infer from the against column.
query_modality: The query modality. If None, infer from the query column.
return_column_names: Whether to return the names of columns added based
on match.
Returns:
mk.DataFrame: A view of ``data`` with one new column of scores per query,
named ``match(<against>, <query>)``. If ``return_column_names`` is True,
the names of the new columns are returned as well.
"""
if against not in data:
raise ValueError(f"Column {against} not found in data.")
encoder = "clip"
data_embedding = data[against]
if not isinstance(query, Column):
if isinstance(query, str):
query = [query]
query = ScalarColumn(query)
# Text cannot be embedded with num_workers > 0 because the clip text encoder
# is not pickleable.
to_embedding = embed(
data=query, encoder=encoder, num_workers=0, modality=query_modality, pbar=False
)
scores = data_embedding @ to_embedding.T
column_names = []
for i, query_item in enumerate(query):
col_name = f"match({against}, {query_item})"
data[col_name] = scores[:, i]
column_names.append(col_name)
return (data, column_names) if return_column_names else data
meerkat-main | meerkat/ops/match.py
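# A minimal usage sketch for the match op above. It assumes the CLIP extra is
# installed and that mk.embed has already produced a "clip(img)" embedding
# column (the dataset name follows the embed docstring example).
import meerkat as mk
from meerkat.ops.match import match

df = mk.datasets.get("imagenette")
df = mk.embed(data=df, input="img", encoder="clip")
# Adds a column named "match(clip(img), a photo of a dog)" holding the scores.
df, cols = match(
    df, query="a photo of a dog", against="clip(img)", return_column_names=True
)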
from functools import wraps
from meerkat import DataFrame
def check_primary_key(fn: callable):
"""This decorator should wrap meerkat ops that could potentially invalidate
a primary key.
If the primary key is invalidated, the primary key is removed from
the DataFrame.
"""
@wraps(fn)
def _wrapper(*args, **kwargs):
out = fn(*args, **kwargs)
if isinstance(out, DataFrame):
if out._primary_key is not None and (
out._primary_key not in out
or not out.primary_key._is_valid_primary_key()
):
out.set_primary_key(None, inplace=True)
return out
return _wrapper
meerkat-main | meerkat/ops/decorators.py
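# A minimal sketch of how check_primary_key is meant to be used: wrap an op
# that returns a DataFrame so that a primary key invalidated by the op is
# dropped. The op below is hypothetical and only for illustration.
import meerkat as mk
from meerkat.ops.decorators import check_primary_key


@check_primary_key
def double_rows(df: mk.DataFrame) -> mk.DataFrame:
    # Duplicating rows makes any primary key non-unique, so the decorator
    # clears the primary key on the returned DataFrame.
    return mk.concat([df, df], axis="rows")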
from typing import TYPE_CHECKING, Optional, Tuple, Union
from meerkat import Column, DataFrame, ScalarColumn, TensorColumn
from meerkat.tools.lazy_loader import LazyLoader
if TYPE_CHECKING:
from sklearn.base import ClusterMixin
skcluster = LazyLoader("sklearn.cluster")
def cluster(
data: Union[Column, DataFrame],
input: Optional[str] = None,
method: Union[str, "ClusterMixin"] = "KMeans",
encoder: str = "clip", # add support for auto selection of encoder
modality: str = None,
**kwargs,
) -> Tuple[ScalarColumn, "ClusterMixin"]:
"""Cluster the data in a column. If the column is an unstructured type,
(e.g. image), the column is first embedded then clustered.
Args:
data (Union[DataFrame, AbstractColumn]): The column to cluster or a dataframe
containing the column to cluster.
input (Union[str, Sequence[str]]): The column(s) to cluster by. These columns
will be embedded using the ``encoder`` and the resulting embedding
will be used. Ignored if ``data`` is a Column.
method (Union[str, ClusterMixin]): The clustering method to use.
encoder (str): The encoder to use for the embedding. Defaults to ``clip``.
modality (str): The modality of the data in the input column(s). If None,
the modality is inferred.
**kwargs: Additional keyword arguments to pass to the clustering method.
Returns:
(Union[NumpyArrayColumn, DataFrame], ClusterMixin): A tuple containing the
clustered column and the fit clusterer. If ``data`` is a DataFrame, the
clustered column is added to the DataFrame and it is returned.
"""
if isinstance(data, DataFrame):
col = data[input]
output_col = f"{method}({input})"
else:
col = data
if not isinstance(col, TensorColumn) or len(col.shape) != 2:
raise ValueError("Must pass 2D TensorColumn.")
if isinstance(method, str):
method = getattr(skcluster, method)(**kwargs)
clusters = method.fit_predict(col.data)
if isinstance(data, DataFrame):
data[output_col] = clusters
return data, method
return clusters, method
meerkat-main | meerkat/ops/cluster/__init__.py
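# A minimal usage sketch for the cluster op above, assuming the import path
# matches the file path and that CLIP embeddings were computed with mk.embed
# (dataset and column names follow the embed docstring example).
import meerkat as mk
from meerkat.ops.cluster import cluster

df = mk.datasets.get("imagenette")
df = mk.embed(data=df, input="img", encoder="clip")
# Adds a "KMeans(clip(img))" column with the cluster assignments.
df, clusterer = cluster(df, input="clip(img)", method="KMeans", n_clusters=10)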
from typing import TYPE_CHECKING, Mapping, Tuple, Union
from meerkat import Column, DataFrame, TorchTensorColumn
if TYPE_CHECKING:
from domino import Slicer
def explain(
data: Union[Column, DataFrame],
input: str,
target: Union[str, Mapping[str, str]],
method: Union[str, "Slicer"] = "MixtureSlicer",
encoder: str = "clip", # add support for auto selection of encoder
modality: str = None,
output_col: str = None,
**kwargs,
) -> Tuple[TorchTensorColumn, "Slicer"]:
"""Cluster the data in a column. If the column is an unstructured type,
(e.g. image), the column is first embedded then clustered.
Args:
data (Union[DataFrame, AbstractColumn]): The column to cluster or a dataframe
containing the column to cluster.
input (Union[str, Sequence[str]]): The column(s) to cluster by. These
columns will be embedded using the ``encoder`` and the resulting
embedding will be used. Ignored if ``data`` is a Column.
method (Union[str, Slicer]): The slicing method to use.
encoder (str): The encoder to use for the embedding. Defaults to ``clip``.
modality (str): The modality of the data in the input column. If None,
the modality is inferred.
**kwargs: Additional keyword arguments to pass to the slicing method.
Returns:
(Union[NumpyArrayColumn, DataFrame], Slicer): A tuple containing the
slice column and the fitted slicer. If ``data`` is a DataFrame, the
slice column is added to the DataFrame and it is returned.
"""
if isinstance(data, DataFrame):
# TODO (sabri): Give the user the option to specify the output column.
if output_col is None:
output_col = f"{method}({input},{target})"
else:
output_col = output_col
# embed_col = f"{encoder}({input})"
col = data[input]
else:
col = data
if isinstance(method, str):
import domino
method = getattr(domino, method)(**kwargs)
if isinstance(target, str):
# TODO: make this generalizable – this is a hack to make it work for RFW
target = {"targets": data[target], "pred_probs": None}
elif isinstance(target, Mapping):
target = {k: data[v] for k, v in target.items()}
method.fit(embeddings=col.data, **target)
slices = method.predict(
embeddings=col.data,
targets=None,
pred_probs=None,
)
if isinstance(data, DataFrame):
data[output_col] = slices
return data, method
return slices, method
meerkat-main | meerkat/ops/explain/__init__.py
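# A minimal usage sketch for the explain op above. It assumes domino is
# installed; the embeddings, labels, and predicted probabilities below are
# synthetic stand-ins, and the column names are illustrative.
import numpy as np
import meerkat as mk
from meerkat.ops.explain import explain

df = mk.DataFrame({
    "label": np.random.randint(0, 2, size=100),
    "probs": np.random.rand(100),
})
df["emb"] = np.random.rand(100, 16)

df, slicer = explain(
    df,
    input="emb",
    target={"targets": "label", "pred_probs": "probs"},
    method="MixtureSlicer",
    output_col="slices",
)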
from typing import TYPE_CHECKING, Dict, List, Union
from meerkat.tools.lazy_loader import LazyLoader
from .encoder import Encoder
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
def transformers(
variant: str = "bert-large-cased", device: Union[int, str] = "cpu"
) -> Dict[str, Encoder]:
"""Transformer encoders.
- "text"
Encoders will map these different modalities to the same embedding space.
Args:
variant (str, optional): A model name listed by `clip.available_models()`, or
the path to a model checkpoint containing the state_dict. Defaults to
"ViT-B/32".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
"""
try:
from transformers import AutoModel, AutoTokenizer
except ImportError:
raise ImportError("To embed with transformers run `pip install transformers")
tokenizer = AutoTokenizer.from_pretrained(variant)
model = AutoModel.from_pretrained(variant)
model.to(device)
def _encode(x: List[str]) -> "torch.Tensor":
# need to coerce to list in case someone passes in a pandas series or ndarray
x = list(x)
return model(
**tokenizer(x, return_tensors="pt", padding=True, truncation=True).to(
device=device
)
).last_hidden_state[:, 0]
return {
"text": Encoder(
# need to squeeze out the batch dimension for compatibility with collate
encode=_encode,
preprocess=lambda x: x,
),
}
meerkat-main | meerkat/ops/embed/transformers.py
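# A minimal usage sketch: calling the transformers encoder factory above
# directly and encoding a small batch of strings (the variant name is just an
# illustrative Hugging Face model id).
from meerkat.ops.embed.transformers import transformers

encoder = transformers(variant="bert-base-uncased", device="cpu")["text"]
embeddings = encoder.encode(["meerkats are social animals", "so are humans"])
print(embeddings.shape)  # (2, hidden_size): one [CLS] embedding per string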
import functools
from typing import Any, List, Optional, Sequence
from fvcore.common.registry import Registry as _Registry
from tabulate import tabulate
class Registry(_Registry):
"""Extension of fvcore's registry that supports aliases."""
_ALIAS_KEYWORDS = ("_aliases", "_ALIASES")
def __init__(self, name: str):
super().__init__(name=name)
self._metadata_map = {}
@functools.lru_cache(maxsize=128)
def get(self, name: str, *args, **kwargs) -> Any:
ret = self._obj_map.get(name)
if ret is None:
raise KeyError(
"No object named '{}' found in '{}' registry!".format(name, self._name)
)
return ret(*args, **kwargs)
def _get_aliases(self, obj_func_or_class):
for kw in self._ALIAS_KEYWORDS:
if hasattr(obj_func_or_class, kw):
return getattr(obj_func_or_class, kw)
return []
def register(
self, obj: object = None, aliases: Sequence[str] = None
) -> Optional[object]:
if obj is None:
# used as a decorator
def deco(func_or_class: object, aliases=None) -> object:
name = func_or_class.__name__ # pyre-ignore
self._do_register(name, func_or_class)
if aliases is None:
aliases = self._get_aliases(func_or_class)
if not isinstance(aliases, (list, tuple, set)):
aliases = [aliases]
for alias in aliases:
self._do_register(alias, func_or_class)
return func_or_class
kwargs = {"aliases": aliases}
if any(v is not None for v in kwargs.values()):
return functools.partial(deco, **kwargs)
else:
return deco
name = obj.__name__ # pyre-ignore
self._do_register(name, obj)
if aliases is None:
aliases = self._get_aliases(obj)
for alias in aliases:
self._do_register(alias, obj)
def _do_register(self, name: str, obj: Any, **kwargs) -> None:
self._metadata_map[name] = {"name": name, "description": obj.__doc__, **kwargs}
return super()._do_register(name, obj)
@property
def names(self) -> List[str]:
return list(self._obj_map.keys())
def __repr__(self) -> str:
table = tabulate(self._metadata_map.values(), tablefmt="fancy_grid")
return "Registry of {}:\n".format(self._name) + table
def __str__(self) -> str:
return self.__repr__()
encoders = Registry(name="encoders")
meerkat-main | meerkat/ops/embed/registry.py
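# A minimal sketch of registering a custom encoder with the registry above so
# it can be retrieved by name or alias; the "passthrough" encoder is purely
# illustrative.
from meerkat.ops.embed.encoder import Encoder
from meerkat.ops.embed.registry import encoders


@encoders.register(aliases=["identity"])
def passthrough(device: str = "cpu"):
    """Trivial encoder that returns its text input unchanged."""
    return {"text": Encoder(encode=lambda x: x, preprocess=lambda x: x)}


enc = encoders.get("identity", device="cpu")  # the alias resolves to passthrough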
from typing import Callable, Union
import PIL
import meerkat as mk
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.tools.utils import choose_device
from .clip import clip
from .encoder import Encoder
from .registry import encoders
from .robust import robust
from .transformers import transformers
bit = LazyLoader(".bit")
torch = LazyLoader("torch")
__all__ = ["clip", "bit", "transformers", "robust", "embed"]
def infer_modality(col: mk.Column):
if isinstance(col, mk.ImageColumn):
return "image"
elif isinstance(col, (mk.ScalarColumn, str)):
return "text"
elif isinstance(col, mk.ArrowScalarColumn):
import pyarrow
if isinstance(col[0], pyarrow.lib.StringScalar):
return "text"
else:
raise ValueError(
f"Cannot infer modality from column of type {type(col)}. "
"Please pass in the modality argument explicitly with "
"`modality='text'` or `modality='image'`."
)
# @cache(params=["encoder", "modality", ""])
def embed(
data: Union[mk.DataFrame, mk.Column, str, PIL.Image.Image],
input: str = None,
encoder: Union[str, Encoder] = "clip",
modality: str = None,
out_col: str = None,
device: Union[int, str] = "auto",
mmap_dir: str = None,
num_workers: int = 0,
batch_size: int = 128,
pbar: bool = True,
**kwargs,
) -> Union[mk.DataFrame, mk.Column]:
"""Embed a column of data with an encoder from the encoder registry.
Examples
--------
Suppose you have an Image dataset (e.g. Imagenette, CIFAR-10) loaded into a
`Meerkat DataFrame <https://github.com/robustness-gym/meerkat>`_. You can embed the
images in the dataset with CLIP using a code snippet like:
.. code-block:: python
import meerkat as mk
df = mk.datasets.get("imagenette")
df = mk.embed(
data=df,
input_col="img",
encoder="clip"
)
Args:
data (Union[mk.DataFrame, mk.AbstractColumn]): A dataframe or column
containing the data to embed.
input (str, optional): If ``data`` is a dataframe, the name of the column
to embed. If ``data`` is a column, then the parameter is ignored. Defaults
to None.
encoder (Union[str, Encoder], optional): Name of the encoder to use. List
supported encoders with ``meerkat.ops.embed.encoders``. Defaults to "clip".
Alternatively, pass an :class:`~meerkat.ops.embed.encoder.Encoder` object
containing a custom encoder.
modality (str, optional): The modality of the data to be embedded. Defaults to
None, in which case the modality is inferred from the type of the input
column.
out_col (str, optional): The name of the column where the embeddings are stored.
Defaults to None, in which case it is ``"{encoder}({input})"``.
device (Union[int, str], optional): The device on which the encoder will run.
Defaults to "auto".
mmap_dir (str, optional): The path to directory where a memory-mapped file
containing the embeddings will be written. Defaults to None, in which case
the embeddings are not memmapped.
num_workers (int, optional): Number of worker processes used to load the data
from disk. Defaults to 0.
batch_size (int, optional): Size of the batches to use. Defaults to 128.
**kwargs: Additional keyword arguments are passed to the encoder. To see
supported arguments for each encoder, see the encoder documentation (e.g.
:func:`~meerkat.ops.embed.clip`).
Returns:
mk.DataFrame: A view of ``data`` with a new column containing the embeddings.
This column will be named according to the ``out_col`` parameter.
"""
col = data if isinstance(data, mk.Column) else data[input]
if len(data) == 0:
return data
device = choose_device(device)
if out_col is None:
out_col = f"{encoder}({input})"
if modality is None:
modality = infer_modality(col=col)
# TODO(karan): a hacky way to handle error with processing
# pyarrow.lib.StringScalars in a mk.ArrowArrayColumn
if modality == "text" and isinstance(col, mk.ArrowScalarColumn):
col = mk.ScalarColumn(col.to_pandas())
if isinstance(encoder, str):
encoder = encoders.get(encoder, device=device, **kwargs)
if isinstance(encoder, dict):
if modality not in encoder:
raise ValueError(
f'Encoder "{encoder}" does not support modality "{modality}".'
)
encoder = encoder[modality]
out = _embed(
col=col,
encode=encoder.encode,
preprocess=encoder.preprocess,
collate=encoder.collate,
device=device,
mmap_dir=mmap_dir,
num_workers=num_workers,
batch_size=batch_size,
pbar=pbar,
)
if isinstance(data, mk.DataFrame):
data[out_col] = out
return data
else:
return out
def _embed(
col: mk.Column,
encode: Callable,
preprocess: Callable,
collate: Callable,
device: int = None,
mmap_dir: str = None,
num_workers: int = 0,
batch_size: int = 128,
pbar: bool = True,
):
def _encode(x):
out = encode(_prepare_input(x))
if torch.is_tensor(out):
out = out.cpu().detach().numpy()
return out
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if preprocess is not None:
embed_input = col.defer(preprocess)
else:
embed_input = col
if collate is not None:
embed_input.collate_fn = collate
def _prepare_input(x):
if isinstance(x, mk.Column):
x = x.data
if torch.is_tensor(x):
x = x.to(device)
return x
with torch.no_grad():
out = embed_input.map(
_encode,
pbar=pbar,
is_batched_fn=True,
batch_size=batch_size,
# num_workers=num_workers,
# mmap=mmap_dir is not None,
# mmap_path=None
# if mmap_dir is None
# else os.path.join(mmap_dir, "emb_mmap.npy"),
# flush_size=128,
)
return out
meerkat-main | meerkat/ops/embed/__init__.py
from dataclasses import dataclass
@dataclass
class Encoder:
encode: callable
preprocess: callable = None
collate: callable = None
meerkat-main | meerkat/ops/embed/encoder.py
from functools import partial
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
def _get_reduction_fn(reduction_name):
if reduction_name == "max":
reduction_fn = partial(torch.mean, dim=[-1, -2])
elif reduction_name == "mean":
reduction_fn = partial(torch.mean, dim=[-1, -2])
else:
raise ValueError(f"reduction_fn {reduction_name} not supported.")
reduction_fn.__name__ = reduction_name
return reduction_fn
class ActivationExtractor:
"""Class for extracting activations a targetted intermediate layer."""
def __init__(self, reduction_fn: callable = None):
self.activation = None
self.reduction_fn = reduction_fn
def add_hook(self, module, input, output):
if self.reduction_fn is not None:
output = self.reduction_fn(output)
self.activation = output
meerkat-main | meerkat/ops/embed/utils.py
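# A minimal sketch of ActivationExtractor: hook it onto an intermediate layer
# so a forward pass stores that layer's (optionally reduced) activation. The
# torchvision model is only a stand-in for illustration.
import torch
import torchvision as tv
from meerkat.ops.embed.utils import ActivationExtractor, _get_reduction_fn

model = tv.models.resnet18()
extractor = ActivationExtractor(reduction_fn=_get_reduction_fn("mean"))
# layer4 outputs (batch, 512, H, W); the reduction averages over H and W.
model.layer4.register_forward_hook(extractor.add_hook)

with torch.no_grad():
    model(torch.randn(1, 3, 224, 224))
print(extractor.activation.shape)  # torch.Size([1, 512])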
import io
from collections import OrderedDict
from typing import Dict, Union
import numpy as np
import PIL
import requests
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.tools.utils import nested_getattr
from .encoder import Encoder
from .registry import encoders
from .utils import ActivationExtractor, _get_reduction_fn
torch = LazyLoader("torch")
nn = LazyLoader("torch.nn")
F = LazyLoader("torch.nn.functional")
# this implementation is primarily an adaptation of this colab
# https://colab.research.google.com/github/google-research/big_transfer/blob/master/colabs/big_transfer_pytorch.ipynb
@encoders.register
def bit(
variant: str = "BiT-M-R50x1",
device: Union[int, str] = "cpu",
reduction: str = "mean",
layer: str = "body",
) -> Dict[str, Encoder]:
"""Big Transfer (BiT) encoders [kolesnivok_2019]_. Includes encoders for
the following modalities:
- "image"
Args:
variant (str): The variant of the model to use. Variants include
"BiT-M-R50x1", "BiT-M-R101x3", "Bit-M-R152x4". Defaults to "BiT-M-R50x1".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
reduction (str, optional): The reduction function used to reduce image
embeddings of shape (batch x height x width x dimensions) to (batch x
dimensions). Defaults to "mean". Other options include "max".
layer (str, optional): The layer of the model from which to extract the
embeddings. Defaults to "body".
.. [kolesnivok_2019]
Kolesnikov, A. et al. Big Transfer (BiT): General Visual Representation
Learning. arXiv [cs.CV] (2019)
"""
try:
# flake8: noqa
pass
except ImportError:
raise ImportError(
"To embed with bit install domino with the `bit` submodule. For example, "
"pip install meerkat[bit]."
)
model = _get_model(variant=variant)
layer = nested_getattr(model, layer)
extractor = ActivationExtractor(reduction_fn=_get_reduction_fn(reduction))
layer.register_forward_hook(extractor.add_hook)
model.to(device)
@torch.no_grad()
def _embed(batch: "torch.Tensor"):
model(batch) # run forward pass, but don't collect output
return extractor.activation
return {"image": Encoder(encode=_embed, preprocess=transform)}
def transform(img: PIL.Image.Image):
import torchvision as tv
transform = tv.transforms.Compose(
[
tv.transforms.Resize(
(128, 128), interpolation=tv.transforms.InterpolationMode.BILINEAR
),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
return transform(img)
def _get_weights(variant: str):
response = requests.get(f"https://storage.googleapis.com/bit_models/{variant}.npz")
response.raise_for_status()
return np.load(io.BytesIO(response.content))
def _get_model(variant: str):
weights = _get_weights(variant=variant)
# BLOCK_UNITS expects model names like "r50"
model_str = variant.split("-")[-1].split("x")[0].lower()
model = ResNetV2(ResNetV2.BLOCK_UNITS[model_str], width_factor=1)
model.load_from(weights)
return model
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(
cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups
)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = np.transpose(conv_weights, [3, 2, 0, 1])
return torch.from_numpy(conv_weights)
class PreActBottleneck(nn.Module):
"""Follows the implementation of "Identity Mappings in Deep Residual
Networks" here:
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-
act.lua.
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride) # Original ResNetv2 has it on conv1!!
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
# Projection also with pre-activation according to paper.
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
# Conv'ed branch
out = self.relu(self.gn1(x))
# Residual branch
residual = x
if hasattr(self, "downsample"):
residual = self.downsample(out)
# The first block has already applied pre-act before splitting, see Appendix.
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=""):
with torch.no_grad():
self.conv1.weight.copy_(
tf2th(weights[prefix + "a/standardized_conv2d/kernel"])
)
self.conv2.weight.copy_(
tf2th(weights[prefix + "b/standardized_conv2d/kernel"])
)
self.conv3.weight.copy_(
tf2th(weights[prefix + "c/standardized_conv2d/kernel"])
)
self.gn1.weight.copy_(tf2th(weights[prefix + "a/group_norm/gamma"]))
self.gn2.weight.copy_(tf2th(weights[prefix + "b/group_norm/gamma"]))
self.gn3.weight.copy_(tf2th(weights[prefix + "c/group_norm/gamma"]))
self.gn1.bias.copy_(tf2th(weights[prefix + "a/group_norm/beta"]))
self.gn2.bias.copy_(tf2th(weights[prefix + "b/group_norm/beta"]))
self.gn3.bias.copy_(tf2th(weights[prefix + "c/group_norm/beta"]))
if hasattr(self, "downsample"):
self.downsample.weight.copy_(
tf2th(weights[prefix + "a/proj/standardized_conv2d/kernel"])
)
return self
class ResNetV2(nn.Module):
BLOCK_UNITS = {
"r50": [3, 4, 6, 3],
"r101": [3, 4, 23, 3],
"r152": [3, 8, 36, 3],
}
def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
super().__init__()
wf = width_factor # shortcut 'cause we'll use it a lot.
self.root = nn.Sequential(
OrderedDict(
[
(
"conv",
StdConv2d(
3, 64 * wf, kernel_size=7, stride=2, padding=3, bias=False
),
),
("padp", nn.ConstantPad2d(1, 0)),
("pool", nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
# The following is subtly not the same!
# ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
self.body = nn.Sequential(
OrderedDict(
[
(
"block1",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=64 * wf, cout=256 * wf, cmid=64 * wf
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=256 * wf, cout=256 * wf, cmid=64 * wf
),
)
for i in range(2, block_units[0] + 1)
],
)
),
),
(
"block2",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=256 * wf,
cout=512 * wf,
cmid=128 * wf,
stride=2,
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=512 * wf, cout=512 * wf, cmid=128 * wf
),
)
for i in range(2, block_units[1] + 1)
],
)
),
),
(
"block3",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=512 * wf,
cout=1024 * wf,
cmid=256 * wf,
stride=2,
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=1024 * wf, cout=1024 * wf, cmid=256 * wf
),
)
for i in range(2, block_units[2] + 1)
],
)
),
),
(
"block4",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=1024 * wf,
cout=2048 * wf,
cmid=512 * wf,
stride=2,
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=2048 * wf, cout=2048 * wf, cmid=512 * wf
),
)
for i in range(2, block_units[3] + 1)
],
)
),
),
]
)
)
self.zero_head = zero_head
self.head = nn.Sequential(
OrderedDict(
[
("gn", nn.GroupNorm(32, 2048 * wf)),
("relu", nn.ReLU(inplace=True)),
("avg", nn.AdaptiveAvgPool2d(output_size=1)),
("conv", nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)),
]
)
)
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1) # We should have no spatial shape left.
return x[..., 0, 0]
def load_from(self, weights, prefix="resnet/"):
with torch.no_grad():
self.root.conv.weight.copy_(
tf2th(weights[f"{prefix}root_block/standardized_conv2d/kernel"])
)
self.head.gn.weight.copy_(tf2th(weights[f"{prefix}group_norm/gamma"]))
self.head.gn.bias.copy_(tf2th(weights[f"{prefix}group_norm/beta"]))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(
tf2th(weights[f"{prefix}head/conv2d/kernel"])
)
self.head.conv.bias.copy_(tf2th(weights[f"{prefix}head/conv2d/bias"]))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f"{prefix}{bname}/{uname}/")
return self
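# Example (minimal sketch): constructing a BiT ResNetV2 backbone and running a
# dummy forward pass. The width factor and `head_size=10` are illustrative
# choices, not values prescribed by this module.
if __name__ == "__main__":
    model = ResNetV2(ResNetV2.BLOCK_UNITS["r50"], width_factor=1, head_size=10)
    dummy = torch.randn(2, 3, 224, 224)  # a batch of two RGB images
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 10])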
|
meerkat-main
|
meerkat/ops/embed/bit.py
|
import os
import subprocess
from typing import Dict, Union
from .encoder import Encoder
from .registry import encoders
VARIANTS = {
# flake8: noqa
"imagenet_l2_3_0": "https://www.dropbox.com/s/knf4uimlqsi1yz8/imagenet_l2_3_0.pt?dl=0",
"cifar_l2_1_0": "https://www.dropbox.com/s/s2x7thisiqxz095/cifar_l2_1_0.pt?dl=0",
# flake8: noqa
"imagenet_linf_8": "https://www.dropbox.com/s/yxn15a9zklz3s8q/imagenet_linf_8.pt?dl=0",
}
@encoders.register
def robust(
variant: str = "imagenet_l2_3_0",
device: Union[int, str] = "cpu",
model_path: str = None,
) -> Dict[str, Encoder]:
"""Image classifier trained with adversarial robustness loss.
[engstrom_2019]_.
Args:
variant (str, optional): One of ["imagenet_l2_3_0", "cifar_l2_1_0",
"imagenet_linf_8"].Defaults to "imagenet_l2_3_0".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
.. [engstrom_2019]
@misc{robustness,
title={Robustness (Python Library)},
author={Logan Engstrom and Andrew Ilyas and Hadi Salman and Shibani
Santurkar and Dimitris Tsipras},
year={2019},
url={https://github.com/MadryLab/robustness}
}
"""
model_path = (
os.path.expanduser("~/.cache/domino/robust/robust_resnet50.pth")
if model_path is None
else model_path
)
model = _load_robust_model(model_path=model_path, variant=variant).to(device)
return {
"image": Encoder(
encode=lambda x: model(x, with_latent=True)[0][1],
preprocess=_transform_image,
),
}
def _load_robust_model(model_path: str, variant: str):
try:
from robustness import datasets as dataset_utils
from robustness import model_utils
except ImportError:
raise ImportError("To embed with robust run `pip install robustness`")
# ensure model_path directories exist
os.makedirs(os.path.dirname(model_path), exist_ok=True)
    subprocess.run(
        [
            "wget",
            "-O",
            model_path,
            # download the checkpoint that matches the requested variant
            VARIANTS[variant],
        ]
    )
dataset_function = getattr(dataset_utils, "ImageNet")
dataset = dataset_function("")
model_kwargs = {
"arch": variant,
"dataset": dataset,
"resume_path": model_path,
"parallel": False,
}
model, _ = model_utils.make_and_restore_model(**model_kwargs)
model.eval()
return model
def _transform_image(img):
from torchvision import transforms
return transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)(img)
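# Example (minimal sketch): using the registered "robust" encoder. This assumes
# the optional `robustness` package is installed and the checkpoint download
# succeeds; the image path below is purely illustrative.
if __name__ == "__main__":
    from PIL import Image
    encoders = robust(variant="imagenet_l2_3_0", device="cpu")
    image_encoder = encoders["image"]
    img = Image.open("example.jpg")  # hypothetical local image
    batch = image_encoder.preprocess(img).unsqueeze(0)  # add a batch dimension
    embedding = image_encoder.encode(batch)
    print(embedding.shape)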
|
meerkat-main
|
meerkat/ops/embed/robust.py
|
from typing import Dict, Union
from .encoder import Encoder
from .registry import encoders
@encoders.register
def clip(
variant: str = "ViT-B/32", device: Union[int, str] = "cpu"
) -> Dict[str, Encoder]:
"""Contrastive Language-Image Pre-training (CLIP) encoders [radford_2021]_.
Includes encoders for the following modalities:
- "text"
- "image"
Encoders will map these different modalities to the same embedding space.
Args:
variant (str, optional): A model name listed by `clip.available_models()`, or
the path to a model checkpoint containing the state_dict. Defaults to
"ViT-B/32".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
.. [radford_2021]
Radford, A. et al. Learning Transferable Visual Models From Natural Language
Supervision. arXiv [cs.CV] (2021)
"""
try:
from clip import load, tokenize
except ImportError:
raise ImportError(
"To embed with CLIP you must install the. "
"Run `pip install ftfy regex git+https://github.com/openai/CLIP.git`."
)
model, preprocess = load(variant, device=device)
return {
"image": Encoder(encode=model.encode_image, preprocess=preprocess),
"text": Encoder(
# need to squeeze out the batch dimension for compatibility with collate
encode=model.encode_text,
preprocess=lambda x: tokenize(x, truncate=True).squeeze(0),
),
}
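# Example (minimal sketch): embedding text and an image into CLIP's shared
# space and comparing them. This assumes the OpenAI `clip` package is
# installed; the image path is purely illustrative.
if __name__ == "__main__":
    import torch
    from PIL import Image
    encoders = clip(variant="ViT-B/32", device="cpu")
    tokens = encoders["text"].preprocess("a photo of a dog").unsqueeze(0)
    text_emb = encoders["text"].encode(tokens)
    pixels = encoders["image"].preprocess(Image.open("dog.jpg")).unsqueeze(0)
    image_emb = encoders["image"].encode(pixels)
    # cosine similarity between the two embeddings
    print(torch.nn.functional.cosine_similarity(text_emb, image_emb).item())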
|
meerkat-main
|
meerkat/ops/embed/clip.py
|
from __future__ import annotations
from functools import wraps
from typing import Any, Callable, Dict, List, Sequence, Tuple, Union
import numpy as np
from meerkat.dataframe import DataFrame
from meerkat.interactive.graph.reactivity import reactive
from meerkat.mixins.identifiable import IdentifiableMixin
def sets_only(fn: Callable) -> Callable:
@wraps(fn)
def wrapped(self, *args, **kwargs):
if self.slice_type == "sets":
return fn(self, *args, **kwargs)
else:
raise ValueError("This method is only valid for sets")
return wrapped
SliceKey = Union[str, int]
class SliceBy(IdentifiableMixin):
_self_identifiable_group: str = "slicebys"
def __init__(
self,
data: DataFrame,
by: Union[List[str], str],
sets: Dict[Union[SliceKey, Tuple[SliceKey]], np.ndarray] = None,
        scores: Dict[Union[SliceKey, Tuple[SliceKey]], np.ndarray] = None,
masks: Dict[Union[SliceKey, Tuple[SliceKey]], np.ndarray] = None,
):
super().__init__()
# exactly one of sets and scores must be provided
if (sets is None) == (scores is None):
raise ValueError("Exactly one of sets and scores must be provided")
self.slice_type = "scores" if scores is not None else "sets"
self.slices = scores if scores is not None else sets
self.data = data
if isinstance(by, str):
by = [by]
self.by = by
# # prepare the gui object
# from meerkat.interactive.gui import SliceByGUI
# self.gui = SliceByGUI(self)
self.slice = SliceIndexer(self)
def __len__(self) -> int:
return len(self.slices)
def mean(self, *args, **kwargs) -> DataFrame:
return self._aggregate(lambda x: x.mean(*args, **kwargs))
@sets_only
def count(self, *args, **kwargs) -> DataFrame:
return self._aggregate(lambda x: len(x))
@sets_only
def median(self, *args, **kwargs) -> DataFrame:
return self._aggregate(lambda x: x.median(*args, **kwargs))
@sets_only
def aggregate(self, function: Callable, accepts_df: bool = False) -> DataFrame:
"""_summary_
Args:
function (Callable): _description_
accepts_df (bool, optional): _description_. Defaults to False.
Returns:
DataFrame: _description_
"""
return self._aggregate(f=function, accepts_df=accepts_df)
@property
def slice_keys(self):
return sorted(list(self.slices.keys()))
def _aggregate(self, f: Callable, accepts_df: bool = False) -> DataFrame:
"""self.sets are a dictionary of {labels : [sets]}"""
# means will be a list of dictionaries where each element in the dict
out = []
# TODO (Sabri): This is an extremely slow way of doing this – we need to
# vectorize it
for slice_key in self.slice_keys:
if self.slice_type == "scores":
raise NotImplementedError
else:
slice_df = self.data[self.slices[slice_key]]
slice_values: Dict[str, Any] = slice_df.aggregate(
f, accepts_df=accepts_df
)
out.append(slice_values)
from meerkat.dataframe import DataFrame
# create DataFrame as a list of rows.
out = DataFrame(out)
# add the by columns.
if len(self.slice_keys) > 0:
if len(self.by) > 1:
columns = list(zip(*self.slice_keys))
for i, col in enumerate(self.by):
out[col] = columns[i]
else:
col = self.by[0]
out[col] = self.slice_keys
out = out.set_primary_key(col)
return out
def _get(self, slice_key: str, index, materialize: bool = False) -> List[Any]:
"""Get rows from a slice by."""
if self.slice_type == "sets":
return self.data._get(
self.slices[slice_key][index], materialize=materialize
)
else:
sorted = self.data[np.argsort(-np.array(self.slices[slice_key]))]
return sorted._get(index, materialize=materialize)
def get_slice_length(self, slice_key: SliceKey) -> int:
if self.slice_type == "sets":
return len(self.slices[slice_key])
else:
return len(self.data)
def __getitem__(self, column: Union[str, Sequence[str]]) -> SliceBy:
if isinstance(column, str):
column = [column]
if self.slice_type == "sets":
return self.__class__(data=self.data[column], sets=self.slices, by=self.by)
else:
return self.__class__(
data=self.data[column], scores=self.slices, by=self.by
)
class SliceIndexer:
def __init__(self, obj: object):
self.obj = obj
def __getitem__(self, index):
key, index = index
return self.obj._get(key, index, materialize=False)
@reactive
def sliceby(
data: DataFrame,
by: Union[str, Sequence[str]] = None,
key_mapping: Dict[int, str] = None,
) -> SliceBy:
"""Perform a groupby operation on a DataFrame or Column (similar to a
`DataFrame.groupby` and `Series.groupby` operations in Pandas).j.
Args:
data (Union[DataFrame, AbstractColumn]): The data to group.
by (Union[str, Sequence[str]]): The column(s) to group by. Ignored if ``data``
is a Column.
Returns:
Union[DataFrameGroupBy, AbstractColumnGroupBy]: A GroupBy object.
"""
if isinstance(by, str):
by = [by]
return SliceBy(
data=data,
by="slice",
sets={curr_by: np.where(data[curr_by] == 1)[0] for curr_by in by},
)
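# Example (minimal sketch): slicing an in-memory DataFrame by binary membership
# columns and aggregating within each slice. Column names are illustrative.
if __name__ == "__main__":
    import meerkat as mk
    df = mk.DataFrame(
        {
            "accuracy": [1.0, 0.0, 1.0, 1.0],
            "outdoor": [1, 0, 1, 0],
            "night": [0, 1, 1, 0],
        }
    )
    sb = sliceby(df, by=["outdoor", "night"])
    # mean accuracy within each slice
    print(sb["accuracy"].mean())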
|
meerkat-main
|
meerkat/ops/sliceby/sliceby.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Mapping, Sequence, Tuple, Union
import numpy as np
from meerkat.dataframe import DataFrame
from meerkat.interactive.graph.reactivity import reactive
from meerkat.ops.explain import explain
from .sliceby import SliceBy
if TYPE_CHECKING:
    import domino
class ExplainBy(SliceBy):
def __init__(
self,
data: DataFrame,
by: Union[List[str], str],
scores: Dict[Union[str, Tuple[str]], np.ndarray] = None,
sets: Dict[Union[str, Tuple[str]], np.ndarray] = None,
):
super().__init__(data=data, by=by, sets=sets, scores=scores)
@reactive()
def explainby(
data: DataFrame,
by: Union[str, Sequence[str]],
    target: Union[str, Mapping[str, str]],
method: Union[str, "domino.Slicer"] = "MixtureSlicer",
encoder: str = "clip", # add support for auto selection of encoder
modality: str = None,
scores: bool = False,
use_cache: bool = True,
output_col: str = None,
**kwargs,
) -> ExplainBy:
"""Perform a clusterby operation on a DataFrame.
Args:
data (DataFrame): The dataframe to cluster.
by (Union[str, Sequence[str]]): The column(s) to cluster by. These columns will
be embedded using the ``encoder`` and the resulting embedding will be used.
method (Union[str, domino.Slicer]): The clustering method to use.
encoder (str): The encoder to use for the embedding. Defaults to ``clip``.
modality (Union[str, Sequence[str])): The modality to of the
**kwargs: Additional keyword arguments to pass to the clustering method.
Returns:
ExplainBy: A ExplainBy object.
"""
if output_col is None:
output_col = f"{method}({by},{target})"
# TODO (sabri): Give the user the option to specify the output and remove this guard
# once caching is supported
if not isinstance(by, str):
raise NotImplementedError
if output_col not in data or not use_cache:
data, _ = explain(
data=data,
input=by,
target=target,
method=method,
encoder=encoder,
modality=modality,
output_col=output_col,
**kwargs,
)
if scores:
slice_scores = data[output_col]
slice_scores = {
key: slice_scores[:, key] for key in range(slice_scores.shape[1])
}
return ExplainBy(data=data, scores=slice_scores, by=by)
else:
slice_sets = data[output_col]
slice_sets = {
key: np.where(slice_sets[:, key] == 1)[0]
for key in range(slice_sets.shape[1])
}
return ExplainBy(data=data, sets=slice_sets, by=by)
|
meerkat-main
|
meerkat/ops/sliceby/explainby.py
|
meerkat-main
|
meerkat/ops/sliceby/__init__.py
|
|
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Sequence, Tuple, Union
import numpy as np
from meerkat.dataframe import DataFrame
from meerkat.interactive.graph.reactivity import reactive
from meerkat.ops.cluster import cluster
from .sliceby import SliceBy
if TYPE_CHECKING:
from sklearn.base import ClusterMixin
class ClusterBy(SliceBy):
def __init__(
self,
data: DataFrame,
by: Union[List[str], str],
sets: Dict[Union[str, Tuple[str]], np.ndarray] = None,
):
super().__init__(data=data, by=by, sets=sets)
@reactive()
def clusterby(
data: DataFrame,
by: Union[str, Sequence[str]],
method: Union[str, "ClusterMixin"] = "KMeans",
encoder: str = "clip", # add support for auto selection of encoder
modality: str = None,
**kwargs,
) -> ClusterBy:
"""Perform a clusterby operation on a DataFrame.
Args:
data (DataFrame): The dataframe to cluster.
by (Union[str, Sequence[str]]): The column(s) to cluster by. These columns will
be embedded using the ``encoder`` and the resulting embedding will be used.
method (Union[str, "ClusterMixin"]): The clustering method to use.
encoder (str): The encoder to use for the embedding. Defaults to ``clip``.
        modality (str): The modality of the ``by`` column (e.g. ``"image"`` or
            ``"text"``).
**kwargs: Additional keyword arguments to pass to the clustering method.
Returns:
ClusterBy: A ClusterBy object.
"""
out_col = f"{method}({by})"
# TODO (sabri): Give the user the option to specify the output and remove this guard
# once caching is supported
if not isinstance(by, str):
raise NotImplementedError
if out_col not in data:
data, _ = cluster(
data=data,
input=by,
method=method,
encoder=encoder,
modality=modality,
**kwargs,
)
clusters = data[out_col]
cluster_indices = {key: np.where(clusters == key)[0] for key in np.unique(clusters)}
if isinstance(by, str):
by = [by]
return ClusterBy(data=data, sets=cluster_indices, by=by)
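# Example (minimal sketch): clustering a DataFrame by an embedded column. This
# assumes the optional embedding (e.g. CLIP) and scikit-learn dependencies are
# installed; the dataset name and "img" column are illustrative assumptions.
if __name__ == "__main__":
    import meerkat as mk
    df = mk.get("imagenette")  # assumed to expose an "img" image column
    cb = clusterby(df, by="img", method="KMeans", encoder="clip", n_clusters=4)
    # look at the first five rows of one cluster
    print(cb.slice[cb.slice_keys[0], :5])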
|
meerkat-main
|
meerkat/ops/sliceby/clusterby.py
|
from __future__ import annotations
from typing import Dict, List, Sequence, Tuple, Union
import numpy as np
from meerkat.dataframe import DataFrame
from meerkat.interactive.graph.reactivity import reactive
from .sliceby import SliceBy
class GroupBy(SliceBy):
def __init__(
self,
data: DataFrame,
by: Union[List[str], str],
sets: Dict[Union[str, Tuple[str]], np.ndarray] = None,
):
super().__init__(data=data, by=by, sets=sets)
@reactive()
def groupby(
data: DataFrame,
by: Union[str, Sequence[str]] = None,
) -> GroupBy:
"""Perform a groupby operation on a DataFrame or Column (similar to a
`DataFrame.groupby` and `Series.groupby` operations in Pandas).j.
Args:
data (Union[DataFrame, AbstractColumn]): The data to group.
by (Union[str, Sequence[str]]): The column(s) to group by. Ignored if ``data``
is a Column.
    Returns:
        GroupBy: A GroupBy object.
"""
if isinstance(by, str):
by = [by]
return GroupBy(data=data, sets=data[by].to_pandas().groupby(by).indices, by=by)
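# Example (minimal sketch): grouping an in-memory DataFrame and aggregating
# within each group. Column names are illustrative.
if __name__ == "__main__":
    import meerkat as mk
    df = mk.DataFrame(
        {
            "species": ["cat", "dog", "cat", "dog", "cat"],
            "weight": [4.0, 20.0, 5.0, 18.0, 3.5],
        }
    )
    gb = groupby(df, by="species")
    # mean weight per species
    print(gb["weight"].mean())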
|
meerkat-main
|
meerkat/ops/sliceby/groupby.py
|
import warnings
from typing import Any, Callable, Dict, Union
import meerkat as mk
from meerkat.interactive.graph.reactivity import reactive
from ...mixins.aggregate import AggregationError
@reactive()
def aggregate(
data: mk.DataFrame,
function: Union[Callable, str],
nuisance: str = "drop",
accepts_df: bool = False,
*args,
**kwargs,
) -> Dict[str, Any]:
""""""
if nuisance not in ["drop", "raise", "warn"]:
raise ValueError(f"{nuisance} is not a valid nuisance option")
if accepts_df and not isinstance(function, Callable):
raise ValueError("Must pass a callable to aggregate if accepts_df is True")
if accepts_df:
return {"df": function(data, *args, **kwargs)}
result = {}
for name, column in data.items():
try:
result[name] = column.aggregate(function, *args, **kwargs)
except AggregationError as e:
if nuisance == "drop":
continue
elif nuisance == "raise":
raise e
elif nuisance == "warn":
warnings.warn(str(e))
continue
return result
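# Example (minimal sketch): column-wise aggregation. This assumes the column
# types support the named aggregation "mean"; with the default nuisance="drop",
# the string column below is silently skipped.
if __name__ == "__main__":
    df = mk.DataFrame({"a": [1, 2, 3], "b": [2.0, 4.0, 6.0], "name": ["x", "y", "z"]})
    print(aggregate(df, function="mean"))  # e.g. {"a": 2.0, "b": 4.0}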
|
meerkat-main
|
meerkat/ops/aggregate/aggregate.py
|
meerkat-main
|
meerkat/ops/aggregate/__init__.py
|
|
from __future__ import annotations
import abc
import logging
import pathlib
import reprlib
from copy import copy
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Type,
    Union,
)
import numpy as np
import pandas as pd
import pyarrow as pa
import meerkat.config
from meerkat.errors import ConversionError
from meerkat.interactive.graph.marking import unmarked
from meerkat.interactive.node import NodeMixin
from meerkat.mixins.aggregate import AggregateMixin
from meerkat.mixins.blockable import BlockableMixin
from meerkat.mixins.cloneable import CloneableMixin
from meerkat.mixins.collate import CollateMixin
from meerkat.mixins.deferable import DeferrableMixin
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.mixins.indexing import MaterializationMixin
from meerkat.mixins.inspect_fn import FunctionInspectorMixin
from meerkat.mixins.io import ColumnIOMixin
from meerkat.mixins.reactifiable import ReactifiableMixin
from meerkat.provenance import ProvenanceMixin, capture_provenance
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.tools.utils import convert_to_batch_column_fn, translate_index
if TYPE_CHECKING:
import torch
from meerkat.interactive.formatter.base import FormatterGroup
torch = LazyLoader("torch") # noqa: F811
logger = logging.getLogger(__name__)
class Column(
AggregateMixin,
BlockableMixin,
CloneableMixin,
CollateMixin,
ColumnIOMixin,
FunctionInspectorMixin,
IdentifiableMixin,
DeferrableMixin,
MaterializationMixin,
NodeMixin,
ProvenanceMixin,
ReactifiableMixin,
abc.ABC,
):
"""An abstract class for Meerkat columns."""
_data: Sequence = None
# Path to a log directory
logdir: pathlib.Path = pathlib.Path.home() / "meerkat/"
# Create a directory
logdir.mkdir(parents=True, exist_ok=True)
_self_identifiable_group: str = "columns"
def __init__(
self,
data: Sequence = None,
collate_fn: Callable = None,
formatters: FormatterGroup = None,
*args,
**kwargs,
):
"""
Args:
data (Sequence, optional): [description]. Defaults to None.
collate_fn (Callable, optional): [description]. Defaults to None.
formatter (Callable, optional): . Defaults to None.
"""
# Assign to data
self._set_data(data)
super(Column, self).__init__(
collate_fn=collate_fn,
*args,
**kwargs,
)
self._formatters = (
formatters if formatters is not None else self._get_default_formatters()
)
@unmarked()
def __repr__(self):
return (
f"column({reprlib.repr([x for x in self[:10]])}, "
f"backend={type(self).__name__}"
)
@unmarked()
def __str__(self):
return reprlib.repr([x for x in self[:10]])
def streamlit(self):
return self._repr_pandas_()
def _set_data(self, data):
if self.is_blockable():
data = self._unpack_block_view(data)
self._data = data
def _is_valid_primary_key(self):
"""Subclasses should implement checks for ensuring that the column
could be used as a valid primary key.
Specifically, the check should ensure that the values in the
column are unique. If the check does not pass, returns False. If
the subclass has not implemented this method.
"""
return False
def _keyidx_to_posidx(self, keyidx: Any) -> int:
"""Get the posidx of the first occurrence of the given keyidx. Raise a
key error if the keyidx is not found.
Args:
keyidx: The keyidx to search for.
Returns:
The posidx of the first occurrence of the given keyidx.
"""
raise NotImplementedError()
def _keyidxs_to_posidxs(self, keyidxs: Sequence[Any]) -> np.ndarray:
"""Get the posidxs of the given keyidxs. Raise a key error if any of
the keyidxs are not found.
Args:
keyidxs: The keyidxs to search for.
Returns:
The posidxs of the given keyidxs.
"""
raise NotImplementedError()
@property
def data(self):
"""Get the underlying data."""
return self._data
@data.setter
def data(self, value):
self._set_data(value)
@property
def metadata(self):
return {}
@classmethod
def _state_keys(cls) -> set:
"""List of attributes that describe the state of the object."""
return {"_collate_fn", "_formatters"}
def _get_cell(self, index: int, materialize: bool = True) -> Any:
"""Get a single cell from the column.
Args:
index (int): This is an index into the ALL rows, not just visible rows. In
other words, we assume that the index passed in has already been
remapped via `_remap_index`, if `self.visible_rows` is not `None`.
materialize (bool, optional): Materialize and return the object. This
argument is used by subclasses of `Column` that hold data in an
                unmaterialized format. Defaults to True.
"""
return self._data[index]
def _get_batch(self, indices: np.ndarray, materialize: bool = True) -> Column:
"""Get a batch of cells from the column.
        Args:
            indices (np.ndarray): These are indices into ALL rows, not just visible
                rows. In other words, we assume that the indices passed in have
                already been remapped via `_remap_index`, if `self.visible_rows` is
                not `None`.
            materialize (bool, optional): Materialize and return the object. This
                argument is used by subclasses of `Column` that hold data in an
                unmaterialized format. Defaults to True.
        """
if materialize:
return self.collate(
[self._get_cell(int(i), materialize=materialize) for i in indices]
)
else:
return self.collate(
[self._get_cell(int(i), materialize=materialize) for i in indices]
)
def _get(self, index, materialize: bool = True, _data: np.ndarray = None):
index = self._translate_index(index)
if isinstance(index, int):
if _data is None:
_data = self._get_cell(index, materialize=materialize)
return _data
elif isinstance(index, np.ndarray):
# support for blocks
if _data is None:
_data = self._get_batch(index, materialize=materialize)
return self._clone(data=_data)
def __getitem__(self, index):
return self._get(index, materialize=False)
def _set_cell(self, index, value):
self._data[index] = value
def _set_batch(self, indices: np.ndarray, values):
for index, value in zip(indices, values):
self._set_cell(int(index), value)
def _set(self, index, value):
index = self._translate_index(index)
if isinstance(index, int):
self._set_cell(index, value)
elif isinstance(index, Sequence) or isinstance(index, np.ndarray):
self._set_batch(index, value)
else:
raise ValueError
def __setitem__(self, index, value):
self._set(index, value)
def _is_batch_index(self, index):
# np.ndarray indexed with a tuple of length 1 does not return an np.ndarray
# so we match this behavior
return not (
isinstance(index, int) or (isinstance(index, tuple) and len(index) == 1)
)
def _translate_index(self, index):
return translate_index(index, length=len(self))
@staticmethod
def _convert_to_batch_fn(
function: Callable, with_indices: bool, materialize: bool = True, **kwargs
) -> callable:
return convert_to_batch_column_fn(
function=function,
with_indices=with_indices,
materialize=materialize,
**kwargs,
)
@unmarked()
def __len__(self):
self._reactive_warning("len", "col")
return self.full_length()
def full_length(self):
if self._data is None:
return 0
return len(self._data)
@unmarked()
def _repr_cell_(self, index) -> object:
raise NotImplementedError
def _get_default_formatters(self) -> "FormatterGroup":
from meerkat.interactive.formatter import TextFormatter
from meerkat.interactive.formatter.base import FormatterGroup
# by default all object should have a `str` representation
return FormatterGroup(base=TextFormatter())
@property
def formatters(self) -> "FormatterGroup":
return self._formatters
@formatters.setter
def formatters(self, formatters: Union["FormatterGroup", Dict]):
        if isinstance(formatters, dict):
            from meerkat.interactive.formatter.base import FormatterGroup
            formatters = FormatterGroup(**formatters)
self._formatters = formatters
def format(self, formatters: "FormatterGroup") -> Column:
new_col = self.view()
new_col.formatters = self.formatters.copy()
new_col.formatters.update(formatters)
return new_col
@unmarked()
def _repr_pandas_(self, max_rows: int = None) -> pd.Series:
if max_rows is None:
max_rows = meerkat.config.display.max_rows
if len(self) > max_rows:
col = pd.Series(
[self._repr_cell(idx) for idx in range(max_rows // 2)]
+ [self._repr_cell(0)]
+ [
self._repr_cell(idx)
for idx in range(len(self) - max_rows // 2, len(self))
]
)
else:
col = pd.Series([self._repr_cell(idx) for idx in range(len(self))])
# TODO: if the objects have a _repr_html_ method, we should be able to use
# that instead of explicitly relying on the column having a formatter.
return (
col,
self.formatters["base"]
if self.formatters["base"] is None
else self.formatters["base"].html,
)
@unmarked()
def _repr_html_(self, max_rows: int = None):
# pd.Series objects do not implement _repr_html_
if max_rows is None:
max_rows = meerkat.config.display.max_rows
if len(self) > max_rows:
pd_index = np.concatenate(
(
np.arange(max_rows // 2),
np.zeros(1),
np.arange(len(self) - max_rows // 2, len(self)),
),
)
else:
pd_index = np.arange(len(self))
col_name = f"({self.__class__.__name__})"
col, formatter = self._repr_pandas_(max_rows=max_rows)
df = col.to_frame(name=col_name)
df = df.set_index(pd_index.astype(int))
return df.to_html(
max_rows=max_rows,
formatters={col_name: formatter},
escape=False,
)
def map(
self,
function: Callable,
is_batched_fn: bool = False,
batch_size: int = 1,
inputs: Union[Mapping[str, str], Sequence[str]] = None,
outputs: Union[Mapping[any, str], Sequence[str]] = None,
output_type: Union[Mapping[str, Type["Column"]], Type["Column"]] = None,
materialize: bool = True,
**kwargs,
) -> Optional[Union[Dict, List, Column]]:
from meerkat.ops.map import map
return map(
data=self,
function=function,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
inputs=inputs,
outputs=outputs,
output_type=output_type,
materialize=materialize,
**kwargs,
)
@capture_provenance()
def filter(
self,
function: Callable,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
drop_last_batch: bool = False,
num_workers: Optional[int] = 0,
materialize: bool = True,
# pbar: bool = False,
**kwargs,
) -> Optional[Column]:
"""Filter the elements of the column using a function."""
# Return if `self` has no examples
if not len(self):
logger.info("Dataset empty, returning it .")
return self
# Get some information about the function
function_properties = self._inspect_function(
function,
with_indices,
is_batched_fn=is_batched_fn,
materialize=materialize,
**kwargs,
)
assert function_properties.bool_output, "function must return boolean."
# Map to get the boolean outputs and indices
logger.info("Running `filter`, a new dataset will be returned.")
outputs = self.map(
function=function,
with_indices=with_indices,
# input_columns=input_columns,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_workers=num_workers,
materialize=materialize,
# pbar=pbar,
**kwargs,
)
indices = np.where(outputs)[0]
return self[indices]
def sort(
self, ascending: Union[bool, List[bool]] = True, kind: str = "quicksort"
) -> Column:
"""Return a sorted view of the column.
Args:
ascending (Union[bool, List[bool]]): Whether to sort in ascending or
descending order. If a list, must be the same length as `by`. Defaults
to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
Column: A view of the column with the sorted data.
"""
raise NotImplementedError
def argsort(
self, ascending: Union[bool, List[bool]] = True, kind: str = "quicksort"
) -> Column:
"""Return indices that would sorted the column.
Args:
ascending (Union[bool, List[bool]]): Whether to sort in ascending or
descending order. If a list, must be the same length as `by`. Defaults
to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
Column: A view of the column with the sorted data.
"""
raise NotImplementedError
def sample(
self,
n: int = None,
frac: float = None,
replace: bool = False,
weights: Union[str, np.ndarray] = None,
random_state: Union[int, np.random.RandomState] = None,
) -> Column:
"""Select a random sample of rows from Column. Roughly equivalent to
``sample`` in Pandas https://pandas.pydata.org/docs/reference/api/panda
s.DataFrame.sample.html.
Args:
n (int): Number of samples to draw. If `frac` is specified, this parameter
should not be passed. Defaults to 1 if `frac` is not passed.
frac (float): Fraction of rows to sample. If `n` is specified, this
parameter should not be passed.
replace (bool): Sample with or without replacement. Defaults to False.
weights (np.ndarray): Weights to use for sampling. If `None`
(default), the rows will be sampled uniformly. If a numpy array, the
sample will be weighted accordingly. If
weights do not sum to 1 they will be normalized to sum to 1.
random_state (Union[int, np.random.RandomState]): Random state or seed to
use for sampling.
Return:
Column: A random sample of rows from the DataFrame.
"""
from meerkat import sample
return sample(
data=self,
n=n,
frac=frac,
replace=replace,
weights=weights,
random_state=random_state,
)
def append(self, column: Column) -> None:
# TODO(Sabri): implement a naive `ComposedColumn` for generic append and
# implement specific ones for ListColumn, NumpyColumn etc.
raise NotImplementedError
@staticmethod
def concat(columns: Sequence[Column]) -> None:
# TODO(Sabri): implement a naive `ComposedColumn` for generic append and
# implement specific ones for ListColumn, NumpyColumn etc.
raise NotImplementedError
def is_equal(self, other: Column) -> bool:
"""Tests whether two columns.
Args:
other (Column): [description]
"""
raise NotImplementedError()
def batch(
self,
batch_size: int = 1,
drop_last_batch: bool = False,
collate: bool = True,
num_workers: int = 0,
materialize: bool = True,
*args,
**kwargs,
):
"""Batch the column.
Args:
batch_size: integer batch size
drop_last_batch: drop the last batch if its smaller than batch_size
collate: whether to collate the returned batches
Returns:
batches of data
"""
if (
self._get_batch.__func__ == Column._get_batch
and self._get.__func__ == Column._get
):
return torch.utils.data.DataLoader(
self.mz if materialize else self,
batch_size=batch_size,
collate_fn=self.collate if collate else lambda x: x,
drop_last=drop_last_batch,
num_workers=num_workers,
*args,
**kwargs,
)
else:
batch_indices = []
indices = np.arange(len(self))
for i in range(0, len(self), batch_size):
if drop_last_batch and i + batch_size > len(self):
continue
batch_indices.append(indices[i : i + batch_size])
return torch.utils.data.DataLoader(
self.mz if materialize else self,
sampler=batch_indices,
batch_size=None,
batch_sampler=None,
drop_last=drop_last_batch,
num_workers=num_workers,
*args,
**kwargs,
)
@classmethod
def get_writer(cls, mmap: bool = False, template: Column = None):
from meerkat.writers.concat_writer import ConcatWriter
if mmap:
raise ValueError("Memmapping not supported with this column type.")
else:
return ConcatWriter(output_type=cls, template=template)
Columnable = Union[Sequence, np.ndarray, pd.Series, "torch.Tensor"]
@classmethod
# @capture_provenance()
def from_data(cls, data: Union[Columnable, Column]):
"""Convert data to a meerkat column using the appropriate Column
type."""
return column(data)
def head(self, n: int = 5) -> Column:
"""Get the first `n` examples of the column."""
return self[:n]
def tail(self, n: int = 5) -> Column:
"""Get the last `n` examples of the column."""
return self[-n:]
def to_pandas(self, allow_objects: bool = False) -> pd.Series:
"""Convert the column to a Pandas Series.
If the column cannot be converted to a Pandas Series, this method will raise a
`~meerkat.errors.ConversionError`.
Returns:
pd.Series: The column as a Pandas Series.
"""
raise ConversionError(
f"Cannot convert column of type {type(self)} to Pandas Series."
)
def to_arrow(self) -> pa.Array:
"""Convert the column to an Arrow Array.
If the column cannot be converted to an Arrow Array, this method will raise a
`~meerkat.errors.ConversionError`.
Returns:
pa.Array: The column as an Arrow Array.
"""
raise ConversionError(
f"Cannot convert column of type {type(self)} to Arrow Array."
)
def to_torch(self) -> "torch.Tensor":
"""Convert the column to a PyTorch Tensor.
If the column cannot be converted to a PyTorch Tensor, this method will raise a
`~meerkat.errors.ConversionError`.
Returns:
torch.Tensor: The column as a PyTorch Tensor.
"""
raise ConversionError(
f"Cannot convert column of type {type(self)} to PyTorch Tensor."
)
def to_numpy(self) -> np.ndarray:
"""Convert the column to a Numpy array.
If the column cannot be converted to a Numpy array, this method will raise a
`~meerkat.errors.ConversionError`.
Returns:
np.ndarray: The column as a Numpy array.
"""
raise ConversionError(
f"Cannot convert column of type {type(self)} to Numpy array."
)
def __array__(self) -> np.ndarray:
"""Convert the data to a numpy array."""
return self.to_numpy()
def to_json(self) -> dict:
"""Convert the column to a JSON object.
If the column cannot be converted to a JSON object, this method will raise a
`~meerkat.errors.ConversionError`.
Returns:
dict: The column as a JSON object.
"""
raise ConversionError(
f"Cannot convert column of type {type(self)} to JSON object."
)
def _copy_data(self) -> object:
return copy(self._data)
def _view_data(self) -> object:
return self._data
@property
def is_mmap(self):
return False
def infer_column_type(data: Sequence) -> Type[Column]:
if isinstance(data, Column):
return type(data)
from .scalar.abstract import ScalarColumn
if isinstance(data, pd.Series):
return ScalarColumn
if isinstance(data, (pa.Array, pa.ChunkedArray)):
from .scalar.arrow import ArrowScalarColumn
return ArrowScalarColumn
if torch.is_tensor(data):
from .tensor.torch import TorchTensorColumn
# FIXME: Once we have a torch scalar column we should use that here
# if len(data.shape) == 1:
# return ScalarColumn(data.cpu().detach().numpy())
return TorchTensorColumn
if isinstance(data, np.ndarray):
if len(data.shape) == 1:
from .scalar.pandas import ScalarColumn
return ScalarColumn
from .tensor.numpy import NumPyTensorColumn
return NumPyTensorColumn
if isinstance(data, Sequence):
from .tensor.numpy import NumPyTensorColumn
if len(data) != 0 and (isinstance(data[0], (np.ndarray, NumPyTensorColumn))):
return NumPyTensorColumn
from .tensor.torch import TorchTensorColumn
if len(data) != 0 and (
isinstance(data[0], TorchTensorColumn) or torch.is_tensor(data[0])
):
return TorchTensorColumn
if len(data) != 0 and isinstance(data[0], (str, int, float, bool, np.generic)):
from .scalar.pandas import ScalarColumn
return ScalarColumn
from .object.base import ObjectColumn
return ObjectColumn
else:
raise ValueError(f"Cannot create column out of data of type {type(data)}")
def column(data: Sequence, scalar_backend: str = None) -> Column:
"""Create a Meerkat column from data.
The Meerkat column type is inferred from the type and structure of
the data passed in.
"""
if isinstance(data, Column):
# TODO: Need ton make this view but should decide where to do it exactly
return data # .view()
from .scalar.abstract import ScalarColumn
column_type = infer_column_type(data)
if column_type == ScalarColumn:
return ScalarColumn(data, backend=scalar_backend)
return column_type(data)
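# Example (minimal sketch): how `column` infers a column type from the input
# data. The exact class names are backend-dependent, so only rough expectations
# are noted in the comments.
if __name__ == "__main__":
    print(type(column([1, 2, 3])).__name__)  # a scalar column backend
    print(type(column(np.zeros((4, 2)))).__name__)  # a tensor column
    print(type(column([{"a": 1}, {"b": 2}])).__name__)  # ObjectColumn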
|
meerkat-main
|
meerkat/columns/abstract.py
|
meerkat-main
|
meerkat/columns/__init__.py
|
|
meerkat-main
|
meerkat/columns/deferred/__init__.py
|
|
from __future__ import annotations
import functools
import io
import logging
import os
import urllib.request
import warnings
from pathlib import Path
from string import Template
from typing import IO, Any, Callable, Sequence, Union
from urllib.parse import urlparse
import dill
import yaml
from PIL import Image
import meerkat.tools.docs as docs
from meerkat import env
from meerkat.block.deferred_block import DeferredOp
from meerkat.cells.audio import Audio
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.base import DeferredCell, DeferredColumn
from meerkat.columns.scalar import ScalarColumn
from meerkat.interactive.formatter import (
CodeFormatterGroup,
HTMLFormatterGroup,
PDFFormatterGroup,
TextFormatterGroup,
)
from meerkat.interactive.formatter.audio import DeferredAudioFormatterGroup
from meerkat.interactive.formatter.base import FormatterGroup
from meerkat.interactive.formatter.image import DeferredImageFormatterGroup
from meerkat.interactive.formatter.medimage import MedicalImageFormatterGroup
from meerkat.tools.utils import requires
if env.is_package_installed("voxel"):
import voxel
else:
voxel = None
logger = logging.getLogger(__name__)
FILE_SHARED_DOCS = {
"loader": docs.Arg(
"""
loader (Union[str, Callable[[Union[str, IO]], Any]]): a callable that
accepts a filepath or an I/O stream and returns data.
"""
),
"cache_dir": docs.Arg(
"""
cache_dir (str, optional): the directory on disk where downloaded
files are to be cached. Defaults to None, in which case files will be
re-downloaded on every access of the data. The ``cache_dir`` can also
include environment variables (e.g. ``$DATA_DIR/images``) which will
be expanded prior to loading. This is useful when sharing DataFrames
between machines.
"""
),
"base_dir": docs.Arg(
"""
base_dir (str, optional): an absolute path to a directory containing the
files. If provided, the ``filepath`` to be loaded will be joined with
the ``base_dir``. As such, this argument should only be used if the
            loader will be applied to relative paths. The ``base_dir`` can also
include environment variables (e.g. ``$DATA_DIR/images``) which will
be expanded prior to loading. This is useful when sharing DataFrames
between machines.
"""
),
"downloader": docs.Arg(
"""
downloader (Union[str, callable], optional): a callable that accepts at
least two positional arguments - a URI and a destination (which could
be either a string or file object).
Meerkat includes a small set of built-in downloaders ["url", "gcs"]
which can be specified via string.
"""
),
"fallback_downloader": docs.Arg(
"""
fallback_downloader (callable, optional): a callable that will be run each
time the the downloader fails (for any reason). This is useful, for
example, if you expect some of the URIs in a dataset to be broken
``fallback_downloader`` could write an empty file in place of the
original. If ``fallback_downloader`` is not supplied, the original
exception is re-raised.
"""
),
}
class FileLoader:
@docs.doc(source=FILE_SHARED_DOCS)
def __init__(
self,
loader: Union[str, Callable[[Union[str, IO]], Any]],
base_dir: str = None,
downloader: Union[str, Callable] = None,
fallback_downloader: Callable[[Union[str, IO]], None] = None,
cache_dir: str = None,
):
"""A simple file loader with support for both local paths and remote
URIs.
.. warning::
In order for the column to be serializable with ``write()``, the
callables passed to the constructor must be pickleable.
Args:
${loader}
${base_dir}
${downloader}
${fallback_downloader}
${cache_dir}
"""
self.loader = loader
self.base_dir = base_dir
if downloader == "url":
self.downloader = download_url
elif downloader == "gcs":
self.downloader = download_gcs
else:
self.downloader = downloader
self.fallback_downloader = fallback_downloader
self.cache_dir = cache_dir
def __call__(self, filepath: str):
"""
Args:
filepath (str): If `downloader` is None, this is interpreted as a local
filepath. Otherwise, it is interpreted as a URI from which the file can
be downloaded.
"""
        # support including environment variables in the base_dir so that DataFrames
# can be easily moved between machines
if self.base_dir is not None:
# need to convert Path objects to strings for Template to work
base_dir = str(self.base_dir)
base_dir = os.path.expanduser(base_dir)
try:
# we don't use os.expanvars because it raises an error
base_dir = Template(base_dir).substitute(os.environ)
except KeyError:
raise ValueError(
f'`base_dir="{base_dir}"` contains an undefined environment'
"variable."
)
filepath = os.path.join(base_dir, filepath)
if self.downloader is not None:
parse = urlparse(filepath)
if self.cache_dir is not None:
# need to convert Path objects to strings for Template to work
cache_dir = str(self.cache_dir)
cache_dir = os.path.expanduser(cache_dir)
try:
# we don't use os.expanvars because it raises an error
cache_dir = Template(cache_dir).substitute(os.environ)
except KeyError:
raise ValueError(
f'`cache_dir="{cache_dir}"` contains an undefined environment'
"variable."
)
dst = os.path.join(cache_dir, parse.netloc + parse.path)
os.makedirs(os.path.dirname(dst), exist_ok=True)
else:
                # if there's no cache_dir, we download into an in-memory buffer
dst = io.BytesIO()
if isinstance(dst, io.BytesIO) or not os.path.exists(dst):
try:
self.downloader(filepath, dst)
except Exception as e:
if self.fallback_downloader is not None:
# if user passes fallback_downloader, then on any
# failed download, we write the default data to the
# destination and continue
warnings.warn(
f"Failed to download {filepath} with error {e}. Falling "
"back to default data."
)
self.fallback_downloader(dst)
else:
raise e
filepath = dst
data = self.loader(filepath)
return data
def __eq__(self, other: FileLoader) -> bool:
return (
(other.__class__ == self.__class__)
and (self.loader == other.loader)
and (self.base_dir == other.base_dir)
)
def __hash__(self) -> int:
# needs to be hasable for block signature
return hash((self.loader, self.base_dir))
def _get_state(self):
return {
"loader": self.loader,
"base_dir": self.base_dir,
"downloader": self.downloader,
"fallback_downloader": self.fallback_downloader,
"cache_dir": self.cache_dir,
}
def _set_state(self, state):
self.__dict__.update(state)
def __setstate__(self, state):
"""Set state used by Pickle."""
if "downloader" not in state:
state["downloader"] = None
if "fallback_downloader" not in state:
state["fallback_downloader"] = None
self._set_state(state)
@staticmethod
def to_yaml(dumper: yaml.Dumper, data: FileLoader):
"""This function is called by the YAML dumper to convert an
:class:`Formatter` object into a YAML node.
It should not be called directly.
"""
data = {
"class": type(data),
"state": data._get_state(),
}
return dumper.represent_mapping("!FileLoader", data)
@staticmethod
def from_yaml(loader, node):
"""This function is called by the YAML loader to convert a YAML node
into an :class:`Formatter` object.
It should not be called directly.
"""
data = loader.construct_mapping(node, deep=True)
formatter = data["class"].__new__(data["class"])
formatter._set_state(data["state"])
return formatter
yaml.add_multi_representer(FileLoader, FileLoader.to_yaml)
yaml.add_constructor("!FileLoader", FileLoader.from_yaml)
class FileCell(DeferredCell):
@property
def base_dir(self):
return self.data.fn.base_dir
@property
def absolute_path(self):
return (
os.path.join(self.base_dir, self.data.args[0])
if self.base_dir is not None
else self.data.args[0]
)
def __eq__(self, other):
return (other.__class__ == self.__class__) and other.data.is_equal(self.data)
@docs.doc(source=FILE_SHARED_DOCS)
class FileColumn(DeferredColumn):
"""A column where each cell represents an file stored on disk or the web.
The underlying data is a `PandasSeriesColumn` of strings, where each string
is the path to a file. The column materializes the files into memory when
indexed. If the column is lazy indexed with the ``lz`` indexer, the files
are not materialized and a ``FileCell`` or a ``FileColumn`` is returned
instead.
Args:
data (Sequence[str]): A list of filepaths.
${loader}
${base_dir}
${downloader}
${cache_dir}
"""
def __init__(
self,
data: Sequence[str] = None,
type: str = None,
loader: callable = None,
        downloader: Union[callable, str] = None,
base_dir: str = None,
cache_dir: str = None,
formatters: FormatterGroup = None,
*args,
**kwargs,
):
if not isinstance(data, ScalarColumn):
data = ScalarColumn(data)
if type is None and (loader is None or formatters is None):
# infer the type from the file extension
type = _infer_file_type(data)
if type not in FILE_TYPES:
raise ValueError(f"Invalid file type {type}.")
if type is not None and loader is None:
loader = FILE_TYPES[type]["loader"]
if type is not None and formatters is None:
formatters = FILE_TYPES[type]["formatters"]()
if FILE_TYPES[type].get("defer", True):
formatters = formatters.defer()
# if base_dir is not provided and all paths are absolute, then
# we can infer the base_dir
if base_dir is None and data.str.startswith("/").all():
base_dir = os.path.commonpath(data)
data = data.str.replace(base_dir + "/", "")
# if downloader is not provided then we can try to infer from the filepaths
if downloader is None:
if data.str.startswith("http").all():
downloader = download_url
elif data.str.startswith("gs://").all():
downloader = download_gcs
if isinstance(loader, FileLoader):
if base_dir is not None or downloader is not None:
raise ValueError(
"Cannot pass, `base_dir`, `downloader`, when loader is "
"a `FileLoader`."
)
fn = loader
if fn.loader is None:
fn.loader = self.default_loader
else:
fn = FileLoader(
loader=self.default_loader if loader is None else loader,
base_dir=base_dir,
downloader=downloader,
cache_dir=cache_dir,
)
data = DeferredOp(
args=[data],
kwargs={},
batch_size=1,
fn=fn,
is_batched_fn=False,
)
super(FileColumn, self).__init__(data, formatters=formatters, *args, **kwargs)
@property
def loader(self):
return self.data.fn.loader
@loader.setter
def loader(self, loader: callable):
self.data.fn.loader = loader
@property
def base_dir(self):
return self.data.fn.base_dir
@base_dir.setter
def base_dir(self, base_dir: str):
self.data.fn.base_dir = base_dir
def _create_cell(self, data: object) -> DeferredCell:
return FileCell(data=data)
@classmethod
def from_urls(
cls,
urls: Sequence[str],
):
from PIL import Image
return cls(
data=urls,
loader=FileLoader(
loader=lambda bytes_io: Image.open(bytes_io).convert("RGB"),
downloader="url",
),
)
@classmethod
def default_loader(cls, path, *args, **kwargs):
if isinstance(path, io.BytesIO):
return path.read().decode("utf-8")
with open(path, "r") as f:
return f.read()
@staticmethod
def _read_data(path: str):
try:
return DeferredOp.read(path=os.path.join(path, "data"))
except KeyError:
# TODO(Sabri): Remove this in a future version, once we no longer need to
# support old DataFrames.
warnings.warn(
"Reading a LambdaColumn stored in a format that will not be"
" supported in the future. Please re-write the column to the new"
" format.",
category=FutureWarning,
)
meta = yaml.load(
open(os.path.join(path, "data", "meta.yaml")),
Loader=yaml.FullLoader,
)
if issubclass(meta["dtype"], Column):
col = Column.read(os.path.join(path, "data"))
else:
raise ValueError(
"Support for LambdaColumns based on a DataFrame is deprecated."
)
state = dill.load(open(os.path.join(path, "state.dill"), "rb"))
fn = FileLoader(
loader=state["loader"],
base_dir=state["base_dir"],
)
return DeferredOp(
args=[col],
kwargs={},
fn=fn,
is_batched_fn=False,
batch_size=1,
)
def is_equal(self, other: Column) -> bool:
return (other.__class__ == self.__class__) and self.data.is_equal(other.data)
def _infer_file_type(filepaths: ScalarColumn):
"""Infer the type of a file from its extension.
Args:
filepath (str): The path to the file.
Returns:
str: The type of the file.
"""
NUM_SAMPLES = 100
filepaths = filepaths[:NUM_SAMPLES]
# extract the extension, taking into account that it may not exist
# FIXME: make this work for URLs with `.com/...`
ext = filepaths.str.extract(r"(?P<ext>\.[^\.]+(\.gz)?)$")["ext"].str.lower()
# if the extension is not present, then we assume it is a text file
for type, info in FILE_TYPES.items():
if ext.isin(info["exts"]).any():
return type
return "text"
def load_image(f: Union[str, io.BytesIO, Path]):
img = Image.open(f)
return img.convert("RGB")
def load_bytes(path: Union[str, io.BytesIO]):
if isinstance(path, io.BytesIO):
return path.read()
with open(path, "rb") as f:
return f.read()
def load_text(path: Union[str, io.BytesIO]):
if isinstance(path, io.BytesIO):
return path.read().decode("utf-8")
with open(path, "r") as f:
return f.read()
def load_audio(path: str) -> Audio:
import torchaudio
data, sampling_rate = torchaudio.load(path)
data = data.squeeze()
return Audio(data, sampling_rate=sampling_rate)
@requires("voxel")
def load_medimg(path: Union[str, io.BytesIO]):
return voxel.read(path)
FILE_TYPES = {
"image": {
"loader": load_image,
"formatters": DeferredImageFormatterGroup,
"exts": [".jpg", ".jpeg", ".png", ".heic", ".JPEG"],
"defer": False,
},
"pdf": {
"loader": load_bytes,
"formatters": PDFFormatterGroup,
"exts": [".pdf"],
},
"html": {
"loader": load_text,
"formatters": HTMLFormatterGroup,
"exts": [".html", ".htm"],
},
"text": {
"loader": load_text,
"formatters": TextFormatterGroup,
"exts": [".txt"],
},
"code": {
"loader": load_text,
"formatters": CodeFormatterGroup,
"exts": [".py", ".js", ".css", ".json", ".java", ".cpp", ".c", ".h", ".hpp"],
},
"audio": {
"loader": load_audio,
"formatters": DeferredAudioFormatterGroup,
"exts": [".wav", ".mp3"],
"defer": False,
},
"medimg": {
"loader": load_medimg,
"formatters": MedicalImageFormatterGroup,
"exts": [".dcm", ".nii", ".nii.gz"],
"defer": False,
},
}
def download_url(url: str, dst: Union[str, io.BytesIO]):
if isinstance(dst, str):
return urllib.request.urlretrieve(url=url, filename=dst)
else:
import requests
response = requests.get(url)
data = response.content
dst.write(data)
dst.seek(0)
return dst
@functools.lru_cache()
def _get_gcs_bucket(bucket_name: str, project: str = None):
"""Get a GCS bucket."""
from google.cloud import storage
client = storage.Client(project=project)
return client.bucket(bucket_name)
def download_gcs(uri: str, dst: Union[str, io.BytesIO]):
"""Download a file from GCS."""
from google.cloud import exceptions
bucket, blob_name = urlparse(uri).netloc, urlparse(uri).path.lstrip("/")
bucket = _get_gcs_bucket(bucket_name=uri.split("/")[2])
try:
if isinstance(dst, io.BytesIO):
dst.write(bucket.blob(str(blob_name)).download_as_bytes())
dst.seek(0)
return dst
else:
bucket.blob(str(blob_name)).download_to_filename(dst)
return dst
except exceptions.NotFound:
os.remove(dst)
raise FileNotFoundError(uri)
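# Example (minimal sketch): a FileLoader that resolves relative paths against a
# base directory, and a FileColumn built from text files. The directory and
# file names are illustrative, so the loader call is left commented out.
if __name__ == "__main__":
    loader = FileLoader(loader=load_text, base_dir="$DATA_DIR/notes")
    # text = loader("day1.txt")  # would read $DATA_DIR/notes/day1.txt
    col = FileColumn(["notes/day1.txt", "notes/day2.txt"], type="text")
    print(col[0])  # a deferred FileCell; calling it would read the file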
|
meerkat-main
|
meerkat/columns/deferred/file.py
|
from typing import Callable
from meerkat.display import audio_file_formatter
from meerkat.tools.lazy_loader import LazyLoader
from .file import FileColumn
torchaudio = LazyLoader("torchaudio")
torch = LazyLoader("torch")
class AudioColumn(FileColumn):
"""A lambda column where each cell represents an audio file on disk. The
underlying data is a `PandasSeriesColumn` of strings, where each string is
the path to an image. The column materializes the images into memory when
indexed. If the column is lazy indexed with the ``lz`` indexer, the images
are not materialized and an ``FileCell`` or an ``AudioColumn`` is returned
instead.
Args:
data (Sequence[str]): A list of filepaths to images.
transform (callable): A function that transforms the image (e.g.
``torchvision.transforms.functional.center_crop``).
.. warning::
In order for the column to be serializable, the transform function must
be pickleable.
loader (callable): A callable with signature ``def loader(filepath: str) ->
PIL.Image:``. Defaults to ``torchvision.datasets.folder.default_loader``.
.. warning::
In order for the column to be serializable with ``write()``, the loader
function must be pickleable.
base_dir (str): A base directory that the paths in ``data`` are relative to. If
``None``, the paths are assumed to be absolute.
"""
@staticmethod
def _get_default_formatter() -> Callable:
return audio_file_formatter
@classmethod
def default_loader(cls, *args, **kwargs):
return torchaudio.load(*args, **kwargs)[0]
def _repr_cell(self, idx):
return self[idx]
def collate(self, batch):
tensors = [b.t() for b in batch]
tensors = torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True)
tensors = tensors.transpose(1, -1)
return tensors
|
meerkat-main
|
meerkat/columns/deferred/audio.py
|
from __future__ import annotations
import logging
from io import BytesIO
from pathlib import Path
from typing import Optional, Sequence, Union
from PIL import Image
import meerkat.tools.docs as docs
from meerkat.columns.deferred.file import FILE_SHARED_DOCS, FileColumn
from meerkat.interactive.formatter import ImageFormatterGroup
from meerkat.interactive.formatter.base import deferred_formatter_group
logger = logging.getLogger(__name__)
def load_image(f: Union[str, BytesIO, Path]):
img = Image.open(f)
return img.convert("RGB")
@docs.doc(source=FILE_SHARED_DOCS)
def image(
filepaths: Sequence[str],
base_dir: Optional[str] = None,
downloader: Union[callable, str] = None,
loader: callable = load_image,
cache_dir: str = None,
):
"""Create a :class:`FileColumn` where each cell represents an image stored
on disk. The underlying data is a :class:`ScalarColumn` of strings, where
each string is the path to an image.
Args:
filepaths (Sequence[str]): A list of filepaths to images.
${loader}
${base_dir}
${downloader}
${fallback_downloader}
${cache_dir}
"""
return FileColumn(
filepaths,
type="image",
base_dir=base_dir,
loader=loader,
downloader=downloader,
cache_dir=cache_dir,
formatters=deferred_formatter_group(ImageFormatterGroup()),
)
class ImageColumn(FileColumn):
"""DEPRECATED A column where each cell represents an image stored on disk.
The underlying data is a `PandasSeriesColumn` of strings, where each string
is the path to an image. The column materializes the images into memory
when indexed. If the column is lazy indexed with the ``lz`` indexer, the
images are not materialized and an ``ImageCell`` or an ``ImageColumn`` is
returned instead.
Args:
data (Sequence[str]): A list of filepaths to images.
transform (callable): A function that transforms the image (e.g.
``torchvision.transforms.functional.center_crop``).
.. warning::
In order for the column to be serializable, the transform function must
be pickleable.
loader (callable): A callable with signature ``def loader(filepath: str) ->
PIL.Image:``. Defaults to ``torchvision.datasets.folder.default_loader``.
.. warning::
In order for the column to be serializable with ``write()``, the loader
function must be pickleable.
base_dir (str): A base directory that the paths in ``data`` are relative to. If
``None``, the paths are assumed to be absolute.
"""
@classmethod
def default_loader(cls, *args, **kwargs):
return load_image(*args, **kwargs)
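# Example (minimal sketch): building an image column from URLs. The URL is
# purely illustrative; cells stay deferred until they are called.
if __name__ == "__main__":
    col = image(
        ["https://example.com/cat.png"],  # hypothetical URL
        downloader="url",
        cache_dir="~/.cache/my_images",
    )
    cell = col[0]  # a deferred cell; `cell()` would download and decode the image
    print(cell)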
|
meerkat-main
|
meerkat/columns/deferred/image.py
|
from __future__ import annotations
import logging
import os
import warnings
from typing import Callable, Collection, Sequence, Type, Union
import dill
import numpy as np
import yaml
from meerkat.block.abstract import BlockView
from meerkat.block.deferred_block import DeferredBlock, DeferredCellOp, DeferredOp
from meerkat.cells.abstract import AbstractCell
from meerkat.columns.abstract import Column
from meerkat.errors import ConcatWarning, ImmutableError
from meerkat.tools.lazy_loader import LazyLoader
Image = LazyLoader("PIL.Image")
logger = logging.getLogger(__name__)
class DeferredCell(AbstractCell):
def __init__(self, data: DeferredCellOp):
self._data = data
@property
def data(self) -> object:
"""Get the data associated with this cell."""
return self._data
def get(self, *args, **kwargs):
return self.data._get()
def __eq__(self, other):
return (other.__class__ == self.__class__) and (self.data == other.data)
def __repr__(self):
name = getattr(self.data.fn, "__qualname__", repr(self.data.fn))
return f"{self.__class__.__qualname__}(fn={name})"
def __call__(self):
return self.data._get()
class DeferredColumn(Column):
block_class: type = DeferredBlock
def __init__(
self,
data: Union[DeferredOp, BlockView],
output_type: Type["Column"] = None,
*args,
**kwargs,
):
self._output_type = output_type
super(DeferredColumn, self).__init__(data, *args, **kwargs)
def __call__(
self,
use_ray: bool = False,
pbar: bool = False,
num_blocks: int = None,
blocks_per_window: int = None,
batch_size: int = 1,
):
from meerkat.ops.map import _materialize
return _materialize(
self,
use_ray=use_ray,
pbar=pbar,
num_blocks=num_blocks,
blocks_per_window=blocks_per_window,
batch_size=batch_size,
)
def _set(self, index, value):
raise ImmutableError("LambdaColumn is immutable.")
@property
def fn(self) -> Callable:
"""Subclasses like `ImageColumn` should be able to implement their own
version."""
return self.data.fn
def _create_cell(self, data: object) -> DeferredCell:
return DeferredCell(data=data)
def _get(self, index, materialize: bool = False, _data: np.ndarray = None):
index = self._translate_index(index)
data = self.data._get(index=index, materialize=materialize)
if isinstance(index, int):
if materialize:
return data
else:
return self._create_cell(data=data)
elif isinstance(index, np.ndarray):
# support for blocks
if materialize:
# materialize could change the data in unknown ways, cannot clone
return self.convert_to_output_type(data=self.collate(data))
else:
return self._clone(data=data)
@classmethod
def _state_keys(cls) -> Collection:
return super()._state_keys() | {"_output_type"}
@staticmethod
def concat(columns: Sequence[DeferredColumn]):
for c in columns:
if c.fn != columns[0].fn:
warnings.warn(
ConcatWarning("Concatenating LambdaColumns with different `fn`.")
)
break
return columns[0]._clone(data=DeferredOp.concat([c.data for c in columns]))
def _write_data(self, path):
return self.data.write(os.path.join(path, "data"))
def is_equal(self, other: Column) -> bool:
if other.__class__ != self.__class__:
return False
return self.data.is_equal(other.data)
@staticmethod
def _read_data(path: str):
try:
return DeferredOp.read(path=os.path.join(path, "data"))
except KeyError:
# TODO(Sabri): Remove this in a future version, once we no longer need to
# support old DataFrames.
warnings.warn(
"Reading a LambdaColumn stored in a format that will soon be"
" deprecated. Please re-write the column to the new format."
)
meta = yaml.load(
open(os.path.join(path, "data", "meta.yaml")),
Loader=yaml.FullLoader,
)
if issubclass(meta["dtype"], Column):
col = Column.read(os.path.join(path, "data"))
else:
raise ValueError(
"Support for LambdaColumns based on a DataFrame is deprecated."
)
state = dill.load(open(os.path.join(path, "state.dill"), "rb"))
return DeferredOp(
args=[col],
kwargs={},
fn=state["fn"],
is_batched_fn=False,
batch_size=1,
)
def _get_default_formatters(self) -> Callable:
# materialize a sample into a column
from meerkat.interactive.formatter.base import deferred_formatter_group
col = self._get(index=slice(0, 1, 1), materialize=True)
return deferred_formatter_group(col.formatters)
def _repr_cell(self, idx):
return self[idx]
def convert_to_output_type(self, data: any):
if self._output_type is None:
from meerkat import column
return column(data)
return self._output_type(data)
|
meerkat-main
|
meerkat/columns/deferred/base.py
|
meerkat-main
|
meerkat/columns/object/__init__.py
|
|
from __future__ import annotations
import abc
import logging
from typing import Sequence
import cytoolz as tz
import numpy as np
import pandas as pd
from PIL.Image import Image
from yaml.representer import Representer
from meerkat.columns.abstract import Column
from meerkat.mixins.cloneable import CloneableMixin
Representer.add_representer(abc.ABCMeta, Representer.represent_name)
logger = logging.getLogger(__name__)
class ObjectColumn(Column):
def __init__(
self,
data: Sequence = None,
*args,
**kwargs,
):
if data is not None:
data = list(data)
super(ObjectColumn, self).__init__(data=data, *args, **kwargs)
@classmethod
def from_list(cls, data: Sequence):
return cls(data=data)
def batch(
self,
batch_size: int = 1,
drop_last_batch: bool = False,
collate: bool = True,
*args,
**kwargs,
):
for i in range(0, len(self), batch_size):
if drop_last_batch and i + batch_size > len(self):
continue
if collate:
yield self.collate(self[i : i + batch_size])
else:
yield self[i : i + batch_size]
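    # Usage sketch for `batch` above (illustrative values, not from the source):
    #
    #     col = ObjectColumn([{"a": 1}, {"a": 2}, {"a": 3}])
    #     for chunk in col.batch(batch_size=2, collate=False):
    #         ...  # yields column slices of length <= 2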
@classmethod
def concat(cls, columns: Sequence[ObjectColumn]):
data = list(tz.concat([c.data for c in columns]))
if issubclass(cls, CloneableMixin):
return columns[0]._clone(data=data)
return cls.from_list(data)
def is_equal(self, other: Column) -> bool:
return (self.__class__ == other.__class__) and self.data == other.data
def _repr_cell(self, index) -> object:
return self[index]
def _get_default_formatters(self):
from meerkat.interactive.formatter.image import ImageFormatterGroup
sample = self[0]
if isinstance(sample, Image):
return ImageFormatterGroup()
return super()._get_default_formatters()
def to_pandas(self, allow_objects: bool = False) -> pd.Series:
return pd.Series([self[int(idx)] for idx in range(len(self))])
def to_numpy(self):
return np.array(self.data)
|
meerkat-main
|
meerkat/columns/object/base.py
|
from typing import TYPE_CHECKING, List, Union
import numpy as np
from meerkat.block.abstract import BlockView
from meerkat.block.numpy_block import NumPyBlock
from meerkat.block.torch_block import TorchBlock
from meerkat.tools.lazy_loader import LazyLoader
from ..abstract import Column
torch = LazyLoader("torch")
if TYPE_CHECKING:
from torch import TensorType
TensorColumnTypes = Union[np.ndarray, TensorType]
class TensorColumn(Column):
def __new__(cls, data: "TensorColumnTypes" = None, backend: str = None):
from .numpy import NumPyTensorColumn
from .torch import TorchTensorColumn
backends = {"torch": TorchTensorColumn, "numpy": NumPyTensorColumn}
if backend is not None:
if backend not in backends:
raise ValueError(
f"Backend {backend} not supported. "
f"Expected one of {list(backends.keys())}"
)
else:
return super().__new__(backends[backend])
if isinstance(data, BlockView):
if isinstance(data.block, TorchBlock):
backend = TorchTensorColumn
elif isinstance(data.block, NumPyBlock):
backend = NumPyTensorColumn
if (cls is not TensorColumn) or (data is None):
return super().__new__(cls)
if isinstance(data, BlockView):
if isinstance(data.block, TorchBlock):
return super().__new__(TorchTensorColumn)
elif isinstance(data.block, NumPyBlock):
return super().__new__(NumPyTensorColumn)
if isinstance(data, np.ndarray):
return super().__new__(NumPyTensorColumn)
elif torch.is_tensor(data):
return super().__new__(TorchTensorColumn)
elif isinstance(data, List):
if len(data) == 0:
raise ValueError(
"Cannot create `TensorColumn` from empty list of tensors."
)
elif torch.is_tensor(data[0]):
return super().__new__(TorchTensorColumn)
else:
return super().__new__(NumPyTensorColumn)
else:
raise ValueError(
f"Cannot create `TensorColumn` from object of type {type(data)}."
)
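    # Sketch of the backend dispatch performed in `__new__` above (illustrative):
    #
    #     TensorColumn(np.arange(4))     # -> NumPyTensorColumn
    #     TensorColumn(torch.arange(4))  # -> TorchTensorColumn
    #
    # Passing `backend="numpy"` or `backend="torch"` forces a particular subclass.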
# def _get_default_formatters(self):
# from meerkat.interactive.formatter import TensorFormatterGroup
# return TensorFormatterGroup()
|
meerkat-main
|
meerkat/columns/tensor/abstract.py
|
from .abstract import TensorColumn
__all__ = ["TensorColumn"]
|
meerkat-main
|
meerkat/columns/tensor/__init__.py
|
from __future__ import annotations
import abc
import functools
import logging
import numbers
import os
import shutil
from mmap import mmap
from typing import TYPE_CHECKING, Any, Callable, List, Sequence, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from numpy.core._exceptions import UFuncTypeError
from yaml.representer import Representer
from meerkat.block.abstract import BlockView
from meerkat.block.numpy_block import NumPyBlock
from meerkat.columns.abstract import Column
from meerkat.mixins.aggregate import AggregationError
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.writers.concat_writer import ConcatWriter
from .abstract import TensorColumn
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
Representer.add_representer(abc.ABCMeta, Representer.represent_name)
logger = logging.getLogger(__name__)
def getattr_decorator(fn: Callable):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
out = fn(*args, **kwargs)
if isinstance(out, np.ndarray):
return NumPyTensorColumn(out)
else:
return out
return wrapper
class NumPyTensorColumn(
TensorColumn,
np.lib.mixins.NDArrayOperatorsMixin,
):
block_class: type = NumPyBlock
def __init__(
self,
data: Sequence,
*args,
**kwargs,
):
if isinstance(data, BlockView):
if not isinstance(data.block, NumPyBlock):
raise ValueError(
"Cannot create `NumpyArrayColumn` from a `BlockView` not "
"referencing a `NumpyBlock`."
)
elif not isinstance(data, np.memmap) and not isinstance(data, np.ndarray):
if len(data) > 0 and isinstance(data[0], np.ndarray):
data = np.stack(data)
else:
data = np.asarray(data)
super(NumPyTensorColumn, self).__init__(data=data, *args, **kwargs)
# TODO (sabri): need to support str here
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc: np.ufunc, method, *inputs, **kwargs):
out = kwargs.get("out", ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (NumPyTensorColumn,)) and not (
# support for at index
method == "at"
and isinstance(x, list)
):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(
x.data if isinstance(x, NumPyTensorColumn) else x for x in inputs
)
if out:
kwargs["out"] = tuple(
x.data if isinstance(x, NumPyTensorColumn) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# multiple return values
return tuple(type(self)(x) for x in result)
elif method == "at":
# no return value
return None
else:
# one return value
return self._clone(data=result)
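    # Behavior sketch for the ufunc protocol above (illustrative):
    #
    #     col = NumPyTensorColumn(np.array([1, 2, 3]))
    #     col + 1       # dispatches np.add and returns a NumPyTensorColumn
    #     np.sqrt(col)  # any ufunc operates on the wrapped ndarray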
def __getattr__(self, name):
try:
out = getattr(object.__getattribute__(self, "data"), name)
if isinstance(out, Callable):
return getattr_decorator(out)
else:
return out
except AttributeError:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
@classmethod
def from_array(cls, data: np.ndarray, *args, **kwargs):
return cls(data=data, *args, **kwargs)
def _set_batch(self, indices, values):
self._data[indices] = values
def _get(self, index, materialize: bool = True):
index = NumPyBlock._convert_index(index)
data = self._data[index]
if self._is_batch_index(index):
            # return a column view for batch indices
return self._clone(data=data)
else:
return data
def _copy_data(self) -> object:
return self._data.copy()
def _view_data(self) -> object:
return self._data
@property
def is_mmap(self):
# important to check if .base is a python mmap object, since a view of a mmap
# is also a memmap object, but should not be symlinked or copied
if len(self.data.shape) == 1:
            # if the data is a 1D array, then there is a level of indirection to
            # the base object because we did a reshape to add an extra dimension
return isinstance(self.data, np.memmap) and isinstance(
self._block.data.base.base, mmap
)
else:
return isinstance(self.data, np.memmap) and isinstance(
self._block.data.base, mmap
)
def _write_data(self, path: str, link: bool = True) -> None:
path = os.path.join(path, "data.npy")
# important to check if .base is a python mmap object, since a view of a mmap
# is also a memmap object, but should not be symlinked
if self.is_mmap:
if link:
os.symlink(self.data.filename, path)
else:
shutil.copy(self.data.filename, path)
else:
np.save(path, self.data)
@staticmethod
def _read_data(path: str, mmap=False, *args, **kwargs) -> np.ndarray:
data_path = os.path.join(path, "data.npy")
if mmap:
return np.load(data_path, mmap_mode="r")
return np.load(data_path)
@classmethod
def concat(cls, columns: Sequence[NumPyTensorColumn]):
data = np.concatenate([c.data for c in columns])
return columns[0]._clone(data=data)
def is_equal(self, other: Column) -> bool:
if other.__class__ != self.__class__:
return False
return np.array_equal(self.data, other.data, equal_nan=True)
@classmethod
def get_writer(cls, mmap: bool = False, template: Column = None):
if mmap:
from meerkat.writers.numpy_writer import NumpyMemmapWriter
return NumpyMemmapWriter()
else:
return ConcatWriter(template=template, output_type=NumPyTensorColumn)
def _repr_cell(self, index) -> object:
if len(self.shape) > 1:
if len(self.shape) == 2 and self.shape[1] < 5:
return self[index]
return f"np.ndarray(shape={self.shape[1:]})"
else:
return self[index]
def _get_default_formatters(self):
from meerkat.interactive.formatter import (
NumberFormatterGroup,
TensorFormatterGroup,
TextFormatterGroup,
)
if len(self) == 0:
return NumberFormatterGroup()
if len(self.shape) > 1:
return TensorFormatterGroup(dtype=str(self.dtype))
if self.dtype.type is np.str_:
return TextFormatterGroup()
cell = self.data[0]
if isinstance(cell, np.generic):
return NumberFormatterGroup(dtype=type(cell.item()).__name__)
return TextFormatterGroup()
def _is_valid_primary_key(self):
if self.dtype.kind == "f":
# can't use floats as primary keys
return False
if len(self.shape) != 1:
# can't use multidimensional arrays as primary keys
return False
return len(np.unique(self.data)) == len(self)
def _keyidx_to_posidx(self, keyidx: Any) -> int:
# TODO(sabri): when we implement indices, we should use them here if we have
# one
where_result = np.where(self.data == keyidx)
if len(where_result[0]) == 0:
raise KeyError(f"keyidx {keyidx} not found in column.")
posidx = where_result[0][0]
return int(posidx)
def _keyidxs_to_posidxs(self, keyidxs: Sequence[Any]) -> np.ndarray:
posidxs = np.where(np.isin(self.data, keyidxs))[0]
diff = np.setdiff1d(keyidxs, self.data[posidxs])
if len(diff) > 0:
raise KeyError(f"Key indexes {diff} not found in column.")
return posidxs
def sort(
self,
ascending: Union[bool, List[bool]] = True,
axis: int = -1,
kind: str = "quicksort",
order: Union[str, List[str]] = None,
) -> NumPyTensorColumn:
"""Return a sorted view of the column.
Args:
            ascending (Union[bool, List[bool]]): Whether to sort in ascending or
                descending order. Defaults to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
Column: A view of the column with the sorted data.
"""
# calls argsort() function to retrieve ordered indices
sorted_index = self.argsort(ascending=ascending, kind=kind)
return self[sorted_index]
def argsort(
self, ascending: bool = True, kind: str = "quicksort"
) -> NumPyTensorColumn:
"""Return indices that would sorted the column.
Args:
ascending (bool): Whether to sort in ascending or
descending order.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
NumpySeriesColumn: A view of the column with the sorted data.
For now! Raises error when shape of input array is more than one error.
"""
num_columns = len(np.shape(self))
        # use lexsort when the array has more than one dimension
if num_columns > 1:
idxs = np.lexsort(self.data)
else:
idxs = np.argsort(self.data, axis=0, kind=kind, order=None)
if not ascending:
idxs = idxs[::-1]
return idxs
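    # Illustrative example of `sort`/`argsort` above (values are hypothetical):
    #
    #     col = NumPyTensorColumn(np.array([3, 1, 2]))
    #     col.argsort()              # -> indices [1, 2, 0]
    #     col.sort(ascending=False)  # -> NumPyTensorColumn([3, 2, 1])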
def to_torch(self) -> "torch.Tensor":
return torch.tensor(self.data)
def to_pandas(self, allow_objects: bool = True) -> pd.Series:
if len(self.shape) == 1:
return pd.Series(self.data)
elif allow_objects:
# can only create a 1-D series
return pd.Series([self[int(idx)] for idx in range(len(self))])
else:
return super().to_pandas()
def to_arrow(self) -> pa.Array:
if len(self.shape) == 1:
return pa.array(self.data)
else:
return super().to_arrow()
def to_numpy(self) -> np.ndarray:
return self.data
def to_json(self) -> List[Any]:
return self.data.tolist()
@classmethod
def from_npy(
cls,
path,
mmap_mode=None,
allow_pickle=False,
fix_imports=True,
encoding="ASCII",
):
data = np.load(
path,
mmap_mode=mmap_mode,
allow_pickle=allow_pickle,
fix_imports=fix_imports,
encoding=encoding,
)
return cls(data)
def mean(
self, axis: int = None, keepdims: bool = False, **kwargs
) -> NumPyTensorColumn:
try:
return self.data.mean(axis=axis, keepdims=keepdims, **kwargs)
except (UFuncTypeError, TypeError):
raise AggregationError(
"Cannot apply mean aggregation to NumPy array with "
f" dtype '{self.data.dtype}'."
)
|
meerkat-main
|
meerkat/columns/tensor/numpy.py
|
from __future__ import annotations
import abc
import functools
import logging
import os
from typing import TYPE_CHECKING, Callable, List, Mapping, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from yaml.representer import Representer
from meerkat.block.abstract import BlockView
from meerkat.block.torch_block import TorchBlock
from meerkat.mixins.cloneable import CloneableMixin
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.writers.concat_writer import ConcatWriter
from meerkat.writers.numpy_writer import NumpyMemmapWriter
from ..abstract import Column
from .abstract import TensorColumn
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
Representer.add_representer(abc.ABCMeta, Representer.represent_name)
Columnable = Union[Sequence, np.ndarray, pd.Series, "torch.Tensor"]
logger = logging.getLogger(__name__)
def getattr_decorator(fn: Callable):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
out = fn(*args, **kwargs)
if isinstance(out, torch.Tensor):
if out.ndim == 0:
return out.clone().detach()
return TorchTensorColumn(out)
else:
return out
return wrapper
def _as_tensor(data: Union["torch.Tensor", np.ndarray, pd.Series]) -> "torch.Tensor":
"""Overloaded as_tensor function to support other data types."""
if not isinstance(data, (np.ndarray, torch.Tensor)):
data = np.asarray(data)
return torch.as_tensor(data)
class TorchTensorColumn(
np.lib.mixins.NDArrayOperatorsMixin,
TensorColumn,
):
block_class: type = TorchBlock
def __init__(
self,
data: Sequence = None,
*args,
**kwargs,
):
if isinstance(data, BlockView):
if not isinstance(data.block, TorchBlock):
raise ValueError(
"Cannot create `TensorColumn` from a `BlockView` not "
"referencing a `TensorBlock`."
)
elif data is not None and not isinstance(data, TorchTensorColumn):
if isinstance(data, Sequence) and len(data) > 0:
# TODO: We need to apply this check and do proper conversion of every
# element in the sequence.
# e.g. a list of mixed ndarrays and torch tensors
# [np.array, torch.Tensor] should work.
if torch.is_tensor(data[0]):
# np.asarray supports a list of numpy arrays (it simply stacks them
# before putting them into an array) but torch.as_tensor does not.
# we want to support this for consistency and because it is
# important for map
data = torch.stack(data)
else:
data = np.asarray(data)
data = _as_tensor(data)
super(TorchTensorColumn, self).__init__(data=data, *args, **kwargs)
def __torch_function__(self, func, types, args=(), kwargs=None):
def _process_arg(arg):
if isinstance(arg, type(self)):
return arg.data
elif isinstance(arg, (List, Tuple)):
# Specifically use list and tuple because these are
# expected types for arguments in torch operations.
return type(arg)([_process_arg(_a) for _a in arg])
elif isinstance(arg, Mapping):
# All mappings can be converted to dictionaries
# when processed by torch operations.
return {_k: _process_arg(_a) for _k, _a in arg.items()}
else:
return arg
def _process_ret(ret):
# This function may need to be refactored into an instance method
# because the from_data implementation is different for each
# class.
if isinstance(ret, torch.Tensor):
if ret.ndim == 0:
return ret.clone().detach()
return self.from_data(ret)
elif isinstance(ret, (List, Tuple)):
return type(ret)([_process_arg(_a) for _a in ret])
elif isinstance(ret, Mapping):
return {_k: _process_arg(_a) for _k, _a in ret.items()}
else:
return ret
if kwargs is None:
kwargs = {}
args = [_process_arg(a) for a in args]
ret = func(*args, **kwargs)
return _process_ret(ret)
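    # Sketch of the `__torch_function__` protocol above (illustrative):
    #
    #     col = TorchTensorColumn(torch.arange(3))
    #     torch.cat([col, col])  # column args are unwrapped to tensors and the
    #                            # tensor result is re-wrapped as a TorchTensorColumn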
def __getattr__(self, name):
try:
out = getattr(object.__getattribute__(self, "data"), name)
if isinstance(out, Callable):
return getattr_decorator(out)
else:
return out
except AttributeError:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
def _get(self, index, materialize: bool = True):
index = self.block_class._convert_index(index)
data = self._data[index]
if self._is_batch_index(index):
            # return a column view for batch indices
return self._clone(data=data)
else:
return data
def _set_batch(self, indices, values):
self._data[indices] = values
@classmethod
def concat(cls, columns: Sequence[TorchTensorColumn]):
data = torch.cat([c.data for c in columns])
if issubclass(cls, CloneableMixin):
return columns[0]._clone(data=data)
return cls(data)
@classmethod
def get_writer(cls, mmap: bool = False, template: Column = None):
if mmap:
return NumpyMemmapWriter()
else:
return ConcatWriter(template=template, output_type=TorchTensorColumn)
def _repr_cell(self, index) -> object:
if len(self.shape) > 1:
if len(self.shape) == 2 and self.shape[1] < 5:
return self[index]
return f"torch.Tensor(shape={self.shape[1:]})"
else:
return self[index]
def _get_default_formatters(self) -> Callable:
from meerkat.interactive.formatter import (
NumberFormatterGroup,
TensorFormatterGroup,
TextFormatterGroup,
)
if len(self) == 0:
return NumberFormatterGroup()
if len(self.shape) > 1:
return TensorFormatterGroup(dtype=str(self.dtype))
cell = self.data[0]
if isinstance(cell, np.generic):
return NumberFormatterGroup(dtype=type(cell.item()).__name__)
return TextFormatterGroup()
@classmethod
def from_data(cls, data: Union[Columnable, Column]):
"""Convert data to an EmbeddingColumn."""
if torch.is_tensor(data):
return cls(data)
else:
return super(TorchTensorColumn, cls).from_data(data)
def _copy_data(self) -> "torch.Tensor":
return self._data.clone()
def _view_data(self) -> object:
return self._data
def _write_data(self, path: str) -> None:
# Saving all cell data in a single pickle file
torch.save(self.data, os.path.join(path, "data.pt"))
@staticmethod
def _read_data(path: str) -> "torch.Tensor":
return torch.load(os.path.join(path, "data.pt"))
def sort(
self, ascending: Union[bool, List[bool]] = True, kind: str = "quicksort"
) -> TorchTensorColumn:
"""Return a sorted view of the column.
Args:
            ascending (Union[bool, List[bool]]): Whether to sort in ascending or
                descending order. Defaults to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
Column: A view of the column with the sorted data.
"""
# calls argsort() function to retrieve ordered indices
sorted_index = self.argsort(ascending=ascending, kind=kind)
return self[sorted_index]
def argsort(
self, ascending: Union[bool, List[bool]] = True, kind: str = "quicksort"
) -> TorchTensorColumn:
"""Return indices that would sorted the column.
Args:
ascending (Union[bool, List[bool]]): Whether to sort in ascending or
descending order. If a list, must be the same length as `by`. Defaults
to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
TensorColumn: A view of the column with the sorted data.
For now! Raises error when shape of input array is more than one error.
"""
try:
self.size()[1]
except IndexError: # Case 1: The array only has one column
# returns indices of descending order of array
if not ascending:
return torch.argsort(self.data, dim=-1, descending=True)
# returns indices of ascending order of array
return torch.argsort(self.data, dim=-1, descending=False)
else: # Case 2: The array has more than one column, raise error.
raise Exception("No implementation for array with more than one column.")
def is_equal(self, other: Column) -> bool:
return (other.__class__ == self.__class__) and (self.data == other.data).all()
def to_tensor(self) -> "torch.Tensor":
return self.data
def to_pandas(self, allow_objects: bool = True) -> pd.Series:
if len(self.shape) == 1:
return pd.Series(self.to_numpy())
elif allow_objects:
# can only create a 1-D series
data = self.to_numpy()
return pd.Series([data[int(idx)] for idx in range(len(self))])
else:
            # fall back to the base implementation
return super().to_pandas()
def to_numpy(self) -> pd.Series:
return self.data.detach().cpu().numpy()
def to_arrow(self) -> pa.Array:
if len(self.shape) == 1:
return pa.array(self.to_numpy())
else:
return super().to_arrow()
def mean(
self, dim: int = None, keepdim: bool = False, *args, **kwargs
) -> "torch.Tensor":
# torch only supports mean for floating point dtypes
if self.data.dtype not in [
torch.float,
torch.double,
torch.cfloat,
torch.cdouble,
torch.half,
torch.bfloat16,
]:
data = self.data.float()
else:
data = self.data
if dim is not None:
return data.mean(*args, dim=dim, keepdim=keepdim, **kwargs)
else:
return data.mean(*args, **kwargs).numpy().item()
|
meerkat-main
|
meerkat/columns/tensor/torch.py
|
from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, List, Set, Tuple, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.core.accessor import CachedAccessor
from meerkat.block.abstract import BlockView
from meerkat.block.arrow_block import ArrowBlock
from meerkat.block.pandas_block import PandasBlock
from meerkat.columns.tensor.abstract import TensorColumn
from meerkat.tools.lazy_loader import LazyLoader
from ..abstract import Column
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
from meerkat.dataframe import DataFrame
ScalarColumnTypes = Union[np.ndarray, "torch.TensorType", pd.Series, List]
class StringMethods:
def __init__(self, data: Column):
self.column = data
def len(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function("len", _namespace="str", **kwargs)
# predicate str methods ScalarColumn of bools
def isalnum(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isalnum", _namespace="str", **kwargs
)
def isalpha(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isalpha", _namespace="str", **kwargs
)
def isdecimal(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isdecimal", _namespace="str", **kwargs
)
def isdigit(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isdigit", _namespace="str", **kwargs
)
def islower(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"islower", _namespace="str", **kwargs
)
def isupper(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isupper", _namespace="str", **kwargs
)
def isnumeric(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isnumeric", _namespace="str", **kwargs
)
def isspace(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"isspace", _namespace="str", **kwargs
)
def istitle(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"istitle", _namespace="str", **kwargs
)
def center(self, width: int, fillchar: str = " ", **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"center", _namespace="str", width=width, fillchar=fillchar, **kwargs
)
# transform str methods
def capitalize(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"capitalize", _namespace="str", **kwargs
)
def lower(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function("lower", _namespace="str", **kwargs)
def upper(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function("upper", _namespace="str", **kwargs)
def swapcase(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"swapcase", _namespace="str", **kwargs
)
def strip(self, to_strip: str = None, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"strip", _namespace="str", to_strip=to_strip, **kwargs
)
def lstrip(self, to_strip: str = None, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"lstrip", _namespace="str", to_strip=to_strip, **kwargs
)
def rstrip(self, to_strip: str = None, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"rstrip", _namespace="str", to_strip=to_strip, **kwargs
)
def replace(
self, pat: str, repl: str, n: int = -1, regex: bool = False, **kwargs
) -> ScalarColumn:
return self.column._dispatch_unary_function(
"replace", _namespace="str", pat=pat, repl=repl, n=n, regex=regex, **kwargs
)
def title(self, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function("title", _namespace="str", **kwargs)
def split(
self, pat: str = None, n: int = -1, regex: bool = False, **kwargs
) -> "DataFrame":
raise NotImplementedError()
def rsplit(
self, pat: str = None, n: int = -1, regex: bool = False, **kwargs
) -> "DataFrame":
raise NotImplementedError()
def startswith(self, pat: str, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"startswith", _namespace="str", pat=pat, **kwargs
)
def contains(self, pat: str, case: bool = True, regex: bool = True) -> ScalarColumn:
return self.column._dispatch_unary_function(
"contains", _namespace="str", pat=pat, case=case, regex=regex
)
def extract(self, pat: str, **kwargs) -> "DataFrame":
return self.column._dispatch_unary_function(
"extract", _namespace="str", pat=pat, **kwargs
)
class ScalarColumn(Column):
str = CachedAccessor("str", StringMethods)
def __new__(cls, data: ScalarColumnTypes = None, backend: str = None):
from .arrow import ArrowScalarColumn
from .pandas import PandasScalarColumn
if (cls is not ScalarColumn) or (data is None):
return super().__new__(cls)
backends = {"arrow": ArrowScalarColumn, "pandas": PandasScalarColumn}
if backend is not None:
if backend not in backends:
raise ValueError(
f"Cannot create `ScalarColumn` with backend '{backend}'. "
f"Expected one of {list(backends.keys())}"
)
else:
return super().__new__(backends[backend])
if isinstance(data, BlockView):
if isinstance(data.block, PandasBlock):
return super().__new__(PandasScalarColumn)
elif isinstance(data.block, ArrowBlock):
return super().__new__(ArrowScalarColumn)
else:
raise ValueError(
f"Cannot create `ScalarColumn` from object of type {type(data)}."
)
if isinstance(data, (np.ndarray, torch.TensorType, pd.Series, List, Tuple)):
return super().__new__(PandasScalarColumn)
elif isinstance(data, pa.Array):
return super().__new__(ArrowScalarColumn)
elif isinstance(data, TensorColumn) and len(data.shape) == 1:
return super().__new__(PandasScalarColumn)
elif isinstance(data, ScalarColumn):
return data
else:
raise ValueError(
f"Cannot create `ScalarColumn` from object of type {type(data)}."
)
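    # Sketch of the backend dispatch in `__new__` above (illustrative):
    #
    #     ScalarColumn([1, 2, 3])            # dispatches to PandasScalarColumn
    #     ScalarColumn(pa.array([1, 2, 3]))  # dispatches to ArrowScalarColumn
    #
    # Passing `backend="pandas"` or `backend="arrow"` forces a particular subclass.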
def _dispatch_unary_function(
self, compute_fn: str, _namespace: str = None, **kwargs
):
raise NotImplementedError()
@property
def dtype(self, **kwargs) -> Union[pa.DataType, np.dtype]:
raise NotImplementedError()
# aggregation functions
@abstractmethod
def _dispatch_aggregation_function(self, compute_fn: str, **kwargs):
raise NotImplementedError()
def mean(self, skipna: bool = True, **kwargs) -> float:
return self._dispatch_aggregation_function("mean", skipna=skipna, **kwargs)
def median(self, skipna: bool = True, **kwargs) -> Any:
return self._dispatch_aggregation_function("median", skipna=skipna, **kwargs)
def mode(self, **kwargs) -> ScalarColumn:
return self._dispatch_aggregation_function("mode", **kwargs)
def var(self, ddof: int = 1, **kwargs) -> ScalarColumn:
return self._dispatch_aggregation_function("var", ddof=ddof, **kwargs)
def std(self, ddof: int = 1, **kwargs) -> ScalarColumn:
return self._dispatch_aggregation_function("std", ddof=ddof, **kwargs)
def min(self, skipna: bool = True, **kwargs) -> ScalarColumn:
return self._dispatch_aggregation_function("min", skipna=skipna, **kwargs)
def max(self, skipna: bool = True, **kwargs) -> ScalarColumn:
return self._dispatch_aggregation_function("max", skipna=skipna, **kwargs)
def sum(self, skipna: bool = True, **kwargs) -> Any:
return self._dispatch_aggregation_function("sum", skipna=skipna, **kwargs)
def product(self, skipna: bool = True, **kwargs) -> Any:
return self._dispatch_aggregation_function("product", skipna=skipna, **kwargs)
def any(self, skipna: bool = True, **kwargs) -> Any:
return self._dispatch_aggregation_function("any", skipna=skipna, **kwargs)
def all(self, skipna: bool = True, **kwargs) -> Any:
return self._dispatch_aggregation_function("all", skipna=skipna, **kwargs)
def unique(self, **kwargs) -> ScalarColumn:
return self._dispatch_unary_function("unique", **kwargs)
# arithmetic functions
def _dispatch_arithmetic_function(
self, other, compute_fn: str, right: bool, **kwargs
):
raise NotImplementedError()
def __add__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "add", right=False)
def __radd__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "add", right=True)
def __sub__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "sub", right=False)
def __rsub__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "sub", right=True)
def __mul__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "mul", right=False)
def __rmul__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "mul", right=True)
def __truediv__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "truediv", right=False)
def __rtruediv__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "truediv", right=True)
def __floordiv__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "floordiv", right=False)
def __rfloordiv__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "floordiv", right=True)
def __mod__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "mod", right=False)
def __rmod__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "mod", right=True)
def __pow__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "pow", right=False)
def __rpow__(self, other: ScalarColumn):
return self._dispatch_arithmetic_function(other, "pow", right=True)
# comparison functions
def _dispatch_comparison_function(self, other, compute_fn: str, **kwargs):
raise NotImplementedError()
def __eq__(self, other: ScalarColumn):
return self._dispatch_comparison_function(other, "eq")
def __ne__(self, other: ScalarColumn):
return self._dispatch_comparison_function(other, "ne")
def __lt__(self, other: ScalarColumn):
return self._dispatch_comparison_function(other, "lt")
def __le__(self, other: ScalarColumn):
return self._dispatch_comparison_function(other, "le")
def __gt__(self, other: ScalarColumn):
return self._dispatch_comparison_function(other, "gt")
def __ge__(self, other: ScalarColumn):
return self._dispatch_comparison_function(other, "ge")
# logical functions
def _dispatch_logical_function(self, other, compute_fn: str, **kwargs):
raise NotImplementedError()
def __and__(self, other: ScalarColumn):
return self._dispatch_logical_function(other, "and")
def __or__(self, other: ScalarColumn):
return self._dispatch_logical_function(other, "or")
def __invert__(self):
return self._dispatch_logical_function(None, "invert")
def __xor__(self, other: ScalarColumn):
return self._dispatch_logical_function(other, "xor")
# containment functions
def isin(self, values: Union[List, Set], **kwargs) -> ScalarColumn:
raise NotImplementedError()
def isna(self, **kwargs) -> ScalarColumn:
return self._dispatch_unary_function("isna", **kwargs)
def isnull(self, **kwargs) -> ScalarColumn:
return self._dispatch_unary_function("isnull", **kwargs)
|
meerkat-main
|
meerkat/columns/scalar/abstract.py
|
from __future__ import annotations
import abc
import functools
import logging
import numbers
import os
from typing import TYPE_CHECKING, Any, Callable, List, Sequence, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.indexes.accessors import (
CombinedDatetimelikeProperties,
DatetimeProperties,
PeriodProperties,
TimedeltaProperties,
)
from yaml.representer import Representer
from meerkat.block.abstract import BlockView
from meerkat.block.pandas_block import PandasBlock
from meerkat.columns.abstract import Column
from meerkat.interactive.formatter.base import BaseFormatter
from meerkat.mixins.aggregate import AggregationError
from meerkat.tools.lazy_loader import LazyLoader
from .abstract import ScalarColumn, StringMethods
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
from meerkat.dataframe import DataFrame
Representer.add_representer(abc.ABCMeta, Representer.represent_name)
logger = logging.getLogger(__name__)
def getattr_decorator(fn: Callable):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
out = fn(*args, **kwargs)
if isinstance(out, pd.Series):
return PandasScalarColumn(out)
elif isinstance(out, pd.DataFrame):
from meerkat import DataFrame
# column names must be str in meerkat
out = out.rename(mapper=str, axis="columns")
return DataFrame.from_pandas(out)
else:
return out
return wrapper
class _ReturnColumnMixin:
def __getattribute__(self, name):
if name == "__class__":
# This is needed to avoid _pickle.PicklingError: args[0] from __newobj__
# args has the wrong class when pickling
return super().__getattribute__(name)
try:
attr = super().__getattribute__(name)
if isinstance(attr, Callable):
return getattr_decorator(attr)
elif isinstance(attr, pd.Series):
return PandasScalarColumn(attr)
elif isinstance(attr, pd.DataFrame):
from meerkat import DataFrame
return DataFrame.from_pandas(attr)
else:
return attr
except AttributeError:
raise AttributeError(f"object has no attribute '{name}'")
# class _MeerkatStringMethods(_ReturnColumnMixin, StringMethods):
# def __init__(self, data: Column):
# super().__init__(data.data)
class PandasStringMethods(StringMethods):
def split(
self, pat: str = None, n: int = -1, regex: bool = False, **kwargs
) -> "DataFrame":
from meerkat import DataFrame
return DataFrame(
{
str(name): self.column._clone(data=col)
for name, col in self.column.data.str.split(
" ", n=n, regex=regex, expand=True
).items()
}
)
def rsplit(
self, pat: str = None, n: int = -1, regex: bool = False, **kwargs
) -> "DataFrame":
from meerkat import DataFrame
if regex is True:
raise NotImplementedError("regex=True is not supported for rsplit")
return DataFrame(
{
str(name): self.column._clone(data=col)
for name, col in self.column.data.str.rsplit(
" ", n=n, expand=True
).items()
}
)
def extract(self, pat: str, **kwargs) -> "DataFrame":
from meerkat import DataFrame
return DataFrame(
{
str(name): self.column._clone(data=col)
for name, col in self.column.data.str.extract(
pat, expand=True, **kwargs
).items()
}
)
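# Usage sketch for the pandas-backed string accessor above (illustrative values):
#
#     col = PandasScalarColumn(["a b", "c d"])
#     col.str.split()  # -> DataFrame with string-named columns "0" and "1"
#     col.str.extract(r"(?P<first>\w+) (?P<second>\w+)")  # -> DataFrame of groups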
class _MeerkatDatetimeProperties(_ReturnColumnMixin, DatetimeProperties):
pass
class _MeerkatTimedeltaProperties(_ReturnColumnMixin, TimedeltaProperties):
pass
class _MeerkatPeriodProperties(_ReturnColumnMixin, PeriodProperties):
pass
class _MeerkatCategoricalAccessor(_ReturnColumnMixin, CategoricalAccessor):
pass
class _MeerkatCombinedDatetimelikeProperties(CombinedDatetimelikeProperties):
def __new__(cls, data: pd.Series):
# CombinedDatetimelikeProperties isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
if not isinstance(data, ABCSeries):
raise TypeError(
f"cannot convert an object of type {type(data)} to a datetimelike index"
)
orig = data if is_categorical_dtype(data.dtype) else None
if orig is not None:
data = data._constructor(
orig.array,
name=orig.name,
copy=False,
dtype=orig._values.categories.dtype,
)
if is_datetime64_dtype(data.dtype):
obj = _MeerkatDatetimeProperties(data, orig)
elif is_datetime64tz_dtype(data.dtype):
obj = _MeerkatDatetimeProperties(data, orig)
elif is_timedelta64_dtype(data.dtype):
obj = _MeerkatTimedeltaProperties(data, orig)
elif is_period_dtype(data.dtype):
obj = _MeerkatPeriodProperties(data, orig)
else:
raise AttributeError("Can only use .dt accessor with datetimelike values")
return obj
class PandasScalarColumn(
ScalarColumn,
np.lib.mixins.NDArrayOperatorsMixin,
):
block_class: type = PandasBlock
_HANDLED_TYPES = (np.ndarray, numbers.Number, str)
dt = CachedAccessor("dt", _MeerkatCombinedDatetimelikeProperties)
cat = CachedAccessor("cat", _MeerkatCategoricalAccessor)
str = CachedAccessor("str", PandasStringMethods)
# str = CachedAccessor("str", _MeerkatStringMethods)
# plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
# sparse = CachedAccessor("sparse", SparseAccessor)
def _set_data(self, data: object):
if isinstance(data, PandasScalarColumn):
# unpack series if it is a PandasScalarColumn
data = data.data
if isinstance(data, BlockView):
if not isinstance(data.block, PandasBlock):
raise ValueError(
"Cannot create `PandasSeriesColumn` from a `BlockView` not "
"referencing a `PandasBlock`."
)
elif isinstance(data, pd.Series):
# Force the index to be contiguous so that comparisons between different
# pandas series columns are always possible.
data = data.reset_index(drop=True)
else:
data = pd.Series(data)
super(PandasScalarColumn, self)._set_data(data)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get("out", ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasScalarColumn,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(
x.data if isinstance(x, PandasScalarColumn) else x for x in inputs
)
if out:
kwargs["out"] = tuple(
x.data if isinstance(x, PandasScalarColumn) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# multiple return values
return tuple(type(self)(x) for x in result) # pragma: no cover
elif method == "at":
# no return value
return None # pragma: no cover
else:
# one return value
return type(self)(result)
def __getattr__(self, name):
if name == "__getstate__" or name == "__setstate__":
# for pickle, it's important to raise an attribute error if __getstate__
# or __setstate__ is called. Without this, pickle will use the __setstate__
# and __getstate__ of the underlying pandas Series
raise AttributeError()
try:
out = getattr(object.__getattribute__(self, "data"), name)
if isinstance(out, Callable):
return getattr_decorator(out)
else:
return out
except AttributeError:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
@classmethod
def from_array(cls, data: np.ndarray, *args, **kwargs):
return cls(data=data, *args, **kwargs)
def _get(self, index, materialize: bool = True):
index = self.block_class._convert_index(index)
data = self._data.iloc[index]
if self._is_batch_index(index):
            # return a column view for batch indices
return self._clone(data=data)
else:
return data
def _set_cell(self, index, value):
self._data.iloc[index] = value
def _set_batch(self, indices, values):
self._data.iloc[indices] = values
@classmethod
def concat(cls, columns: Sequence[PandasScalarColumn]):
data = pd.concat([c.data for c in columns])
return columns[0]._clone(data=data)
def _write_data(self, path: str) -> None:
data_path = os.path.join(path, "data.pd")
self.data.to_pickle(data_path)
@staticmethod
def _read_data(
path: str,
):
data_path = os.path.join(path, "data.pd")
# Load in the data
return pd.read_pickle(data_path)
def _repr_cell(self, index) -> object:
return self[index]
def _get_default_formatters(self) -> BaseFormatter:
# can't implement this as a class level property because then it will treat
# the formatter as a method
from meerkat.interactive.formatter import (
BooleanFormatterGroup,
NumberFormatterGroup,
TextFormatterGroup,
)
if len(self) == 0:
return super()._get_default_formatters()
if self.dtype == object:
return TextFormatterGroup()
if self.dtype == pd.StringDtype:
return TextFormatterGroup()
cell = self[0]
if isinstance(cell, np.generic):
if isinstance(cell, np.bool_):
return BooleanFormatterGroup()
return NumberFormatterGroup(dtype=type(cell.item()).__name__)
return super()._get_default_formatters()
def _is_valid_primary_key(self):
return self.data.is_unique
def _keyidx_to_posidx(self, keyidx: Any) -> int:
# TODO(sabri): when we implement indices, we should use them here if we have
# one
where_result = np.where(self.data == keyidx)
if len(where_result[0]) == 0:
raise KeyError(f"keyidx {keyidx} not found in column.")
posidx = where_result[0][0]
return int(posidx)
def _keyidxs_to_posidxs(self, keyidxs: Sequence[Any]) -> np.ndarray:
# FIXME: this implementation is very slow. This should be done with indices
return np.array([self._keyidx_to_posidx(keyidx) for keyidx in keyidxs])
def sort(
self, ascending: Union[bool, List[bool]] = True, kind: str = "quicksort"
) -> PandasScalarColumn:
"""Return a sorted view of the column.
Args:
            ascending (Union[bool, List[bool]]): Whether to sort in ascending or
                descending order. Defaults to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
AbstractColumn: A view of the column with the sorted data.
"""
# calls argsort() function to retrieve ordered indices
sorted_index = self.argsort(ascending, kind)
return self[sorted_index]
def argsort(
self, ascending: bool = True, kind: str = "quicksort"
) -> PandasScalarColumn:
"""Return indices that would sorted the column.
Args:
ascending (Union[bool, List[bool]]): Whether to sort in ascending or
descending order. If a list, must be the same length as `by`. Defaults
to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
PandasSeriesColumn: A view of the column with the sorted data.
For now! Raises error when shape of input array is more than one error.
"""
num_columns = len(self.shape)
# Raise error if array has more than one column
if num_columns > 1:
raise Exception("No implementation for array with more than one column.")
# returns indices of descending order of array
if not ascending:
return (-1 * self.data).argsort(kind=kind)
# returns indices of ascending order of array
return self.data.argsort(kind=kind)
def to_tensor(self) -> "torch.Tensor":
"""Use `column.to_tensor()` instead of `torch.tensor(column)`, which is
very slow."""
dtype = self.data.values.dtype
if not np.issubdtype(dtype, np.number):
raise ValueError(
f"Cannot convert `PandasSeriesColumn` with dtype={dtype} to tensor."
)
# TODO (Sabri): understand why `torch.tensor(column)` is so slow
return torch.tensor(self.data.values)
def to_numpy(self) -> "torch.Tensor":
return self.values
def to_pandas(self, allow_objects: bool = False) -> pd.Series:
return self.data.reset_index(drop=True)
def to_arrow(self) -> pa.Array:
return pa.array(self.data.values)
def is_equal(self, other: Column) -> bool:
if other.__class__ != self.__class__:
return False
return (self.data.values == other.data.values).all()
def to_json(self) -> List[Any]:
return self.data.tolist()
@property
def dtype(self) -> Any:
return self.data.dtype
def equals(self, other: Column) -> bool:
if other.__class__ != self.__class__:
return False
return self.data.equals(other.data)
def _dispatch_aggregation_function(self, compute_fn: str, **kwargs):
return getattr(self.data, compute_fn)(**kwargs)
def mean(self, skipna: bool = True, **kwargs):
try:
return self.data.mean(skipna=skipna, **kwargs)
except TypeError:
raise AggregationError(
"Cannot apply mean aggregation to Pandas Series with "
f" dtype '{self.data.dtype}'."
)
def _dispatch_arithmetic_function(
self, other: ScalarColumn, compute_fn: str, right: bool, **kwargs
):
if isinstance(other, Column):
assert isinstance(other, PandasScalarColumn)
other = other.data
if right:
compute_fn = f"r{compute_fn}"
return self._clone(
data=getattr(self.data, f"__{compute_fn}__")(other, **kwargs)
)
def _dispatch_comparison_function(
self, other: ScalarColumn, compute_fn: str, **kwargs
):
if isinstance(other, Column):
assert isinstance(other, PandasScalarColumn)
other = other.data
return self._clone(
data=getattr(self.data, f"__{compute_fn}__")(other, **kwargs)
)
def _dispatch_logical_function(
self, other: ScalarColumn, compute_fn: str, **kwargs
):
if isinstance(other, Column):
assert isinstance(other, PandasScalarColumn)
other = other.data
if other is None:
return self._clone(data=getattr(self.data, f"__{compute_fn}__")(**kwargs))
return self._clone(
data=getattr(self.data, f"__{compute_fn}__")(other, **kwargs)
)
def isin(self, values: Sequence[Any]) -> "PandasScalarColumn":
return self._clone(data=self.data.isin(values))
def _dispatch_unary_function(
self, compute_fn: str, _namespace: str = None, **kwargs
):
if _namespace is not None:
obj = getattr(self.data, _namespace)
else:
obj = self.data
return self._clone(data=getattr(obj, compute_fn)(**kwargs))
PandasSeriesColumn = PandasScalarColumn
|
meerkat-main
|
meerkat/columns/scalar/pandas.py
|
from __future__ import annotations
import os
import re
import warnings
from typing import TYPE_CHECKING, Any, List, Sequence, Set, Union
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
from pandas.core.accessor import CachedAccessor
from meerkat.block.abstract import BlockView
from meerkat.block.arrow_block import ArrowBlock
from meerkat.errors import ImmutableError
from meerkat.tools.lazy_loader import LazyLoader
from ..abstract import Column
from .abstract import ScalarColumn, StringMethods
if TYPE_CHECKING:
from meerkat import DataFrame
from meerkat.interactive.formatter.base import BaseFormatter
torch = LazyLoader("torch")
class ArrowStringMethods(StringMethods):
def center(self, width: int, fillchar: str = " ", **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"utf8_center", width=width, padding=fillchar, **kwargs
)
def extract(self, pat: str, **kwargs) -> "DataFrame":
from meerkat import DataFrame
# Pandas raises a value error if the pattern does not include a group
# but pyarrow does not. We check for this case and raise a value error.
if not re.search(r"\(\?P<\w+>", pat):
raise ValueError(
"Pattern does not contain capture group. Use '(?P<name>...)' instead"
)
struct_array = pc.extract_regex(self.column.data, pattern=pat, **kwargs)
result = {}
for field_index in range(struct_array.type.num_fields):
field = struct_array.type.field(field_index)
result[field.name] = self.column._clone(
pc.struct_field(struct_array, field.name)
)
return DataFrame(result)
def _split(
self, pat=None, n=-1, reverse: bool = False, regex: bool = False, **kwargs
) -> "DataFrame":
from meerkat import DataFrame
fn = pc.split_pattern_regex if regex else pc.split_pattern
list_array = fn(
self.column.data,
pattern=pat,
max_splits=n if n != -1 else None,
reverse=reverse,
**kwargs,
)
# need to find the max length of the list array
if n == -1:
n = pc.max(pc.list_value_length(list_array)).as_py() - 1
return DataFrame(
{
str(i): self.column._clone(
data=pc.list_flatten(
pc.list_slice(
list_array, start=i, stop=i + 1, return_fixed_size_list=True
)
)
)
for i in range(n + 1)
}
)
def split(
self, pat: str = None, n: int = -1, regex: bool = False, **kwargs
) -> "DataFrame":
return self._split(pat=pat, n=n, reverse=False, regex=regex, **kwargs)
def rsplit(
self, pat: str = None, n: int = -1, regex: bool = False, **kwargs
) -> "DataFrame":
return self._split(pat=pat, n=n, reverse=True, regex=regex, **kwargs)
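    # Sketch of the Arrow-backed split above (illustrative): the result is a
    # DataFrame whose columns are named "0", "1", ... up to the number of splits.
    #
    #     col = ArrowScalarColumn(["a-b-c", "d-e"])
    #     col.str.split("-", n=1)  # -> DataFrame with columns "0" and "1"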
def startswith(self, pat: str, **kwargs) -> ScalarColumn:
return self.column._dispatch_unary_function(
"starts_with", pattern=pat, **kwargs
)
def strip(self, to_strip: str = None, **kwargs) -> ScalarColumn:
if to_strip is None:
return self.column._dispatch_unary_function(
"utf8_trim_whitespace", **kwargs
)
else:
return self.column._dispatch_unary_function(
"utf8_strip", characters=to_strip, **kwargs
)
def lstrip(self, to_strip: str = None, **kwargs) -> ScalarColumn:
if to_strip is None:
return self.column._dispatch_unary_function(
"utf8_ltrim_whitespace", **kwargs
)
else:
return self.column._dispatch_unary_function(
"utf8_lstrip", characters=to_strip, **kwargs
)
def rstrip(self, to_strip: str = None, **kwargs) -> ScalarColumn:
if to_strip is None:
return self.column._dispatch_unary_function(
"utf8_rtrim_whitespace", **kwargs
)
else:
return self.column._dispatch_unary_function(
"utf8_rstrip", characters=to_strip, **kwargs
)
def replace(
self, pat: str, repl: str, n: int = -1, regex: bool = False, **kwargs
) -> ScalarColumn:
fn = pc.replace_substring_regex if regex else pc.replace_substring
return self.column._clone(
fn(
self.column.data,
pattern=pat,
replacement=repl,
max_replacements=n if n != -1 else None,
**kwargs,
)
)
def contains(self, pat: str, case: bool = True, regex: bool = True) -> ScalarColumn:
fn = pc.match_substring_regex if regex else pc.match_substring
return self.column._clone(
fn(
self.column.data,
pattern=pat,
ignore_case=not case,
)
)
class ArrowScalarColumn(ScalarColumn):
block_class: type = ArrowBlock
str = CachedAccessor("str", ArrowStringMethods)
def __init__(
self,
data: Sequence,
*args,
**kwargs,
):
if isinstance(data, BlockView):
if not isinstance(data.block, ArrowBlock):
raise ValueError(
"ArrowArrayColumn can only be initialized with ArrowBlock."
)
elif not isinstance(data, (pa.Array, pa.ChunkedArray)):
# Arrow cannot construct an array from a torch.Tensor.
if isinstance(data, torch.Tensor):
data = data.numpy()
data = pa.array(data)
super(ArrowScalarColumn, self).__init__(data=data, *args, **kwargs)
def _get(self, index, materialize: bool = True):
index = ArrowBlock._convert_index(index)
if isinstance(index, slice) or isinstance(index, int):
data = self._data[index]
elif index.dtype == bool:
data = self._data.filter(pa.array(index))
else:
data = self._data.take(index)
if self._is_batch_index(index):
return self._clone(data=data)
else:
# Convert to Python object for consistency with other ScalarColumn
# implementations.
return data.as_py()
def _set(self, index, value):
raise ImmutableError("ArrowArrayColumn is immutable.")
def _is_valid_primary_key(self):
try:
return len(self.unique()) == len(self)
except Exception as e:
warnings.warn(f"Unable to check if column is a valid primary key: {e}")
return False
def _keyidx_to_posidx(self, keyidx: Any) -> int:
"""Get the posidx of the first occurrence of the given keyidx. Raise a
key error if the keyidx is not found.
Args:
keyidx: The keyidx to search for.
Returns:
The posidx of the first occurrence of the given keyidx.
"""
posidx = pc.index(self.data, keyidx)
if posidx == -1:
raise KeyError(f"keyidx {keyidx} not found in column.")
return posidx.as_py()
def _keyidxs_to_posidxs(self, keyidxs: Sequence[Any]) -> np.ndarray:
# FIXME: this implementation is very slow. This should be done with indices
return np.array([self._keyidx_to_posidx(keyidx) for keyidx in keyidxs])
def _repr_cell(self, index) -> object:
return self.data[index]
def _get_default_formatters(self) -> BaseFormatter:
# can't implement this as a class level property because then it will treat
# the formatter as a method
from meerkat.interactive.formatter import (
NumberFormatterGroup,
TextFormatterGroup,
)
if len(self) == 0:
return super()._get_default_formatters()
if self.data.type == pa.string():
return TextFormatterGroup()
cell = self[0]
return NumberFormatterGroup(dtype=type(cell).__name__)
def is_equal(self, other: Column) -> bool:
if other.__class__ != self.__class__:
return False
return pc.all(pc.equal(self.data, other.data)).as_py()
@classmethod
def _state_keys(cls) -> Set:
return super()._state_keys()
def _write_data(self, path):
table = pa.Table.from_arrays([self.data], names=["0"])
ArrowBlock._write_table(os.path.join(path, "data.arrow"), table)
@staticmethod
def _read_data(path, mmap=False):
table = ArrowBlock._read_table(os.path.join(path, "data.arrow"), mmap=mmap)
return table["0"]
@classmethod
def concat(cls, columns: Sequence[ArrowScalarColumn]):
arrays = []
for c in columns:
if isinstance(c.data, pa.Array):
arrays.append(c.data)
elif isinstance(c.data, pa.ChunkedArray):
arrays.extend(c.data.chunks)
else:
raise ValueError(f"Unexpected type {type(c.data)}")
data = pa.concat_arrays(arrays)
return columns[0]._clone(data=data)
def to_numpy(self):
return self.data.to_numpy()
def to_tensor(self):
return torch.tensor(self.data.to_numpy())
def to_pandas(self, allow_objects: bool = False):
return self.data.to_pandas()
def to_arrow(self) -> pa.Array:
return self.data
def equals(self, other: Column) -> bool:
if other.__class__ != self.__class__:
return False
return pc.all(pc.equal(self.data, other.data)).as_py()
@property
def dtype(self) -> pa.DataType:
return self.data.type
KWARG_MAPPING = {"skipna": "skip_nulls"}
COMPUTE_FN_MAPPING = {
"var": "variance",
"std": "stddev",
"sub": "subtract",
"mul": "multiply",
"truediv": "divide",
"pow": "power",
"eq": "equal",
"ne": "not_equal",
"lt": "less",
"gt": "greater",
"le": "less_equal",
"ge": "greater_equal",
"isna": "is_nan",
"capitalize": "utf8_capitalize",
"center": "utf8_center",
"isalnum": "utf8_is_alnum",
"isalpha": "utf8_is_alpha",
"isdecimal": "utf8_is_decimal",
"isdigit": "utf8_is_digit",
"islower": "utf8_is_lower",
"isnumeric": "utf8_is_numeric",
"isspace": "utf8_is_space",
"istitle": "utf8_is_title",
"isupper": "utf8_is_upper",
"lower": "utf8_lower",
"upper": "utf8_upper",
"len": "utf8_length",
"lstrip": "utf8_ltrim",
"rstrip": "utf8_rtrim",
"strip": "utf8_trim",
"swapcase": "utf8_swapcase",
"title": "utf8_title",
}
def _dispatch_aggregation_function(self, compute_fn: str, **kwargs):
kwargs = {self.KWARG_MAPPING.get(k, k): v for k, v in kwargs.items()}
out = getattr(pc, self.COMPUTE_FN_MAPPING.get(compute_fn, compute_fn))(
self.data, **kwargs
)
return out.as_py()
def mode(self, **kwargs) -> ScalarColumn:
if "n" in "kwargs":
raise ValueError(
"Meerkat does not support passing `n` to `mode` when "
"backend is Arrow."
)
# matching behavior of Pandas, get all counts, but only return top modes
struct_array = pc.mode(self.data, n=len(self), **kwargs)
modes = []
count = struct_array[0]["count"]
for mode in struct_array:
if count != mode["count"]:
break
modes.append(mode["mode"].as_py())
return ArrowScalarColumn(modes)
def median(self, skipna: bool = True, **kwargs) -> Any:
warnings.warn("Arrow backend computes an approximate median.")
return pc.approximate_median(self.data, skip_nulls=skipna).as_py()
def _dispatch_arithmetic_function(
self, other: ScalarColumn, compute_fn: str, right: bool, *args, **kwargs
):
if isinstance(other, Column):
assert isinstance(other, ArrowScalarColumn)
other = other.data
compute_fn = self.COMPUTE_FN_MAPPING.get(compute_fn, compute_fn)
if right:
out = self._clone(
data=getattr(pc, compute_fn)(other, self.data, *args, **kwargs)
)
return out
else:
return self._clone(
data=getattr(pc, compute_fn)(self.data, other, *args, **kwargs)
)
def _true_div(self, other, right: bool = False, **kwargs) -> ScalarColumn:
if isinstance(other, Column):
assert isinstance(other, ArrowScalarColumn)
other = other.data
# convert other to float if it is an integer
if isinstance(other, pa.ChunkedArray) or isinstance(other, pa.Array):
if other.type == pa.int64():
other = other.cast(pa.float64())
else:
other = pa.scalar(other, type=pa.float64())
if right:
return self._clone(pc.divide(other, self.data), **kwargs)
else:
return self._clone(pc.divide(self.data, other), **kwargs)
def __add__(self, other: ScalarColumn):
if self.dtype == pa.string():
# pyarrow expects a final str used as the separator
return self._dispatch_arithmetic_function(
other, "binary_join_element_wise", False, ""
)
return self._dispatch_arithmetic_function(other, "add", right=False)
def __radd__(self, other: ScalarColumn):
if self.dtype == pa.string():
return self._dispatch_arithmetic_function(
other, "binary_join_element_wise", True, ""
)
return self._dispatch_arithmetic_function(other, "add", right=False)
def __truediv__(self, other: ScalarColumn):
return self._true_div(other, right=False)
def __rtruediv__(self, other: ScalarColumn):
return self._true_div(other, right=True)
def _floor_div(self, other, right: bool = False, **kwargs) -> ScalarColumn:
_true_div = self._true_div(other, right=right, **kwargs)
return _true_div._clone(data=pc.floor(_true_div.data))
def __floordiv__(self, other: ScalarColumn):
return self._floor_div(other, right=False)
def __rfloordiv__(self, other: ScalarColumn):
return self._floor_div(other, right=True)
def __mod__(self, other: ScalarColumn):
raise NotImplementedError("Modulo is not supported by Arrow backend.")
def __rmod__(self, other: ScalarColumn):
raise NotImplementedError("Modulo is not supported by Arrow backend.")
def _dispatch_comparison_function(
self, other: ScalarColumn, compute_fn: str, **kwargs
):
if isinstance(other, Column):
assert isinstance(other, ArrowScalarColumn)
other = other.data
compute_fn = self.COMPUTE_FN_MAPPING.get(compute_fn, compute_fn)
return self._clone(data=getattr(pc, compute_fn)(self.data, other, **kwargs))
def _dispatch_logical_function(
self, other: ScalarColumn, compute_fn: str, **kwargs
):
if isinstance(other, Column):
assert isinstance(other, ArrowScalarColumn)
other = other.data
compute_fn = self.COMPUTE_FN_MAPPING.get(compute_fn, compute_fn)
if other is None:
return self._clone(data=getattr(pc, compute_fn)(self.data, **kwargs))
return self._clone(data=getattr(pc, compute_fn)(self.data, other, **kwargs))
def isin(self, values: Union[List, Set], **kwargs) -> ScalarColumn:
return self._clone(data=pc.is_in(self.data, pa.array(values), **kwargs))
def _dispatch_unary_function(
self, compute_fn: str, _namespace: str = None, **kwargs
):
compute_fn = self.COMPUTE_FN_MAPPING.get(compute_fn, compute_fn)
return self._clone(data=getattr(pc, compute_fn)(self.data, **kwargs))
def isnull(self, **kwargs) -> ScalarColumn:
return self._clone(data=pc.is_null(self.data, nan_is_null=True, **kwargs))
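# Hedged usage sketch (not part of the library): the dispatch methods above
# forward to pyarrow.compute kernels via COMPUTE_FN_MAPPING (e.g. "std" ->
# pc.stddev, "upper" -> pc.utf8_upper, isin -> pc.is_in). The demo below
# exercises a few of those underlying kernels directly on made-up data; it
# relies only on the module-level `pa`/`pc` imports above.
if __name__ == "__main__":
    _arr = pa.array([1.0, 2.0, 2.0, 4.0])
    print(pc.stddev(_arr).as_py())                    # target of ArrowScalarColumn.std()
    print(pc.mode(_arr, n=len(_arr)))                 # all modes with counts, as in mode() above
    print(pc.is_in(_arr, value_set=pa.array([2.0])))  # kernel behind isin()
    print(pc.utf8_upper(pa.array(["a", "b"])))        # kernel behind upper() on string columns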
|
meerkat-main
|
meerkat/columns/scalar/arrow.py
|
from .abstract import ScalarColumn
__all__ = ["ScalarColumn"]
|
meerkat-main
|
meerkat/columns/scalar/__init__.py
|
meerkat-main
|
meerkat/logging/__init__.py
|
|
import logging
import os
import tempfile
import uuid
from datetime import datetime
from pathlib import Path
from typing import Union
from rich.logging import RichHandler
logger = logging.getLogger(__name__)
def initialize_logging(
log_dir: str = None,
log_name: str = "meerkat.log",
format: str = "[%(funcName)s()] [%(name)s: %(lineno)s] :: %(message)s",
level: int = os.environ.get("MEERKAT_LOGGING_LEVEL", logging.WARNING),
) -> None:
"""Initialize logging for Meerkat."""
# Generate a new directory using the log_dir, if it doesn't exist
date = datetime.now().strftime("%Y_%m_%d")
time = datetime.now().strftime("%H_%M_%S")
uid = str(uuid.uuid4())[:8]
if log_dir is None:
log_dir = os.environ.get("MEERKAT_LOG_DIR")
if log_dir is None:
success = False
# try potential logging directories until we find one with adequate permissions
for log_dir in [
tempfile.gettempdir(),
os.path.join(Path.home(), ".meerkat"),
]:
try:
log_path = os.path.join(log_dir, "log", date, time, uid)
os.makedirs(log_path, exist_ok=True)
success = True
# stop at the first directory with write permission
break
except PermissionError:
pass
if not success:
raise PermissionError(
"Permission denied in all of Meerkat's default logging directories. "
"Set environment variable `MEERKAT_LOG_DIR` to specify a directory for "
"Meerkat logging."
)
else:
log_path = os.path.join(log_dir, "log", date, time, uid)
# Make the logdir
os.makedirs(log_path, exist_ok=True)
# Initialize logging
logging.basicConfig(
format=format,
level=level,
handlers=[
logging.FileHandler(os.path.join(log_path, log_name)),
# logging.StreamHandler(),
RichHandler(rich_tracebacks=True),
],
)
# Set logging levels for dependencies
set_logging_level_for_imports()
logger.info("Logging initialized.")
def set_logging_level_for_imports(level: int = logging.WARNING) -> None:
"""Set logging levels for dependencies."""
# Set levels for imports
logging.getLogger("tensorflow").setLevel(level)
logging.getLogger("matplotlib").setLevel(level)
logging.getLogger("textattack").setLevel(level)
logging.getLogger("filelock").setLevel(level)
logging.getLogger("sse_starlette").setLevel(level)
def set_logging_level(level: Union[int, str] = logging.INFO):
"""Set logging level for Meerkat."""
# Set the top-level logger
if isinstance(level, int):
logging.getLogger("meerkat").setLevel(level)
elif isinstance(level, str):
logging.getLogger("meerkat").setLevel(
{
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"critical": logging.CRITICAL,
"fatal": logging.FATAL,
}[level]
)
else:
raise NotImplementedError(f"Level `{level}` not recognized.")
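# Hedged usage sketch (illustrative only): initialize Meerkat logging into a
# temporary directory and then raise the library's log level. The directory,
# file name, and level chosen here are arbitrary.
if __name__ == "__main__":
    import tempfile

    initialize_logging(log_dir=tempfile.mkdtemp(), log_name="example.log")
    set_logging_level("info")
    logger.info("Logging goes to both the file handler and the rich console handler.")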
|
meerkat-main
|
meerkat/logging/utils.py
|
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, Hashable, List, Mapping, Sequence, Tuple, Union
from meerkat.errors import ConsolidationError
from meerkat.tools.utils import dump_yaml, load_yaml
# an index into a block that specifies where a column's data lives in the block
BlockIndex = Union[int, slice, str]
if TYPE_CHECKING:
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
@dataclass
class BlockView:
block_index: BlockIndex
block: AbstractBlock
@property
def data(self):
return self.block._get_data(self.block_index)
class AbstractBlock:
def __init__(self, *args, **kwargs):
super(AbstractBlock, self).__init__(*args, **kwargs)
def __getitem__(self, index: BlockIndex) -> BlockView:
return BlockView(block_index=index, block=self)
def _get_data(self, index: BlockIndex) -> object:
"""Must return view of the underlying data."""
raise NotImplementedError()
def subblock(self, indices: List[BlockIndex]):
raise NotImplementedError
@property
def signature(self) -> Hashable:
raise NotImplementedError
@classmethod
def from_column_data(cls, data: object) -> Tuple[AbstractBlock, BlockView]:
raise NotImplementedError()
@classmethod
def from_block_data(cls, data: object) -> Tuple[AbstractBlock, BlockView]:
raise NotImplementedError()
@classmethod
def consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, "Column"] = None,
) -> Tuple[AbstractBlock, Mapping[str, BlockIndex]]:
if len(block_refs) == 0:
raise ConsolidationError("Must pass at least 1 BlockRef to consolidate.")
if len({ref.block.signature for ref in block_refs}) != 1:
raise ConsolidationError(
"Can only consolidate blocks with matching signatures."
)
return cls._consolidate(
block_refs=block_refs, consolidated_inputs=consolidated_inputs
)
@classmethod
def _consolidate(cls, block_refs: Sequence[BlockRef]) -> BlockRef:
raise NotImplementedError
def _get(self, index, block_ref: BlockRef) -> Union[BlockRef, dict]:
raise NotImplementedError
@property
def is_mmap(self):
return False
def write(self, path: str, *args, **kwargs):
os.makedirs(path, exist_ok=True)
self._write_data(path, *args, **kwargs)
metadata = {"klass": type(self)}
metadata_path = os.path.join(path, "meta.yaml")
dump_yaml(metadata, metadata_path)
@classmethod
def read(cls, path: str, *args, **kwargs):
assert os.path.exists(path), f"`path` {path} does not exist."
metadata_path = os.path.join(path, "meta.yaml")
metadata = dict(load_yaml(metadata_path))
block_class = metadata["klass"]
data = block_class._read_data(path, *args, **kwargs)
return block_class(data)
def _write_data(self, path: str, *args, **kwargs):
raise NotImplementedError
@staticmethod
def _read_data(path: str, *args, **kwargs) -> object:
raise NotImplementedError
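# Hedged sketch (not part of the library): a minimal AbstractBlock subclass
# illustrating the write()/read() contract above -- a subclass only needs a
# `data`-accepting constructor plus _write_data/_read_data; the class itself
# is recorded in meta.yaml so read() can re-instantiate it. The JSON storage
# format used here is an arbitrary choice for the example.
class _ListBlock(AbstractBlock):
    def __init__(self, data: list):
        super().__init__()
        self.data = data

    def _get_data(self, index: BlockIndex) -> object:
        return self.data[index]

    def _write_data(self, path: str, *args, **kwargs):
        import json

        with open(os.path.join(path, "data.json"), "w") as f:
            json.dump(self.data, f)

    @staticmethod
    def _read_data(path: str, *args, **kwargs) -> object:
        import json

        with open(os.path.join(path, "data.json")) as f:
            return json.load(f)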
|
meerkat-main
|
meerkat/block/abstract.py
|
from __future__ import annotations
import os
import shutil
from dataclasses import dataclass
from mmap import mmap
from typing import Dict, Hashable, Sequence, Tuple, Union
import numpy as np
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
from meerkat.errors import ConsolidationError
from meerkat.tools.lazy_loader import LazyLoader
from .abstract import AbstractBlock, BlockIndex, BlockView
torch = LazyLoader("torch")
class NumPyBlock(AbstractBlock):
@dataclass(eq=True, frozen=True)
class Signature:
dtype: np.dtype
nrows: int
shape: Tuple[int]
klass: type
mmap: Union[bool, int]
def __init__(self, data, *args, **kwargs):
super(NumPyBlock, self).__init__(*args, **kwargs)
if len(data.shape) <= 1:
raise ValueError(
"Cannot create a `NumpyBlock` from data with less than 2 axes."
)
self.data = data
@property
def signature(self) -> Hashable:
return self.Signature(
klass=NumPyBlock,
# don't want to consolidate any mmaped blocks
mmap=id(self) if isinstance(self.data, np.memmap) else False,
nrows=self.data.shape[0],
shape=self.data.shape[2:],
dtype=self.data.dtype,
)
def _get_data(self, index: BlockIndex, materialize: bool = True) -> np.ndarray:
return self.data[:, index]
@classmethod
def from_column_data(cls, data: np.ndarray) -> Tuple[NumPyBlock, BlockView]:
"""[summary]
Args:
data (np.ndarray): [description]
names (Sequence[str]): [description]
Raises:
ValueError: [description]
Returns:
Tuple[NumpyBlock, Mapping[str, BlockIndex]]: [description]
"""
if len(data.shape) == 1:
data = np.expand_dims(data, axis=1)
block_index = 0
elif data.shape[1] == 1:
block_index = slice(0, 1)
else:
block_index = slice(0, data.shape[1])
block = cls(data)
return BlockView(block=block, block_index=block_index)
@classmethod
def _consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, "Column"] = None,
) -> BlockRef:
offset = 0
new_indices = {}
columns = {}
to_concat = []
for block_ref in block_refs:
for name, col in block_ref.items():
# keep track of all the columns in the block_refs
if name in columns:
raise ConsolidationError(
"Cannot consolidate two block refs containing the same column."
)
columns[name] = col
# add block and compute new indices
block_index = col._block_index
if isinstance(block_index, slice):
block_view = col._block.data[:, block_index]
new_indices[name] = slice(
# need to update slice offset and remove step
offset,
block_view.shape[1] + offset,
1,
)
elif isinstance(block_index, int):
# keep block axis
block_view = col._block.data[:, block_index : block_index + 1]
new_indices[name] = offset
to_concat.append(block_view)
offset += block_view.shape[1]
block = cls(np.concatenate(to_concat, axis=1))
# create columns
new_columns = {
name: columns[name]._clone(data=block[block_index])
for name, block_index in new_indices.items()
}
return BlockRef(block=block, columns=new_columns)
@staticmethod
def _convert_index(index):
if torch.is_tensor(index):
# need to convert to numpy for boolean indexing
return index.numpy()
return index
def _get(
self, index, block_ref: BlockRef, materialize: bool = True
) -> Union[BlockRef, dict]:
index = self._convert_index(index)
# TODO: check if they're trying to index more than just the row dimension
data = self.data[index]
if isinstance(index, int):
# if indexing a single row, we do not return a block manager, just a dict
return {
name: data[col._block_index] for name, col in block_ref.columns.items()
}
block = self.__class__(data)
columns = {
name: col._clone(data=block[col._block_index])
for name, col in block_ref.columns.items()
}
# note that the new block may share memory with the old block
return BlockRef(block=block, columns=columns)
@property
def is_mmap(self):
# important to check if .base is a python mmap object, since a view of a mmap
# is also a memmap object, but should not be symlinked or copied
return isinstance(self.data, np.memmap) and isinstance(self.data.base, mmap)
def _write_data(self, path: str, link: bool = True):
path = os.path.join(path, "data.npy")
if self.is_mmap:
if link:
os.symlink(self.data.filename, path)
else:
shutil.copy(self.data.filename, path)
else:
np.save(path, self.data)
@staticmethod
def _read_data(
path: str, mmap: bool = False, read_inputs: Dict[str, Column] = None
):
data_path = os.path.join(path, "data.npy")
if mmap:
return np.load(data_path, mmap_mode="r")
return np.load(data_path, allow_pickle=True)
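# Hedged usage sketch (illustrative only): wrapping a 1-D array gives the
# block an explicit column axis, and the returned BlockView's block_index
# selects that single column back out of the 2-D store.
if __name__ == "__main__":
    _view = NumPyBlock.from_column_data(np.arange(8))
    assert _view.block.data.shape == (8, 1)
    assert _view.block_index == 0
    assert _view.data.shape == (8,)  # _get_data slices out the column axis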
|
meerkat-main
|
meerkat/block/numpy_block.py
|
from __future__ import annotations
import os
from copy import copy
from dataclasses import dataclass
from typing import Dict, Hashable, List, Sequence, Tuple, Union
import numpy as np
from cytoolz import merge_with
import meerkat as mk
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
from meerkat.tools.utils import dump_yaml, load_yaml, meerkat_dill_load, translate_index
from .abstract import AbstractBlock, BlockIndex, BlockView
@dataclass
class DeferredCellOp:
args: List[any]
kwargs: Dict[str, any]
fn: callable
is_batched_fn: bool
return_index: Union[str, int] = None
@staticmethod
def prepare_arg(arg):
from ..columns.deferred.base import AbstractCell, DeferredColumn
if isinstance(arg, AbstractCell):
return arg.get()
elif isinstance(arg, DeferredColumn):
return arg()
return arg
def _get(self):
args = [self.prepare_arg(arg) for arg in self.args]
kwargs = {kw: self.prepare_arg(arg) for kw, arg in self.kwargs.items()}
out = self.fn(*args, **kwargs)
if self.return_index is not None:
return out[self.return_index]
if self.is_batched_fn:
return out[0]
return out
def with_return_index(self, index: Union[str, int]):
op = copy(self)
op.return_index = index
return op
def __len__(self):
if len(self.args) > 0:
return len(self.args[0])
else:
for col in self.kwargs.values():
return len(col)
return 0
def is_equal(self, other: Column):
if (
self.fn != other.fn
or self.is_batched_fn != other.is_batched_fn
or self.return_index != other.return_index
):
return False
for arg, other_arg in zip(self.args, other.args):
if arg != other_arg:
return False
if set(self.kwargs.keys()) != set(other.kwargs.keys()):
return False
for key in self.kwargs:
if self.kwargs[key] != other.kwargs[key]:
return False
return True
@dataclass
class DeferredOp:
args: List[mk.Column]
kwargs: Dict[str, mk.Column]
fn: callable
is_batched_fn: bool
batch_size: int
return_format: type = None
return_index: Union[str, int] = None
materialize_inputs: bool = True
@staticmethod
def concat(ops: Sequence[DeferredOp]):
"""Concatenate a sequence of operations."""
if len(ops) == 0:
raise ValueError("Cannot concatenate empty sequence of LambdaOp.")
if len(ops) == 1:
return ops[0]
# going to use the `fn` etc. of the first op
op = copy(ops[0])
op.args = [mk.concat([op.args[i] for op in ops]) for i in range(len(op.args))]
op.kwargs = {
kwarg: mk.concat([op.kwargs[kwarg] for op in ops])
for kwarg in op.kwargs.keys()
}
return op
def is_equal(self, other: Column):
if (
self.fn != other.fn
or self.is_batched_fn != other.is_batched_fn
or self.return_format != other.return_format
or self.return_index != other.return_index
):
return False
for arg, other_arg in zip(self.args, other.args):
if not arg.is_equal(other_arg):
return False
if set(self.kwargs.keys()) != set(other.kwargs.keys()):
return False
for key in self.kwargs:
if not self.kwargs[key].is_equal(other.kwargs[key]):
return False
return True
def write(self, path: str, written_inputs: Dict[int, str] = None):
"""_summary_
Args:
path (str): _description_
written_inputs (dict, optional): _description_. Defaults to None.
"""
# Make all the directories to the path
os.makedirs(path, exist_ok=True)
if written_inputs is None:
written_inputs = {}
state = {
"fn": self.fn,
"return_index": self.return_index,
"return_format": self.return_format,
"is_batched_fn": self.is_batched_fn,
"batch_size": self.batch_size,
"materialize_inputs": self.materialize_inputs,
}
# state_path = os.path.join(path, "state.dill")
# dill.dump(state, open(state_path, "wb"))
meta = {"args": [], "kwargs": {}, "state": state}
args_dir = os.path.join(path, "args")
os.makedirs(args_dir, exist_ok=True)
for idx, arg in enumerate(self.args):
if id(arg) in written_inputs:
meta["args"].append(written_inputs[id(arg)])
else:
col_path = os.path.join(args_dir, f"{idx}.col")
arg.write(col_path)
meta["args"].append(os.path.relpath(col_path, path))
kwargs_dir = os.path.join(path, "kwargs")
os.makedirs(kwargs_dir, exist_ok=True)
for key, arg in self.kwargs.items():
if id(arg) in written_inputs:
meta["kwargs"][key] = written_inputs[id(arg)]
else:
col_path = os.path.join(kwargs_dir, f"{key}.col")
arg.write(col_path)
meta["kwargs"][key] = os.path.relpath(col_path, path)
# Save the metadata as a yaml file
meta_path = os.path.join(path, "meta.yaml")
dump_yaml(meta, meta_path)
@classmethod
def read(cls, path, read_inputs: Dict[str, Column] = None):
if read_inputs is None:
read_inputs = {}
# Assert that the path exists
assert os.path.exists(path), f"`path` {path} does not exist."
meta = dict(load_yaml(os.path.join(path, "meta.yaml")))
args = [
read_inputs[arg_path]
if arg_path in read_inputs
else Column.read(os.path.join(path, arg_path))
for arg_path in meta["args"]
]
kwargs = {
key: read_inputs[kwarg_path]
if kwarg_path in read_inputs
else Column.read(os.path.join(path, kwarg_path))
for key, kwarg_path in meta["kwargs"].items()
}
if "state" in meta:
state = meta["state"]
else:
state = meerkat_dill_load(os.path.join(path, "state.dill"))
return cls(args=args, kwargs=kwargs, **state)
def _get(
self,
index: Union[int, np.ndarray],
indexed_inputs: Dict[int, Column] = None,
materialize: bool = True,
):
if indexed_inputs is None:
indexed_inputs = {}
# if function is batched, but the index is singular, we need to turn the
# single index into a batch index, and then later unpack the result
single_on_batched = self.is_batched_fn and isinstance(index, int)
if single_on_batched:
index = np.array([index])
# we pass results from other columns
# prepare inputs
kwargs = {
# if column has already been indexed
kwarg: indexed_inputs[id(column)]
if id(column) in indexed_inputs
else column._get(index, materialize=self.materialize_inputs)
for kwarg, column in self.kwargs.items()
}
args = [
indexed_inputs[id(column)]
if id(column) in indexed_inputs
else column._get(index, materialize=self.materialize_inputs)
for column in self.args
]
if isinstance(index, int):
if materialize:
output = self.fn(*args, **kwargs)
if self.return_index is not None:
output = output[self.return_index]
return output
else:
return DeferredCellOp(
fn=self.fn,
args=args,
kwargs=kwargs,
is_batched_fn=self.is_batched_fn,
return_index=self.return_index,
)
elif isinstance(index, np.ndarray):
if materialize:
if self.is_batched_fn:
output = self.fn(*args, **kwargs)
if self.return_index is not None:
output = output[self.return_index]
if single_on_batched:
if (
(self.return_format is None or self.return_format is dict)
and isinstance(output, Dict)
and (self.return_index is None)
):
return {k: v[0] for k, v in output.items()}
elif (
(self.return_format is None or self.return_format is tuple)
and isinstance(output, Tuple)
and (self.return_index is None)
):
return [v[0] for v in output]
else:
return output[0]
return output
else:
outputs = []
for i in range(len(index)):
output = self.fn(
*[arg[i] for arg in args],
**{kwarg: column[i] for kwarg, column in kwargs.items()},
)
if self.return_index is not None:
output = output[self.return_index]
outputs.append(output)
if (self.return_format is dict) and (self.return_index is None):
return merge_with(list, outputs)
elif (self.return_format is tuple) and (self.return_index is None):
return tuple(zip(*outputs))
else:
return outputs
else:
if single_on_batched:
return DeferredCellOp(
fn=self.fn,
args=args,
kwargs=kwargs,
is_batched_fn=self.is_batched_fn,
return_index=self.return_index,
)
return DeferredOp(
fn=self.fn,
args=args,
kwargs=kwargs,
is_batched_fn=self.is_batched_fn,
batch_size=self.batch_size,
return_format=self.return_format,
return_index=self.return_index,
)
def __len__(self):
if len(self.args) > 0:
return len(self.args[0])
else:
for col in self.kwargs.values():
return len(col)
return 0
def with_return_index(self, index: Union[str, int]):
"""Return a copy of the operation with a new return index."""
op: DeferredOp = copy(self)
op.return_index = index
return op
class DeferredBlock(AbstractBlock):
@dataclass(eq=True, frozen=True)
class Signature:
klass: type
fn: callable
args: Tuple[int]
# dicts are not hashable, so inputs should be a sorted tuple of tuples
kwargs: Tuple[Tuple[Union[str, int], int]]
@property
def signature(self) -> Hashable:
return self.Signature(
klass=DeferredBlock,
fn=self.data.fn,
args=tuple(map(id, self.data.args)),
kwargs=tuple(sorted((k, id(v)) for k, v in self.data.kwargs.items())),
)
def __init__(self, data: DeferredOp):
self.data = data
@classmethod
def from_column_data(cls, data: DeferredOp) -> Tuple[DeferredBlock, BlockView]:
block_index = data.return_index
data = data.with_return_index(None)
block = cls(data=data)
return BlockView(block=block, block_index=block_index)
@classmethod
def from_block_data(cls, data: DeferredOp) -> Tuple[AbstractBlock, BlockView]:
return cls(data=data)
@classmethod
def _consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, Column] = None,
) -> BlockRef:
if consolidated_inputs is None:
consolidated_inputs = {}
# if the input column has been consolidated, we need to update the inputs
# (i.e. args and kwargs) of the data
op = block_refs[0].block.data.with_return_index(None)
op.args = [consolidated_inputs.get(id(arg), arg) for arg in op.args]
op.kwargs = {
kwarg: consolidated_inputs.get(id(column), column)
for kwarg, column in op.kwargs.items()
}
block = DeferredBlock.from_block_data(op)
columns = {
name: col._clone(data=block[col._block_index])
for ref in block_refs
for name, col in ref.items()
}
return BlockRef(block=block, columns=columns)
def _convert_index(self, index):
return translate_index(index, length=len(self.data)) # TODO
def _get(
self,
index,
block_ref: BlockRef,
indexed_inputs: dict = None,
materialize: bool = True,
) -> Union[BlockRef, dict]:
if indexed_inputs is None:
indexed_inputs = {}
index = self._convert_index(index)
outputs = self.data._get(
index=index, indexed_inputs=indexed_inputs, materialize=materialize
)
# convert raw outputs into columns
if isinstance(index, int):
if materialize:
return {
name: outputs
if (col._block_index is None)
else outputs[col._block_index]
for name, col in block_ref.columns.items()
}
else:
# outputs is a DeferredCellOp; wrap it in a deferred cell for each column
return {
name: col._create_cell(outputs.with_return_index(col._block_index))
for name, col in block_ref.columns.items()
}
else:
if materialize:
outputs = {
name: col.convert_to_output_type(
col.collate(
outputs
if (col._block_index is None)
else outputs[col._block_index]
)
)
for name, col in block_ref.columns.items()
}
return [
BlockRef(columns={name: col}, block=col._block)
if col.is_blockable() # may return a non-blockable type
else (name, col)
for name, col in outputs.items()
]
else:
block = self.from_block_data(outputs)
columns = {
name: col._clone(
data=BlockView(block=block, block_index=col._block_index)
)
for name, col in block_ref.columns.items()
}
return BlockRef(block=block, columns=columns)
def _get_data(self, index: BlockIndex) -> object:
return self.data.with_return_index(index)
def _write_data(self, path: str, written_inputs: Dict[int, str] = None):
path = os.path.join(path, "data.op")
return self.data.write(path, written_inputs=written_inputs)
@staticmethod
def _read_data(
path: str, mmap: bool = False, read_inputs: Dict[str, Column] = None
) -> object:
path = os.path.join(path, "data.op")
return DeferredOp.read(path, read_inputs=read_inputs)
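# Hedged usage sketch (illustrative, not part of the module): a DeferredOp
# pairs a function with its column inputs and only applies it on access.
# `Column.from_data` is assumed here to accept a plain Python list.
if __name__ == "__main__":
    _col = Column.from_data([1, 2, 3, 4])
    _op = DeferredOp(
        args=[_col],
        kwargs={},
        fn=lambda x: x * 10,
        is_batched_fn=False,
        batch_size=1,
    )
    assert len(_op) == 4
    assert _op._get(2) == 30  # fn applied to the materialized element at index 2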
|
meerkat-main
|
meerkat/block/deferred_block.py
|
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Dict, Hashable, List, Sequence, Tuple, Union
import pandas as pd
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.columns.tensor.torch import TorchTensorColumn
from meerkat.tools.lazy_loader import LazyLoader
from .abstract import AbstractBlock, BlockIndex, BlockView
torch = LazyLoader("torch")
class PandasBlock(AbstractBlock):
@dataclass(eq=True, frozen=True)
class Signature:
nrows: int
klass: type
def __init__(self, data: pd.DataFrame, *args, **kwargs):
super(PandasBlock, self).__init__(*args, **kwargs)
self.data = data
@property
def signature(self) -> Hashable:
return self.Signature(
klass=PandasBlock,
# we don't include dtypes, so any PandasBlocks with the same number of
# rows share a signature and can be consolidated
nrows=len(self.data),
)
def _get_data(self, index: BlockIndex) -> pd.Series:
return self.data[index]
def subblock(self, indices: List[BlockIndex]) -> PandasBlock:
return PandasBlock(data=self.data[indices])
@classmethod
def from_column_data(cls, data: pd.Series) -> Tuple[PandasBlock, BlockView]:
"""[summary]
Args:
data (np.ndarray): [description]
names (Sequence[str]): [description]
Raises:
ValueError: [description]
Returns:
Tuple[PandasBlock, Mapping[str, BlockIndex]]: [description]
"""
data = pd.DataFrame({"col": data})
block = cls(data)
return BlockView(block_index="col", block=block)
@classmethod
def _consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, "Column"] = None,
) -> BlockRef:
df = pd.DataFrame(
# need to ignore index when concatenating
{
name: ref.block.data[col._block_index].reset_index(drop=True)
for ref in block_refs
for name, col in ref.items()
}
)
block = cls(df)
# pull out the block columns from all the block_refs
columns = {}
for ref in block_refs:
columns.update(ref)
new_columns = {
name: col._clone(data=block[name]) for name, col in columns.items()
}
return BlockRef(block=block, columns=new_columns)
@staticmethod
def _convert_index(index):
if torch.is_tensor(index):
# need to convert to numpy for boolean indexing
return index.numpy()
if isinstance(index, NumPyTensorColumn):
return index.data
if isinstance(index, TorchTensorColumn):
# need to convert to numpy for boolean indexing
return index.data.numpy()
if isinstance(index, pd.Series):
# need to convert to numpy for boolean indexing
return index.values
from meerkat.columns.scalar.pandas import PandasScalarColumn
if isinstance(index, PandasScalarColumn):
return index.data.values
from meerkat.columns.scalar.arrow import ArrowScalarColumn
if isinstance(index, ArrowScalarColumn):
return index.to_numpy()
return index
def _get(
self, index, block_ref: BlockRef, materialize: bool = True
) -> Union[BlockRef, dict]:
index = self._convert_index(index)
# TODO: check if they're trying to index more than just the row dimension
data = self.data.iloc[index]
if isinstance(index, int):
# if indexing a single row, we do not return a block manager, just a dict
return {
name: data[col._block_index] for name, col in block_ref.columns.items()
}
# All Pandas Columns should have contiguous indices so that we can perform
# comparisons etc.
data = data.reset_index(drop=True)
block = self.__class__(data)
columns = {
name: col._clone(data=block[col._block_index])
for name, col in block_ref.columns.items()
}
# note that the new block may share memory with the old block
return BlockRef(block=block, columns=columns)
def _write_data(self, path: str):
self.data.reset_index(drop=True).to_feather(os.path.join(path, "data.feather"))
@staticmethod
def _read_data(
path: str, mmap: bool = False, read_inputs: Dict[str, Column] = None
):
return pd.read_feather(os.path.join(path, "data.feather"))
def mean(self):
return self.data.mean()
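# Hedged usage sketch (illustrative only): column data is stored in a
# single-column DataFrame keyed "col", and the returned BlockView pulls the
# Series back out via that key.
if __name__ == "__main__":
    _view = PandasBlock.from_column_data(pd.Series([1, 2, 3]))
    assert _view.block_index == "col"
    assert list(_view.data) == [1, 2, 3]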
|
meerkat-main
|
meerkat/block/pandas_block.py
|
meerkat-main
|
meerkat/block/__init__.py
|
|
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Dict, Hashable, List, Sequence, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.columns.tensor.torch import TorchTensorColumn
from meerkat.tools.lazy_loader import LazyLoader
from .abstract import AbstractBlock, BlockIndex, BlockView
torch = LazyLoader("torch")
class ArrowBlock(AbstractBlock):
@dataclass(eq=True, frozen=True)
class Signature:
nrows: int
klass: type
# mmap: bool
def __init__(self, data: pa.Table, *args, **kwargs):
super(ArrowBlock, self).__init__(*args, **kwargs)
self.data = data
@property
def signature(self) -> Hashable:
return self.Signature(klass=ArrowBlock, nrows=len(self.data))
def _get_data(self, index: BlockIndex) -> pa.Array:
return self.data[index]
@classmethod
def from_column_data(cls, data: pa.Array) -> BlockView:
data = pa.Table.from_pydict({"col": data})
block = cls(data)
return BlockView(block=block, block_index="col")
@classmethod
def from_block_data(cls, data: pa.Table) -> List[BlockView]:
block = cls(data)
return [
BlockView(block=block, block_index=column) for column in data.column_names
]
@classmethod
def _consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, "Column"] = None,
) -> BlockRef:
table = pa.Table.from_pydict(
# need to ignore index when concatenating
{
name: ref.block.data[col._block_index]
for ref in block_refs
for name, col in ref.items()
}
)
block = cls(table)
# pull out the block columns from all the block_refs
columns = {}
for ref in block_refs:
columns.update(ref)
new_columns = {
name: col._clone(data=block[name]) for name, col in columns.items()
}
return BlockRef(block=block, columns=new_columns)
@staticmethod
def _convert_index(index):
if isinstance(index, list):
return np.array(index)
if torch.is_tensor(index):
# need to convert to numpy for boolean indexing
return index.numpy()
if isinstance(index, NumPyTensorColumn):
return index.data
if isinstance(index, TorchTensorColumn):
# need to convert to numpy for boolean indexing
return index.data.numpy()
if isinstance(index, pd.Series):
# need to convert to numpy for boolean indexing
return index.values
from meerkat.columns.scalar.pandas import PandasScalarColumn
if isinstance(index, PandasScalarColumn):
return index.data.values
from meerkat.columns.scalar.arrow import ArrowScalarColumn
if isinstance(index, ArrowScalarColumn):
return index.to_numpy()
return index
def _get(
self, index, block_ref: BlockRef, materialize: bool = True
) -> Union[BlockRef, dict]:
index = self._convert_index(index)
# TODO: check if they're trying to index more than just the row dimension
if isinstance(index, int):
# if indexing a single row, we do not return a block manager, just a dict
# Convert to Python object for consistency with other ScalarColumn
# implementations.
return {
name: self.data[col._block_index][index].as_py()
for name, col in block_ref.columns.items()
}
if isinstance(index, slice):
data = self.data[index]
elif index.dtype == bool:
data = self.data.filter(pa.array(index))
else:
# we do not want to use ``data = self.data.take(index)``
# because it can't handle ChunkedArrays that don't fit in an Array
# https://issues.apache.org/jira/browse/ARROW-9773
# TODO (Sabri): Huggingface gets around this in a similar manner but
# applies the slices to the record batches, because this allows them to do
# the batch lookup in numpy, which is faster than pure python, which is
# presumably why Table.slice does
# noqa E501, https://github.com/huggingface/datasets/blob/491dad8507792f6f51077867e22412af7cd5c2f1/src/datasets/table.py#L110
data = pa.concat_tables(self.data.slice(i, 1) for i in index)
block = self.__class__(data)
columns = {
name: col._clone(data=block[col._block_index])
for name, col in block_ref.columns.items()
}
# note that the new block may share memory with the old block
return BlockRef(block=block, columns=columns)
@staticmethod
def _write_table(path: str, table: pa.Table):
# noqa E501, source: huggingface implementation https://github.com/huggingface/datasets/blob/92304b42cf0cc6edafc97832c07de767b81306a6/src/datasets/table.py#L50
with open(path, "wb") as sink:
writer = pa.RecordBatchStreamWriter(sink=sink, schema=table.schema)
batches: List[pa.RecordBatch] = table.to_batches()
for batch in batches:
writer.write_batch(batch)
writer.close()
return sum(batch.nbytes for batch in batches)
@staticmethod
def _read_table(path: str, mmap: bool = False):
if mmap:
return pa.ipc.open_stream(pa.memory_map(path)).read_all()
else:
return pa.ipc.open_stream(pa.input_stream(path)).read_all()
def _write_data(self, path: str):
self._write_table(os.path.join(path, "data.arrow"), self.data)
@staticmethod
def _read_data(
path: str, mmap: bool = False, read_inputs: Dict[str, Column] = None
):
return ArrowBlock._read_table(os.path.join(path, "data.arrow"), mmap=mmap)
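# Hedged usage sketch (illustrative only): _write_table/_read_table round-trip
# a table through the Arrow IPC stream format used for on-disk storage. The
# temporary path is arbitrary.
if __name__ == "__main__":
    import tempfile

    _table = pa.Table.from_pydict({"col": pa.array([1, 2, 3])})
    _path = os.path.join(tempfile.mkdtemp(), "data.arrow")
    ArrowBlock._write_table(_path, _table)
    assert ArrowBlock._read_table(_path).equals(_table)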
|
meerkat-main
|
meerkat/block/arrow_block.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, List, Mapping, Sequence, Union
if TYPE_CHECKING:
from meerkat.block.abstract import AbstractBlock
from meerkat.columns.abstract import Column
class BlockRef(Mapping):
def __init__(self, columns: Mapping[str, Column], block: AbstractBlock):
self.columns: Mapping[str, Column] = columns
self.block: AbstractBlock = block
def __getitem__(self, index: Union[str, Sequence[str]]):
if isinstance(index, str):
return self.columns[index]
else:
return self.__class__(
columns={col: self.columns[col] for col in index},
block=self.block,
)
def __delitem__(self, key):
self.columns.pop(key)
def __len__(self):
return len(self.columns)
def __contains__(self, value):
return value in self.columns
def __iter__(self):
return iter(self.columns)
@property
def block_indices(self):
return {name: col._block_index for name, col in self.columns.items()}
def apply(
self, method_name: str = "_get", *args, **kwargs
) -> Union[BlockRef, List[BlockRef], dict]:
# apply method to the block
return getattr(self.block, method_name)(*args, **kwargs, block_ref=self)
def update(self, block_ref: BlockRef):
if id(block_ref.block) != id(self.block):
raise ValueError(
"Can only update BlockRef with another BlockRef pointing "
"to the same block."
)
self.columns.update(block_ref.columns)
|
meerkat-main
|
meerkat/block/ref.py
|
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, Hashable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
from meerkat.errors import ConsolidationError
from meerkat.tools.lazy_loader import LazyLoader
from .abstract import AbstractBlock, BlockIndex, BlockView
torch = LazyLoader("torch")
if TYPE_CHECKING:
import torch
class TorchBlock(AbstractBlock):
@dataclass(eq=True, frozen=True)
class Signature:
device: "torch.device"
dtype: "torch.dtype"
nrows: int
shape: Tuple[int]
klass: type
def __init__(self, data, *args, **kwargs):
super(TorchBlock, self).__init__(*args, **kwargs)
if len(data.shape) <= 1:
raise ValueError(
"Cannot create a `TensorBlock` from data with less than 2 axes."
)
self.data = data
@property
def signature(self) -> Hashable:
return self.Signature(
klass=TorchBlock,
device=self.data.device,
nrows=self.data.shape[0],
shape=self.data.shape[2:],
dtype=self.data.dtype,
)
def _get_data(self, index: BlockIndex) -> "torch.Tensor":
return self.data[:, index]
@classmethod
def from_column_data(cls, data: "torch.Tensor") -> Tuple[TorchBlock, BlockView]:
"""[summary]
Args:
data (np.ndarray): [description]
names (Sequence[str]): [description]
Raises:
ValueError: [description]
Returns:
Tuple[NumpyBlock, Mapping[str, BlockIndex]]: [description]
"""
if len(data.shape) == 1:
data = torch.unsqueeze(data, dim=1)
block_index = 0
elif data.shape[1] == 1:
block_index = slice(0, 1)
else:
block_index = slice(0, data.shape[1])
block = cls(data)
return BlockView(block_index=block_index, block=block)
@classmethod
def _consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, "Column"] = None,
) -> BlockRef:
offset = 0
new_indices = {}
columns = {}
to_concat = []
for block_ref in block_refs:
for name, col in block_ref.items():
# keep track of all the columns in the block_refs
if name in columns:
raise ConsolidationError(
"Cannot consolidate two block refs containing the same column."
)
columns[name] = col
# add block and compute new indices
block_index = col._block_index
if isinstance(block_index, slice):
block_view = col._block.data[:, block_index]
new_indices[name] = slice(
# need to update slice offset and remove step
offset,
block_view.shape[1] + offset,
1,
)
elif isinstance(block_index, int):
# keep block axis
block_view = col._block.data[:, block_index : block_index + 1]
new_indices[name] = offset
to_concat.append(block_view)
offset += block_view.shape[1]
block = cls(torch.cat(to_concat, dim=1))
# create columns
new_columns = {
name: columns[name]._clone(data=block[block_index])
for name, block_index in new_indices.items()
}
return BlockRef(block=block, columns=new_columns)
@staticmethod
def _convert_index(index):
from meerkat.columns.tensor.torch import TorchTensorColumn
if isinstance(index, TorchTensorColumn) and index.data.dtype == np.bool_:
# needed to silence torch deprecation warning
# DeprecationWarning: In future, it will be an error for 'np.bool_' scalars
# to be interpreted as an index
return torch.as_tensor(index.data)
if isinstance(index, list):
# convert to np.ndarray so that it will be converted to a torch tensor
# following rules below
index = np.array(index)
if isinstance(index, pd.Series):
# convert to np.ndarray so that it will be converted to a torch tensor
index = index.values
if isinstance(index, np.ndarray) and index.dtype == np.bool_:
# needed to silence torch deprecation warning
# DeprecationWarning: In future, it will be an error for 'np.bool_' scalars
# to be interpreted as an index
return torch.as_tensor(index)
if isinstance(index, pd.Series):
return torch.as_tensor(index.values)
from meerkat.columns.scalar.pandas import PandasScalarColumn
if (
isinstance(index, PandasScalarColumn)
and index.data.values.dtype == np.bool_
):
# needed to silence torch deprecation warning
# DeprecationWarning: In future, it will be an error for 'np.bool_' scalars
# to be interpreted as an index
return torch.as_tensor(index.data.values)
if isinstance(index, TorchTensorColumn):
# need to convert to numpy for boolean indexing
return index.data
return index
def _get(
self, index, block_ref: BlockRef, materialize: bool = True
) -> Union[BlockRef, dict]:
index = self._convert_index(index)
# TODO: check if they're trying to index more than just the row dimension
data = self.data[index]
if isinstance(index, int):
# if indexing a single row, we do not return a block manager, just a dict
return {
name: data[col._block_index] for name, col in block_ref.columns.items()
}
block = self.__class__(data)
columns = {
name: col._clone(data=block[col._block_index])
for name, col in block_ref.columns.items()
}
# note that the new block may share memory with the old block
return BlockRef(block=block, columns=columns)
def _write_data(self, path: str):
torch.save(self.data, os.path.join(path, "data.pt"))
@staticmethod
def _read_data(
path: str, mmap: bool = False, read_inputs: Dict[str, Column] = None
):
return torch.load(os.path.join(path, "data.pt"))
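# Hedged usage sketch (illustrative only): boolean numpy masks are converted
# to torch tensors by _convert_index before row selection, avoiding the
# np.bool_ indexing deprecation noted above.
if __name__ == "__main__":
    import torch  # bypass the LazyLoader for this standalone example

    _block = TorchBlock(torch.arange(6).reshape(3, 2))
    _mask = np.array([True, False, True])
    _converted = _block._convert_index(_mask)
    assert torch.is_tensor(_converted)
    assert _block.data[_converted].shape == (2, 2)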
|
meerkat-main
|
meerkat/block/torch_block.py
|
from __future__ import annotations
import os
import shutil
from collections import defaultdict
from collections.abc import MutableMapping
from typing import Dict, List, Mapping, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import meerkat.config
from meerkat.block.abstract import AbstractBlock, BlockIndex
from meerkat.columns.abstract import Column
from meerkat.interactive.graph.marking import unmarked
from meerkat.tools.utils import dump_yaml, load_yaml
from .deferred_block import DeferredBlock
from .ref import BlockRef
class BlockManager(MutableMapping):
"""Manages all blocks in a DataFrame."""
def __init__(self) -> None:
self._columns: Dict[str, Column] = {} # ordered as of 3.7
self._column_to_block_id: Dict[str, int] = {}
self._block_refs: Dict[int, BlockRef] = {}
def update(self, block_ref: BlockRef):
"""data (): a single blockable object, potentially contains multiple
columns."""
for name in block_ref:
if name in self:
self.remove(name)
# although we can't have the same column living in multiple managers
# we don't view here because it can lead to multiple calls to clone
self._columns.update(block_ref)
block_id = id(block_ref.block)
# check if there already is a block_ref in the manager for this block
if block_id in self._block_refs:
self._block_refs[block_id].update(block_ref)
else:
self._block_refs[block_id] = block_ref
self._column_to_block_id.update({name: block_id for name in block_ref.keys()})
def topological_block_refs(self):
"""Topological sort of the block refs based on Kahn's algorithm."""
children = defaultdict(list)
parents = defaultdict(list)
for block_id, block_ref in self._block_refs.items():
if isinstance(block_ref.block, DeferredBlock):
for arg in block_ref.block.data.args + list(
block_ref.block.data.kwargs.values()
):
if arg.is_blockable():
children[id(arg._block)].append(block_id)
# if the parent is in the block ref, add it to the graph
if (id(arg._block)) in self._block_refs:
parents[block_id].append(id(arg._block))
current = [] # get a set of all the nodes without an incoming edge
for block_id, block_ref in self._block_refs.items():
if not parents[block_id] or not isinstance(block_ref.block, DeferredBlock):
current.append((block_id, block_ref))
while current:
block_id, block_ref = current.pop(0)
yield block_id, block_ref
for child_id in children[block_id]:
parents[child_id].remove(block_id)
if not parents[child_id]:
current.append((child_id, self._block_refs[child_id]))
def apply(self, method_name: str = "_get", *args, **kwargs) -> BlockManager:
""""""
from .deferred_block import DeferredBlock
results = None
indexed_inputs = {}
for _, block_ref in self.topological_block_refs():
if isinstance(block_ref.block, DeferredBlock):
# defer computation of lambda columns, since they may be functions of
# the other columns
result = block_ref.apply(
method_name=method_name,
indexed_inputs=indexed_inputs,
*args,
**kwargs,
)
else:
result = block_ref.apply(method_name=method_name, *args, **kwargs)
if results is None:
# apply returns one of BlockRef, List[BlockRef], dict
results = BlockManager() if isinstance(result, (BlockRef, List)) else {}
if isinstance(result, List):
for ref in result:
if isinstance(ref, BlockRef):
results.update(ref)
elif isinstance(ref, Tuple):
name, column = ref
results[name] = column
else:
raise ValueError("Unrecognized.")
elif isinstance(result, (BlockRef, Dict)):
# result is a new block_ref
new_block_ref = result
for name, col in block_ref.items():
indexed_inputs[id(col)] = new_block_ref[name]
results.update(result)
else:
raise ValueError("Unexpected result of type {}".format(type(result)))
# apply method to columns not stored in block
for name, col in self._columns.items():
if results is not None and name in results:
continue
result = getattr(col, method_name)(*args, **kwargs)
if results is None:
results = BlockManager() if isinstance(result, Column) else {}
results[name] = result
if isinstance(results, BlockManager):
results.reorder(self.keys())
return results
def consolidate(self, consolidate_unitary_groups: bool = False):
column_order = list(
self._columns.keys()
) # need to maintain order after consolidate
block_ref_groups = defaultdict(list)
for _, block_ref in self.topological_block_refs():
block_ref_groups[block_ref.block.signature].append(block_ref)
# TODO we need to go through these block_ref groups in topological order
consolidated_inputs: Dict[int, Column] = {}
for block_refs in block_ref_groups.values():
if (not consolidate_unitary_groups) and len(block_refs) == 1:
# if there is only one block ref in the group, do not consolidate
continue
# consolidate group
block_class = block_refs[0].block.__class__
# consolidate needs to return a mapping from old column ids to new column
# ids so that we can update dependent lambda columns.
new_block_ref = block_class.consolidate(
block_refs, consolidated_inputs=consolidated_inputs
)
for block_ref in block_refs:
for name, col in block_ref.items():
consolidated_inputs[id(col)] = new_block_ref[name]
self.update(new_block_ref)
self.reorder(column_order)
def remove(self, name):
if name not in self._columns:
raise ValueError(f"Remove failed: no column '{name}' in BlockManager.")
self._columns.pop(name)
if name in self._column_to_block_id:
# column is blockable
block_ref = self._block_refs[self._column_to_block_id[name]]
del block_ref[name]
if len(block_ref) == 0:
self._block_refs.pop(self._column_to_block_id[name])
self._column_to_block_id.pop(name)
def reorder(self, order: Sequence[str]):
if set(order) != set(self._columns):
raise ValueError("Must include all columns when reordering a BlockManager.")
self._columns = {name: self._columns[name] for name in order}
def __getitem__(
self, index: Union[str, Sequence[str]]
) -> Union[Column, BlockManager]:
if isinstance(index, str):
return self._columns[index]
elif isinstance(index, Sequence):
mgr = BlockManager()
block_id_to_names = defaultdict(list)
for name in index:
if name not in self._column_to_block_id:
if name in self:
# non-blockable column
mgr.add_column(col=self._columns[name], name=name)
else:
raise ValueError(
f"`BlockManager` does not contain column '{name}'."
)
else:
# group blockable columns by block
block_id_to_names[self._column_to_block_id[name]].append(name)
# block refs for blockable columns
for block_id, names in block_id_to_names.items():
block_ref = self._block_refs[block_id]
mgr.update(block_ref[names])
mgr.reorder(order=index)
return mgr
else:
raise ValueError(
f"Unsupported index of type `{type(index)}` passed to `BlockManager`."
)
def __setitem__(self, index: str, data: Union[str, Sequence[str]]):
if isinstance(data, Column):
self.add_column(data, name=index)
else:
raise ValueError(
f"Cannot set item with object of type `{type(data)}` on `BlockManager`."
)
def __delitem__(self, key):
self.remove(key)
def __len__(self):
return len(self._columns)
@property
def nrows(self):
return 0 if len(self) == 0 else len(next(iter(self._columns.values())))
@property
def ncols(self):
return len(self)
def __contains__(self, value):
return value in self._columns
def __iter__(self):
return iter(self._columns)
def get_block_ref(self, name: str):
return self._block_refs[self._column_to_block_id[name]]
def add_column(self, col: Column, name: str):
"""Convert data to a meerkat column using the appropriate Column
type."""
if len(self) > 0 and len(col) != self.nrows:
raise ValueError(
f"Cannot add column '{name}' with length {len(col)} to `BlockManager` "
f" with length {self.nrows} columns."
)
# col = col.view()
if not col.is_blockable():
self._columns[name] = col
else:
self.update(BlockRef(columns={name: col}, block=col._block))
@classmethod
def from_dict(cls, data: Mapping[str, object]):
mgr = cls()
for name, data in data.items():
col = Column.from_data(data)
mgr.add_column(col=col, name=name)
return mgr
def write(self, path: str):
meta = {
"dtype": BlockManager,
"columns": {},
"_column_order": list(self.keys()),
}
# prepare directories
columns_dir = os.path.join(path, "columns")
blocks_dir = os.path.join(path, "blocks")
meta_path = os.path.join(path, "meta.yaml")
if os.path.isdir(path):
if (
os.path.exists(meta_path)
and os.path.exists(columns_dir)
and os.path.exists(blocks_dir)
):
# if overwriting, ensure that old columns are removed
shutil.rmtree(columns_dir)
shutil.rmtree(blocks_dir)
else:
# if path already points to a dir that wasn't previously holding a
# block manager, do not overwrite it. This protects against the user
# accidentally passing an important existing directory as the write path.
raise IsADirectoryError(
f"Cannot write `BlockManager`. {path} is a directory."
)
os.makedirs(path, exist_ok=True)
os.makedirs(blocks_dir)
os.makedirs(columns_dir)
# consolidate before writing
# we also want to consolidate unitary groups (i.e. groups with only one block
# ref) so that we don't write any data not actually in the dataframe
# because of this we need to make sure lambda columns know about their
# dependencies
self.consolidate(consolidate_unitary_groups=True) # TODO: change this back
# maintain a dictionary mapping column ids to paths where they are written
# so that lambda blocks that depend on those columns can refer to them
# appropriately
written_inputs: Dict[int, str] = {}
for block_id, block_ref in self.topological_block_refs():
block: AbstractBlock = block_ref.block
block_dir = os.path.join(blocks_dir, str(block_id))
if isinstance(block, DeferredBlock):
block.write(block_dir, written_inputs=written_inputs)
else:
block.write(block_dir)
for name, column in block_ref.items():
column_dir = os.path.join(columns_dir, name)
# os.makedirs(column_dir, exist_ok=True)
state = column._get_state()
# don't write the data, reference the block
meta["columns"][name] = {
**column._get_meta(),
"state": state,
"block": {
"block_dir": os.path.relpath(block_dir, path),
"block_index": _serialize_block_index(column._block_index),
"mmap": block.is_mmap,
},
}
# add the written column to the inputs
written_inputs[id(column)] = os.path.relpath(column_dir, path)
# write columns not in a block
for name, column in self._columns.items():
if name in meta["columns"]:
continue
meta["columns"][name] = column._get_meta()
column.write(os.path.join(columns_dir, name))
# TODO(sabri): move this above and add to written inputs
# Save the metadata as a yaml file
# sort_keys=False is required so that the columns are written in topological
# order
dump_yaml(meta, meta_path, sort_keys=False)
@classmethod
def read(
cls,
path: str,
columns: Sequence[str] = None,
**kwargs,
) -> BlockManager:
"""Load a DataFrame stored on disk."""
# Load the metadata
meta = dict(load_yaml(os.path.join(path, "meta.yaml")))
# maintain a dictionary mapping from paths to columns
# so that lambda blocks that depend on those columns don't load them again
read_inputs: Dict[str, Column] = {}
blocks = {}
mgr = cls()
for name, col_meta in meta["columns"].items():
column_dir = os.path.join(path, "columns", name)
# load a subset of columns
if columns is not None and name not in columns:
continue
if "block" in col_meta:
# read block or fetch it from `blocks` if it's already been read
block_meta = col_meta["block"]
if block_meta["block_dir"] not in blocks:
blocks[block_meta["block_dir"]] = AbstractBlock.read(
os.path.join(path, block_meta["block_dir"]),
mmap=block_meta.get("mmap", False),
read_inputs=read_inputs,
)
block = blocks[block_meta["block_dir"]]
# read column, passing in a block_view
col = col_meta["dtype"].read(
column_dir,
_data=block[_deserialize_block_index(block_meta["block_index"])],
_meta=col_meta,
**kwargs,
)
mgr.add_column(col, name)
read_inputs[os.path.relpath(column_dir, path)] = col
else:
mgr.add_column(
col_meta["dtype"].read(path=column_dir, _meta=col_meta, **kwargs),
name,
)
mgr.reorder(meta["_column_order"])
return mgr
@unmarked()
def _repr_pandas_(self, max_rows: int = None):
if max_rows is None:
max_rows = meerkat.config.display.max_rows
cols = {}
formatters = {}
for name, column in self._columns.items():
cols[name], formatters[name] = column._repr_pandas_(max_rows=max_rows)
if self.nrows > max_rows:
pd_index = np.concatenate(
(
np.arange(max_rows // 2),
np.zeros(1),
np.arange(self.nrows - max_rows // 2, self.nrows),
),
)
else:
pd_index = np.arange(self.nrows)
df = pd.DataFrame(cols)
df = df.set_index(pd_index.astype(int))
return df, formatters
def view(self):
mgr = BlockManager()
for name, col in self.items():
mgr.add_column(col.view(), name)
return mgr
def copy(self):
mgr = BlockManager()
for name, col in self.items():
mgr.add_column(col.copy(), name)
return mgr
def _serialize_block_index(index: BlockIndex) -> Union[Dict, str, int]:
if index is not None and not isinstance(index, (int, str, slice)):
raise ValueError("Can only serialize `BlockIndex` objects.")
elif isinstance(index, slice):
return {"start": index.start, "stop": index.stop, "step": index.step}
return index
def _deserialize_block_index(index: Union[Dict, int, str]) -> BlockIndex:
if isinstance(index, Dict):
return slice(index["start"], index["stop"], index["step"])
return index
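# Hedged usage sketch (illustrative only): BlockManager.from_dict builds
# columns via Column.from_data, groups blockable columns into shared blocks,
# and supports mapping-style access as well as selection of column subsets.
if __name__ == "__main__":
    _mgr = BlockManager.from_dict({"a": np.arange(4), "b": np.arange(4) * 2})
    assert _mgr.nrows == 4 and _mgr.ncols == 2
    _sub = _mgr[["a"]]  # selecting a list of names returns a new BlockManager
    assert list(_sub.keys()) == ["a"]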
|
meerkat-main
|
meerkat/block/manager.py
|
import fnmatch
import os
import re
def file_find_replace(directory, find, replace, pattern):
for path, _, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, pattern):
filepath = os.path.join(path, filename)
with open(filepath) as f:
s = f.read()
s = re.sub(find, replace, s)
with open(filepath, "w") as f:
f.write(s)
if __name__ == "__main__":
# Redirect the docs navbar Meerkat logo to the home page
# of the website.
file_find_replace(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "static", "docs"),
r'<a class="navbar-brand text-wrap" href="#">',
r'<a class="navbar-brand text-wrap" href="/">',
"*html",
)
|
meerkat-main
|
website/reroute.py
|
meerkat-main
|
tests/__init__.py
|
|
from functools import wraps
from itertools import product
from typing import Any, Dict, Sequence
import pytest
@wraps(pytest.mark.parametrize)
def product_parametrize(params: Dict[str, Sequence[Any]], **kwargs):
"""Wrapper around pytest.mark.parametrize with a simpler interface."""
argvalues, ids = zip(
*[
(v, ",".join(map(str, v))) if len(v) > 1 else (v[0], str(v[0]))
for v in product(*params.values())
]
)
params = {
"argnames": ",".join(params.keys()),
"argvalues": argvalues,
"ids": ids,
}
return pytest.mark.parametrize(
**params,
**kwargs,
)
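# Hedged usage sketch (illustrative only): the decorator expands the full
# cartesian product of the parameter grid into individual pytest cases; the
# test below is a made-up example.
@product_parametrize(params={"base": [2, 10], "exponent": [0, 3]})
def test_power_is_nonnegative(base: int, exponent: int):
    assert base**exponent >= 0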
|
meerkat-main
|
tests/utils.py
|
"""A collection of simple testbeds to build test cases."""
import os
from functools import wraps
from itertools import product
from typing import Sequence
import numpy as np
import pandas as pd
import pytest
import torch
from PIL import Image
from meerkat.columns.deferred.file import FileColumn
# from meerkat.columns.deferred.image import ImageColumn
from meerkat.columns.object.base import ObjectColumn
from meerkat.dataframe import DataFrame
class AbstractColumnTestBed:
DEFAULT_CONFIG = {}
@classmethod
def get_params(cls, config: dict = None, params: dict = None):
updated_config = cls.DEFAULT_CONFIG.copy()
if config is not None:
updated_config.update(config)
configs = list(
map(
dict,
product(*[[(k, v) for v in vs] for k, vs in updated_config.items()]),
)
)
if params is None:
return {
"argnames": "config",
"argvalues": configs,
"ids": [str(config) for config in configs],
}
else:
argvalues = list(product(configs, *params.values()))
return {
"argnames": "config," + ",".join(params.keys()),
"argvalues": argvalues,
"ids": [",".join(map(str, values)) for values in argvalues],
}
@classmethod
@wraps(pytest.mark.parametrize)
def parametrize(cls, config: dict = None, params: dict = None):
return pytest.mark.parametrize(**cls.get_params(config=config, params=params))
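# Illustrative usage sketch (hypothetical subclass, not part of the suite):
# concrete testbeds override DEFAULT_CONFIG and use `parametrize` to generate
# one pytest case per configuration.
#
# class MyColumnTestBed(AbstractColumnTestBed):
#     DEFAULT_CONFIG = {"dtype": ["int", "float"]}
#
# @MyColumnTestBed.parametrize()
# def test_something(config):
#     assert "dtype" in config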
class MockDatapanel:
def __init__(
self,
length: int,
use_visible_rows: bool = False,
use_visible_columns: bool = False,
include_image_column: bool = False,
tmpdir: str = None,
):
batch = {
"a": np.arange(length),
"b": ObjectColumn(np.arange(length)),
"c": [{"a": 2}] * length,
"d": torch.arange(length),
# offset the index to test robustness to nonstandard indices
"e": pd.Series(np.arange(length), index=np.arange(1, 1 + length)),
# test multidimensional
"f": np.ones((length, 5)).astype(int),
"g": torch.ones(length, 5).to(int),
}
if include_image_column:
assert tmpdir is not None
self.img_col = MockImageColumn(length=length, tmpdir=tmpdir)
batch["img"] = self.img_col.col
self.df = DataFrame.from_batch(batch)
self.visible_rows = [0, 4, 6, 11] if use_visible_rows else np.arange(length)
if use_visible_rows:
for column in self.df.values():
column.visible_rows = self.visible_rows
self.visible_columns = ["a", "b"] if use_visible_columns else self.df.columns
if use_visible_columns:
self.df.visible_columns = self.visible_columns
class MockColumn:
def __init__(
self,
use_visible_rows: bool = False,
col_type: type = ObjectColumn,
dtype: str = "int",
):
self.array = np.arange(16, dtype=dtype)
self.col = col_type(self.array)
if use_visible_rows:
self.visible_rows = np.array([0, 4, 6, 11])
self.col.visible_rows = self.visible_rows
else:
self.visible_rows = np.arange(16)
class MockStrColumn:
def __init__(self, use_visible_rows: bool = False, col_type: type = ObjectColumn):
self.array = np.array([f"row_{idx}" for idx in range(16)])
self.col = col_type(self.array)
if use_visible_rows:
self.visible_rows = np.array([0, 4, 6, 11])
self.col.visible_rows = self.visible_rows
else:
self.visible_rows = np.arange(16)
class MockAnyColumn:
def __init__(
self,
data: Sequence,
use_visible_rows: bool = False,
col_type: type = ObjectColumn,
):
self.array = data
self.col = col_type(self.array)
if use_visible_rows:
self.visible_rows = [0, 4, 6, 11]
self.col.visible_rows = self.visible_rows
else:
self.visible_rows = np.arange(16)
class MockImageColumn:
def __init__(self, length: int, tmpdir: str):
"""[summary]
Args:
wrap_dataset (bool, optional): If `True`, create a
`meerkat.DataFrame`
,
otherwise create a
`meerkat.core.dataformats.vision.VisionDataPane`
Defaults to False.
"""
self.image_paths = []
self.image_arrays = []
self.images = []
for i in range(0, length):
self.image_paths.append(os.path.join(tmpdir, "{}.png".format(i)))
self.image_arrays.append((i * np.ones((10, 10, 3))).astype(np.uint8))
im = Image.fromarray(self.image_arrays[-1])
im.save(self.image_paths[-1])
self.col = FileColumn(self.image_paths)
|
meerkat-main
|
tests/testbeds.py
|
# TODO
# Test display options
|
meerkat-main
|
tests/meerkat/test_display.py
|
meerkat-main
|
tests/meerkat/__init__.py
|
|
"""Unittests for Datasets."""
import os
import tempfile
import warnings
from functools import wraps
from itertools import product
from typing import Dict, Sequence, Set, Union
import huggingface_hub
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import torch
import ujson as json
import meerkat as mk
from meerkat.block.manager import BlockManager
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.base import DeferredColumn
from meerkat.columns.object.base import ObjectColumn
from meerkat.columns.scalar import ScalarColumn
from meerkat.columns.scalar.arrow import ArrowScalarColumn
from meerkat.columns.scalar.pandas import PandasScalarColumn
from meerkat.columns.tensor.abstract import TensorColumn
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.columns.tensor.torch import TorchTensorColumn
from meerkat.dataframe import DataFrame
from meerkat.interactive.graph.operation import Operation
from meerkat.interactive.graph.reactivity import is_unmarked_context
from meerkat.interactive.node import NodeMixin
from meerkat.row import Row
from ..utils import product_parametrize
from .columns.deferred.test_image import ImageColumnTestBed
from .columns.scalar.test_arrow import ArrowScalarColumnTestBed
from .columns.scalar.test_pandas import PandasScalarColumnTestBed
from .columns.tensor.test_numpy import NumPyTensorColumnTestBed
from .columns.tensor.test_torch import TorchTensorColumnTestBed
class DataFrameTestBed:
DEFAULT_CONFIG = {
"consolidated": [True, False],
}
DEFAULT_COLUMN_CONFIGS = {
"np": {"testbed_class": NumPyTensorColumnTestBed, "n": 2},
"pd": {"testbed_class": PandasScalarColumnTestBed, "n": 2},
"torch": {"testbed_class": TorchTensorColumnTestBed, "n": 2},
"img": {"testbed_class": ImageColumnTestBed, "n": 2},
"arrow": {"testbed_class": ArrowScalarColumnTestBed, "n": 2},
}
def __init__(
self,
column_configs: Dict[str, Column],
consolidated: bool = True,
length: int = 4,
tmpdir: str = None,
):
self.column_testbeds = self._build_column_testbeds(
column_configs, length=length, tmpdir=tmpdir
)
self.columns = {
name: testbed.col for name, testbed in self.column_testbeds.items()
}
self.df = DataFrame.from_batch(self.columns)
if consolidated:
self.df.consolidate()
def _build_column_testbeds(
self, column_configs: Dict[str, Column], length: int, tmpdir: str
):
def _get_tmpdir(name):
path = os.path.join(tmpdir, name)
os.makedirs(path)
return path
column_testbeds = {}
for name, config in column_configs.items():
params = config["testbed_class"].get_params(**config.get("kwargs", {}))
column_testbeds.update(
{
f"{name}_{col_id}_{idx}": config["testbed_class"](
**col_config.values[0][
1
], # may need to change this for non parameter set
seed=idx,
length=length,
tmpdir=_get_tmpdir(f"{name}_{col_id}_{idx}"),
)
for idx in range(config["n"])
for col_config, col_id in zip(params["argvalues"], params["ids"])
}
)
return column_testbeds
@classmethod
def get_params(
cls,
config: dict = None,
column_configs: Sequence[Dict] = None,
params: dict = None,
):
# produce all combinations of the config
updated_config = cls.DEFAULT_CONFIG.copy()
if config is not None:
updated_config.update(config)
configs = list(
map(
dict,
product(*[[(k, v) for v in vs] for k, vs in updated_config.items()]),
)
)
# add the column_configs to every
if column_configs is None:
column_configs = cls.DEFAULT_COLUMN_CONFIGS.copy()
for config in configs:
config["column_configs"] = column_configs
if params is None:
return {
"argnames": "testbed",
"argvalues": configs,
"ids": [str(config) for config in configs],
}
else:
def _repr_value(value):
if isinstance(value, type):
return value.__name__
return str(value)
argvalues = list(product(configs, *params.values()))
return {
"argnames": "testbed," + ",".join(params.keys()),
"argvalues": argvalues,
"ids": [",".join(map(_repr_value, values)) for values in argvalues],
}
@classmethod
@wraps(pytest.mark.parametrize)
def parametrize(
cls,
config: dict = None,
column_configs: Sequence[Dict] = None,
params: dict = None,
):
return pytest.mark.parametrize(
**cls.get_params(
config=config, params=params, column_configs=column_configs
),
indirect=["testbed"],
)
@classmethod
@wraps(pytest.fixture)
def fixture(
cls, config: dict = None, column_configs: Sequence[Dict] = None, *args, **kwargs
):
params = cls.get_params(
config=config, column_configs=column_configs, *args, **kwargs
)
return pytest.fixture(
params=params["argvalues"], ids=params["ids"], *args, **kwargs
)
@DataFrameTestBed.fixture()
def testbed(request, tmpdir):
config = request.param
return DataFrameTestBed(**config, tmpdir=tmpdir)
def test_col_index_single(testbed):
df = testbed.df
    # str index => single column (Column)
for name in testbed.columns:
index = name
col = df[index]
assert isinstance(col, Column)
# enforce that a single column index returns a coreference
assert col is df._data[index]
def test_col_index_multiple(testbed):
df = testbed.df
    # list of str index => multiple columns (DataFrame)
columns = list(testbed.columns)
for excluded_column in columns:
index = [c for c in columns if c != excluded_column]
new_df = df[index]
assert isinstance(new_df, DataFrame)
# enforce that a column index multiple returns a view of the old dataframe
for col_name in index:
assert new_df._data[col_name] is df._data[col_name]
def test_row_index_single(testbed):
df = testbed.df
    # int index => single row (Row)
index = 2
row = df[index]
assert isinstance(row, Row)
for key, value in row().items():
col_testbed = testbed.column_testbeds[key]
col_testbed.assert_data_equal(value, col_testbed.get_data(index))
@product_parametrize(
params={
"index_type": [
np.array,
pd.Series,
torch.Tensor,
NumPyTensorColumn,
ArrowScalarColumn,
PandasScalarColumn,
TorchTensorColumn,
list,
]
}
)
def test_row_index_multiple(testbed, index_type):
df = testbed.df
rows = np.arange(len(df))
def convert_to_index_type(index, dtype):
index = index_type(index)
if index_type == torch.Tensor:
return index.to(dtype)
return index
# slice index => multiple row selection (DataFrame)
# tuple or list index => multiple row selection (DataFrame)
    # np.array index => multiple row selection (DataFrame)
for rows, indices in (
(df[1:3], rows[1:3]),
(df[[0, 2]], rows[[0, 2]]),
(
df[convert_to_index_type(np.array((0,)), dtype=int)],
rows[np.array((0,))],
),
(
df[convert_to_index_type(np.array((1, 1)), dtype=int)],
rows[np.array((1, 1))],
),
(
df[
convert_to_index_type(
np.array((True, False) * (len(df) // 2)), dtype=bool
)
],
rows[np.array((True, False) * (len(df) // 2))],
),
):
rows = rows()
assert isinstance(rows, DataFrame)
for key, value in rows.items():
col_testbed = testbed.column_testbeds[key]
data = col_testbed.get_data(indices)
col_testbed.assert_data_equal(value.data, data)
if value.__class__ == df[key].__class__:
# if the getitem returns a column of the same type, enforce that all
# attributes were cloned over appropriately. We don't want to check
# for columns that return columns of different type from getitem
# (e.g. LambdaColumn)
assert df[key]._clone(data=data).is_equal(value)
def test_row_lz_index_single(testbed):
df = testbed.df
# int index => single row (dict)
index = 2
row = df[index]
assert isinstance(row, dict)
for key, value in row.items():
col_testbed = testbed.column_testbeds[key]
col_testbed.assert_data_equal(
value, col_testbed.get_data(index, materialize=False)
)
@product_parametrize(
params={
"index_type": [
np.array,
pd.Series,
torch.Tensor,
TorchTensorColumn,
ScalarColumn,
TorchTensorColumn,
]
}
)
def test_row_lz_index_multiple(testbed, index_type):
df = testbed.df
rows = np.arange(len(df))
def convert_to_index_type(index, dtype):
index = index_type(index)
if index_type == torch.Tensor:
return index.to(dtype)
return index
# slice index => multiple row selection (DataFrame)
# tuple or list index => multiple row selection (DataFrame)
    # np.array index => multiple row selection (DataFrame)
for rows, indices in (
(df[1:3], rows[1:3]),
(df[[0, 2]], rows[[0, 2]]),
(
df[convert_to_index_type(np.array((0,)), dtype=int)],
rows[np.array((0,))],
),
(
df[convert_to_index_type(np.array((1, 1)), dtype=int)],
rows[np.array((1, 1))],
),
(
df[
convert_to_index_type(
np.array((True, False) * (len(df) // 2)), dtype=bool
)
],
rows[np.array((True, False) * (len(df) // 2))],
),
):
assert isinstance(rows, DataFrame)
for key, value in rows.items():
col_testbed = testbed.column_testbeds[key]
data = col_testbed.get_data(indices, materialize=False)
col_testbed.assert_data_equal(value.data, data)
# if the getitem returns a column of the same type, enforce that all the
# attributes were cloned over appropriately. We don't want to check this
# for columns that return columns of different type from getitem
# (e.g. LambdaColumn)
if value.__class__ == df[key].__class__:
assert df[key]._clone(data=data).is_equal(value)
def test_invalid_indices(testbed):
df = testbed.df
index = ["nonexistent_column"]
missing_cols = set(index) - set(df.columns)
with pytest.raises(
KeyError, match=f"DataFrame does not have columns {missing_cols}"
):
df[index]
df = testbed.df
index = "nonexistent_column"
with pytest.raises(KeyError, match=f"Column `{index}` does not exist."):
df[index]
df = testbed.df
index = np.zeros((len(df), 10))
with pytest.raises(
ValueError, match="Index must have 1 axis, not {}".format(len(index.shape))
):
df[index]
df = testbed.df
index = torch.zeros((len(df), 10))
with pytest.raises(
ValueError, match="Index must have 1 axis, not {}".format(len(index.shape))
):
df[index]
df = testbed.df
index = {"a": 1}
with pytest.raises(TypeError, match="Invalid index type: {}".format(type(index))):
df[index]
def test_col_indexing_view_copy_semantics(testbed):
df = testbed.df
# Columns (1): Indexing a single column (i.e. with a str) returns the underlying
# AbstractColumn object directly. In the example below col1 and col2 are
# coreferences of the same column.
for name in df.columns:
        assert df[name] is df[name]
# Columns (2): Indexing multiple columns (i.e. with Sequence[str]) returns a
# view of the DataFrame holding views to the columns in the original DataFrame.
# This means the AbstractColumn objects held in the new DataFrame are the same
# AbstractColumn objects held in the original DataFrame.
columns = list(testbed.columns)
for excluded_column in columns:
index = [c for c in columns if c != excluded_column]
view_df = df[index]
for name in view_df.columns:
            assert view_df[name] is df[name]
            assert view_df[name].data is df[name].data
def test_row_indexing_view_copy_semantics():
length = 16
batch = {
"a": NumPyTensorColumn(np.arange(length)),
"b": ObjectColumn(np.arange(length)),
"c": [{"a": 2}] * length,
"d": TorchTensorColumn(torch.arange(length)),
# offset the index to test robustness to nonstandard indices
"e": pd.Series(np.arange(length), index=np.arange(1, 1 + length)),
# test multidimensional
"f": np.ones((length, 5)).astype(int),
"g": torch.ones(length, 5).to(int),
}
df = DataFrame.from_batch(batch)
# slice index
df2 = df[:8]
col = "a"
assert isinstance(df2[col], NumPyTensorColumn)
assert df[col] is not df2[col]
assert df[col].data is not df2[col].data
assert df[col].data.base is df2[col].data.base
col = "d"
assert isinstance(df2[col], TorchTensorColumn)
assert df[col] is not df2[col]
assert df[col].data is not df2[col].data
# note `data_ptr` checks whether the tensors have the same memory address of the
# first element, so this would not work if the slice didn't start at 0
assert df[col].data.data_ptr() == df2[col].data.data_ptr()
col = "e"
assert isinstance(df2[col], ScalarColumn)
assert df[col] is not df2[col]
assert df[col].data is not df2[col].data
# TODO (sabri): Figure out pandas copying behavior, it's not clear how it works
# and this deserves a deeper investigation.
# assert df[col].data.values.base is df2[col].data.values.base
# slice index
df2 = df[np.array([0, 1, 2, 5])]
col = "a"
assert isinstance(df2[col], NumPyTensorColumn)
assert df[col] is not df2[col]
assert df[col].data is not df2[col].data
assert df[col].data.base is not df2[col].data.base
col = "d"
assert isinstance(df2[col], TorchTensorColumn)
assert df[col] is not df2[col]
assert df[col].data is not df2[col].data
# note `data_ptr` checks whether the tensors have the same memory address of the
# first element, so this would not work if the slice didn't start at 0
assert df[col].data.data_ptr() != df2[col].data.data_ptr()
col = "e"
assert isinstance(df2[col], ScalarColumn)
assert df[col] is not df2[col]
assert df[col].data is not df2[col].data
assert df[col].data.values.base is not df2[col].data.values.base
# @product_parametrize(params={"batched": [True, False], "materialize": [True, False]})
# def test_map_return_multiple(
# testbed: DataFrameTestBed, batched: bool, materialize: bool
# ):
# df = testbed.df
# map_specs = {
# name: col_testbed.get_map_spec(batched=batched,
# materialize=materialize, salt=1)
# for name, col_testbed in testbed.column_testbeds.items()
# }
# def func(x):
# out = {key: map_spec["fn"](x[key]) for key, map_spec in map_specs.items()}
# return out
# result = df.map(
# func,
# batch_size=4,
# is_batched_fn=batched,
# materialize=materialize,
# output_type={
# key: map_spec["output_type"]
# for key, map_spec in map_specs.items()
# if "output_type" in map_spec
# },
# )
# assert isinstance(result, DataFrame)
# for key, map_spec in map_specs.items():
# assert result[key].is_equal(map_spec["expected_result"])
# @DataFrameTestBed.parametrize(
# column_configs={"img": {"testbed_class": ImageColumnTestBed, "n": 2}},
# )
# @product_parametrize(
# params={"batched": [True, False], "materialize": [True, False]},
# )
# def test_map_return_multiple_img_only(
# testbed: DataFrameTestBed, batched: bool, materialize: bool
# ):
# test_map_return_multiple(testbed=testbed, batched=batched,
# materialize=materialize)
# @product_parametrize(
# params={
# "batched": [True, False],
# "materialize": [True, False],
# "num_workers": [0],
# "use_kwargs": [True, False],
# }
# )
# def test_map_return_single(
# testbed: DataFrameTestBed,
# batched: bool,
# materialize: bool,
# num_workers: int,
# use_kwargs: bool,
# ):
# df = testbed.df
# kwargs = {"kwarg": 2} if use_kwargs else {}
# name = list(testbed.column_testbeds.keys())[0]
# map_spec = testbed.column_testbeds[name].get_map_spec(
# batched=batched, materialize=materialize, salt=1, **kwargs
# )
# def func(x, kwarg=0):
# out = map_spec["fn"](x[name], k=kwarg)
# return out
# result = df.map(
# func,
# batch_size=4,
# is_batched_fn=batched,
# materialize=materialize,
# num_workers=num_workers,
# **kwargs,
# )
# assert isinstance(result, Column)
# # FIXME(Sabri): put this back after implementing map
# # assert result.is_equal(map_spec["expected_result"])
# @DataFrameTestBed.parametrize(config={"consolidated": [True]})
# def test_map_return_single_multi_worker(
# testbed: DataFrameTestBed,
# ):
# test_map_return_single(
# testbed, batched=True, materialize=True, num_workers=2, use_kwargs=False
# )
# @product_parametrize(params={"batched": [True, False], "materialize": [True, False]})
# def test_map_update_new(testbed: DataFrameTestBed, batched: bool, materialize: bool):
# df = testbed.df
# map_specs = {
# name: col_testbed.get_map_spec(batched=batched,
# materialize=materialize, salt=1)
# for name, col_testbed in testbed.column_testbeds.items()
# }
# def func(x):
# out = {
# f"{key}_new": map_spec["fn"](x[key])
# for key, map_spec in map_specs.items()
# }
# return out
# result = df.update(
# func,
# batch_size=4,
# is_batched_fn=batched,
# materialize=materialize,
# output_type={
# f"{key}_new": map_spec["output_type"]
# for key, map_spec in map_specs.items()
# if "output_type" in map_spec
# },
# )
# assert set(result.columns) == set(df.columns) |
# {f"{key}_new" for key in df.columns}
# assert isinstance(result, DataFrame)
# for key, map_spec in map_specs.items():
# assert result[f"{key}_new"].is_equal(map_spec["expected_result"])
# @product_parametrize(params={"batched": [True, False],
# "materialize": [True, False]})
# def test_map_update_existing(
# testbed: DataFrameTestBed, batched: bool, materialize: bool
# ):
# df = testbed.df
# map_specs = {
# name: col_testbed.get_map_spec(batched=batched,
# materialize=materialize, salt=1)
# for name, col_testbed in testbed.column_testbeds.items()
# }
# def func(x):
# out = {f"{key}": map_spec["fn"](x[key])
# for key, map_spec in map_specs.items()}
# return out
# result = df.update(
# func,
# batch_size=4,
# is_batched_fn=batched,
# materialize=materialize,
# output_type={
# key: map_spec["output_type"]
# for key, map_spec in map_specs.items()
# if "output_type" in map_spec
# },
# )
# assert set(result.columns) == set(df.columns)
# assert result.data is not df.data
# assert isinstance(result, DataFrame)
# for key, map_spec in map_specs.items():
# assert result[key].is_equal(map_spec["expected_result"])
# @product_parametrize(params={"batched": [True, False],
# "materialize": [True, False]})
# def test_filter(testbed: DataFrameTestBed, batched: bool, materialize: bool):
# df = testbed.df
# name = list(testbed.column_testbeds.keys())[0]
# filter_spec = testbed.column_testbeds[name].get_filter_spec(
# batched=batched, materialize=materialize, salt=1
# )
# def func(x):
# out = filter_spec["fn"](x[name])
# return out
# result = df.filter(
# func,
# batch_size=4,
# is_batched_fn=batched,
# materialize=materialize,
# )
# assert isinstance(result, DataFrame)
# result[name].is_equal(filter_spec["expected_result"])
def test_remove_column():
a = np.arange(16)
b = np.arange(16) * 2
df = DataFrame.from_batch({"a": a, "b": b})
assert "a" in df
df.remove_column("a")
assert "a" not in df
def test_overwrite_column():
# make sure we remove the column when overwriting it
a = NumPyTensorColumn(np.arange(16))
b = NumPyTensorColumn(np.arange(16) * 2)
df = DataFrame.from_batch({"a": a, "b": b})
assert "a" in df
assert df[["a", "b"]]["a"]._data.base is a._data.base
# testing removal from block manager, so important to use non-blockable type
df["a"] = ObjectColumn(range(16))
assert df[["a", "b"]]["a"]._data is not a
# check that there are no duplicate columns
assert set(df.columns) == set(["a", "b"])
def test_rename():
a = NumPyTensorColumn(np.arange(16))
b = NumPyTensorColumn(np.arange(16) * 2)
df = DataFrame.from_batch({"a": a, "b": b})
assert "a" in df
new_df = df.rename({"a": "A"})
# make sure "a" was renamed to "A"
assert np.equal(new_df["A"], a)
assert np.equal(new_df["b"], b)
# check that there are no duplicate columns
assert set(new_df.columns) == set(["A", "b"])
# make sure rename happened out of place
assert df["a"]._data is a._data
assert df["b"]._data is b._data
new_df = df.rename(str.upper)
# make sure "a" was renamed to "A" and "b" was renamed to "B"
assert np.equal(new_df["A"], a)
assert np.equal(new_df["B"], b)
# check that there are no duplicate columns
assert set(new_df.columns) == set(["A", "B"])
# make sure rename happened out of place
assert df["a"]._data is a._data
assert df["b"]._data is b._data
@product_parametrize(params={"move": [True, False]})
def test_io(testbed, tmp_path, move):
"""`map`, mixed dataframe, return multiple, `is_batched_fn=True`"""
df = testbed.df
path = os.path.join(tmp_path, "test")
df.write(path)
if move:
new_path = os.path.join(tmp_path, "new_test")
os.rename(path, new_path)
path = new_path
new_df = DataFrame.read(path)
assert isinstance(new_df, DataFrame)
assert df.columns == new_df.columns
assert len(new_df) == len(df)
for name in df.columns:
# check that the mmap status is preserved across df loads
assert isinstance(new_df[name], np.memmap) == isinstance(df[name], np.memmap)
if isinstance(new_df[name], DeferredColumn):
# the lambda function isn't exactly the same after reading
new_df[name].data.fn = df[name].data.fn
        assert new_df[name].is_equal(df[name])
@pytest.mark.parametrize(
"url_suffix",
["embeddings/imagenette_160px.mk.tar.gz"],
)
def test_read_meerkat_hf_dataframe(url_suffix):
"""Test reading meerkat dataframe hosted on huggingface."""
url = os.path.join(
"https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main",
url_suffix,
)
df = DataFrame.read(url)
assert isinstance(df, DataFrame)
def test_read_meerkat_hf_dataframe_from_repo(tmp_path):
"""Test reading meerkat dataframe from a meerkat repository."""
repo = huggingface_hub.Repository(
local_dir=tmp_path / "repo",
clone_from="meerkat-ml/test-hf_url",
repo_type="dataset",
)
df = DataFrame.read(repo.local_dir)
assert isinstance(df, DataFrame)
def test_read_meerkat_hf_dataframe_from_url():
df = DataFrame.read("https://huggingface.co/datasets/meerkat-ml/test-hf_url")
assert isinstance(df, DataFrame)
def test_ipython_display_(testbed):
testbed.df._ipython_display_()
def test_append_columns():
length = 16
batch = {
"a": np.arange(length),
"b": ObjectColumn(np.arange(length)),
"c": [{"a": 2}] * length,
"d": torch.arange(length),
# offset the index to test robustness to nonstandard indices
"e": pd.Series(np.arange(length), index=np.arange(1, 1 + length)),
# test multidimensional
"f": np.ones((length, 5)).astype(int),
"g": torch.ones(length, 5).to(int),
}
df = DataFrame.from_batch(batch)
out = df.append(df, axis="rows")
assert len(out) == len(df) * 2
assert isinstance(out, DataFrame)
assert set(out.columns) == set(df.columns)
assert (out["a"].data == np.concatenate([np.arange(length)] * 2)).all()
assert out["b"].data == list(np.concatenate([np.arange(length)] * 2))
@product_parametrize(
params={
"shuffle": [True, False],
"batch_size": [1, 4],
"materialize": [True, False],
}
)
def test_batch(testbed, shuffle: bool, batch_size: int, materialize: bool):
df = testbed.df
df["idx"] = np.arange(len(df))
order = []
for batch in df.batch(batch_size=batch_size, shuffle=shuffle):
order.append(batch["idx"].data)
        for name, col in batch.items():
            assert col.is_equal(df[batch["idx"]][name])
order = np.array(order).flatten()
if shuffle:
assert (order != np.arange(len(df))).any()
else:
assert (order == np.arange(len(df))).all()
def test_tail(testbed):
df = testbed.df
new_df = df.tail(n=2)
assert isinstance(new_df, DataFrame)
assert new_df.columns == df.columns
assert len(new_df) == 2
def test_head(testbed):
df = testbed.df
new_df = df.head(n=2)
assert isinstance(new_df, DataFrame)
assert new_df.columns == df.columns
assert len(new_df) == 2
class DataFrameSubclass(DataFrame):
"""Mock class to test that ops on subclass returns subclass."""
def __init__(self, *args, **kwargs):
self.name = "subclass"
super().__init__(*args, **kwargs)
def _state_keys(cls) -> Set[str]:
return super()._state_keys().union({"name"})
def test_subclass():
df1 = DataFrameSubclass({"a": np.arange(3), "b": ["may", "jun", "jul"]})
df2 = DataFrameSubclass({"c": np.arange(3), "d": ["2021", "2022", "2023"]})
assert isinstance(df1[np.asarray([0, 1])], DataFrameSubclass)
assert isinstance(df1[:2], DataFrameSubclass)
assert isinstance(df1.merge(df2, left_on="a", right_on="c"), DataFrameSubclass)
assert isinstance(df1.append(df1), DataFrameSubclass)
assert df1._state_keys() == set(["name", "_primary_key"])
assert df1._get_state() == {"name": "subclass", "_primary_key": "a"}
def test_from_csv():
temp_f = tempfile.NamedTemporaryFile()
data = {
"a": [3.4, 2.3, 1.2],
"b": ["alpha", "beta", "gamma"],
"c": ["the walk", "the talk", "blah"],
}
pd.DataFrame(data).to_csv(temp_f.name)
df_new = DataFrame.from_csv(temp_f.name)
assert df_new.columns == ["Unnamed: 0", "a", "b", "c"]
# Skip index column
for k in data:
if isinstance(df_new[k], ScalarColumn):
data_to_compare = df_new[k]._data.tolist()
else:
data_to_compare = df_new[k]._data
assert data_to_compare == data[k]
def test_from_huggingface(tmpdir: str):
# Returns a dataset dict
df = DataFrame.from_huggingface(
"hf-internal-testing/fixtures_ade20k",
cache_dir=tmpdir,
)["test"]
assert len(df) == 4
assert len(df.columns) == 2
# Returns a dataset
df = DataFrame.from_huggingface(
"hf-internal-testing/fixtures_ade20k",
cache_dir=tmpdir,
split="test",
)
assert len(df) == 4
assert len(df.columns) == 2
def test_from_jsonl():
# Build jsonl file
temp_f = tempfile.NamedTemporaryFile()
data = {
"a": [3.4, 2.3, 1.2],
"b": [[7, 9], [4], [1, 2]],
"c": ["the walk", "the talk", "blah"],
}
with open(temp_f.name, "w") as out_f:
for idx in range(3):
to_write = {k: data[k][idx] for k in list(data.keys())}
out_f.write(json.dumps(to_write) + "\n")
df_new = DataFrame.from_json(temp_f.name, lines=True)
assert df_new.columns == ["a", "b", "c"]
# Skip index column
for k in data:
if isinstance(df_new[k], TorchTensorColumn):
data_to_compare = df_new[k]._data.tolist()
else:
data_to_compare = df_new[k]._data
if k == "d":
assert data_to_compare == data[k]
elif k == "b":
assert list(data_to_compare) == data[k]
else:
assert (data_to_compare == np.array(data[k])).all()
temp_f.close()
def test_from_batch():
# Build a dataset from a batch
dataframe = DataFrame.from_batch(
{
"a": [1, 2, 3],
"b": [True, False, True],
"c": ["x", "y", "z"],
"d": [{"e": 2}, {"e": 3}, {"e": 4}],
"e": torch.ones(3),
"f": np.ones(3),
},
)
assert set(dataframe.columns) == {"a", "b", "c", "d", "e", "f"}
assert len(dataframe) == 3
def test_from_arrow():
table = pa.Table.from_arrays(
[
pa.array(np.arange(0, 100)),
pa.array(np.arange(0, 100).astype(float)),
pa.array(map(str, np.arange(0, 100))),
],
names=["a", "b", "c"],
)
df = DataFrame.from_arrow(table)
    # check that all three columns share the same underlying block, which wraps
    # the pyarrow table
    assert df["a"]._block is df["b"]._block
    assert df["a"]._block is df["c"]._block
for col in ["a", "b", "c"]:
assert isinstance(df[col], ArrowScalarColumn)
assert pa.compute.equal(df[col].data, table[col])
def test_to_pandas_allow_objects():
import pandas as pd
length = 16
batch = {
"a": np.arange(length),
"b": ObjectColumn(np.arange(length)),
"c": [{"a": 2}] * length,
"d": torch.arange(length),
# offset the index to test robustness to nonstandard indices
"e": pd.Series(np.arange(length), index=np.arange(1, 1 + length)),
# test multidimensional
"f": np.ones((length, 5)).astype(int),
"g": torch.ones(length, 5).to(int),
}
df = DataFrame(batch)
df_pd = df.to_pandas(allow_objects=True)
assert isinstance(df_pd, pd.DataFrame)
assert list(df.columns) == list(df_pd.columns)
assert len(df) == len(df_pd)
assert (df_pd["a"].values == df["a"].data).all()
assert list(df["b"]) == list(df["b"].data)
assert isinstance(df_pd["c"][0], dict)
assert (df_pd["d"].values == df["d"].numpy()).all()
assert (df_pd["e"].values == df["e"].values).all()
def test_to_pandas_disallow_objects(testbed):
df = testbed.df
pdf = df.to_pandas(allow_objects=False)
for name, col in df.items():
if isinstance(col, ObjectColumn) or isinstance(col, DeferredColumn):
assert name not in pdf
elif isinstance(col, TensorColumn) and len(col.shape) > 1:
assert name not in pdf
else:
assert name in pdf
def test_to_arrow(testbed):
df = testbed.df
adf = df.to_arrow()
for name, col in df.items():
if isinstance(col, ObjectColumn) or isinstance(col, DeferredColumn):
assert name not in adf.column_names
elif isinstance(col, TensorColumn) and len(col.shape) > 1:
assert name not in adf.column_names
else:
assert name in adf.column_names
assert (adf[name].to_numpy() == col.to_numpy()).all()
@product_parametrize(params={"engine": ["arrow", "pandas"]})
def test_csv_io(testbed, tmpdir, engine):
df = testbed.df
filepath = os.path.join(tmpdir, "test.csv")
with pytest.warns():
df.to_csv(filepath, engine=engine)
df2 = DataFrame.from_csv(filepath)
for name, col in df.items():
if isinstance(col, ObjectColumn) or isinstance(col, DeferredColumn):
assert name not in df2
elif isinstance(col, TensorColumn) and len(col.shape) > 1:
assert name not in df2
else:
# note we do not check equality because writing to CSV can lead to
# casting issues
assert name in df2
@product_parametrize(params={"engine": ["arrow", "pandas"]})
def test_feather_io(testbed, tmpdir, engine):
df = testbed.df
filepath = os.path.join(tmpdir, "test.feather")
with pytest.warns():
df.to_feather(filepath, engine=engine)
df2 = DataFrame.from_feather(filepath)
for name, col in df.items():
if isinstance(col, ObjectColumn) or isinstance(col, DeferredColumn):
assert name not in df2
elif isinstance(col, TensorColumn) and len(col.shape) > 1:
assert name not in df2
else:
assert name in df2
assert (df2[name].to_numpy() == col.to_numpy()).all()
@product_parametrize(params={"engine": ["arrow", "pandas"]})
def test_parquet_io(testbed, tmpdir, engine):
df = testbed.df
filepath = os.path.join(tmpdir, "test.parquet")
with pytest.warns():
df.to_parquet(filepath, engine=engine)
df2 = DataFrame.from_parquet(filepath)
for name, col in df.items():
if isinstance(col, ObjectColumn) or isinstance(col, DeferredColumn):
assert name not in df2
elif isinstance(col, TensorColumn) and len(col.shape) > 1:
assert name not in df2
else:
assert name in df2
assert (df2[name].to_numpy() == col.to_numpy()).all()
def test_json_io(testbed, tmpdir):
df = testbed.df
filepath = os.path.join(tmpdir, "test.json")
with pytest.warns():
df.to_json(filepath)
df2 = DataFrame.from_json(filepath, dtype=False)
for name, col in df.items():
if isinstance(col, ObjectColumn) or isinstance(col, DeferredColumn):
assert name not in df2
elif isinstance(col, TensorColumn) and len(col.shape) > 1:
assert name not in df2
else:
assert name in df2
if col.to_numpy().dtype == "object":
assert np.all(df2[name].to_numpy() == col.to_numpy())
else:
assert np.allclose(df2[name].to_numpy(), col.to_numpy())
def test_constructor():
length = 16
# from dictionary
data = {
"a": np.arange(length),
"b": ObjectColumn(np.arange(length)),
}
df = DataFrame(data=data)
assert len(df) == length
assert df["a"].is_equal(ScalarColumn(np.arange(length)))
# from BlockManager
mgr = BlockManager.from_dict(data)
df = DataFrame(data=mgr)
assert len(df) == length
assert df["a"].is_equal(ScalarColumn(np.arange(length)))
assert df.columns == ["a", "b"]
# from list of dictionaries
data = [{"a": idx, "b": str(idx), "c": {"test": idx}} for idx in range(length)]
df = DataFrame(data=data)
assert len(df) == length
assert df["a"].is_equal(ScalarColumn(np.arange(length)))
assert isinstance(df["c"], ObjectColumn)
assert df.columns == ["a", "b", "c"]
# from list of dictionaries, missing values
data = [
{"a": idx, "b": str(idx)}
if (idx % 2 == 0)
else {"a": idx, "b": str(idx), "c": idx}
for idx in range(length)
]
df = DataFrame(data=data)
assert len(df) == length
assert df["a"].is_equal(ScalarColumn(np.arange(length)))
# need to fillna because nan comparisons return false in pandas
assert (
df["c"]
.fillna(0)
.is_equal(ScalarColumn([0 if idx % 2 == 0 else idx for idx in range(length)]))
)
assert df.columns == ["a", "b", "c"]
# from nothing
df = DataFrame()
assert len(df) == 0
def test_constructor_w_invalid_data():
with pytest.raises(
ValueError,
match=f"Cannot set DataFrame `data` to object of type {type(5)}.",
):
DataFrame(data=5)
def test_constructor_w_invalid_sequence():
data = list(range(4))
with pytest.raises(
ValueError,
match="Cannot set DataFrame `data` to a Sequence containing object of "
f" type {type(data[0])}. Must be a Sequence of Mapping.",
):
DataFrame(data=data)
def test_constructor_w_unequal_lengths():
length = 16
data = {
"a": np.arange(length),
"b": ObjectColumn(np.arange(length - 1)),
}
with pytest.raises(
ValueError,
match=(
f"Cannot add column 'b' with length {length - 1} to `BlockManager` "
f" with length {length} columns."
),
):
DataFrame(data=data)
def test_shape():
length = 16
data = {
"a": np.arange(length),
"b": ObjectColumn(np.arange(length)),
}
df = DataFrame(data)
assert df.shape == (16, 2)
def test_str(testbed):
result = str(testbed.df)
assert isinstance(result, str)
def test_repr(testbed):
result = repr(testbed.df)
assert isinstance(result, str)
@product_parametrize(params={"max_rows": [6, 16, 20]})
def test_repr_pandas(testbed, max_rows: int):
mk.config.display.max_rows = max_rows
df, _ = testbed.df._repr_pandas_()
assert isinstance(df, pd.DataFrame)
assert len(df) == min(len(df), max_rows + 1)
@product_parametrize(
params={"column_type": [PandasScalarColumn, ArrowScalarColumn, NumPyTensorColumn]}
)
def test_loc_single(testbed, column_type: type):
df = testbed.df
# int index => single row (dict)
index = 2
df["pk"] = column_type((np.arange(len(df)) + 10).astype(str))
df = df.set_primary_key("pk")
row = df.loc[str(index + 10)]
assert isinstance(row, dict)
for key, value in row.items():
if key == "pk":
continue
col_testbed = testbed.column_testbeds[key]
col_testbed.assert_data_equal(
value, col_testbed.get_data(index, materialize=False)
)
@product_parametrize(
params={"column_type": [ScalarColumn, ArrowScalarColumn, NumPyTensorColumn]}
)
def test_loc_multiple(testbed, column_type):
df = testbed.df
    # array of primary-key values => multiple rows (DataFrame)
indices = np.array([2, 3])
df["pk"] = column_type((np.arange(len(df)) + 10).astype(str))
df = df.set_primary_key("pk")
loc_index = (indices + 10).astype(str)
new_df = df.loc[loc_index]
assert isinstance(new_df, DataFrame)
for key, value in new_df.items():
if key == "pk":
continue
col_testbed = testbed.column_testbeds[key]
data = col_testbed.get_data(indices, materialize=False)
col_testbed.assert_data_equal(value.data, data)
def test_loc_missing():
df = DataFrame({"x": TorchTensorColumn([1, 2, 3]), "y": ScalarColumn([4, 5, 6])})
df = df.set_primary_key("y")
with pytest.raises(KeyError):
df.loc[1, 2, 4]
def test_primary_key_persistence():
df = DataFrame({"a": ScalarColumn(np.arange(16)), "b": ScalarColumn(np.arange(16))})
df = df.set_primary_key("a")
df = df[:4]
df._primary_key == "a"
assert (df.primary_key == ScalarColumn(np.arange(4))).all()
def test_invalid_primary_key():
    # multidimensional
df = DataFrame({"a": TorchTensorColumn([[1, 2, 3]])})
with pytest.raises(ValueError):
df.set_primary_key("a")
def test_primary_key_reset():
df = DataFrame({"a": ScalarColumn(np.arange(16)), "b": ScalarColumn(np.arange(16))})
df = df.set_primary_key("a")
df["a"] = ScalarColumn(np.arange(16))
assert df._primary_key is None
def test_check_primary_key_reset():
df = DataFrame({"a": ScalarColumn(np.arange(16)), "b": ScalarColumn(np.arange(16))})
df = df.set_primary_key("a")
assert df.append(df).primary_key is None
def test_check_primary_key_no_reset():
df = DataFrame({"a": ScalarColumn(np.arange(16)), "b": ScalarColumn(np.arange(16))})
df = df.set_primary_key("a")
df2 = DataFrame(
{"a": ScalarColumn(np.arange(16, 32)), "b": ScalarColumn(np.arange(16))}
)
assert df.append(df2).primary_key is not None
@pytest.mark.parametrize("x", [0, 0.0, "hello world", np.nan, np.inf])
def test_scalar_setitem(x):
df = DataFrame({"a": ScalarColumn(np.arange(16))})
df["extra_column"] = x
assert len(df["extra_column"]) == len(df)
    assert isinstance(df["extra_column"], ScalarColumn)
if not isinstance(x, str) and (np.isnan(x) or np.isinf(x)):
if np.isnan(x):
assert np.all(np.isnan(df["extra_column"]))
elif np.isinf(x):
assert np.all(np.isinf(df["extra_column"]))
else:
assert all(df["extra_column"] == x)
@mk.endpoint()
def _set_store_or_df(store: Union[mk.Store, mk.DataFrame], value):
store.set(value)
@pytest.mark.parametrize(
"name",
[
# Instance variables.
"_data",
"_primary_key",
# Properties.
"gui",
"data",
"columns",
"primary_key",
"primary_key_name",
"nrows",
"ncols",
"shape",
],
)
def test_reactivity_attributes_and_properties(name: str):
"""Test that attributes and properties of the dataframe are reactive."""
class Foo:
pass
df = DataFrame({"a": np.arange(10), "b": torch.arange(10), "c": [Foo()] * 10})
df = df.mark()
# These should return an object that can be attached to a node.
# i.e. we should be able to put the output on the graph.
out = getattr(df, name)
if name.startswith("_"):
# Private attributes should not be reactive.
assert not isinstance(out, mk.Store)
assert df.inode is None
return
assert isinstance(out, NodeMixin)
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
# Check the operation name
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == name
# No reactivity.
# We want to check that the output is not a Store.
    # It can be other NodeMixins, because those are classes built into meerkat.
with mk.unmarked():
out = getattr(df, name)
assert not isinstance(out, mk.Store)
def test_reactivity_len():
df = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
    # A warning should be raised when we are in a marked context and df is marked.
df = df.mark()
assert not is_unmarked_context() and df.marked
with pytest.warns(UserWarning):
length = len(df)
assert length == 10
assert not isinstance(length, mk.Store)
# Warnings should not be raised if we are not in a reactive context.
df = df.mark()
with mk.unmarked():
assert is_unmarked_context() and df.marked
with warnings.catch_warnings():
warnings.simplefilter("error")
length = len(df)
assert length == 10
assert not isinstance(length, mk.Store)
# Warnings should not be raised when the df is not reactive.
df = df.unmark()
assert not is_unmarked_context() and not df.marked
with warnings.catch_warnings():
warnings.simplefilter("error")
length = len(df)
assert length == 10
assert not isinstance(length, mk.Store)
# Calling df.__len__ directly should also raise the warning.
# __len__ will always return a primitive, even when called in
# the getattr style: df.__len__().
with warnings.catch_warnings():
warnings.simplefilter("error")
length = df.__len__()
assert length == 10
assert not isinstance(length, mk.Store)
# mk.len should follow standard reactive protocols.
df = df.mark()
assert not is_unmarked_context() and df.marked
length = mk.len(df)
assert isinstance(length, mk.Store)
assert length == 10
assert length.inode is not None
df = df.unmark()
assert not is_unmarked_context() and not df.marked
length = mk.len(df)
assert not isinstance(length, mk.Store)
assert length == 10
def test_reactivity_contains():
df = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
df = df.mark()
item = "a"
    # A warning should be raised when we are in a marked context and df is marked.
assert not is_unmarked_context() and df.marked
with pytest.warns(UserWarning):
out = item in df
assert not isinstance(out, mk.Store)
assert out
# Warnings should not be raised if we are not in a reactive context.
df = df.mark()
with mk.unmarked():
assert is_unmarked_context() and df.marked
with warnings.catch_warnings():
warnings.simplefilter("error")
out = item in df
assert not isinstance(out, mk.Store)
assert out
# Warnings should not be raised when the df is not reactive.
df = df.unmark()
assert not is_unmarked_context() and not df.marked
with warnings.catch_warnings():
warnings.simplefilter("error")
out = item in df
assert not isinstance(out, mk.Store)
assert out
# Calling df.__contains__ directly should also raise the warning.
# __contains__ will always return a primitive, even when called in
# the getattr style: df.__contains__().
df = df.mark()
with warnings.catch_warnings():
warnings.simplefilter("error")
        contains = df.__contains__(item)
        assert not isinstance(contains, mk.Store)
        assert contains
def test_reactivity_contains_alternate():
"""Test the alternate way determining if an item is in a dataframe.
i.e. df.contains(item)
"""
df = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
item = mk.Store("a")
df = df.mark()
with warnings.catch_warnings():
warnings.simplefilter("error")
a_contains = df.contains(item)
assert isinstance(a_contains, mk.Store)
assert a_contains.inode is not None
inode = a_contains.inode
assert a_contains
assert isinstance(a_contains, mk.Store)
assert isinstance(a_contains, bool)
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == "contains"
assert len(op.inode.trigger_children) == 1
assert id(op.inode.trigger_children[0]) == id(a_contains.inode)
# Change the store - "c" is not in the df.
_set_store_or_df(item, "c")
assert not inode.obj
def test_reactivity_size():
df = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
df = df.mark()
shape = df.size()
inode = shape.inode
assert shape == (10, 2)
assert isinstance(shape, mk.Store)
assert isinstance(shape, tuple)
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == "size"
assert len(op.inode.trigger_children) == 1
assert id(op.inode.trigger_children[0]) == id(shape.inode)
# Change the dataframe
_set_store_or_df(df, DataFrame({"a": np.arange(5)}))
assert inode.obj == (5, 1)
@pytest.mark.parametrize("axis", ["rows", "columns"])
def test_reactivity_append(axis: str):
df = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
df = df.mark()
if axis == "rows":
df2 = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
else:
df2 = DataFrame({"c": np.arange(10), "d": torch.arange(10)})
df_append = df.append(df2, axis=axis)
inode = df_append.inode
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == "append"
assert len(op.inode.trigger_children) == 1
assert id(op.inode.trigger_children[0]) == id(df_append.inode)
# Change the input dataframe
if axis == "rows":
_set_store_or_df(df, DataFrame({"a": np.arange(5), "b": torch.arange(5)}))
assert inode.obj.shape == (15, 2)
else:
_set_store_or_df(
df, DataFrame({"alpha": np.arange(10), "beta": torch.arange(10)})
)
assert inode.obj.columns == ["alpha", "beta", "c", "d"]
@pytest.mark.parametrize("op_name", ["head", "tail"])
def test_reactivity_head_tail(op_name: str):
df = DataFrame({"a": np.arange(10), "b": torch.arange(10)})
df = df.mark()
df_slice = getattr(df, op_name)()
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == op_name
assert len(op.inode.trigger_children) == 1
assert id(op.inode.trigger_children[0]) == id(df_slice.inode)
def test_reactivity_getitem_multiple_columns():
df = DataFrame(
{"a": np.arange(10), "b": torch.arange(20, 30), "c": torch.arange(40, 50)}
)
df = df.mark()
store = mk.mark(["a", "b"])
df_col = df[store]
inode = df_col.inode
assert isinstance(df_col, DataFrame)
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == "__getitem__"
assert len(op.inode.trigger_children) == 1
assert id(op.inode.trigger_children[0]) == id(df_col.inode)
assert len(store.inode.trigger_children) == 1
assert id(store.inode.trigger_children[0]) == id(op.inode)
# Change the store
_set_store_or_df(store, ["c"])
assert np.all(inode.obj["c"].to_numpy() == df["c"].to_numpy())
# Change the dataframe
_set_store_or_df(df, DataFrame({"c": np.arange(5)}))
assert np.all(inode.obj["c"].to_numpy() == np.arange(5))
def test_reactivity_getitem_slicing():
df = DataFrame({"a": np.arange(10), "b": torch.arange(20, 30)})
df.mark()
store = mk.Store(slice(0, 5))
df_slice = df[store]
inode = df_slice.inode
# Change the store
_set_store_or_df(store, slice(5, 10))
with mk.unmarked():
assert np.all(inode.obj["a"] == np.arange(5, 10))
assert torch.all(inode.obj["b"] == torch.arange(25, 30))
# Change the dataframe
_set_store_or_df(df, DataFrame({"a": np.arange(5)}))
assert len(inode.obj) == 0
_set_store_or_df(store, slice(0, 5))
with mk.unmarked():
assert np.all(inode.obj["a"] == np.arange(5))
def test_reactivity_getitem_single_column():
df = DataFrame(
{"a": np.arange(10), "b": torch.arange(20, 30), "c": torch.arange(40, 50)}
)
df.mark()
store = mk.Store("b")
df_col = df[store]
df_inode = df.inode
df_col_inode = df_col.inode
assert df_inode is not None
assert df_col_inode is not None
assert isinstance(df_col, mk.Column)
assert df.inode.has_trigger_children()
assert len(df.inode.trigger_children) == 1
op = df.inode.trigger_children[0].obj
assert isinstance(op, Operation)
assert op.fn.__name__ == "__getitem__"
assert len(op.inode.trigger_children) == 1
assert id(op.inode.trigger_children[0]) == id(df_col.inode)
assert len(store.inode.trigger_children) == 1
assert id(store.inode.trigger_children[0]) == id(op.inode)
    # These two sets should happen simultaneously.
# TODO: Add a utility to have multiple endpoints occur simultaneously.
_set_store_or_df(store, "c")
_set_store_or_df(df, DataFrame({"c": np.arange(5)}))
assert np.all(df_inode.obj["c"] == np.arange(5))
def test_reactivity_merge():
df1 = DataFrame({"a": np.arange(10), "b": torch.arange(20, 30)}).mark()
df2 = DataFrame({"a": np.arange(10), "d": torch.arange(20, 30)}).mark()
on = mk.Store("a")
df_merge = df1.merge(df2, on=on)
inode = df_merge.inode
assert np.all(
df_merge.to_pandas() == df1.to_pandas().merge(df2.to_pandas(), on="a")
)
assert len(df1.inode.trigger_children) == 1
assert len(df2.inode.trigger_children) == 1
assert df1.inode.trigger_children[0].obj.fn.__name__ == "merge"
assert df2.inode.trigger_children[0].obj.fn.__name__ == "merge"
new_df = df1.copy()
new_df["a"][-1] = 20
_set_store_or_df(df1, new_df)
assert len(inode.obj) == 9
def test_reactivity_sort():
a, b = np.arange(10), np.arange(20, 30)
np.random.shuffle(a)
np.random.shuffle(b)
df = DataFrame({"a": a, "b": b}).mark()
store = mk.Store("a")
df_sort = df.sort(by=store)
inode = df_sort.inode
assert np.all(inode.obj["a"] == np.arange(10))
_set_store_or_df(store, "b")
assert np.all(inode.obj["b"] == np.arange(20, 30))
def test_reactivity_sample():
df = DataFrame({"a": np.arange(100)}).mark()
frac = mk.Store(0.1)
df_sample = df.sample(frac=frac)
inode = df_sample.inode
assert len(inode.obj) == 10
# update fraction
_set_store_or_df(frac, 0.2)
assert len(inode.obj) == 20
# update dataframe
_set_store_or_df(df, DataFrame({"a": np.arange(10)}))
assert len(inode.obj) == 2
def test_reactivity_rename():
df = DataFrame({"a": np.arange(10), "b": torch.arange(20, 30)}).mark()
store = mk.Store({"a": "c"})
df_rename = df.rename(mapper=store)
inode = df_rename.inode
assert list(inode.obj.keys()) == ["c", "b"]
# rename is an out-of-place method.
# renaming occurs on the source dataframe, which has columns "a" and "b".
# Calling `rename` with "b" -> "d" will operate on the source dataframe.
# Thus column "a" should still exist.
_set_store_or_df(store, {"b": "d"})
assert list(inode.obj.keys()) == ["a", "d"]
def test_reactivity_drop():
df = DataFrame({"a": np.arange(10), "b": torch.arange(20, 30)}).mark()
store = mk.Store(["a"])
df_drop = df.drop(columns=store)
inode = df_drop.inode
assert list(inode.obj.keys()) == ["b"]
# drop is an out-of-place method.
# Thus, column "a" will still exist when `drop` is rerun with argument "b".
_set_store_or_df(store, ["b"])
assert list(inode.obj.keys()) == ["a"]
def test_reactivity_keys():
df = DataFrame({"a": np.arange(10), "b": torch.arange(20, 30)})
df = df.mark()
keys = df.keys()
assert isinstance(keys, mk.Store)
inode = keys.inode
assert keys.value == ["a", "b"]
_set_store_or_df(df, DataFrame({"c": np.arange(10)}))
assert list(inode.obj) == ["c"]
|
meerkat-main
|
tests/meerkat/test_dataframe.py
|
meerkat-main
|
tests/meerkat/mixins/__init__.py
|
|
import os
import sys
import numpy as np
import meerkat as mk
def test_meerkat_loader(tmpdir):
col = mk.NumPyTensorColumn(np.arange(10))
path = os.path.join(tmpdir, "col.mk")
col.write(path)
module = sys.modules.pop("meerkat.columns.tensor.numpy")
mk.Column.read(path)
sys.modules["meerkat.columns.tensor.numpy"] = module
|
meerkat-main
|
tests/meerkat/mixins/test_io.py
|
import pytest
import meerkat as mk
from meerkat.mixins.reactifiable import ReactifiableMixin
class Foo(ReactifiableMixin):
def __init__(self, x):
self.x = x
@mk.reactive()
def add(self, y):
return self.x + y
# Test that the decorator can be used with or without parentheses.
@mk.reactive
def add_dec_no_parenthesis(self, y):
return self.x + y
# Test that react works for magic methods, when accessing them with
# their shortcuts (e.g. foo[0]).
@mk.reactive()
def __getitem__(self, idx):
return self.x
# Test that properties behave like normal attribute accessing.
# When the instance is marked, accessing the property should be reactive.
@property
def my_x(self):
return self.x
# Python requires __len__ to return an int.
# Because this method is not wrapped with @unmarked, it will
# return self.x, which can be a Store when `not is_unmarked_context()
# and self.marked`. This will raise an error.
def __len__(self):
return self.x
@mk.unmarked()
def add_no_react(self, y):
return self.x + y
# Test if a method is not decorated, then
# it should automatically be wrapped with unmarked.
def add_auto_react(self, y):
return self.x + y
@classmethod
@mk.reactive()
def name(cls):
return cls.__name__
@staticmethod
@mk.reactive()
def static():
return 1
# TODO: Add tests for nested react/noreact funcs.
def test_marking():
foo = Foo(1)
assert not foo.marked
foo = foo.mark()
assert foo.marked
def test_reactive_setter_inplace():
"""Setting the .reactive property should be in-place."""
foo = Foo(1)
foo2 = foo.mark()
foo3 = foo2.mark()
foo4 = foo2.unmark()
assert foo is foo2
assert foo is foo3
assert foo is foo4
@pytest.mark.parametrize("react", [True, False])
@pytest.mark.parametrize("attr", ["x", "my_x"])
def test_attributes(react: bool, attr: str):
foo = Foo(1)
if react:
foo = foo.mark()
x = getattr(foo, attr)
assert x == 1
assert (not react) ^ isinstance(x, mk.Store)
@pytest.mark.parametrize("react", [True, False])
@pytest.mark.parametrize("unmark_store", [True, False])
@pytest.mark.parametrize("method", ["add", "add_dec_no_parenthesis"])
def test_instance_method_decorated(react: bool, unmark_store: bool, method: str):
y = mk.Store(4)
foo = Foo(1)
if react:
foo = foo.mark()
if unmark_store:
y = y.unmark()
is_one_arg_marked = foo.marked or y.marked
fn = getattr(foo, method)
x = fn(y)
assert x == 5
assert isinstance(x, int)
assert (not is_one_arg_marked) ^ isinstance(x, mk.Store)
if y.marked:
assert len(y.inode.trigger_children) == 1
assert y.inode.trigger_children[0].obj.fn.__name__ == method
if not is_one_arg_marked:
# When none of the inputs are marked when the function is run,
# no inodes should be created.
assert y.inode is None
else:
# If any of the inputs were marked when the function was run,
# inodes should be created for all arguments.
assert y.inode is not None
if not y.marked:
assert len(y.inode.trigger_children) == 0
@pytest.mark.parametrize("react", [True, False])
@pytest.mark.parametrize("unmark_store", [True, False])
def test_magic_method_react_shortcut_getitem_accessor(react: bool, unmark_store: bool):
y = mk.Store(1)
foo = Foo(1)
if react:
foo = foo.mark()
if unmark_store:
y = y.unmark()
is_one_arg_marked = foo.marked or y.marked
x = foo[y]
assert x == 1
assert isinstance(x, int)
assert (not is_one_arg_marked) ^ isinstance(x, mk.Store)
if y.marked:
assert len(y.inode.trigger_children) == 1
assert y.inode.trigger_children[0].obj.fn.__name__ == "__getitem__"
elif foo.marked:
        # foo is marked but y is not.
assert y.inode is not None
assert len(y.inode.trigger_children) == 0
@pytest.mark.parametrize("react", [True, False])
def test_magic_method_not_decorated(react: bool):
"""Magic methods that are not decorated should never be reactive."""
foo = Foo(1)
if react:
foo = foo.mark()
# We should see an error because we do not explicitly set unmarked()
# on __len__. This means __len__ can return a store.
with pytest.raises(TypeError):
len(foo)
else:
x = len(foo)
assert x == 1
assert not isinstance(x, mk.Store)
def test_instance_method_not_decorated():
"""Instance methods that are not decorated should, by default, be
unmarked."""
foo = Foo(1)
# Object is reactive.
foo = foo.mark()
x = foo.add_auto_react(1)
assert x == 2
assert not isinstance(x, mk.Store)
# Object is not reactive.
foo = foo.unmark()
x = foo.add_auto_react(1)
assert x == 2
assert not isinstance(x, mk.Store)
# def test_class_method():
# """
# Class methods that are decorated with @reactive should always
# be reactive by default. This is because the class does not have
# a react flag that can be used to determine whether the method
# should be reactive or not.
# """
# name = Foo.name()
# assert isinstance(name, mk.Store)
# assert name == "Foo"
# def test_static_method():
# """
# Static methods that are decorated with @reactive should always
# be reactive by default. This is because the class does not have
# a react flag that can be used to determine whether the method
# should be reactive or not.
# """
# static = Foo.static()
# assert isinstance(static, mk.Store)
# assert static == 1
|
meerkat-main
|
tests/meerkat/mixins/test_reactifiable.py
|
from typing import List
import meerkat as mk
def test_versions():
versions = mk.datasets.versions("imagenette")
assert isinstance(versions, List)
assert len(versions) > 0
def test_repr():
out = repr(mk.datasets)
    assert isinstance(out, str)
|
meerkat-main
|
tests/meerkat/datasets/test_registry.py
|
meerkat-main
|
tests/meerkat/interactive/__init__.py
|
|
import pydantic
import pytest
import meerkat as mk
from meerkat.interactive.event import EventInterface
def test_component_creation_fails_with_bad_endpoint():
"""Raise an error if an Endpoint with a mismatched function signature is
passed to a Component."""
class TestEventInterface(EventInterface):
arg_1: int
arg_2: int
class Test(mk.gui.Component):
on_click: mk.gui.Endpoint[TestEventInterface]
@mk.endpoint()
def test_endpoint_1(arg_1, arg_2, arg_3):
"""Extra argument."""
pass
@mk.endpoint()
def test_endpoint_2(arg_1, arg_2):
"""Correct signature."""
pass
@mk.endpoint()
def test_endpoint_3(arg_1):
"""Missing argument."""
pass
@mk.endpoint()
def test_endpoint_4(**kwargs):
"""Keyword arguments are okay."""
pass
@mk.endpoint()
def test_endpoint_5(arg_1, arg_2, arg_3=3):
"""Extra default arguments are okay."""
pass
with pytest.raises(pydantic.ValidationError):
Test(on_click=test_endpoint_1)
with pytest.raises(pydantic.ValidationError):
Test(on_click=test_endpoint_3)
Test(on_click=test_endpoint_2)
Test(on_click=test_endpoint_4)
Test(on_click=test_endpoint_5)
# Partial functions are okay.
@mk.endpoint()
def test_endpoint_6(arg_1, arg_2, arg_3):
pass
@mk.endpoint()
def test_endpoint_7(arg_0, arg_1, arg_2):
pass
@mk.endpoint()
def test_endpoint_8(arg_0, arg_1, arg_2, arg_3):
pass
Test(on_click=test_endpoint_6.partial(arg_3=3))
Test(on_click=test_endpoint_7.partial(3))
Test(on_click=test_endpoint_8.partial(3, arg_3=3))
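# Note: `partial` pre-binds arguments, so only the remaining (unbound) part of
# the signature is validated against the EventInterface. For example,
# `test_endpoint_6.partial(arg_3=3)` leaves `(arg_1, arg_2)`, which matches
# `TestEventInterface`.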
def test_endpoint_warning_on_component_creation():
"""Raise a warning if an Endpoint's generic type is not specified."""
@mk.endpoint()
def test_endpoint():
pass
class Test(mk.gui.Component):
on_click: mk.gui.Endpoint
with pytest.warns(UserWarning):
Test(on_click=test_endpoint)
|
meerkat-main
|
tests/meerkat/interactive/test_component.py
|
import numpy as np
import pytest
import torch
from fastapi.encoders import jsonable_encoder
from meerkat.interactive.app.src.lib.component.core.match import MatchCriterion
from meerkat.interactive.graph.store import Store
from meerkat.interactive.utils import get_custom_json_encoder
@pytest.mark.parametrize(
"obj,expected",
[
# torch
(torch.as_tensor([1, 2, 3]), [1, 2, 3]),
(torch.as_tensor([1, 2, 3]).float(), [1.0, 2.0, 3.0]),
# numpy
(np.array([1, 2, 3]), [1, 2, 3]),
(np.array([1, 2, 3]).astype(np.float32), [1.0, 2.0, 3.0]),
(np.array(["foo", "bar"]), ["foo", "bar"]),
(np.array([[1.0, 2.0, 3.0]]), [[1.0, 2.0, 3.0]]),
(np.asarray(1.0), 1.0),
(np.asarray(1).astype(np.float16), 1.0),
(np.asarray(1).astype(np.float32), 1.0),
(np.asarray(1).astype(np.float64), 1.0),
(np.asarray(1).astype(np.int8), 1),
(np.asarray(1).astype(np.int16), 1),
(np.asarray(1).astype(np.int32), 1),
(np.asarray(1).astype(np.int64), 1),
],
)
@pytest.mark.parametrize("use_store", [False, True])
def test_custom_json_encoder_native_objects(obj, expected, use_store: bool):
if use_store:
obj = Store(obj)
out = jsonable_encoder(obj, custom_encoder=get_custom_json_encoder())
np.testing.assert_equal(out, expected)
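# The custom encoder exists so that numpy/torch scalars, arrays, and Stores are
# converted to plain Python types that FastAPI's `jsonable_encoder` can
# serialize, as exercised by the cases above.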
@pytest.mark.parametrize(
"obj,expected",
[
(
MatchCriterion(
against="foo",
query="my query",
name="my name",
query_embedding=np.asarray([1, 2, 3]),
positives=[1, 2, 3, 4],
negatives=[5, 6, 7, 8],
),
{
"against": "foo",
"query": "my query",
"name": "my name",
"query_embedding": [1, 2, 3],
"positives": [1, 2, 3, 4],
"negatives": [5, 6, 7, 8],
},
)
],
)
@pytest.mark.parametrize("use_store", [False, True])
def test_custom_json_encoder_custom_objects(obj, expected, use_store: bool):
if use_store:
obj = Store(obj)
out = jsonable_encoder(obj, custom_encoder=get_custom_json_encoder())
np.testing.assert_equal(out, expected)
@pytest.mark.parametrize(
"obj,expected",
[
(Store(Store([1, 2, 3])), [1, 2, 3]),
(Store((Store([1, 2, 3]), Store([4, 5, 6]))), [[1, 2, 3], [4, 5, 6]]),
(Store({"foo": Store([1, 2, 3])}), {"foo": [1, 2, 3]}),
],
)
def test_custom_json_encoder_nested_stores(obj, expected):
out = jsonable_encoder(obj, custom_encoder=get_custom_json_encoder())
np.testing.assert_equal(out, expected)
|
meerkat-main
|
tests/meerkat/interactive/test_util.py
|
from typing import List, Union
import numpy as np
import pytest
import meerkat as mk
from meerkat.interactive.endpoint import _is_annotation_store
from meerkat.interactive.graph.store import _IteratorStore
@pytest.mark.parametrize("fn_decorator", [mk.gui.reactive])
def test_endpoint_wrapping_reactive_fn(fn_decorator):
"""When an endpoint wraps a reactive function, reactivity should be
disabled to prevent adding anything to the graph.
Note, we can only do this with methods decorated with @reactive. If
a method decorated with `@mk.gui.react()` is called from an
endpoint, the graph will be built because `@mk.gui.react()`
activates reactivity prior to the method being called.
"""
fn = fn_decorator(lambda store: store + 3)
@mk.endpoint()
def fn_endpoint(store: mk.Store):
store.set(fn(store))
# Test with @reactive decorator.
x = mk.Store(1)
assert not mk.gui.is_unmarked_context() # Verify we are in a reactive context
fn_endpoint(x)
with mk.unmarked():
assert x == 4 # Verify the endpoint works
assert x.inode is None # Graph should be empty
@pytest.mark.parametrize(
"x",
[
# TODO: Uncomment when we can issue column modifications.
# mk.ScalarColumn([1, 2, 3, 4, 5]),
mk.DataFrame({"a": [1, 2, 3, 4, 5]}),
mk.Store(np.array([1, 2, 3, 4, 5])),
],
)
def test_endpoint_with_reactive_output(
x,
):
"""Test that we can add endpoints to reactive outputs.
The graph for this test looks like
    df -> view -> df_view -> view -> df_view2
    ^  |          ^  |
    |  v          |  v
    add_one       add_one   (endpoint)
"""
def _get_value(_x):
if isinstance(_x, mk.DataFrame):
return _x["a"]
else:
return _x
x.mark()
@mk.reactive()
def view(_x):
if isinstance(_x, (mk.DataFrame, mk.Column)):
return _x.view()
else:
return _x
# TODO: Change the type hint to Union when unions are supported.
@mk.endpoint()
def add_one(_x: mk.Store):
if isinstance(_x, mk.DataFrame):
_x["a"] = _x["a"] + 1
_x.set(_x)
else:
out = _x + 1
_x.set(out)
endpoint_df = add_one.partial(_x=x)
df_view = view(x)
assert df_view.inode is not None
assert x.inode is not None
df_view_inode = df_view.inode
endpoint_df_view = add_one.partial(_x=df_view)
assert df_view.inode is df_view_inode
df_view2 = view(df_view)
df_view2_inode = df_view2.inode
# Run the endpoint on the original input.
# This should trigger both views to update.
endpoint_df.run()
assert all(_get_value(x) == [2, 3, 4, 5, 6])
assert all(_get_value(df_view_inode.obj) == [2, 3, 4, 5, 6])
assert all(_get_value(df_view2_inode.obj) == [2, 3, 4, 5, 6])
# Run the endpoint on the first view.
# This should trigger the second view to update.
endpoint_df_view.run()
assert all(_get_value(x) == [2, 3, 4, 5, 6])
assert all(_get_value(df_view_inode.obj) == [3, 4, 5, 6, 7])
assert all(_get_value(df_view2_inode.obj) == [3, 4, 5, 6, 7])
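# Note on the assertions above: triggers only propagate downstream along
# trigger-children edges, so running the endpoint on `df_view` updates
# `df_view2` but leaves the original `x` untouched.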
@pytest.mark.parametrize("endpoint_id", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("partial", [True, False])
def test_endpoint_type_hints(endpoint_id: int, partial: bool):
"""Test that endpoints with different type hints will work."""
@mk.endpoint()
def endpoint1(x: mk.Store):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint2(x: mk.Store[int]):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint3(x: mk.Store[List[int]]):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint4(x: Union[mk.Store, int]):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint5(x: Union[mk.Store[int], int]):
assert isinstance(x, mk.Store)
endpoint = {
1: endpoint1,
2: endpoint2,
3: endpoint3,
4: endpoint4,
5: endpoint5,
}[endpoint_id]
store = mk.Store(1)
if partial:
_endpoint = endpoint.partial(x=store)
_endpoint.run()
else:
endpoint.run(store)
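# Illustrative sketch (hypothetical endpoint, not used by any test): annotating
# an endpoint parameter with `mk.Store` (or a Union containing it) means the
# endpoint receives the Store object itself, so it can mutate state via `.set()`.
@mk.endpoint()
def _increment_example(counter: mk.Store[int]):
    # `.value` reads the wrapped int; `.set()` writes the update back through the graph.
    counter.set(counter.value + 1)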
@pytest.mark.parametrize("x_input", ["a", mk.Store("a"), mk.Store(1)])
@pytest.mark.parametrize("endpoint_id", [1, 2, 3])
@pytest.mark.parametrize("partial", [True, False])
def test_endpoint_with_string(x_input, endpoint_id: int, partial: bool):
"""Endpoints resolve variables based on their ids, which are strings.
This may cause problems when the input is actually as string. These
tests are to check that endpoints can work properly with non-id
strings.
"""
@mk.endpoint()
def endpoint1(x: str):
# The type hint is `str`, so the input should never be a store.
assert not isinstance(x, mk.Store)
@mk.endpoint()
def endpoint2(x: mk.Store[str]):
# The type hint is `Store[str]`, so the input should be a store.
# Type hints should never be strict constraints in Python.
# So even if the user passes in some other type, we should still
# be able to handle it.
if isinstance(x_input, mk.Store):
assert isinstance(x, mk.Store)
else:
assert not isinstance(x, mk.Store)
@mk.endpoint()
def endpoint3(x: Union[mk.Store, str]):
# The type hint is `Union[Store, str]`, so the input should be a store
# if a store was passed in. If a store wasn't passed in, then we
# should get the raw string back rather than a Store.
if isinstance(x_input, mk.Store):
assert isinstance(x, mk.Store)
else:
assert not isinstance(x, mk.Store)
endpoint = {
1: endpoint1,
2: endpoint2,
3: endpoint3,
}[endpoint_id]
if partial:
_endpoint = endpoint.partial(x=x_input)
_endpoint.run()
else:
endpoint.run(x_input)
@pytest.mark.parametrize(
"type_hint",
[
mk.Store,
mk.Store[int],
mk.Store[List[int]],
# subclass of Store
_IteratorStore,
# Union with non-generic store
Union[mk.Store, int],
# Union with generic store
Union[mk.Store[int], int],
# Nested stores
Union[Union[mk.Store[int], int], int],
],
)
def test_is_annotation_store_true(type_hint):
assert _is_annotation_store(type_hint)
@pytest.mark.parametrize("type_hint", [mk.DataFrame, mk.Column])
def test_is_annotation_store_false(type_hint):
assert not _is_annotation_store(type_hint)
|
meerkat-main
|
tests/meerkat/interactive/test_endpoint.py
|
meerkat-main
|
tests/meerkat/interactive/app/__init__.py
|
|
import meerkat as mk
from meerkat.interactive.svelte import SvelteWriter
def test_index_js():
# Read the index.js file
path = mk.__path__[0] + "/interactive/app/src/lib/index.js"
with open(path, "r") as f:
index_js = f.read()
# Extract all the export statements
export_statements = [
line for line in index_js.splitlines() if line.startswith("export")
]
# Keep only the statements for .svelte files
export_statements = [line for line in export_statements if ".svelte" in line]
# Assert that all statements look like:
# export { default as Button } from './component/button/Button.svelte';
for line in export_statements:
assert line.startswith("export { default as ")
# Extract the names of the components from "default as XXX"
exported_components = sorted(
[line.split("default as ")[-1].split(" }")[0] for line in export_statements]
)
# Remove the `Page` and `Meerkat` components
exported_components = [
component
for component in exported_components
if component not in ["Page", "Meerkat"]
]
# Get the list of all components defined in Python for Meerkat
py_components = SvelteWriter().get_all_components()
# Keep only the components that have library @meerkat-ml/meerkat
py_components = sorted(
[
component.component_name
for component in py_components
if component.library == "@meerkat-ml/meerkat"
]
)
# Assert that the list of components in Python is the same as the list of
# components exported in index.js
assert set(py_components) == set(exported_components), (
"The list of components exported in app/src/lib/index.js is "
"not the same as the list "
"of components defined in Python. Make sure to export all components in "
"index.js and to define all components in Python (with @meerkat-ml/meerkat "
"library)."
)
|
meerkat-main
|
tests/meerkat/interactive/app/test_index.py
|
meerkat-main
|
tests/meerkat/interactive/app/component/__init__.py
|
|
import meerkat as mk
def test_on_click():
store = mk.Store("")
button = mk.gui.Button(
title="test", on_click=mk.endpoint(lambda: store.set("clicked"))
)
button.on_click()
assert store == "clicked"
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/test_button.py
|
import numpy as np
import pytest
import meerkat as mk
from meerkat.interactive.app.src.lib.component.core.filter import FilterCriterion
# TODO (arjun): undo the skip filter stuff
@mk.endpoint()
def _set_criteria(criteria, store: mk.Store):
store.set(criteria)
def test_filter_single_criterion():
df = mk.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
filter = mk.gui.Filter(df=df)
out = filter(df)
node = out.inode
assert filter.criteria == []
criterion = FilterCriterion(is_enabled=True, column="a", op=">", value=5)
_set_criteria(filter.criteria.value + [criterion], filter.criteria)
assert np.all(node.obj["a"].data > 5)
_set_criteria([], filter.criteria)
assert np.all(node.obj["a"].data == df["a"].data)
criterion = FilterCriterion(is_enabled=True, column="a", op="==", value=5)
_set_criteria(filter.criteria.value + [criterion], filter.criteria)
assert np.all(node.obj["a"].data == 5)
@pytest.mark.parametrize("op", [">", "<", ">=", "<=", "==", "in", "not in"])
@pytest.mark.parametrize("value", [5, [5, 10]])
def test_filter_operations(op, value):
if "in" not in op and isinstance(value, (list, tuple)):
# Skip these cases because they are not valid.
return
elif "in" in op and not isinstance(value, (list, tuple)):
value = [value]
df = mk.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
if op == "==":
expected = df["a"] == value
elif op == ">":
expected = df["a"] > value
elif op == "<":
expected = df["a"] < value
elif op == ">=":
expected = df["a"] >= value
elif op == "<=":
expected = df["a"] <= value
elif op == "!=":
expected = df["a"] != value
elif op == "in":
expected = df["a"].data.isin(value)
value = ",".join([str(v) for v in value])
elif op == "not in":
expected = ~df["a"].data.isin(value)
value = ",".join([str(v) for v in value])
expected = df[expected]["a"]
filter = mk.gui.Filter(df=df)
out = filter(df)
node = out.inode
criterion = FilterCriterion(is_enabled=True, column="a", op=op, value=value)
_set_criteria(filter.criteria.value + [criterion], filter.criteria)
assert np.all(node.obj["a"].data == expected)
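# Note: for the membership ops ("in" / "not in") the criterion value is passed
# to FilterCriterion as a comma-separated string (see the `",".join(...)` above),
# which is the format the Filter component accepts for list-valued criteria.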
@pytest.mark.parametrize("value,expected", [("hello", [0, 2]), ("bye", [1, 2])])
def test_filter_contains(value, expected):
df = mk.DataFrame({"a": ["hello world", "bye world", "hello bye world"]})
expected = df["a"][expected]
filter = mk.gui.Filter(df=df)
out = filter(df)
node = out.inode
criterion = FilterCriterion(is_enabled=True, column="a", op="contains", value=value)
_set_criteria.run(filter.criteria.value + [criterion], filter.criteria)
assert np.all(node.obj["a"].data == expected.data)
def test_filter_bool():
"""TODO: Test filtering with a boolean column."""
pass
# FIXME: the tests below were based on skip_fn which was not sufficient.
# def test_skip_filter_disabled():
# """Test logic for skipping the filter component when adding/modifying
# disabled criteria."""
# df = mk.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
# filter = mk.gui.Filter(df=df)
# out = filter(df)
# node = out.inode
# # The filter criterion is disabled, so the output dataframe should not change.
# criterion = FilterCriterion(is_enabled=False, column="a", op=">", value=5)
# _set_criteria([criterion], filter.criteria)
# assert id(node.obj) == id(out)
# # The filter criterion is disabled, so changing this criterion should not
# # change the output dataframe.
# criterion.op = "<"
# _set_criteria([criterion], filter.criteria)
# assert id(node.obj) == id(out)
# # Deleting a disabled criterion should not change the output dataframe.
# _set_criteria([], filter.criteria)
# assert id(node.obj) == id(out)
# def test_skip_filter_duplicate():
# """If a criterion is added that is a duplicate of an existing criterion, it
# should be skipped."""
# df = mk.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
# filter = mk.gui.Filter(df=df)
# out = filter(df)
# node = out.inode
# # Duplicate of the same criterion.
# criterion = FilterCriterion(is_enabled=True, column="a", op=">", value=5)
# _set_criteria([criterion], filter.criteria)
# obj_id = id(node.obj)
# duplicate_criterion = FilterCriterion(is_enabled=True, column="a", op=">", value=5) # noqa: E501
# _set_criteria([criterion, duplicate_criterion], filter.criteria)
# assert id(node.obj) == obj_id
# def test_skip_filter_order():
# """Filter criteria are order-agnostic.
# If the same criteria are added in a different order, the output
# dataframe should not change.
# """
# df = mk.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
# filter = mk.gui.Filter(df=df)
# out = filter(df)
# node = out.inode
# # Duplicate of the same criterion.
# criteria = [
# FilterCriterion(is_enabled=True, column="a", op=">", value=5),
# FilterCriterion(is_enabled=True, column="a", op="<", value=10),
# ]
# _set_criteria(criteria, filter.criteria)
# obj_id = id(node.obj)
# _set_criteria(criteria[::-1], filter.criteria)
# assert id(node.obj) == obj_id
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/test_filter.py
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/__init__.py
|
|
import numpy as np
import meerkat as mk
from meerkat.interactive.app.src.lib.component.core.sort import SortCriterion
@mk.endpoint()
def _set_criteria(criteria, store: mk.Store):
store.set(criteria)
def test_sort_single_criterion():
"""Sorting should work with a single criterion."""
arr = np.arange(10)
np.random.shuffle(arr)
df = mk.DataFrame({"a": arr}).mark()
sort = mk.gui.Sort(df=df)
out = sort(df)
node = out.inode
# Even without a criterion, the output dataframe should be a view
# of the input dataframe.
assert id(out) != id(df)
criterion = SortCriterion(id="foo", is_enabled=True, column="a", ascending=True)
_set_criteria([criterion], sort.criteria)
assert np.all(node.obj["a"].data == np.arange(10))
def test_sort_multiple_criteria():
a, b = np.arange(10), np.arange(10)
np.random.shuffle(a)
np.random.shuffle(b)
df = mk.DataFrame({"a": a, "b": b}).mark()
sort = mk.gui.Sort(df=df)
out = sort(df)
node = out.inode
# Sort with a.
criteria = [
SortCriterion(id="foo", is_enabled=True, column="a", ascending=True),
SortCriterion(id="foo", is_enabled=True, column="b", ascending=True),
]
_set_criteria(criteria, sort.criteria)
assert np.all(node.obj["a"].data == np.arange(10))
# Sort with b.
criteria = [
SortCriterion(id="foo", is_enabled=True, column="a", ascending=True),
SortCriterion(id="foo", is_enabled=True, column="b", ascending=True),
]
_set_criteria(criteria[::-1], sort.criteria)
assert np.all(node.obj["b"].data == np.arange(10))
# def test_skip_sort_disabled():
# """If a criterion is disabled, it should be skipped."""
# df = mk.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}).mark()
# sort = mk.gui.Sort(df=df)
# out = sort(df)
# node = out.inode
# # The sort criterion is disabled, so the output dataframe should not change.
# criterion = SortCriterion(id="foo", is_enabled=False, column="a", ascending=True)
# _set_criteria([criterion], sort.criteria)
# assert id(node.obj) == id(out)
# _set_criteria([], sort.criteria)
# assert id(node.obj) == id(out)
# # The sort criterion is enabled, so the dataframe should change.
# criterion.is_enabled = True
# _set_criteria([criterion], sort.criteria)
# assert id(node.obj) != id(out)
def test_skip_sort_order():
"""When the order of the sort criteria changes, the output dataframe should
change."""
df = mk.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "b": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
).mark()
sort = mk.gui.Sort(df=df)
out = sort(df)
node = out.inode
criteria = [
SortCriterion(id="foo", is_enabled=True, column="a", ascending=True),
SortCriterion(id="foo", is_enabled=True, column="b", ascending=False),
]
_set_criteria(criteria, sort.criteria)
out_id = id(node.obj)
_set_criteria(criteria[::-1], sort.criteria)
assert id(node.obj) != out_id
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/test_sort.py
|
import meerkat as mk
@mk.endpoint()
def _change_value(value: mk.Store, new_value: bool):
value.set(new_value)
def test_toggle_basic():
toggle = mk.gui.Toggle()
assert not toggle.value
assert isinstance(toggle.value, mk.Store) and isinstance(toggle.value, bool)
value: mk.Store[bool] = toggle.value
_change_value(value, True)
assert toggle.value
_change_value(value, False)
assert not toggle.value
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/test_toggle.py
|
import meerkat as mk
def _default_df():
return mk.DataFrame(
{
"message": ["Lorem ipsum"],
"sender": ["chatbot"],
"name": ["ChatBot"],
"time": ["2 hours ago"],
}
)
@mk.endpoint()
def on_send(df: mk.DataFrame, message: str):
df.set(
df.append(
mk.DataFrame(
{
"message": [message, "random message"],
"sender": ["user", "chatbot"],
"name": ["User", "ChatBot"],
"time": ["1 hour ago", "1 hour ago"],
}
)
)
)
def test_on_send():
df = _default_df()
chat = mk.gui.core.Chat(
df=df,
img_chatbot="https://placeimg.com/200/200/animals",
img_user="https://placeimg.com/200/200/people",
on_send=on_send.partial(df=df),
)
chat.on_send(message="hello")
assert len(df) == 3
assert df["message"][1] == "hello"
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/test_chat.py
|
# import meerkat as mk
# def test_basic():
# choices = ["a", "b", "c"]
# value = mk.Store("")
# choice = mk.gui.Choice(choices=choices, value=value)
# choice.on_select(1)
# assert value == "b"
# def test_on_select():
# other = mk.Store("")
# @mk.endpoint()
# def on_select(new_value: str):
# other.set(new_value)
# choices = ["a", "b", "c"]
# value = mk.Store("")
# choice = mk.gui.Choice(choices=choices, value=value, on_select=on_select)
# choice.on_select(1)
# assert value == "b"
# assert other == "b"
# def test_on_select_no_param():
# other = mk.Store("")
# @mk.endpoint()
# def on_select():
# other.set("set")
# choices = ["a", "b", "c"]
# value = mk.Store("")
# choice = mk.gui.Choice(choices=choices, value=value, on_select=on_select)
# choice.on_select(1)
# assert value == "b"
# assert other == "set"
# def test_not_string():
# choices = [1, 2, 3]
# value = mk.Store(None)
# choice = mk.gui.Choice(choices=choices, value=value)
# choice.on_select(1)
# assert value == 2
# # Table
# # gallery
# # toggle
# # match
# # discover
# # multiselect
|
meerkat-main
|
tests/meerkat/interactive/app/component/core/test_choice.py
|
from typing import List, Mapping
import numpy as np
import pandas as pd
import pytest
import meerkat as mk
from meerkat.interactive.graph import is_unmarked_context, reactive, trigger
from meerkat.interactive.graph.magic import magic
from meerkat.interactive.graph.store import _unpack_stores_from_object
from meerkat.interactive.modification import DataFrameModification
from meerkat.mixins.reactifiable import MarkableMixin
from meerkat.state import state
def _create_dummy_reactive_df() -> mk.DataFrame:
df = pd.DataFrame({"a": np.arange(10), "b": np.arange(10) + 10})
return mk.DataFrame.from_pandas(df).mark()
@reactive()
def _add_to_list(_keys: List[str], new_key: str):
return _keys + [new_key]
def test_react_basic():
df = _create_dummy_reactive_df()
keys_reactive = df.keys()
out = _add_to_list(keys_reactive, "c")
assert out.inode is not None
assert isinstance(keys_reactive, mk.Store)
assert keys_reactive.inode.has_trigger_children()
op_node = keys_reactive.inode.trigger_children[0]
assert op_node.obj.fn.__name__ == "_add_to_list"
assert len(op_node.trigger_children) == 1
assert op_node.trigger_children[0] is out.inode
# Outside of context manager.
with mk.unmarked():
keys = df.keys()
assert not isinstance(keys, mk.Store)
def test_unmarked_context_manager():
df = _create_dummy_reactive_df()
assert not is_unmarked_context()
keys_reactive = df.keys()
with mk.unmarked():
assert is_unmarked_context()
keys = df.keys()
assert isinstance(keys_reactive, mk.Store)
assert isinstance(keys, List)
def test_trigger_instance_method():
rng = np.random.RandomState(0)
# TODO: Why is this decorator affecting the return type?
@reactive()
def _subselect_df(df: mk.DataFrame) -> mk.DataFrame:
cols = list(rng.choice(df.columns, 3))
return df[cols]
df = mk.DataFrame({str(k): [k] for k in range(10)})
df = df.mark()
df_sub = _subselect_df(df)
keys_reactive = df_sub.keys()
keys0 = keys_reactive.__wrapped__
state.modification_queue.queue = [DataFrameModification(id=df.id, scope=[])]
modifications1 = trigger()
keys1 = modifications1[-1].value
state.modification_queue.queue = [DataFrameModification(id=df.id, scope=[])]
modifications2 = trigger()
keys2 = modifications2[-1].value
assert keys0 != keys1
assert keys1 != keys2
@pytest.mark.parametrize("is_unmarked", [False, True])
def test_unmarked_on_reactive_fn(is_unmarked: bool):
@mk.gui.reactive()
def add(a, b):
return a + b
a = mk.Store(1)
b = mk.Store(2)
if is_unmarked:
with mk.unmarked():
c = add(a, b)
else:
c = add(a, b)
expected_type = int if is_unmarked else mk.Store
assert isinstance(c, expected_type)
if not is_unmarked:
assert a.inode.has_trigger_children() and b.inode.has_trigger_children()
else:
assert a.inode is None and b.inode is None
def test_default_nested_return():
"""By default, nested return is None."""
@reactive()
def _return_tuple(_s):
return ("a", "b")
@reactive()
def _return_list(_s):
return ["a", "b"]
_s = mk.Store("")
with magic():
out = _return_tuple(_s)
with mk.unmarked():
a, b = out
assert isinstance(out, mk.Store)
assert not isinstance(a, mk.Store)
assert not isinstance(b, mk.Store)
with magic():
out = _return_list(_s)
with mk.unmarked():
a, b = out
# Lists are not unpacked by default.
assert isinstance(out, mk.Store)
assert not isinstance(a, mk.Store)
assert not isinstance(b, mk.Store)
def test_nested_reactive_fns():
"""When reactive functions are executed, only the outer function should be
added as a child to the input stores.
In simpler language, a reactive function run inside another reactive
function will not add things to the graph.
"""
@mk.gui.reactive()
def _inner(x):
return ["a", "b", x]
@mk.gui.reactive()
def _outer(x):
return ["example"] + _inner(x)
x = mk.Store("c")
_outer(x)
assert x.inode.has_trigger_children()
assert len(x.inode.trigger_children) == 1
# Compare the names because the memory addresses will be different
# when the function is wrapped in reactive.
assert x.inode.trigger_children[0].obj.fn.__name__ == "_outer"
@pytest.mark.parametrize(
"x",
[
# Primitives.
1,
"foo",
[1, 2],
(1, 4),
{"a": 1, "b": 2},
# Basic types.
mk.Store(1),
# TODO: Determine why the initialization below is causing problems.
# mk.Store("foo"),
mk.Store([1, 2]),
mk.Store((1, 4)),
mk.Store({"a": 1, "b": 2}),
# # Stores in non-reactive containers.
{"a": 1, "b": mk.Store(2)},
[1, mk.Store(2)],
(mk.Store(1), 2),
{"a": {"b": mk.Store(1)}},
# # Nested stores.
mk.Store([mk.Store(1), 2]),
],
)
@pytest.mark.parametrize("use_kwargs", [False, True])
def test_unpacking(x, use_kwargs):
"""Test that all stores are unpacked correctly."""
def _are_equal(x, y):
if isinstance(x, Mapping):
return _are_equal(x.keys(), y.keys()) and all(
_are_equal(x[k], y[k]) for k in x.keys()
)
else:
return x == y
if use_kwargs:
inputs = {"wrapped": x}
unpacked_kwargs, _ = _unpack_stores_from_object(inputs)
assert len(unpacked_kwargs) == 1
outputs = unpacked_kwargs
else:
inputs = [x]
unpacked_args, _ = _unpack_stores_from_object(inputs)
assert len(unpacked_args) == 1
outputs = unpacked_args
# Recursively check for equality.
assert _are_equal(inputs, outputs)
def test_instance_methods():
"""Test that instance methods get reactified correctly."""
class Foo:
def __init__(self, x):
self.x = x
@reactive()
def add(self, y):
return self.x + y
@reactive()
def __eq__(self, __o: int) -> bool:
return self.x == __o
foo = Foo(1)
val = mk.Store(2)
out_add = foo.add(val)
out_eq = foo == val
assert isinstance(out_add, mk.Store)
assert isinstance(out_eq, mk.Store)
assert len(val.inode.trigger_children) == 2
assert val.inode.trigger_children[0].obj.fn.__name__ == "add"
assert val.inode.trigger_children[0].trigger_children[0] is out_add.inode
assert val.inode.trigger_children[1].obj.fn.__name__ == "__eq__"
assert val.inode.trigger_children[1].trigger_children[0] is out_eq.inode
# Trigger the functions.
@mk.endpoint()
def set_val(val: mk.Store):
val.set(0)
set_val(val)
assert out_add == 1
assert not out_eq
@pytest.mark.parametrize(
"x",
[
[1, 2, 3, 4],
mk.Store([1, 2, 3, 4]),
mk.DataFrame({"a": [1, 2, 3, 4]}),
],
)
@pytest.mark.parametrize("mark", [True, False])
def test_slicing(x, mark: bool):
@mk.endpoint()
def update_store(store: mk.Store, value: int):
store.set(value)
@mk.unmarked()
def _compare_objs(x_sl, expected):
if isinstance(x_sl, mk.DataFrame):
assert x_sl.columns == expected.columns
for col in x_sl.columns:
assert np.all(x_sl[col] == expected[col])
else:
assert x_sl == expected
if mark:
if not isinstance(x, MarkableMixin):
x = mk.Store(x)
x.mark()
elif isinstance(x, MarkableMixin):
x.unmark()
start = mk.Store(0)
stop = mk.Store(4)
step = mk.Store(1)
# Using store slices with non-markable objects should raise an error.
# This is because __index__ is reactive and returns a Store, which Python
# cannot use as a slice index, so a TypeError is raised.
if not isinstance(x, MarkableMixin):
with pytest.raises(TypeError):
x_sl = x[start:stop]
return
x_sl = x[start:stop:step]
_compare_objs(x_sl, x[0:4])
if not x.marked:
# Even if x is not marked, an inode should still be created.
assert x.inode is not None
return
inode = x_sl.inode
# Update the start value.
update_store.run(start, 1)
_compare_objs(inode.obj, x[1:4])
# Update the stop value.
update_store.run(stop, 3)
_compare_objs(inode.obj, x[1:3])
# Update the step value.
update_store.run(start, 0)
update_store.run(stop, 4)
update_store.run(step, 2)
_compare_objs(inode.obj, x[0:4:2])
|
meerkat-main
|
tests/meerkat/interactive/graph/test_reactivity.py
|
meerkat-main
|
tests/meerkat/interactive/graph/__init__.py
|
|
import warnings
from typing import Tuple
import pytest
import meerkat as mk
from meerkat.interactive.graph.magic import magic
from meerkat.interactive.graph.operation import Operation
from meerkat.interactive.graph.store import _IteratorStore
from meerkat.mixins.reactifiable import MarkableMixin
def _is_output_reactive(
out,
input_store: mk.Store,
*,
op_name: str = None,
op_num_children: int = None,
):
"""Check if the output is reactive.
The output is reactive if:
1. It is a Store / Markable object.
2. It is marked
3. out.inode is not None
4. The input_store is the grandparent of out
Args:
out: The output store.
input_store: The input store.
op_name: The name of the operation. If None, all operations are checked.
op_num_children: The number of children each operation should have.
"""
assert isinstance(out, MarkableMixin)
assert out.marked
assert out.inode is not None
# These should always be operations.
children = input_store.inode.trigger_children
assert all(isinstance(c.obj, Operation) for c in children)
if op_name is not None:
children = {c.obj.fn.__name__: c for c in children}
children = [children[op_name]]
if op_num_children is not None:
for op_inode in children:
assert len(op_inode.trigger_children) == op_num_children, (
f"{op_inode.obj.fn.__name__} has {len(op_inode.trigger_children)} "
f"children, expected {op_num_children}"
)
# One of the children should be the op.
grandchildren = [gc for c in children for gc in c.trigger_children]
assert out.inode in grandchildren
def _is_out_unmagiced(out, input_store: mk.Store):
"""Check if the output is unmagiced.
An output is unmagiced if it is not wrapped in a Store.
"""
assert not isinstance(out, mk.Store)
@mk.endpoint()
def _set_store(store: mk.Store, value):
store.set(value)
@pytest.mark.parametrize("is_magic", [False, True])
def test_store_reactive_math(is_magic: bool):
"""Test basic math methods are reactive.
A method is reactive if it:
1. Returns a Store
2. Creates a connection based on the op.
"""
store = mk.Store(1)
expected = {
"add": 2,
"sub": 0,
"mul": 1,
"truediv": 1,
"floordiv": 1,
"mod": 0,
"divmod": (1, 0),
"pow": 1,
"neg": -1,
"pos": 1,
# Abs is invoked with abs(store), so it is not reactive.
# "abs": 1,
"lt": False,
"le": True,
"eq": True,
"ne": False,
"gt": False,
"ge": True,
}
out = {}
with magic(magic=is_magic):
out["add"] = store + 1
out["sub"] = store - 1
out["mul"] = store * 1
out["truediv"] = store.__truediv__(1)
out["floordiv"] = store // 1
out["mod"] = store % 1
out["divmod"] = divmod(store, 1)
out["pow"] = store**1
out["neg"] = -store
out["pos"] = +store
# Abs is invoked with abs(store), so it is not reactive.
# out["abs"] = abs(store)
out["lt"] = store < 1
out["le"] = store <= 1
out["eq"] = store == 1
out["ne"] = store != 1
out["gt"] = store > 1
out["ge"] = store >= 1
# Regardless of whether magic is on or off, math operations should always be
# reactive.
assert len(store.inode.trigger_children) == len(expected)
assert store.inode is not None
for k, v in out.items():
_is_output_reactive(v, store, op_name=f"__{k}__", op_num_children=1)
assert v == expected[k]
@pytest.mark.parametrize("other", [1, 2])
@pytest.mark.parametrize("is_magic", [False, True])
def test_store_imethod(other: int, is_magic: bool):
"""Test traditional inplace methods are reactive, but return different
stores."""
def _get_expected():
# TODO: Have a variable that chooses which one of these to run.
# So we can test each one separately.
return {
"__iadd__": store + other,
"__isub__": store - other,
"__imul__": store * other,
"__itruediv__": store.__itruediv__(other),
"__ifloordiv__": store // other,
"__imod__": store % other,
"__ipow__": store**other,
"__ilshift__": store << other,
"__irshift__": store >> other,
"__iand__": store & other,
"__ixor__": store ^ other,
"__ior__": store | other,
}
store = mk.Store(1)
original = store
with pytest.warns(UserWarning):
expected = _get_expected()
# Get raw values
with mk.unmarked():
expected = _get_expected()
# Regardless of whether magic is on or off, i-math operations
# should always be reactive.
out = {}
with magic(is_magic):
for k in expected:
with pytest.warns(UserWarning):
out[k] = getattr(store, k)(other)
for k, v in out.items():
assert not isinstance(expected[k], mk.Store), f"Expected: {k} returned a Store."
assert isinstance(v, mk.Store), f"{k} did not return a Store."
assert v.marked
assert id(v) != id(original), f"{k} did not return a new Store."
assert v == expected[k], f"{k} did not return the correct value."
@pytest.mark.parametrize("is_magic", [False, True])
def test_store_as_iterator(is_magic: bool):
store = mk.Store((1, 2))
with magic(is_magic):
store_iter = iter(store)
# Regardless of whether magic is on, the iterator should be a Store.
# The store should also be added to the graph.
assert isinstance(store_iter, _IteratorStore)
_is_output_reactive(store_iter, store, op_name="__iter__", op_num_children=1)
# When we fetch things from the iterator, they should be stores.
# Similar to the above, only when magic is on, should the store be added
# to the graph.
with magic(is_magic):
values = [v for v in store_iter]
for v in values:
assert isinstance(v, mk.Store)
if not is_magic:
return
# Test the nodes get updated properly
assert len(values) == 2
with magic():
inode1 = values[0].inode
inode2 = values[1].inode
_set_store(store, [10, 11])
assert inode1.obj == 10
assert inode2.obj == 11
@pytest.mark.parametrize("is_magic", [False, True])
def test_tuple_unpack(is_magic: bool):
store = mk.Store((1, 2))
with magic(is_magic):
a, b = store
# Iterators and next are always reactive, so these should
# always be stores.
assert isinstance(a, mk.Store)
assert isinstance(b, mk.Store)
# Test the nodes get updated properly
a_inode = a.inode
b_inode = b.inode
_set_store(store, [10, 11])
assert a_inode.obj == 10
assert b_inode.obj == 11
@pytest.mark.parametrize("is_magic", [False, True])
def test_tuple_unpack_return_value(is_magic: bool):
@mk.gui.reactive(nested_return=False)
def add(seq: Tuple[int]):
return tuple(x + 1 for x in seq)
store = mk.Store((1, 2))
# We need to use the `magic` context manager here because tuple unpacking
# happens outside of the function `add`. Without it, the
# tuple unpacking will not be added to the graph.
with magic(is_magic):
a, b = add(store)
assert a == 2
assert b == 3
# Iterators and next are always reactive, so these should
# always be stores.
assert isinstance(a, mk.Store)
assert isinstance(b, mk.Store)
# Test the nodes get updated properly
a_inode = a.inode
b_inode = b.inode
_set_store(store, [10, 11])
assert a_inode.obj == 11
assert b_inode.obj == 12
@pytest.mark.parametrize("is_magic", [False, True])
def test_bool(is_magic: bool):
store = mk.Store(0)
with magic(is_magic):
if is_magic:
with pytest.warns(UserWarning):
out_bool = bool(store)
with pytest.warns(UserWarning):
out_not = not store
else:
out_bool = bool(store)
out_not = not store
# Store.__bool__ is not reactive.
assert not isinstance(out_bool, mk.Store)
assert not isinstance(out_not, mk.Store)
@pytest.mark.parametrize("is_magic", [False, True])
@pytest.mark.parametrize("obj", [[0, 1, 2], (0, 1, 2), {0: "a", 1: "b", 2: "c"}])
@pytest.mark.parametrize("idx", [0, 1, 2])
def test_store_getitem(is_magic: bool, obj, idx: int):
store = mk.Store(obj)
with magic(is_magic):
out = store[idx]
assert isinstance(out, mk.Store)
_is_output_reactive(out, store, op_name="__getitem__", op_num_children=1)
def test_store_getitem_custom_obj():
"""Test we can call getitem on a custom object."""
class Foo:
def __init__(self, x):
self.x = x
def __getitem__(self, key):
return self.x[key]
store = mk.Store(Foo([0, 1, 2]))
out = store[0]
assert isinstance(out, mk.Store)
_is_output_reactive(out, store, op_name="__getitem__", op_num_children=1)
@pytest.mark.parametrize("is_magic", [False, True])
@pytest.mark.parametrize("obj", [[0, 1, 2], (0, 1, 2), {0: "a", 1: "b", 2: "c"}])
@pytest.mark.parametrize("idx", [0, 1, 2])
def test_store_getitem_multi_access(is_magic: bool, obj, idx: int):
"""Test that when we access the same index multiple times, we get unique
stores."""
store = mk.Store(obj)
with magic(is_magic):
out1 = store[idx]
out2 = store[idx]
assert isinstance(out1, mk.Store)
_is_output_reactive(out1, store)
assert isinstance(out2, mk.Store)
_is_output_reactive(out2, store)
assert out1 is not out2
@pytest.mark.parametrize("is_magic", [False, True])
def test_index(is_magic: bool):
@mk.endpoint()
def _endpoint(store: mk.Store):
store.set([4, 5, 1])
store = mk.Store([0, 1, 2])
with mk.magic(magic=is_magic):
out = store.index(1)
assert out == 1
if is_magic:
inode = out.inode
assert isinstance(out, mk.Store)
# Make sure it reacts.
_endpoint.run(store)
assert inode.obj == 2
# _is_output_reactive(out, store, op_name="index", op_num_children=1)
else:
assert not isinstance(out, mk.Store)
def test_iterator_store_warning():
"""The store class should raise a warning when initialized with an
iterator."""
with pytest.warns(UserWarning):
mk.Store(iter([0, 1, 2]))
# No error when initializing an _IteratorStore with an iterator.
with warnings.catch_warnings():
warnings.simplefilter("error")
x = iter([0, 1, 2])
_IteratorStore(x)
# No error when calling iter on a store.
with warnings.catch_warnings():
warnings.simplefilter("error")
iter(mk.Store([0, 1, 2]))
|
meerkat-main
|
tests/meerkat/interactive/graph/test_store.py
|
import pytest
import meerkat as mk
class FooToWrap:
"""Wrap this in a store to test for magic."""
def __init__(self, x):
self.fn = lambda y: x + y
self.x = x
def add(self, y):
return self.x + y
def __getitem__(self, i):
return self.x
def __len__(self):
return self.x
@pytest.mark.parametrize("is_magic", [True, False])
@pytest.mark.parametrize("name", ["x", "fn"])
def test_magic_attribute_accessor(is_magic: bool, name: str):
foo = mk.Store(FooToWrap(1))
assert foo.inode is None
with mk.magic(is_magic):
if name == "x":
out = foo.x
expected = 1
elif name == "fn":
out = foo.fn
with mk.magic(False):
expected = foo.fn
assert out == expected
if is_magic:
assert foo.inode is not None
assert out.inode is not None
else:
assert foo.inode is None
assert not isinstance(out, mk.Store)
@pytest.mark.parametrize("is_magic", [True, False])
def test_magic_getitem(is_magic: bool):
foo = mk.Store(FooToWrap(1))
assert foo.inode is None
with mk.magic(is_magic):
out = foo[0]
assert out == 1
if is_magic:
assert foo.inode is not None
assert out.inode is not None
else:
# getitem is reactive, so a node will always be created.
assert foo.inode is not None
assert isinstance(out, mk.Store)
@pytest.mark.parametrize("is_magic", [True, False])
def test_magic_instance_method(is_magic: bool):
foo = mk.Store(FooToWrap(1))
assert foo.inode is None
with mk.magic(is_magic):
fn = foo.add
out_add: mk.Store = fn(1)
assert out_add == 2
if is_magic:
assert foo.inode is not None
assert out_add.inode is not None
else:
assert foo.inode is None
assert isinstance(out_add, int) and not isinstance(out_add, mk.Store)
|
meerkat-main
|
tests/meerkat/interactive/graph/test_magic.py
|
import meerkat as mk
def test_operation_with_skip():
"""Test an operation with noop conditions."""
def skip_fn(old_x, old_y, new_x, new_y):
# Arbitrary contrived noop function.
return new_x == 2 or new_y == 2
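# skip_fn receives the old and new values of each reactive argument
# (old_x, old_y, new_x, new_y here); returning True tells the operation to
# skip re-execution, so downstream results keep their previous value.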
@mk.gui.reactive(skip_fn=skip_fn)
def fn(x: int, y: int):
return x + y
@mk.endpoint()
def set_xy(x: mk.Store, y: mk.Store, x_val, y_val):
x.set(x_val)
y.set(y_val)
x = mk.Store(1)
y = mk.Store(1)
result = fn(x, y)
assert result == 2
set_xy(x, y, 3, 4)
assert result == 7
# the noop function should prevent the update
# so the value should stay 7, not update to 5.
set_xy(x, y, 2, 3)
assert result == 7
def test_instance_method_with_skip():
"""Test instance method with noop conditions."""
def skip_fn(old_y, new_y):
# Arbitrary contrived noop function.
return new_y == 2
class Foo:
def __init__(self, x: int):
self.x = x
@mk.gui.reactive(skip_fn=skip_fn)
def fn(self, y):
return self.x + y
@mk.endpoint()
def set_xy(y: mk.Store, y_val: int):
y.set(y_val)
foo = Foo(1)
y = mk.Store(1)
result = foo.fn(y)
assert result == 2
set_xy(y, 4)
assert result == 5
# the noop function should prevent the update
# so the value should stay 5, not update to 3.
set_xy(y, 2)
assert result == 5
|
meerkat-main
|
tests/meerkat/interactive/graph/test_operation.py
|
import numpy as np
import pytest
import meerkat as mk
from meerkat.interactive import endpoint
from meerkat.interactive.graph.reactivity import reactive
@reactive()
def binary_op(df_1: mk.DataFrame, df_2: mk.DataFrame) -> mk.DataFrame:
return mk.DataFrame({"a": df_1["a"] + df_2["a"]})
@reactive()
def unary_op(df_1) -> mk.DataFrame:
return mk.DataFrame({"a": df_1["a"] * 3})
@endpoint()
def update_df(df: mk.DataFrame, col: str, value: np.ndarray) -> mk.DataFrame:
df[col] = value
return df
@reactive()
def _add(a, b):
return a + b
# @endpoint()
# def _set_store(store: mk.Store, value, _check_equality=False):
# # We have to explicitly check if the value is the same.
# if _check_equality and store == value:
# return
# store.set(value)
@endpoint()
def _set_store(store: mk.Store, value):
# Unlike the commented-out version above, this always sets the value
# without checking for equality.
store.set(value)
def test_trigger():
df_1 = mk.DataFrame({"a": np.arange(10)}).mark()
df_2 = mk.DataFrame({"a": np.arange(10)}).mark()
derived_1 = binary_op(df_1, df_2)
derived_2 = unary_op(derived_1)
derived_3 = binary_op(derived_1, derived_2)
derived_4 = binary_op(derived_3, df_2)
# Unpack the node from the output dataframes.
derived_1_node = derived_1.inode
derived_2_node = derived_2.inode
derived_3_node = derived_3.inode
derived_4_node = derived_4.inode
# Update the values of df_1 and df_2.
update_df(df_1, "a", np.arange(10, 20))
update_df(df_2, "a", np.arange(10, 20))
# The node is attached to different dataframes on trigger.
# So we need to fetch the updated dataframe associated with the node.
derived_1 = derived_1_node.obj
derived_2 = derived_2_node.obj
derived_3 = derived_3_node.obj
derived_4 = derived_4_node.obj
# assert len(modifications) == 6
assert (derived_1["a"] == np.arange(10, 20) * 2).all()
assert (derived_2["a"] == derived_1["a"] * 3).all()
assert (derived_3["a"] == derived_2["a"] + derived_1["a"]).all()
assert (derived_4["a"] == derived_3["a"] + np.arange(10, 20)).all()
# TODO: fix the test when we resolve endpoint partialing issue.
@pytest.mark.parametrize("check_equality", [False])
@pytest.mark.parametrize("toggle_mark", [None, "a", "b"])
def test_trigger_hybrid_marked_unmarked_inputs(check_equality: bool, toggle_mark: str):
"""Test trigger functionality when some inputs are marked and some are
not."""
a = mk.Store(1)
b = mk.Store(2).unmark()
c = _add(a, b)
if toggle_mark == "a":
a.unmark()
elif toggle_mark == "b":
b.mark()
assert c == 3
assert a.inode is not None
assert b.inode is not None
assert len(a.inode.trigger_children) == 1
assert len(b.inode.trigger_children) == 0
op_inode = a.inode.trigger_children[0]
assert op_inode.obj.fn.__name__ == "_add"
assert list(op_inode.obj.args) == [a.inode, b.inode]
# a was marked on operation construction.
# changing it should trigger the operation.
_set_store(a, 2)
assert c == 4
# b was not marked on operation construction.
# changing it should not trigger the operation.
_set_store(b, 10)
assert c == 4
# Changing a will retrigger the operation.
# But the value of b was updated right before, so the operation
# should use the new value of b.
_set_store(a, 3)
assert c == 13
# Changing b will not retrigger the operation.
_set_store(b, 0)
assert c == 13
# If the endpoint does not issue a modification (i.e. store.set is not called),
# then the operation should not be triggered.
# When check_equality is True, .set is only called when the new value is different
# from the old value.
_set_store(a, a.value)
if check_equality:
assert c == 13
else:
assert c == 3
def test_trigger_unmarked_inputs():
a = mk.Store(1).unmark()
b = mk.Store(2).unmark()
c = _add(a, b)
assert c == 3
# When all inputs are unmarked, we shouldn't create nodes
# unnecessarily.
assert a.inode is None
assert b.inode is None
|
meerkat-main
|
tests/meerkat/interactive/graph/test_trigger.py
|
import numpy as np
import pytest
from PIL import Image
from meerkat.interactive.formatter.image import ImageFormatter
@pytest.mark.parametrize("skip_copy", [True, False])
def test_image_formatter_encode_skip_copy(skip_copy: bool):
"""Test image formatter on object columns."""
formatter = ImageFormatter(max_size=(20, 20))
image = Image.fromarray(np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
formatter.encode(image, skip_copy=skip_copy)
if skip_copy:
assert image.size == (20, 20)
else:
assert image.size == (100, 100)
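# Note: with skip_copy=True the formatter is allowed to resize the original
# PIL image in place (hence its size becomes max_size), while skip_copy=False
# leaves the caller's image untouched.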
|
meerkat-main
|
tests/meerkat/interactive/formatter/test_image.py
|
meerkat-main
|
tests/meerkat/interactive/formatter/__init__.py
|
|
meerkat-main
|
tests/meerkat/interactive/api/__init__.py
|
|
from fastapi.testclient import TestClient
from meerkat.interactive.api.main import app
client = TestClient(app)
# def test_get_categories():
# from meerkat.state import state
# state.secrets.add(api="ai21", api_key="")
# # TODO(karan): this .set() below fails to work,
# # but ideally we would like it to be done here
# # the issue is caused by Manifest, which sets up a SQLite cache
# # that throws an error that it's being used in multiple threads (and
# # can only be used in the thread in which it was created)
# # As a workaround, we run a set inside the API call at the moment.
# state.llm.set(client="ai21", engine="j1-jumbo")
# response = client.post(
# "/llm/generate/categories",
# json={
# "dataset_description": "face images of people",
# "hint": "i'm interested in exploring unusual correlations \
# between attributes",
# },
# )
# assert response.status_code == 200
# def test_get_categorization():
# from meerkat.state import state
# state.secrets.add(api="ai21", api_key="")
# # TODO(karan): this .set() below fails to work,
# # but ideally we would like it to be done here
# # the issue is caused by Manifest, which sets up a SQLite cache
# # that throws an error that it's being used in multiple threads (and
# # can only be used in the thread in which it was created)
# # As a workaround, we run a set inside the API call at the moment.
# state.llm.set(client="ai21", engine="j1-jumbo")
# response = client.post(
# "/llm/generate/categorization",
# json={
# "description": "types of paintings",
# "existing_categories": ["none"],
# },
# )
# assert response.status_code == 200
|
meerkat-main
|
tests/meerkat/interactive/api/routers/test_llm.py
|
meerkat-main
|
tests/meerkat/interactive/api/routers/__init__.py
|
|
import numpy as np
import pytest
from fastapi.testclient import TestClient
import meerkat as mk
from meerkat.interactive.api.main import app
from meerkat.interactive.graph import Store, reactive
client = TestClient(app)
@pytest.fixture
def df_testbed():
df = mk.DataFrame(
{"a": np.arange(10), "b": np.arange(10, 20), "clip(a)": np.zeros((10, 4))}
)
return {"df": df}
def test_store():
store = Store(0)
derived = unary_op(store)
response = client.post(f"/store/{store.id}/update/", json={"value": 2})
assert response.status_code == 200
assert derived.value == 5
assert store.value == 2
@reactive
def unary_op(value):
return value + 3
|
meerkat-main
|
tests/meerkat/interactive/api/routers/test_store.py
|
import numpy as np
import pytest
from fastapi.testclient import TestClient
import meerkat as mk
from meerkat.interactive.api.main import app
client = TestClient(app)
@pytest.fixture
def df_testbed():
df = mk.DataFrame(
{"a": np.arange(10), "b": np.arange(10, 20), "clip(a)": np.zeros((10, 4))}
)
df.set_primary_key("a")
return {"df": df}
def test_get_schema(df_testbed):
df: mk.DataFrame = df_testbed["df"]
response = client.post(
f"/df/{df.id}/schema/",
json={"columns": ["a", "b"]},
)
assert response.status_code == 200
assert response.json() == {
"id": df.id,
"columns": [
{
"name": "a",
"type": "PandasScalarColumn",
"cellComponent": "MeerkatNumber",
"cellProps": {
"dtype": "int",
"precision": 3,
"percentage": False,
"classes": "",
},
"cellDataProp": "data",
},
{
"name": "b",
"type": "PandasScalarColumn",
"cellComponent": "MeerkatNumber",
"cellProps": {
"dtype": "int",
"precision": 3,
"percentage": False,
"classes": "",
},
"cellDataProp": "data",
},
],
"nrows": 10,
"primaryKey": "a",
}
def test_rows(df_testbed):
df: mk.DataFrame = df_testbed["df"]
response = client.post(
f"/df/{df.id}/rows/",
json={"start": 3, "end": 7},
)
assert response.status_code == 200
response_json = response.json()
assert response_json["columnInfos"] == [
{
"name": "a",
"type": "PandasScalarColumn",
"cellComponent": "MeerkatNumber",
"cellProps": {
"dtype": "int",
"precision": 3,
"percentage": False,
"classes": "",
},
"cellDataProp": "data",
},
{
"name": "b",
"type": "PandasScalarColumn",
"cellComponent": "MeerkatNumber",
"cellProps": {
"dtype": "int",
"precision": 3,
"percentage": False,
"classes": "",
},
"cellDataProp": "data",
},
{
"name": "clip(a)",
"type": "NumPyTensorColumn",
"cellComponent": "MeerkatTensor",
"cellProps": {"dtype": "float64"},
"cellDataProp": "data",
},
]
assert response_json["rows"] == [
[3, 13, {"data": [0.0, 0.0, 0.0, 0.0], "shape": [4], "dtype": "float64"}],
[4, 14, {"data": [0.0, 0.0, 0.0, 0.0], "shape": [4], "dtype": "float64"}],
[5, 15, {"data": [0.0, 0.0, 0.0, 0.0], "shape": [4], "dtype": "float64"}],
[6, 16, {"data": [0.0, 0.0, 0.0, 0.0], "shape": [4], "dtype": "float64"}],
]
assert response_json["fullLength"] == 10
assert response_json["posidxs"] == [3, 4, 5, 6]
assert response_json["primaryKey"] == df.primary_key
@pytest.mark.skip
def test_sort(df_testbed):
df = df_testbed["df"]
df["c"] = np.random.rand(10)
response = client.post(f"/df/{df.id}/sort/", json={"by": "c"})
assert response.status_code == 200
assert response.json()["id"] != df.id
assert response.json()["columns"] == [
{
"name": "a",
"type": "NumpyArrayColumn",
"cell_component": "basic",
"cell_props": {"dtype": "int"},
},
{
"name": "b",
"type": "NumpyArrayColumn",
"cell_component": "basic",
"cell_props": {"dtype": "int"},
},
{
"name": "clip(a)",
"type": "NumpyArrayColumn",
"cell_component": "basic",
"cell_props": {"dtype": "str"},
},
{
"name": "c",
"type": "NumpyArrayColumn",
"cell_component": "basic",
"cell_props": {"dtype": "float"},
},
]
@pytest.mark.skip
@pytest.mark.parametrize("aggregation", ["mean"])
def test_aggregate_w_name(df_testbed, aggregation: str):
df = df_testbed["df"]
response = client.post(
f"/df/{df.id}/aggregate/",
json={"aggregation": aggregation},
)
assert response.status_code == 200
assert response.json() == {"a": 4.5, "b": 14.5, "clip(a)": 0.0}
@pytest.mark.skip
def test_aggregate_w_id_accepts_df(df_testbed):
df = df_testbed["df"]
from meerkat.interactive.gui import Aggregation
aggregation = lambda df: (df["a"] + df["b"]).mean() # noqa: E731
aggregation = Aggregation(aggregation)
response = client.post(
f"/df/{df.id}/aggregate/",
json={"aggregation_id": aggregation.id, "accepts_df": True},
)
assert response.status_code == 200, response.text
assert response.json() == {"df": np.mean(df["b"] + df["a"])}
@pytest.mark.skip
def test_aggregate_w_id_accepts_col(df_testbed):
df = df_testbed["df"]
from meerkat.interactive.gui import Aggregation
aggregation = lambda col: col.mean() # noqa: E731
aggregation = Aggregation(aggregation)
response = client.post(
f"/df/{df.id}/aggregate/",
json={
"aggregation_id": aggregation.id,
"columns": ["a"],
},
)
assert response.status_code == 200, response.text
assert response.json() == {"a": np.mean(df["a"])}
|
meerkat-main
|
tests/meerkat/interactive/api/routers/test_dataframe.py
|
import numpy as np
import pytest
import meerkat as mk
from meerkat.ops.sample import sample
@pytest.fixture
def simple_df():
return mk.DataFrame(
{
"tensor": mk.TorchTensorColumn([1, 2, 3, 4]),
"pandas": mk.ScalarColumn([8, 7, 9, 6]),
"numpy": mk.TorchTensorColumn([4, 6, 5, 7]),
}
)
@pytest.fixture
def simple_column():
return mk.TorchTensorColumn([4, 6, 5, 7])
def test_sample_df_w_n(simple_df):
out = simple_df.sample(
n=2,
random_state=42,
)
assert (out["tensor"] == mk.TorchTensorColumn([2, 4])).all()
assert (out["pandas"] == mk.ScalarColumn([7, 6])).all()
assert (out["numpy"] == mk.TorchTensorColumn([6, 7])).all()
def test_sample_df_w_frac(simple_df):
out = simple_df.sample(
frac=0.5,
random_state=42,
)
assert (out["tensor"] == mk.TorchTensorColumn([2, 4])).all()
assert (out["pandas"] == mk.ScalarColumn([7, 6])).all()
assert (out["numpy"] == mk.TorchTensorColumn([6, 7])).all()
def test_sample_df_w_weights(simple_df):
out = simple_df.sample(
n=2,
weights=np.array([0.5, 0.1, 0.2, 0.2]),
random_state=42,
)
assert (out["tensor"] == mk.TorchTensorColumn([1, 4])).all()
assert (out["pandas"] == mk.ScalarColumn([8, 6])).all()
assert (out["numpy"] == mk.TorchTensorColumn([4, 7])).all()
def test_sample_df_w_weights_as_str(simple_df):
out = simple_df.sample(
n=2,
weights="tensor",
random_state=42,
)
assert (out["tensor"] == mk.TorchTensorColumn([3, 4])).all()
assert (out["pandas"] == mk.ScalarColumn([9, 6])).all()
assert (out["numpy"] == mk.TorchTensorColumn([5, 7])).all()
def test_column(simple_column):
out = simple_column.sample(
n=2,
random_state=42,
)
assert (out == mk.TorchTensorColumn([6, 7])).all()
def test_column_w_weights_as_str(simple_column):
with pytest.raises(ValueError):
sample(
simple_column,
n=2,
weights="tensor",
random_state=42,
)
|
meerkat-main
|
tests/meerkat/ops/test_sample.py
|
import pytest
from ...utils import product_parametrize
from ..columns.abstract import AbstractColumnTestBed, column_parametrize
# from ..columns.scalar.test_arrow import ArrowScalarColumnTestBed
from ..columns.scalar.test_pandas import PandasScalarColumnTestBed
# from ..columns.tensor.test_numpy import NumPyTensorColumnTestBed
# from ..columns.tensor.test_torch import TorchTensorColumnTestBed
@pytest.fixture(
**column_parametrize(
[
# NumPyTensorColumnTestBed.get_params(
# config={"num_dims": [1], "dim_length": [1]}
# ),
PandasScalarColumnTestBed,
# TorchTensorColumnTestBed.get_params(
# config={"num_dims": [1], "dim_length": [1]}
# ),
# ArrowScalarColumnTestBed,
]
)
)
def column_testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@product_parametrize(params={"batched": [True, False]})
def test_filter(column_testbed: AbstractColumnTestBed, batched: bool):
"""multiple_dim=False."""
col = column_testbed.col
filter_spec = column_testbed.get_filter_spec(
batched=batched,
)
def func(x):
out = filter_spec["fn"](x)
return out
result = col.filter(
func,
batch_size=4,
is_batched_fn=batched,
)
assert result.is_equal(filter_spec["expected_result"])
|
meerkat-main
|
tests/meerkat/ops/test_filter.py
|
meerkat-main
|
tests/meerkat/ops/__init__.py
|
|
from typing import List, Union
import meerkat as mk
def make_test_df(
by: Union[str, List[str]],
ascending: Union[bool, List[bool]] = True,
):
"""Helper function, returns test df."""
df = mk.DataFrame(
{
"tensor": mk.TorchTensorColumn([3, 1, 2]),
"pandas": mk.ScalarColumn([9, 8, 7]),
"numpy": mk.TorchTensorColumn([5, 4, 6]),
}
)
test = df.sort(by=by, ascending=ascending)
return test
def make_tiebreaker_test_df(
by: Union[str, List[str]],
ascending: Union[bool, List[bool]] = True,
):
df = mk.DataFrame(
{
"tensor": mk.TorchTensorColumn([3, 2, 1]),
"pandas": mk.ScalarColumn([9, 7, 9]),
"numpy": mk.TorchTensorColumn([4, 4, 6]),
}
)
test = df.sort(by=by, ascending=ascending)
return test
# flake8: noqa
######## SINGLE COLUMN TESTS ########
def test_sort_by_ascending_tensor_column():
"""Testing all columns after sorting by an ascending tensor column."""
test = make_test_df(by=["tensor"])
assert (
(test["tensor"] == mk.TorchTensorColumn([1, 2, 3])).all()
and (test["pandas"] == mk.ScalarColumn([8, 7, 9])).all()
and (test["numpy"] == mk.TorchTensorColumn([4, 6, 5])).all()
)
def test_sort_by_ascending_pandas_on_pandas_column():
"""Testing all columns after sorting by an ascending pandas column."""
test = make_test_df(by=["pandas"])
assert (
(test["tensor"] == mk.TorchTensorColumn([2, 1, 3])).all()
and (test["pandas"] == mk.ScalarColumn([7, 8, 9])).all()
and (test["numpy"] == mk.TorchTensorColumn([6, 4, 5])).all()
)
def test_sort_single_numpy_column_ascending():
"""Testing all columns after sorting by an ascending numpy column."""
test = make_test_df(by=["numpy"])
assert (
(test["tensor"] == mk.TorchTensorColumn([1, 3, 2])).all()
and (test["pandas"] == mk.ScalarColumn([8, 9, 7])).all()
and (test["numpy"] == mk.TorchTensorColumn([4, 5, 6])).all()
)
# flake8: noqa
######## SINGLE COLUMN TESTS DESCENDING ########
def test_sort_single_tensor_column_descending():
"""Testing all columns after sorting by a descending tensor column."""
test = make_test_df(by=["tensor"], ascending=False)
assert (
(test["tensor"] == mk.TorchTensorColumn([3, 2, 1])).all()
and (test["pandas"] == mk.ScalarColumn([9, 7, 8])).all()
and (test["numpy"] == mk.TorchTensorColumn([5, 6, 4])).all()
)
def test_sort_single_pandas_column_descending():
"""Testing all columns after sorting by a descending pandas column."""
test = make_test_df(by=["pandas"], ascending=False)
assert (
(test["tensor"] == mk.TorchTensorColumn([3, 1, 2])).all()
and (test["pandas"] == mk.ScalarColumn([9, 8, 7])).all()
and (test["numpy"] == mk.TorchTensorColumn([5, 4, 6])).all()
)
def test_sort_single_numpy_column_descending():
"""Testing all columns after sorting by a descending numpy column."""
test = make_test_df(by=["numpy"], ascending=False)
assert (
(test["tensor"] == mk.TorchTensorColumn([2, 3, 1])).all()
and (test["pandas"] == mk.ScalarColumn([7, 9, 8])).all()
and (test["numpy"] == mk.TorchTensorColumn([6, 5, 4])).all()
)
######## MULTIPLE COLUMN TESTS ########
def test_sort_numpy_and_tensor_ascending():
"""# Testing all columns after sorting with multiple ascending columns
(numpy and tensor)"""
test = make_tiebreaker_test_df(by=["numpy", "tensor"], ascending=True)
assert (
(test["tensor"] == mk.TorchTensorColumn([2, 3, 1])).all()
and (test["pandas"] == mk.ScalarColumn([7, 9, 9])).all()
and (test["numpy"] == mk.TorchTensorColumn([4, 4, 6])).all()
)
def test_sort_numpy_and_pandas_ascending():
"""Testing all columns after sorting with multiple ascending columns (numpy
and tensor)"""
test = make_tiebreaker_test_df(by=["numpy", "pandas"], ascending=True)
assert (
(test["tensor"] == mk.TorchTensorColumn([2, 3, 1])).all()
and (test["pandas"] == mk.ScalarColumn([7, 9, 9])).all()
and (test["numpy"] == mk.TorchTensorColumn([4, 4, 6])).all()
)
def test_sort_numpy_and_pandas_ascending_variable():
"""Testing all columns after sorting with multiple ascending columns (numpy
and tensor)"""
test = make_tiebreaker_test_df(by=["numpy", "pandas"], ascending=[True, False])
assert (
(test["tensor"] == mk.TorchTensorColumn([3, 2, 1])).all()
and (test["pandas"] == mk.ScalarColumn([9, 7, 9])).all()
and (test["numpy"] == mk.TorchTensorColumn([4, 4, 6])).all()
)
def test_sort_numpy_and_pandas_and_tensor_ascending():
"""Testing all columns after sorting with multiple ascending columns (numpy
and pandas and tensor)"""
df = mk.DataFrame(
{
"tensor": mk.TorchTensorColumn([3, 2, 1]),
"pandas": mk.ScalarColumn([9, 7, 7]),
"numpy": mk.TorchTensorColumn([6, 4, 4]),
}
)
test = df.sort(by=["numpy", "pandas", "tensor"], ascending=True)
assert (
(test["tensor"] == mk.TorchTensorColumn([1, 2, 3])).all()
and (test["pandas"] == mk.ScalarColumn([7, 7, 9])).all()
and (test["numpy"] == mk.TorchTensorColumn([4, 4, 6])).all()
)
def test_sort_tensor_and_pandas_descending():
"""Testing all columns after sorting with multiple ascending columns
(tensor and pandas)."""
df = mk.DataFrame(
{
"tensor": mk.TorchTensorColumn([3, 2, 2]),
"pandas": mk.ScalarColumn([9, 8, 7]),
"numpy": mk.TorchTensorColumn([6, 4, 4]),
}
)
test = df.sort(by=["tensor", "pandas"], ascending=False)
assert (
(test["tensor"] == mk.TorchTensorColumn([3, 2, 2])).all()
and (test["pandas"] == mk.ScalarColumn([9, 8, 7])).all()
and (test["numpy"] == mk.TorchTensorColumn([6, 4, 4])).all()
)
def test_sort_with_store():
df = mk.DataFrame({"tensor": mk.TorchTensorColumn([3, 2, 4])})
test = df.sort(by=mk.Store("tensor"), ascending=True)
assert (test["tensor"] == mk.TorchTensorColumn([2, 3, 4])).all()
|
meerkat-main
|
tests/meerkat/ops/test_sort.py
|
"""Unittests for Datasets."""
import os
from typing import Dict
import numpy as np
import pytest
import torch
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.file import FileColumn
from meerkat.columns.deferred.image import ImageColumn
from meerkat.columns.object.base import ObjectColumn
from meerkat.columns.tensor.torch import TorchTensorColumn
from meerkat.dataframe import DataFrame
from meerkat.errors import MergeError
from ...testbeds import MockImageColumn
from ..test_dataframe import DataFrameTestBed
class MergeTestBed(DataFrameTestBed):
DEFAULT_CONFIG = {
"lengths": [
{"left": 12, "right": 16},
{"left": 16, "right": 16},
{"left": 16, "right": 12},
],
"consolidated": [True, False],
}
def __init__(
self,
column_configs: Dict[str, Column],
simple: bool = False,
        lengths: Dict[str, int] = None,
        consolidated: bool = True,
tmpdir: str = None,
):
self.side_to_df = {}
if simple:
# TODO (Sabri): do away with the simple testbed, and replace with the full
# one after updating support for missing values
# https://github.com/robustness-gym/meerkat/issues/123
np.random.seed(1)
self.side_to_df["left"] = DataFrame.from_batch(
{
"key": np.arange(lengths["left"]),
"b": list(np.arange(lengths["left"])),
"c": [[i] for i in np.arange(lengths["left"])],
"d": (torch.arange(lengths["left"]) % 3),
"e": [f"1_{i}" for i in np.arange(lengths["left"])],
}
)[np.random.permutation(np.arange(lengths["left"]))]
self.side_to_df["right"] = DataFrame.from_batch(
{
"key": np.arange(lengths["right"]),
"b": list(np.arange(lengths["right"])),
"e": [f"1_{i}" for i in np.arange(lengths["right"])],
"f": (np.arange(lengths["right"]) % 2),
}
)
else:
for side in ["left", "right"]:
side_tmpdir = os.path.join(tmpdir, side)
os.makedirs(side_tmpdir)
column_testbeds = self._build_column_testbeds(
column_configs, length=lengths[side], tmpdir=side_tmpdir
)
columns = {
name: testbed.col for name, testbed in column_testbeds.items()
}
df = DataFrame.from_batch(columns)
df["key"] = np.arange(len(df))
if consolidated:
df.consolidate()
if side == "left":
np.random.seed(1)
df = df[np.random.permutation(np.arange(len(df)))]
self.side_to_df[side] = df
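    # The resulting testbed holds a shuffled "left" frame and an in-order
    # "right" frame, both keyed by "key", so merges exercise matched and
    # unmatched rows (the two sides may have different lengths).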
@pytest.fixture
def testbed(request, tmpdir):
config = request.param
return MergeTestBed(**config, tmpdir=tmpdir)
class TestMerge:
@MergeTestBed.parametrize(params={"sort": [True, False]})
def test_merge_inner(self, testbed: MergeTestBed, sort):
df1, df2 = (
testbed.side_to_df["left"],
testbed.side_to_df["right"],
)
out = df1.merge(
df2,
on="key",
how="inner",
suffixes=("_1", "_2"),
sort=sort,
)
assert isinstance(out, DataFrame)
assert len(out) == min(len(df1), len(df2))
        # check sorted
if sort:
assert np.all(np.diff(out["key"]) >= 0)
# assert set(out.columns) == set(expected_columns)
for name in df1.columns:
if name in ["key"]:
continue
if isinstance(out[f"{name}_1"], ImageColumn):
assert out[f"{name}_1"].__class__ == out[f"{name}_2"].__class__
assert (
out[f"{name}_1"]
.data.args[0]
.is_equal(
out[f"{name}_2"].data.args[0].str.replace("right", "left")
)
)
else:
assert out[f"{name}_1"].is_equal(out[f"{name}_2"])
@pytest.mark.skip
@MergeTestBed.parametrize(config={"simple": [True]}, params={"sort": [True, False]})
def test_merge_outer(self, testbed, sort):
df1, df2 = (
testbed.side_to_df["left"],
testbed.side_to_df["right"],
)
out = df1.merge(
df2,
on="key",
how="outer",
suffixes=("_1", "_2"),
sort=sort,
)
a1 = set(df1["key"])
a2 = set(df2["key"])
assert isinstance(out, DataFrame)
assert len(out) == len(a1 | a2)
# check columns
expected_columns = ["key", "b_1", "b_2", "c", "d", "e_1", "e_2", "f"]
assert set(out.columns) == set(expected_columns)
# check sorted
if sort:
assert np.all(np.diff(out["key"]) >= 0)
# check for `None` at unmatched rows
mask_both = np.where([val in (a1 & a2) for val in out["key"]])[0]
mask_1 = np.where([val in (a1 - a2) for val in out["key"]])[0]
mask_2 = np.where([val in (a2 - a1) for val in out["key"]])[0]
# check for equality at matched rows
assert list(out[mask_both]["b_1"]) == list(out[mask_both]["b_2"])
# check for `values` at unmatched rows
assert set(out[mask_1]["b_1"]) == a1 - a2
assert set(out[mask_2]["b_2"]) == a2 - a1
# check for `None` at unmatched rows
assert np.isnan(out[mask_1]["b_2"]).all()
assert np.isnan(out[mask_2]["b_1"]).all()
# check for `values` at unmatched rows
assert set(out[mask_1]["e_1"]) == set([f"1_{i}" for i in a1 - a2])
assert set(out[mask_2]["e_2"]) == set([f"1_{i}" for i in a2 - a1])
# check for equality at matched rows
assert out[mask_1]["e_2"].isna().all()
assert out[mask_2]["e_1"].isna().all()
@pytest.mark.skip
@MergeTestBed.parametrize(config={"simple": [True]}, params={"sort": [True, False]})
def test_merge_left(self, testbed, sort):
df1, df2 = (
testbed.side_to_df["left"],
testbed.side_to_df["right"],
)
out = df1.merge(
df2,
on="key",
how="left",
suffixes=("_1", "_2"),
sort=sort,
)
a1 = set(df1["key"])
a2 = set(df2["key"])
assert isinstance(out, DataFrame)
assert len(out) == len(a1)
# check columns
expected_columns = ["key", "b_1", "b_2", "c", "d", "e_1", "e_2", "f"]
assert set(out.columns) == set(expected_columns)
# check sorted
if sort:
assert np.all(np.diff(out["key"]) >= 0)
# check for `None` at unmatched rows
mask_both = np.where([val in (a1 & a2) for val in out["key"]])[0]
mask_1 = np.where([val in (a1 - a2) for val in out["key"]])[0]
# check for equality at matched rows
assert list(out[mask_both]["b_1"]) == list(out[mask_both]["b_2"])
# check for `values` at unmatched rows
assert set(out[mask_1]["b_1"]) == a1 - a2
# check for `None` at unmatched rows
assert out[mask_1]["b_2"].isna().all()
# check for `values` at unmatched rows
assert set(out[mask_1]["e_1"]) == set([f"1_{i}" for i in a1 - a2])
# check for equality at matched rows
assert out[mask_1]["e_2"].isna().all()
@pytest.mark.skip
@MergeTestBed.parametrize(config={"simple": [True]}, params={"sort": [True, False]})
def test_merge_right(self, testbed, sort):
df1, df2 = (
testbed.side_to_df["left"],
testbed.side_to_df["right"],
)
out = df1.merge(
df2,
on="key",
how="right",
suffixes=("_1", "_2"),
sort=sort,
)
a1 = set(df1["key"])
a2 = set(df2["key"])
assert isinstance(out, DataFrame)
assert len(out) == len(a2)
# check columns
expected_columns = ["key", "b_1", "b_2", "c", "d", "e_1", "e_2", "f"]
assert set(out.columns) == set(expected_columns)
# check sorted
if sort:
assert np.all(np.diff(out["key"]) >= 0)
# check for `None` at unmatched rows
mask_both = np.where([val in (a1 & a2) for val in out["key"]])[0]
mask_2 = np.where([val in (a2 - a1) for val in out["key"]])[0]
# check for equality at matched rows
assert list(out[mask_both]["b_1"]) == list(out[mask_both]["b_2"])
# check for `values` at unmatched rows
assert set(out[mask_2]["b_2"]) == a2 - a1
# check for `None` at unmatched rows
assert (out[mask_2]["b_1"]).isna().all()
# check for `values` at unmatched rows
assert set(out[mask_2]["e_2"]) == set([f"1_{i}" for i in a2 - a1])
# check for equality at matched rows
assert (out[mask_2]["e_1"]).isna().all()
def test_merge_output_column_types(self):
df1 = DataFrame.from_batch(
{"a": np.arange(3), "b": ObjectColumn(["1", "2", "3"])}
)
df2 = df1.copy()
out = df1.merge(df2, on="b", how="inner")
assert isinstance(out["b"], ObjectColumn)
def test_image_merge(self, tmpdir):
length = 16
img_col_test_bed = MockImageColumn(length=length, tmpdir=tmpdir)
df1 = DataFrame.from_batch(
{
"a": np.arange(length),
"img": img_col_test_bed.col,
}
)
rows = np.arange(4, 8)
df2 = DataFrame.from_batch(
{
"a": rows,
}
)
out = df1.merge(df2, on="a", how="inner")
assert isinstance(out["img"], FileColumn)
assert [str(fp) for fp in out["img"].data.args[0]] == [
os.path.basename(img_col_test_bed.image_paths[row]) for row in rows
]
def test_no_columns(tmpdir):
length = 16
df1 = DataFrame.from_batch(
{
"a": np.arange(length),
}
)
rows = np.arange(4, 8)
df2 = DataFrame.from_batch(
{
"a": rows,
}
)
out = df1.merge(df2, on="a", how="inner")
assert "a" in out.columns
def test_no_columns_in_left(tmpdir):
length = 16
df1 = DataFrame.from_batch(
{
"a": np.arange(length),
}
)
rows = np.arange(4, 8)
df2 = DataFrame.from_batch({"a": rows, "b": rows})
out = df1.merge(df2, on="a", how="inner")
assert "a" in out.columns
assert "b" in out.columns
def test_no_columns_in_right(tmpdir):
length = 16
df1 = DataFrame.from_batch(
{
"a": np.arange(length),
"b": np.arange(length),
}
)
rows = np.arange(4, 8)
df2 = DataFrame.from_batch(
{
"a": rows,
}
)
out = df1.merge(df2, on="a", how="inner")
assert "a" in out.columns
assert "b" in out.columns
def test_no_on(self):
length = 16
# check dictionary not hashable
df1 = DataFrame.from_batch(
{
"a": ObjectColumn([{"a": 1}] * length),
"b": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2)
def test_check_merge_columns(self):
import meerkat as mk
length = 16
# check dictionary not hashable
df1 = DataFrame.from_batch(
{
"a": ObjectColumn([{"a": 1}] * length),
"b": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2, on=["a"])
# check multi-on
with pytest.raises(MergeError):
df1.merge(df2, on=["a", "b"])
# check multi-dimensional numpy array
df1 = DataFrame.from_batch(
{
"a": TorchTensorColumn(np.stack([np.arange(5)] * length)),
"b": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2, on="a")
# check multi-dimensional numpy array
df1 = DataFrame.from_batch(
{
"a": TorchTensorColumn(torch.stack([torch.arange(5)] * length)),
"b": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2, on="a")
# checks that **all** cells are hashable (not just the first)
df1 = DataFrame.from_batch(
{
"a": ObjectColumn(["hello"] + [{"a": 1}] * (length - 1)),
"b": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2, on="a")
# checks if Cells in cell columns are NOT hashable
df1 = DataFrame.from_batch(
{
"a": mk.column(["a"] * length).defer(lambda x: x + "b"),
"b": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2, on="a")
# checks that having a column called __right_indices__ raises a merge error
df1 = DataFrame.from_batch(
{
"a": ObjectColumn(["hello"] + [{"a": 1}] * (length - 1)),
"b": list(np.arange(length)),
"__right_indices__": list(np.arange(length)),
}
)
df2 = df1.copy()
with pytest.raises(MergeError):
df1.merge(df2, on="__right_indices__")
|
meerkat-main
|
tests/meerkat/ops/test_merge.py
|
import pytest
import meerkat as mk
from meerkat.columns.abstract import Column
from meerkat.dataframe import DataFrame
from meerkat.interactive.node import NodeMixin
@pytest.mark.parametrize("x", [False, True, -1, 0, 1, 2, 4.3])
@pytest.mark.parametrize("y", [False, True, -1, 0, 1, 2, 4.3])
@pytest.mark.parametrize("react", [False, True])
@pytest.mark.parametrize("comp", [mk.cand, mk.cor])
def test_boolean_operators_multiple_arguments(x, y, react, comp):
x_store = mk.Store(x)
y_store = mk.Store(y)
if comp == mk.cand:
expected = x and y
elif comp == mk.cor:
expected = x or y
out = comp(x_store, y_store)
assert out == expected
if react:
assert isinstance(out, mk.Store)
assert isinstance(out, type(expected))
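# mk.cand and mk.cor are functional stand-ins for the `and` / `or` keywords;
# the test checks that the result matches plain Python and that it comes back
# as a Store wrapping the same concrete type.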
@pytest.mark.parametrize("x", [False, True, -1, 0, 1, 2, 4.3])
@pytest.mark.parametrize("react", [False, True])
@pytest.mark.parametrize("comp", [mk.bool, mk.cnot])
def test_boolean_operators_single_operator(x, react, comp):
x_store = mk.Store(x)
if comp == mk.bool:
expected = bool(x)
elif comp == mk.cnot:
expected = not x
out = comp(x_store)
assert out == expected
if react:
assert isinstance(out, mk.Store)
assert isinstance(out, type(expected))
def _invoker_helper(x, *, mk_func, base_func, should_warn: bool = True):
if isinstance(x, NodeMixin):
x = mk.mark(x)
# All custom classes that support __len__ should raise a warning
# when invoked with `len(obj)`. Because NodeMixin classes are
# custom classes in Meerkat, this is a check that we enforce.
if should_warn:
with pytest.warns(UserWarning):
expected = base_func(x)
else:
expected = base_func(x)
else:
expected = base_func(x)
x = mk.Store(x)
out = mk_func(x)
assert out == expected
assert isinstance(out, mk.Store)
assert isinstance(out, type(expected))
# Check the graph is created.
assert x.inode is not None
assert len(x.inode.trigger_children) == 1
op_node = x.inode.trigger_children[0]
assert op_node.obj.fn.__name__ == mk_func.__name__
assert len(op_node.trigger_children) == 1
assert op_node.trigger_children[0] == out.inode
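# The graph assertions above verify that invoking the mk.* wrapper on a Store
# attaches exactly one operation node to the input's node and that that
# operation's only trigger child is the output Store's node.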
@pytest.mark.parametrize("x", [(), (1,), (1, 2), (0, 1, 2)])
def test_all(x):
"""Test mk.all works identically to all."""
_invoker_helper(x, mk_func=mk.all, base_func=all)
@pytest.mark.parametrize("x", [(), (1,), (1, 2), (0, 1, 2)])
def test_any(x):
"""Test mk.any works identically to any."""
_invoker_helper(x, mk_func=mk.any, base_func=any)
@pytest.mark.parametrize("x", [False, True, -1, 0, 1.0, 1.0 + 1j, "1", "1+1j"])
def test_bool(x):
"""Test mk.bool works identically to bool."""
_invoker_helper(x, mk_func=mk.bool, base_func=bool)
@pytest.mark.parametrize("x", [False, True, -1, 0, 1.0, 1.0 + 1j, "1", "1+1j"])
def test_complex(x):
"""Test mk.complex works identically to complex."""
_invoker_helper(x, mk_func=mk.complex, base_func=complex)
@pytest.mark.parametrize("x", [False, True, -1, 0, 1.0, "10"])
def test_int(x):
"""Test mk.int works identically to int."""
_invoker_helper(x, mk_func=mk.int, base_func=int)
@pytest.mark.parametrize("x", [False, True, -1, 0, 1.0, "1.0"])
def test_float(x):
"""Test mk.float works identically to float."""
_invoker_helper(x, mk_func=mk.float, base_func=float)
@pytest.mark.parametrize(
"x",
[
[1, 2, 3],
"hello world",
(1, 2, 3),
mk.DataFrame({"a": [1, 2, 3]}),
mk.TensorColumn([1, 2, 3]),
],
)
def test_len(x):
"""Test mk.len works identically to len."""
should_warn = not isinstance(x, Column)
_invoker_helper(x, mk_func=mk.len, base_func=len, should_warn=should_warn)
@pytest.mark.parametrize("x", [False, True, -1, 0])
def test_hex(x):
"""Test mk.complex works identically to complex."""
_invoker_helper(x, mk_func=mk.hex, base_func=hex)
@pytest.mark.parametrize("x", [False, True, -1, 0])
def test_oct(x):
"""Test mk.oct works identically to oct."""
_invoker_helper(x, mk_func=mk.oct, base_func=oct)
@pytest.mark.parametrize(
"x",
[
False,
True,
0,
1,
1.0,
mk.DataFrame({"a": [1, 2, 3]}),
mk.TensorColumn([1, 2, 3]),
],
)
def test_str(x):
"""Test mk.str works identically to str."""
should_warn = not isinstance(x, (DataFrame, Column))
_invoker_helper(x, mk_func=mk.str, base_func=str, should_warn=should_warn)
@pytest.mark.parametrize("x", [[3, 2, 1], ("hello", "world")])
def test_min_max(x):
"""Test mk.min and mk.max work identically to min and max."""
_invoker_helper(x, mk_func=mk.min, base_func=min)
_invoker_helper(x, mk_func=mk.max, base_func=max)
|
meerkat-main
|
tests/meerkat/ops/test_cond.py
|