instruction stringclasses 100
values | code stringlengths 78 193k | response stringlengths 259 170k | file stringlengths 59 203 |
|---|---|---|---|
Create docstrings for reusable components | from collections.abc import Callable
from typing import Any, Optional
from embedchain.config.embedder.base import BaseEmbedderConfig
try:
from chromadb.api.types import Embeddable, EmbeddingFunction, Embeddings
except RuntimeError:
from embedchain.utils.misc import use_pysqlite3
use_pysqlite3()
from chromadb.api.types import Embeddable, EmbeddingFunction, Embeddings
class EmbeddingFunc(EmbeddingFunction):
def __init__(self, embedding_fn: Callable[[list[str]], list[str]]):
self.embedding_fn = embedding_fn
def __call__(self, input: Embeddable) -> Embeddings:
return self.embedding_fn(input)
class BaseEmbedder:
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
if config is None:
self.config = BaseEmbedderConfig()
else:
self.config = config
self.vector_dimension: int
def set_embedding_fn(self, embedding_fn: Callable[[list[str]], list[str]]):
if not hasattr(embedding_fn, "__call__"):
raise ValueError("Embedding function is not a function")
self.embedding_fn = embedding_fn
def set_vector_dimension(self, vector_dimension: int):
if not isinstance(vector_dimension, int):
raise TypeError("vector dimension must be int")
self.vector_dimension = vector_dimension
@staticmethod
def _langchain_default_concept(embeddings: Any):
return EmbeddingFunc(embeddings.embed_documents)
def to_embeddings(self, data: str, **_):
embeddings = self.embedding_fn([data])
return embeddings[0] | --- +++ @@ -21,8 +21,20 @@
class BaseEmbedder:
+ """
+ Class that manages everything regarding embeddings. Including embedding function, loaders and chunkers.
+
+ Embedding functions and vector dimensions are set based on the child class you choose.
+ To manually overwrite you can use this classes `set_...` methods.
+ """
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
+ """
+ Initialize the embedder class.
+
+ :param config: embedder configuration option class, defaults to None
+ :type config: Optional[BaseEmbedderConfig], optional
+ """
if config is None:
self.config = BaseEmbedderConfig()
else:
@@ -30,20 +42,49 @@ self.vector_dimension: int
def set_embedding_fn(self, embedding_fn: Callable[[list[str]], list[str]]):
+ """
+ Set or overwrite the embedding function to be used by the database to store and retrieve documents.
+
+ :param embedding_fn: Function to be used to generate embeddings.
+ :type embedding_fn: Callable[[list[str]], list[str]]
+ :raises ValueError: Embedding function is not callable.
+ """
if not hasattr(embedding_fn, "__call__"):
raise ValueError("Embedding function is not a function")
self.embedding_fn = embedding_fn
def set_vector_dimension(self, vector_dimension: int):
+ """
+ Set or overwrite the vector dimension size
+
+ :param vector_dimension: vector dimension size
+ :type vector_dimension: int
+ """
if not isinstance(vector_dimension, int):
raise TypeError("vector dimension must be int")
self.vector_dimension = vector_dimension
@staticmethod
def _langchain_default_concept(embeddings: Any):
+ """
+ Langchains default function layout for embeddings.
+
+ :param embeddings: Langchain embeddings
+ :type embeddings: Any
+ :return: embedding function
+ :rtype: Callable
+ """
return EmbeddingFunc(embeddings.embed_documents)
def to_embeddings(self, data: str, **_):
+ """
+ Convert data to embeddings
+
+ :param data: data to convert to embeddings
+ :type data: str
+ :return: embeddings
+ :rtype: list[float]
+ """
embeddings = self.embedding_fn([data])
- return embeddings[0]+ return embeddings[0]
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/embedder/base.py |
Generate documentation strings for clarity | from typing import Any, Optional
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class CacheSimilarityEvalConfig(BaseConfig):
def __init__(
self,
strategy: Optional[str] = "distance",
max_distance: Optional[float] = 1.0,
positive: Optional[bool] = False,
):
self.strategy = strategy
self.max_distance = max_distance
self.positive = positive
@staticmethod
def from_config(config: Optional[dict[str, Any]]):
if config is None:
return CacheSimilarityEvalConfig()
else:
return CacheSimilarityEvalConfig(
strategy=config.get("strategy", "distance"),
max_distance=config.get("max_distance", 1.0),
positive=config.get("positive", False),
)
@register_deserializable
class CacheInitConfig(BaseConfig):
def __init__(
self,
similarity_threshold: Optional[float] = 0.8,
auto_flush: Optional[int] = 20,
):
if similarity_threshold < 0 or similarity_threshold > 1:
raise ValueError(f"similarity_threshold {similarity_threshold} should be between 0 and 1")
self.similarity_threshold = similarity_threshold
self.auto_flush = auto_flush
@staticmethod
def from_config(config: Optional[dict[str, Any]]):
if config is None:
return CacheInitConfig()
else:
return CacheInitConfig(
similarity_threshold=config.get("similarity_threshold", 0.8),
auto_flush=config.get("auto_flush", 20),
)
@register_deserializable
class CacheConfig(BaseConfig):
def __init__(
self,
similarity_eval_config: Optional[CacheSimilarityEvalConfig] = CacheSimilarityEvalConfig(),
init_config: Optional[CacheInitConfig] = CacheInitConfig(),
):
self.similarity_eval_config = similarity_eval_config
self.init_config = init_config
@staticmethod
def from_config(config: Optional[dict[str, Any]]):
if config is None:
return CacheConfig()
else:
return CacheConfig(
similarity_eval_config=CacheSimilarityEvalConfig.from_config(config.get("similarity_evaluation", {})),
init_config=CacheInitConfig.from_config(config.get("init_config", {})),
) | --- +++ @@ -6,6 +6,18 @@
@register_deserializable
class CacheSimilarityEvalConfig(BaseConfig):
+ """
+ This is the evaluator to compare two embeddings according to their distance computed in embedding retrieval stage.
+ In the retrieval stage, `search_result` is the distance used for approximate nearest neighbor search and have been
+ put into `cache_dict`. `max_distance` is used to bound this distance to make it between [0-`max_distance`].
+ `positive` is used to indicate this distance is directly proportional to the similarity of two entities.
+ If `positive` is set `False`, `max_distance` will be used to subtract this distance to get the final score.
+
+ :param max_distance: the bound of maximum distance.
+ :type max_distance: float
+ :param positive: if the larger distance indicates more similar of two entities, It is True. Otherwise, it is False.
+ :type positive: bool
+ """
def __init__(
self,
@@ -31,6 +43,15 @@
@register_deserializable
class CacheInitConfig(BaseConfig):
+ """
+ This is a cache init config. Used to initialize a cache.
+
+ :param similarity_threshold: a threshold ranged from 0 to 1 to filter search results with similarity score higher \
+ than the threshold. When it is 0, there is no hits. When it is 1, all search results will be returned as hits.
+ :type similarity_threshold: float
+ :param auto_flush: it will be automatically flushed every time xx pieces of data are added, default to 20
+ :type auto_flush: int
+ """
def __init__(
self,
@@ -72,4 +93,4 @@ return CacheConfig(
similarity_eval_config=CacheSimilarityEvalConfig.from_config(config.get("similarity_evaluation", {})),
init_config=CacheInitConfig.from_config(config.get("init_config", {})),
- )+ )
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/config/cache_config.py |
Annotate my code with docstrings | from dotenv import load_dotenv
from fastapi import Body, FastAPI, responses
from modal import Image, Secret, Stub, asgi_app
from embedchain import App
load_dotenv(".env")
image = Image.debian_slim().pip_install(
"embedchain",
"lanchain_community==0.2.6",
"youtube-transcript-api==0.6.1",
"pytube==15.0.0",
"beautifulsoup4==4.12.3",
"slack-sdk==3.21.3",
"huggingface_hub==0.23.0",
"gitpython==3.1.38",
"yt_dlp==2023.11.14",
"PyGithub==1.59.1",
"feedparser==6.0.10",
"newspaper3k==0.2.8",
"listparser==0.19",
)
stub = Stub(
name="embedchain-app",
image=image,
secrets=[Secret.from_dotenv(".env")],
)
web_app = FastAPI()
embedchain_app = App(name="embedchain-modal-app")
@web_app.post("/add")
async def add(
source: str = Body(..., description="Source to be added"),
data_type: str | None = Body(None, description="Type of the data source"),
):
if source and data_type:
embedchain_app.add(source, data_type)
elif source:
embedchain_app.add(source)
else:
return {"message": "No source provided."}
return {"message": f"Source '{source}' added successfully."}
@web_app.post("/query")
async def query(question: str = Body(..., description="Question to be answered")):
if not question:
return {"message": "No question provided."}
answer = embedchain_app.query(question)
return {"answer": answer}
@web_app.get("/chat")
async def chat(question: str = Body(..., description="Question to be answered")):
if not question:
return {"message": "No question provided."}
response = embedchain_app.chat(question)
return {"response": response}
@web_app.get("/")
async def root():
return responses.RedirectResponse(url="/docs")
@stub.function(image=image)
@asgi_app()
def fastapi_app():
return web_app | --- +++ @@ -37,6 +37,11 @@ source: str = Body(..., description="Source to be added"),
data_type: str | None = Body(None, description="Type of the data source"),
):
+ """
+ Adds a new source to the EmbedChain app.
+ Expects a JSON with a "source" and "data_type" key.
+ "data_type" is optional.
+ """
if source and data_type:
embedchain_app.add(source, data_type)
elif source:
@@ -48,6 +53,10 @@
@web_app.post("/query")
async def query(question: str = Body(..., description="Question to be answered")):
+ """
+ Handles a query to the EmbedChain app.
+ Expects a JSON with a "question" key.
+ """
if not question:
return {"message": "No question provided."}
answer = embedchain_app.query(question)
@@ -56,6 +65,10 @@
@web_app.get("/chat")
async def chat(question: str = Body(..., description="Question to be answered")):
+ """
+ Handles a chat request to the EmbedChain app.
+ Expects a JSON with a "question" key.
+ """
if not question:
return {"message": "No question provided."}
response = embedchain_app.chat(question)
@@ -70,4 +83,4 @@ @stub.function(image=image)
@asgi_app()
def fastapi_app():
- return web_app+ return web_app
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/deployment/modal.com/app.py |
Add standardized docstrings across the file | from typing import Any
from embedchain import App
from embedchain.config import AddConfig, AppConfig, BaseLlmConfig
from embedchain.embedder.openai import OpenAIEmbedder
from embedchain.helpers.json_serializable import (
JSONSerializable,
register_deserializable,
)
from embedchain.llm.openai import OpenAILlm
from embedchain.vectordb.chroma import ChromaDB
@register_deserializable
class BaseBot(JSONSerializable):
def __init__(self):
self.app = App(config=AppConfig(), llm=OpenAILlm(), db=ChromaDB(), embedding_model=OpenAIEmbedder())
def add(self, data: Any, config: AddConfig = None):
config = config if config else AddConfig()
self.app.add(data, config=config)
def query(self, query: str, config: BaseLlmConfig = None) -> str:
config = config
return self.app.query(query, config=config)
def start(self):
raise NotImplementedError("Subclasses must implement the start method.") | --- +++ @@ -17,12 +17,32 @@ self.app = App(config=AppConfig(), llm=OpenAILlm(), db=ChromaDB(), embedding_model=OpenAIEmbedder())
def add(self, data: Any, config: AddConfig = None):
+ """
+ Add data to the bot (to the vector database).
+ Auto-dectects type only, so some data types might not be usable.
+
+ :param data: data to embed
+ :type data: Any
+ :param config: configuration class instance, defaults to None
+ :type config: AddConfig, optional
+ """
config = config if config else AddConfig()
self.app.add(data, config=config)
def query(self, query: str, config: BaseLlmConfig = None) -> str:
+ """
+ Query the bot
+
+ :param query: the user query
+ :type query: str
+ :param config: configuration class instance, defaults to None
+ :type config: BaseLlmConfig, optional
+ :return: Answer
+ :rtype: str
+ """
config = config
return self.app.query(query, config=config)
def start(self):
- raise NotImplementedError("Subclasses must implement the start method.")+ """Start the bot's functionality."""
+ raise NotImplementedError("Subclasses must implement the start method.")
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/bots/base.py |
Add docstrings to clarify complex logic | from typing import Optional
from embedchain.helpers.json_serializable import register_deserializable
from .base_app_config import BaseAppConfig
@register_deserializable
class AppConfig(BaseAppConfig):
def __init__(
self,
log_level: str = "WARNING",
id: Optional[str] = None,
name: Optional[str] = None,
collect_metrics: Optional[bool] = True,
**kwargs,
):
self.name = name
super().__init__(log_level=log_level, id=id, collect_metrics=collect_metrics, **kwargs) | --- +++ @@ -7,6 +7,9 @@
@register_deserializable
class AppConfig(BaseAppConfig):
+ """
+ Config to initialize an embedchain custom `App` instance, with extra config options.
+ """
def __init__(
self,
@@ -16,5 +19,16 @@ collect_metrics: Optional[bool] = True,
**kwargs,
):
+ """
+ Initializes a configuration class instance for an App. This is the simplest form of an embedchain app.
+ Most of the configuration is done in the `App` class itself.
+
+ :param log_level: Debug level ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], defaults to "WARNING"
+ :type log_level: str, optional
+ :param id: ID of the app. Document metadata will have this id., defaults to None
+ :type id: Optional[str], optional
+ :param collect_metrics: Send anonymous telemetry to improve embedchain, defaults to True
+ :type collect_metrics: Optional[bool], optional
+ """
self.name = name
- super().__init__(log_level=log_level, id=id, collect_metrics=collect_metrics, **kwargs)+ super().__init__(log_level=log_level, id=id, collect_metrics=collect_metrics, **kwargs)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/config/app_config.py |
Add docstrings to improve readability | import logging
from typing import Optional
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
class BaseAppConfig(BaseConfig, JSONSerializable):
def __init__(
self,
log_level: str = "WARNING",
db: Optional[BaseVectorDB] = None,
id: Optional[str] = None,
collect_metrics: bool = True,
collection_name: Optional[str] = None,
):
self.id = id
self.collect_metrics = True if (collect_metrics is True or collect_metrics is None) else False
self.collection_name = collection_name
if db:
self._db = db
logger.warning(
"DEPRECATION WARNING: Please supply the database as the second parameter during app init. "
"Such as `app(config=config, db=db)`."
)
if collection_name:
logger.warning("DEPRECATION WARNING: Please supply the collection name to the database config.")
return
def _setup_logging(self, log_level):
logger.basicConfig(format="%(asctime)s [%(name)s] [%(levelname)s] %(message)s", level=log_level)
self.logger = logger.getLogger(__name__) | --- +++ @@ -9,6 +9,9 @@
class BaseAppConfig(BaseConfig, JSONSerializable):
+ """
+ Parent config to initialize an instance of `App`.
+ """
def __init__(
self,
@@ -18,6 +21,23 @@ collect_metrics: bool = True,
collection_name: Optional[str] = None,
):
+ """
+ Initializes a configuration class instance for an App.
+ Most of the configuration is done in the `App` class itself.
+
+ :param log_level: Debug level ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], defaults to "WARNING"
+ :type log_level: str, optional
+ :param db: A database class. It is recommended to set this directly in the `App` class, not this config,
+ defaults to None
+ :type db: Optional[BaseVectorDB], optional
+ :param id: ID of the app. Document metadata will have this id., defaults to None
+ :type id: Optional[str], optional
+ :param collect_metrics: Send anonymous telemetry to improve embedchain, defaults to True
+ :type collect_metrics: Optional[bool], optional
+ :param collection_name: Default collection name. It's recommended to use app.db.set_collection_name() instead,
+ defaults to None
+ :type collection_name: Optional[str], optional
+ """
self.id = id
self.collect_metrics = True if (collect_metrics is True or collect_metrics is None) else False
self.collection_name = collection_name
@@ -35,4 +55,4 @@
def _setup_logging(self, log_level):
logger.basicConfig(format="%(asctime)s [%(name)s] [%(levelname)s] %(message)s", level=log_level)
- self.logger = logger.getLogger(__name__)+ self.logger = logger.getLogger(__name__)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/config/base_app_config.py |
Write docstrings that follow conventions | from importlib import import_module
from typing import Any, Optional
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config import AddConfig
from embedchain.config.add_config import ChunkerConfig, LoaderConfig
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.models.data_type import DataType
class DataFormatter(JSONSerializable):
def __init__(
self,
data_type: DataType,
config: AddConfig,
loader: Optional[BaseLoader] = None,
chunker: Optional[BaseChunker] = None,
):
self.loader = self._get_loader(data_type=data_type, config=config.loader, loader=loader)
self.chunker = self._get_chunker(data_type=data_type, config=config.chunker, chunker=chunker)
@staticmethod
def _lazy_load(module_path: str):
module_path, class_name = module_path.rsplit(".", 1)
module = import_module(module_path)
return getattr(module, class_name)
def _get_loader(
self,
data_type: DataType,
config: LoaderConfig,
loader: Optional[BaseLoader],
**kwargs: Optional[dict[str, Any]],
) -> BaseLoader:
loaders = {
DataType.YOUTUBE_VIDEO: "embedchain.loaders.youtube_video.YoutubeVideoLoader",
DataType.PDF_FILE: "embedchain.loaders.pdf_file.PdfFileLoader",
DataType.WEB_PAGE: "embedchain.loaders.web_page.WebPageLoader",
DataType.QNA_PAIR: "embedchain.loaders.local_qna_pair.LocalQnaPairLoader",
DataType.TEXT: "embedchain.loaders.local_text.LocalTextLoader",
DataType.DOCX: "embedchain.loaders.docx_file.DocxFileLoader",
DataType.SITEMAP: "embedchain.loaders.sitemap.SitemapLoader",
DataType.XML: "embedchain.loaders.xml.XmlLoader",
DataType.DOCS_SITE: "embedchain.loaders.docs_site_loader.DocsSiteLoader",
DataType.CSV: "embedchain.loaders.csv.CsvLoader",
DataType.MDX: "embedchain.loaders.mdx.MdxLoader",
DataType.IMAGE: "embedchain.loaders.image.ImageLoader",
DataType.UNSTRUCTURED: "embedchain.loaders.unstructured_file.UnstructuredLoader",
DataType.JSON: "embedchain.loaders.json.JSONLoader",
DataType.OPENAPI: "embedchain.loaders.openapi.OpenAPILoader",
DataType.GMAIL: "embedchain.loaders.gmail.GmailLoader",
DataType.NOTION: "embedchain.loaders.notion.NotionLoader",
DataType.SUBSTACK: "embedchain.loaders.substack.SubstackLoader",
DataType.YOUTUBE_CHANNEL: "embedchain.loaders.youtube_channel.YoutubeChannelLoader",
DataType.DISCORD: "embedchain.loaders.discord.DiscordLoader",
DataType.RSSFEED: "embedchain.loaders.rss_feed.RSSFeedLoader",
DataType.BEEHIIV: "embedchain.loaders.beehiiv.BeehiivLoader",
DataType.GOOGLE_DRIVE: "embedchain.loaders.google_drive.GoogleDriveLoader",
DataType.DIRECTORY: "embedchain.loaders.directory_loader.DirectoryLoader",
DataType.SLACK: "embedchain.loaders.slack.SlackLoader",
DataType.DROPBOX: "embedchain.loaders.dropbox.DropboxLoader",
DataType.TEXT_FILE: "embedchain.loaders.text_file.TextFileLoader",
DataType.EXCEL_FILE: "embedchain.loaders.excel_file.ExcelFileLoader",
DataType.AUDIO: "embedchain.loaders.audio.AudioLoader",
}
if data_type == DataType.CUSTOM or loader is not None:
loader_class: type = loader
if loader_class:
return loader_class
elif data_type in loaders:
loader_class: type = self._lazy_load(loaders[data_type])
return loader_class()
raise ValueError(
f"Cant find the loader for {data_type}.\
We recommend to pass the loader to use data_type: {data_type},\
check `https://docs.embedchain.ai/data-sources/overview`."
)
def _get_chunker(self, data_type: DataType, config: ChunkerConfig, chunker: Optional[BaseChunker]) -> BaseChunker:
chunker_classes = {
DataType.YOUTUBE_VIDEO: "embedchain.chunkers.youtube_video.YoutubeVideoChunker",
DataType.PDF_FILE: "embedchain.chunkers.pdf_file.PdfFileChunker",
DataType.WEB_PAGE: "embedchain.chunkers.web_page.WebPageChunker",
DataType.QNA_PAIR: "embedchain.chunkers.qna_pair.QnaPairChunker",
DataType.TEXT: "embedchain.chunkers.text.TextChunker",
DataType.DOCX: "embedchain.chunkers.docx_file.DocxFileChunker",
DataType.SITEMAP: "embedchain.chunkers.sitemap.SitemapChunker",
DataType.XML: "embedchain.chunkers.xml.XmlChunker",
DataType.DOCS_SITE: "embedchain.chunkers.docs_site.DocsSiteChunker",
DataType.CSV: "embedchain.chunkers.table.TableChunker",
DataType.MDX: "embedchain.chunkers.mdx.MdxChunker",
DataType.IMAGE: "embedchain.chunkers.image.ImageChunker",
DataType.UNSTRUCTURED: "embedchain.chunkers.unstructured_file.UnstructuredFileChunker",
DataType.JSON: "embedchain.chunkers.json.JSONChunker",
DataType.OPENAPI: "embedchain.chunkers.openapi.OpenAPIChunker",
DataType.GMAIL: "embedchain.chunkers.gmail.GmailChunker",
DataType.NOTION: "embedchain.chunkers.notion.NotionChunker",
DataType.SUBSTACK: "embedchain.chunkers.substack.SubstackChunker",
DataType.YOUTUBE_CHANNEL: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.DISCORD: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.CUSTOM: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.RSSFEED: "embedchain.chunkers.rss_feed.RSSFeedChunker",
DataType.BEEHIIV: "embedchain.chunkers.beehiiv.BeehiivChunker",
DataType.GOOGLE_DRIVE: "embedchain.chunkers.google_drive.GoogleDriveChunker",
DataType.DIRECTORY: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.SLACK: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.DROPBOX: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.TEXT_FILE: "embedchain.chunkers.common_chunker.CommonChunker",
DataType.EXCEL_FILE: "embedchain.chunkers.excel_file.ExcelFileChunker",
DataType.AUDIO: "embedchain.chunkers.audio.AudioChunker",
}
if chunker is not None:
return chunker
elif data_type in chunker_classes:
chunker_class = self._lazy_load(chunker_classes[data_type])
chunker = chunker_class(config)
chunker.set_data_type(data_type)
return chunker
raise ValueError(
f"Cant find the chunker for {data_type}.\
We recommend to pass the chunker to use data_type: {data_type},\
check `https://docs.embedchain.ai/data-sources/overview`."
) | --- +++ @@ -10,6 +10,11 @@
class DataFormatter(JSONSerializable):
+ """
+ DataFormatter is an internal utility class which abstracts the mapping for
+ loaders and chunkers to the data_type entered by the user in their
+ .add or .add_local method call
+ """
def __init__(
self,
@@ -18,6 +23,14 @@ loader: Optional[BaseLoader] = None,
chunker: Optional[BaseChunker] = None,
):
+ """
+ Initialize a dataformatter, set data type and chunker based on datatype.
+
+ :param data_type: The type of the data to load and chunk.
+ :type data_type: DataType
+ :param config: AddConfig instance with nested loader and chunker config attributes.
+ :type config: AddConfig
+ """
self.loader = self._get_loader(data_type=data_type, config=config.loader, loader=loader)
self.chunker = self._get_chunker(data_type=data_type, config=config.chunker, chunker=chunker)
@@ -34,6 +47,17 @@ loader: Optional[BaseLoader],
**kwargs: Optional[dict[str, Any]],
) -> BaseLoader:
+ """
+ Returns the appropriate data loader for the given data type.
+
+ :param data_type: The type of the data to load.
+ :type data_type: DataType
+ :param config: Config to initialize the loader with.
+ :type config: LoaderConfig
+ :raises ValueError: If an unsupported data type is provided.
+ :return: The loader for the given data type.
+ :rtype: BaseLoader
+ """
loaders = {
DataType.YOUTUBE_VIDEO: "embedchain.loaders.youtube_video.YoutubeVideoLoader",
DataType.PDF_FILE: "embedchain.loaders.pdf_file.PdfFileLoader",
@@ -81,6 +105,7 @@ )
def _get_chunker(self, data_type: DataType, config: ChunkerConfig, chunker: Optional[BaseChunker]) -> BaseChunker:
+ """Returns the appropriate chunker for the given data type (updated for lazy loading)."""
chunker_classes = {
DataType.YOUTUBE_VIDEO: "embedchain.chunkers.youtube_video.YoutubeVideoChunker",
DataType.PDF_FILE: "embedchain.chunkers.pdf_file.PdfFileChunker",
@@ -126,4 +151,4 @@ f"Cant find the chunker for {data_type}.\
We recommend to pass the chunker to use data_type: {data_type},\
check `https://docs.embedchain.ai/data-sources/overview`."
- )+ )
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/data_formatter/data_formatter.py |
Add well-formatted docstrings | from fastapi import FastAPI, responses
from pydantic import BaseModel
from embedchain import App
app = FastAPI(title="Embedchain FastAPI App")
embedchain_app = App()
class SourceModel(BaseModel):
source: str
class QuestionModel(BaseModel):
question: str
@app.post("/add")
async def add_source(source_model: SourceModel):
source = source_model.source
embedchain_app.add(source)
return {"message": f"Source '{source}' added successfully."}
@app.post("/query")
async def handle_query(question_model: QuestionModel):
question = question_model.question
answer = embedchain_app.query(question)
return {"answer": answer}
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
question = question_model.question
response = embedchain_app.chat(question)
return {"response": response}
@app.get("/")
async def root():
return responses.RedirectResponse(url="/docs") | --- +++ @@ -17,6 +17,10 @@
@app.post("/add")
async def add_source(source_model: SourceModel):
+ """
+ Adds a new source to the EmbedChain app.
+ Expects a JSON with a "source" key.
+ """
source = source_model.source
embedchain_app.add(source)
return {"message": f"Source '{source}' added successfully."}
@@ -24,6 +28,10 @@
@app.post("/query")
async def handle_query(question_model: QuestionModel):
+ """
+ Handles a query to the EmbedChain app.
+ Expects a JSON with a "question" key.
+ """
question = question_model.question
answer = embedchain_app.query(question)
return {"answer": answer}
@@ -31,6 +39,10 @@
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
+ """
+ Handles a chat request to the EmbedChain app.
+ Expects a JSON with a "question" key.
+ """
question = question_model.question
response = embedchain_app.chat(question)
return {"response": response}
@@ -38,4 +50,4 @@
@app.get("/")
async def root():
- return responses.RedirectResponse(url="/docs")+ return responses.RedirectResponse(url="/docs")
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/deployment/render.com/app.py |
Add structured docstrings to improve clarity | import hashlib
import json
import logging
from typing import Any, Optional, Union
from dotenv import load_dotenv
from langchain.docstore.document import Document
from embedchain.cache import (
adapt,
get_gptcache_session,
gptcache_data_convert,
gptcache_update_cache_callback,
)
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config import AddConfig, BaseLlmConfig, ChunkerConfig
from embedchain.config.base_app_config import BaseAppConfig
from embedchain.core.db.models import ChatHistory, DataSource
from embedchain.data_formatter import DataFormatter
from embedchain.embedder.base import BaseEmbedder
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.llm.base import BaseLlm
from embedchain.loaders.base_loader import BaseLoader
from embedchain.models.data_type import (
DataType,
DirectDataType,
IndirectDataType,
SpecialDataType,
)
from embedchain.utils.misc import detect_datatype, is_valid_json_string
from embedchain.vectordb.base import BaseVectorDB
load_dotenv()
logger = logging.getLogger(__name__)
class EmbedChain(JSONSerializable):
def __init__(
self,
config: BaseAppConfig,
llm: BaseLlm,
db: BaseVectorDB = None,
embedder: BaseEmbedder = None,
system_prompt: Optional[str] = None,
):
self.config = config
self.cache_config = None
self.memory_config = None
self.mem0_memory = None
# Llm
self.llm = llm
# Database has support for config assignment for backwards compatibility
if db is None and (not hasattr(self.config, "db") or self.config.db is None):
raise ValueError("App requires Database.")
self.db = db or self.config.db
# Embedder
if embedder is None:
raise ValueError("App requires Embedder.")
self.embedder = embedder
# Initialize database
self.db._set_embedder(self.embedder)
self.db._initialize()
# Set collection name from app config for backwards compatibility.
if config.collection_name:
self.db.set_collection_name(config.collection_name)
# Add variables that are "shortcuts"
if system_prompt:
self.llm.config.system_prompt = system_prompt
# Fetch the history from the database if exists
self.llm.update_history(app_id=self.config.id)
# Attributes that aren't subclass related.
self.user_asks = []
self.chunker: Optional[ChunkerConfig] = None
@property
def collect_metrics(self):
return self.config.collect_metrics
@collect_metrics.setter
def collect_metrics(self, value):
if not isinstance(value, bool):
raise ValueError(f"Boolean value expected but got {type(value)}.")
self.config.collect_metrics = value
@property
def online(self):
return self.llm.config.online
@online.setter
def online(self, value):
if not isinstance(value, bool):
raise ValueError(f"Boolean value expected but got {type(value)}.")
self.llm.config.online = value
def add(
self,
source: Any,
data_type: Optional[DataType] = None,
metadata: Optional[dict[str, Any]] = None,
config: Optional[AddConfig] = None,
dry_run=False,
loader: Optional[BaseLoader] = None,
chunker: Optional[BaseChunker] = None,
**kwargs: Optional[dict[str, Any]],
):
if config is not None:
pass
elif self.chunker is not None:
config = AddConfig(chunker=self.chunker)
else:
config = AddConfig()
try:
DataType(source)
logger.warning(
f"""Starting from version v0.0.40, Embedchain can automatically detect the data type. So, in the `add` method, the argument order has changed. You no longer need to specify '{source}' for the `source` argument. So the code snippet will be `.add("{data_type}", "{source}")`""" # noqa #E501
)
logger.warning(
"Embedchain is swapping the arguments for you. This functionality might be deprecated in the future, so please adjust your code." # noqa #E501
)
source, data_type = data_type, source
except ValueError:
pass
if data_type:
try:
data_type = DataType(data_type)
except ValueError:
logger.info(
f"Invalid data_type: '{data_type}', using `custom` instead.\n Check docs to pass the valid data type: `https://docs.embedchain.ai/data-sources/overview`" # noqa: E501
)
data_type = DataType.CUSTOM
if not data_type:
data_type = detect_datatype(source)
# `source_hash` is the md5 hash of the source argument
source_hash = hashlib.md5(str(source).encode("utf-8")).hexdigest()
self.user_asks.append([source, data_type.value, metadata])
data_formatter = DataFormatter(data_type, config, loader, chunker)
documents, metadatas, _ids, new_chunks = self._load_and_embed(
data_formatter.loader, data_formatter.chunker, source, metadata, source_hash, config, dry_run, **kwargs
)
if data_type in {DataType.DOCS_SITE}:
self.is_docs_site_instance = True
# Convert the source to a string if it is not already
if not isinstance(source, str):
source = str(source)
# Insert the data into the 'ec_data_sources' table
self.db_session.add(
DataSource(
hash=source_hash,
app_id=self.config.id,
type=data_type.value,
value=source,
metadata=json.dumps(metadata),
)
)
try:
self.db_session.commit()
except Exception as e:
logger.error(f"Error adding data source: {e}")
self.db_session.rollback()
if dry_run:
data_chunks_info = {"chunks": documents, "metadata": metadatas, "count": len(documents), "type": data_type}
logger.debug(f"Dry run info : {data_chunks_info}")
return data_chunks_info
# Send anonymous telemetry
if self.config.collect_metrics:
# it's quicker to check the variable twice than to count words when they won't be submitted.
word_count = data_formatter.chunker.get_word_count(documents)
# Send anonymous telemetry
event_properties = {
**self._telemetry_props,
"data_type": data_type.value,
"word_count": word_count,
"chunks_count": new_chunks,
}
self.telemetry.capture(event_name="add", properties=event_properties)
return source_hash
def _get_existing_doc_id(self, chunker: BaseChunker, src: Any):
# Find existing embeddings for the source
# Depending on the data type, existing embeddings are checked for.
if chunker.data_type.value in [item.value for item in DirectDataType]:
# DirectDataTypes can't be updated.
# Think of a text:
# Either it's the same, then it won't change, so it's not an update.
# Or it's different, then it will be added as a new text.
return None
elif chunker.data_type.value in [item.value for item in IndirectDataType]:
# These types have an indirect source reference
# As long as the reference is the same, they can be updated.
where = {"url": src}
if chunker.data_type == DataType.JSON and is_valid_json_string(src):
url = hashlib.sha256((src).encode("utf-8")).hexdigest()
where = {"url": url}
if self.config.id is not None:
where.update({"app_id": self.config.id})
existing_embeddings = self.db.get(
where=where,
limit=1,
)
if len(existing_embeddings.get("metadatas", [])) > 0:
return existing_embeddings["metadatas"][0]["doc_id"]
else:
return None
elif chunker.data_type.value in [item.value for item in SpecialDataType]:
# These types don't contain indirect references.
# Through custom logic, they can be attributed to a source and be updated.
if chunker.data_type == DataType.QNA_PAIR:
# QNA_PAIRs update the answer if the question already exists.
where = {"question": src[0]}
if self.config.id is not None:
where.update({"app_id": self.config.id})
existing_embeddings = self.db.get(
where=where,
limit=1,
)
if len(existing_embeddings.get("metadatas", [])) > 0:
return existing_embeddings["metadatas"][0]["doc_id"]
else:
return None
else:
raise NotImplementedError(
f"SpecialDataType {chunker.data_type} must have a custom logic to check for existing data"
)
else:
raise TypeError(
f"{chunker.data_type} is type {type(chunker.data_type)}. "
"When it should be DirectDataType, IndirectDataType or SpecialDataType."
)
def _load_and_embed(
self,
loader: BaseLoader,
chunker: BaseChunker,
src: Any,
metadata: Optional[dict[str, Any]] = None,
source_hash: Optional[str] = None,
add_config: Optional[AddConfig] = None,
dry_run=False,
**kwargs: Optional[dict[str, Any]],
):
existing_doc_id = self._get_existing_doc_id(chunker=chunker, src=src)
app_id = self.config.id if self.config is not None else None
# Create chunks
embeddings_data = chunker.create_chunks(loader, src, app_id=app_id, config=add_config.chunker, **kwargs)
# spread chunking results
documents = embeddings_data["documents"]
metadatas = embeddings_data["metadatas"]
ids = embeddings_data["ids"]
new_doc_id = embeddings_data["doc_id"]
if existing_doc_id and existing_doc_id == new_doc_id:
logger.info("Doc content has not changed. Skipping creating chunks and embeddings")
return [], [], [], 0
# this means that doc content has changed.
if existing_doc_id and existing_doc_id != new_doc_id:
logger.info("Doc content has changed. Recomputing chunks and embeddings intelligently.")
self.db.delete({"doc_id": existing_doc_id})
# get existing ids, and discard doc if any common id exist.
where = {"url": src}
if chunker.data_type == DataType.JSON and is_valid_json_string(src):
url = hashlib.sha256((src).encode("utf-8")).hexdigest()
where = {"url": url}
# if data type is qna_pair, we check for question
if chunker.data_type == DataType.QNA_PAIR:
where = {"question": src[0]}
if self.config.id is not None:
where["app_id"] = self.config.id
db_result = self.db.get(ids=ids, where=where) # optional filter
existing_ids = set(db_result["ids"])
if len(existing_ids):
data_dict = {id: (doc, meta) for id, doc, meta in zip(ids, documents, metadatas)}
data_dict = {id: value for id, value in data_dict.items() if id not in existing_ids}
if not data_dict:
src_copy = src
if len(src_copy) > 50:
src_copy = src[:50] + "..."
logger.info(f"All data from {src_copy} already exists in the database.")
# Make sure to return a matching return type
return [], [], [], 0
ids = list(data_dict.keys())
documents, metadatas = zip(*data_dict.values())
# Loop though all metadatas and add extras.
new_metadatas = []
for m in metadatas:
# Add app id in metadatas so that they can be queried on later
if self.config.id:
m["app_id"] = self.config.id
# Add hashed source
m["hash"] = source_hash
# Note: Metadata is the function argument
if metadata:
# Spread whatever is in metadata into the new object.
m.update(metadata)
new_metadatas.append(m)
metadatas = new_metadatas
if dry_run:
return list(documents), metadatas, ids, 0
# Count before, to calculate a delta in the end.
chunks_before_addition = self.db.count()
# Filter out empty documents and ensure they meet the API requirements
valid_documents = [doc for doc in documents if doc and isinstance(doc, str)]
documents = valid_documents
# Chunk documents into batches of 2048 and handle each batch
# helps wigth large loads of embeddings that hit OpenAI limits
document_batches = [documents[i : i + 2048] for i in range(0, len(documents), 2048)]
metadata_batches = [metadatas[i : i + 2048] for i in range(0, len(metadatas), 2048)]
id_batches = [ids[i : i + 2048] for i in range(0, len(ids), 2048)]
for batch_docs, batch_meta, batch_ids in zip(document_batches, metadata_batches, id_batches):
try:
# Add only valid batches
if batch_docs:
self.db.add(documents=batch_docs, metadatas=batch_meta, ids=batch_ids, **kwargs)
except Exception as e:
logger.info(f"Failed to add batch due to a bad request: {e}")
# Handle the error, e.g., by logging, retrying, or skipping
pass
count_new_chunks = self.db.count() - chunks_before_addition
logger.info(f"Successfully saved {str(src)[:100]} ({chunker.data_type}). New chunks count: {count_new_chunks}")
return list(documents), metadatas, ids, count_new_chunks
@staticmethod
def _format_result(results):
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
def _retrieve_from_database(
self,
input_query: str,
config: Optional[BaseLlmConfig] = None,
where=None,
citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, str, str]], list[str]]:
query_config = config or self.llm.config
if where is not None:
where = where
else:
where = {}
if query_config is not None and query_config.where is not None:
where = query_config.where
if self.config.id is not None:
where.update({"app_id": self.config.id})
contexts = self.db.query(
input_query=input_query,
n_results=query_config.number_documents,
where=where,
citations=citations,
**kwargs,
)
return contexts
def query(
self,
input_query: str,
config: BaseLlmConfig = None,
dry_run=False,
where: Optional[dict] = None,
citations: bool = False,
**kwargs: dict[str, Any],
) -> Union[tuple[str, list[tuple[str, dict]]], str, dict[str, Any]]:
contexts = self._retrieve_from_database(
input_query=input_query, config=config, where=where, citations=citations, **kwargs
)
if citations and len(contexts) > 0 and isinstance(contexts[0], tuple):
contexts_data_for_llm_query = list(map(lambda x: x[0], contexts))
else:
contexts_data_for_llm_query = contexts
if self.cache_config is not None:
logger.info("Cache enabled. Checking cache...")
answer = adapt(
llm_handler=self.llm.query,
cache_data_convert=gptcache_data_convert,
update_cache_callback=gptcache_update_cache_callback,
session=get_gptcache_session(session_id=self.config.id),
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
)
else:
if self.llm.config.token_usage:
answer, token_info = self.llm.query(
input_query=input_query, contexts=contexts_data_for_llm_query, config=config, dry_run=dry_run
)
else:
answer = self.llm.query(
input_query=input_query, contexts=contexts_data_for_llm_query, config=config, dry_run=dry_run
)
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="query", properties=self._telemetry_props)
if citations:
if self.llm.config.token_usage:
return {"answer": answer, "contexts": contexts, "usage": token_info}
return answer, contexts
if self.llm.config.token_usage:
return {"answer": answer, "usage": token_info}
logger.warning(
"Starting from v0.1.125 the return type of query method will be changed to tuple containing `answer`."
)
return answer
def chat(
self,
input_query: str,
config: Optional[BaseLlmConfig] = None,
dry_run=False,
session_id: str = "default",
where: Optional[dict[str, str]] = None,
citations: bool = False,
**kwargs: dict[str, Any],
) -> Union[tuple[str, list[tuple[str, dict]]], str, dict[str, Any]]:
contexts = self._retrieve_from_database(
input_query=input_query, config=config, where=where, citations=citations, **kwargs
)
if citations and len(contexts) > 0 and isinstance(contexts[0], tuple):
contexts_data_for_llm_query = list(map(lambda x: x[0], contexts))
else:
contexts_data_for_llm_query = contexts
memories = None
if self.mem0_memory:
memories = self.mem0_memory.search(
query=input_query, agent_id=self.config.id, user_id=session_id, limit=self.memory_config.top_k
)
# Update the history beforehand so that we can handle multiple chat sessions in the same python session
self.llm.update_history(app_id=self.config.id, session_id=session_id)
if self.cache_config is not None:
logger.debug("Cache enabled. Checking cache...")
cache_id = f"{session_id}--{self.config.id}"
answer = adapt(
llm_handler=self.llm.chat,
cache_data_convert=gptcache_data_convert,
update_cache_callback=gptcache_update_cache_callback,
session=get_gptcache_session(session_id=cache_id),
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
)
else:
logger.debug("Cache disabled. Running chat without cache.")
if self.llm.config.token_usage:
answer, token_info = self.llm.query(
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
memories=memories,
)
else:
answer = self.llm.query(
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
memories=memories,
)
# Add to Mem0 memory if enabled
# Adding answer here because it would be much useful than input question itself
if self.mem0_memory:
self.mem0_memory.add(data=answer, agent_id=self.config.id, user_id=session_id)
# add conversation in memory
self.llm.add_history(self.config.id, input_query, answer, session_id=session_id)
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="chat", properties=self._telemetry_props)
if citations:
if self.llm.config.token_usage:
return {"answer": answer, "contexts": contexts, "usage": token_info}
return answer, contexts
if self.llm.config.token_usage:
return {"answer": answer, "usage": token_info}
logger.warning(
"Starting from v0.1.125 the return type of query method will be changed to tuple containing `answer`."
)
return answer
def search(self, query, num_documents=3, where=None, raw_filter=None, namespace=None):
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="search", properties=self._telemetry_props)
if raw_filter and where:
raise ValueError("You can't use both `raw_filter` and `where` together.")
filter_type = "raw_filter" if raw_filter else "where"
filter_criteria = raw_filter if raw_filter else where
params = {
"input_query": query,
"n_results": num_documents,
"citations": True,
"app_id": self.config.id,
"namespace": namespace,
filter_type: filter_criteria,
}
return [{"context": c[0], "metadata": c[1]} for c in self.db.query(**params)]
def set_collection_name(self, name: str):
self.db.set_collection_name(name)
# Create the collection if it does not exist
self.db._get_or_create_collection(name)
# TODO: Check whether it is necessary to assign to the `self.collection` attribute,
# since the main purpose is the creation.
def reset(self):
try:
self.db_session.query(DataSource).filter_by(app_id=self.config.id).delete()
self.db_session.query(ChatHistory).filter_by(app_id=self.config.id).delete()
self.db_session.commit()
except Exception as e:
logger.error(f"Error deleting data sources: {e}")
self.db_session.rollback()
return None
self.db.reset()
self.delete_all_chat_history(app_id=self.config.id)
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="reset", properties=self._telemetry_props)
def get_history(
self,
num_rounds: int = 10,
display_format: bool = True,
session_id: Optional[str] = "default",
fetch_all: bool = False,
):
history = self.llm.memory.get(
app_id=self.config.id,
session_id=session_id,
num_rounds=num_rounds,
display_format=display_format,
fetch_all=fetch_all,
)
return history
def delete_session_chat_history(self, session_id: str = "default"):
self.llm.memory.delete(app_id=self.config.id, session_id=session_id)
self.llm.update_history(app_id=self.config.id)
def delete_all_chat_history(self, app_id: str):
self.llm.memory.delete(app_id=app_id)
self.llm.update_history(app_id=app_id)
def delete(self, source_id: str):
try:
self.db_session.query(DataSource).filter_by(hash=source_id, app_id=self.config.id).delete()
self.db_session.commit()
except Exception as e:
logger.error(f"Error deleting data sources: {e}")
self.db_session.rollback()
return None
self.db.delete(where={"hash": source_id})
logger.info(f"Successfully deleted {source_id}")
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="delete", properties=self._telemetry_props) | --- +++ @@ -44,6 +44,22 @@ embedder: BaseEmbedder = None,
system_prompt: Optional[str] = None,
):
+ """
+ Initializes the EmbedChain instance, sets up a vector DB client and
+ creates a collection.
+
+ :param config: Configuration just for the app, not the db or llm or embedder.
+ :type config: BaseAppConfig
+ :param llm: Instance of the LLM you want to use.
+ :type llm: BaseLlm
+ :param db: Instance of the Database to use, defaults to None
+ :type db: BaseVectorDB, optional
+ :param embedder: instance of the embedder to use, defaults to None
+ :type embedder: BaseEmbedder, optional
+ :param system_prompt: System prompt to use in the llm query, defaults to None
+ :type system_prompt: Optional[str], optional
+ :raises ValueError: No database or embedder provided.
+ """
self.config = config
self.cache_config = None
self.memory_config = None
@@ -109,6 +125,33 @@ chunker: Optional[BaseChunker] = None,
**kwargs: Optional[dict[str, Any]],
):
+ """
+ Adds the data from the given URL to the vector db.
+ Loads the data, chunks it, create embedding for each chunk
+ and then stores the embedding to vector database.
+
+ :param source: The data to embed, can be a URL, local file or raw content, depending on the data type.
+ :type source: Any
+ :param data_type: Automatically detected, but can be forced with this argument. The type of the data to add,
+ defaults to None
+ :type data_type: Optional[DataType], optional
+ :param metadata: Metadata associated with the data source., defaults to None
+ :type metadata: Optional[dict[str, Any]], optional
+ :param config: The `AddConfig` instance to use as configuration options., defaults to None
+ :type config: Optional[AddConfig], optional
+ :raises ValueError: Invalid data type
+ :param dry_run: Optional. A dry run displays the chunks to ensure that the loader and chunker work as intended.
+ defaults to False
+ :type dry_run: bool
+ :param loader: The loader to use to load the data, defaults to None
+ :type loader: BaseLoader, optional
+ :param chunker: The chunker to use to chunk the data, defaults to None
+ :type chunker: BaseChunker, optional
+ :param kwargs: To read more params for the query function
+ :type kwargs: dict[str, Any]
+ :return: source_hash, a md5-hash of the source, in hexadecimal representation.
+ :rtype: str
+ """
if config is not None:
pass
elif self.chunker is not None:
@@ -194,6 +237,9 @@ return source_hash
def _get_existing_doc_id(self, chunker: BaseChunker, src: Any):
+ """
+ Get id of existing document for a given source, based on the data type
+ """
# Find existing embeddings for the source
# Depending on the data type, existing embeddings are checked for.
if chunker.data_type.value in [item.value for item in DirectDataType]:
@@ -259,6 +305,26 @@ dry_run=False,
**kwargs: Optional[dict[str, Any]],
):
+ """
+ Loads the data from the given URL, chunks it, and adds it to database.
+
+ :param loader: The loader to use to load the data.
+ :type loader: BaseLoader
+ :param chunker: The chunker to use to chunk the data.
+ :type chunker: BaseChunker
+ :param src: The data to be handled by the loader. Can be a URL for
+ remote sources or local content for local loaders.
+ :type src: Any
+ :param metadata: Metadata associated with the data source.
+ :type metadata: dict[str, Any], optional
+ :param source_hash: Hexadecimal hash of the source.
+ :type source_hash: str, optional
+ :param add_config: The `AddConfig` instance to use as configuration options.
+ :type add_config: AddConfig, optional
+ :param dry_run: A dry run returns chunks and doesn't update DB.
+ :type dry_run: bool, defaults to False
+ :return: (list) documents (embedded text), (list) metadata, (list) ids, (int) number of chunks
+ """
existing_doc_id = self._get_existing_doc_id(chunker=chunker, src=src)
app_id = self.config.id if self.config is not None else None
@@ -377,6 +443,21 @@ citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, str, str]], list[str]]:
+ """
+ Queries the vector database based on the given input query.
+ Gets relevant doc based on the query
+
+ :param input_query: The query to use.
+ :type input_query: str
+ :param config: The query configuration, defaults to None
+ :type config: Optional[BaseLlmConfig], optional
+ :param where: A dictionary of key-value pairs to filter the database results, defaults to None
+ :type where: _type_, optional
+ :param citations: A boolean to indicate if db should fetch citation source
+ :type citations: bool
+ :return: List of contents of the document that matched your query
+ :rtype: list[str]
+ """
query_config = config or self.llm.config
if where is not None:
where = where
@@ -407,6 +488,32 @@ citations: bool = False,
**kwargs: dict[str, Any],
) -> Union[tuple[str, list[tuple[str, dict]]], str, dict[str, Any]]:
+ """
+ Queries the vector database based on the given input query.
+ Gets relevant doc based on the query and then passes it to an
+ LLM as context to get the answer.
+
+ :param input_query: The query to use.
+ :type input_query: str
+ :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
+ To persistently use a config, declare it during app init., defaults to None
+ :type config: BaseLlmConfig, optional
+ :param dry_run: A dry run does everything except send the resulting prompt to
+ the LLM. The purpose is to test the prompt, not the response., defaults to False
+ :type dry_run: bool, optional
+ :param where: A dictionary of key-value pairs to filter the database results., defaults to None
+ :type where: dict[str, str], optional
+ :param citations: A boolean to indicate if db should fetch citation source
+ :type citations: bool
+ :param kwargs: To read more params for the query function. Ex. we use citations boolean
+ param to return context along with the answer
+ :type kwargs: dict[str, Any]
+ :return: The answer to the query, with citations if the citation flag is True
+ or the dry run result
+ :rtype: str, if citations is False and token_usage is False, otherwise if citations is true then
+ tuple[str, list[tuple[str,str,str]]] and if token_usage is true then
+ tuple[str, list[tuple[str,str,str]], dict[str, Any]]
+ """
contexts = self._retrieve_from_database(
input_query=input_query, config=config, where=where, citations=citations, **kwargs
)
@@ -463,6 +570,36 @@ citations: bool = False,
**kwargs: dict[str, Any],
) -> Union[tuple[str, list[tuple[str, dict]]], str, dict[str, Any]]:
+ """
+ Queries the vector database on the given input query.
+ Gets relevant doc based on the query and then passes it to an
+ LLM as context to get the answer.
+
+ Maintains the whole conversation in memory.
+
+ :param input_query: The query to use.
+ :type input_query: str
+ :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
+ To persistently use a config, declare it during app init., defaults to None
+ :type config: BaseLlmConfig, optional
+ :param dry_run: A dry run does everything except send the resulting prompt to
+ the LLM. The purpose is to test the prompt, not the response., defaults to False
+ :type dry_run: bool, optional
+ :param session_id: The session id to use for chat history, defaults to 'default'.
+ :type session_id: str, optional
+ :param where: A dictionary of key-value pairs to filter the database results., defaults to None
+ :type where: dict[str, str], optional
+ :param citations: A boolean to indicate if db should fetch citation source
+ :type citations: bool
+ :param kwargs: To read more params for the query function. Ex. we use citations boolean
+ param to return context along with the answer
+ :type kwargs: dict[str, Any]
+ :return: The answer to the query, with citations if the citation flag is True
+ or the dry run result
+ :rtype: str, if citations is False and token_usage is False, otherwise if citations is true then
+ tuple[str, list[tuple[str,str,str]]] and if token_usage is true then
+ tuple[str, list[tuple[str,str,str]], dict[str, Any]]
+ """
contexts = self._retrieve_from_database(
input_query=input_query, config=config, where=where, citations=citations, **kwargs
)
@@ -537,6 +674,22 @@ return answer
def search(self, query, num_documents=3, where=None, raw_filter=None, namespace=None):
+ """
+ Search for similar documents related to the query in the vector database.
+
+ Args:
+ query (str): The query to use.
+ num_documents (int, optional): Number of similar documents to fetch. Defaults to 3.
+ where (dict[str, any], optional): Filter criteria for the search.
+ raw_filter (dict[str, any], optional): Advanced raw filter criteria for the search.
+ namespace (str, optional): The namespace to search in. Defaults to None.
+
+ Raises:
+ ValueError: If both `raw_filter` and `where` are used simultaneously.
+
+ Returns:
+ list[dict]: A list of dictionaries, each containing the 'context' and 'metadata' of a document.
+ """
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="search", properties=self._telemetry_props)
@@ -559,6 +712,14 @@ return [{"context": c[0], "metadata": c[1]} for c in self.db.query(**params)]
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ Using `app.db.set_collection_name` method is preferred to this.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
self.db.set_collection_name(name)
# Create the collection if it does not exist
self.db._get_or_create_collection(name)
@@ -566,6 +727,10 @@ # since the main purpose is the creation.
def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ `App` does not have to be reinitialized after using this method.
+ """
try:
self.db_session.query(DataSource).filter_by(app_id=self.config.id).delete()
self.db_session.query(ChatHistory).filter_by(app_id=self.config.id).delete()
@@ -605,6 +770,11 @@ self.llm.update_history(app_id=app_id)
def delete(self, source_id: str):
+ """
+ Deletes the data from the database.
+ :param source_hash: The hash of the source.
+ :type source_hash: str
+ """
try:
self.db_session.query(DataSource).filter_by(hash=source_id, app_id=self.config.id).delete()
self.db_session.commit()
@@ -616,4 +786,4 @@ logger.info(f"Successfully deleted {source_id}")
# Send anonymous telemetry
if self.config.collect_metrics:
- self.telemetry.capture(event_name="delete", properties=self._telemetry_props)+ self.telemetry.capture(event_name="delete", properties=self._telemetry_props)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/embedchain.py |
Add docstrings to make code maintainable | from dotenv import load_dotenv
from fastapi import FastAPI, responses
from pydantic import BaseModel
from embedchain import App
load_dotenv(".env")
app = FastAPI(title="Embedchain FastAPI App")
embedchain_app = App()
class SourceModel(BaseModel):
source: str
class QuestionModel(BaseModel):
question: str
@app.post("/add")
async def add_source(source_model: SourceModel):
source = source_model.source
embedchain_app.add(source)
return {"message": f"Source '{source}' added successfully."}
@app.post("/query")
async def handle_query(question_model: QuestionModel):
question = question_model.question
answer = embedchain_app.query(question)
return {"answer": answer}
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
question = question_model.question
response = embedchain_app.chat(question)
return {"response": response}
@app.get("/")
async def root():
return responses.RedirectResponse(url="/docs") | --- +++ @@ -20,6 +20,10 @@
@app.post("/add")
async def add_source(source_model: SourceModel):
+ """
+ Adds a new source to the EmbedChain app.
+ Expects a JSON with a "source" key.
+ """
source = source_model.source
embedchain_app.add(source)
return {"message": f"Source '{source}' added successfully."}
@@ -27,6 +31,10 @@
@app.post("/query")
async def handle_query(question_model: QuestionModel):
+ """
+ Handles a query to the EmbedChain app.
+ Expects a JSON with a "question" key.
+ """
question = question_model.question
answer = embedchain_app.query(question)
return {"answer": answer}
@@ -34,6 +42,10 @@
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
+ """
+ Handles a chat request to the EmbedChain app.
+ Expects a JSON with a "question" key.
+ """
question = question_model.question
response = embedchain_app.chat(question)
return {"response": response}
@@ -41,4 +53,4 @@
@app.get("/")
async def root():
- return responses.RedirectResponse(url="/docs")+ return responses.RedirectResponse(url="/docs")
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/deployment/fly.io/app.py |
Write beginner-friendly docstrings | import os
from alembic import command
from alembic.config import Config
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session as SQLAlchemySession
from sqlalchemy.orm import scoped_session, sessionmaker
from .models import Base
class DatabaseManager:
def __init__(self, echo: bool = False):
self.database_uri = os.environ.get("EMBEDCHAIN_DB_URI")
self.echo = echo
self.engine: Engine = None
self._session_factory = None
def setup_engine(self) -> None:
if not self.database_uri:
raise RuntimeError("Database URI is not set. Set the EMBEDCHAIN_DB_URI environment variable.")
connect_args = {}
if self.database_uri.startswith("sqlite"):
connect_args["check_same_thread"] = False
self.engine = create_engine(self.database_uri, echo=self.echo, connect_args=connect_args)
self._session_factory = scoped_session(sessionmaker(bind=self.engine))
Base.metadata.bind = self.engine
def init_db(self) -> None:
if not self.engine:
raise RuntimeError("Database engine is not initialized. Call setup_engine() first.")
Base.metadata.create_all(self.engine)
def get_session(self) -> SQLAlchemySession:
if not self._session_factory:
raise RuntimeError("Session factory is not initialized. Call setup_engine() first.")
return self._session_factory()
def close_session(self) -> None:
if self._session_factory:
self._session_factory.remove()
def execute_transaction(self, transaction_block):
session = self.get_session()
try:
transaction_block(session)
session.commit()
except Exception as e:
session.rollback()
raise e
finally:
self.close_session()
# Singleton pattern to use throughout the application
database_manager = DatabaseManager()
# Convenience functions for backward compatibility and ease of use
def setup_engine(database_uri: str, echo: bool = False) -> None:
database_manager.database_uri = database_uri
database_manager.echo = echo
database_manager.setup_engine()
def alembic_upgrade() -> None:
alembic_config_path = os.path.join(os.path.dirname(__file__), "..", "..", "alembic.ini")
alembic_cfg = Config(alembic_config_path)
command.upgrade(alembic_cfg, "head")
def init_db() -> None:
alembic_upgrade()
def get_session() -> SQLAlchemySession:
return database_manager.get_session()
def execute_transaction(transaction_block):
database_manager.execute_transaction(transaction_block) | --- +++ @@ -18,6 +18,7 @@ self._session_factory = None
def setup_engine(self) -> None:
+ """Initializes the database engine and session factory."""
if not self.database_uri:
raise RuntimeError("Database URI is not set. Set the EMBEDCHAIN_DB_URI environment variable.")
connect_args = {}
@@ -28,20 +29,24 @@ Base.metadata.bind = self.engine
def init_db(self) -> None:
+ """Creates all tables defined in the Base metadata."""
if not self.engine:
raise RuntimeError("Database engine is not initialized. Call setup_engine() first.")
Base.metadata.create_all(self.engine)
def get_session(self) -> SQLAlchemySession:
+ """Provides a session for database operations."""
if not self._session_factory:
raise RuntimeError("Session factory is not initialized. Call setup_engine() first.")
return self._session_factory()
def close_session(self) -> None:
+ """Closes the current session."""
if self._session_factory:
self._session_factory.remove()
def execute_transaction(self, transaction_block):
+ """Executes a block of code within a database transaction."""
session = self.get_session()
try:
transaction_block(session)
@@ -65,6 +70,7 @@
def alembic_upgrade() -> None:
+ """Upgrades the database to the latest version."""
alembic_config_path = os.path.join(os.path.dirname(__file__), "..", "..", "alembic.ini")
alembic_cfg = Config(alembic_config_path)
command.upgrade(alembic_cfg, "head")
@@ -79,4 +85,4 @@
def execute_transaction(transaction_block):
- database_manager.execute_transaction(transaction_block)+ database_manager.execute_transaction(transaction_block)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/core/db/database.py |
Add missing documentation to my Python functions | import ast
import concurrent.futures
import json
import logging
import os
from typing import Any, Optional, Union
import requests
import yaml
from tqdm import tqdm
from embedchain.cache import (
Config,
ExactMatchEvaluation,
SearchDistanceEvaluation,
cache,
gptcache_data_manager,
gptcache_pre_function,
)
from embedchain.client import Client
from embedchain.config import AppConfig, CacheConfig, ChunkerConfig, Mem0Config
from embedchain.core.db.database import get_session
from embedchain.core.db.models import DataSource
from embedchain.embedchain import EmbedChain
from embedchain.embedder.base import BaseEmbedder
from embedchain.embedder.openai import OpenAIEmbedder
from embedchain.evaluation.base import BaseMetric
from embedchain.evaluation.metrics import (
AnswerRelevance,
ContextRelevance,
Groundedness,
)
from embedchain.factory import EmbedderFactory, LlmFactory, VectorDBFactory
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
from embedchain.llm.openai import OpenAILlm
from embedchain.telemetry.posthog import AnonymousTelemetry
from embedchain.utils.evaluation import EvalData, EvalMetric
from embedchain.utils.misc import validate_config
from embedchain.vectordb.base import BaseVectorDB
from embedchain.vectordb.chroma import ChromaDB
from mem0 import Memory
logger = logging.getLogger(__name__)
@register_deserializable
class App(EmbedChain):
def __init__(
self,
id: str = None,
name: str = None,
config: AppConfig = None,
db: BaseVectorDB = None,
embedding_model: BaseEmbedder = None,
llm: BaseLlm = None,
config_data: dict = None,
auto_deploy: bool = False,
chunker: ChunkerConfig = None,
cache_config: CacheConfig = None,
memory_config: Mem0Config = None,
log_level: int = logging.WARN,
):
if id and config_data:
raise Exception("Cannot provide both id and config. Please provide only one of them.")
if id and name:
raise Exception("Cannot provide both id and name. Please provide only one of them.")
if name and config:
raise Exception("Cannot provide both name and config. Please provide only one of them.")
self.auto_deploy = auto_deploy
# Store the dict config as an attribute to be able to send it
self.config_data = config_data if (config_data and validate_config(config_data)) else None
self.client = None
# pipeline_id from the backend
self.id = None
self.chunker = ChunkerConfig(**chunker) if chunker else None
self.cache_config = cache_config
self.memory_config = memory_config
self.config = config or AppConfig()
self.name = self.config.name
self.config.id = self.local_id = "default-app-id" if self.config.id is None else self.config.id
if id is not None:
# Init client first since user is trying to fetch the pipeline
# details from the platform
self._init_client()
pipeline_details = self._get_pipeline(id)
self.config.id = self.local_id = pipeline_details["metadata"]["local_id"]
self.id = id
if name is not None:
self.name = name
self.embedding_model = embedding_model or OpenAIEmbedder()
self.db = db or ChromaDB()
self.llm = llm or OpenAILlm()
self._init_db()
# Session for the metadata db
self.db_session = get_session()
# If cache_config is provided, initializing the cache ...
if self.cache_config is not None:
self._init_cache()
# If memory_config is provided, initializing the memory ...
self.mem0_memory = None
if self.memory_config is not None:
self.mem0_memory = Memory()
# Send anonymous telemetry
self._telemetry_props = {"class": self.__class__.__name__}
self.telemetry = AnonymousTelemetry(enabled=self.config.collect_metrics)
self.telemetry.capture(event_name="init", properties=self._telemetry_props)
self.user_asks = []
if self.auto_deploy:
self.deploy()
def _init_db(self):
self.db._set_embedder(self.embedding_model)
self.db._initialize()
self.db.set_collection_name(self.db.config.collection_name)
def _init_cache(self):
if self.cache_config.similarity_eval_config.strategy == "exact":
similarity_eval_func = ExactMatchEvaluation()
else:
similarity_eval_func = SearchDistanceEvaluation(
max_distance=self.cache_config.similarity_eval_config.max_distance,
positive=self.cache_config.similarity_eval_config.positive,
)
cache.init(
pre_embedding_func=gptcache_pre_function,
embedding_func=self.embedding_model.to_embeddings,
data_manager=gptcache_data_manager(vector_dimension=self.embedding_model.vector_dimension),
similarity_evaluation=similarity_eval_func,
config=Config(**self.cache_config.init_config.as_dict()),
)
def _init_client(self):
config = Client.load_config()
if config.get("api_key"):
self.client = Client()
else:
api_key = input(
"🔑 Enter your Embedchain API key. You can find the API key at https://app.embedchain.ai/settings/keys/ \n" # noqa: E501
)
self.client = Client(api_key=api_key)
def _get_pipeline(self, id):
print("🛠️ Fetching pipeline details from the platform...")
url = f"{self.client.host}/api/v1/pipelines/{id}/cli/"
r = requests.get(
url,
headers={"Authorization": f"Token {self.client.api_key}"},
)
if r.status_code == 404:
raise Exception(f"❌ Pipeline with id {id} not found!")
print(
f"🎉 Pipeline loaded successfully! Pipeline url: https://app.embedchain.ai/pipelines/{r.json()['id']}\n" # noqa: E501
)
return r.json()
def _create_pipeline(self):
print("🛠️ Creating pipeline on the platform...")
# self.config_data is a dict. Pass it inside the key 'yaml_config' to the backend
payload = {
"yaml_config": json.dumps(self.config_data),
"name": self.name,
"local_id": self.local_id,
}
url = f"{self.client.host}/api/v1/pipelines/cli/create/"
r = requests.post(
url,
json=payload,
headers={"Authorization": f"Token {self.client.api_key}"},
)
if r.status_code not in [200, 201]:
raise Exception(f"❌ Error occurred while creating pipeline. API response: {r.text}")
if r.status_code == 200:
print(
f"🎉🎉🎉 Existing pipeline found! View your pipeline: https://app.embedchain.ai/pipelines/{r.json()['id']}\n" # noqa: E501
) # noqa: E501
elif r.status_code == 201:
print(
f"🎉🎉🎉 Pipeline created successfully! View your pipeline: https://app.embedchain.ai/pipelines/{r.json()['id']}\n" # noqa: E501
)
return r.json()
def _get_presigned_url(self, data_type, data_value):
payload = {"data_type": data_type, "data_value": data_value}
r = requests.post(
f"{self.client.host}/api/v1/pipelines/{self.id}/cli/presigned_url/",
json=payload,
headers={"Authorization": f"Token {self.client.api_key}"},
)
r.raise_for_status()
return r.json()
def _upload_file_to_presigned_url(self, presigned_url, file_path):
try:
with open(file_path, "rb") as file:
response = requests.put(presigned_url, data=file)
response.raise_for_status()
return response.status_code == 200
except Exception as e:
logger.exception(f"Error occurred during file upload: {str(e)}")
print("❌ Error occurred during file upload!")
return False
def _upload_data_to_pipeline(self, data_type, data_value, metadata=None):
payload = {
"data_type": data_type,
"data_value": data_value,
"metadata": metadata,
}
try:
self._send_api_request(f"/api/v1/pipelines/{self.id}/cli/add/", payload)
# print the local file path if user tries to upload a local file
printed_value = metadata.get("file_path") if metadata.get("file_path") else data_value
print(f"✅ Data of type: {data_type}, value: {printed_value} added successfully.")
except Exception as e:
print(f"❌ Error occurred during data upload for type {data_type}!. Error: {str(e)}")
def _send_api_request(self, endpoint, payload):
url = f"{self.client.host}{endpoint}"
headers = {"Authorization": f"Token {self.client.api_key}"}
response = requests.post(url, json=payload, headers=headers)
response.raise_for_status()
return response
def _process_and_upload_data(self, data_hash, data_type, data_value):
if os.path.isabs(data_value):
presigned_url_data = self._get_presigned_url(data_type, data_value)
presigned_url = presigned_url_data["presigned_url"]
s3_key = presigned_url_data["s3_key"]
if self._upload_file_to_presigned_url(presigned_url, file_path=data_value):
metadata = {"file_path": data_value, "s3_key": s3_key}
data_value = presigned_url
else:
logger.error(f"File upload failed for hash: {data_hash}")
return False
else:
if data_type == "qna_pair":
data_value = list(ast.literal_eval(data_value))
metadata = {}
try:
self._upload_data_to_pipeline(data_type, data_value, metadata)
self._mark_data_as_uploaded(data_hash)
return True
except Exception:
print(f"❌ Error occurred during data upload for hash {data_hash}!")
return False
def _mark_data_as_uploaded(self, data_hash):
self.db_session.query(DataSource).filter_by(hash=data_hash, app_id=self.local_id).update({"is_uploaded": 1})
def get_data_sources(self):
data_sources = self.db_session.query(DataSource).filter_by(app_id=self.local_id).all()
results = []
for row in data_sources:
results.append({"data_type": row.type, "data_value": row.value, "metadata": row.meta_data})
return results
def deploy(self):
if self.client is None:
self._init_client()
pipeline_data = self._create_pipeline()
self.id = pipeline_data["id"]
results = self.db_session.query(DataSource).filter_by(app_id=self.local_id, is_uploaded=0).all()
if len(results) > 0:
print("🛠️ Adding data to your pipeline...")
for result in results:
data_hash, data_type, data_value = result.hash, result.data_type, result.data_value
self._process_and_upload_data(data_hash, data_type, data_value)
# Send anonymous telemetry
self.telemetry.capture(event_name="deploy", properties=self._telemetry_props)
@classmethod
def from_config(
cls,
config_path: Optional[str] = None,
config: Optional[dict[str, Any]] = None,
auto_deploy: bool = False,
yaml_path: Optional[str] = None,
):
# Backward compatibility for yaml_path
if yaml_path and not config_path:
config_path = yaml_path
if config_path and config:
raise ValueError("Please provide only one of config_path or config.")
config_data = None
if config_path:
file_extension = os.path.splitext(config_path)[1]
with open(config_path, "r", encoding="UTF-8") as file:
if file_extension in [".yaml", ".yml"]:
config_data = yaml.safe_load(file)
elif file_extension == ".json":
config_data = json.load(file)
else:
raise ValueError("config_path must be a path to a YAML or JSON file.")
elif config and isinstance(config, dict):
config_data = config
else:
logger.error(
"Please provide either a config file path (YAML or JSON) or a config dictionary. Falling back to defaults because no config is provided.", # noqa: E501
)
config_data = {}
# Validate the config
validate_config(config_data)
app_config_data = config_data.get("app", {}).get("config", {})
vector_db_config_data = config_data.get("vectordb", {})
embedding_model_config_data = config_data.get("embedding_model", config_data.get("embedder", {}))
memory_config_data = config_data.get("memory", {})
llm_config_data = config_data.get("llm", {})
chunker_config_data = config_data.get("chunker", {})
cache_config_data = config_data.get("cache", None)
app_config = AppConfig(**app_config_data)
memory_config = Mem0Config(**memory_config_data) if memory_config_data else None
vector_db_provider = vector_db_config_data.get("provider", "chroma")
vector_db = VectorDBFactory.create(vector_db_provider, vector_db_config_data.get("config", {}))
if llm_config_data:
llm_provider = llm_config_data.get("provider", "openai")
llm = LlmFactory.create(llm_provider, llm_config_data.get("config", {}))
else:
llm = None
embedding_model_provider = embedding_model_config_data.get("provider", "openai")
embedding_model = EmbedderFactory.create(
embedding_model_provider, embedding_model_config_data.get("config", {})
)
if cache_config_data is not None:
cache_config = CacheConfig.from_config(cache_config_data)
else:
cache_config = None
return cls(
config=app_config,
llm=llm,
db=vector_db,
embedding_model=embedding_model,
config_data=config_data,
auto_deploy=auto_deploy,
chunker=chunker_config_data,
cache_config=cache_config,
memory_config=memory_config,
)
def _eval(self, dataset: list[EvalData], metric: Union[BaseMetric, str]):
metric_str = metric.name if isinstance(metric, BaseMetric) else metric
eval_class_map = {
EvalMetric.CONTEXT_RELEVANCY.value: ContextRelevance,
EvalMetric.ANSWER_RELEVANCY.value: AnswerRelevance,
EvalMetric.GROUNDEDNESS.value: Groundedness,
}
if metric_str in eval_class_map:
return eval_class_map[metric_str]().evaluate(dataset)
# Handle the case for custom metrics
if isinstance(metric, BaseMetric):
return metric.evaluate(dataset)
else:
raise ValueError(f"Invalid metric: {metric}")
def evaluate(
self,
questions: Union[str, list[str]],
metrics: Optional[list[Union[BaseMetric, str]]] = None,
num_workers: int = 4,
):
if "OPENAI_API_KEY" not in os.environ:
raise ValueError("Please set the OPENAI_API_KEY environment variable with permission to use `gpt4` model.")
queries, answers, contexts = [], [], []
if isinstance(questions, list):
with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
future_to_data = {executor.submit(self.query, q, citations=True): q for q in questions}
for future in tqdm(
concurrent.futures.as_completed(future_to_data),
total=len(future_to_data),
desc="Getting answer and contexts for questions",
):
question = future_to_data[future]
queries.append(question)
answer, context = future.result()
answers.append(answer)
contexts.append(list(map(lambda x: x[0], context)))
else:
answer, context = self.query(questions, citations=True)
queries = [questions]
answers = [answer]
contexts = [list(map(lambda x: x[0], context))]
metrics = metrics or [
EvalMetric.CONTEXT_RELEVANCY.value,
EvalMetric.ANSWER_RELEVANCY.value,
EvalMetric.GROUNDEDNESS.value,
]
logger.info(f"Collecting data from {len(queries)} questions for evaluation...")
dataset = []
for q, a, c in zip(queries, answers, contexts):
dataset.append(EvalData(question=q, answer=a, contexts=c))
logger.info(f"Evaluating {len(dataset)} data points...")
result = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
future_to_metric = {executor.submit(self._eval, dataset, metric): metric for metric in metrics}
for future in tqdm(
concurrent.futures.as_completed(future_to_metric),
total=len(future_to_metric),
desc="Evaluating metrics",
):
metric = future_to_metric[future]
if isinstance(metric, BaseMetric):
result[metric.name] = future.result()
else:
result[metric] = future.result()
if self.config.collect_metrics:
telemetry_props = self._telemetry_props
metrics_names = []
for metric in metrics:
if isinstance(metric, BaseMetric):
metrics_names.append(metric.name)
else:
metrics_names.append(metric)
telemetry_props["metrics"] = metrics_names
self.telemetry.capture(event_name="evaluate", properties=telemetry_props)
return result | --- +++ @@ -46,6 +46,11 @@
@register_deserializable
class App(EmbedChain):
+ """
+ EmbedChain App lets you create a LLM powered app for your unstructured
+ data by defining your chosen data source, embedding model,
+ and vector database.
+ """
def __init__(
self,
@@ -62,6 +67,23 @@ memory_config: Mem0Config = None,
log_level: int = logging.WARN,
):
+ """
+ Initialize a new `App` instance.
+
+ :param config: Configuration for the pipeline, defaults to None
+ :type config: AppConfig, optional
+ :param db: The database to use for storing and retrieving embeddings, defaults to None
+ :type db: BaseVectorDB, optional
+ :param embedding_model: The embedding model used to calculate embeddings, defaults to None
+ :type embedding_model: BaseEmbedder, optional
+ :param llm: The LLM model used to calculate embeddings, defaults to None
+ :type llm: BaseLlm, optional
+ :param config_data: Config dictionary, defaults to None
+ :type config_data: dict, optional
+ :param auto_deploy: Whether to deploy the pipeline automatically, defaults to False
+ :type auto_deploy: bool, optional
+ :raises Exception: If an error occurs while creating the pipeline
+ """
if id and config_data:
raise Exception("Cannot provide both id and config. Please provide only one of them.")
@@ -123,6 +145,9 @@ self.deploy()
def _init_db(self):
+ """
+ Initialize the database.
+ """
self.db._set_embedder(self.embedding_model)
self.db._initialize()
self.db.set_collection_name(self.db.config.collection_name)
@@ -145,6 +170,9 @@ )
def _init_client(self):
+ """
+ Initialize the client.
+ """
config = Client.load_config()
if config.get("api_key"):
self.client = Client()
@@ -155,6 +183,9 @@ self.client = Client(api_key=api_key)
def _get_pipeline(self, id):
+ """
+ Get existing pipeline
+ """
print("🛠️ Fetching pipeline details from the platform...")
url = f"{self.client.host}/api/v1/pipelines/{id}/cli/"
r = requests.get(
@@ -170,6 +201,9 @@ return r.json()
def _create_pipeline(self):
+ """
+ Create a pipeline on the platform.
+ """
print("🛠️ Creating pipeline on the platform...")
# self.config_data is a dict. Pass it inside the key 'yaml_config' to the backend
payload = {
@@ -297,6 +331,20 @@ auto_deploy: bool = False,
yaml_path: Optional[str] = None,
):
+ """
+ Instantiate a App object from a configuration.
+
+ :param config_path: Path to the YAML or JSON configuration file.
+ :type config_path: Optional[str]
+ :param config: A dictionary containing the configuration.
+ :type config: Optional[dict[str, Any]]
+ :param auto_deploy: Whether to deploy the app automatically, defaults to False
+ :type auto_deploy: bool, optional
+ :param yaml_path: (Deprecated) Path to the YAML configuration file. Use config_path instead.
+ :type yaml_path: Optional[str]
+ :return: An instance of the App class.
+ :rtype: App
+ """
# Backward compatibility for yaml_path
if yaml_path and not config_path:
config_path = yaml_path
@@ -369,6 +417,9 @@ )
def _eval(self, dataset: list[EvalData], metric: Union[BaseMetric, str]):
+ """
+ Evaluate the app on a dataset for a given metric.
+ """
metric_str = metric.name if isinstance(metric, BaseMetric) else metric
eval_class_map = {
EvalMetric.CONTEXT_RELEVANCY.value: ContextRelevance,
@@ -391,6 +442,18 @@ metrics: Optional[list[Union[BaseMetric, str]]] = None,
num_workers: int = 4,
):
+ """
+ Evaluate the app on a question.
+
+ param: questions: A question or a list of questions to evaluate.
+ type: questions: Union[str, list[str]]
+ param: metrics: A list of metrics to evaluate. Defaults to all metrics.
+ type: metrics: Optional[list[Union[BaseMetric, str]]]
+ param: num_workers: Number of workers to use for parallel processing.
+ type: num_workers: int
+ return: A dictionary containing the evaluation results.
+ rtype: dict
+ """
if "OPENAI_API_KEY" not in os.environ:
raise ValueError("Please set the OPENAI_API_KEY environment variable with permission to use `gpt4` model.")
@@ -451,4 +514,4 @@ telemetry_props["metrics"] = metrics_names
self.telemetry.capture(event_name="evaluate", properties=telemetry_props)
- return result+ return result
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/app.py |
Add return value explanations in docstrings | from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class QdrantDBConfig(BaseVectorDbConfig):
def __init__(
self,
collection_name: Optional[str] = None,
dir: Optional[str] = None,
hnsw_config: Optional[dict[str, any]] = None,
quantization_config: Optional[dict[str, any]] = None,
on_disk: Optional[bool] = None,
batch_size: Optional[int] = 10,
**extra_params: dict[str, any],
):
self.hnsw_config = hnsw_config
self.quantization_config = quantization_config
self.on_disk = on_disk
self.batch_size = batch_size
self.extra_params = extra_params
super().__init__(collection_name=collection_name, dir=dir) | --- +++ @@ -6,6 +6,10 @@
@register_deserializable
class QdrantDBConfig(BaseVectorDbConfig):
+ """
+ Config to initialize a qdrant client.
+ :param: url. qdrant url or list of nodes url to be used for connection
+ """
def __init__(
self,
@@ -17,9 +21,28 @@ batch_size: Optional[int] = 10,
**extra_params: dict[str, any],
):
+ """
+ Initializes a configuration class instance for a qdrant client.
+
+ :param collection_name: Default name for the collection, defaults to None
+ :type collection_name: Optional[str], optional
+ :param dir: Path to the database directory, where the database is stored, defaults to None
+ :type dir: Optional[str], optional
+ :param hnsw_config: Params for HNSW index
+ :type hnsw_config: Optional[dict[str, any]], defaults to None
+ :param quantization_config: Params for quantization, if None - quantization will be disabled
+ :type quantization_config: Optional[dict[str, any]], defaults to None
+ :param on_disk: If true - point`s payload will not be stored in memory.
+ It will be read from the disk every time it is requested.
+ This setting saves RAM by (slightly) increasing the response time.
+ Note: those payload values that are involved in filtering and are indexed - remain in RAM.
+ :type on_disk: bool, optional, defaults to None
+ :param batch_size: Number of items to insert in one batch, defaults to 10
+ :type batch_size: Optional[int], optional
+ """
self.hnsw_config = hnsw_config
self.quantization_config = quantization_config
self.on_disk = on_disk
self.batch_size = batch_size
self.extra_params = extra_params
- super().__init__(collection_name=collection_name, dir=dir)+ super().__init__(collection_name=collection_name, dir=dir)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/config/vector_db/qdrant.py |
Write docstrings for algorithm functions | import queue
from typing import Any, Union
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import LLMResult
STOP_ITEM = "[END]"
"""
This is a special item that is used to signal the end of the stream.
"""
class StreamingStdOutCallbackHandlerYield(StreamingStdOutCallbackHandler):
q: queue.Queue
"""
The queue to write the tokens to as they are generated.
"""
def __init__(self, q: queue.Queue) -> None:
super().__init__()
self.q = q
def on_llm_start(self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any) -> None:
with self.q.mutex:
self.q.queue.clear()
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self.q.put(token)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.q.put(STOP_ITEM)
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
self.q.put("%s: %s" % (type(error).__name__, str(error)))
self.q.put(STOP_ITEM)
def generate(rq: queue.Queue):
while True:
result: str = rq.get()
if result == STOP_ITEM or result is None:
break
yield result | --- +++ @@ -11,6 +11,10 @@
class StreamingStdOutCallbackHandlerYield(StreamingStdOutCallbackHandler):
+ """
+ This is a callback handler that yields the tokens as they are generated.
+ For a usage example, see the :func:`generate` function below.
+ """
q: queue.Queue
"""
@@ -18,27 +22,52 @@ """
def __init__(self, q: queue.Queue) -> None:
+ """
+ Initialize the callback handler.
+ q: The queue to write the tokens to as they are generated.
+ """
super().__init__()
self.q = q
def on_llm_start(self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any) -> None:
+ """Run when LLM starts running."""
with self.q.mutex:
self.q.queue.clear()
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+ """Run on new LLM token. Only available when streaming is enabled."""
self.q.put(token)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+ """Run when LLM ends running."""
self.q.put(STOP_ITEM)
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
+ """Run when LLM errors."""
self.q.put("%s: %s" % (type(error).__name__, str(error)))
self.q.put(STOP_ITEM)
def generate(rq: queue.Queue):
+ """
+ This is a generator that yields the items in the queue until it reaches the stop item.
+
+ Usage example:
+ ```
+ def askQuestion(callback_fn: StreamingStdOutCallbackHandlerYield):
+ llm = OpenAI(streaming=True, callbacks=[callback_fn])
+ return llm.invoke(prompt="Write a poem about a tree.")
+
+ @app.route("/", methods=["GET"])
+ def generate_output():
+ q = Queue()
+ callback_fn = StreamingStdOutCallbackHandlerYield(q)
+ threading.Thread(target=askQuestion, args=(callback_fn,)).start()
+ return Response(generate(q), mimetype="text/event-stream")
+ ```
+ """
while True:
result: str = rq.get()
if result == STOP_ITEM or result is None:
break
- yield result+ yield result
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/helpers/callbacks.py |
Generate docstrings for exported functions | import concurrent.futures
import logging
import os
from string import Template
from typing import Optional
import numpy as np
from openai import OpenAI
from tqdm import tqdm
from embedchain.config.evaluation.base import GroundednessConfig
from embedchain.evaluation.base import BaseMetric
from embedchain.utils.evaluation import EvalData, EvalMetric
logger = logging.getLogger(__name__)
class Groundedness(BaseMetric):
def __init__(self, config: Optional[GroundednessConfig] = None):
super().__init__(name=EvalMetric.GROUNDEDNESS.value)
self.config = config or GroundednessConfig()
api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("Please set the OPENAI_API_KEY environment variable or pass the `api_key` in config.")
self.client = OpenAI(api_key=api_key)
def _generate_answer_claim_prompt(self, data: EvalData) -> str:
prompt = Template(self.config.answer_claims_prompt).substitute(question=data.question, answer=data.answer)
return prompt
def _get_claim_statements(self, prompt: str) -> np.ndarray:
response = self.client.chat.completions.create(
model=self.config.model,
messages=[{"role": "user", "content": f"{prompt}"}],
)
result = response.choices[0].message.content.strip()
claim_statements = np.array([statement for statement in result.split("\n") if statement])
return claim_statements
def _generate_claim_inference_prompt(self, data: EvalData, claim_statements: list[str]) -> str:
prompt = Template(self.config.claims_inference_prompt).substitute(
context="\n".join(data.contexts), claim_statements="\n".join(claim_statements)
)
return prompt
def _get_claim_verdict_scores(self, prompt: str) -> np.ndarray:
response = self.client.chat.completions.create(
model=self.config.model,
messages=[{"role": "user", "content": f"{prompt}"}],
)
result = response.choices[0].message.content.strip()
claim_verdicts = result.split("\n")
verdict_score_map = {"1": 1, "0": 0, "-1": np.nan}
verdict_scores = np.array([verdict_score_map[verdict] for verdict in claim_verdicts])
return verdict_scores
def _compute_score(self, data: EvalData) -> float:
answer_claims_prompt = self._generate_answer_claim_prompt(data)
claim_statements = self._get_claim_statements(answer_claims_prompt)
claim_inference_prompt = self._generate_claim_inference_prompt(data, claim_statements)
verdict_scores = self._get_claim_verdict_scores(claim_inference_prompt)
return np.sum(verdict_scores) / claim_statements.size
def evaluate(self, dataset: list[EvalData]):
results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
future_to_data = {executor.submit(self._compute_score, data): data for data in dataset}
for future in tqdm(
concurrent.futures.as_completed(future_to_data),
total=len(future_to_data),
desc="Evaluating Groundedness",
):
data = future_to_data[future]
try:
score = future.result()
results.append(score)
except Exception as e:
logger.error(f"Error while evaluating groundedness for data point {data}: {e}")
return np.mean(results) if results else 0.0 | --- +++ @@ -16,6 +16,9 @@
class Groundedness(BaseMetric):
+ """
+ Metric for groundedness of answer from the given contexts.
+ """
def __init__(self, config: Optional[GroundednessConfig] = None):
super().__init__(name=EvalMetric.GROUNDEDNESS.value)
@@ -26,10 +29,16 @@ self.client = OpenAI(api_key=api_key)
def _generate_answer_claim_prompt(self, data: EvalData) -> str:
+ """
+ Generate the prompt for the given data.
+ """
prompt = Template(self.config.answer_claims_prompt).substitute(question=data.question, answer=data.answer)
return prompt
def _get_claim_statements(self, prompt: str) -> np.ndarray:
+ """
+ Get claim statements from the answer.
+ """
response = self.client.chat.completions.create(
model=self.config.model,
messages=[{"role": "user", "content": f"{prompt}"}],
@@ -39,12 +48,18 @@ return claim_statements
def _generate_claim_inference_prompt(self, data: EvalData, claim_statements: list[str]) -> str:
+ """
+ Generate the claim inference prompt for the given data and claim statements.
+ """
prompt = Template(self.config.claims_inference_prompt).substitute(
context="\n".join(data.contexts), claim_statements="\n".join(claim_statements)
)
return prompt
def _get_claim_verdict_scores(self, prompt: str) -> np.ndarray:
+ """
+ Get verdicts for claim statements.
+ """
response = self.client.chat.completions.create(
model=self.config.model,
messages=[{"role": "user", "content": f"{prompt}"}],
@@ -56,6 +71,9 @@ return verdict_scores
def _compute_score(self, data: EvalData) -> float:
+ """
+ Compute the groundedness score for a single data point.
+ """
answer_claims_prompt = self._generate_answer_claim_prompt(data)
claim_statements = self._get_claim_statements(answer_claims_prompt)
@@ -64,6 +82,9 @@ return np.sum(verdict_scores) / claim_statements.size
def evaluate(self, dataset: list[EvalData]):
+ """
+ Evaluate the dataset and returns the average groundedness score.
+ """
results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -80,4 +101,4 @@ except Exception as e:
logger.error(f"Error while evaluating groundedness for data point {data}: {e}")
- return np.mean(results) if results else 0.0+ return np.mean(results) if results else 0.0
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/evaluation/metrics/groundedness.py |
Write proper docstrings for these functions | import hashlib
import os
from dropbox.files import FileMetadata
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.directory_loader import DirectoryLoader
@register_deserializable
class DropboxLoader(BaseLoader):
def __init__(self):
access_token = os.environ.get("DROPBOX_ACCESS_TOKEN")
if not access_token:
raise ValueError("Please set the `DROPBOX_ACCESS_TOKEN` environment variable.")
try:
from dropbox import Dropbox, exceptions
except ImportError:
raise ImportError("Dropbox requires extra dependencies. Install with `pip install dropbox==11.36.2`")
try:
dbx = Dropbox(access_token)
dbx.users_get_current_account()
self.dbx = dbx
except exceptions.AuthError as ex:
raise ValueError("Invalid Dropbox access token. Please verify your token and try again.") from ex
def _download_folder(self, path: str, local_root: str) -> list[FileMetadata]:
entries = self.dbx.files_list_folder(path).entries
for entry in entries:
local_path = os.path.join(local_root, entry.name)
if isinstance(entry, FileMetadata):
self.dbx.files_download_to_file(local_path, f"{path}/{entry.name}")
else:
os.makedirs(local_path, exist_ok=True)
self._download_folder(f"{path}/{entry.name}", local_path)
return entries
def _generate_dir_id_from_all_paths(self, path: str) -> str:
entries = self.dbx.files_list_folder(path).entries
paths = [f"{path}/{entry.name}" for entry in entries]
return hashlib.sha256("".join(paths).encode()).hexdigest()
def load_data(self, path: str):
root_dir = f"dropbox_{self._generate_dir_id_from_all_paths(path)}"
os.makedirs(root_dir, exist_ok=True)
for entry in self.dbx.files_list_folder(path).entries:
local_path = os.path.join(root_dir, entry.name)
if isinstance(entry, FileMetadata):
self.dbx.files_download_to_file(local_path, f"{path}/{entry.name}")
else:
os.makedirs(local_path, exist_ok=True)
self._download_folder(f"{path}/{entry.name}", local_path)
dir_loader = DirectoryLoader()
data = dir_loader.load_data(root_dir)["data"]
# Clean up
self._clean_directory(root_dir)
return {
"doc_id": hashlib.sha256(path.encode()).hexdigest(),
"data": data,
}
def _clean_directory(self, dir_path):
for item in os.listdir(dir_path):
item_path = os.path.join(dir_path, item)
if os.path.isdir(item_path):
self._clean_directory(item_path)
else:
os.remove(item_path)
os.rmdir(dir_path) | --- +++ @@ -27,6 +27,7 @@ raise ValueError("Invalid Dropbox access token. Please verify your token and try again.") from ex
def _download_folder(self, path: str, local_root: str) -> list[FileMetadata]:
+ """Download a folder from Dropbox and save it preserving the directory structure."""
entries = self.dbx.files_list_folder(path).entries
for entry in entries:
local_path = os.path.join(local_root, entry.name)
@@ -38,11 +39,13 @@ return entries
def _generate_dir_id_from_all_paths(self, path: str) -> str:
+ """Generate a unique ID for a directory based on all of its paths."""
entries = self.dbx.files_list_folder(path).entries
paths = [f"{path}/{entry.name}" for entry in entries]
return hashlib.sha256("".join(paths).encode()).hexdigest()
def load_data(self, path: str):
+ """Load data from a Dropbox URL, preserving the folder structure."""
root_dir = f"dropbox_{self._generate_dir_id_from_all_paths(path)}"
os.makedirs(root_dir, exist_ok=True)
@@ -66,10 +69,11 @@ }
def _clean_directory(self, dir_path):
+ """Recursively delete a directory and its contents."""
for item in os.listdir(dir_path):
item_path = os.path.join(dir_path, item)
if os.path.isdir(item_path):
self._clean_directory(item_path)
else:
os.remove(item_path)
- os.rmdir(dir_path)+ os.rmdir(dir_path)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/loaders/dropbox.py |
Add docstrings including usage examples | import json
import logging
from string import Template
from typing import Any, Type, TypeVar, Union
T = TypeVar("T", bound="JSONSerializable")
# NOTE: Through inheritance, all of our classes should be children of JSONSerializable. (highest level)
# NOTE: The @register_deserializable decorator should be added to all user facing child classes. (lowest level)
logger = logging.getLogger(__name__)
def register_deserializable(cls: Type[T]) -> Type[T]:
JSONSerializable._register_class_as_deserializable(cls)
return cls
class JSONSerializable:
_deserializable_classes = set() # Contains classes that are whitelisted for deserialization.
def serialize(self) -> str:
try:
return json.dumps(self, default=self._auto_encoder, ensure_ascii=False)
except Exception as e:
logger.error(f"Serialization error: {e}")
return "{}"
@classmethod
def deserialize(cls, json_str: str) -> Any:
try:
return json.loads(json_str, object_hook=cls._auto_decoder)
except Exception as e:
logger.error(f"Deserialization error: {e}")
# Return a default instance in case of failure
return cls()
@staticmethod
def _auto_encoder(obj: Any) -> Union[dict[str, Any], None]:
if hasattr(obj, "__dict__"):
dct = {}
for key, value in obj.__dict__.items():
try:
# Recursive: If the value is an instance of a subclass of JSONSerializable,
# serialize it using the JSONSerializable serialize method.
if isinstance(value, JSONSerializable):
serialized_value = value.serialize()
# The value is stored as a serialized string.
dct[key] = json.loads(serialized_value)
# Custom rules (subclass is not json serializable by default)
elif isinstance(value, Template):
dct[key] = {"__type__": "Template", "data": value.template}
# Future custom types we can follow a similar pattern
# elif isinstance(value, SomeOtherType):
# dct[key] = {
# "__type__": "SomeOtherType",
# "data": value.some_method()
# }
# NOTE: Keep in mind that this logic needs to be applied to the decoder too.
else:
json.dumps(value) # Try to serialize the value.
dct[key] = value
except TypeError:
pass # If it fails, simply pass to skip this key-value pair of the dictionary.
dct["__class__"] = obj.__class__.__name__
return dct
raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
@classmethod
def _auto_decoder(cls, dct: dict[str, Any]) -> Any:
class_name = dct.pop("__class__", None)
if class_name:
if not hasattr(cls, "_deserializable_classes"): # Additional safety check
raise AttributeError(f"`{class_name}` has no registry of allowed deserializations.")
if class_name not in {cl.__name__ for cl in cls._deserializable_classes}:
raise KeyError(f"Deserialization of class `{class_name}` is not allowed.")
target_class = next((cl for cl in cls._deserializable_classes if cl.__name__ == class_name), None)
if target_class:
obj = target_class.__new__(target_class)
for key, value in dct.items():
if isinstance(value, dict) and "__type__" in value:
if value["__type__"] == "Template":
value = Template(value["data"])
# For future custom types we can follow a similar pattern
# elif value["__type__"] == "SomeOtherType":
# value = SomeOtherType.some_constructor(value["data"])
default_value = getattr(target_class, key, None)
setattr(obj, key, value or default_value)
return obj
return dct
def save_to_file(self, filename: str) -> None:
with open(filename, "w", encoding="utf-8") as f:
f.write(self.serialize())
@classmethod
def load_from_file(cls, filename: str) -> Any:
with open(filename, "r", encoding="utf-8") as f:
json_str = f.read()
return cls.deserialize(json_str)
@classmethod
def _register_class_as_deserializable(cls, target_class: Type[T]) -> None:
cls._deserializable_classes.add(target_class) | --- +++ @@ -12,15 +12,50 @@
def register_deserializable(cls: Type[T]) -> Type[T]:
+ """
+ A class decorator to register a class as deserializable.
+
+ When a class is decorated with @register_deserializable, it becomes
+ a part of the set of classes that the JSONSerializable class can
+ deserialize.
+
+ Deserialization is in essence loading attributes from a json file.
+ This decorator is a security measure put in place to make sure that
+ you don't load attributes that were initially part of another class.
+
+ Example:
+ @register_deserializable
+ class ChildClass(JSONSerializable):
+ def __init__(self, ...):
+ # initialization logic
+
+ Args:
+ cls (Type): The class to be registered.
+
+ Returns:
+ Type: The same class, after registration.
+ """
JSONSerializable._register_class_as_deserializable(cls)
return cls
class JSONSerializable:
+ """
+ A class to represent a JSON serializable object.
+
+ This class provides methods to serialize and deserialize objects,
+ as well as to save serialized objects to a file and load them back.
+ """
_deserializable_classes = set() # Contains classes that are whitelisted for deserialization.
def serialize(self) -> str:
+ """
+ Serialize the object to a JSON-formatted string.
+
+ Returns:
+ str: A JSON string representation of the object.
+ """
try:
return json.dumps(self, default=self._auto_encoder, ensure_ascii=False)
except Exception as e:
@@ -29,6 +64,20 @@
@classmethod
def deserialize(cls, json_str: str) -> Any:
+ """
+ Deserialize a JSON-formatted string to an object.
+ If it fails, a default class is returned instead.
+ Note: This *returns* an instance, it's not automatically loaded on the calling class.
+
+ Example:
+ app = App.deserialize(json_str)
+
+ Args:
+ json_str (str): A JSON string representation of an object.
+
+ Returns:
+ Object: The deserialized object.
+ """
try:
return json.loads(json_str, object_hook=cls._auto_decoder)
except Exception as e:
@@ -38,6 +87,15 @@
@staticmethod
def _auto_encoder(obj: Any) -> Union[dict[str, Any], None]:
+ """
+ Automatically encode an object for JSON serialization.
+
+ Args:
+ obj (Object): The object to be encoded.
+
+ Returns:
+ dict: A dictionary representation of the object.
+ """
if hasattr(obj, "__dict__"):
dct = {}
for key, value in obj.__dict__.items():
@@ -70,6 +128,15 @@
@classmethod
def _auto_decoder(cls, dct: dict[str, Any]) -> Any:
+ """
+ Automatically decode a dictionary to an object during JSON deserialization.
+
+ Args:
+ dct (dict): The dictionary representation of an object.
+
+ Returns:
+ Object: The decoded object or the original dictionary if decoding is not possible.
+ """
class_name = dct.pop("__class__", None)
if class_name:
if not hasattr(cls, "_deserializable_classes"): # Additional safety check
@@ -92,15 +159,40 @@ return dct
def save_to_file(self, filename: str) -> None:
+ """
+ Save the serialized object to a file.
+
+ Args:
+ filename (str): The path to the file where the object should be saved.
+ """
with open(filename, "w", encoding="utf-8") as f:
f.write(self.serialize())
@classmethod
def load_from_file(cls, filename: str) -> Any:
+ """
+ Load and deserialize an object from a file.
+
+ Args:
+ filename (str): The path to the file from which the object should be loaded.
+
+ Returns:
+ Object: The deserialized object.
+ """
with open(filename, "r", encoding="utf-8") as f:
json_str = f.read()
return cls.deserialize(json_str)
@classmethod
def _register_class_as_deserializable(cls, target_class: Type[T]) -> None:
- cls._deserializable_classes.add(target_class)+ """
+ Register a class as deserializable. This is a classmethod and globally shared.
+
+ This method adds the target class to the set of classes that
+ can be deserialized. This is a security measure to ensure only
+ whitelisted classes are deserialized.
+
+ Args:
+ target_class (Type): The class to be registered.
+ """
+ cls._deserializable_classes.add(target_class)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/helpers/json_serializable.py |
Replace inline comments with docstrings | import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from embedchain.core.db.models import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
config.set_main_option("sqlalchemy.url", os.environ.get("EMBEDCHAIN_DB_URI"))
def run_migrations_offline() -> None:
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online() | --- +++ @@ -19,6 +19,17 @@
def run_migrations_offline() -> None:
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
@@ -32,6 +43,12 @@
def run_migrations_online() -> None:
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
@@ -48,4 +65,4 @@ if context.is_offline_mode():
run_migrations_offline()
else:
- run_migrations_online()+ run_migrations_online()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/migrations/env.py |
Write documentation strings for class attributes | import hashlib
import json
import os
import re
from typing import Union
import requests
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string, is_valid_json_string
class JSONReader:
def __init__(self) -> None:
pass
@staticmethod
def load_data(json_data: Union[dict, str]) -> list[str]:
if isinstance(json_data, str):
json_data = json.loads(json_data)
else:
json_data = json_data
json_output = json.dumps(json_data, indent=0)
lines = json_output.split("\n")
useful_lines = [line for line in lines if not re.match(r"^[{}\[\],]*$", line)]
return ["\n".join(useful_lines)]
VALID_URL_PATTERN = (
"^https?://(?:www\.)?(?:\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[a-zA-Z0-9.-]+)(?::\d+)?/(?:[^/\s]+/)*[^/\s]+\.json$"
)
class JSONLoader(BaseLoader):
@staticmethod
def _check_content(content):
if not isinstance(content, str):
raise ValueError(
"Invaid content input. \
If you want to upload (list, dict, etc.), do \
`json.dump(data, indent=0)` and add the stringified JSON. \
Check - `https://docs.embedchain.ai/data-sources/json`"
)
@staticmethod
def load_data(content):
JSONLoader._check_content(content)
loader = JSONReader()
data = []
data_content = []
content_url_str = content
if os.path.isfile(content):
with open(content, "r", encoding="utf-8") as json_file:
json_data = json.load(json_file)
elif re.match(VALID_URL_PATTERN, content):
response = requests.get(content)
if response.status_code == 200:
json_data = response.json()
else:
raise ValueError(
f"Loading data from the given url: {content} failed. \
Make sure the url is working."
)
elif is_valid_json_string(content):
json_data = content
content_url_str = hashlib.sha256((content).encode("utf-8")).hexdigest()
else:
raise ValueError(f"Invalid content to load json data from: {content}")
docs = loader.load_data(json_data)
for doc in docs:
text = doc if isinstance(doc, str) else doc["text"]
doc_content = clean_string(text)
data.append({"content": doc_content, "meta_data": {"url": content_url_str}})
data_content.append(doc_content)
doc_id = hashlib.sha256((content_url_str + ", ".join(data_content)).encode()).hexdigest()
return {"doc_id": doc_id, "data": data} | --- +++ @@ -12,10 +12,19 @@
class JSONReader:
def __init__(self) -> None:
+ """Initialize the JSONReader."""
pass
@staticmethod
def load_data(json_data: Union[dict, str]) -> list[str]:
+ """Load data from a JSON structure.
+
+ Args:
+ json_data (Union[dict, str]): The JSON data to load.
+
+ Returns:
+ list[str]: A list of strings representing the leaf nodes of the JSON.
+ """
if isinstance(json_data, str):
json_data = json.loads(json_data)
else:
@@ -45,6 +54,7 @@
@staticmethod
def load_data(content):
+ """Load a json file. Each data point is a key value pair."""
JSONLoader._check_content(content)
loader = JSONReader()
@@ -80,4 +90,4 @@ data_content.append(doc_content)
doc_id = hashlib.sha256((content_url_str + ", ".join(data_content)).encode()).hexdigest()
- return {"doc_id": doc_id, "data": data}+ return {"doc_id": doc_id, "data": data}
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/loaders/json.py |
Generate descriptive docstrings automatically | import concurrent.futures
import os
from string import Template
from typing import Optional
import numpy as np
import pysbd
from openai import OpenAI
from tqdm import tqdm
from embedchain.config.evaluation.base import ContextRelevanceConfig
from embedchain.evaluation.base import BaseMetric
from embedchain.utils.evaluation import EvalData, EvalMetric
class ContextRelevance(BaseMetric):
def __init__(self, config: Optional[ContextRelevanceConfig] = ContextRelevanceConfig()):
super().__init__(name=EvalMetric.CONTEXT_RELEVANCY.value)
self.config = config
api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("API key not found. Set 'OPENAI_API_KEY' or pass it in the config.")
self.client = OpenAI(api_key=api_key)
self._sbd = pysbd.Segmenter(language=self.config.language, clean=False)
def _sentence_segmenter(self, text: str) -> list[str]:
return self._sbd.segment(text)
def _compute_score(self, data: EvalData) -> float:
original_context = "\n".join(data.contexts)
prompt = Template(self.config.prompt).substitute(context=original_context, question=data.question)
response = self.client.chat.completions.create(
model=self.config.model, messages=[{"role": "user", "content": prompt}]
)
useful_context = response.choices[0].message.content.strip()
useful_context_sentences = self._sentence_segmenter(useful_context)
original_context_sentences = self._sentence_segmenter(original_context)
if not original_context_sentences:
return 0.0
return len(useful_context_sentences) / len(original_context_sentences)
def evaluate(self, dataset: list[EvalData]) -> float:
scores = []
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(self._compute_score, data) for data in dataset]
for future in tqdm(
concurrent.futures.as_completed(futures), total=len(dataset), desc="Evaluating Context Relevancy"
):
try:
scores.append(future.result())
except Exception as e:
print(f"Error during evaluation: {e}")
return np.mean(scores) if scores else 0.0 | --- +++ @@ -14,6 +14,9 @@
class ContextRelevance(BaseMetric):
+ """
+ Metric for evaluating the relevance of context in a dataset.
+ """
def __init__(self, config: Optional[ContextRelevanceConfig] = ContextRelevanceConfig()):
super().__init__(name=EvalMetric.CONTEXT_RELEVANCY.value)
@@ -25,9 +28,15 @@ self._sbd = pysbd.Segmenter(language=self.config.language, clean=False)
def _sentence_segmenter(self, text: str) -> list[str]:
+ """
+ Segments the given text into sentences.
+ """
return self._sbd.segment(text)
def _compute_score(self, data: EvalData) -> float:
+ """
+ Computes the context relevance score for a given data item.
+ """
original_context = "\n".join(data.contexts)
prompt = Template(self.config.prompt).substitute(context=original_context, question=data.question)
response = self.client.chat.completions.create(
@@ -42,6 +51,9 @@ return len(useful_context_sentences) / len(original_context_sentences)
def evaluate(self, dataset: list[EvalData]) -> float:
+ """
+ Evaluates the dataset and returns the average context relevance score.
+ """
scores = []
with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -54,4 +66,4 @@ except Exception as e:
print(f"Error during evaluation: {e}")
- return np.mean(scores) if scores else 0.0+ return np.mean(scores) if scores else 0.0
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/evaluation/metrics/context_relevancy.py |
Generate docstrings for this script | import hashlib
import logging
import os
from typing import Any, Optional
import requests
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
logger = logging.getLogger(__name__)
class NotionDocument:
def __init__(self, text: str, extra_info: dict[str, Any]):
self.text = text
self.extra_info = extra_info
class NotionPageLoader:
BLOCK_CHILD_URL_TMPL = "https://api.notion.com/v1/blocks/{block_id}/children"
def __init__(self, integration_token: Optional[str] = None) -> None:
if integration_token is None:
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
if integration_token is None:
raise ValueError(
"Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`."
)
self.token = integration_token
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
def _read_block(self, block_id: str, num_tabs: int = 0) -> str:
done = False
result_lines_arr = []
cur_block_id = block_id
while not done:
block_url = self.BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
res = requests.get(block_url, headers=self.headers)
data = res.json()
for result in data["results"]:
result_type = result["type"]
result_obj = result[result_type]
cur_result_text_arr = []
if "rich_text" in result_obj:
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
text = rich_text["text"]["content"]
prefix = "\t" * num_tabs
cur_result_text_arr.append(prefix + text)
result_block_id = result["id"]
has_children = result["has_children"]
if has_children:
children_text = self._read_block(result_block_id, num_tabs=num_tabs + 1)
cur_result_text_arr.append(children_text)
cur_result_text = "\n".join(cur_result_text_arr)
result_lines_arr.append(cur_result_text)
if data["next_cursor"] is None:
done = True
else:
cur_block_id = data["next_cursor"]
result_lines = "\n".join(result_lines_arr)
return result_lines
def load_data(self, page_ids: list[str]) -> list[NotionDocument]:
docs = []
for page_id in page_ids:
page_text = self._read_block(page_id)
docs.append(NotionDocument(text=page_text, extra_info={"page_id": page_id}))
return docs
@register_deserializable
class NotionLoader(BaseLoader):
def load_data(self, source):
id = source[-32:]
formatted_id = f"{id[:8]}-{id[8:12]}-{id[12:16]}-{id[16:20]}-{id[20:]}"
logger.debug(f"Extracted notion page id as: {formatted_id}")
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
reader = NotionPageLoader(integration_token=integration_token)
documents = reader.load_data(page_ids=[formatted_id])
raw_text = documents[0].text
text = clean_string(raw_text)
doc_id = hashlib.sha256((text + source).encode()).hexdigest()
return {
"doc_id": doc_id,
"data": [
{
"content": text,
"meta_data": {"url": f"notion-{formatted_id}"},
}
],
} | --- +++ @@ -13,6 +13,9 @@
class NotionDocument:
+ """
+ A simple Document class to hold the text and additional information of a page.
+ """
def __init__(self, text: str, extra_info: dict[str, Any]):
self.text = text
@@ -20,10 +23,15 @@
class NotionPageLoader:
+ """
+ Notion Page Loader.
+ Reads a set of Notion pages.
+ """
BLOCK_CHILD_URL_TMPL = "https://api.notion.com/v1/blocks/{block_id}/children"
def __init__(self, integration_token: Optional[str] = None) -> None:
+ """Initialize with Notion integration token."""
if integration_token is None:
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
if integration_token is None:
@@ -38,6 +46,7 @@ }
def _read_block(self, block_id: str, num_tabs: int = 0) -> str:
+ """Read a block from Notion."""
done = False
result_lines_arr = []
cur_block_id = block_id
@@ -76,6 +85,7 @@ return result_lines
def load_data(self, page_ids: list[str]) -> list[NotionDocument]:
+ """Load data from the given list of page IDs."""
docs = []
for page_id in page_ids:
page_text = self._read_block(page_id)
@@ -86,6 +96,7 @@ @register_deserializable
class NotionLoader(BaseLoader):
def load_data(self, source):
+ """Load data from a Notion URL."""
id = source[-32:]
formatted_id = f"{id[:8]}-{id[8:12]}-{id[12:16]}-{id[16:20]}-{id[20:]}"
@@ -107,4 +118,4 @@ "meta_data": {"url": f"notion-{formatted_id}"},
}
],
- }+ }
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/loaders/notion.py |
Generate docstrings for exported functions | from enum import Enum
class DirectDataType(Enum):
TEXT = "text"
class IndirectDataType(Enum):
YOUTUBE_VIDEO = "youtube_video"
PDF_FILE = "pdf_file"
WEB_PAGE = "web_page"
SITEMAP = "sitemap"
XML = "xml"
DOCX = "docx"
DOCS_SITE = "docs_site"
NOTION = "notion"
CSV = "csv"
MDX = "mdx"
IMAGE = "image"
UNSTRUCTURED = "unstructured"
JSON = "json"
OPENAPI = "openapi"
GMAIL = "gmail"
SUBSTACK = "substack"
YOUTUBE_CHANNEL = "youtube_channel"
DISCORD = "discord"
CUSTOM = "custom"
RSSFEED = "rss_feed"
BEEHIIV = "beehiiv"
GOOGLE_DRIVE = "google_drive"
DIRECTORY = "directory"
SLACK = "slack"
DROPBOX = "dropbox"
TEXT_FILE = "text_file"
EXCEL_FILE = "excel_file"
AUDIO = "audio"
class SpecialDataType(Enum):
QNA_PAIR = "qna_pair"
class DataType(Enum):
TEXT = DirectDataType.TEXT.value
YOUTUBE_VIDEO = IndirectDataType.YOUTUBE_VIDEO.value
PDF_FILE = IndirectDataType.PDF_FILE.value
WEB_PAGE = IndirectDataType.WEB_PAGE.value
SITEMAP = IndirectDataType.SITEMAP.value
XML = IndirectDataType.XML.value
DOCX = IndirectDataType.DOCX.value
DOCS_SITE = IndirectDataType.DOCS_SITE.value
NOTION = IndirectDataType.NOTION.value
CSV = IndirectDataType.CSV.value
MDX = IndirectDataType.MDX.value
QNA_PAIR = SpecialDataType.QNA_PAIR.value
IMAGE = IndirectDataType.IMAGE.value
UNSTRUCTURED = IndirectDataType.UNSTRUCTURED.value
JSON = IndirectDataType.JSON.value
OPENAPI = IndirectDataType.OPENAPI.value
GMAIL = IndirectDataType.GMAIL.value
SUBSTACK = IndirectDataType.SUBSTACK.value
YOUTUBE_CHANNEL = IndirectDataType.YOUTUBE_CHANNEL.value
DISCORD = IndirectDataType.DISCORD.value
CUSTOM = IndirectDataType.CUSTOM.value
RSSFEED = IndirectDataType.RSSFEED.value
BEEHIIV = IndirectDataType.BEEHIIV.value
GOOGLE_DRIVE = IndirectDataType.GOOGLE_DRIVE.value
DIRECTORY = IndirectDataType.DIRECTORY.value
SLACK = IndirectDataType.SLACK.value
DROPBOX = IndirectDataType.DROPBOX.value
TEXT_FILE = IndirectDataType.TEXT_FILE.value
EXCEL_FILE = IndirectDataType.EXCEL_FILE.value
AUDIO = IndirectDataType.AUDIO.value | --- +++ @@ -2,11 +2,17 @@
class DirectDataType(Enum):
+ """
+ DirectDataType enum contains data types that contain raw data directly.
+ """
TEXT = "text"
class IndirectDataType(Enum):
+ """
+ IndirectDataType enum contains data types that contain references to data stored elsewhere.
+ """
YOUTUBE_VIDEO = "youtube_video"
PDF_FILE = "pdf_file"
@@ -39,6 +45,9 @@
class SpecialDataType(Enum):
+ """
+ SpecialDataType enum contains data types that are neither direct nor indirect, or simply require special attention.
+ """
QNA_PAIR = "qna_pair"
@@ -73,4 +82,4 @@ DROPBOX = IndirectDataType.DROPBOX.value
TEXT_FILE = IndirectDataType.TEXT_FILE.value
EXCEL_FILE = IndirectDataType.EXCEL_FILE.value
- AUDIO = IndirectDataType.AUDIO.value+ AUDIO = IndirectDataType.AUDIO.value
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/models/data_type.py |
Document functions with clear intent | import concurrent.futures
import hashlib
import logging
import re
import shlex
from typing import Any, Optional
from tqdm import tqdm
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
GITHUB_URL = "https://github.com"
GITHUB_API_URL = "https://api.github.com"
VALID_SEARCH_TYPES = set(["code", "repo", "pr", "issue", "discussion", "branch", "file"])
class GithubLoader(BaseLoader):
def __init__(self, config: Optional[dict[str, Any]] = None):
super().__init__()
if not config:
raise ValueError(
"GithubLoader requires a personal access token to use github api. Check - `https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic`" # noqa: E501
)
try:
from github import Github
except ImportError as e:
raise ValueError(
"GithubLoader requires extra dependencies. \
Install with `pip install gitpython==3.1.38 PyGithub==1.59.1`"
) from e
self.config = config
token = config.get("token")
if not token:
raise ValueError(
"GithubLoader requires a personal access token to use github api. Check - `https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic`" # noqa: E501
)
try:
self.client = Github(token)
except Exception as e:
logging.error(f"GithubLoader failed to initialize client: {e}")
self.client = None
def _github_search_code(self, query: str):
data = []
results = self.client.search_code(query)
for result in tqdm(results, total=results.totalCount, desc="Loading code files from github"):
url = result.html_url
logging.info(f"Added data from url: {url}")
content = result.decoded_content.decode("utf-8")
metadata = {
"url": url,
}
data.append(
{
"content": clean_string(content),
"meta_data": metadata,
}
)
return data
def _get_github_repo_data(self, repo_name: str, branch_name: str = None, file_path: str = None) -> list[dict]:
data = []
repo = self.client.get_repo(repo_name)
repo_contents = repo.get_contents("")
if branch_name:
repo_contents = repo.get_contents("", ref=branch_name)
if file_path:
repo_contents = [repo.get_contents(file_path)]
with tqdm(desc="Loading files:", unit="item") as progress_bar:
while repo_contents:
file_content = repo_contents.pop(0)
if file_content.type == "dir":
try:
repo_contents.extend(repo.get_contents(file_content.path))
except Exception:
logging.warning(f"Failed to read directory: {file_content.path}")
progress_bar.update(1)
continue
else:
try:
file_text = file_content.decoded_content.decode()
except Exception:
logging.warning(f"Failed to read file: {file_content.path}")
progress_bar.update(1)
continue
file_path = file_content.path
data.append(
{
"content": clean_string(file_text),
"meta_data": {
"path": file_path,
},
}
)
progress_bar.update(1)
return data
def _github_search_repo(self, query: str) -> list[dict]:
logging.info(f"Searching github repos with query: {query}")
updated_query = query.split(":")[-1]
data = self._get_github_repo_data(updated_query)
return data
def _github_search_issues_and_pr(self, query: str, type: str) -> list[dict]:
data = []
query = f"{query} is:{type}"
logging.info(f"Searching github for query: {query}")
results = self.client.search_issues(query)
logging.info(f"Total results: {results.totalCount}")
for result in tqdm(results, total=results.totalCount, desc=f"Loading {type} from github"):
url = result.html_url
title = result.title
body = result.body
if not body:
logging.warning(f"Skipping issue because empty content for: {url}")
continue
labels = " ".join([label.name for label in result.labels])
issue_comments = result.get_comments()
comments = []
comments_created_at = []
for comment in issue_comments:
comments_created_at.append(str(comment.created_at))
comments.append(f"{comment.user.name}:{comment.body}")
content = "\n".join([title, labels, body, *comments])
metadata = {
"url": url,
"created_at": str(result.created_at),
"comments_created_at": " ".join(comments_created_at),
}
data.append(
{
"content": clean_string(content),
"meta_data": metadata,
}
)
return data
# need to test more for discussion
def _github_search_discussions(self, query: str):
data = []
query = f"{query} is:discussion"
logging.info(f"Searching github repo for query: {query}")
repos_results = self.client.search_repositories(query)
logging.info(f"Total repos found: {repos_results.totalCount}")
for repo_result in tqdm(repos_results, total=repos_results.totalCount, desc="Loading discussions from github"):
teams = repo_result.get_teams()
for team in teams:
team_discussions = team.get_discussions()
for discussion in team_discussions:
url = discussion.html_url
title = discussion.title
body = discussion.body
if not body:
logging.warning(f"Skipping discussion because empty content for: {url}")
continue
comments = []
comments_created_at = []
print("Discussion comments: ", discussion.comments_url)
content = "\n".join([title, body, *comments])
metadata = {
"url": url,
"created_at": str(discussion.created_at),
"comments_created_at": " ".join(comments_created_at),
}
data.append(
{
"content": clean_string(content),
"meta_data": metadata,
}
)
return data
def _get_github_repo_branch(self, query: str, type: str) -> list[dict]:
logging.info(f"Searching github repo for query: {query} is:{type}")
pattern = r"repo:(\S+) name:(\S+)"
match = re.search(pattern, query)
if match:
repo_name = match.group(1)
branch_name = match.group(2)
else:
raise ValueError(
f"Repository name and Branch name not found, instead found this \
Repo: {repo_name}, Branch: {branch_name}"
)
data = self._get_github_repo_data(repo_name=repo_name, branch_name=branch_name)
return data
def _get_github_repo_file(self, query: str, type: str) -> list[dict]:
logging.info(f"Searching github repo for query: {query} is:{type}")
pattern = r"repo:(\S+) path:(\S+)"
match = re.search(pattern, query)
if match:
repo_name = match.group(1)
file_path = match.group(2)
else:
raise ValueError(
f"Repository name and File name not found, instead found this Repo: {repo_name}, File: {file_path}"
)
data = self._get_github_repo_data(repo_name=repo_name, file_path=file_path)
return data
def _search_github_data(self, search_type: str, query: str):
if search_type == "code":
data = self._github_search_code(query)
elif search_type == "repo":
data = self._github_search_repo(query)
elif search_type == "issue":
data = self._github_search_issues_and_pr(query, search_type)
elif search_type == "pr":
data = self._github_search_issues_and_pr(query, search_type)
elif search_type == "branch":
data = self._get_github_repo_branch(query, search_type)
elif search_type == "file":
data = self._get_github_repo_file(query, search_type)
elif search_type == "discussion":
raise ValueError("GithubLoader does not support searching discussions yet.")
else:
raise NotImplementedError(f"{search_type} not supported")
return data
@staticmethod
def _get_valid_github_query(query: str):
query_terms = shlex.split(query)
# query must provide repo to load data from
if len(query_terms) < 1 or "repo:" not in query:
raise ValueError(
"GithubLoader requires a search query with `repo:` term. Refer docs - `https://docs.embedchain.ai/data-sources/github`" # noqa: E501
)
github_query = []
types = set()
type_pattern = r"type:([a-zA-Z,]+)"
for term in query_terms:
term_match = re.search(type_pattern, term)
if term_match:
search_types = term_match.group(1).split(",")
types.update(search_types)
else:
github_query.append(term)
# query must provide search type
if len(types) == 0:
raise ValueError(
"GithubLoader requires a search query with `type:` term. Refer docs - `https://docs.embedchain.ai/data-sources/github`" # noqa: E501
)
for search_type in search_types:
if search_type not in VALID_SEARCH_TYPES:
raise ValueError(
f"Invalid search type: {search_type}. Valid types are: {', '.join(VALID_SEARCH_TYPES)}"
)
query = " ".join(github_query)
return types, query
def load_data(self, search_query: str, max_results: int = 1000):
if not self.client:
raise ValueError(
"GithubLoader client is not initialized, data will not be loaded. Refer docs - `https://docs.embedchain.ai/data-sources/github`" # noqa: E501
)
search_types, query = self._get_valid_github_query(search_query)
logging.info(f"Searching github for query: {query}, with types: {', '.join(search_types)}")
data = []
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
futures_map = executor.map(self._search_github_data, search_types, [query] * len(search_types))
for search_data in tqdm(futures_map, total=len(search_types), desc="Searching data from github"):
data.extend(search_data)
return {
"doc_id": hashlib.sha256(query.encode()).hexdigest(),
"data": data,
} | --- +++ @@ -17,6 +17,7 @@
class GithubLoader(BaseLoader):
+ """Load data from GitHub search query."""
def __init__(self, config: Optional[dict[str, Any]] = None):
super().__init__()
@@ -47,6 +48,7 @@ self.client = None
def _github_search_code(self, query: str):
+ """Search GitHub code."""
data = []
results = self.client.search_code(query)
for result in tqdm(results, total=results.totalCount, desc="Loading code files from github"):
@@ -65,6 +67,7 @@ return data
def _get_github_repo_data(self, repo_name: str, branch_name: str = None, file_path: str = None) -> list[dict]:
+ """Get file contents from Repo"""
data = []
repo = self.client.get_repo(repo_name)
@@ -108,6 +111,7 @@ return data
def _github_search_repo(self, query: str) -> list[dict]:
+ """Search GitHub repo."""
logging.info(f"Searching github repos with query: {query}")
updated_query = query.split(":")[-1]
@@ -115,6 +119,7 @@ return data
def _github_search_issues_and_pr(self, query: str, type: str) -> list[dict]:
+ """Search GitHub issues and PRs."""
data = []
query = f"{query} is:{type}"
@@ -153,6 +158,7 @@
# need to test more for discussion
def _github_search_discussions(self, query: str):
+ """Search GitHub discussions."""
data = []
query = f"{query} is:discussion"
@@ -188,6 +194,7 @@ return data
def _get_github_repo_branch(self, query: str, type: str) -> list[dict]:
+ """Get file contents for specific branch"""
logging.info(f"Searching github repo for query: {query} is:{type}")
pattern = r"repo:(\S+) name:(\S+)"
@@ -206,6 +213,7 @@ return data
def _get_github_repo_file(self, query: str, type: str) -> list[dict]:
+ """Get specific file content"""
logging.info(f"Searching github repo for query: {query} is:{type}")
pattern = r"repo:(\S+) path:(\S+)"
@@ -223,6 +231,7 @@ return data
def _search_github_data(self, search_type: str, query: str):
+ """Search github data."""
if search_type == "code":
data = self._github_search_code(query)
elif search_type == "repo":
@@ -244,6 +253,7 @@
@staticmethod
def _get_valid_github_query(query: str):
+ """Check if query is valid and return search types and valid GitHub query."""
query_terms = shlex.split(query)
# query must provide repo to load data from
if len(query_terms) < 1 or "repo:" not in query:
@@ -279,6 +289,7 @@ return types, query
def load_data(self, search_query: str, max_results: int = 1000):
+ """Load data from GitHub search query."""
if not self.client:
raise ValueError(
@@ -298,4 +309,4 @@ return {
"doc_id": hashlib.sha256(query.encode()).hexdigest(),
"data": data,
- }+ }
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/loaders/github.py |
Generate NumPy-style docstrings | import logging
import os
from collections.abc import Generator
from typing import Any, Optional
from langchain.schema import BaseMessage as LCBaseMessage
from embedchain.config import BaseLlmConfig
from embedchain.config.llm.base import (
DEFAULT_PROMPT,
DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE,
DEFAULT_PROMPT_WITH_MEM0_MEMORY_TEMPLATE,
DOCS_SITE_PROMPT_TEMPLATE,
)
from embedchain.constants import SQLITE_PATH
from embedchain.core.db.database import init_db, setup_engine
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.memory.base import ChatHistory
from embedchain.memory.message import ChatMessage
logger = logging.getLogger(__name__)
class BaseLlm(JSONSerializable):
def __init__(self, config: Optional[BaseLlmConfig] = None):
if config is None:
self.config = BaseLlmConfig()
else:
self.config = config
# Initialize the metadata db for the app here since llmfactory needs it for initialization of
# the llm memory
setup_engine(database_uri=os.environ.get("EMBEDCHAIN_DB_URI", f"sqlite:///{SQLITE_PATH}"))
init_db()
self.memory = ChatHistory()
self.is_docs_site_instance = False
self.history: Any = None
def get_llm_model_answer(self):
raise NotImplementedError
def set_history(self, history: Any):
self.history = history
def update_history(self, app_id: str, session_id: str = "default"):
chat_history = self.memory.get(app_id=app_id, session_id=session_id, num_rounds=10)
self.set_history([str(history) for history in chat_history])
def add_history(
self,
app_id: str,
question: str,
answer: str,
metadata: Optional[dict[str, Any]] = None,
session_id: str = "default",
):
chat_message = ChatMessage()
chat_message.add_user_message(question, metadata=metadata)
chat_message.add_ai_message(answer, metadata=metadata)
self.memory.add(app_id=app_id, chat_message=chat_message, session_id=session_id)
self.update_history(app_id=app_id, session_id=session_id)
def _format_history(self) -> str:
return "\n".join(self.history)
def _format_memories(self, memories: list[dict]) -> str:
return "\n".join([memory["text"] for memory in memories])
def generate_prompt(self, input_query: str, contexts: list[str], **kwargs: dict[str, Any]) -> str:
context_string = " | ".join(contexts)
web_search_result = kwargs.get("web_search_result", "")
memories = kwargs.get("memories", None)
if web_search_result:
context_string = self._append_search_and_context(context_string, web_search_result)
prompt_contains_history = self.config._validate_prompt_history(self.config.prompt)
if prompt_contains_history:
prompt = self.config.prompt.substitute(
context=context_string, query=input_query, history=self._format_history() or "No history"
)
elif self.history and not prompt_contains_history:
# History is present, but not included in the prompt.
# check if it's the default prompt without history
if (
not self.config._validate_prompt_history(self.config.prompt)
and self.config.prompt.template == DEFAULT_PROMPT
):
if memories:
# swap in the template with Mem0 memory template
prompt = DEFAULT_PROMPT_WITH_MEM0_MEMORY_TEMPLATE.substitute(
context=context_string,
query=input_query,
history=self._format_history(),
memories=self._format_memories(memories),
)
else:
# swap in the template with history
prompt = DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE.substitute(
context=context_string, query=input_query, history=self._format_history()
)
else:
# If we can't swap in the default, we still proceed but tell users that the history is ignored.
logger.warning(
"Your bot contains a history, but prompt does not include `$history` key. History is ignored."
)
prompt = self.config.prompt.substitute(context=context_string, query=input_query)
else:
# basic use case, no history.
prompt = self.config.prompt.substitute(context=context_string, query=input_query)
return prompt
@staticmethod
def _append_search_and_context(context: str, web_search_result: str) -> str:
return f"{context}\nWeb Search Result: {web_search_result}"
def get_answer_from_llm(self, prompt: str):
return self.get_llm_model_answer(prompt)
@staticmethod
def access_search_and_get_results(input_query: str):
try:
from langchain.tools import DuckDuckGoSearchRun
except ImportError:
raise ImportError(
"Searching requires extra dependencies. Install with `pip install duckduckgo-search==6.1.5`"
) from None
search = DuckDuckGoSearchRun()
logger.info(f"Access search to get answers for {input_query}")
return search.run(input_query)
@staticmethod
def _stream_response(answer: Any, token_info: Optional[dict[str, Any]] = None) -> Generator[Any, Any, None]:
streamed_answer = ""
for chunk in answer:
streamed_answer = streamed_answer + chunk
yield chunk
logger.info(f"Answer: {streamed_answer}")
if token_info:
logger.info(f"Token Info: {token_info}")
def query(self, input_query: str, contexts: list[str], config: BaseLlmConfig = None, dry_run=False, memories=None):
try:
if config:
# A config instance passed to this method will only be applied temporarily, for one call.
# So we will save the previous config and restore it at the end of the execution.
# For this we use the serializer.
prev_config = self.config.serialize()
self.config = config
if config is not None and config.query_type == "Images":
return contexts
if self.is_docs_site_instance:
self.config.prompt = DOCS_SITE_PROMPT_TEMPLATE
self.config.number_documents = 5
k = {}
if self.config.online:
k["web_search_result"] = self.access_search_and_get_results(input_query)
k["memories"] = memories
prompt = self.generate_prompt(input_query, contexts, **k)
logger.info(f"Prompt: {prompt}")
if dry_run:
return prompt
if self.config.token_usage:
answer, token_info = self.get_answer_from_llm(prompt)
else:
answer = self.get_answer_from_llm(prompt)
if isinstance(answer, str):
logger.info(f"Answer: {answer}")
if self.config.token_usage:
return answer, token_info
return answer
else:
if self.config.token_usage:
return self._stream_response(answer, token_info)
return self._stream_response(answer)
finally:
if config:
# Restore previous config
self.config: BaseLlmConfig = BaseLlmConfig.deserialize(prev_config)
def chat(
self, input_query: str, contexts: list[str], config: BaseLlmConfig = None, dry_run=False, session_id: str = None
):
try:
if config:
# A config instance passed to this method will only be applied temporarily, for one call.
# So we will save the previous config and restore it at the end of the execution.
# For this we use the serializer.
prev_config = self.config.serialize()
self.config = config
if self.is_docs_site_instance:
self.config.prompt = DOCS_SITE_PROMPT_TEMPLATE
self.config.number_documents = 5
k = {}
if self.config.online:
k["web_search_result"] = self.access_search_and_get_results(input_query)
prompt = self.generate_prompt(input_query, contexts, **k)
logger.info(f"Prompt: {prompt}")
if dry_run:
return prompt
answer, token_info = self.get_answer_from_llm(prompt)
if isinstance(answer, str):
logger.info(f"Answer: {answer}")
return answer, token_info
else:
# this is a streamed response and needs to be handled differently.
return self._stream_response(answer, token_info)
finally:
if config:
# Restore previous config
self.config: BaseLlmConfig = BaseLlmConfig.deserialize(prev_config)
@staticmethod
def _get_messages(prompt: str, system_prompt: Optional[str] = None) -> list[LCBaseMessage]:
from langchain.schema import HumanMessage, SystemMessage
messages = []
if system_prompt:
messages.append(SystemMessage(content=system_prompt))
messages.append(HumanMessage(content=prompt))
return messages | --- +++ @@ -23,6 +23,11 @@
class BaseLlm(JSONSerializable):
def __init__(self, config: Optional[BaseLlmConfig] = None):
+ """Initialize a base LLM class
+
+ :param config: LLM configuration option class, defaults to None
+ :type config: Optional[BaseLlmConfig], optional
+ """
if config is None:
self.config = BaseLlmConfig()
else:
@@ -38,12 +43,23 @@ self.history: Any = None
def get_llm_model_answer(self):
+ """
+ Usually implemented by child class
+ """
raise NotImplementedError
def set_history(self, history: Any):
+ """
+ Provide your own history.
+ Especially interesting for the query method, which does not internally manage conversation history.
+
+ :param history: History to set
+ :type history: Any
+ """
self.history = history
def update_history(self, app_id: str, session_id: str = "default"):
+ """Update class history attribute with history in memory (for chat method)"""
chat_history = self.memory.get(app_id=app_id, session_id=session_id, num_rounds=10)
self.set_history([str(history) for history in chat_history])
@@ -62,12 +78,35 @@ self.update_history(app_id=app_id, session_id=session_id)
def _format_history(self) -> str:
+ """Format history to be used in prompt
+
+ :return: Formatted history
+ :rtype: str
+ """
return "\n".join(self.history)
def _format_memories(self, memories: list[dict]) -> str:
+ """Format memories to be used in prompt
+
+ :param memories: Memories to format
+ :type memories: list[dict]
+ :return: Formatted memories
+ :rtype: str
+ """
return "\n".join([memory["text"] for memory in memories])
def generate_prompt(self, input_query: str, contexts: list[str], **kwargs: dict[str, Any]) -> str:
+ """
+ Generates a prompt based on the given query and context, ready to be
+ passed to an LLM
+
+ :param input_query: The query to use.
+ :type input_query: str
+ :param contexts: List of similar documents to the query used as context.
+ :type contexts: list[str]
+ :return: The prompt
+ :rtype: str
+ """
context_string = " | ".join(contexts)
web_search_result = kwargs.get("web_search_result", "")
memories = kwargs.get("memories", None)
@@ -112,13 +151,39 @@
@staticmethod
def _append_search_and_context(context: str, web_search_result: str) -> str:
+ """Append web search context to existing context
+
+ :param context: Existing context
+ :type context: str
+ :param web_search_result: Web search result
+ :type web_search_result: str
+ :return: Concatenated web search result
+ :rtype: str
+ """
return f"{context}\nWeb Search Result: {web_search_result}"
def get_answer_from_llm(self, prompt: str):
+ """
+ Gets an answer based on the given query and context by passing it
+ to an LLM.
+
+ :param prompt: Gets an answer based on the given query and context by passing it to an LLM.
+ :type prompt: str
+ :return: The answer.
+ :rtype: _type_
+ """
return self.get_llm_model_answer(prompt)
@staticmethod
def access_search_and_get_results(input_query: str):
+ """
+ Search the internet for additional context
+
+ :param input_query: search query
+ :type input_query: str
+ :return: Search results
+ :rtype: Unknown
+ """
try:
from langchain.tools import DuckDuckGoSearchRun
except ImportError:
@@ -131,6 +196,13 @@
@staticmethod
def _stream_response(answer: Any, token_info: Optional[dict[str, Any]] = None) -> Generator[Any, Any, None]:
+ """Generator to be used as streaming response
+
+ :param answer: Answer chunk from llm
+ :type answer: Any
+ :yield: Answer chunk from llm
+ :rtype: Generator[Any, Any, None]
+ """
streamed_answer = ""
for chunk in answer:
streamed_answer = streamed_answer + chunk
@@ -140,6 +212,24 @@ logger.info(f"Token Info: {token_info}")
def query(self, input_query: str, contexts: list[str], config: BaseLlmConfig = None, dry_run=False, memories=None):
+ """
+ Queries the vector database based on the given input query.
+ Gets relevant doc based on the query and then passes it to an
+ LLM as context to get the answer.
+
+ :param input_query: The query to use.
+ :type input_query: str
+ :param contexts: Embeddings retrieved from the database to be used as context.
+ :type contexts: list[str]
+ :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
+ To persistently use a config, declare it during app init., defaults to None
+ :type config: Optional[BaseLlmConfig], optional
+ :param dry_run: A dry run does everything except send the resulting prompt to
+ the LLM. The purpose is to test the prompt, not the response., defaults to False
+ :type dry_run: bool, optional
+ :return: The answer to the query or the dry run result
+ :rtype: str
+ """
try:
if config:
# A config instance passed to this method will only be applied temporarily, for one call.
@@ -184,6 +274,28 @@ def chat(
self, input_query: str, contexts: list[str], config: BaseLlmConfig = None, dry_run=False, session_id: str = None
):
+ """
+ Queries the vector database on the given input query.
+ Gets relevant doc based on the query and then passes it to an
+ LLM as context to get the answer.
+
+ Maintains the whole conversation in memory.
+
+ :param input_query: The query to use.
+ :type input_query: str
+ :param contexts: Embeddings retrieved from the database to be used as context.
+ :type contexts: list[str]
+ :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
+ To persistently use a config, declare it during app init., defaults to None
+ :type config: Optional[BaseLlmConfig], optional
+ :param dry_run: A dry run does everything except send the resulting prompt to
+ the LLM. The purpose is to test the prompt, not the response., defaults to False
+ :type dry_run: bool, optional
+ :param session_id: Session ID to use for the conversation, defaults to None
+ :type session_id: str, optional
+ :return: The answer to the query or the dry run result
+ :rtype: str
+ """
try:
if config:
# A config instance passed to this method will only be applied temporarily, for one call.
@@ -219,10 +331,20 @@
@staticmethod
def _get_messages(prompt: str, system_prompt: Optional[str] = None) -> list[LCBaseMessage]:
+ """
+ Construct a list of langchain messages
+
+ :param prompt: User prompt
+ :type prompt: str
+ :param system_prompt: System prompt, defaults to None
+ :type system_prompt: Optional[str], optional
+ :return: List of messages
+ :rtype: list[BaseMessage]
+ """
from langchain.schema import HumanMessage, SystemMessage
messages = []
if system_prompt:
messages.append(SystemMessage(content=system_prompt))
messages.append(HumanMessage(content=prompt))
- return messages+ return messages
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/llm/base.py |
Fill in missing docstrings in my code | import logging
import time
from typing import Any, Optional, Union
from tqdm import tqdm
try:
from opensearchpy import OpenSearch
from opensearchpy.helpers import bulk
except ImportError:
raise ImportError(
"OpenSearch requires extra dependencies. Install with `pip install --upgrade embedchain[opensearch]`"
) from None
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import OpenSearchVectorSearch
from embedchain.config import OpenSearchDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
@register_deserializable
class OpenSearchDB(BaseVectorDB):
def __init__(self, config: OpenSearchDBConfig):
if config is None:
raise ValueError("OpenSearchDBConfig is required")
self.config = config
self.batch_size = self.config.batch_size
self.client = OpenSearch(
hosts=[self.config.opensearch_url],
http_auth=self.config.http_auth,
**self.config.extra_params,
)
info = self.client.info()
logger.info(f"Connected to {info['version']['distribution']}. Version: {info['version']['number']}")
# Remove auth credentials from config after successful connection
super().__init__(config=self.config)
def _initialize(self):
logger.info(self.client.info())
index_name = self._get_index()
if self.client.indices.exists(index=index_name):
print(f"Index '{index_name}' already exists.")
return
index_body = {
"settings": {"knn": True},
"mappings": {
"properties": {
"text": {"type": "text"},
"embeddings": {
"type": "knn_vector",
"index": False,
"dimension": self.config.vector_dimension,
},
}
},
}
self.client.indices.create(index_name, body=index_body)
print(self.client.indices.get(index_name))
def _get_or_create_db(self):
return self.client
def _get_or_create_collection(self, name):
def get(
self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None
) -> set[str]:
query = {}
if ids:
query["query"] = {"bool": {"must": [{"ids": {"values": ids}}]}}
else:
query["query"] = {"bool": {"must": []}}
if where:
for key, value in where.items():
query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
# OpenSearch syntax is different from Elasticsearch
response = self.client.search(index=self._get_index(), body=query, _source=True, size=limit)
docs = response["hits"]["hits"]
ids = [doc["_id"] for doc in docs]
doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
# Result is modified for compatibility with other vector databases
# TODO: Add method in vector database to return result in a standard format
result = {"ids": ids, "metadatas": []}
for doc_id in doc_ids:
result["metadatas"].append({"doc_id": doc_id})
return result
def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
embeddings = self.embedder.embedding_fn(documents)
for batch_start in tqdm(range(0, len(documents), self.batch_size), desc="Inserting batches in opensearch"):
batch_end = batch_start + self.batch_size
batch_documents = documents[batch_start:batch_end]
batch_embeddings = embeddings[batch_start:batch_end]
# Create document entries for bulk upload
batch_entries = [
{
"_index": self._get_index(),
"_id": doc_id,
"_source": {"text": text, "metadata": metadata, "embeddings": embedding},
}
for doc_id, text, metadata, embedding in zip(
ids[batch_start:batch_end], batch_documents, metadatas[batch_start:batch_end], batch_embeddings
)
]
# Perform bulk operation
bulk(self.client, batch_entries, **kwargs)
self.client.indices.refresh(index=self._get_index())
# Sleep to avoid rate limiting
time.sleep(0.1)
def query(
self,
input_query: str,
n_results: int,
where: dict[str, any],
citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, dict]], list[str]]:
embeddings = OpenAIEmbeddings()
docsearch = OpenSearchVectorSearch(
index_name=self._get_index(),
embedding_function=embeddings,
opensearch_url=f"{self.config.opensearch_url}",
http_auth=self.config.http_auth,
use_ssl=hasattr(self.config, "use_ssl") and self.config.use_ssl,
verify_certs=hasattr(self.config, "verify_certs") and self.config.verify_certs,
)
pre_filter = {"match_all": {}} # default
if len(where) > 0:
pre_filter = {"bool": {"must": []}}
for key, value in where.items():
pre_filter["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
docs = docsearch.similarity_search_with_score(
input_query,
search_type="script_scoring",
space_type="cosinesimil",
vector_field="embeddings",
text_field="text",
metadata_field="metadata",
pre_filter=pre_filter,
k=n_results,
**kwargs,
)
contexts = []
for doc, score in docs:
context = doc.page_content
if citations:
metadata = doc.metadata
metadata["score"] = score
contexts.append(tuple((context, metadata)))
else:
contexts.append(context)
return contexts
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def count(self) -> int:
query = {"query": {"match_all": {}}}
response = self.client.count(index=self._get_index(), body=query)
doc_count = response["count"]
return doc_count
def reset(self):
# Delete all data from the database
if self.client.indices.exists(index=self._get_index()):
# delete index in ES
self.client.indices.delete(index=self._get_index())
def delete(self, where):
query = {"query": {"bool": {"must": []}}}
for key, value in where.items():
query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
self.client.delete_by_query(index=self._get_index(), body=query)
def _get_index(self) -> str:
return self.config.collection_name | --- +++ @@ -1,196 +1,253 @@-import logging
-import time
-from typing import Any, Optional, Union
-
-from tqdm import tqdm
-
-try:
- from opensearchpy import OpenSearch
- from opensearchpy.helpers import bulk
-except ImportError:
- raise ImportError(
- "OpenSearch requires extra dependencies. Install with `pip install --upgrade embedchain[opensearch]`"
- ) from None
-
-from langchain_community.embeddings.openai import OpenAIEmbeddings
-from langchain_community.vectorstores import OpenSearchVectorSearch
-
-from embedchain.config import OpenSearchDBConfig
-from embedchain.helpers.json_serializable import register_deserializable
-from embedchain.vectordb.base import BaseVectorDB
-
-logger = logging.getLogger(__name__)
-
-
-@register_deserializable
-class OpenSearchDB(BaseVectorDB):
-
- def __init__(self, config: OpenSearchDBConfig):
- if config is None:
- raise ValueError("OpenSearchDBConfig is required")
- self.config = config
- self.batch_size = self.config.batch_size
- self.client = OpenSearch(
- hosts=[self.config.opensearch_url],
- http_auth=self.config.http_auth,
- **self.config.extra_params,
- )
- info = self.client.info()
- logger.info(f"Connected to {info['version']['distribution']}. Version: {info['version']['number']}")
- # Remove auth credentials from config after successful connection
- super().__init__(config=self.config)
-
- def _initialize(self):
- logger.info(self.client.info())
- index_name = self._get_index()
- if self.client.indices.exists(index=index_name):
- print(f"Index '{index_name}' already exists.")
- return
-
- index_body = {
- "settings": {"knn": True},
- "mappings": {
- "properties": {
- "text": {"type": "text"},
- "embeddings": {
- "type": "knn_vector",
- "index": False,
- "dimension": self.config.vector_dimension,
- },
- }
- },
- }
- self.client.indices.create(index_name, body=index_body)
- print(self.client.indices.get(index_name))
-
- def _get_or_create_db(self):
- return self.client
-
- def _get_or_create_collection(self, name):
-
- def get(
- self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None
- ) -> set[str]:
- query = {}
- if ids:
- query["query"] = {"bool": {"must": [{"ids": {"values": ids}}]}}
- else:
- query["query"] = {"bool": {"must": []}}
-
- if where:
- for key, value in where.items():
- query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
-
- # OpenSearch syntax is different from Elasticsearch
- response = self.client.search(index=self._get_index(), body=query, _source=True, size=limit)
- docs = response["hits"]["hits"]
- ids = [doc["_id"] for doc in docs]
- doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
-
- # Result is modified for compatibility with other vector databases
- # TODO: Add method in vector database to return result in a standard format
- result = {"ids": ids, "metadatas": []}
-
- for doc_id in doc_ids:
- result["metadatas"].append({"doc_id": doc_id})
- return result
-
- def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
-
- embeddings = self.embedder.embedding_fn(documents)
- for batch_start in tqdm(range(0, len(documents), self.batch_size), desc="Inserting batches in opensearch"):
- batch_end = batch_start + self.batch_size
- batch_documents = documents[batch_start:batch_end]
- batch_embeddings = embeddings[batch_start:batch_end]
-
- # Create document entries for bulk upload
- batch_entries = [
- {
- "_index": self._get_index(),
- "_id": doc_id,
- "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
- }
- for doc_id, text, metadata, embedding in zip(
- ids[batch_start:batch_end], batch_documents, metadatas[batch_start:batch_end], batch_embeddings
- )
- ]
-
- # Perform bulk operation
- bulk(self.client, batch_entries, **kwargs)
- self.client.indices.refresh(index=self._get_index())
-
- # Sleep to avoid rate limiting
- time.sleep(0.1)
-
- def query(
- self,
- input_query: str,
- n_results: int,
- where: dict[str, any],
- citations: bool = False,
- **kwargs: Optional[dict[str, Any]],
- ) -> Union[list[tuple[str, dict]], list[str]]:
- embeddings = OpenAIEmbeddings()
- docsearch = OpenSearchVectorSearch(
- index_name=self._get_index(),
- embedding_function=embeddings,
- opensearch_url=f"{self.config.opensearch_url}",
- http_auth=self.config.http_auth,
- use_ssl=hasattr(self.config, "use_ssl") and self.config.use_ssl,
- verify_certs=hasattr(self.config, "verify_certs") and self.config.verify_certs,
- )
-
- pre_filter = {"match_all": {}} # default
- if len(where) > 0:
- pre_filter = {"bool": {"must": []}}
- for key, value in where.items():
- pre_filter["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
-
- docs = docsearch.similarity_search_with_score(
- input_query,
- search_type="script_scoring",
- space_type="cosinesimil",
- vector_field="embeddings",
- text_field="text",
- metadata_field="metadata",
- pre_filter=pre_filter,
- k=n_results,
- **kwargs,
- )
-
- contexts = []
- for doc, score in docs:
- context = doc.page_content
- if citations:
- metadata = doc.metadata
- metadata["score"] = score
- contexts.append(tuple((context, metadata)))
- else:
- contexts.append(context)
- return contexts
-
- def set_collection_name(self, name: str):
- if not isinstance(name, str):
- raise TypeError("Collection name must be a string")
- self.config.collection_name = name
-
- def count(self) -> int:
- query = {"query": {"match_all": {}}}
- response = self.client.count(index=self._get_index(), body=query)
- doc_count = response["count"]
- return doc_count
-
- def reset(self):
- # Delete all data from the database
- if self.client.indices.exists(index=self._get_index()):
- # delete index in ES
- self.client.indices.delete(index=self._get_index())
-
- def delete(self, where):
- query = {"query": {"bool": {"must": []}}}
- for key, value in where.items():
- query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
- self.client.delete_by_query(index=self._get_index(), body=query)
-
- def _get_index(self) -> str:
- return self.config.collection_name+import logging
+import time
+from typing import Any, Optional, Union
+
+from tqdm import tqdm
+
+try:
+ from opensearchpy import OpenSearch
+ from opensearchpy.helpers import bulk
+except ImportError:
+ raise ImportError(
+ "OpenSearch requires extra dependencies. Install with `pip install --upgrade embedchain[opensearch]`"
+ ) from None
+
+from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.vectorstores import OpenSearchVectorSearch
+
+from embedchain.config import OpenSearchDBConfig
+from embedchain.helpers.json_serializable import register_deserializable
+from embedchain.vectordb.base import BaseVectorDB
+
+logger = logging.getLogger(__name__)
+
+
+@register_deserializable
+class OpenSearchDB(BaseVectorDB):
+ """
+ OpenSearch as vector database
+ """
+
+ def __init__(self, config: OpenSearchDBConfig):
+ """OpenSearch as vector database.
+
+ :param config: OpenSearch domain config
+ :type config: OpenSearchDBConfig
+ """
+ if config is None:
+ raise ValueError("OpenSearchDBConfig is required")
+ self.config = config
+ self.batch_size = self.config.batch_size
+ self.client = OpenSearch(
+ hosts=[self.config.opensearch_url],
+ http_auth=self.config.http_auth,
+ **self.config.extra_params,
+ )
+ info = self.client.info()
+ logger.info(f"Connected to {info['version']['distribution']}. Version: {info['version']['number']}")
+ # Remove auth credentials from config after successful connection
+ super().__init__(config=self.config)
+
+ def _initialize(self):
+ logger.info(self.client.info())
+ index_name = self._get_index()
+ if self.client.indices.exists(index=index_name):
+ print(f"Index '{index_name}' already exists.")
+ return
+
+ index_body = {
+ "settings": {"knn": True},
+ "mappings": {
+ "properties": {
+ "text": {"type": "text"},
+ "embeddings": {
+ "type": "knn_vector",
+ "index": False,
+ "dimension": self.config.vector_dimension,
+ },
+ }
+ },
+ }
+ self.client.indices.create(index_name, body=index_body)
+ print(self.client.indices.get(index_name))
+
+ def _get_or_create_db(self):
+ """Called during initialization"""
+ return self.client
+
+ def _get_or_create_collection(self, name):
+ """Note: nothing to return here. Discuss later"""
+
+ def get(
+ self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None
+ ) -> set[str]:
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: _list of doc ids to check for existence
+ :type ids: list[str]
+ :param where: to filter data
+ :type where: dict[str, any]
+ :return: ids
+ :type: set[str]
+ """
+ query = {}
+ if ids:
+ query["query"] = {"bool": {"must": [{"ids": {"values": ids}}]}}
+ else:
+ query["query"] = {"bool": {"must": []}}
+
+ if where:
+ for key, value in where.items():
+ query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
+
+ # OpenSearch syntax is different from Elasticsearch
+ response = self.client.search(index=self._get_index(), body=query, _source=True, size=limit)
+ docs = response["hits"]["hits"]
+ ids = [doc["_id"] for doc in docs]
+ doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
+
+ # Result is modified for compatibility with other vector databases
+ # TODO: Add method in vector database to return result in a standard format
+ result = {"ids": ids, "metadatas": []}
+
+ for doc_id in doc_ids:
+ result["metadatas"].append({"doc_id": doc_id})
+ return result
+
+ def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
+ """Adds documents to the opensearch index"""
+
+ embeddings = self.embedder.embedding_fn(documents)
+ for batch_start in tqdm(range(0, len(documents), self.batch_size), desc="Inserting batches in opensearch"):
+ batch_end = batch_start + self.batch_size
+ batch_documents = documents[batch_start:batch_end]
+ batch_embeddings = embeddings[batch_start:batch_end]
+
+ # Create document entries for bulk upload
+ batch_entries = [
+ {
+ "_index": self._get_index(),
+ "_id": doc_id,
+ "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
+ }
+ for doc_id, text, metadata, embedding in zip(
+ ids[batch_start:batch_end], batch_documents, metadatas[batch_start:batch_end], batch_embeddings
+ )
+ ]
+
+ # Perform bulk operation
+ bulk(self.client, batch_entries, **kwargs)
+ self.client.indices.refresh(index=self._get_index())
+
+ # Sleep to avoid rate limiting
+ time.sleep(0.1)
+
+ def query(
+ self,
+ input_query: str,
+ n_results: int,
+ where: dict[str, any],
+ citations: bool = False,
+ **kwargs: Optional[dict[str, Any]],
+ ) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ query contents from vector database based on vector similarity
+
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: Optional. to filter data
+ :type where: dict[str, any]
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
+ embeddings = OpenAIEmbeddings()
+ docsearch = OpenSearchVectorSearch(
+ index_name=self._get_index(),
+ embedding_function=embeddings,
+ opensearch_url=f"{self.config.opensearch_url}",
+ http_auth=self.config.http_auth,
+ use_ssl=hasattr(self.config, "use_ssl") and self.config.use_ssl,
+ verify_certs=hasattr(self.config, "verify_certs") and self.config.verify_certs,
+ )
+
+ pre_filter = {"match_all": {}} # default
+ if len(where) > 0:
+ pre_filter = {"bool": {"must": []}}
+ for key, value in where.items():
+ pre_filter["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
+
+ docs = docsearch.similarity_search_with_score(
+ input_query,
+ search_type="script_scoring",
+ space_type="cosinesimil",
+ vector_field="embeddings",
+ text_field="text",
+ metadata_field="metadata",
+ pre_filter=pre_filter,
+ k=n_results,
+ **kwargs,
+ )
+
+ contexts = []
+ for doc, score in docs:
+ context = doc.page_content
+ if citations:
+ metadata = doc.metadata
+ metadata["score"] = score
+ contexts.append(tuple((context, metadata)))
+ else:
+ contexts.append(context)
+ return contexts
+
+ def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
+ if not isinstance(name, str):
+ raise TypeError("Collection name must be a string")
+ self.config.collection_name = name
+
+ def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
+ query = {"query": {"match_all": {}}}
+ response = self.client.count(index=self._get_index(), body=query)
+ doc_count = response["count"]
+ return doc_count
+
+ def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
+ # Delete all data from the database
+ if self.client.indices.exists(index=self._get_index()):
+ # delete index in ES
+ self.client.indices.delete(index=self._get_index())
+
+ def delete(self, where):
+ """Deletes a document from the OpenSearch index"""
+ query = {"query": {"bool": {"must": []}}}
+ for key, value in where.items():
+ query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
+ self.client.delete_by_query(index=self._get_index(), body=query)
+
+ def _get_index(self) -> str:
+ """Get the OpenSearch index for a collection
+
+ :return: OpenSearch index
+ :rtype: str
+ """
+ return self.config.collection_name
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/opensearch.py |
Create docstrings for API functions | import copy
import os
from typing import Any, Optional, Union
try:
from qdrant_client import QdrantClient
from qdrant_client.http import models
from qdrant_client.http.models import Batch
from qdrant_client.models import Distance, VectorParams
except ImportError:
raise ImportError("Qdrant requires extra dependencies. Install with `pip install embedchain[qdrant]`") from None
from tqdm import tqdm
from embedchain.config.vector_db.qdrant import QdrantDBConfig
from embedchain.vectordb.base import BaseVectorDB
class QdrantDB(BaseVectorDB):
def __init__(self, config: QdrantDBConfig = None):
if config is None:
config = QdrantDBConfig()
else:
if not isinstance(config, QdrantDBConfig):
raise TypeError(
"config is not a `QdrantDBConfig` instance. "
"Please make sure the type is right and that you are passing an instance."
)
self.config = config
self.batch_size = self.config.batch_size
self.client = QdrantClient(url=os.getenv("QDRANT_URL"), api_key=os.getenv("QDRANT_API_KEY"))
# Call parent init here because embedder is needed
super().__init__(config=self.config)
def _initialize(self):
if not self.embedder:
raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
self.collection_name = self._get_or_create_collection()
all_collections = self.client.get_collections()
collection_names = [collection.name for collection in all_collections.collections]
if self.collection_name not in collection_names:
self.client.recreate_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(
size=self.embedder.vector_dimension,
distance=Distance.COSINE,
hnsw_config=self.config.hnsw_config,
quantization_config=self.config.quantization_config,
on_disk=self.config.on_disk,
),
)
def _get_or_create_db(self):
return self.client
def _get_or_create_collection(self):
return f"{self.config.collection_name}-{self.embedder.vector_dimension}".lower().replace("_", "-")
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
keys = set(where.keys() if where is not None else set())
qdrant_must_filters = []
if ids:
qdrant_must_filters.append(
models.FieldCondition(
key="identifier",
match=models.MatchAny(
any=ids,
),
)
)
if len(keys) > 0:
for key in keys:
qdrant_must_filters.append(
models.FieldCondition(
key="metadata.{}".format(key),
match=models.MatchValue(
value=where.get(key),
),
)
)
offset = 0
existing_ids = []
metadatas = []
while offset is not None:
response = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=models.Filter(must=qdrant_must_filters),
offset=offset,
limit=self.batch_size,
)
offset = response[1]
for doc in response[0]:
existing_ids.append(doc.payload["identifier"])
metadatas.append(doc.payload["metadata"])
return {"ids": existing_ids, "metadatas": metadatas}
def add(
self,
documents: list[str],
metadatas: list[object],
ids: list[str],
**kwargs: Optional[dict[str, any]],
):
embeddings = self.embedder.embedding_fn(documents)
payloads = []
qdrant_ids = []
for id, document, metadata in zip(ids, documents, metadatas):
metadata["text"] = document
qdrant_ids.append(id)
payloads.append({"identifier": id, "text": document, "metadata": copy.deepcopy(metadata)})
for i in tqdm(range(0, len(qdrant_ids), self.batch_size), desc="Adding data in batches"):
self.client.upsert(
collection_name=self.collection_name,
points=Batch(
ids=qdrant_ids[i : i + self.batch_size],
payloads=payloads[i : i + self.batch_size],
vectors=embeddings[i : i + self.batch_size],
),
**kwargs,
)
def query(
self,
input_query: str,
n_results: int,
where: dict[str, any],
citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, dict]], list[str]]:
query_vector = self.embedder.embedding_fn([input_query])[0]
keys = set(where.keys() if where is not None else set())
qdrant_must_filters = []
if len(keys) > 0:
for key in keys:
qdrant_must_filters.append(
models.FieldCondition(
key="metadata.{}".format(key),
match=models.MatchValue(
value=where.get(key),
),
)
)
results = self.client.search(
collection_name=self.collection_name,
query_filter=models.Filter(must=qdrant_must_filters),
query_vector=query_vector,
limit=n_results,
**kwargs,
)
contexts = []
for result in results:
context = result.payload["text"]
if citations:
metadata = result.payload["metadata"]
metadata["score"] = result.score
contexts.append(tuple((context, metadata)))
else:
contexts.append(context)
return contexts
def count(self) -> int:
response = self.client.get_collection(collection_name=self.collection_name)
return response.points_count
def reset(self):
self.client.delete_collection(collection_name=self.collection_name)
self._initialize()
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
self.collection_name = self._get_or_create_collection()
@staticmethod
def _generate_query(where: dict):
must_fields = []
for key, value in where.items():
must_fields.append(
models.FieldCondition(
key=f"metadata.{key}",
match=models.MatchValue(
value=value,
),
)
)
return models.Filter(must=must_fields)
def delete(self, where: dict):
db_filter = self._generate_query(where)
self.client.delete(collection_name=self.collection_name, points_selector=db_filter) | --- +++ @@ -17,8 +17,15 @@
class QdrantDB(BaseVectorDB):
+ """
+ Qdrant as vector database
+ """
def __init__(self, config: QdrantDBConfig = None):
+ """
+ Qdrant as vector database
+ :param config. Qdrant database config to be used for connection
+ """
if config is None:
config = QdrantDBConfig()
else:
@@ -34,6 +41,9 @@ super().__init__(config=self.config)
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+ """
if not self.embedder:
raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
@@ -59,6 +69,18 @@ return f"{self.config.collection_name}-{self.embedder.vector_dimension}".lower().replace("_", "-")
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: _list of doc ids to check for existence
+ :type ids: list[str]
+ :param where: to filter data
+ :type where: dict[str, any]
+ :param limit: The number of entries to be fetched
+ :type limit: Optional int, defaults to None
+ :return: All the existing IDs
+ :rtype: Set[str]
+ """
keys = set(where.keys() if where is not None else set())
@@ -108,6 +130,14 @@ ids: list[str],
**kwargs: Optional[dict[str, any]],
):
+ """add data in vector database
+ :param documents: list of texts to add
+ :type documents: list[str]
+ :param metadatas: list of metadata associated with docs
+ :type metadatas: list[object]
+ :param ids: ids of docs
+ :type ids: list[str]
+ """
embeddings = self.embedder.embedding_fn(documents)
payloads = []
@@ -136,6 +166,20 @@ citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ query contents from vector database based on vector similarity
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: Optional. to filter data
+ :type where: dict[str, any]
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
query_vector = self.embedder.embedding_fn([input_query])[0]
keys = set(where.keys() if where is not None else set())
@@ -179,6 +223,12 @@ self._initialize()
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
@@ -200,4 +250,4 @@
def delete(self, where: dict):
db_filter = self._generate_query(where)
- self.client.delete(collection_name=self.collection_name, points_selector=db_filter)+ self.client.delete(collection_name=self.collection_name, points_selector=db_filter)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/qdrant.py |
Replace inline comments with docstrings | import logging
from typing import Any, Optional
from embedchain.helpers.json_serializable import JSONSerializable
logger = logging.getLogger(__name__)
class BaseMessage(JSONSerializable):
# The string content of the message.
content: str
# The created_by of the message. AI, Human, Bot etc.
created_by: str
# Any additional info.
metadata: dict[str, Any]
def __init__(self, content: str, created_by: str, metadata: Optional[dict[str, Any]] = None) -> None:
super().__init__()
self.content = content
self.created_by = created_by
self.metadata = metadata
@property
def type(self) -> str:
@classmethod
def is_lc_serializable(cls) -> bool:
return True
def __str__(self) -> str:
return f"{self.created_by}: {self.content}"
class ChatMessage(JSONSerializable):
human_message: Optional[BaseMessage] = None
ai_message: Optional[BaseMessage] = None
def add_user_message(self, message: str, metadata: Optional[dict] = None):
if self.human_message:
logger.info(
"Human message already exists in the chat message,\
overwriting it with new message."
)
self.human_message = BaseMessage(content=message, created_by="human", metadata=metadata)
def add_ai_message(self, message: str, metadata: Optional[dict] = None):
if self.ai_message:
logger.info(
"AI message already exists in the chat message,\
overwriting it with new message."
)
self.ai_message = BaseMessage(content=message, created_by="ai", metadata=metadata)
def __str__(self) -> str:
return f"{self.human_message}\n{self.ai_message}" | --- +++ @@ -7,6 +7,11 @@
class BaseMessage(JSONSerializable):
+ """
+ The base abstract message class.
+
+ Messages are the inputs and outputs of Models.
+ """
# The string content of the message.
content: str
@@ -25,9 +30,11 @@
@property
def type(self) -> str:
+ """Type of the Message, used for serialization."""
@classmethod
def is_lc_serializable(cls) -> bool:
+ """Return whether this class is serializable."""
return True
def __str__(self) -> str:
@@ -35,6 +42,12 @@
class ChatMessage(JSONSerializable):
+ """
+ The base abstract chat message class.
+
+ Chat messages are the pair of (question, answer) conversation
+ between human and model.
+ """
human_message: Optional[BaseMessage] = None
ai_message: Optional[BaseMessage] = None
@@ -58,4 +71,4 @@ self.ai_message = BaseMessage(content=message, created_by="ai", metadata=metadata)
def __str__(self) -> str:
- return f"{self.human_message}\n{self.ai_message}"+ return f"{self.human_message}\n{self.ai_message}"
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/memory/message.py |
Add docstrings that explain inputs and outputs | import hashlib
import logging
import os
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
logger = logging.getLogger(__name__)
@register_deserializable
class DiscordLoader(BaseLoader):
def __init__(self):
if not os.environ.get("DISCORD_TOKEN"):
raise ValueError("DISCORD_TOKEN is not set")
self.token = os.environ.get("DISCORD_TOKEN")
@staticmethod
def _format_message(message):
return {
"message_id": message.id,
"content": message.content,
"author": {
"id": message.author.id,
"name": message.author.name,
"discriminator": message.author.discriminator,
},
"created_at": message.created_at.isoformat(),
"attachments": [
{
"id": attachment.id,
"filename": attachment.filename,
"size": attachment.size,
"url": attachment.url,
"proxy_url": attachment.proxy_url,
"height": attachment.height,
"width": attachment.width,
}
for attachment in message.attachments
],
"embeds": [
{
"title": embed.title,
"type": embed.type,
"description": embed.description,
"url": embed.url,
"timestamp": embed.timestamp.isoformat(),
"color": embed.color,
"footer": {
"text": embed.footer.text,
"icon_url": embed.footer.icon_url,
"proxy_icon_url": embed.footer.proxy_icon_url,
},
"image": {
"url": embed.image.url,
"proxy_url": embed.image.proxy_url,
"height": embed.image.height,
"width": embed.image.width,
},
"thumbnail": {
"url": embed.thumbnail.url,
"proxy_url": embed.thumbnail.proxy_url,
"height": embed.thumbnail.height,
"width": embed.thumbnail.width,
},
"video": {
"url": embed.video.url,
"height": embed.video.height,
"width": embed.video.width,
},
"provider": {
"name": embed.provider.name,
"url": embed.provider.url,
},
"author": {
"name": embed.author.name,
"url": embed.author.url,
"icon_url": embed.author.icon_url,
"proxy_icon_url": embed.author.proxy_icon_url,
},
"fields": [
{
"name": field.name,
"value": field.value,
"inline": field.inline,
}
for field in embed.fields
],
}
for embed in message.embeds
],
}
def load_data(self, channel_id: str):
import discord
messages = []
class DiscordClient(discord.Client):
async def on_ready(self) -> None:
logger.info("Logged on as {0}!".format(self.user))
try:
channel = self.get_channel(int(channel_id))
if not isinstance(channel, discord.TextChannel):
raise ValueError(
f"Channel {channel_id} is not a text channel. " "Only text channels are supported for now."
)
threads = {}
for thread in channel.threads:
threads[thread.id] = thread
async for message in channel.history(limit=None):
messages.append(DiscordLoader._format_message(message))
if message.id in threads:
async for thread_message in threads[message.id].history(limit=None):
messages.append(DiscordLoader._format_message(thread_message))
except Exception as e:
logger.error(e)
await self.close()
finally:
await self.close()
intents = discord.Intents.default()
intents.message_content = True
client = DiscordClient(intents=intents)
client.run(self.token)
metadata = {
"url": channel_id,
}
messages = str(messages)
doc_id = hashlib.sha256((messages + channel_id).encode()).hexdigest()
return {
"doc_id": doc_id,
"data": [
{
"content": messages,
"meta_data": metadata,
}
],
} | --- +++ @@ -10,6 +10,9 @@
@register_deserializable
class DiscordLoader(BaseLoader):
+ """
+ Load data from a Discord Channel ID.
+ """
def __init__(self):
if not os.environ.get("DISCORD_TOKEN"):
@@ -94,6 +97,7 @@ }
def load_data(self, channel_id: str):
+ """Load data from a Discord Channel ID."""
import discord
messages = []
@@ -145,4 +149,4 @@ "meta_data": metadata,
}
],
- }+ }
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/loaders/discord.py |
Add docstrings that explain inputs and outputs | from typing import Any, Dict, List, Optional, Union
import pyarrow as pa
try:
import lancedb
except ImportError:
raise ImportError('LanceDB is required. Install with pip install "embedchain[lancedb]"') from None
from embedchain.config.vector_db.lancedb import LanceDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
@register_deserializable
class LanceDB(BaseVectorDB):
def __init__(
self,
config: Optional[LanceDBConfig] = None,
):
if config:
self.config = config
else:
self.config = LanceDBConfig()
self.client = lancedb.connect(self.config.dir or "~/.lancedb")
self.embedder_check = True
super().__init__(config=self.config)
def _initialize(self):
if not self.embedder:
raise ValueError(
"Embedder not set. Please set an embedder with `_set_embedder()` function before initialization."
)
else:
# check embedder function is working or not
try:
self.embedder.embedding_fn("Hello LanceDB")
except Exception:
self.embedder_check = False
self._get_or_create_collection(self.config.collection_name)
def _get_or_create_db(self):
return self.client
def _generate_where_clause(self, where: Dict[str, any]) -> str:
where_filters = ""
if len(list(where.keys())) == 1:
where_filters = f"{list(where.keys())[0]} = {list(where.values())[0]}"
return where_filters
where_items = list(where.items())
where_count = len(where_items)
for i, (key, value) in enumerate(where_items, start=1):
condition = f"{key} = {value} AND "
where_filters += condition
if i == where_count:
condition = f"{key} = {value}"
where_filters += condition
return where_filters
def _get_or_create_collection(self, table_name: str, reset=False):
if not self.embedder_check:
schema = pa.schema(
[
pa.field("doc", pa.string()),
pa.field("metadata", pa.string()),
pa.field("id", pa.string()),
]
)
else:
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), list_size=self.embedder.vector_dimension)),
pa.field("doc", pa.string()),
pa.field("metadata", pa.string()),
pa.field("id", pa.string()),
]
)
if not reset:
if table_name not in self.client.table_names():
self.collection = self.client.create_table(table_name, schema=schema)
else:
self.client.drop_table(table_name)
self.collection = self.client.create_table(table_name, schema=schema)
self.collection = self.client[table_name]
return self.collection
def get(self, ids: Optional[List[str]] = None, where: Optional[Dict[str, any]] = None, limit: Optional[int] = None):
if limit is not None:
max_limit = limit
else:
max_limit = 3
results = {"ids": [], "metadatas": []}
where_clause = {}
if where:
where_clause = self._generate_where_clause(where)
if ids is not None:
records = (
self.collection.to_lance().scanner(filter=f"id IN {tuple(ids)}", columns=["id"]).to_table().to_pydict()
)
for id in records["id"]:
if where is not None:
result = (
self.collection.search(query=id, vector_column_name="id")
.where(where_clause)
.limit(max_limit)
.to_list()
)
else:
result = self.collection.search(query=id, vector_column_name="id").limit(max_limit).to_list()
results["ids"] = [r["id"] for r in result]
results["metadatas"] = [r["metadata"] for r in result]
return results
def add(
self,
documents: List[str],
metadatas: List[object],
ids: List[str],
) -> Any:
data = []
to_ingest = list(zip(documents, metadatas, ids))
if not self.embedder_check:
for doc, meta, id in to_ingest:
temp = {}
temp["doc"] = doc
temp["metadata"] = str(meta)
temp["id"] = id
data.append(temp)
else:
for doc, meta, id in to_ingest:
temp = {}
temp["doc"] = doc
temp["vector"] = self.embedder.embedding_fn([doc])[0]
temp["metadata"] = str(meta)
temp["id"] = id
data.append(temp)
self.collection.add(data=data)
def _format_result(self, results) -> list:
return results.tolist()
def query(
self,
input_query: str,
n_results: int = 3,
where: Optional[dict[str, any]] = None,
raw_filter: Optional[dict[str, any]] = None,
citations: bool = False,
**kwargs: Optional[dict[str, any]],
) -> Union[list[tuple[str, dict]], list[str]]:
if where and raw_filter:
raise ValueError("Both `where` and `raw_filter` cannot be used together.")
try:
query_embedding = self.embedder.embedding_fn(input_query)[0]
result = self.collection.search(query_embedding).limit(n_results).to_list()
except Exception as e:
e.message()
results_formatted = result
contexts = []
for result in results_formatted:
if citations:
metadata = result["metadata"]
contexts.append((result["doc"], metadata))
else:
contexts.append(result["doc"])
return contexts
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
self._get_or_create_collection(self.config.collection_name)
def count(self) -> int:
return self.collection.count_rows()
def delete(self, where):
return self.collection.delete(where=where)
def reset(self):
# Delete all data from the collection and recreate collection
if self.config.allow_reset:
try:
self._get_or_create_collection(self.config.collection_name, reset=True)
except ValueError:
raise ValueError(
"For safety reasons, resetting is disabled. "
"Please enable it by setting `allow_reset=True` in your LanceDbConfig"
) from None
# Recreate
else:
print(
"For safety reasons, resetting is disabled. "
"Please enable it by setting `allow_reset=True` in your LanceDbConfig"
) | --- +++ @@ -14,11 +14,19 @@
@register_deserializable
class LanceDB(BaseVectorDB):
+ """
+ LanceDB as vector database
+ """
def __init__(
self,
config: Optional[LanceDBConfig] = None,
):
+ """LanceDB as vector database.
+
+ :param config: LanceDB database config, defaults to None
+ :type config: LanceDBConfig, optional
+ """
if config:
self.config = config
else:
@@ -30,6 +38,9 @@ super().__init__(config=self.config)
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+ """
if not self.embedder:
raise ValueError(
"Embedder not set. Please set an embedder with `_set_embedder()` function before initialization."
@@ -44,9 +55,15 @@ self._get_or_create_collection(self.config.collection_name)
def _get_or_create_db(self):
+ """
+ Called during initialization
+ """
return self.client
def _generate_where_clause(self, where: Dict[str, any]) -> str:
+ """
+ This method generate where clause using dictionary containing attributes and their values
+ """
where_filters = ""
@@ -68,6 +85,14 @@ return where_filters
def _get_or_create_collection(self, table_name: str, reset=False):
+ """
+ Get or create a named collection.
+
+ :param name: Name of the collection
+ :type name: str
+ :return: Created collection
+ :rtype: Collection
+ """
if not self.embedder_check:
schema = pa.schema(
[
@@ -100,6 +125,18 @@ return self.collection
def get(self, ids: Optional[List[str]] = None, where: Optional[Dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: list of doc ids to check for existence
+ :type ids: List[str]
+ :param where: Optional. to filter data
+ :type where: Dict[str, Any]
+ :param limit: Optional. maximum number of documents
+ :type limit: Optional[int]
+ :return: Existing documents.
+ :rtype: List[str]
+ """
if limit is not None:
max_limit = limit
else:
@@ -135,6 +172,16 @@ metadatas: List[object],
ids: List[str],
) -> Any:
+ """
+ Add vectors to lancedb database
+
+ :param documents: Documents
+ :type documents: List[str]
+ :param metadatas: Metadatas
+ :type metadatas: List[object]
+ :param ids: ids
+ :type ids: List[str]
+ """
data = []
to_ingest = list(zip(documents, metadatas, ids))
@@ -157,6 +204,14 @@ self.collection.add(data=data)
def _format_result(self, results) -> list:
+ """
+ Format LanceDB results
+
+ :param results: LanceDB query results to format.
+ :type results: QueryResult
+ :return: Formatted results
+ :rtype: list[tuple[Document, float]]
+ """
return results.tolist()
def query(
@@ -168,6 +223,24 @@ citations: bool = False,
**kwargs: Optional[dict[str, any]],
) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ Query contents from vector database based on vector similarity
+
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: to filter data
+ :type where: dict[str, Any]
+ :param raw_filter: Raw filter to apply
+ :type raw_filter: dict[str, Any]
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :raises InvalidDimensionException: Dimensions do not match.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
if where and raw_filter:
raise ValueError("Both `where` and `raw_filter` cannot be used together.")
try:
@@ -188,18 +261,33 @@ return contexts
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
self._get_or_create_collection(self.config.collection_name)
def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
return self.collection.count_rows()
def delete(self, where):
return self.collection.delete(where=where)
def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
# Delete all data from the collection and recreate collection
if self.config.allow_reset:
try:
@@ -214,4 +302,4 @@ print(
"For safety reasons, resetting is disabled. "
"Please enable it by setting `allow_reset=True` in your LanceDbConfig"
- )+ )
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/lancedb.py |
Write docstrings for backend logic | import json
import logging
import uuid
from typing import Any, Optional
from embedchain.core.db.database import get_session
from embedchain.core.db.models import ChatHistory as ChatHistoryModel
from embedchain.memory.message import ChatMessage
from embedchain.memory.utils import merge_metadata_dict
logger = logging.getLogger(__name__)
class ChatHistory:
def __init__(self) -> None:
self.db_session = get_session()
def add(self, app_id, session_id, chat_message: ChatMessage) -> Optional[str]:
memory_id = str(uuid.uuid4())
metadata_dict = merge_metadata_dict(chat_message.human_message.metadata, chat_message.ai_message.metadata)
if metadata_dict:
metadata = self._serialize_json(metadata_dict)
self.db_session.add(
ChatHistoryModel(
app_id=app_id,
id=memory_id,
session_id=session_id,
question=chat_message.human_message.content,
answer=chat_message.ai_message.content,
metadata=metadata if metadata_dict else "{}",
)
)
try:
self.db_session.commit()
except Exception as e:
logger.error(f"Error adding chat memory to db: {e}")
self.db_session.rollback()
return None
logger.info(f"Added chat memory to db with id: {memory_id}")
return memory_id
def delete(self, app_id: str, session_id: Optional[str] = None):
params = {"app_id": app_id}
if session_id:
params["session_id"] = session_id
self.db_session.query(ChatHistoryModel).filter_by(**params).delete()
try:
self.db_session.commit()
except Exception as e:
logger.error(f"Error deleting chat history: {e}")
self.db_session.rollback()
def get(
self, app_id, session_id: str = "default", num_rounds=10, fetch_all: bool = False, display_format=False
) -> list[ChatMessage]:
params = {"app_id": app_id}
if not fetch_all:
params["session_id"] = session_id
results = (
self.db_session.query(ChatHistoryModel).filter_by(**params).order_by(ChatHistoryModel.created_at.asc())
)
results = results.limit(num_rounds) if not fetch_all else results
history = []
for result in results:
metadata = self._deserialize_json(metadata=result.meta_data or "{}")
# Return list of dict if display_format is True
if display_format:
history.append(
{
"session_id": result.session_id,
"human": result.question,
"ai": result.answer,
"metadata": result.meta_data,
"timestamp": result.created_at,
}
)
else:
memory = ChatMessage()
memory.add_user_message(result.question, metadata=metadata)
memory.add_ai_message(result.answer, metadata=metadata)
history.append(memory)
return history
def count(self, app_id: str, session_id: Optional[str] = None):
# Rewrite the logic below with sqlalchemy
params = {"app_id": app_id}
if session_id:
params["session_id"] = session_id
return self.db_session.query(ChatHistoryModel).filter_by(**params).count()
@staticmethod
def _serialize_json(metadata: dict[str, Any]):
return json.dumps(metadata)
@staticmethod
def _deserialize_json(metadata: str):
return json.loads(metadata)
def close_connection(self):
self.connection.close() | --- +++ @@ -41,6 +41,15 @@ return memory_id
def delete(self, app_id: str, session_id: Optional[str] = None):
+ """
+ Delete all chat history for a given app_id and session_id.
+ This is useful for deleting chat history for a given user.
+
+ :param app_id: The app_id to delete chat history for
+ :param session_id: The session_id to delete chat history for
+
+ :return: None
+ """
params = {"app_id": app_id}
if session_id:
params["session_id"] = session_id
@@ -54,6 +63,15 @@ def get(
self, app_id, session_id: str = "default", num_rounds=10, fetch_all: bool = False, display_format=False
) -> list[ChatMessage]:
+ """
+ Get the chat history for a given app_id.
+
+ param: app_id - The app_id to get chat history
+ param: session_id (optional) - The session_id to get chat history. Defaults to "default"
+ param: num_rounds (optional) - The number of rounds to get chat history. Defaults to 10
+ param: fetch_all (optional) - Whether to fetch all chat history or not. Defaults to False
+ param: display_format (optional) - Whether to return the chat history in display format. Defaults to False
+ """
params = {"app_id": app_id}
if not fetch_all:
params["session_id"] = session_id
@@ -83,6 +101,14 @@ return history
def count(self, app_id: str, session_id: Optional[str] = None):
+ """
+ Count the number of chat messages for a given app_id and session_id.
+
+ :param app_id: The app_id to count chat history for
+ :param session_id: The session_id to count chat history for
+
+ :return: The number of chat messages for a given app_id and session_id
+ """
# Rewrite the logic below with sqlalchemy
params = {"app_id": app_id}
if session_id:
@@ -98,4 +124,4 @@ return json.loads(metadata)
def close_connection(self):
- self.connection.close()+ self.connection.close()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/memory/base.py |
Annotate my code with docstrings | import logging
import os
from typing import Optional, Union
try:
import pinecone
except ImportError:
raise ImportError(
"Pinecone requires extra dependencies. Install with `pip install pinecone-text pinecone-client`"
) from None
from pinecone_text.sparse import BM25Encoder
from embedchain.config.vector_db.pinecone import PineconeDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.utils.misc import chunks
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
@register_deserializable
class PineconeDB(BaseVectorDB):
def __init__(
self,
config: Optional[PineconeDBConfig] = None,
):
if config is None:
self.config = PineconeDBConfig()
else:
if not isinstance(config, PineconeDBConfig):
raise TypeError(
"config is not a `PineconeDBConfig` instance. "
"Please make sure the type is right and that you are passing an instance."
)
self.config = config
self._setup_pinecone_index()
# Setup BM25Encoder if sparse vectors are to be used
self.bm25_encoder = None
self.batch_size = self.config.batch_size
if self.config.hybrid_search:
logger.info("Initializing BM25Encoder for sparse vectors..")
self.bm25_encoder = self.config.bm25_encoder if self.config.bm25_encoder else BM25Encoder.default()
# Call parent init here because embedder is needed
super().__init__(config=self.config)
def _initialize(self):
if not self.embedder:
raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
def _setup_pinecone_index(self):
api_key = self.config.api_key or os.environ.get("PINECONE_API_KEY")
if not api_key:
raise ValueError("Please set the PINECONE_API_KEY environment variable or pass it in config.")
self.client = pinecone.Pinecone(api_key=api_key, **self.config.extra_params)
indexes = self.client.list_indexes().names()
if indexes is None or self.config.index_name not in indexes:
if self.config.pod_config:
spec = pinecone.PodSpec(**self.config.pod_config)
elif self.config.serverless_config:
spec = pinecone.ServerlessSpec(**self.config.serverless_config)
else:
raise ValueError("No pod_config or serverless_config found.")
self.client.create_index(
name=self.config.index_name,
metric=self.config.metric,
dimension=self.config.vector_dimension,
spec=spec,
)
self.pinecone_index = self.client.Index(self.config.index_name)
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
existing_ids = list()
metadatas = []
if ids is not None:
for i in range(0, len(ids), self.batch_size):
result = self.pinecone_index.fetch(ids=ids[i : i + self.batch_size])
vectors = result.get("vectors")
batch_existing_ids = list(vectors.keys())
existing_ids.extend(batch_existing_ids)
metadatas.extend([vectors.get(ids).get("metadata") for ids in batch_existing_ids])
return {"ids": existing_ids, "metadatas": metadatas}
def add(
self,
documents: list[str],
metadatas: list[object],
ids: list[str],
**kwargs: Optional[dict[str, any]],
):
docs = []
embeddings = self.embedder.embedding_fn(documents)
for id, text, metadata, embedding in zip(ids, documents, metadatas, embeddings):
# Insert sparse vectors as well if the user wants to do the hybrid search
sparse_vector_dict = (
{"sparse_values": self.bm25_encoder.encode_documents(text)} if self.bm25_encoder else {}
)
docs.append(
{
"id": id,
"values": embedding,
"metadata": {**metadata, "text": text},
**sparse_vector_dict,
},
)
for chunk in chunks(docs, self.batch_size, desc="Adding chunks in batches"):
self.pinecone_index.upsert(chunk, **kwargs)
def query(
self,
input_query: str,
n_results: int,
where: Optional[dict[str, any]] = None,
raw_filter: Optional[dict[str, any]] = None,
citations: bool = False,
app_id: Optional[str] = None,
**kwargs: Optional[dict[str, any]],
) -> Union[list[tuple[str, dict]], list[str]]:
query_filter = raw_filter if raw_filter is not None else self._generate_filter(where)
if app_id:
query_filter["app_id"] = {"$eq": app_id}
query_vector = self.embedder.embedding_fn([input_query])[0]
params = {
"vector": query_vector,
"filter": query_filter,
"top_k": n_results,
"include_metadata": True,
**kwargs,
}
if self.bm25_encoder:
sparse_query_vector = self.bm25_encoder.encode_queries(input_query)
params["sparse_vector"] = sparse_query_vector
data = self.pinecone_index.query(**params)
return [
(metadata.get("text"), {**metadata, "score": doc.get("score")}) if citations else metadata.get("text")
for doc in data.get("matches", [])
for metadata in [doc.get("metadata", {})]
]
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def count(self) -> int:
data = self.pinecone_index.describe_index_stats()
return data["total_vector_count"]
def _get_or_create_db(self):
return self.client
def reset(self):
# Delete all data from the database
self.client.delete_index(self.config.index_name)
self._setup_pinecone_index()
@staticmethod
def _generate_filter(where: dict):
query = {}
if where is None:
return query
for k, v in where.items():
query[k] = {"$eq": v}
return query
def delete(self, where: dict):
# Deleting with filters is not supported for `starter` index type.
# Follow `https://docs.pinecone.io/docs/metadata-filtering#deleting-vectors-by-metadata-filter` for more details
db_filter = self._generate_filter(where)
try:
self.pinecone_index.delete(filter=db_filter)
except Exception as e:
print(f"Failed to delete from Pinecone: {e}")
return | --- +++ @@ -21,11 +21,20 @@
@register_deserializable
class PineconeDB(BaseVectorDB):
+ """
+ Pinecone as vector database
+ """
def __init__(
self,
config: Optional[PineconeDBConfig] = None,
):
+ """Pinecone as vector database.
+
+ :param config: Pinecone database config, defaults to None
+ :type config: PineconeDBConfig, optional
+ :raises ValueError: No config provided
+ """
if config is None:
self.config = PineconeDBConfig()
else:
@@ -48,10 +57,16 @@ super().__init__(config=self.config)
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+ """
if not self.embedder:
raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
def _setup_pinecone_index(self):
+ """
+ Loads the Pinecone index or creates it if not present.
+ """
api_key = self.config.api_key or os.environ.get("PINECONE_API_KEY")
if not api_key:
raise ValueError("Please set the PINECONE_API_KEY environment variable or pass it in config.")
@@ -74,6 +89,16 @@ self.pinecone_index = self.client.Index(self.config.index_name)
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: _list of doc ids to check for existence
+ :type ids: list[str]
+ :param where: to filter data
+ :type where: dict[str, any]
+ :return: ids
+ :rtype: Set[str]
+ """
existing_ids = list()
metadatas = []
@@ -93,6 +118,15 @@ ids: list[str],
**kwargs: Optional[dict[str, any]],
):
+ """add data in vector database
+
+ :param documents: list of texts to add
+ :type documents: list[str]
+ :param metadatas: list of metadata associated with docs
+ :type metadatas: list[object]
+ :param ids: ids of docs
+ :type ids: list[str]
+ """
docs = []
embeddings = self.embedder.embedding_fn(documents)
for id, text, metadata, embedding in zip(ids, documents, metadatas, embeddings):
@@ -122,6 +156,20 @@ app_id: Optional[str] = None,
**kwargs: Optional[dict[str, any]],
) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ Query contents from vector database based on vector similarity.
+
+ Args:
+ input_query (str): query string.
+ n_results (int): Number of similar documents to fetch from the database.
+ where (dict[str, any], optional): Filter criteria for the search.
+ raw_filter (dict[str, any], optional): Advanced raw filter criteria for the search.
+ citations (bool, optional): Flag to return context along with metadata. Defaults to False.
+ app_id (str, optional): Application ID to be passed to Pinecone.
+
+ Returns:
+ Union[list[tuple[str, dict]], list[str]]: List of document contexts, optionally with metadata.
+ """
query_filter = raw_filter if raw_filter is not None else self._generate_filter(where)
if app_id:
query_filter["app_id"] = {"$eq": app_id}
@@ -147,18 +195,34 @@ ]
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
data = self.pinecone_index.describe_index_stats()
return data["total_vector_count"]
def _get_or_create_db(self):
+ """Called during initialization"""
return self.client
def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
# Delete all data from the database
self.client.delete_index(self.config.index_name)
self._setup_pinecone_index()
@@ -174,6 +238,10 @@ return query
def delete(self, where: dict):
+ """Delete from database.
+ :param ids: list of ids to delete
+ :type ids: list[str]
+ """
# Deleting with filters is not supported for `starter` index type.
# Follow `https://docs.pinecone.io/docs/metadata-filtering#deleting-vectors-by-metadata-filter` for more details
db_filter = self._generate_filter(where)
@@ -181,4 +249,4 @@ self.pinecone_index.delete(filter=db_filter)
except Exception as e:
print(f"Failed to delete from Pinecone: {e}")
- return+ return
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/pinecone.py |
Add docstrings for better understanding | import logging
from typing import Any, Optional, Union
from chromadb import Collection, QueryResult
from langchain.docstore.document import Document
from tqdm import tqdm
from embedchain.config import ChromaDbConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
try:
import chromadb
from chromadb.config import Settings
from chromadb.errors import InvalidDimensionException
except RuntimeError:
from embedchain.utils.misc import use_pysqlite3
use_pysqlite3()
import chromadb
from chromadb.config import Settings
from chromadb.errors import InvalidDimensionException
logger = logging.getLogger(__name__)
@register_deserializable
class ChromaDB(BaseVectorDB):
def __init__(self, config: Optional[ChromaDbConfig] = None):
if config:
self.config = config
else:
self.config = ChromaDbConfig()
self.settings = Settings(anonymized_telemetry=False)
self.settings.allow_reset = self.config.allow_reset if hasattr(self.config, "allow_reset") else False
self.batch_size = self.config.batch_size
if self.config.chroma_settings:
for key, value in self.config.chroma_settings.items():
if hasattr(self.settings, key):
setattr(self.settings, key, value)
if self.config.host and self.config.port:
logger.info(f"Connecting to ChromaDB server: {self.config.host}:{self.config.port}")
self.settings.chroma_server_host = self.config.host
self.settings.chroma_server_http_port = self.config.port
self.settings.chroma_api_impl = "chromadb.api.fastapi.FastAPI"
else:
if self.config.dir is None:
self.config.dir = "db"
self.settings.persist_directory = self.config.dir
self.settings.is_persistent = True
self.client = chromadb.Client(self.settings)
super().__init__(config=self.config)
def _initialize(self):
if not self.embedder:
raise ValueError(
"Embedder not set. Please set an embedder with `_set_embedder()` function before initialization."
)
self._get_or_create_collection(self.config.collection_name)
def _get_or_create_db(self):
return self.client
@staticmethod
def _generate_where_clause(where: dict[str, any]) -> dict[str, any]:
# If only one filter is supplied, return it as is
# (no need to wrap in $and based on chroma docs)
if where is None:
return {}
if len(where.keys()) <= 1:
return where
where_filters = []
for k, v in where.items():
if isinstance(v, str):
where_filters.append({k: v})
return {"$and": where_filters}
def _get_or_create_collection(self, name: str) -> Collection:
if not hasattr(self, "embedder") or not self.embedder:
raise ValueError("Cannot create a Chroma database collection without an embedder.")
self.collection = self.client.get_or_create_collection(
name=name,
embedding_function=self.embedder.embedding_fn,
)
return self.collection
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
args = {}
if ids:
args["ids"] = ids
if where:
args["where"] = self._generate_where_clause(where)
if limit:
args["limit"] = limit
return self.collection.get(**args)
def add(
self,
documents: list[str],
metadatas: list[object],
ids: list[str],
**kwargs: Optional[dict[str, Any]],
) -> Any:
size = len(documents)
if len(documents) != size or len(metadatas) != size or len(ids) != size:
raise ValueError(
"Cannot add documents to chromadb with inconsistent sizes. Documents size: {}, Metadata size: {},"
" Ids size: {}".format(len(documents), len(metadatas), len(ids))
)
for i in tqdm(range(0, len(documents), self.batch_size), desc="Inserting batches in chromadb"):
self.collection.add(
documents=documents[i : i + self.batch_size],
metadatas=metadatas[i : i + self.batch_size],
ids=ids[i : i + self.batch_size],
)
self.config
@staticmethod
def _format_result(results: QueryResult) -> list[tuple[Document, float]]:
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
def query(
self,
input_query: str,
n_results: int,
where: Optional[dict[str, any]] = None,
raw_filter: Optional[dict[str, any]] = None,
citations: bool = False,
**kwargs: Optional[dict[str, any]],
) -> Union[list[tuple[str, dict]], list[str]]:
if where and raw_filter:
raise ValueError("Both `where` and `raw_filter` cannot be used together.")
where_clause = None
if raw_filter:
where_clause = raw_filter
if where:
where_clause = self._generate_where_clause(where)
try:
result = self.collection.query(
query_texts=[
input_query,
],
n_results=n_results,
where=where_clause,
)
except InvalidDimensionException as e:
raise InvalidDimensionException(
e.message()
+ ". This is commonly a side-effect when an embedding function, different from the one used to add the"
" embeddings, is used to retrieve an embedding from the database."
) from None
results_formatted = self._format_result(result)
contexts = []
for result in results_formatted:
context = result[0].page_content
if citations:
metadata = result[0].metadata
metadata["score"] = result[1]
contexts.append((context, metadata))
else:
contexts.append(context)
return contexts
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
self._get_or_create_collection(self.config.collection_name)
def count(self) -> int:
return self.collection.count()
def delete(self, where):
return self.collection.delete(where=self._generate_where_clause(where))
def reset(self):
# Delete all data from the collection
try:
self.client.delete_collection(self.config.collection_name)
except ValueError:
raise ValueError(
"For safety reasons, resetting is disabled. "
"Please enable it by setting `allow_reset=True` in your ChromaDbConfig"
) from None
# Recreate
self._get_or_create_collection(self.config.collection_name)
# Todo: Automatically recreating a collection with the same name cannot be the best way to handle a reset.
# A downside of this implementation is, if you have two instances,
# the other instance will not get the updated `self.collection` attribute.
# A better way would be to create the collection if it is called again after being reset.
# That means, checking if collection exists in the db-consuming methods, and creating it if it doesn't.
# That's an extra steps for all uses, just to satisfy a niche use case in a niche method. For now, this will do. | --- +++ @@ -27,8 +27,14 @@
@register_deserializable
class ChromaDB(BaseVectorDB):
+ """Vector database using ChromaDB."""
def __init__(self, config: Optional[ChromaDbConfig] = None):
+ """Initialize a new ChromaDB instance
+
+ :param config: Configuration options for Chroma, defaults to None
+ :type config: Optional[ChromaDbConfig], optional
+ """
if config:
self.config = config
else:
@@ -58,6 +64,9 @@ super().__init__(config=self.config)
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+ """
if not self.embedder:
raise ValueError(
"Embedder not set. Please set an embedder with `_set_embedder()` function before initialization."
@@ -65,6 +74,7 @@ self._get_or_create_collection(self.config.collection_name)
def _get_or_create_db(self):
+ """Called during initialization"""
return self.client
@staticmethod
@@ -82,6 +92,15 @@ return {"$and": where_filters}
def _get_or_create_collection(self, name: str) -> Collection:
+ """
+ Get or create a named collection.
+
+ :param name: Name of the collection
+ :type name: str
+ :raises ValueError: No embedder configured.
+ :return: Created collection
+ :rtype: Collection
+ """
if not hasattr(self, "embedder") or not self.embedder:
raise ValueError("Cannot create a Chroma database collection without an embedder.")
self.collection = self.client.get_or_create_collection(
@@ -91,6 +110,18 @@ return self.collection
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: list of doc ids to check for existence
+ :type ids: list[str]
+ :param where: Optional. to filter data
+ :type where: dict[str, Any]
+ :param limit: Optional. maximum number of documents
+ :type limit: Optional[int]
+ :return: Existing documents.
+ :rtype: list[str]
+ """
args = {}
if ids:
args["ids"] = ids
@@ -107,6 +138,16 @@ ids: list[str],
**kwargs: Optional[dict[str, Any]],
) -> Any:
+ """
+ Add vectors to chroma database
+
+ :param documents: Documents
+ :type documents: list[str]
+ :param metadatas: Metadatas
+ :type metadatas: list[object]
+ :param ids: ids
+ :type ids: list[str]
+ """
size = len(documents)
if len(documents) != size or len(metadatas) != size or len(ids) != size:
raise ValueError(
@@ -124,6 +165,14 @@
@staticmethod
def _format_result(results: QueryResult) -> list[tuple[Document, float]]:
+ """
+ Format Chroma results
+
+ :param results: ChromaDB query results to format.
+ :type results: QueryResult
+ :return: Formatted results
+ :rtype: list[tuple[Document, float]]
+ """
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
@@ -142,6 +191,24 @@ citations: bool = False,
**kwargs: Optional[dict[str, any]],
) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ Query contents from vector database based on vector similarity
+
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: to filter data
+ :type where: dict[str, Any]
+ :param raw_filter: Raw filter to apply
+ :type raw_filter: dict[str, Any]
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :raises InvalidDimensionException: Dimensions do not match.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
if where and raw_filter:
raise ValueError("Both `where` and `raw_filter` cannot be used together.")
@@ -177,18 +244,33 @@ return contexts
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
self._get_or_create_collection(self.config.collection_name)
def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
return self.collection.count()
def delete(self, where):
return self.collection.delete(where=self._generate_where_clause(where))
def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
# Delete all data from the collection
try:
self.client.delete_collection(self.config.collection_name)
@@ -205,4 +287,4 @@ # the other instance will not get the updated `self.collection` attribute.
# A better way would be to create the collection if it is called again after being reset.
# That means, checking if collection exists in the db-consuming methods, and creating it if it doesn't.
- # That's an extra steps for all uses, just to satisfy a niche use case in a niche method. For now, this will do.+ # That's an extra steps for all uses, just to satisfy a niche use case in a niche method. For now, this will do.
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/chroma.py |
Document this script properly |
import statistics
from collections import defaultdict
from typing import Dict, List, Union
import nltk
from bert_score import score as bert_score
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from nltk.translate.meteor_score import meteor_score
from rouge_score import rouge_scorer
from sentence_transformers import SentenceTransformer
# from load_dataset import load_locomo_dataset, QA, Turn, Session, Conversation
from sentence_transformers.util import pytorch_cos_sim
# Download required NLTK data
try:
nltk.download("punkt", quiet=True)
nltk.download("wordnet", quiet=True)
except Exception as e:
print(f"Error downloading NLTK data: {e}")
# Initialize SentenceTransformer model (this will be reused)
try:
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
except Exception as e:
print(f"Warning: Could not load SentenceTransformer model: {e}")
sentence_model = None
def simple_tokenize(text):
# Convert to string if not already
text = str(text)
return text.lower().replace(".", " ").replace(",", " ").replace("!", " ").replace("?", " ").split()
def calculate_rouge_scores(prediction: str, reference: str) -> Dict[str, float]:
scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
scores = scorer.score(reference, prediction)
return {
"rouge1_f": scores["rouge1"].fmeasure,
"rouge2_f": scores["rouge2"].fmeasure,
"rougeL_f": scores["rougeL"].fmeasure,
}
def calculate_bleu_scores(prediction: str, reference: str) -> Dict[str, float]:
pred_tokens = nltk.word_tokenize(prediction.lower())
ref_tokens = [nltk.word_tokenize(reference.lower())]
weights_list = [(1, 0, 0, 0), (0.5, 0.5, 0, 0), (0.33, 0.33, 0.33, 0), (0.25, 0.25, 0.25, 0.25)]
smooth = SmoothingFunction().method1
scores = {}
for n, weights in enumerate(weights_list, start=1):
try:
score = sentence_bleu(ref_tokens, pred_tokens, weights=weights, smoothing_function=smooth)
except Exception as e:
print(f"Error calculating BLEU score: {e}")
score = 0.0
scores[f"bleu{n}"] = score
return scores
def calculate_bert_scores(prediction: str, reference: str) -> Dict[str, float]:
try:
P, R, F1 = bert_score([prediction], [reference], lang="en", verbose=False)
return {"bert_precision": P.item(), "bert_recall": R.item(), "bert_f1": F1.item()}
except Exception as e:
print(f"Error calculating BERTScore: {e}")
return {"bert_precision": 0.0, "bert_recall": 0.0, "bert_f1": 0.0}
def calculate_meteor_score(prediction: str, reference: str) -> float:
try:
return meteor_score([reference.split()], prediction.split())
except Exception as e:
print(f"Error calculating METEOR score: {e}")
return 0.0
def calculate_sentence_similarity(prediction: str, reference: str) -> float:
if sentence_model is None:
return 0.0
try:
# Encode sentences
embedding1 = sentence_model.encode([prediction], convert_to_tensor=True)
embedding2 = sentence_model.encode([reference], convert_to_tensor=True)
# Calculate cosine similarity
similarity = pytorch_cos_sim(embedding1, embedding2).item()
return float(similarity)
except Exception as e:
print(f"Error calculating sentence similarity: {e}")
return 0.0
def calculate_metrics(prediction: str, reference: str) -> Dict[str, float]:
# Handle empty or None values
if not prediction or not reference:
return {
"exact_match": 0,
"f1": 0.0,
"rouge1_f": 0.0,
"rouge2_f": 0.0,
"rougeL_f": 0.0,
"bleu1": 0.0,
"bleu2": 0.0,
"bleu3": 0.0,
"bleu4": 0.0,
"bert_f1": 0.0,
"meteor": 0.0,
"sbert_similarity": 0.0,
}
# Convert to strings if they're not already
prediction = str(prediction).strip()
reference = str(reference).strip()
# Calculate exact match
exact_match = int(prediction.lower() == reference.lower())
# Calculate token-based F1 score
pred_tokens = set(simple_tokenize(prediction))
ref_tokens = set(simple_tokenize(reference))
common_tokens = pred_tokens & ref_tokens
if not pred_tokens or not ref_tokens:
f1 = 0.0
else:
precision = len(common_tokens) / len(pred_tokens)
recall = len(common_tokens) / len(ref_tokens)
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
# Calculate all scores
bleu_scores = calculate_bleu_scores(prediction, reference)
# Combine all metrics
metrics = {
"exact_match": exact_match,
"f1": f1,
**bleu_scores,
}
return metrics
def aggregate_metrics(
all_metrics: List[Dict[str, float]], all_categories: List[int]
) -> Dict[str, Dict[str, Union[float, Dict[str, float]]]]:
if not all_metrics:
return {}
# Initialize aggregates for overall and per-category metrics
aggregates = defaultdict(list)
category_aggregates = defaultdict(lambda: defaultdict(list))
# Collect all values for each metric, both overall and per category
for metrics, category in zip(all_metrics, all_categories):
for metric_name, value in metrics.items():
aggregates[metric_name].append(value)
category_aggregates[category][metric_name].append(value)
# Calculate statistics for overall metrics
results = {"overall": {}}
for metric_name, values in aggregates.items():
results["overall"][metric_name] = {
"mean": statistics.mean(values),
"std": statistics.stdev(values) if len(values) > 1 else 0.0,
"median": statistics.median(values),
"min": min(values),
"max": max(values),
"count": len(values),
}
# Calculate statistics for each category
for category in sorted(category_aggregates.keys()):
results[f"category_{category}"] = {}
for metric_name, values in category_aggregates[category].items():
if values: # Only calculate if we have values for this category
results[f"category_{category}"][metric_name] = {
"mean": statistics.mean(values),
"std": statistics.stdev(values) if len(values) > 1 else 0.0,
"median": statistics.median(values),
"min": min(values),
"max": max(values),
"count": len(values),
}
return results | --- +++ @@ -1,3 +1,14 @@+"""
+Borrowed from https://github.com/WujiangXu/AgenticMemory/blob/main/utils.py
+
+@article{xu2025mem,
+ title={A-mem: Agentic memory for llm agents},
+ author={Xu, Wujiang and Liang, Zujie and Mei, Kai and Gao, Hang and Tan, Juntao
+ and Zhang, Yongfeng},
+ journal={arXiv preprint arXiv:2502.12110},
+ year={2025}
+}
+"""
import statistics
from collections import defaultdict
@@ -29,12 +40,14 @@
def simple_tokenize(text):
+ """Simple tokenization function."""
# Convert to string if not already
text = str(text)
return text.lower().replace(".", " ").replace(",", " ").replace("!", " ").replace("?", " ").split()
def calculate_rouge_scores(prediction: str, reference: str) -> Dict[str, float]:
+ """Calculate ROUGE scores for prediction against reference."""
scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
scores = scorer.score(reference, prediction)
return {
@@ -45,6 +58,7 @@
def calculate_bleu_scores(prediction: str, reference: str) -> Dict[str, float]:
+ """Calculate BLEU scores with different n-gram settings."""
pred_tokens = nltk.word_tokenize(prediction.lower())
ref_tokens = [nltk.word_tokenize(reference.lower())]
@@ -64,6 +78,7 @@
def calculate_bert_scores(prediction: str, reference: str) -> Dict[str, float]:
+ """Calculate BERTScore for semantic similarity."""
try:
P, R, F1 = bert_score([prediction], [reference], lang="en", verbose=False)
return {"bert_precision": P.item(), "bert_recall": R.item(), "bert_f1": F1.item()}
@@ -73,6 +88,7 @@
def calculate_meteor_score(prediction: str, reference: str) -> float:
+ """Calculate METEOR score for the prediction."""
try:
return meteor_score([reference.split()], prediction.split())
except Exception as e:
@@ -81,6 +97,7 @@
def calculate_sentence_similarity(prediction: str, reference: str) -> float:
+ """Calculate sentence embedding similarity using SentenceBERT."""
if sentence_model is None:
return 0.0
try:
@@ -97,6 +114,7 @@
def calculate_metrics(prediction: str, reference: str) -> Dict[str, float]:
+ """Calculate comprehensive evaluation metrics for a prediction."""
# Handle empty or None values
if not prediction or not reference:
return {
@@ -149,6 +167,7 @@ def aggregate_metrics(
all_metrics: List[Dict[str, float]], all_categories: List[int]
) -> Dict[str, Dict[str, Union[float, Dict[str, float]]]]:
+ """Calculate aggregate statistics for all metrics, split by category."""
if not all_metrics:
return {}
@@ -189,4 +208,4 @@ "count": len(values),
}
- return results+ return results
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/evaluation/metrics/utils.py |
Provide docstrings following PEP 257 | import datetime
import itertools
import json
import logging
import os
import re
import string
from typing import Any
from schema import Optional, Or, Schema
from tqdm import tqdm
from embedchain.models.data_type import DataType
logger = logging.getLogger(__name__)
def parse_content(content, type):
implemented = ["html.parser", "lxml", "lxml-xml", "xml", "html5lib"]
if type not in implemented:
raise ValueError(f"Parser type {type} not implemented. Please choose one of {implemented}")
from bs4 import BeautifulSoup
soup = BeautifulSoup(content, type)
original_size = len(str(soup.get_text()))
tags_to_exclude = [
"nav",
"aside",
"form",
"header",
"noscript",
"svg",
"canvas",
"footer",
"script",
"style",
]
for tag in soup(tags_to_exclude):
tag.decompose()
ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
for id in ids_to_exclude:
tags = soup.find_all(id=id)
for tag in tags:
tag.decompose()
classes_to_exclude = [
"elementor-location-header",
"navbar-header",
"nav",
"header-sidebar-wrapper",
"blog-sidebar-wrapper",
"related-posts",
]
for class_name in classes_to_exclude:
tags = soup.find_all(class_=class_name)
for tag in tags:
tag.decompose()
content = soup.get_text()
content = clean_string(content)
cleaned_size = len(content)
if original_size != 0:
logger.info(
f"Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)" # noqa:E501
)
return content
def clean_string(text):
# Stripping and reducing multiple spaces to single:
cleaned_text = re.sub(r"\s+", " ", text.strip())
# Removing backslashes:
cleaned_text = cleaned_text.replace("\\", "")
# Replacing hash characters:
cleaned_text = cleaned_text.replace("#", " ")
# Eliminating consecutive non-alphanumeric characters:
# This regex identifies consecutive non-alphanumeric characters (i.e., not
# a word character [a-zA-Z0-9_] and not a whitespace) in the string
# and replaces each group of such characters with a single occurrence of
# that character.
# For example, "!!! hello !!!" would become "! hello !".
cleaned_text = re.sub(r"([^\w\s])\1*", r"\1", cleaned_text)
return cleaned_text
def is_readable(s):
len_s = len(s)
if len_s == 0:
return False
printable_chars = set(string.printable)
printable_ratio = sum(c in printable_chars for c in s) / len_s
return printable_ratio > 0.95 # 95% of characters are printable
def use_pysqlite3():
import platform
import sqlite3
if platform.system() == "Linux" and sqlite3.sqlite_version_info < (3, 35, 0):
try:
# According to the Chroma team, this patch only works on Linux
import datetime
import subprocess
import sys
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "pysqlite3-binary", "--quiet", "--disable-pip-version-check"]
)
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
# Let the user know what happened.
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
print(
f"{current_time} [embedchain] [INFO]",
"Swapped std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.",
f"Your original version was {sqlite3.sqlite_version}.",
)
except Exception as e:
# Escape all exceptions
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
print(
f"{current_time} [embedchain] [ERROR]",
"Failed to swap std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.",
"Error:",
e,
)
def format_source(source: str, limit: int = 20) -> str:
if len(source) > 2 * limit:
return source[:limit] + "..." + source[-limit:]
return source
def detect_datatype(source: Any) -> DataType:
from urllib.parse import urlparse
import requests
import yaml
def is_openapi_yaml(yaml_content):
# currently the following two fields are required in openapi spec yaml config
return "openapi" in yaml_content and "info" in yaml_content
def is_google_drive_folder(url):
# checks if url is a Google Drive folder url against a regex
regex = r"^drive\.google\.com\/drive\/(?:u\/\d+\/)folders\/([a-zA-Z0-9_-]+)$"
return re.match(regex, url)
try:
if not isinstance(source, str):
raise ValueError("Source is not a string and thus cannot be a URL.")
url = urlparse(source)
# Check if both scheme and netloc are present. Local file system URIs are acceptable too.
if not all([url.scheme, url.netloc]) and url.scheme != "file":
raise ValueError("Not a valid URL.")
except ValueError:
url = False
formatted_source = format_source(str(source), 30)
if url:
YOUTUBE_ALLOWED_NETLOCKS = {
"www.youtube.com",
"m.youtube.com",
"youtu.be",
"youtube.com",
"vid.plus",
"www.youtube-nocookie.com",
}
if url.netloc in YOUTUBE_ALLOWED_NETLOCKS:
logger.debug(f"Source of `{formatted_source}` detected as `youtube_video`.")
return DataType.YOUTUBE_VIDEO
if url.netloc in {"notion.so", "notion.site"}:
logger.debug(f"Source of `{formatted_source}` detected as `notion`.")
return DataType.NOTION
if url.path.endswith(".pdf"):
logger.debug(f"Source of `{formatted_source}` detected as `pdf_file`.")
return DataType.PDF_FILE
if url.path.endswith(".xml"):
logger.debug(f"Source of `{formatted_source}` detected as `sitemap`.")
return DataType.SITEMAP
if url.path.endswith(".csv"):
logger.debug(f"Source of `{formatted_source}` detected as `csv`.")
return DataType.CSV
if url.path.endswith(".mdx") or url.path.endswith(".md"):
logger.debug(f"Source of `{formatted_source}` detected as `mdx`.")
return DataType.MDX
if url.path.endswith(".docx"):
logger.debug(f"Source of `{formatted_source}` detected as `docx`.")
return DataType.DOCX
if url.path.endswith(
(".mp3", ".mp4", ".mp2", ".aac", ".wav", ".flac", ".pcm", ".m4a", ".ogg", ".opus", ".webm")
):
logger.debug(f"Source of `{formatted_source}` detected as `audio`.")
return DataType.AUDIO
if url.path.endswith(".yaml"):
try:
response = requests.get(source)
response.raise_for_status()
try:
yaml_content = yaml.safe_load(response.text)
except yaml.YAMLError as exc:
logger.error(f"Error parsing YAML: {exc}")
raise TypeError(f"Not a valid data type. Error loading YAML: {exc}")
if is_openapi_yaml(yaml_content):
logger.debug(f"Source of `{formatted_source}` detected as `openapi`.")
return DataType.OPENAPI
else:
logger.error(
f"Source of `{formatted_source}` does not contain all the required \
fields of OpenAPI yaml. Check 'https://spec.openapis.org/oas/v3.1.0'"
)
raise TypeError(
"Not a valid data type. Check 'https://spec.openapis.org/oas/v3.1.0', \
make sure you have all the required fields in YAML config data"
)
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching URL {formatted_source}: {e}")
if url.path.endswith(".json"):
logger.debug(f"Source of `{formatted_source}` detected as `json_file`.")
return DataType.JSON
if "docs" in url.netloc or ("docs" in url.path and url.scheme != "file"):
# `docs_site` detection via path is not accepted for local filesystem URIs,
# because that would mean all paths that contain `docs` are now doc sites, which is too aggressive.
logger.debug(f"Source of `{formatted_source}` detected as `docs_site`.")
return DataType.DOCS_SITE
if "github.com" in url.netloc:
logger.debug(f"Source of `{formatted_source}` detected as `github`.")
return DataType.GITHUB
if is_google_drive_folder(url.netloc + url.path):
logger.debug(f"Source of `{formatted_source}` detected as `google drive folder`.")
return DataType.GOOGLE_DRIVE_FOLDER
# If none of the above conditions are met, it's a general web page
logger.debug(f"Source of `{formatted_source}` detected as `web_page`.")
return DataType.WEB_PAGE
elif not isinstance(source, str):
# For datatypes where source is not a string.
if isinstance(source, tuple) and len(source) == 2 and isinstance(source[0], str) and isinstance(source[1], str):
logger.debug(f"Source of `{formatted_source}` detected as `qna_pair`.")
return DataType.QNA_PAIR
# Raise an error if it isn't a string and also not a valid non-string type (one of the previous).
# We could stringify it, but it is better to raise an error and let the user decide how they want to do that.
raise TypeError(
"Source is not a string and a valid non-string type could not be detected. If you want to embed it, please stringify it, for instance by using `str(source)` or `(', ').join(source)`." # noqa: E501
)
elif os.path.isfile(source):
# For datatypes that support conventional file references.
# Note: checking for string is not necessary anymore.
if source.endswith(".docx"):
logger.debug(f"Source of `{formatted_source}` detected as `docx`.")
return DataType.DOCX
if source.endswith(".csv"):
logger.debug(f"Source of `{formatted_source}` detected as `csv`.")
return DataType.CSV
if source.endswith(".xml"):
logger.debug(f"Source of `{formatted_source}` detected as `xml`.")
return DataType.XML
if source.endswith(".mdx") or source.endswith(".md"):
logger.debug(f"Source of `{formatted_source}` detected as `mdx`.")
return DataType.MDX
if source.endswith(".txt"):
logger.debug(f"Source of `{formatted_source}` detected as `text`.")
return DataType.TEXT_FILE
if source.endswith(".pdf"):
logger.debug(f"Source of `{formatted_source}` detected as `pdf_file`.")
return DataType.PDF_FILE
if source.endswith(".yaml"):
with open(source, "r") as file:
yaml_content = yaml.safe_load(file)
if is_openapi_yaml(yaml_content):
logger.debug(f"Source of `{formatted_source}` detected as `openapi`.")
return DataType.OPENAPI
else:
logger.error(
f"Source of `{formatted_source}` does not contain all the required \
fields of OpenAPI yaml. Check 'https://spec.openapis.org/oas/v3.1.0'"
)
raise ValueError(
"Invalid YAML data. Check 'https://spec.openapis.org/oas/v3.1.0', \
make sure to add all the required params"
)
if source.endswith(".json"):
logger.debug(f"Source of `{formatted_source}` detected as `json`.")
return DataType.JSON
if os.path.exists(source) and is_readable(open(source).read()):
logger.debug(f"Source of `{formatted_source}` detected as `text_file`.")
return DataType.TEXT_FILE
# If the source is a valid file, that's not detectable as a type, an error is raised.
# It does not fall back to text.
raise ValueError(
"Source points to a valid file, but based on the filename, no `data_type` can be detected. Please be aware, that not all data_types allow conventional file references, some require the use of the `file URI scheme`. Please refer to the embedchain documentation (https://docs.embedchain.ai/advanced/data_types#remote-data-types)." # noqa: E501
)
else:
# Source is not a URL.
# TODO: check if source is gmail query
# check if the source is valid json string
if is_valid_json_string(source):
logger.debug(f"Source of `{formatted_source}` detected as `json`.")
return DataType.JSON
# Use text as final fallback.
logger.debug(f"Source of `{formatted_source}` detected as `text`.")
return DataType.TEXT
# check if the source is valid json string
def is_valid_json_string(source: str):
try:
_ = json.loads(source)
return True
except json.JSONDecodeError:
return False
def validate_config(config_data):
schema = Schema(
{
Optional("app"): {
Optional("config"): {
Optional("id"): str,
Optional("name"): str,
Optional("log_level"): Or("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
Optional("collect_metrics"): bool,
Optional("collection_name"): str,
}
},
Optional("llm"): {
Optional("provider"): Or(
"openai",
"azure_openai",
"anthropic",
"huggingface",
"cohere",
"together",
"gpt4all",
"ollama",
"jina",
"llama2",
"vertexai",
"google",
"aws_bedrock",
"mistralai",
"clarifai",
"vllm",
"groq",
"nvidia",
),
Optional("config"): {
Optional("model"): str,
Optional("model_name"): str,
Optional("number_documents"): int,
Optional("temperature"): float,
Optional("max_tokens"): int,
Optional("top_p"): Or(float, int),
Optional("stream"): bool,
Optional("online"): bool,
Optional("token_usage"): bool,
Optional("template"): str,
Optional("prompt"): str,
Optional("system_prompt"): str,
Optional("deployment_name"): str,
Optional("where"): dict,
Optional("query_type"): str,
Optional("api_key"): str,
Optional("base_url"): str,
Optional("endpoint"): str,
Optional("model_kwargs"): dict,
Optional("local"): bool,
Optional("base_url"): str,
Optional("default_headers"): dict,
Optional("api_version"): Or(str, datetime.date),
Optional("http_client_proxies"): Or(str, dict),
Optional("http_async_client_proxies"): Or(str, dict),
},
},
Optional("vectordb"): {
Optional("provider"): Or(
"chroma", "elasticsearch", "opensearch", "lancedb", "pinecone", "qdrant", "weaviate", "zilliz"
),
Optional("config"): object, # TODO: add particular config schema for each provider
},
Optional("embedder"): {
Optional("provider"): Or(
"openai",
"gpt4all",
"huggingface",
"vertexai",
"azure_openai",
"google",
"mistralai",
"clarifai",
"nvidia",
"ollama",
"cohere",
"aws_bedrock",
),
Optional("config"): {
Optional("model"): Optional(str),
Optional("deployment_name"): Optional(str),
Optional("api_key"): str,
Optional("api_base"): str,
Optional("title"): str,
Optional("task_type"): str,
Optional("vector_dimension"): int,
Optional("base_url"): str,
Optional("endpoint"): str,
Optional("model_kwargs"): dict,
Optional("http_client_proxies"): Or(str, dict),
Optional("http_async_client_proxies"): Or(str, dict),
},
},
Optional("embedding_model"): {
Optional("provider"): Or(
"openai",
"gpt4all",
"huggingface",
"vertexai",
"azure_openai",
"google",
"mistralai",
"clarifai",
"nvidia",
"ollama",
"aws_bedrock",
),
Optional("config"): {
Optional("model"): str,
Optional("deployment_name"): str,
Optional("api_key"): str,
Optional("title"): str,
Optional("task_type"): str,
Optional("vector_dimension"): int,
Optional("base_url"): str,
},
},
Optional("chunker"): {
Optional("chunk_size"): int,
Optional("chunk_overlap"): int,
Optional("length_function"): str,
Optional("min_chunk_size"): int,
},
Optional("cache"): {
Optional("similarity_evaluation"): {
Optional("strategy"): Or("distance", "exact"),
Optional("max_distance"): float,
Optional("positive"): bool,
},
Optional("config"): {
Optional("similarity_threshold"): float,
Optional("auto_flush"): int,
},
},
Optional("memory"): {
Optional("top_k"): int,
},
}
)
return schema.validate(config_data)
def chunks(iterable, batch_size=100, desc="Processing chunks"):
it = iter(iterable)
total_size = len(iterable)
with tqdm(total=total_size, desc=desc, unit="batch") as pbar:
chunk = tuple(itertools.islice(it, batch_size))
while chunk:
yield chunk
pbar.update(len(chunk))
chunk = tuple(itertools.islice(it, batch_size)) | --- +++ @@ -72,6 +72,16 @@
def clean_string(text):
+ """
+ This function takes in a string and performs a series of text cleaning operations.
+
+ Args:
+ text (str): The text to be cleaned. This is expected to be a string.
+
+ Returns:
+ cleaned_text (str): The cleaned text after all the cleaning operations
+ have been performed.
+ """
# Stripping and reducing multiple spaces to single:
cleaned_text = re.sub(r"\s+", " ", text.strip())
@@ -93,6 +103,12 @@
def is_readable(s):
+ """
+ Heuristic to determine if a string is "readable" (mostly contains printable characters and forms meaningful words)
+
+ :param s: string
+ :return: True if the string is more than 95% printable.
+ """
len_s = len(s)
if len_s == 0:
return False
@@ -102,6 +118,9 @@
def use_pysqlite3():
+ """
+ Swap std-lib sqlite3 with pysqlite3.
+ """
import platform
import sqlite3
@@ -138,12 +157,23 @@
def format_source(source: str, limit: int = 20) -> str:
+ """
+ Format a string to only take the first x and last x letters.
+ This makes it easier to display a URL, keeping familiarity while ensuring a consistent length.
+ If the string is too short, it is not sliced.
+ """
if len(source) > 2 * limit:
return source[:limit] + "..." + source[-limit:]
return source
def detect_datatype(source: Any) -> DataType:
+ """
+ Automatically detect the datatype of the given source.
+
+ :param source: the source to base the detection on
+ :return: data_type string
+ """
from urllib.parse import urlparse
import requests
@@ -504,6 +534,7 @@
def chunks(iterable, batch_size=100, desc="Processing chunks"):
+ """A helper function to break an iterable into chunks of size batch_size."""
it = iter(iterable)
total_size = len(iterable)
@@ -512,4 +543,4 @@ while chunk:
yield chunk
pbar.update(len(chunk))
- chunk = tuple(itertools.islice(it, batch_size))+ chunk = tuple(itertools.islice(it, batch_size))
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/utils/misc.py |
Document my Python code with docstrings | import logging
from typing import Any, Optional, Union
from embedchain.config import ZillizDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
try:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
MilvusClient,
connections,
utility,
)
except ImportError:
raise ImportError(
"Zilliz requires extra dependencies. Install with `pip install --upgrade embedchain[milvus]`"
) from None
logger = logging.getLogger(__name__)
@register_deserializable
class ZillizVectorDB(BaseVectorDB):
def __init__(self, config: ZillizDBConfig = None):
if config is None:
self.config = ZillizDBConfig()
else:
self.config = config
self.client = MilvusClient(
uri=self.config.uri,
token=self.config.token,
)
self.connection = connections.connect(
uri=self.config.uri,
token=self.config.token,
)
super().__init__(config=self.config)
def _initialize(self):
self._get_or_create_collection(self.config.collection_name)
def _get_or_create_db(self):
return self.client
def _get_or_create_collection(self, name):
if utility.has_collection(name):
logger.info(f"[ZillizDB]: found an existing collection {name}, make sure the auto-id is disabled.")
self.collection = Collection(name)
else:
fields = [
FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=512),
FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=2048),
FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=self.embedder.vector_dimension),
FieldSchema(name="metadata", dtype=DataType.JSON),
]
schema = CollectionSchema(fields, enable_dynamic_field=True)
self.collection = Collection(name=name, schema=schema)
index = {
"index_type": "AUTOINDEX",
"metric_type": self.config.metric_type,
}
self.collection.create_index("embeddings", index)
return self.collection
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
data_ids = []
metadatas = []
if self.collection.num_entities == 0 or self.collection.is_empty:
return {"ids": data_ids, "metadatas": metadatas}
filter_ = ""
if ids:
filter_ = f'id in "{ids}"'
if where:
if filter_:
filter_ += " and "
filter_ = f"{self._generate_zilliz_filter(where)}"
results = self.client.query(collection_name=self.config.collection_name, filter=filter_, output_fields=["*"])
for res in results:
data_ids.append(res.get("id"))
metadatas.append(res.get("metadata", {}))
return {"ids": data_ids, "metadatas": metadatas}
def add(
self,
documents: list[str],
metadatas: list[object],
ids: list[str],
**kwargs: Optional[dict[str, any]],
):
embeddings = self.embedder.embedding_fn(documents)
for id, doc, metadata, embedding in zip(ids, documents, metadatas, embeddings):
data = {"id": id, "text": doc, "embeddings": embedding, "metadata": metadata}
self.client.insert(collection_name=self.config.collection_name, data=data, **kwargs)
self.collection.load()
self.collection.flush()
self.client.flush(self.config.collection_name)
def query(
self,
input_query: str,
n_results: int,
where: dict[str, Any],
citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, dict]], list[str]]:
if self.collection.is_empty:
return []
output_fields = ["*"]
input_query_vector = self.embedder.embedding_fn([input_query])
query_vector = input_query_vector[0]
query_filter = self._generate_zilliz_filter(where)
query_result = self.client.search(
collection_name=self.config.collection_name,
data=[query_vector],
filter=query_filter,
limit=n_results,
output_fields=output_fields,
**kwargs,
)
query_result = query_result[0]
contexts = []
for query in query_result:
data = query["entity"]
score = query["distance"]
context = data["text"]
if citations:
metadata = data.get("metadata", {})
metadata["score"] = score
contexts.append(tuple((context, metadata)))
else:
contexts.append(context)
return contexts
def count(self) -> int:
return self.collection.num_entities
def reset(self, collection_names: list[str] = None):
if self.config.collection_name:
if collection_names:
for collection_name in collection_names:
if collection_name in self.client.list_collections():
self.client.drop_collection(collection_name=collection_name)
else:
self.client.drop_collection(collection_name=self.config.collection_name)
self._get_or_create_collection(self.config.collection_name)
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def _generate_zilliz_filter(self, where: dict[str, str]):
operands = []
for key, value in where.items():
operands.append(f'(metadata["{key}"] == "{value}")')
return " and ".join(operands)
def delete(self, where: dict[str, Any]):
data = self.get(where=where)
keys = data.get("ids", [])
if keys:
self.client.delete(collection_name=self.config.collection_name, pks=keys) | --- +++ @@ -25,8 +25,14 @@
@register_deserializable
class ZillizVectorDB(BaseVectorDB):
+ """Base class for vector database."""
def __init__(self, config: ZillizDBConfig = None):
+ """Initialize the database. Save the config and client as an attribute.
+
+ :param config: Database configuration class instance.
+ :type config: ZillizDBConfig
+ """
if config is None:
self.config = ZillizDBConfig()
@@ -46,12 +52,24 @@ super().__init__(config=self.config)
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+
+ So it's can't be done in __init__ in one step.
+ """
self._get_or_create_collection(self.config.collection_name)
def _get_or_create_db(self):
+ """Get or create the database."""
return self.client
def _get_or_create_collection(self, name):
+ """
+ Get or create a named collection.
+
+ :param name: Name of the collection
+ :type name: str
+ """
if utility.has_collection(name):
logger.info(f"[ZillizDB]: found an existing collection {name}, make sure the auto-id is disabled.")
self.collection = Collection(name)
@@ -74,6 +92,18 @@ return self.collection
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: list of doc ids to check for existence
+ :type ids: list[str]
+ :param where: Optional. to filter data
+ :type where: dict[str, Any]
+ :param limit: Optional. maximum number of documents
+ :type limit: Optional[int]
+ :return: Existing documents.
+ :rtype: Set[str]
+ """
data_ids = []
metadatas = []
if self.collection.num_entities == 0 or self.collection.is_empty:
@@ -102,6 +132,7 @@ ids: list[str],
**kwargs: Optional[dict[str, any]],
):
+ """Add to database"""
embeddings = self.embedder.embedding_fn(documents)
for id, doc, metadata, embedding in zip(ids, documents, metadatas, embeddings):
@@ -120,6 +151,22 @@ citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ Query contents from vector database based on vector similarity
+
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: to filter data
+ :type where: dict[str, Any]
+ :raises InvalidDimensionException: Dimensions do not match.
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
if self.collection.is_empty:
return []
@@ -153,9 +200,18 @@ return contexts
def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
return self.collection.num_entities
def reset(self, collection_names: list[str] = None):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
if self.config.collection_name:
if collection_names:
for collection_name in collection_names:
@@ -166,6 +222,12 @@ self._get_or_create_collection(self.config.collection_name)
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
@@ -177,7 +239,14 @@ return " and ".join(operands)
def delete(self, where: dict[str, Any]):
+ """
+ Delete the embeddings from DB. Zilliz only support deleting with keys.
+
+
+ :param keys: Primary keys of the table entries to delete.
+ :type keys: Union[list, str, int]
+ """
data = self.get(where=where)
keys = data.get("ids", [])
if keys:
- self.client.delete(collection_name=self.config.collection_name, pks=keys)+ self.client.delete(collection_name=self.config.collection_name, pks=keys)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/zilliz.py |
Improve my code by adding docstrings | from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.helpers.json_serializable import JSONSerializable
class BaseVectorDB(JSONSerializable):
def __init__(self, config: BaseVectorDbConfig):
self.client = self._get_or_create_db()
self.config: BaseVectorDbConfig = config
def _initialize(self):
raise NotImplementedError
def _get_or_create_db(self):
raise NotImplementedError
def _get_or_create_collection(self):
raise NotImplementedError
def _set_embedder(self, embedder: BaseEmbedder):
self.embedder = embedder
def get(self):
raise NotImplementedError
def add(self):
raise NotImplementedError
def query(self):
raise NotImplementedError
def count(self) -> int:
raise NotImplementedError
def reset(self):
raise NotImplementedError
def set_collection_name(self, name: str):
raise NotImplementedError
def delete(self):
raise NotImplementedError | --- +++ @@ -4,41 +4,79 @@
class BaseVectorDB(JSONSerializable):
+ """Base class for vector database."""
def __init__(self, config: BaseVectorDbConfig):
+ """Initialize the database. Save the config and client as an attribute.
+
+ :param config: Database configuration class instance.
+ :type config: BaseVectorDbConfig
+ """
self.client = self._get_or_create_db()
self.config: BaseVectorDbConfig = config
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+
+ So it's can't be done in __init__ in one step.
+ """
raise NotImplementedError
def _get_or_create_db(self):
+ """Get or create the database."""
raise NotImplementedError
def _get_or_create_collection(self):
+ """Get or create a named collection."""
raise NotImplementedError
def _set_embedder(self, embedder: BaseEmbedder):
+ """
+ The database needs to access the embedder sometimes, with this method you can persistently set it.
+
+ :param embedder: Embedder to be set as the embedder for this database.
+ :type embedder: BaseEmbedder
+ """
self.embedder = embedder
def get(self):
+ """Get database embeddings by id."""
raise NotImplementedError
def add(self):
+ """Add to database"""
raise NotImplementedError
def query(self):
+ """Query contents from vector database based on vector similarity"""
raise NotImplementedError
def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
raise NotImplementedError
def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
raise NotImplementedError
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
raise NotImplementedError
def delete(self):
+ """Delete from database."""
- raise NotImplementedError+ raise NotImplementedError
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/base.py |
Add docstrings including usage examples | import logging
from typing import Any, Optional, Union
try:
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError(
"Elasticsearch requires extra dependencies. Install with `pip install --upgrade embedchain[elasticsearch]`"
) from None
from embedchain.config import ElasticsearchDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.utils.misc import chunks
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
@register_deserializable
class ElasticsearchDB(BaseVectorDB):
def __init__(
self,
config: Optional[ElasticsearchDBConfig] = None,
es_config: Optional[ElasticsearchDBConfig] = None, # Backwards compatibility
):
if config is None and es_config is None:
self.config = ElasticsearchDBConfig()
else:
if not isinstance(config, ElasticsearchDBConfig):
raise TypeError(
"config is not a `ElasticsearchDBConfig` instance. "
"Please make sure the type is right and that you are passing an instance."
)
self.config = config or es_config
if self.config.ES_URL:
self.client = Elasticsearch(self.config.ES_URL, **self.config.ES_EXTRA_PARAMS)
elif self.config.CLOUD_ID:
self.client = Elasticsearch(cloud_id=self.config.CLOUD_ID, **self.config.ES_EXTRA_PARAMS)
else:
raise ValueError(
"Something is wrong with your config. Please check again - `https://docs.embedchain.ai/components/vector-databases#elasticsearch`" # noqa: E501
)
self.batch_size = self.config.batch_size
# Call parent init here because embedder is needed
super().__init__(config=self.config)
def _initialize(self):
logger.info(self.client.info())
index_settings = {
"mappings": {
"properties": {
"text": {"type": "text"},
"embeddings": {"type": "dense_vector", "index": False, "dims": self.embedder.vector_dimension},
}
}
}
es_index = self._get_index()
if not self.client.indices.exists(index=es_index):
# create index if not exist
print("Creating index", es_index, index_settings)
self.client.indices.create(index=es_index, body=index_settings)
def _get_or_create_db(self):
return self.client
def _get_or_create_collection(self, name):
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
if ids:
query = {"bool": {"must": [{"ids": {"values": ids}}]}}
else:
query = {"bool": {"must": []}}
if where:
for key, value in where.items():
query["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
response = self.client.search(index=self._get_index(), query=query, _source=True, size=limit)
docs = response["hits"]["hits"]
ids = [doc["_id"] for doc in docs]
doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
# Result is modified for compatibility with other vector databases
# TODO: Add method in vector database to return result in a standard format
result = {"ids": ids, "metadatas": []}
for doc_id in doc_ids:
result["metadatas"].append({"doc_id": doc_id})
return result
def add(
self,
documents: list[str],
metadatas: list[object],
ids: list[str],
**kwargs: Optional[dict[str, any]],
) -> Any:
embeddings = self.embedder.embedding_fn(documents)
for chunk in chunks(
list(zip(ids, documents, metadatas, embeddings)),
self.batch_size,
desc="Inserting batches in elasticsearch",
): # noqa: E501
ids, docs, metadatas, embeddings = [], [], [], []
for id, text, metadata, embedding in chunk:
ids.append(id)
docs.append(text)
metadatas.append(metadata)
embeddings.append(embedding)
batch_docs = []
for id, text, metadata, embedding in zip(ids, docs, metadatas, embeddings):
batch_docs.append(
{
"_index": self._get_index(),
"_id": id,
"_source": {"text": text, "metadata": metadata, "embeddings": embedding},
}
)
bulk(self.client, batch_docs, **kwargs)
self.client.indices.refresh(index=self._get_index())
def query(
self,
input_query: str,
n_results: int,
where: dict[str, any],
citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, dict]], list[str]]:
input_query_vector = self.embedder.embedding_fn([input_query])
query_vector = input_query_vector[0]
# `https://www.elastic.co/guide/en/elasticsearch/reference/7.17/query-dsl-script-score-query.html`
query = {
"script_score": {
"query": {"bool": {"must": [{"exists": {"field": "text"}}]}},
"script": {
"source": "cosineSimilarity(params.input_query_vector, 'embeddings') + 1.0",
"params": {"input_query_vector": query_vector},
},
}
}
if where:
for key, value in where.items():
query["script_score"]["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
_source = ["text", "metadata"]
response = self.client.search(index=self._get_index(), query=query, _source=_source, size=n_results)
docs = response["hits"]["hits"]
contexts = []
for doc in docs:
context = doc["_source"]["text"]
if citations:
metadata = doc["_source"]["metadata"]
metadata["score"] = doc["_score"]
contexts.append(tuple((context, metadata)))
else:
contexts.append(context)
return contexts
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def count(self) -> int:
query = {"match_all": {}}
response = self.client.count(index=self._get_index(), query=query)
doc_count = response["count"]
return doc_count
def reset(self):
# Delete all data from the database
if self.client.indices.exists(index=self._get_index()):
# delete index in Es
self.client.indices.delete(index=self._get_index())
def _get_index(self) -> str:
# NOTE: The method is preferred to an attribute, because if collection name changes,
# it's always up-to-date.
return f"{self.config.collection_name}_{self.embedder.vector_dimension}".lower()
def delete(self, where):
query = {"query": {"bool": {"must": []}}}
for key, value in where.items():
query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
self.client.delete_by_query(index=self._get_index(), body=query)
self.client.indices.refresh(index=self._get_index()) | --- +++ @@ -1,196 +1,269 @@-import logging
-from typing import Any, Optional, Union
-
-try:
- from elasticsearch import Elasticsearch
- from elasticsearch.helpers import bulk
-except ImportError:
- raise ImportError(
- "Elasticsearch requires extra dependencies. Install with `pip install --upgrade embedchain[elasticsearch]`"
- ) from None
-
-from embedchain.config import ElasticsearchDBConfig
-from embedchain.helpers.json_serializable import register_deserializable
-from embedchain.utils.misc import chunks
-from embedchain.vectordb.base import BaseVectorDB
-
-logger = logging.getLogger(__name__)
-
-
-@register_deserializable
-class ElasticsearchDB(BaseVectorDB):
-
- def __init__(
- self,
- config: Optional[ElasticsearchDBConfig] = None,
- es_config: Optional[ElasticsearchDBConfig] = None, # Backwards compatibility
- ):
- if config is None and es_config is None:
- self.config = ElasticsearchDBConfig()
- else:
- if not isinstance(config, ElasticsearchDBConfig):
- raise TypeError(
- "config is not a `ElasticsearchDBConfig` instance. "
- "Please make sure the type is right and that you are passing an instance."
- )
- self.config = config or es_config
- if self.config.ES_URL:
- self.client = Elasticsearch(self.config.ES_URL, **self.config.ES_EXTRA_PARAMS)
- elif self.config.CLOUD_ID:
- self.client = Elasticsearch(cloud_id=self.config.CLOUD_ID, **self.config.ES_EXTRA_PARAMS)
- else:
- raise ValueError(
- "Something is wrong with your config. Please check again - `https://docs.embedchain.ai/components/vector-databases#elasticsearch`" # noqa: E501
- )
-
- self.batch_size = self.config.batch_size
- # Call parent init here because embedder is needed
- super().__init__(config=self.config)
-
- def _initialize(self):
- logger.info(self.client.info())
- index_settings = {
- "mappings": {
- "properties": {
- "text": {"type": "text"},
- "embeddings": {"type": "dense_vector", "index": False, "dims": self.embedder.vector_dimension},
- }
- }
- }
- es_index = self._get_index()
- if not self.client.indices.exists(index=es_index):
- # create index if not exist
- print("Creating index", es_index, index_settings)
- self.client.indices.create(index=es_index, body=index_settings)
-
- def _get_or_create_db(self):
- return self.client
-
- def _get_or_create_collection(self, name):
-
- def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
- if ids:
- query = {"bool": {"must": [{"ids": {"values": ids}}]}}
- else:
- query = {"bool": {"must": []}}
-
- if where:
- for key, value in where.items():
- query["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
-
- response = self.client.search(index=self._get_index(), query=query, _source=True, size=limit)
- docs = response["hits"]["hits"]
- ids = [doc["_id"] for doc in docs]
- doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
-
- # Result is modified for compatibility with other vector databases
- # TODO: Add method in vector database to return result in a standard format
- result = {"ids": ids, "metadatas": []}
-
- for doc_id in doc_ids:
- result["metadatas"].append({"doc_id": doc_id})
-
- return result
-
- def add(
- self,
- documents: list[str],
- metadatas: list[object],
- ids: list[str],
- **kwargs: Optional[dict[str, any]],
- ) -> Any:
-
- embeddings = self.embedder.embedding_fn(documents)
-
- for chunk in chunks(
- list(zip(ids, documents, metadatas, embeddings)),
- self.batch_size,
- desc="Inserting batches in elasticsearch",
- ): # noqa: E501
- ids, docs, metadatas, embeddings = [], [], [], []
- for id, text, metadata, embedding in chunk:
- ids.append(id)
- docs.append(text)
- metadatas.append(metadata)
- embeddings.append(embedding)
-
- batch_docs = []
- for id, text, metadata, embedding in zip(ids, docs, metadatas, embeddings):
- batch_docs.append(
- {
- "_index": self._get_index(),
- "_id": id,
- "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
- }
- )
- bulk(self.client, batch_docs, **kwargs)
- self.client.indices.refresh(index=self._get_index())
-
- def query(
- self,
- input_query: str,
- n_results: int,
- where: dict[str, any],
- citations: bool = False,
- **kwargs: Optional[dict[str, Any]],
- ) -> Union[list[tuple[str, dict]], list[str]]:
- input_query_vector = self.embedder.embedding_fn([input_query])
- query_vector = input_query_vector[0]
-
- # `https://www.elastic.co/guide/en/elasticsearch/reference/7.17/query-dsl-script-score-query.html`
- query = {
- "script_score": {
- "query": {"bool": {"must": [{"exists": {"field": "text"}}]}},
- "script": {
- "source": "cosineSimilarity(params.input_query_vector, 'embeddings') + 1.0",
- "params": {"input_query_vector": query_vector},
- },
- }
- }
-
- if where:
- for key, value in where.items():
- query["script_score"]["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
-
- _source = ["text", "metadata"]
- response = self.client.search(index=self._get_index(), query=query, _source=_source, size=n_results)
- docs = response["hits"]["hits"]
- contexts = []
- for doc in docs:
- context = doc["_source"]["text"]
- if citations:
- metadata = doc["_source"]["metadata"]
- metadata["score"] = doc["_score"]
- contexts.append(tuple((context, metadata)))
- else:
- contexts.append(context)
- return contexts
-
- def set_collection_name(self, name: str):
- if not isinstance(name, str):
- raise TypeError("Collection name must be a string")
- self.config.collection_name = name
-
- def count(self) -> int:
- query = {"match_all": {}}
- response = self.client.count(index=self._get_index(), query=query)
- doc_count = response["count"]
- return doc_count
-
- def reset(self):
- # Delete all data from the database
- if self.client.indices.exists(index=self._get_index()):
- # delete index in Es
- self.client.indices.delete(index=self._get_index())
-
- def _get_index(self) -> str:
- # NOTE: The method is preferred to an attribute, because if collection name changes,
- # it's always up-to-date.
- return f"{self.config.collection_name}_{self.embedder.vector_dimension}".lower()
-
- def delete(self, where):
- query = {"query": {"bool": {"must": []}}}
- for key, value in where.items():
- query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
- self.client.delete_by_query(index=self._get_index(), body=query)
- self.client.indices.refresh(index=self._get_index())+import logging
+from typing import Any, Optional, Union
+
+try:
+ from elasticsearch import Elasticsearch
+ from elasticsearch.helpers import bulk
+except ImportError:
+ raise ImportError(
+ "Elasticsearch requires extra dependencies. Install with `pip install --upgrade embedchain[elasticsearch]`"
+ ) from None
+
+from embedchain.config import ElasticsearchDBConfig
+from embedchain.helpers.json_serializable import register_deserializable
+from embedchain.utils.misc import chunks
+from embedchain.vectordb.base import BaseVectorDB
+
+logger = logging.getLogger(__name__)
+
+
+@register_deserializable
+class ElasticsearchDB(BaseVectorDB):
+ """
+ Elasticsearch as vector database
+ """
+
+ def __init__(
+ self,
+ config: Optional[ElasticsearchDBConfig] = None,
+ es_config: Optional[ElasticsearchDBConfig] = None, # Backwards compatibility
+ ):
+ """Elasticsearch as vector database.
+
+ :param config: Elasticsearch database config, defaults to None
+ :type config: ElasticsearchDBConfig, optional
+ :param es_config: `es_config` is supported as an alias for `config` (for backwards compatibility),
+ defaults to None
+ :type es_config: ElasticsearchDBConfig, optional
+ :raises ValueError: No config provided
+ """
+ if config is None and es_config is None:
+ self.config = ElasticsearchDBConfig()
+ else:
+ if not isinstance(config, ElasticsearchDBConfig):
+ raise TypeError(
+ "config is not a `ElasticsearchDBConfig` instance. "
+ "Please make sure the type is right and that you are passing an instance."
+ )
+ self.config = config or es_config
+ if self.config.ES_URL:
+ self.client = Elasticsearch(self.config.ES_URL, **self.config.ES_EXTRA_PARAMS)
+ elif self.config.CLOUD_ID:
+ self.client = Elasticsearch(cloud_id=self.config.CLOUD_ID, **self.config.ES_EXTRA_PARAMS)
+ else:
+ raise ValueError(
+ "Something is wrong with your config. Please check again - `https://docs.embedchain.ai/components/vector-databases#elasticsearch`" # noqa: E501
+ )
+
+ self.batch_size = self.config.batch_size
+ # Call parent init here because embedder is needed
+ super().__init__(config=self.config)
+
+ def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+ """
+ logger.info(self.client.info())
+ index_settings = {
+ "mappings": {
+ "properties": {
+ "text": {"type": "text"},
+ "embeddings": {"type": "dense_vector", "index": False, "dims": self.embedder.vector_dimension},
+ }
+ }
+ }
+ es_index = self._get_index()
+ if not self.client.indices.exists(index=es_index):
+ # create index if not exist
+ print("Creating index", es_index, index_settings)
+ self.client.indices.create(index=es_index, body=index_settings)
+
+ def _get_or_create_db(self):
+ """Called during initialization"""
+ return self.client
+
+ def _get_or_create_collection(self, name):
+ """Note: nothing to return here. Discuss later"""
+
+ def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+
+ :param ids: _list of doc ids to check for existence
+ :type ids: list[str]
+ :param where: to filter data
+ :type where: dict[str, any]
+ :return: ids
+ :rtype: Set[str]
+ """
+ if ids:
+ query = {"bool": {"must": [{"ids": {"values": ids}}]}}
+ else:
+ query = {"bool": {"must": []}}
+
+ if where:
+ for key, value in where.items():
+ query["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
+
+ response = self.client.search(index=self._get_index(), query=query, _source=True, size=limit)
+ docs = response["hits"]["hits"]
+ ids = [doc["_id"] for doc in docs]
+ doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
+
+ # Result is modified for compatibility with other vector databases
+ # TODO: Add method in vector database to return result in a standard format
+ result = {"ids": ids, "metadatas": []}
+
+ for doc_id in doc_ids:
+ result["metadatas"].append({"doc_id": doc_id})
+
+ return result
+
+ def add(
+ self,
+ documents: list[str],
+ metadatas: list[object],
+ ids: list[str],
+ **kwargs: Optional[dict[str, any]],
+ ) -> Any:
+ """
+ add data in vector database
+ :param documents: list of texts to add
+ :type documents: list[str]
+ :param metadatas: list of metadata associated with docs
+ :type metadatas: list[object]
+ :param ids: ids of docs
+ :type ids: list[str]
+ """
+
+ embeddings = self.embedder.embedding_fn(documents)
+
+ for chunk in chunks(
+ list(zip(ids, documents, metadatas, embeddings)),
+ self.batch_size,
+ desc="Inserting batches in elasticsearch",
+ ): # noqa: E501
+ ids, docs, metadatas, embeddings = [], [], [], []
+ for id, text, metadata, embedding in chunk:
+ ids.append(id)
+ docs.append(text)
+ metadatas.append(metadata)
+ embeddings.append(embedding)
+
+ batch_docs = []
+ for id, text, metadata, embedding in zip(ids, docs, metadatas, embeddings):
+ batch_docs.append(
+ {
+ "_index": self._get_index(),
+ "_id": id,
+ "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
+ }
+ )
+ bulk(self.client, batch_docs, **kwargs)
+ self.client.indices.refresh(index=self._get_index())
+
+ def query(
+ self,
+ input_query: str,
+ n_results: int,
+ where: dict[str, any],
+ citations: bool = False,
+ **kwargs: Optional[dict[str, Any]],
+ ) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ query contents from vector database based on vector similarity
+
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: Optional. to filter data
+ :type where: dict[str, any]
+ :return: The context of the document that matched your query, url of the source, doc_id
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
+ input_query_vector = self.embedder.embedding_fn([input_query])
+ query_vector = input_query_vector[0]
+
+ # `https://www.elastic.co/guide/en/elasticsearch/reference/7.17/query-dsl-script-score-query.html`
+ query = {
+ "script_score": {
+ "query": {"bool": {"must": [{"exists": {"field": "text"}}]}},
+ "script": {
+ "source": "cosineSimilarity(params.input_query_vector, 'embeddings') + 1.0",
+ "params": {"input_query_vector": query_vector},
+ },
+ }
+ }
+
+ if where:
+ for key, value in where.items():
+ query["script_score"]["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
+
+ _source = ["text", "metadata"]
+ response = self.client.search(index=self._get_index(), query=query, _source=_source, size=n_results)
+ docs = response["hits"]["hits"]
+ contexts = []
+ for doc in docs:
+ context = doc["_source"]["text"]
+ if citations:
+ metadata = doc["_source"]["metadata"]
+ metadata["score"] = doc["_score"]
+ contexts.append(tuple((context, metadata)))
+ else:
+ contexts.append(context)
+ return contexts
+
+ def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+
+ :param name: Name of the collection.
+ :type name: str
+ """
+ if not isinstance(name, str):
+ raise TypeError("Collection name must be a string")
+ self.config.collection_name = name
+
+ def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+
+ :return: number of documents
+ :rtype: int
+ """
+ query = {"match_all": {}}
+ response = self.client.count(index=self._get_index(), query=query)
+ doc_count = response["count"]
+ return doc_count
+
+ def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
+ # Delete all data from the database
+ if self.client.indices.exists(index=self._get_index()):
+ # delete index in Es
+ self.client.indices.delete(index=self._get_index())
+
+ def _get_index(self) -> str:
+ """Get the Elasticsearch index for a collection
+
+ :return: Elasticsearch index
+ :rtype: str
+ """
+ # NOTE: The method is preferred to an attribute, because if collection name changes,
+ # it's always up-to-date.
+ return f"{self.config.collection_name}_{self.embedder.vector_dimension}".lower()
+
+ def delete(self, where):
+ """Delete documents from the database."""
+ query = {"query": {"bool": {"must": []}}}
+ for key, value in where.items():
+ query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
+ self.client.delete_by_query(index=self._get_index(), body=query)
+ self.client.indices.refresh(index=self._get_index())
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/elasticsearch.py |
Add docstrings following best practices | import argparse
import json
from collections import defaultdict
import numpy as np
from openai import OpenAI
from mem0.memory.utils import extract_json
client = OpenAI()
ACCURACY_PROMPT = """
Your task is to label an answer to a question as ’CORRECT’ or ’WRONG’. You will be given the following data:
(1) a question (posed by one user to another user),
(2) a ’gold’ (ground truth) answer,
(3) a generated answer
which you will score as CORRECT/WRONG.
The point of the question is to ask about something one user should know about the other user based on their prior conversations.
The gold answer will usually be a concise and short answer that includes the referenced topic, for example:
Question: Do you remember what I got the last time I went to Hawaii?
Gold answer: A shell necklace
The generated answer might be much longer, but you should be generous with your grading - as long as it touches on the same topic as the gold answer, it should be counted as CORRECT.
For time related questions, the gold answer will be a specific date, month, year, etc. The generated answer might be much longer or use relative time references (like "last Tuesday" or "next month"), but you should be generous with your grading - as long as it refers to the same date or time period as the gold answer, it should be counted as CORRECT. Even if the format differs (e.g., "May 7th" vs "7 May"), consider it CORRECT if it's the same date.
Now it's time for the real question:
Question: {question}
Gold answer: {gold_answer}
Generated answer: {generated_answer}
First, provide a short (one sentence) explanation of your reasoning, then finish with CORRECT or WRONG.
Do NOT include both CORRECT and WRONG in your response, or it will break the evaluation script.
Just return the label CORRECT or WRONG in a json format with the key as "label".
"""
def evaluate_llm_judge(question, gold_answer, generated_answer):
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": ACCURACY_PROMPT.format(
question=question, gold_answer=gold_answer, generated_answer=generated_answer
),
}
],
response_format={"type": "json_object"},
temperature=0.0,
)
label = json.loads(extract_json(response.choices[0].message.content))["label"]
return 1 if label == "CORRECT" else 0
def main():
parser = argparse.ArgumentParser(description="Evaluate RAG results using LLM judge")
parser.add_argument(
"--input_file",
type=str,
default="results/default_run_v4_k30_new_graph.json",
help="Path to the input dataset file",
)
args = parser.parse_args()
dataset_path = args.input_file
output_path = f"results/llm_judge_{dataset_path.split('/')[-1]}"
with open(dataset_path, "r") as f:
data = json.load(f)
LLM_JUDGE = defaultdict(list)
RESULTS = defaultdict(list)
index = 0
for k, v in data.items():
for x in v:
question = x["question"]
gold_answer = x["answer"]
generated_answer = x["response"]
category = x["category"]
# Skip category 5
if int(category) == 5:
continue
# Evaluate the answer
label = evaluate_llm_judge(question, gold_answer, generated_answer)
LLM_JUDGE[category].append(label)
# Store the results
RESULTS[index].append(
{
"question": question,
"gt_answer": gold_answer,
"response": generated_answer,
"category": category,
"llm_label": label,
}
)
# Save intermediate results
with open(output_path, "w") as f:
json.dump(RESULTS, f, indent=4)
# Print current accuracy for all categories
print("All categories accuracy:")
for cat, results in LLM_JUDGE.items():
if results: # Only print if there are results for this category
print(f" Category {cat}: {np.mean(results):.4f} ({sum(results)}/{len(results)})")
print("------------------------------------------")
index += 1
# Save final results
with open(output_path, "w") as f:
json.dump(RESULTS, f, indent=4)
# Print final summary
print("PATH: ", dataset_path)
print("------------------------------------------")
for k, v in LLM_JUDGE.items():
print(k, np.mean(v))
if __name__ == "__main__":
main() | --- +++ @@ -37,6 +37,7 @@
def evaluate_llm_judge(question, gold_answer, generated_answer):
+ """Evaluate the generated answer against the gold answer using an LLM judge."""
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
@@ -55,6 +56,7 @@
def main():
+ """Main function to evaluate RAG results using LLM judge."""
parser = argparse.ArgumentParser(description="Evaluate RAG results using LLM judge")
parser.add_argument(
"--input_file",
@@ -125,4 +127,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/evaluation/metrics/llm_judge.py |
Add minimal docstrings for each function | import copy
import os
from typing import Optional, Union
try:
import weaviate
except ImportError:
raise ImportError(
"Weaviate requires extra dependencies. Install with `pip install --upgrade 'embedchain[weaviate]'`"
) from None
from embedchain.config.vector_db.weaviate import WeaviateDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
@register_deserializable
class WeaviateDB(BaseVectorDB):
def __init__(
self,
config: Optional[WeaviateDBConfig] = None,
):
if config is None:
self.config = WeaviateDBConfig()
else:
if not isinstance(config, WeaviateDBConfig):
raise TypeError(
"config is not a `WeaviateDBConfig` instance. "
"Please make sure the type is right and that you are passing an instance."
)
self.config = config
self.batch_size = self.config.batch_size
self.client = weaviate.Client(
url=os.environ.get("WEAVIATE_ENDPOINT"),
auth_client_secret=weaviate.AuthApiKey(api_key=os.environ.get("WEAVIATE_API_KEY")),
**self.config.extra_params,
)
# Since weaviate uses graphQL, we need to keep track of metadata keys added in the vectordb.
# This is needed to filter data while querying.
self.metadata_keys = {"data_type", "doc_id", "url", "hash", "app_id"}
# Call parent init here because embedder is needed
super().__init__(config=self.config)
def _initialize(self):
if not self.embedder:
raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
self.index_name = self._get_index_name()
if not self.client.schema.exists(self.index_name):
# id is a reserved field in Weaviate, hence we had to change the name of the id field to identifier
# The none vectorizer is crucial as we have our own custom embedding function
"""
TODO: wait for weaviate to add indexing on `object[]` data-type so that we can add filter while querying.
Once that is done, change `dataType` of "metadata" field to `object[]` and update the query below.
"""
class_obj = {
"classes": [
{
"class": self.index_name,
"vectorizer": "none",
"properties": [
{
"name": "identifier",
"dataType": ["text"],
},
{
"name": "text",
"dataType": ["text"],
},
{
"name": "metadata",
"dataType": [self.index_name + "_metadata"],
},
],
},
{
"class": self.index_name + "_metadata",
"vectorizer": "none",
"properties": [
{
"name": "data_type",
"dataType": ["text"],
},
{
"name": "doc_id",
"dataType": ["text"],
},
{
"name": "url",
"dataType": ["text"],
},
{
"name": "hash",
"dataType": ["text"],
},
{
"name": "app_id",
"dataType": ["text"],
},
],
},
]
}
self.client.schema.create(class_obj)
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
weaviate_where_operands = []
if ids:
for doc_id in ids:
weaviate_where_operands.append({"path": ["identifier"], "operator": "Equal", "valueText": doc_id})
keys = set(where.keys() if where is not None else set())
if len(keys) > 0:
for key in keys:
weaviate_where_operands.append(
{
"path": ["metadata", self.index_name + "_metadata", key],
"operator": "Equal",
"valueText": where.get(key),
}
)
if len(weaviate_where_operands) == 1:
weaviate_where_clause = weaviate_where_operands[0]
else:
weaviate_where_clause = {"operator": "And", "operands": weaviate_where_operands}
existing_ids = []
metadatas = []
cursor = None
offset = 0
has_iterated_once = False
query_metadata_keys = self.metadata_keys.union(keys)
while cursor is not None or not has_iterated_once:
has_iterated_once = True
results = self._query_with_offset(
self.client.query.get(
self.index_name,
[
"identifier",
weaviate.LinkTo("metadata", self.index_name + "_metadata", list(query_metadata_keys)),
],
)
.with_where(weaviate_where_clause)
.with_additional(["id"])
.with_limit(limit or self.batch_size),
offset,
)
fetched_results = results["data"]["Get"].get(self.index_name, [])
if not fetched_results:
break
for result in fetched_results:
existing_ids.append(result["identifier"])
metadatas.append(result["metadata"][0])
cursor = result["_additional"]["id"]
offset += 1
if limit is not None and len(existing_ids) >= limit:
break
return {"ids": existing_ids, "metadatas": metadatas}
def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
embeddings = self.embedder.embedding_fn(documents)
self.client.batch.configure(batch_size=self.batch_size, timeout_retries=3) # Configure batch
with self.client.batch as batch: # Initialize a batch process
for id, text, metadata, embedding in zip(ids, documents, metadatas, embeddings):
doc = {"identifier": id, "text": text}
updated_metadata = {"text": text}
if metadata is not None:
updated_metadata.update(**metadata)
obj_uuid = batch.add_data_object(
data_object=copy.deepcopy(doc), class_name=self.index_name, vector=embedding
)
metadata_uuid = batch.add_data_object(
data_object=copy.deepcopy(updated_metadata),
class_name=self.index_name + "_metadata",
vector=embedding,
)
batch.add_reference(
obj_uuid, self.index_name, "metadata", metadata_uuid, self.index_name + "_metadata", **kwargs
)
def query(
self, input_query: str, n_results: int, where: dict[str, any], citations: bool = False
) -> Union[list[tuple[str, dict]], list[str]]:
query_vector = self.embedder.embedding_fn([input_query])[0]
keys = set(where.keys() if where is not None else set())
data_fields = ["text"]
query_metadata_keys = self.metadata_keys.union(keys)
if citations:
data_fields.append(weaviate.LinkTo("metadata", self.index_name + "_metadata", list(query_metadata_keys)))
if len(keys) > 0:
weaviate_where_operands = []
for key in keys:
weaviate_where_operands.append(
{
"path": ["metadata", self.index_name + "_metadata", key],
"operator": "Equal",
"valueText": where.get(key),
}
)
if len(weaviate_where_operands) == 1:
weaviate_where_clause = weaviate_where_operands[0]
else:
weaviate_where_clause = {"operator": "And", "operands": weaviate_where_operands}
results = (
self.client.query.get(self.index_name, data_fields)
.with_where(weaviate_where_clause)
.with_near_vector({"vector": query_vector})
.with_limit(n_results)
.with_additional(["distance"])
.do()
)
else:
results = (
self.client.query.get(self.index_name, data_fields)
.with_near_vector({"vector": query_vector})
.with_limit(n_results)
.with_additional(["distance"])
.do()
)
if results["data"]["Get"].get(self.index_name) is None:
return []
docs = results["data"]["Get"].get(self.index_name)
contexts = []
for doc in docs:
context = doc["text"]
if citations:
metadata = doc["metadata"][0]
score = doc["_additional"]["distance"]
metadata["score"] = score
contexts.append((context, metadata))
else:
contexts.append(context)
return contexts
def set_collection_name(self, name: str):
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def count(self) -> int:
data = self.client.query.aggregate(self.index_name).with_meta_count().do()
return data["data"]["Aggregate"].get(self.index_name)[0]["meta"]["count"]
def _get_or_create_db(self):
return self.client
def reset(self):
# Delete all data from the database
self.client.batch.delete_objects(
self.index_name, where={"path": ["identifier"], "operator": "Like", "valueText": ".*"}
)
# Weaviate internally by default capitalizes the class name
def _get_index_name(self) -> str:
return f"{self.config.collection_name}_{self.embedder.vector_dimension}".capitalize().replace("-", "_")
@staticmethod
def _query_with_offset(query, offset):
if offset:
query.with_offset(offset)
results = query.do()
return results
def _generate_query(self, where: dict):
weaviate_where_operands = []
for key, value in where.items():
weaviate_where_operands.append(
{
"path": ["metadata", self.index_name + "_metadata", key],
"operator": "Equal",
"valueText": value,
}
)
if len(weaviate_where_operands) == 1:
weaviate_where_clause = weaviate_where_operands[0]
else:
weaviate_where_clause = {"operator": "And", "operands": weaviate_where_operands}
return weaviate_where_clause
def delete(self, where: dict):
query = self._generate_query(where)
self.client.batch.delete_objects(self.index_name, where=query) | --- +++ @@ -16,11 +16,19 @@
@register_deserializable
class WeaviateDB(BaseVectorDB):
+ """
+ Weaviate as vector database
+ """
def __init__(
self,
config: Optional[WeaviateDBConfig] = None,
):
+ """Weaviate as vector database.
+ :param config: Weaviate database config, defaults to None
+ :type config: WeaviateDBConfig, optional
+ :raises ValueError: No config provided
+ """
if config is None:
self.config = WeaviateDBConfig()
else:
@@ -44,6 +52,9 @@ super().__init__(config=self.config)
def _initialize(self):
+ """
+ This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
+ """
if not self.embedder:
raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
@@ -108,6 +119,15 @@ self.client.schema.create(class_obj)
def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
+ """
+ Get existing doc ids present in vector database
+ :param ids: _list of doc ids to check for existance
+ :type ids: list[str]
+ :param where: to filter data
+ :type where: dict[str, any]
+ :return: ids
+ :rtype: Set[str]
+ """
weaviate_where_operands = []
if ids:
@@ -168,6 +188,14 @@ return {"ids": existing_ids, "metadatas": metadatas}
def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
+ """add data in vector database
+ :param documents: list of texts to add
+ :type documents: list[str]
+ :param metadatas: list of metadata associated with docs
+ :type metadatas: list[object]
+ :param ids: ids of docs
+ :type ids: list[str]
+ """
embeddings = self.embedder.embedding_fn(documents)
self.client.batch.configure(batch_size=self.batch_size, timeout_retries=3) # Configure batch
with self.client.batch as batch: # Initialize a batch process
@@ -192,6 +220,20 @@ def query(
self, input_query: str, n_results: int, where: dict[str, any], citations: bool = False
) -> Union[list[tuple[str, dict]], list[str]]:
+ """
+ query contents from vector database based on vector similarity
+ :param input_query: query string
+ :type input_query: str
+ :param n_results: no of similar documents to fetch from database
+ :type n_results: int
+ :param where: Optional. to filter data
+ :type where: dict[str, any]
+ :param citations: we use citations boolean param to return context along with the answer.
+ :type citations: bool, default is False.
+ :return: The content of the document that matched your query,
+ along with url of the source and doc_id (if citations flag is true)
+ :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
+ """
query_vector = self.embedder.embedding_fn([input_query])[0]
keys = set(where.keys() if where is not None else set())
data_fields = ["text"]
@@ -248,18 +290,32 @@ return contexts
def set_collection_name(self, name: str):
+ """
+ Set the name of the collection. A collection is an isolated space for vectors.
+ :param name: Name of the collection.
+ :type name: str
+ """
if not isinstance(name, str):
raise TypeError("Collection name must be a string")
self.config.collection_name = name
def count(self) -> int:
+ """
+ Count number of documents/chunks embedded in the database.
+ :return: number of documents
+ :rtype: int
+ """
data = self.client.query.aggregate(self.index_name).with_meta_count().do()
return data["data"]["Aggregate"].get(self.index_name)[0]["meta"]["count"]
def _get_or_create_db(self):
+ """Called during initialization"""
return self.client
def reset(self):
+ """
+ Resets the database. Deletes all embeddings irreversibly.
+ """
# Delete all data from the database
self.client.batch.delete_objects(
self.index_name, where={"path": ["identifier"], "operator": "Like", "valueText": ".*"}
@@ -267,6 +323,10 @@
# Weaviate internally by default capitalizes the class name
def _get_index_name(self) -> str:
+ """Get the Weaviate index for a collection
+ :return: Weaviate index
+ :rtype: str
+ """
return f"{self.config.collection_name}_{self.embedder.vector_dimension}".capitalize().replace("-", "_")
@staticmethod
@@ -295,5 +355,9 @@ return weaviate_where_clause
def delete(self, where: dict):
+ """Delete from database.
+ :param where: to filter data
+ :type where: dict[str, any]
+ """
query = self._generate_query(where)
- self.client.batch.delete_objects(self.index_name, where=query)+ self.client.batch.delete_objects(self.index_name, where=query)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/embedchain/embedchain/vectordb/weaviate.py |
Document this script properly | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class DeepSeekConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# DeepSeek-specific parameters
deepseek_base_url: Optional[str] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# DeepSeek-specific parameters
self.deepseek_base_url = deepseek_base_url | --- +++ @@ -4,6 +4,10 @@
class DeepSeekConfig(BaseLlmConfig):
+ """
+ Configuration class for DeepSeek-specific parameters.
+ Inherits from BaseLlmConfig and adds DeepSeek-specific settings.
+ """
def __init__(
self,
@@ -20,6 +24,21 @@ # DeepSeek-specific parameters
deepseek_base_url: Optional[str] = None,
):
+ """
+ Initialize DeepSeek configuration.
+
+ Args:
+ model: DeepSeek model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: DeepSeek API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ deepseek_base_url: DeepSeek API base URL, defaults to None
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -34,4 +53,4 @@ )
# DeepSeek-specific parameters
- self.deepseek_base_url = deepseek_base_url+ self.deepseek_base_url = deepseek_base_url
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/deepseek.py |
Write docstrings for utility functions | from typing import Any, Dict, Optional
from mem0.configs.llms.base import BaseLlmConfig
class LMStudioConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# LM Studio-specific parameters
lmstudio_base_url: Optional[str] = None,
lmstudio_response_format: Optional[Dict[str, Any]] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# LM Studio-specific parameters
self.lmstudio_base_url = lmstudio_base_url or "http://localhost:1234/v1"
self.lmstudio_response_format = lmstudio_response_format | --- +++ @@ -4,6 +4,10 @@
class LMStudioConfig(BaseLlmConfig):
+ """
+ Configuration class for LM Studio-specific parameters.
+ Inherits from BaseLlmConfig and adds LM Studio-specific settings.
+ """
def __init__(
self,
@@ -21,6 +25,22 @@ lmstudio_base_url: Optional[str] = None,
lmstudio_response_format: Optional[Dict[str, Any]] = None,
):
+ """
+ Initialize LM Studio configuration.
+
+ Args:
+ model: LM Studio model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: LM Studio API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ lmstudio_base_url: LM Studio base URL, defaults to None
+ lmstudio_response_format: LM Studio response format, defaults to None
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -36,4 +56,4 @@
# LM Studio-specific parameters
self.lmstudio_base_url = lmstudio_base_url or "http://localhost:1234/v1"
- self.lmstudio_response_format = lmstudio_response_format+ self.lmstudio_response_format = lmstudio_response_format
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/lmstudio.py |
Add docstrings that explain purpose and usage | from typing import Any, Callable, List, Optional
from mem0.configs.llms.base import BaseLlmConfig
class OpenAIConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# OpenAI-specific parameters
openai_base_url: Optional[str] = None,
models: Optional[List[str]] = None,
route: Optional[str] = "fallback",
openrouter_base_url: Optional[str] = None,
site_url: Optional[str] = None,
app_name: Optional[str] = None,
store: bool = False,
# Response monitoring callback
response_callback: Optional[Callable[[Any, dict, dict], None]] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# OpenAI-specific parameters
self.openai_base_url = openai_base_url
self.models = models
self.route = route
self.openrouter_base_url = openrouter_base_url
self.site_url = site_url
self.app_name = app_name
self.store = store
# Response monitoring
self.response_callback = response_callback | --- +++ @@ -4,6 +4,10 @@
class OpenAIConfig(BaseLlmConfig):
+ """
+ Configuration class for OpenAI and OpenRouter-specific parameters.
+ Inherits from BaseLlmConfig and adds OpenAI-specific settings.
+ """
def __init__(
self,
@@ -28,6 +32,27 @@ # Response monitoring callback
response_callback: Optional[Callable[[Any, dict, dict], None]] = None,
):
+ """
+ Initialize OpenAI configuration.
+
+ Args:
+ model: OpenAI model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: OpenAI API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ openai_base_url: OpenAI API base URL, defaults to None
+ models: List of models for OpenRouter, defaults to None
+ route: OpenRouter route strategy, defaults to "fallback"
+ openrouter_base_url: OpenRouter base URL, defaults to None
+ site_url: Site URL for OpenRouter, defaults to None
+ app_name: Application name for OpenRouter, defaults to None
+ response_callback: Optional callback for monitoring LLM responses.
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -51,4 +76,4 @@ self.store = store
# Response monitoring
- self.response_callback = response_callback+ self.response_callback = response_callback
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/openai.py |
Write docstrings for algorithm functions | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class VllmConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# vLLM-specific parameters
vllm_base_url: Optional[str] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# vLLM-specific parameters
self.vllm_base_url = vllm_base_url or "http://localhost:8000/v1" | --- +++ @@ -4,6 +4,10 @@
class VllmConfig(BaseLlmConfig):
+ """
+ Configuration class for vLLM-specific parameters.
+ Inherits from BaseLlmConfig and adds vLLM-specific settings.
+ """
def __init__(
self,
@@ -20,6 +24,21 @@ # vLLM-specific parameters
vllm_base_url: Optional[str] = None,
):
+ """
+ Initialize vLLM configuration.
+
+ Args:
+ model: vLLM model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: vLLM API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ vllm_base_url: vLLM base URL, defaults to None
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -34,4 +53,4 @@ )
# vLLM-specific parameters
- self.vllm_base_url = vllm_base_url or "http://localhost:8000/v1"+ self.vllm_base_url = vllm_base_url or "http://localhost:8000/v1"
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/vllm.py |
Generate documentation strings for clarity | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class AzureMySQLConfig(BaseModel):
host: str = Field(..., description="MySQL server host (e.g., myserver.mysql.database.azure.com)")
port: int = Field(3306, description="MySQL server port")
user: str = Field(..., description="Database user")
password: Optional[str] = Field(None, description="Database password (not required if using Azure credential)")
database: str = Field(..., description="Database name")
collection_name: str = Field("mem0", description="Collection/table name")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
use_azure_credential: bool = Field(
False,
description="Use Azure DefaultAzureCredential for authentication instead of password"
)
ssl_ca: Optional[str] = Field(None, description="Path to SSL CA certificate")
ssl_disabled: bool = Field(False, description="Disable SSL connection (not recommended for production)")
minconn: int = Field(1, description="Minimum number of connections in the pool")
maxconn: int = Field(5, description="Maximum number of connections in the pool")
connection_pool: Optional[Any] = Field(
None,
description="Pre-configured connection pool object (overrides other connection parameters)"
)
@model_validator(mode="before")
@classmethod
def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
# If connection_pool is provided, skip validation
if values.get("connection_pool") is not None:
return values
use_azure_credential = values.get("use_azure_credential", False)
password = values.get("password")
# Either password or Azure credential must be provided
if not use_azure_credential and not password:
raise ValueError(
"Either 'password' must be provided or 'use_azure_credential' must be set to True"
)
return values
@model_validator(mode="before")
@classmethod
def check_required_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
# If connection_pool is provided, skip validation of individual parameters
if values.get("connection_pool") is not None:
return values
required_fields = ["host", "user", "database"]
missing_fields = [field for field in required_fields if not values.get(field)]
if missing_fields:
raise ValueError(
f"Missing required fields: {', '.join(missing_fields)}. "
f"These fields are required when not using a pre-configured connection_pool."
)
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
class Config:
arbitrary_types_allowed = True | --- +++ @@ -4,6 +4,7 @@
class AzureMySQLConfig(BaseModel):
+ """Configuration for Azure MySQL vector database."""
host: str = Field(..., description="MySQL server host (e.g., myserver.mysql.database.azure.com)")
port: int = Field(3306, description="MySQL server port")
@@ -28,6 +29,7 @@ @model_validator(mode="before")
@classmethod
def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate authentication parameters."""
# If connection_pool is provided, skip validation
if values.get("connection_pool") is not None:
return values
@@ -46,6 +48,7 @@ @model_validator(mode="before")
@classmethod
def check_required_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate required fields."""
# If connection_pool is provided, skip validation of individual parameters
if values.get("connection_pool") is not None:
return values
@@ -64,6 +67,7 @@ @model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate that no extra fields are provided."""
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
@@ -77,4 +81,4 @@ return values
class Config:
- arbitrary_types_allowed = True+ arbitrary_types_allowed = True
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/vector_stores/azure_mysql.py |
Create documentation strings for testing functions | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from databricks.sdk.service.vectorsearch import EndpointType, VectorIndexType, PipelineType
class DatabricksConfig(BaseModel):
workspace_url: str = Field(..., description="Databricks workspace URL")
access_token: Optional[str] = Field(None, description="Personal access token for authentication")
client_id: Optional[str] = Field(None, description="Databricks Service principal client ID")
client_secret: Optional[str] = Field(None, description="Databricks Service principal client secret")
azure_client_id: Optional[str] = Field(None, description="Azure AD application client ID (for Azure Databricks)")
azure_client_secret: Optional[str] = Field(
None, description="Azure AD application client secret (for Azure Databricks)"
)
endpoint_name: str = Field(..., description="Vector search endpoint name")
catalog: str = Field(..., description="The Unity Catalog catalog name")
schema: str = Field(..., description="The Unity Catalog schama name")
table_name: str = Field(..., description="Source Delta table name")
collection_name: str = Field("mem0", description="Vector search index name")
index_type: VectorIndexType = Field("DELTA_SYNC", description="Index type: DELTA_SYNC or DIRECT_ACCESS")
embedding_model_endpoint_name: Optional[str] = Field(
None, description="Embedding model endpoint for Databricks-computed embeddings"
)
embedding_dimension: int = Field(1536, description="Vector embedding dimensions")
endpoint_type: EndpointType = Field("STANDARD", description="Endpoint type: STANDARD or STORAGE_OPTIMIZED")
pipeline_type: PipelineType = Field("TRIGGERED", description="Sync pipeline type: TRIGGERED or CONTINUOUS")
warehouse_name: Optional[str] = Field(None, description="Databricks SQL warehouse Name")
query_type: str = Field("ANN", description="Query type: `ANN` and `HYBRID`")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
@model_validator(mode="after")
def validate_authentication(self):
has_token = self.access_token is not None
has_service_principal = (self.client_id is not None and self.client_secret is not None) or (
self.azure_client_id is not None and self.azure_client_secret is not None
)
if not has_token and not has_service_principal:
raise ValueError(
"Either access_token or both client_id/client_secret or azure_client_id/azure_client_secret must be provided"
)
return self
model_config = ConfigDict(arbitrary_types_allowed=True) | --- +++ @@ -6,6 +6,7 @@
class DatabricksConfig(BaseModel):
+ """Configuration for Databricks Vector Search vector store."""
workspace_url: str = Field(..., description="Databricks workspace URL")
access_token: Optional[str] = Field(None, description="Personal access token for authentication")
@@ -44,6 +45,7 @@
@model_validator(mode="after")
def validate_authentication(self):
+ """Validate that either access_token or service principal credentials are provided."""
has_token = self.access_token is not None
has_service_principal = (self.client_id is not None and self.client_secret is not None) or (
self.azure_client_id is not None and self.azure_client_secret is not None
@@ -56,4 +58,4 @@
return self
- model_config = ConfigDict(arbitrary_types_allowed=True)+ model_config = ConfigDict(arbitrary_types_allowed=True)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/vector_stores/databricks.py |
Generate missing documentation strings | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class OllamaConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# Ollama-specific parameters
ollama_base_url: Optional[str] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# Ollama-specific parameters
self.ollama_base_url = ollama_base_url | --- +++ @@ -4,6 +4,10 @@
class OllamaConfig(BaseLlmConfig):
+ """
+ Configuration class for Ollama-specific parameters.
+ Inherits from BaseLlmConfig and adds Ollama-specific settings.
+ """
def __init__(
self,
@@ -20,6 +24,21 @@ # Ollama-specific parameters
ollama_base_url: Optional[str] = None,
):
+ """
+ Initialize Ollama configuration.
+
+ Args:
+ model: Ollama model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: Ollama API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ ollama_base_url: Ollama base URL, defaults to None
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -34,4 +53,4 @@ )
# Ollama-specific parameters
- self.ollama_base_url = ollama_base_url+ self.ollama_base_url = ollama_base_url
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/ollama.py |
Generate docstrings for script automation | import subprocess
import sys
from typing import Literal, Optional
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
try:
from ollama import Client
except ImportError:
user_input = input("The 'ollama' library is required. Install it now? [y/N]: ")
if user_input.lower() == "y":
try:
subprocess.check_call([sys.executable, "-m", "pip", "install", "ollama"])
from ollama import Client
except subprocess.CalledProcessError:
print("Failed to install 'ollama'. Please install it manually using 'pip install ollama'.")
sys.exit(1)
else:
print("The required 'ollama' library is not installed.")
sys.exit(1)
class OllamaEmbedding(EmbeddingBase):
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
super().__init__(config)
self.config.model = self.config.model or "nomic-embed-text"
self.config.embedding_dims = self.config.embedding_dims or 512
self.client = Client(host=self.config.ollama_base_url)
self._ensure_model_exists()
def _ensure_model_exists(self):
local_models = self.client.list()["models"]
if not any(model.get("name") == self.config.model or model.get("model") == self.config.model for model in local_models):
self.client.pull(self.config.model)
def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
response = self.client.embeddings(model=self.config.model, prompt=text)
return response["embedding"] | --- +++ @@ -32,10 +32,22 @@ self._ensure_model_exists()
def _ensure_model_exists(self):
+ """
+ Ensure the specified model exists locally. If not, pull it from Ollama.
+ """
local_models = self.client.list()["models"]
if not any(model.get("name") == self.config.model or model.get("model") == self.config.model for model in local_models):
self.client.pull(self.config.model)
def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+ """
+ Get the embedding for the given text using Ollama.
+
+ Args:
+ text (str): The text to embed.
+ memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+ Returns:
+ list: The embedding vector.
+ """
response = self.client.embeddings(model=self.config.model, prompt=text)
- return response["embedding"]+ return response["embedding"]
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/embeddings/ollama.py |
Document this module using docstrings |
from pydantic import BaseModel, Field
class NeptuneAnalyticsConfig(BaseModel):
collection_name: str = Field("mem0", description="Default name for the collection")
endpoint: str = Field("endpoint", description="Graph ID for the runtime")
model_config = {
"arbitrary_types_allowed": False,
} | --- +++ @@ -1,11 +1,27 @@+"""
+Configuration for Amazon Neptune Analytics vector store.
+
+This module provides configuration settings for integrating with Amazon Neptune Analytics
+as a vector store backend for Mem0's memory layer.
+"""
from pydantic import BaseModel, Field
class NeptuneAnalyticsConfig(BaseModel):
+ """
+ Configuration class for Amazon Neptune Analytics vector store.
+
+ Amazon Neptune Analytics is a graph analytics engine that can be used as a vector store
+ for storing and retrieving memory embeddings in Mem0.
+
+ Attributes:
+ collection_name (str): Name of the collection to store vectors. Defaults to "mem0".
+ endpoint (str): Neptune Analytics graph endpoint URL or Graph ID for the runtime.
+ """
collection_name: str = Field("mem0", description="Default name for the collection")
endpoint: str = Field("endpoint", description="Graph ID for the runtime")
model_config = {
"arbitrary_types_allowed": False,
- }+ }
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/vector_stores/neptune.py |
Add docstrings with type hints explained | import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
import httpx
from pydantic import BaseModel, ConfigDict, Field
from mem0.client.utils import api_error_handler
from mem0.memory.telemetry import capture_client_event
# Exception classes are referenced in docstrings only
logger = logging.getLogger(__name__)
class ProjectConfig(BaseModel):
    """Validated configuration holding the scope of project operations.

    All fields are optional at construction time; methods that require a
    particular scope (e.g. project-level calls) validate presence later.
    """

    # Organization the project belongs to; required together with project_id
    # for project-level operations.
    org_id: Optional[str] = Field(default=None, description="Organization ID")
    # Project identifier within the organization.
    project_id: Optional[str] = Field(default=None, description="Project ID")
    # Email of the acting user, if any.
    user_email: Optional[str] = Field(default=None, description="User email")
    # Re-validate on attribute assignment and reject unknown fields.
    model_config = ConfigDict(validate_assignment=True, extra="forbid")
class BaseProject(ABC):
    """Abstract base for project management operations.

    Owns the project scope (organization / project / user) and the shared
    request-parameter handling; concrete subclasses implement the actual
    API calls (get/create/update/delete and member management).
    """

    def __init__(
        self,
        client: Any,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """Store the HTTP client and resolve the project configuration.

        An explicitly supplied ``config`` wins; otherwise a new
        ``ProjectConfig`` is built from the individual keyword arguments.
        """
        self._client = client
        self.config = (
            config
            if config is not None
            else ProjectConfig(org_id=org_id, project_id=project_id, user_email=user_email)
        )

    @property
    def org_id(self) -> Optional[str]:
        """Organization ID from the active configuration."""
        return self.config.org_id

    @property
    def project_id(self) -> Optional[str]:
        """Project ID from the active configuration."""
        return self.config.project_id

    @property
    def user_email(self) -> Optional[str]:
        """User email from the active configuration."""
        return self.config.user_email

    def _validate_org_project(self) -> None:
        """Raise ``ValueError`` unless both org_id and project_id are set."""
        if not (self.config.org_id and self.config.project_id):
            raise ValueError("org_id and project_id must be set to access project operations")

    def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Merge the configured org/project identifiers into request params.

        Requires either both identifiers or neither; entries whose value is
        ``None`` are dropped from the returned mapping.
        """
        params = {} if kwargs is None else kwargs
        org, project = self.config.org_id, self.config.project_id
        if org and project:
            params["org_id"] = org
            params["project_id"] = project
        elif org or project:
            # A partial scope is ambiguous — refuse it outright.
            raise ValueError("Please provide both org_id and project_id")
        return {key: value for key, value in params.items() if value is not None}

    def _prepare_org_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Merge the configured org_id into request params (org-level calls).

        Raises ``ValueError`` when no organization is configured; entries
        whose value is ``None`` are dropped from the returned mapping.
        """
        params = {} if kwargs is None else kwargs
        if self.config.org_id:
            params["org_id"] = self.config.org_id
        else:
            raise ValueError("org_id must be set for organization-level operations")
        return {key: value for key, value in params.items() if value is not None}

    @abstractmethod
    def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """Fetch the current project's details."""

    @abstractmethod
    def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
        """Create a new project in the organization."""

    @abstractmethod
    def update(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """Update project settings."""

    @abstractmethod
    def delete(self) -> Dict[str, Any]:
        """Delete the current project."""

    @abstractmethod
    def get_members(self) -> Dict[str, Any]:
        """List the project's members."""

    @abstractmethod
    def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
        """Add a member to the project with the given role."""

    @abstractmethod
    def update_member(self, email: str, role: str) -> Dict[str, Any]:
        """Change an existing member's role."""

    @abstractmethod
    def remove_member(self, email: str) -> Dict[str, Any]:
        """Remove a member from the project."""
class Project(BaseProject):
    def __init__(
        self,
        client: httpx.Client,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """Initialize a synchronous project handle.

        Args:
            client: Configured ``httpx.Client`` used for all API calls.
            config: Pre-built project configuration; takes precedence over the
                individual identifier arguments.
            org_id: Organization ID (used only when ``config`` is None).
            project_id: Project ID (used only when ``config`` is None).
            user_email: Acting user's email (used only when ``config`` is None).

        Raises:
            ValueError: If org_id and project_id are not both available after
                configuration resolution (enforced by ``_validate_org_project``).
        """
        super().__init__(client, config, org_id, project_id, user_email)
        self._validate_org_project()
@api_error_handler
def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
params = self._prepare_params({"fields": fields})
response = self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
params=params,
)
response.raise_for_status()
capture_client_event(
"client.project.get",
self,
{"fields": fields, "sync_type": "sync"},
)
return response.json()
@api_error_handler
def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
if not self.config.org_id:
raise ValueError("org_id must be set to create a project")
payload = {"name": name}
if description is not None:
payload["description"] = description
response = self._client.post(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.create",
self,
{"name": name, "description": description, "sync_type": "sync"},
)
return response.json()
@api_error_handler
def update(
self,
custom_instructions: Optional[str] = None,
custom_categories: Optional[List[str]] = None,
retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
enable_graph: Optional[bool] = None,
) -> Dict[str, Any]:
if (
custom_instructions is None
and custom_categories is None
and retrieval_criteria is None
and enable_graph is None
):
raise ValueError(
"At least one parameter must be provided for update: "
"custom_instructions, custom_categories, retrieval_criteria, "
"enable_graph"
)
payload = self._prepare_params(
{
"custom_instructions": custom_instructions,
"custom_categories": custom_categories,
"retrieval_criteria": retrieval_criteria,
"enable_graph": enable_graph,
}
)
response = self._client.patch(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.update",
self,
{
"custom_instructions": custom_instructions,
"custom_categories": custom_categories,
"retrieval_criteria": retrieval_criteria,
"enable_graph": enable_graph,
"sync_type": "sync",
},
)
return response.json()
@api_error_handler
def delete(self) -> Dict[str, Any]:
response = self._client.delete(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
)
response.raise_for_status()
capture_client_event(
"client.project.delete",
self,
{"sync_type": "sync"},
)
return response.json()
@api_error_handler
def get_members(self) -> Dict[str, Any]:
response = self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
)
response.raise_for_status()
capture_client_event(
"client.project.get_members",
self,
{"sync_type": "sync"},
)
return response.json()
@api_error_handler
def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
payload = {"email": email, "role": role}
response = self._client.post(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.add_member",
self,
{"email": email, "role": role, "sync_type": "sync"},
)
return response.json()
@api_error_handler
def update_member(self, email: str, role: str) -> Dict[str, Any]:
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
payload = {"email": email, "role": role}
response = self._client.put(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.update_member",
self,
{"email": email, "role": role, "sync_type": "sync"},
)
return response.json()
@api_error_handler
def remove_member(self, email: str) -> Dict[str, Any]:
params = {"email": email}
response = self._client.delete(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
params=params,
)
response.raise_for_status()
capture_client_event(
"client.project.remove_member",
self,
{"email": email, "sync_type": "sync"},
)
return response.json()
class AsyncProject(BaseProject):
def __init__(
self,
client: httpx.AsyncClient,
config: Optional[ProjectConfig] = None,
org_id: Optional[str] = None,
project_id: Optional[str] = None,
user_email: Optional[str] = None,
):
super().__init__(client, config, org_id, project_id, user_email)
self._validate_org_project()
@api_error_handler
async def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
params = self._prepare_params({"fields": fields})
response = await self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
params=params,
)
response.raise_for_status()
capture_client_event(
"client.project.get",
self,
{"fields": fields, "sync_type": "async"},
)
return response.json()
@api_error_handler
async def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
if not self.config.org_id:
raise ValueError("org_id must be set to create a project")
payload = {"name": name}
if description is not None:
payload["description"] = description
response = await self._client.post(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.create",
self,
{"name": name, "description": description, "sync_type": "async"},
)
return response.json()
@api_error_handler
async def update(
self,
custom_instructions: Optional[str] = None,
custom_categories: Optional[List[str]] = None,
retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
enable_graph: Optional[bool] = None,
) -> Dict[str, Any]:
if (
custom_instructions is None
and custom_categories is None
and retrieval_criteria is None
and enable_graph is None
):
raise ValueError(
"At least one parameter must be provided for update: "
"custom_instructions, custom_categories, retrieval_criteria, "
"enable_graph"
)
payload = self._prepare_params(
{
"custom_instructions": custom_instructions,
"custom_categories": custom_categories,
"retrieval_criteria": retrieval_criteria,
"enable_graph": enable_graph,
}
)
response = await self._client.patch(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.update",
self,
{
"custom_instructions": custom_instructions,
"custom_categories": custom_categories,
"retrieval_criteria": retrieval_criteria,
"enable_graph": enable_graph,
"sync_type": "async",
},
)
return response.json()
@api_error_handler
async def delete(self) -> Dict[str, Any]:
response = await self._client.delete(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
)
response.raise_for_status()
capture_client_event(
"client.project.delete",
self,
{"sync_type": "async"},
)
return response.json()
@api_error_handler
async def get_members(self) -> Dict[str, Any]:
response = await self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
)
response.raise_for_status()
capture_client_event(
"client.project.get_members",
self,
{"sync_type": "async"},
)
return response.json()
@api_error_handler
async def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
payload = {"email": email, "role": role}
response = await self._client.post(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.add_member",
self,
{"email": email, "role": role, "sync_type": "async"},
)
return response.json()
@api_error_handler
async def update_member(self, email: str, role: str) -> Dict[str, Any]:
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
payload = {"email": email, "role": role}
response = await self._client.put(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
json=payload,
)
response.raise_for_status()
capture_client_event(
"client.project.update_member",
self,
{"email": email, "role": role, "sync_type": "async"},
)
return response.json()
@api_error_handler
async def remove_member(self, email: str) -> Dict[str, Any]:
params = {"email": email}
response = await self._client.delete(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
params=params,
)
response.raise_for_status()
capture_client_event(
"client.project.remove_member",
self,
{"email": email, "sync_type": "async"},
)
return response.json() | --- +++ @@ -13,6 +13,9 @@
class ProjectConfig(BaseModel):
+ """
+ Configuration for project management operations.
+ """
org_id: Optional[str] = Field(default=None, description="Organization ID")
project_id: Optional[str] = Field(default=None, description="Project ID")
@@ -22,6 +25,9 @@
class BaseProject(ABC):
+ """
+ Abstract base class for project management operations.
+ """
def __init__(
self,
@@ -31,6 +37,16 @@ project_id: Optional[str] = None,
user_email: Optional[str] = None,
):
+ """
+ Initialize the project manager.
+
+ Args:
+ client: HTTP client instance
+ config: Project manager configuration
+ org_id: Organization ID
+ project_id: Project ID
+ user_email: User email
+ """
self._client = client
# Handle config initialization
@@ -42,21 +58,42 @@
@property
def org_id(self) -> Optional[str]:
+ """Get the organization ID."""
return self.config.org_id
@property
def project_id(self) -> Optional[str]:
+ """Get the project ID."""
return self.config.project_id
@property
def user_email(self) -> Optional[str]:
+ """Get the user email."""
return self.config.user_email
def _validate_org_project(self) -> None:
+ """
+ Validate that both org_id and project_id are set.
+
+ Raises:
+ ValueError: If org_id or project_id are not set.
+ """
if not (self.config.org_id and self.config.project_id):
raise ValueError("org_id and project_id must be set to access project operations")
def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ """
+ Prepare query parameters for API requests.
+
+ Args:
+ kwargs: Additional keyword arguments.
+
+ Returns:
+ Dictionary containing prepared parameters.
+
+ Raises:
+ ValueError: If org_id or project_id validation fails.
+ """
if kwargs is None:
kwargs = {}
@@ -70,6 +107,18 @@ return {k: v for k, v in kwargs.items() if v is not None}
def _prepare_org_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ """
+ Prepare query parameters for organization-level API requests.
+
+ Args:
+ kwargs: Additional keyword arguments.
+
+ Returns:
+ Dictionary containing prepared parameters.
+
+ Raises:
+ ValueError: If org_id is not provided.
+ """
if kwargs is None:
kwargs = {}
@@ -83,10 +132,43 @@
@abstractmethod
def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
+ """
+ Get project details.
+
+ Args:
+ fields: List of fields to retrieve
+
+ Returns:
+ Dictionary containing the requested project fields.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
@abstractmethod
def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
+ """
+ Create a new project within the organization.
+
+ Args:
+ name: Name of the project to be created
+ description: Optional description for the project
+
+ Returns:
+ Dictionary containing the created project details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id is not set.
+ """
pass
@abstractmethod
@@ -97,30 +179,128 @@ retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
enable_graph: Optional[bool] = None,
) -> Dict[str, Any]:
+ """
+ Update project settings.
+
+ Args:
+ custom_instructions: New instructions for the project
+ custom_categories: New categories for the project
+ retrieval_criteria: New retrieval criteria for the project
+ enable_graph: Enable or disable the graph for the project
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
@abstractmethod
def delete(self) -> Dict[str, Any]:
+ """
+ Delete the current project and its related data.
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
@abstractmethod
def get_members(self) -> Dict[str, Any]:
+ """
+ Get all members of the current project.
+
+ Returns:
+ Dictionary containing the list of project members.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
@abstractmethod
def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
+ """
+ Add a new member to the current project.
+
+ Args:
+ email: Email address of the user to add
+ role: Role to assign ("READER" or "OWNER")
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
@abstractmethod
def update_member(self, email: str, role: str) -> Dict[str, Any]:
+ """
+ Update a member's role in the current project.
+
+ Args:
+ email: Email address of the user to update
+ role: New role to assign ("READER" or "OWNER")
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
@abstractmethod
def remove_member(self, email: str) -> Dict[str, Any]:
+ """
+ Remove a member from the current project.
+
+ Args:
+ email: Email address of the user to remove
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
pass
class Project(BaseProject):
+ """
+ Synchronous project management operations.
+ """
def __init__(
self,
@@ -130,11 +310,37 @@ project_id: Optional[str] = None,
user_email: Optional[str] = None,
):
+ """
+ Initialize the synchronous project manager.
+
+ Args:
+ client: HTTP client instance
+ config: Project manager configuration
+ org_id: Organization ID
+ project_id: Project ID
+ user_email: User email
+ """
super().__init__(client, config, org_id, project_id, user_email)
self._validate_org_project()
@api_error_handler
def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
+ """
+ Get project details.
+
+ Args:
+ fields: List of fields to retrieve
+
+ Returns:
+ Dictionary containing the requested project fields.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
params = self._prepare_params({"fields": fields})
response = self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
@@ -150,6 +356,23 @@
@api_error_handler
def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
+ """
+ Create a new project within the organization.
+
+ Args:
+ name: Name of the project to be created
+ description: Optional description for the project
+
+ Returns:
+ Dictionary containing the created project details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id is not set.
+ """
if not self.config.org_id:
raise ValueError("org_id must be set to create a project")
@@ -177,6 +400,25 @@ retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
enable_graph: Optional[bool] = None,
) -> Dict[str, Any]:
+ """
+ Update project settings.
+
+ Args:
+ custom_instructions: New instructions for the project
+ custom_categories: New categories for the project
+ retrieval_criteria: New retrieval criteria for the project
+ enable_graph: Enable or disable the graph for the project
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
if (
custom_instructions is None
and custom_categories is None
@@ -217,6 +459,19 @@
@api_error_handler
def delete(self) -> Dict[str, Any]:
+ """
+ Delete the current project and its related data.
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
response = self._client.delete(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
)
@@ -230,6 +485,19 @@
@api_error_handler
def get_members(self) -> Dict[str, Any]:
+ """
+ Get all members of the current project.
+
+ Returns:
+ Dictionary containing the list of project members.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
response = self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
)
@@ -243,6 +511,23 @@
@api_error_handler
def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
+ """
+ Add a new member to the current project.
+
+ Args:
+ email: Email address of the user to add
+ role: Role to assign ("READER" or "OWNER")
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
@@ -262,6 +547,23 @@
@api_error_handler
def update_member(self, email: str, role: str) -> Dict[str, Any]:
+ """
+ Update a member's role in the current project.
+
+ Args:
+ email: Email address of the user to update
+ role: New role to assign ("READER" or "OWNER")
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
@@ -281,6 +583,22 @@
@api_error_handler
def remove_member(self, email: str) -> Dict[str, Any]:
+ """
+ Remove a member from the current project.
+
+ Args:
+ email: Email address of the user to remove
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
params = {"email": email}
response = self._client.delete(
@@ -297,6 +615,9 @@
class AsyncProject(BaseProject):
+ """
+ Asynchronous project management operations.
+ """
def __init__(
self,
@@ -306,11 +627,37 @@ project_id: Optional[str] = None,
user_email: Optional[str] = None,
):
+ """
+ Initialize the asynchronous project manager.
+
+ Args:
+ client: HTTP client instance
+ config: Project manager configuration
+ org_id: Organization ID
+ project_id: Project ID
+ user_email: User email
+ """
super().__init__(client, config, org_id, project_id, user_email)
self._validate_org_project()
@api_error_handler
async def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
+ """
+ Get project details.
+
+ Args:
+ fields: List of fields to retrieve
+
+ Returns:
+ Dictionary containing the requested project fields.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
params = self._prepare_params({"fields": fields})
response = await self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
@@ -326,6 +673,23 @@
@api_error_handler
async def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
+ """
+ Create a new project within the organization.
+
+ Args:
+ name: Name of the project to be created
+ description: Optional description for the project
+
+ Returns:
+ Dictionary containing the created project details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id is not set.
+ """
if not self.config.org_id:
raise ValueError("org_id must be set to create a project")
@@ -353,6 +717,25 @@ retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
enable_graph: Optional[bool] = None,
) -> Dict[str, Any]:
+ """
+ Update project settings.
+
+ Args:
+ custom_instructions: New instructions for the project
+ custom_categories: New categories for the project
+ retrieval_criteria: New retrieval criteria for the project
+ enable_graph: Enable or disable the graph for the project
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
if (
custom_instructions is None
and custom_categories is None
@@ -393,6 +776,19 @@
@api_error_handler
async def delete(self) -> Dict[str, Any]:
+ """
+ Delete the current project and its related data.
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
response = await self._client.delete(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
)
@@ -406,6 +802,19 @@
@api_error_handler
async def get_members(self) -> Dict[str, Any]:
+ """
+ Get all members of the current project.
+
+ Returns:
+ Dictionary containing the list of project members.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
response = await self._client.get(
f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
)
@@ -419,6 +828,23 @@
@api_error_handler
async def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
+ """
+ Add a new member to the current project.
+
+ Args:
+ email: Email address of the user to add
+ role: Role to assign ("READER" or "OWNER")
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
@@ -438,6 +864,23 @@
@api_error_handler
async def update_member(self, email: str, role: str) -> Dict[str, Any]:
+ """
+ Update a member's role in the current project.
+
+ Args:
+ email: Email address of the user to update
+ role: New role to assign ("READER" or "OWNER")
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
if role not in ["READER", "OWNER"]:
raise ValueError("Role must be either 'READER' or 'OWNER'")
@@ -457,6 +900,22 @@
@api_error_handler
async def remove_member(self, email: str) -> Dict[str, Any]:
+ """
+ Remove a member from the current project.
+
+ Args:
+ email: Email address of the user to remove
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ NetworkError: If network connectivity issues occur.
+ ValueError: If org_id or project_id are not set.
+ """
params = {"email": email}
response = await self._client.delete(
@@ -469,4 +928,4 @@ self,
{"email": email, "sync_type": "async"},
)
- return response.json()+ return response.json()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/client/project.py |
Add docstrings that explain inputs and outputs |
from typing import Any, Dict, Optional
class MemoryError(Exception):
def __init__(
self,
message: str,
error_code: str,
details: Optional[Dict[str, Any]] = None,
suggestion: Optional[str] = None,
debug_info: Optional[Dict[str, Any]] = None,
):
self.message = message
self.error_code = error_code
self.details = details or {}
self.suggestion = suggestion
self.debug_info = debug_info or {}
super().__init__(self.message)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"message={self.message!r}, "
f"error_code={self.error_code!r}, "
f"details={self.details!r}, "
f"suggestion={self.suggestion!r}, "
f"debug_info={self.debug_info!r})"
)
class AuthenticationError(MemoryError):
pass
class RateLimitError(MemoryError):
pass
class ValidationError(MemoryError):
pass
class MemoryNotFoundError(MemoryError):
pass
class NetworkError(MemoryError):
pass
class ConfigurationError(MemoryError):
pass
class MemoryQuotaExceededError(MemoryError):
pass
class MemoryCorruptionError(MemoryError):
pass
class VectorSearchError(MemoryError):
pass
class CacheError(MemoryError):
pass
# OSS-specific exception classes
class VectorStoreError(MemoryError):
def __init__(self, message: str, error_code: str = "VECTOR_001", details: dict = None,
suggestion: str = "Please check your vector store configuration and connection",
debug_info: dict = None):
super().__init__(message, error_code, details, suggestion, debug_info)
class GraphStoreError(MemoryError):
def __init__(self, message: str, error_code: str = "GRAPH_001", details: dict = None,
suggestion: str = "Please check your graph store configuration and connection",
debug_info: dict = None):
super().__init__(message, error_code, details, suggestion, debug_info)
class EmbeddingError(MemoryError):
def __init__(self, message: str, error_code: str = "EMBED_001", details: dict = None,
suggestion: str = "Please check your embedding model configuration",
debug_info: dict = None):
super().__init__(message, error_code, details, suggestion, debug_info)
class LLMError(MemoryError):
def __init__(self, message: str, error_code: str = "LLM_001", details: dict = None,
suggestion: str = "Please check your LLM configuration and API key",
debug_info: dict = None):
super().__init__(message, error_code, details, suggestion, debug_info)
class DatabaseError(MemoryError):
def __init__(self, message: str, error_code: str = "DB_001", details: dict = None,
suggestion: str = "Please check your database configuration and connection",
debug_info: dict = None):
super().__init__(message, error_code, details, suggestion, debug_info)
class DependencyError(MemoryError):
def __init__(self, message: str, error_code: str = "DEPS_001", details: dict = None,
suggestion: str = "Please install the required dependencies",
debug_info: dict = None):
super().__init__(message, error_code, details, suggestion, debug_info)
# Mapping of HTTP status codes to specific exception classes
HTTP_STATUS_TO_EXCEPTION = {
400: ValidationError,
401: AuthenticationError,
403: AuthenticationError,
404: MemoryNotFoundError,
408: NetworkError,
409: ValidationError,
413: MemoryQuotaExceededError,
422: ValidationError,
429: RateLimitError,
500: MemoryError,
502: NetworkError,
503: NetworkError,
504: NetworkError,
}
def create_exception_from_response(
status_code: int,
response_text: str,
error_code: Optional[str] = None,
details: Optional[Dict[str, Any]] = None,
debug_info: Optional[Dict[str, Any]] = None,
) -> MemoryError:
exception_class = HTTP_STATUS_TO_EXCEPTION.get(status_code, MemoryError)
# Generate error code if not provided
if not error_code:
error_code = f"HTTP_{status_code}"
# Create appropriate suggestion based on status code
suggestions = {
400: "Please check your request parameters and try again",
401: "Please check your API key and authentication credentials",
403: "You don't have permission to perform this operation",
404: "The requested resource was not found",
408: "Request timed out. Please try again",
409: "Resource conflict. Please check your request",
413: "Request too large. Please reduce the size of your request",
422: "Invalid request data. Please check your input",
429: "Rate limit exceeded. Please wait before making more requests",
500: "Internal server error. Please try again later",
502: "Service temporarily unavailable. Please try again later",
503: "Service unavailable. Please try again later",
504: "Gateway timeout. Please try again later",
}
suggestion = suggestions.get(status_code, "Please try again later")
return exception_class(
message=response_text or f"HTTP {status_code} error",
error_code=error_code,
details=details or {},
suggestion=suggestion,
debug_info=debug_info or {},
) | --- +++ @@ -1,8 +1,59 @@+"""Structured exception classes for Mem0 with error codes, suggestions, and debug information.
+
+This module provides a comprehensive set of exception classes that replace the generic
+APIError with specific, actionable exceptions. Each exception includes error codes,
+user-friendly suggestions, and debug information to enable better error handling
+and recovery in applications using Mem0.
+
+Example:
+ Basic usage:
+ try:
+ memory.add(content, user_id=user_id)
+ except RateLimitError as e:
+ # Implement exponential backoff
+ time.sleep(e.debug_info.get('retry_after', 60))
+ except MemoryQuotaExceededError as e:
+ # Trigger quota upgrade flow
+ logger.error(f"Quota exceeded: {e.error_code}")
+ except ValidationError as e:
+ # Return user-friendly error
+ raise HTTPException(400, detail=e.suggestion)
+
+ Advanced usage with error context:
+ try:
+ memory.update(memory_id, content=new_content)
+ except MemoryNotFoundError as e:
+ logger.warning(f"Memory {memory_id} not found: {e.message}")
+ if e.suggestion:
+ logger.info(f"Suggestion: {e.suggestion}")
+"""
from typing import Any, Dict, Optional
class MemoryError(Exception):
+ """Base exception for all memory-related errors.
+
+ This is the base class for all Mem0-specific exceptions. It provides a structured
+ approach to error handling with error codes, contextual details, suggestions for
+ resolution, and debug information.
+
+ Attributes:
+ message (str): Human-readable error message.
+ error_code (str): Unique error identifier for programmatic handling.
+ details (dict): Additional context about the error.
+ suggestion (str): User-friendly suggestion for resolving the error.
+ debug_info (dict): Technical debugging information.
+
+ Example:
+ raise MemoryError(
+ message="Memory operation failed",
+ error_code="MEM_001",
+ details={"operation": "add", "user_id": "user123"},
+ suggestion="Please check your API key and try again",
+ debug_info={"request_id": "req_456", "timestamp": "2024-01-01T00:00:00Z"}
+ )
+ """
def __init__(
self,
@@ -12,6 +63,15 @@ suggestion: Optional[str] = None,
debug_info: Optional[Dict[str, Any]] = None,
):
+ """Initialize a MemoryError.
+
+ Args:
+ message: Human-readable error message.
+ error_code: Unique error identifier.
+ details: Additional context about the error.
+ suggestion: User-friendly suggestion for resolving the error.
+ debug_info: Technical debugging information.
+ """
self.message = message
self.error_code = error_code
self.details = details or {}
@@ -31,47 +91,230 @@
class AuthenticationError(MemoryError):
+ """Raised when authentication fails.
+
+ This exception is raised when API key validation fails, tokens are invalid,
+ or authentication credentials are missing or expired.
+
+ Common scenarios:
+ - Invalid API key
+ - Expired authentication token
+ - Missing authentication headers
+ - Insufficient permissions
+
+ Example:
+ raise AuthenticationError(
+ message="Invalid API key provided",
+ error_code="AUTH_001",
+ suggestion="Please check your API key in the Mem0 dashboard"
+ )
+ """
pass
class RateLimitError(MemoryError):
+ """Raised when rate limits are exceeded.
+
+ This exception is raised when the API rate limit has been exceeded.
+ It includes information about retry timing and current rate limit status.
+
+ The debug_info typically contains:
+ - retry_after: Seconds to wait before retrying
+ - limit: Current rate limit
+ - remaining: Remaining requests in current window
+ - reset_time: When the rate limit window resets
+
+ Example:
+ raise RateLimitError(
+ message="Rate limit exceeded",
+ error_code="RATE_001",
+ suggestion="Please wait before making more requests",
+ debug_info={"retry_after": 60, "limit": 100, "remaining": 0}
+ )
+ """
pass
class ValidationError(MemoryError):
+ """Raised when input validation fails.
+
+ This exception is raised when request parameters, memory content,
+ or configuration values fail validation checks.
+
+ Common scenarios:
+ - Invalid user_id format
+ - Missing required fields
+ - Content too long or too short
+ - Invalid metadata format
+ - Malformed filters
+
+ Example:
+ raise ValidationError(
+ message="Invalid user_id format",
+ error_code="VAL_001",
+ details={"field": "user_id", "value": "123", "expected": "string"},
+ suggestion="User ID must be a non-empty string"
+ )
+ """
pass
class MemoryNotFoundError(MemoryError):
+ """Raised when a memory is not found.
+
+ This exception is raised when attempting to access, update, or delete
+ a memory that doesn't exist or is not accessible to the current user.
+
+ Example:
+ raise MemoryNotFoundError(
+ message="Memory not found",
+ error_code="MEM_404",
+ details={"memory_id": "mem_123", "user_id": "user_456"},
+ suggestion="Please check the memory ID and ensure it exists"
+ )
+ """
pass
class NetworkError(MemoryError):
+ """Raised when network connectivity issues occur.
+
+ This exception is raised for network-related problems such as
+ connection timeouts, DNS resolution failures, or service unavailability.
+
+ Common scenarios:
+ - Connection timeout
+ - DNS resolution failure
+ - Service temporarily unavailable
+ - Network connectivity issues
+
+ Example:
+ raise NetworkError(
+ message="Connection timeout",
+ error_code="NET_001",
+ suggestion="Please check your internet connection and try again",
+ debug_info={"timeout": 30, "endpoint": "api.mem0.ai"}
+ )
+ """
pass
class ConfigurationError(MemoryError):
+ """Raised when client configuration is invalid.
+
+ This exception is raised when the client is improperly configured,
+ such as missing required settings or invalid configuration values.
+
+ Common scenarios:
+ - Missing API key
+ - Invalid host URL
+ - Incompatible configuration options
+ - Missing required environment variables
+
+ Example:
+ raise ConfigurationError(
+ message="API key not configured",
+ error_code="CFG_001",
+ suggestion="Set MEM0_API_KEY environment variable or pass api_key parameter"
+ )
+ """
pass
class MemoryQuotaExceededError(MemoryError):
+ """Raised when user's memory quota is exceeded.
+
+ This exception is raised when the user has reached their memory
+ storage or usage limits.
+
+ The debug_info typically contains:
+ - current_usage: Current memory usage
+ - quota_limit: Maximum allowed usage
+ - usage_type: Type of quota (storage, requests, etc.)
+
+ Example:
+ raise MemoryQuotaExceededError(
+ message="Memory quota exceeded",
+ error_code="QUOTA_001",
+ suggestion="Please upgrade your plan or delete unused memories",
+ debug_info={"current_usage": 1000, "quota_limit": 1000, "usage_type": "memories"}
+ )
+ """
pass
class MemoryCorruptionError(MemoryError):
+ """Raised when memory data is corrupted.
+
+ This exception is raised when stored memory data is found to be
+ corrupted, malformed, or otherwise unreadable.
+
+ Example:
+ raise MemoryCorruptionError(
+ message="Memory data is corrupted",
+ error_code="CORRUPT_001",
+ details={"memory_id": "mem_123"},
+ suggestion="Please contact support for data recovery assistance"
+ )
+ """
pass
class VectorSearchError(MemoryError):
+ """Raised when vector search operations fail.
+
+ This exception is raised when vector database operations fail,
+ such as search queries, embedding generation, or index operations.
+
+ Common scenarios:
+ - Embedding model unavailable
+ - Vector index corruption
+ - Search query timeout
+ - Incompatible vector dimensions
+
+ Example:
+ raise VectorSearchError(
+ message="Vector search failed",
+ error_code="VEC_001",
+ details={"query": "find similar memories", "vector_dim": 1536},
+ suggestion="Please try a simpler search query"
+ )
+ """
pass
class CacheError(MemoryError):
+ """Raised when caching operations fail.
+
+ This exception is raised when cache-related operations fail,
+ such as cache misses, cache invalidation errors, or cache corruption.
+
+ Example:
+ raise CacheError(
+ message="Cache operation failed",
+ error_code="CACHE_001",
+ details={"operation": "get", "key": "user_memories_123"},
+ suggestion="Cache will be refreshed automatically"
+ )
+ """
pass
# OSS-specific exception classes
class VectorStoreError(MemoryError):
+ """Raised when vector store operations fail.
+
+ This exception is raised when vector store operations fail,
+ such as embedding storage, similarity search, or vector operations.
+
+ Example:
+ raise VectorStoreError(
+ message="Vector store operation failed",
+ error_code="VECTOR_001",
+ details={"operation": "search", "collection": "memories"},
+ suggestion="Please check your vector store configuration and connection"
+ )
+ """
def __init__(self, message: str, error_code: str = "VECTOR_001", details: dict = None,
suggestion: str = "Please check your vector store configuration and connection",
debug_info: dict = None):
@@ -79,6 +322,19 @@
class GraphStoreError(MemoryError):
+ """Raised when graph store operations fail.
+
+ This exception is raised when graph store operations fail,
+ such as relationship creation, entity management, or graph queries.
+
+ Example:
+ raise GraphStoreError(
+ message="Graph store operation failed",
+ error_code="GRAPH_001",
+ details={"operation": "create_relationship", "entity": "user_123"},
+ suggestion="Please check your graph store configuration and connection"
+ )
+ """
def __init__(self, message: str, error_code: str = "GRAPH_001", details: dict = None,
suggestion: str = "Please check your graph store configuration and connection",
debug_info: dict = None):
@@ -86,6 +342,19 @@
class EmbeddingError(MemoryError):
+ """Raised when embedding operations fail.
+
+ This exception is raised when embedding operations fail,
+ such as text embedding generation or embedding model errors.
+
+ Example:
+ raise EmbeddingError(
+ message="Embedding generation failed",
+ error_code="EMBED_001",
+ details={"text_length": 1000, "model": "openai"},
+ suggestion="Please check your embedding model configuration"
+ )
+ """
def __init__(self, message: str, error_code: str = "EMBED_001", details: dict = None,
suggestion: str = "Please check your embedding model configuration",
debug_info: dict = None):
@@ -93,6 +362,19 @@
class LLMError(MemoryError):
+ """Raised when LLM operations fail.
+
+ This exception is raised when LLM operations fail,
+ such as text generation, completion, or model inference errors.
+
+ Example:
+ raise LLMError(
+ message="LLM operation failed",
+ error_code="LLM_001",
+ details={"model": "gpt-4", "prompt_length": 500},
+ suggestion="Please check your LLM configuration and API key"
+ )
+ """
def __init__(self, message: str, error_code: str = "LLM_001", details: dict = None,
suggestion: str = "Please check your LLM configuration and API key",
debug_info: dict = None):
@@ -100,6 +382,19 @@
class DatabaseError(MemoryError):
+ """Raised when database operations fail.
+
+ This exception is raised when database operations fail,
+ such as SQLite operations, connection issues, or data corruption.
+
+ Example:
+ raise DatabaseError(
+ message="Database operation failed",
+ error_code="DB_001",
+ details={"operation": "insert", "table": "memories"},
+ suggestion="Please check your database configuration and connection"
+ )
+ """
def __init__(self, message: str, error_code: str = "DB_001", details: dict = None,
suggestion: str = "Please check your database configuration and connection",
debug_info: dict = None):
@@ -107,6 +402,19 @@
class DependencyError(MemoryError):
+ """Raised when required dependencies are missing.
+
+ This exception is raised when required dependencies are missing,
+ such as optional packages for specific providers or features.
+
+ Example:
+ raise DependencyError(
+ message="Required dependency missing",
+ error_code="DEPS_001",
+ details={"package": "kuzu", "feature": "graph_store"},
+ suggestion="Please install the required dependencies: pip install kuzu"
+ )
+ """
def __init__(self, message: str, error_code: str = "DEPS_001", details: dict = None,
suggestion: str = "Please install the required dependencies",
debug_info: dict = None):
@@ -138,6 +446,29 @@ details: Optional[Dict[str, Any]] = None,
debug_info: Optional[Dict[str, Any]] = None,
) -> MemoryError:
+ """Create an appropriate exception based on HTTP response.
+
+ This function analyzes the HTTP status code and response to create
+ the most appropriate exception type with relevant error information.
+
+ Args:
+ status_code: HTTP status code from the response.
+ response_text: Response body text.
+ error_code: Optional specific error code.
+ details: Additional error context.
+ debug_info: Debug information.
+
+ Returns:
+ An instance of the appropriate MemoryError subclass.
+
+ Example:
+ exception = create_exception_from_response(
+ status_code=429,
+ response_text="Rate limit exceeded",
+ debug_info={"retry_after": 60}
+ )
+ # Returns a RateLimitError instance
+ """
exception_class = HTTP_STATUS_TO_EXCEPTION.get(status_code, MemoryError)
# Generate error code if not provided
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/exceptions.py |
Add docstrings following best practices | from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, model_validator
class CassandraConfig(BaseModel):
contact_points: List[str] = Field(
...,
description="List of contact point addresses (e.g., ['127.0.0.1', '127.0.0.2'])"
)
port: int = Field(9042, description="Cassandra port")
username: Optional[str] = Field(None, description="Database username")
password: Optional[str] = Field(None, description="Database password")
keyspace: str = Field("mem0", description="Keyspace name")
collection_name: str = Field("memories", description="Table name")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
secure_connect_bundle: Optional[str] = Field(
None,
description="Path to secure connect bundle for DataStax Astra DB"
)
protocol_version: int = Field(4, description="CQL protocol version")
load_balancing_policy: Optional[Any] = Field(
None,
description="Custom load balancing policy object"
)
@model_validator(mode="before")
@classmethod
def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
username = values.get("username")
password = values.get("password")
# Both username and password must be provided together or not at all
if (username and not password) or (password and not username):
raise ValueError(
"Both 'username' and 'password' must be provided together for authentication"
)
return values
@model_validator(mode="before")
@classmethod
def check_connection_config(cls, values: Dict[str, Any]) -> Dict[str, Any]:
secure_connect_bundle = values.get("secure_connect_bundle")
contact_points = values.get("contact_points")
# Either secure_connect_bundle or contact_points must be provided
if not secure_connect_bundle and not contact_points:
raise ValueError(
"Either 'contact_points' or 'secure_connect_bundle' must be provided"
)
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
class Config:
arbitrary_types_allowed = True
| --- +++ @@ -4,6 +4,7 @@
class CassandraConfig(BaseModel):
+ """Configuration for Apache Cassandra vector database."""
contact_points: List[str] = Field(
...,
@@ -28,6 +29,7 @@ @model_validator(mode="before")
@classmethod
def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate authentication parameters."""
username = values.get("username")
password = values.get("password")
@@ -42,6 +44,7 @@ @model_validator(mode="before")
@classmethod
def check_connection_config(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate connection configuration."""
secure_connect_bundle = values.get("secure_connect_bundle")
contact_points = values.get("contact_points")
@@ -56,6 +59,7 @@ @model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate that no extra fields are provided."""
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
@@ -70,3 +74,4 @@
class Config:
arbitrary_types_allowed = True
+
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/vector_stores/cassandra.py |
Auto-generate documentation strings for this file | import json
import logging
import httpx
from mem0.exceptions import (
NetworkError,
create_exception_from_response,
)
logger = logging.getLogger(__name__)
class APIError(Exception):
pass
def api_error_handler(func):
from functools import wraps
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error occurred: {e}")
# Extract error details from response
response_text = ""
error_details = {}
debug_info = {
"status_code": e.response.status_code,
"url": str(e.request.url),
"method": e.request.method,
}
try:
response_text = e.response.text
# Try to parse JSON response for additional error details
if e.response.headers.get("content-type", "").startswith("application/json"):
error_data = json.loads(response_text)
if isinstance(error_data, dict):
error_details = error_data
response_text = error_data.get("detail", response_text)
except (json.JSONDecodeError, AttributeError):
# Fallback to plain text response
pass
# Add rate limit information if available
if e.response.status_code == 429:
retry_after = e.response.headers.get("Retry-After")
if retry_after:
try:
debug_info["retry_after"] = int(retry_after)
except ValueError:
pass
# Add rate limit headers if available
for header in ["X-RateLimit-Limit", "X-RateLimit-Remaining", "X-RateLimit-Reset"]:
value = e.response.headers.get(header)
if value:
debug_info[header.lower().replace("-", "_")] = value
# Create specific exception based on status code
exception = create_exception_from_response(
status_code=e.response.status_code,
response_text=response_text,
details=error_details,
debug_info=debug_info,
)
raise exception
except httpx.RequestError as e:
logger.error(f"Request error occurred: {e}")
# Determine the appropriate exception type based on error type
if isinstance(e, httpx.TimeoutException):
raise NetworkError(
message=f"Request timed out: {str(e)}",
error_code="NET_TIMEOUT",
suggestion="Please check your internet connection and try again",
debug_info={"error_type": "timeout", "original_error": str(e)},
)
elif isinstance(e, httpx.ConnectError):
raise NetworkError(
message=f"Connection failed: {str(e)}",
error_code="NET_CONNECT",
suggestion="Please check your internet connection and try again",
debug_info={"error_type": "connection", "original_error": str(e)},
)
else:
# Generic network error for other request errors
raise NetworkError(
message=f"Network request failed: {str(e)}",
error_code="NET_GENERIC",
suggestion="Please check your internet connection and try again",
debug_info={"error_type": "request", "original_error": str(e)},
)
return wrapper | --- +++ @@ -11,11 +11,25 @@
class APIError(Exception):
+ """Exception raised for errors in the API.
+
+ Deprecated: Use specific exception classes from mem0.exceptions instead.
+ This class is maintained for backward compatibility.
+ """
pass
def api_error_handler(func):
+ """Decorator to handle API errors consistently.
+
+ This decorator catches HTTP and request errors and converts them to
+ appropriate structured exception classes with detailed error information.
+
+ The decorator analyzes HTTP status codes and response content to create
+ the most specific exception type with helpful error messages, suggestions,
+ and debug information.
+ """
from functools import wraps
@wraps(func)
@@ -98,4 +112,4 @@ debug_info={"error_type": "request", "original_error": str(e)},
)
- return wrapper+ return wrapper
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/client/utils.py |
Add docstrings explaining edge cases | from abc import ABC, abstractmethod
from typing import Literal, Optional
from mem0.configs.embeddings.base import BaseEmbedderConfig
class EmbeddingBase(ABC):
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
if config is None:
self.config = BaseEmbedderConfig()
else:
self.config = config
@abstractmethod
def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]]):
pass | --- +++ @@ -5,6 +5,11 @@
class EmbeddingBase(ABC):
+ """Initialized a base embedding class
+
+ :param config: Embedding configuration option class, defaults to None
+ :type config: Optional[BaseEmbedderConfig], optional
+ """
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
if config is None:
@@ -14,4 +19,13 @@
@abstractmethod
def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]]):
- pass+ """
+ Get the embedding for the given text.
+
+ Args:
+ text (str): The text to embed.
+ memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+ Returns:
+ list: The embedding vector.
+ """
+ pass
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/embeddings/base.py |
Write docstrings for utility functions | import json
import os
import time
from collections import defaultdict
import numpy as np
import tiktoken
from dotenv import load_dotenv
from jinja2 import Template
from openai import OpenAI
from tqdm import tqdm
load_dotenv()
PROMPT = """
# Question:
{{QUESTION}}
# Context:
{{CONTEXT}}
# Short answer:
"""
class RAGManager:
def __init__(self, data_path="dataset/locomo10_rag.json", chunk_size=500, k=1):
self.model = os.getenv("MODEL")
self.client = OpenAI()
self.data_path = data_path
self.chunk_size = chunk_size
self.k = k
def generate_response(self, question, context):
template = Template(PROMPT)
prompt = template.render(CONTEXT=context, QUESTION=question)
max_retries = 3
retries = 0
while retries <= max_retries:
try:
t1 = time.time()
response = self.client.chat.completions.create(
model=self.model,
messages=[
{
"role": "system",
"content": "You are a helpful assistant that can answer "
"questions based on the provided context."
"If the question involves timing, use the conversation date for reference."
"Provide the shortest possible answer."
"Use words directly from the conversation when possible."
"Avoid using subjects in your answer.",
},
{"role": "user", "content": prompt},
],
temperature=0,
)
t2 = time.time()
return response.choices[0].message.content.strip(), t2 - t1
except Exception as e:
retries += 1
if retries > max_retries:
raise e
time.sleep(1) # Wait before retrying
def clean_chat_history(self, chat_history):
cleaned_chat_history = ""
for c in chat_history:
cleaned_chat_history += f"{c['timestamp']} | {c['speaker']}: {c['text']}\n"
return cleaned_chat_history
def calculate_embedding(self, document):
response = self.client.embeddings.create(model=os.getenv("EMBEDDING_MODEL"), input=document)
return response.data[0].embedding
def calculate_similarity(self, embedding1, embedding2):
return np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
def search(self, query, chunks, embeddings, k=1):
t1 = time.time()
query_embedding = self.calculate_embedding(query)
similarities = [self.calculate_similarity(query_embedding, embedding) for embedding in embeddings]
# Get indices of top-k most similar chunks
if k == 1:
# Original behavior - just get the most similar chunk
top_indices = [np.argmax(similarities)]
else:
# Get indices of top-k chunks
top_indices = np.argsort(similarities)[-k:][::-1]
# Combine the top-k chunks
combined_chunks = "\n<->\n".join([chunks[i] for i in top_indices])
t2 = time.time()
return combined_chunks, t2 - t1
def create_chunks(self, chat_history, chunk_size=500):
# Get the encoding for the model
encoding = tiktoken.encoding_for_model(os.getenv("EMBEDDING_MODEL"))
documents = self.clean_chat_history(chat_history)
if chunk_size == -1:
return [documents], []
chunks = []
# Encode the document
tokens = encoding.encode(documents)
# Split into chunks based on token count
for i in range(0, len(tokens), chunk_size):
chunk_tokens = tokens[i : i + chunk_size]
chunk = encoding.decode(chunk_tokens)
chunks.append(chunk)
embeddings = []
for chunk in chunks:
embedding = self.calculate_embedding(chunk)
embeddings.append(embedding)
return chunks, embeddings
def process_all_conversations(self, output_file_path):
with open(self.data_path, "r") as f:
data = json.load(f)
FINAL_RESULTS = defaultdict(list)
for key, value in tqdm(data.items(), desc="Processing conversations"):
chat_history = value["conversation"]
questions = value["question"]
chunks, embeddings = self.create_chunks(chat_history, self.chunk_size)
for item in tqdm(questions, desc="Answering questions", leave=False):
question = item["question"]
answer = item.get("answer", "")
category = item["category"]
if self.chunk_size == -1:
context = chunks[0]
search_time = 0
else:
context, search_time = self.search(question, chunks, embeddings, k=self.k)
response, response_time = self.generate_response(question, context)
FINAL_RESULTS[key].append(
{
"question": question,
"answer": answer,
"category": category,
"context": context,
"response": response,
"search_time": search_time,
"response_time": response_time,
}
)
with open(output_file_path, "w+") as f:
json.dump(FINAL_RESULTS, f, indent=4)
# Save results
with open(output_file_path, "w+") as f:
json.dump(FINAL_RESULTS, f, indent=4) | --- +++ @@ -80,6 +80,19 @@ return np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
def search(self, query, chunks, embeddings, k=1):
+ """
+ Search for the top-k most similar chunks to the query.
+
+ Args:
+ query: The query string
+ chunks: List of text chunks
+ embeddings: List of embeddings for each chunk
+ k: Number of top chunks to return (default: 1)
+
+ Returns:
+ combined_chunks: The combined text of the top-k chunks
+ search_time: Time taken for the search
+ """
t1 = time.time()
query_embedding = self.calculate_embedding(query)
similarities = [self.calculate_similarity(query_embedding, embedding) for embedding in embeddings]
@@ -99,6 +112,9 @@ return combined_chunks, t2 - t1
def create_chunks(self, chat_history, chunk_size=500):
+ """
+ Create chunks using tiktoken for more accurate token counting
+ """
# Get the encoding for the model
encoding = tiktoken.encoding_for_model(os.getenv("EMBEDDING_MODEL"))
@@ -164,4 +180,4 @@
# Save results
with open(output_file_path, "w+") as f:
- json.dump(FINAL_RESULTS, f, indent=4)+ json.dump(FINAL_RESULTS, f, indent=4)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/evaluation/src/rag.py |
Add docstrings for internal functions | from typing import Any, Dict, Optional
from mem0.configs.base import AzureConfig
from mem0.configs.llms.base import BaseLlmConfig
class AzureOpenAIConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# Azure OpenAI-specific parameters
azure_kwargs: Optional[Dict[str, Any]] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# Azure OpenAI-specific parameters
self.azure_kwargs = AzureConfig(**(azure_kwargs or {})) | --- +++ @@ -5,6 +5,10 @@
class AzureOpenAIConfig(BaseLlmConfig):
+ """
+ Configuration class for Azure OpenAI-specific parameters.
+ Inherits from BaseLlmConfig and adds Azure OpenAI-specific settings.
+ """
def __init__(
self,
@@ -21,6 +25,21 @@ # Azure OpenAI-specific parameters
azure_kwargs: Optional[Dict[str, Any]] = None,
):
+ """
+ Initialize Azure OpenAI configuration.
+
+ Args:
+ model: Azure OpenAI model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: Azure OpenAI API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ azure_kwargs: Azure-specific configuration, defaults to None
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -35,4 +54,4 @@ )
# Azure OpenAI-specific parameters
- self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))+ self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/azure.py |
Write reusable docstrings | import json
from typing import Dict, List, Optional, Union
try:
from ollama import Client
except ImportError:
raise ImportError("The 'ollama' library is required. Please install it using 'pip install ollama'.")
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.ollama import OllamaConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class OllamaLLM(LLMBase):
def __init__(self, config: Optional[Union[BaseLlmConfig, OllamaConfig, Dict]] = None):
# Convert to OllamaConfig if needed
if config is None:
config = OllamaConfig()
elif isinstance(config, dict):
config = OllamaConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, OllamaConfig):
# Convert BaseLlmConfig to OllamaConfig
config = OllamaConfig(
model=config.model,
temperature=config.temperature,
api_key=config.api_key,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=config.enable_vision,
vision_details=config.vision_details,
http_client_proxies=config.http_client,
)
super().__init__(config)
if not self.config.model:
self.config.model = "llama3.1:70b"
self.client = Client(host=self.config.ollama_base_url)
def _parse_response(self, response, tools):
# Get the content from response
if isinstance(response, dict):
content = response["message"]["content"]
else:
content = response.message.content
if tools:
processed_response = {
"content": content,
"tool_calls": [],
}
if isinstance(response, dict):
raw_calls = response.get("message", {}).get("tool_calls") or []
else:
raw_calls = getattr(response.message, "tool_calls", None) or []
for tool_call in raw_calls:
if isinstance(tool_call, dict):
fn = tool_call.get("function", {})
name = fn.get("name", "")
arguments = fn.get("arguments", {})
else:
fn = getattr(tool_call, "function", None)
name = getattr(fn, "name", "") if fn else ""
arguments = getattr(fn, "arguments", {}) if fn else {}
if isinstance(arguments, str):
arguments = json.loads(extract_json(arguments))
processed_response["tool_calls"].append(
{"name": name, "arguments": arguments}
)
return processed_response
else:
return content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
# Build parameters for Ollama
params = {
"model": self.config.model,
"messages": messages,
}
# Handle JSON response format by using Ollama's native format parameter
if response_format and response_format.get("type") == "json_object":
params["format"] = "json"
# Also add JSON format instruction to the last message as a fallback
if messages and messages[-1]["role"] == "user":
messages[-1]["content"] += "\n\nPlease respond with valid JSON only."
else:
messages.append({"role": "user", "content": "Please respond with valid JSON only."})
# Add options for Ollama (temperature, num_predict, top_p)
options = {
"temperature": self.config.temperature,
"num_predict": self.config.max_tokens,
"top_p": self.config.top_p,
}
params["options"] = options
# Remove OpenAI-specific parameters that Ollama doesn't support
params.pop("max_tokens", None) # Ollama uses different parameter names
if tools:
params["tools"] = tools
response = self.client.chat(**params)
return self._parse_response(response, tools) | --- +++ @@ -41,6 +41,16 @@ self.client = Client(host=self.config.ollama_base_url)
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
# Get the content from response
if isinstance(response, dict):
content = response["message"]["content"]
@@ -87,6 +97,19 @@ tool_choice: str = "auto",
**kwargs,
):
+ """
+ Generate a response based on the given messages using Ollama.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+ **kwargs: Additional Ollama-specific parameters.
+
+ Returns:
+ str: The generated response.
+ """
# Build parameters for Ollama
params = {
"model": self.config.model,
@@ -117,4 +140,4 @@ params["tools"] = tools
response = self.client.chat(**params)
- return self._parse_response(response, tools)+ return self._parse_response(response, tools)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/ollama.py |
Add docstrings including usage examples | import hashlib
import logging
import os
import warnings
from typing import Any, Dict, List, Optional, Union
import httpx
import requests
from mem0.client.project import AsyncProject, Project
from mem0.client.utils import api_error_handler
# Exception classes are referenced in docstrings only
from mem0.memory.setup import get_user_id, setup_config
from mem0.memory.telemetry import capture_client_event
# Module-level logger for the Mem0 client.
logger = logging.getLogger(__name__)
# Re-enable DeprecationWarning (hidden by default in Python) so callers see
# the deprecation notices emitted by this module (e.g. get_project/update_project).
warnings.filterwarnings("default", category=DeprecationWarning)
# Setup user config
# NOTE(review): presumably creates/loads the local Mem0 user configuration
# used for telemetry identity -- confirm against mem0.memory.setup.
setup_config()
class MemoryClient:
    """Synchronous client for the Mem0 REST API.

    Wraps memory CRUD, search, export, project settings, webhook, and
    feedback endpoints over HTTP. Every request is authenticated with a
    Mem0 API key sent as a ``Token`` Authorization header.

    Example:
        >>> client = MemoryClient(api_key="m0-...")
        >>> client.add([{"role": "user", "content": "I like tea"}], user_id="alice")
        >>> client.search("beverages", user_id="alice")

    Attributes:
        api_key: API key used for authentication.
        host: Base URL of the Mem0 API.
        org_id: Organization ID (explicit or discovered via /v1/ping/).
        project_id: Project ID (explicit or discovered via /v1/ping/).
        user_id: MD5 hash of the API key, sent as the Mem0-User-ID header.
        client: Underlying ``httpx.Client``.
        user_email: Account email returned by API-key validation.
        project: ``Project`` helper bound to this client.
    """
    def __init__(
        self,
        api_key: Optional[str] = None,
        host: Optional[str] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        client: Optional[httpx.Client] = None,
    ):
        """Initialize the client and validate the API key against the API.

        Args:
            api_key: Mem0 API key; falls back to the ``MEM0_API_KEY`` env var.
            host: API base URL; defaults to ``https://api.mem0.ai``.
            org_id: Optional organization ID (must be paired with project_id).
            project_id: Optional project ID (must be paired with org_id).
            client: Optional pre-built ``httpx.Client``; its base_url and auth
                headers are overwritten to match this client's settings.

        Raises:
            ValueError: If no API key is available, or key validation fails.
        """
        self.api_key = api_key or os.getenv("MEM0_API_KEY")
        self.host = host or "https://api.mem0.ai"
        self.org_id = org_id
        self.project_id = project_id
        # NOTE(review): this value is immediately overwritten by the MD5 hash
        # below; the get_user_id() result is effectively unused here.
        self.user_id = get_user_id()
        if not self.api_key:
            raise ValueError("Mem0 API Key not provided. Please provide an API Key.")
        # Create MD5 hash of API key for user_id
        self.user_id = hashlib.md5(self.api_key.encode()).hexdigest()
        if client is not None:
            self.client = client
            # Ensure the client has the correct base_url and headers
            self.client.base_url = httpx.URL(self.host)
            self.client.headers.update(
                {
                    "Authorization": f"Token {self.api_key}",
                    "Mem0-User-ID": self.user_id,
                }
            )
        else:
            self.client = httpx.Client(
                base_url=self.host,
                headers={
                    "Authorization": f"Token {self.api_key}",
                    "Mem0-User-ID": self.user_id,
                },
                timeout=300,
            )
        self.user_email = self._validate_api_key()
        # Initialize project manager
        self.project = Project(
            client=self.client,
            org_id=self.org_id,
            project_id=self.project_id,
            user_email=self.user_email,
        )
        capture_client_event("client.init", self, {"sync_type": "sync"})
    def _validate_api_key(self):
        """Verify the API key via ``/v1/ping/`` and return the account email.

        Also adopts the org_id/project_id returned by the server when both
        are present in the ping response.

        Raises:
            ValueError: If the ping request fails; the message is taken from
                the server's ``detail`` field when available.
        """
        try:
            params = self._prepare_params()
            response = self.client.get("/v1/ping/", params=params)
            data = response.json()
            response.raise_for_status()
            if data.get("org_id") and data.get("project_id"):
                self.org_id = data.get("org_id")
                self.project_id = data.get("project_id")
            return data.get("user_email")
        except httpx.HTTPStatusError as e:
            try:
                error_data = e.response.json()
                error_message = error_data.get("detail", str(e))
            except Exception:
                error_message = str(e)
            raise ValueError(f"Error: {error_message}")
    @api_error_handler
    def add(self, messages, **kwargs) -> Dict[str, Any]:
        """Create new memories from one or more messages.

        Args:
            messages: A string, a single message dict, or a list of dicts
                with ``role``/``content`` keys. A plain string is wrapped as
                a user message.
            **kwargs: Scoping and options such as user_id, agent_id, app_id,
                run_id, metadata, async_mode (defaults to True).

        Returns:
            The API response, always in v1.1 output format.

        Raises:
            ValueError: If ``messages`` is not a str, dict, or list.
        """
        # Handle different message input formats (align with OSS behavior)
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        elif isinstance(messages, dict):
            messages = [messages]
        elif not isinstance(messages, list):
            raise ValueError(
                f"messages must be str, dict, or list[dict], got {type(messages).__name__}"
            )
        kwargs = self._prepare_params(kwargs)
        # Set async_mode to True by default, but allow user override
        if "async_mode" not in kwargs:
            kwargs["async_mode"] = True
        # Force v1.1 format for all add operations
        kwargs["output_format"] = "v1.1"
        payload = self._prepare_payload(messages, kwargs)
        response = self.client.post("/v1/memories/", json=payload)
        response.raise_for_status()
        # Strip metadata before telemetry so user data is not reported
        if "metadata" in kwargs:
            del kwargs["metadata"]
        capture_client_event("client.add", self, {"keys": list(kwargs.keys()), "sync_type": "sync"})
        return response.json()
    @api_error_handler
    def get(self, memory_id: str) -> Dict[str, Any]:
        """Fetch a single memory by its ID."""
        params = self._prepare_params()
        response = self.client.get(f"/v1/memories/{memory_id}/", params=params)
        response.raise_for_status()
        capture_client_event("client.get", self, {"memory_id": memory_id, "sync_type": "sync"})
        return response.json()
    @api_error_handler
    def get_all(self, **kwargs) -> Dict[str, Any]:
        """List memories matching the given filters (v2 API).

        Args:
            **kwargs: Filters (user_id, agent_id, ...) plus optional ``page``
                and ``page_size`` for pagination (both required together).

        Returns:
            A dict in v1.1 format; a raw list response is wrapped under
            ``"results"``.
        """
        params = self._prepare_params(kwargs)
        params.pop("async_mode", None)
        # Pagination values travel as query params, everything else as body
        if "page" in params and "page_size" in params:
            query_params = {
                "page": params.pop("page"),
                "page_size": params.pop("page_size"),
            }
            response = self.client.post("/v2/memories/", json=params, params=query_params)
        else:
            response = self.client.post("/v2/memories/", json=params)
        response.raise_for_status()
        if "metadata" in kwargs:
            del kwargs["metadata"]
        capture_client_event(
            "client.get_all",
            self,
            {
                "api_version": "v2",
                "keys": list(kwargs.keys()),
                "sync_type": "sync",
            },
        )
        result = response.json()
        # Ensure v1.1 format (wrap raw list if needed)
        if isinstance(result, list):
            return {"results": result}
        return result
    @api_error_handler
    def search(self, query: str, **kwargs) -> Dict[str, Any]:
        """Search memories for ``query`` with optional filters (v2 API).

        Returns:
            A dict in v1.1 format; a raw list response is wrapped under
            ``"results"``.
        """
        payload = {"query": query}
        params = self._prepare_params(kwargs)
        params.pop("async_mode", None)
        payload.update(params)
        response = self.client.post("/v2/memories/search/", json=payload)
        response.raise_for_status()
        if "metadata" in kwargs:
            del kwargs["metadata"]
        capture_client_event(
            "client.search",
            self,
            {
                "api_version": "v2",
                "keys": list(kwargs.keys()),
                "sync_type": "sync",
            },
        )
        result = response.json()
        # Ensure v1.1 format (wrap raw list if needed)
        if isinstance(result, list):
            return {"results": result}
        return result
    @api_error_handler
    def update(
        self,
        memory_id: str,
        text: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        timestamp: Optional[Union[int, float, str]] = None,
    ) -> Dict[str, Any]:
        """Update a memory's text, metadata, and/or timestamp.

        Raises:
            ValueError: If none of text, metadata, or timestamp is given.
        """
        if text is None and metadata is None and timestamp is None:
            raise ValueError("At least one of text, metadata, or timestamp must be provided for update.")
        # Only include the fields that were actually supplied
        payload = {}
        if text is not None:
            payload["text"] = text
        if metadata is not None:
            payload["metadata"] = metadata
        if timestamp is not None:
            payload["timestamp"] = timestamp
        capture_client_event("client.update", self, {"memory_id": memory_id, "sync_type": "sync"})
        params = self._prepare_params()
        response = self.client.put(f"/v1/memories/{memory_id}/", json=payload, params=params)
        response.raise_for_status()
        return response.json()
    @api_error_handler
    def delete(self, memory_id: str) -> Dict[str, Any]:
        """Delete a single memory by its ID."""
        params = self._prepare_params()
        response = self.client.delete(f"/v1/memories/{memory_id}/", params=params)
        response.raise_for_status()
        capture_client_event("client.delete", self, {"memory_id": memory_id, "sync_type": "sync"})
        return response.json()
    @api_error_handler
    def delete_all(self, **kwargs) -> Dict[str, str]:
        """Delete all memories matching the given filters (user_id, etc.)."""
        params = self._prepare_params(kwargs)
        response = self.client.delete("/v1/memories/", params=params)
        response.raise_for_status()
        capture_client_event(
            "client.delete_all",
            self,
            {"keys": list(kwargs.keys()), "sync_type": "sync"},
        )
        return response.json()
    @api_error_handler
    def history(self, memory_id: str) -> List[Dict[str, Any]]:
        """Return the change history of a memory."""
        params = self._prepare_params()
        response = self.client.get(f"/v1/memories/{memory_id}/history/", params=params)
        response.raise_for_status()
        capture_client_event("client.history", self, {"memory_id": memory_id, "sync_type": "sync"})
        return response.json()
    @api_error_handler
    def users(self) -> Dict[str, Any]:
        """List all entities (users, agents, apps, runs) known to the project."""
        params = self._prepare_params()
        response = self.client.get("/v1/entities/", params=params)
        response.raise_for_status()
        capture_client_event("client.users", self, {"sync_type": "sync"})
        return response.json()
    @api_error_handler
    def delete_users(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        app_id: Optional[str] = None,
        run_id: Optional[str] = None,
    ) -> Dict[str, str]:
        """Delete one entity, or every entity when no ID is supplied.

        Exactly one target is honored, checked in order: user_id, agent_id,
        app_id, run_id. With no arguments, all entities returned by
        ``users()`` are deleted.

        Raises:
            ValueError: If there are no entities to delete.
        """
        if user_id:
            to_delete = [{"type": "user", "name": user_id}]
        elif agent_id:
            to_delete = [{"type": "agent", "name": agent_id}]
        elif app_id:
            to_delete = [{"type": "app", "name": app_id}]
        elif run_id:
            to_delete = [{"type": "run", "name": run_id}]
        else:
            entities = self.users()
            # Filter entities based on provided IDs using list comprehension
            to_delete = [{"type": entity["type"], "name": entity["name"]} for entity in entities["results"]]
        params = self._prepare_params()
        if not to_delete:
            raise ValueError("No entities to delete")
        # Delete entities and check response immediately
        for entity in to_delete:
            response = self.client.delete(f"/v2/entities/{entity['type']}/{entity['name']}/", params=params)
            response.raise_for_status()
        capture_client_event(
            "client.delete_users",
            self,
            {
                "user_id": user_id,
                "agent_id": agent_id,
                "app_id": app_id,
                "run_id": run_id,
                "sync_type": "sync",
            },
        )
        return {
            "message": "Entity deleted successfully."
            if (user_id or agent_id or app_id or run_id)
            else "All users, agents, apps and runs deleted."
        }
    @api_error_handler
    def reset(self) -> Dict[str, str]:
        """Delete every entity (and with them all memories) for this client."""
        self.delete_users()
        capture_client_event("client.reset", self, {"sync_type": "sync"})
        return {"message": "Client reset successful. All users and memories deleted."}
    @api_error_handler
    def batch_update(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Update multiple memories in a single request."""
        response = self.client.put("/v1/batch/", json={"memories": memories})
        response.raise_for_status()
        capture_client_event("client.batch_update", self, {"sync_type": "sync"})
        return response.json()
    @api_error_handler
    def batch_delete(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Delete multiple memories in a single request.

        Uses ``client.request`` because httpx's ``delete`` shortcut does not
        accept a JSON body.
        """
        response = self.client.request("DELETE", "/v1/batch/", json={"memories": memories})
        response.raise_for_status()
        capture_client_event("client.batch_delete", self, {"sync_type": "sync"})
        return response.json()
    @api_error_handler
    def create_memory_export(self, schema: str, **kwargs) -> Dict[str, Any]:
        """Start a memory export job using the given schema.

        Args:
            schema: Export schema identifier/definition passed to the API.
            **kwargs: Additional filters forwarded to the export endpoint.
        """
        response = self.client.post(
            "/v1/exports/",
            json={"schema": schema, **self._prepare_params(kwargs)},
        )
        response.raise_for_status()
        capture_client_event(
            "client.create_memory_export",
            self,
            {
                "schema": schema,
                "keys": list(kwargs.keys()),
                "sync_type": "sync",
            },
        )
        return response.json()
    @api_error_handler
    def get_memory_export(self, **kwargs) -> Dict[str, Any]:
        """Retrieve a previously created memory export."""
        response = self.client.post("/v1/exports/get/", json=self._prepare_params(kwargs))
        response.raise_for_status()
        capture_client_event(
            "client.get_memory_export",
            self,
            {"keys": list(kwargs.keys()), "sync_type": "sync"},
        )
        return response.json()
    @api_error_handler
    def get_summary(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Fetch a summary of memories, optionally narrowed by ``filters``."""
        response = self.client.post("/v1/summary/", json=self._prepare_params({"filters": filters}))
        response.raise_for_status()
        capture_client_event("client.get_summary", self, {"sync_type": "sync"})
        return response.json()
    @api_error_handler
    def get_project(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """Fetch project settings (deprecated; use ``client.project.get()``).

        Args:
            fields: Optional list of field names to restrict the response to.

        Raises:
            ValueError: If org_id and project_id are not both set.
        """
        logger.warning(
            "get_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.get() method instead."
        )
        if not (self.org_id and self.project_id):
            raise ValueError("org_id and project_id must be set to access instructions or categories")
        params = self._prepare_params({"fields": fields})
        response = self.client.get(
            f"/api/v1/orgs/organizations/{self.org_id}/projects/{self.project_id}/",
            params=params,
        )
        response.raise_for_status()
        capture_client_event(
            "client.get_project_details",
            self,
            {"fields": fields, "sync_type": "sync"},
        )
        return response.json()
    @api_error_handler
    def update_project(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
        version: Optional[str] = None,
        inclusion_prompt: Optional[str] = None,
        exclusion_prompt: Optional[str] = None,
        memory_depth: Optional[str] = None,
        usecase_setting: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Patch project settings (deprecated; use ``client.project.update()``).

        At least one keyword argument must be non-None; only provided fields
        are sent to the API.

        Raises:
            ValueError: If org_id/project_id are missing, or no field is given.
        """
        logger.warning(
            "update_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.update() method instead."
        )
        if not (self.org_id and self.project_id):
            raise ValueError("org_id and project_id must be set to update instructions or categories")
        if (
            custom_instructions is None
            and custom_categories is None
            and retrieval_criteria is None
            and enable_graph is None
            and version is None
            and inclusion_prompt is None
            and exclusion_prompt is None
            and memory_depth is None
            and usecase_setting is None
        ):
            raise ValueError(
                "Currently we only support updating custom_instructions or "
                "custom_categories or retrieval_criteria, so you must "
                "provide at least one of them"
            )
        payload = self._prepare_params(
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "version": version,
                "inclusion_prompt": inclusion_prompt,
                "exclusion_prompt": exclusion_prompt,
                "memory_depth": memory_depth,
                "usecase_setting": usecase_setting,
            }
        )
        response = self.client.patch(
            f"/api/v1/orgs/organizations/{self.org_id}/projects/{self.project_id}/",
            json=payload,
        )
        response.raise_for_status()
        capture_client_event(
            "client.update_project",
            self,
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "version": version,
                "inclusion_prompt": inclusion_prompt,
                "exclusion_prompt": exclusion_prompt,
                "memory_depth": memory_depth,
                "usecase_setting": usecase_setting,
                "sync_type": "sync",
            },
        )
        return response.json()
    def chat(self):
        """Chat endpoint placeholder; not implemented yet."""
        raise NotImplementedError("Chat is not implemented yet")
    @api_error_handler
    def get_webhooks(self, project_id: str) -> Dict[str, Any]:
        """List webhooks configured for a project."""
        # NOTE: path has no leading slash; httpx resolves it against base_url
        response = self.client.get(f"api/v1/webhooks/projects/{project_id}/")
        response.raise_for_status()
        capture_client_event("client.get_webhook", self, {"sync_type": "sync"})
        return response.json()
    @api_error_handler
    def create_webhook(self, url: str, name: str, project_id: str, event_types: List[str]) -> Dict[str, Any]:
        """Create a webhook on a project for the given event types."""
        payload = {"url": url, "name": name, "event_types": event_types}
        response = self.client.post(f"api/v1/webhooks/projects/{project_id}/", json=payload)
        response.raise_for_status()
        capture_client_event("client.create_webhook", self, {"sync_type": "sync"})
        return response.json()
    @api_error_handler
    def update_webhook(
        self,
        webhook_id: int,
        name: Optional[str] = None,
        url: Optional[str] = None,
        event_types: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Update a webhook; only non-None fields are sent."""
        payload = {k: v for k, v in {"name": name, "url": url, "event_types": event_types}.items() if v is not None}
        response = self.client.put(f"api/v1/webhooks/{webhook_id}/", json=payload)
        response.raise_for_status()
        capture_client_event("client.update_webhook", self, {"webhook_id": webhook_id, "sync_type": "sync"})
        return response.json()
    @api_error_handler
    def delete_webhook(self, webhook_id: int) -> Dict[str, str]:
        """Delete a webhook by its ID."""
        response = self.client.delete(f"api/v1/webhooks/{webhook_id}/")
        response.raise_for_status()
        capture_client_event(
            "client.delete_webhook",
            self,
            {"webhook_id": webhook_id, "sync_type": "sync"},
        )
        return response.json()
    @api_error_handler
    def feedback(
        self,
        memory_id: str,
        feedback: Optional[str] = None,
        feedback_reason: Optional[str] = None,
    ) -> Dict[str, str]:
        """Submit feedback for a memory.

        Args:
            memory_id: ID of the memory the feedback refers to.
            feedback: One of POSITIVE, NEGATIVE, VERY_NEGATIVE (case
                insensitive) or None.
            feedback_reason: Optional free-text explanation.

        Raises:
            ValueError: If ``feedback`` is not one of the allowed values.
        """
        VALID_FEEDBACK_VALUES = {"POSITIVE", "NEGATIVE", "VERY_NEGATIVE"}
        feedback = feedback.upper() if feedback else None
        if feedback is not None and feedback not in VALID_FEEDBACK_VALUES:
            raise ValueError(f"feedback must be one of {', '.join(VALID_FEEDBACK_VALUES)} or None")
        data = {
            "memory_id": memory_id,
            "feedback": feedback,
            "feedback_reason": feedback_reason,
        }
        response = self.client.post("/v1/feedback/", json=data)
        response.raise_for_status()
        # NOTE(review): passes two dicts where other call sites pass one --
        # confirm capture_client_event accepts this extra positional argument.
        capture_client_event("client.feedback", self, data, {"sync_type": "sync"})
        return response.json()
    def _prepare_payload(self, messages: List[Dict[str, str]], kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Build a request body from messages plus non-None kwargs."""
        payload = {}
        payload["messages"] = messages
        payload.update({k: v for k, v in kwargs.items() if v is not None})
        return payload
    def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Merge org/project IDs into ``kwargs`` and drop None values.

        Raises:
            ValueError: If only one of org_id/project_id is configured.
        """
        if kwargs is None:
            kwargs = {}
        # Add org_id and project_id if both are available
        if self.org_id and self.project_id:
            kwargs["org_id"] = self.org_id
            kwargs["project_id"] = self.project_id
        elif self.org_id or self.project_id:
            raise ValueError("Please provide both org_id and project_id")
        return {k: v for k, v in kwargs.items() if v is not None}
class AsyncMemoryClient:
    """Asynchronous client for the Mem0 REST API.

    Mirrors :class:`MemoryClient` with ``async``/``await`` semantics over an
    ``httpx.AsyncClient``. Can be used as an async context manager, which
    closes the underlying HTTP client on exit.

    Example:
        >>> async with AsyncMemoryClient(api_key="m0-...") as client:
        ...     await client.add("I like tea", user_id="alice")
        ...     await client.search("beverages", user_id="alice")
    """
    def __init__(
        self,
        api_key: Optional[str] = None,
        host: Optional[str] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        client: Optional[httpx.AsyncClient] = None,
    ):
        """Initialize the async client and validate the API key.

        Args:
            api_key: Mem0 API key; falls back to the ``MEM0_API_KEY`` env var.
            host: API base URL; defaults to ``https://api.mem0.ai``.
            org_id: Optional organization ID (must be paired with project_id).
            project_id: Optional project ID (must be paired with org_id).
            client: Optional pre-built ``httpx.AsyncClient``; its base_url and
                auth headers are overwritten to match this client's settings.

        Raises:
            ValueError: If no API key is available, or key validation fails.
        """
        self.api_key = api_key or os.getenv("MEM0_API_KEY")
        self.host = host or "https://api.mem0.ai"
        self.org_id = org_id
        self.project_id = project_id
        # NOTE(review): immediately overwritten by the MD5 hash below.
        self.user_id = get_user_id()
        if not self.api_key:
            raise ValueError("Mem0 API Key not provided. Please provide an API Key.")
        # Create MD5 hash of API key for user_id
        self.user_id = hashlib.md5(self.api_key.encode()).hexdigest()
        if client is not None:
            self.async_client = client
            # Ensure the client has the correct base_url and headers
            self.async_client.base_url = httpx.URL(self.host)
            self.async_client.headers.update(
                {
                    "Authorization": f"Token {self.api_key}",
                    "Mem0-User-ID": self.user_id,
                }
            )
        else:
            self.async_client = httpx.AsyncClient(
                base_url=self.host,
                headers={
                    "Authorization": f"Token {self.api_key}",
                    "Mem0-User-ID": self.user_id,
                },
                timeout=300,
            )
        # NOTE(review): validation is synchronous (requests-based) even in the
        # async client, so constructing this inside a running event loop blocks it.
        self.user_email = self._validate_api_key()
        # Initialize project manager
        self.project = AsyncProject(
            client=self.async_client,
            org_id=self.org_id,
            project_id=self.project_id,
            user_email=self.user_email,
        )
        capture_client_event("client.init", self, {"sync_type": "async"})
    def _validate_api_key(self):
        """Verify the API key via ``/v1/ping/`` (blocking ``requests`` call).

        Returns the account email and adopts server-provided org/project IDs.

        Raises:
            ValueError: If the ping request fails.
        """
        try:
            params = self._prepare_params()
            response = requests.get(
                f"{self.host}/v1/ping/",
                headers={
                    "Authorization": f"Token {self.api_key}",
                    "Mem0-User-ID": self.user_id,
                },
                params=params,
            )
            data = response.json()
            response.raise_for_status()
            if data.get("org_id") and data.get("project_id"):
                self.org_id = data.get("org_id")
                self.project_id = data.get("project_id")
            return data.get("user_email")
        except requests.exceptions.HTTPError as e:
            try:
                error_data = e.response.json()
                error_message = error_data.get("detail", str(e))
            except Exception:
                error_message = str(e)
            raise ValueError(f"Error: {error_message}")
    def _prepare_payload(self, messages: List[Dict[str, str]], kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Build a request body from messages plus non-None kwargs."""
        payload = {}
        payload["messages"] = messages
        payload.update({k: v for k, v in kwargs.items() if v is not None})
        return payload
    def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Merge org/project IDs into ``kwargs`` and drop None values.

        Raises:
            ValueError: If only one of org_id/project_id is configured.
        """
        if kwargs is None:
            kwargs = {}
        # Add org_id and project_id if both are available
        if self.org_id and self.project_id:
            kwargs["org_id"] = self.org_id
            kwargs["project_id"] = self.project_id
        elif self.org_id or self.project_id:
            raise ValueError("Please provide both org_id and project_id")
        return {k: v for k, v in kwargs.items() if v is not None}
    async def __aenter__(self):
        """Enter the async context; returns self."""
        return self
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the underlying ``httpx.AsyncClient`` on context exit."""
        await self.async_client.aclose()
    @api_error_handler
    async def add(self, messages, **kwargs) -> Dict[str, Any]:
        """Async variant of :meth:`MemoryClient.add`.

        Accepts a string, dict, or list of message dicts; always returns the
        v1.1 output format.

        Raises:
            ValueError: If ``messages`` is not a str, dict, or list.
        """
        # Handle different message input formats (align with OSS behavior)
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        elif isinstance(messages, dict):
            messages = [messages]
        elif not isinstance(messages, list):
            raise ValueError(
                f"messages must be str, dict, or list[dict], got {type(messages).__name__}"
            )
        kwargs = self._prepare_params(kwargs)
        # Set async_mode to True by default, but allow user override
        if "async_mode" not in kwargs:
            kwargs["async_mode"] = True
        # Force v1.1 format for all add operations
        kwargs["output_format"] = "v1.1"
        payload = self._prepare_payload(messages, kwargs)
        response = await self.async_client.post("/v1/memories/", json=payload)
        response.raise_for_status()
        if "metadata" in kwargs:
            del kwargs["metadata"]
        capture_client_event("client.add", self, {"keys": list(kwargs.keys()), "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def get(self, memory_id: str) -> Dict[str, Any]:
        """Fetch a single memory by its ID."""
        params = self._prepare_params()
        response = await self.async_client.get(f"/v1/memories/{memory_id}/", params=params)
        response.raise_for_status()
        capture_client_event("client.get", self, {"memory_id": memory_id, "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def get_all(self, **kwargs) -> Dict[str, Any]:
        """List memories matching filters (v2 API); supports page/page_size."""
        params = self._prepare_params(kwargs)
        params.pop("async_mode", None)
        # Pagination values travel as query params, everything else as body
        if "page" in params and "page_size" in params:
            query_params = {
                "page": params.pop("page"),
                "page_size": params.pop("page_size"),
            }
            response = await self.async_client.post("/v2/memories/", json=params, params=query_params)
        else:
            response = await self.async_client.post("/v2/memories/", json=params)
        response.raise_for_status()
        if "metadata" in kwargs:
            del kwargs["metadata"]
        capture_client_event(
            "client.get_all",
            self,
            {
                "api_version": "v2",
                "keys": list(kwargs.keys()),
                "sync_type": "async",
            },
        )
        result = response.json()
        # Ensure v1.1 format (wrap raw list if needed)
        if isinstance(result, list):
            return {"results": result}
        return result
    @api_error_handler
    async def search(self, query: str, **kwargs) -> Dict[str, Any]:
        """Search memories for ``query`` with optional filters (v2 API)."""
        payload = {"query": query}
        params = self._prepare_params(kwargs)
        params.pop("async_mode", None)
        payload.update(params)
        response = await self.async_client.post("/v2/memories/search/", json=payload)
        response.raise_for_status()
        if "metadata" in kwargs:
            del kwargs["metadata"]
        capture_client_event(
            "client.search",
            self,
            {
                "api_version": "v2",
                "keys": list(kwargs.keys()),
                "sync_type": "async",
            },
        )
        result = response.json()
        # Ensure v1.1 format (wrap raw list if needed)
        if isinstance(result, list):
            return {"results": result}
        return result
    @api_error_handler
    async def update(
        self,
        memory_id: str,
        text: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        timestamp: Optional[Union[int, float, str]] = None,
    ) -> Dict[str, Any]:
        """Update a memory's text, metadata, and/or timestamp.

        Raises:
            ValueError: If none of text, metadata, or timestamp is given.
        """
        if text is None and metadata is None and timestamp is None:
            raise ValueError("At least one of text, metadata, or timestamp must be provided for update.")
        # Only include the fields that were actually supplied
        payload = {}
        if text is not None:
            payload["text"] = text
        if metadata is not None:
            payload["metadata"] = metadata
        if timestamp is not None:
            payload["timestamp"] = timestamp
        capture_client_event("client.update", self, {"memory_id": memory_id, "sync_type": "async"})
        params = self._prepare_params()
        response = await self.async_client.put(f"/v1/memories/{memory_id}/", json=payload, params=params)
        response.raise_for_status()
        return response.json()
    @api_error_handler
    async def delete(self, memory_id: str) -> Dict[str, Any]:
        """Delete a single memory by its ID."""
        params = self._prepare_params()
        response = await self.async_client.delete(f"/v1/memories/{memory_id}/", params=params)
        response.raise_for_status()
        capture_client_event("client.delete", self, {"memory_id": memory_id, "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def delete_all(self, **kwargs) -> Dict[str, str]:
        """Delete all memories matching the given filters."""
        params = self._prepare_params(kwargs)
        response = await self.async_client.delete("/v1/memories/", params=params)
        response.raise_for_status()
        capture_client_event("client.delete_all", self, {"keys": list(kwargs.keys()), "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def history(self, memory_id: str) -> List[Dict[str, Any]]:
        """Return the change history of a memory."""
        params = self._prepare_params()
        response = await self.async_client.get(f"/v1/memories/{memory_id}/history/", params=params)
        response.raise_for_status()
        capture_client_event("client.history", self, {"memory_id": memory_id, "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def users(self) -> Dict[str, Any]:
        """List all entities (users, agents, apps, runs) in the project."""
        params = self._prepare_params()
        response = await self.async_client.get("/v1/entities/", params=params)
        response.raise_for_status()
        capture_client_event("client.users", self, {"sync_type": "async"})
        return response.json()
    @api_error_handler
    async def delete_users(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        app_id: Optional[str] = None,
        run_id: Optional[str] = None,
    ) -> Dict[str, str]:
        """Delete one entity, or every entity when no ID is supplied.

        Targets are checked in order: user_id, agent_id, app_id, run_id.

        Raises:
            ValueError: If there are no entities to delete.
        """
        if user_id:
            to_delete = [{"type": "user", "name": user_id}]
        elif agent_id:
            to_delete = [{"type": "agent", "name": agent_id}]
        elif app_id:
            to_delete = [{"type": "app", "name": app_id}]
        elif run_id:
            to_delete = [{"type": "run", "name": run_id}]
        else:
            entities = await self.users()
            # Filter entities based on provided IDs using list comprehension
            to_delete = [{"type": entity["type"], "name": entity["name"]} for entity in entities["results"]]
        params = self._prepare_params()
        if not to_delete:
            raise ValueError("No entities to delete")
        # Delete entities and check response immediately
        for entity in to_delete:
            response = await self.async_client.delete(f"/v2/entities/{entity['type']}/{entity['name']}/", params=params)
            response.raise_for_status()
        capture_client_event(
            "client.delete_users",
            self,
            {
                "user_id": user_id,
                "agent_id": agent_id,
                "app_id": app_id,
                "run_id": run_id,
                "sync_type": "async",
            },
        )
        return {
            "message": "Entity deleted successfully."
            if (user_id or agent_id or app_id or run_id)
            else "All users, agents, apps and runs deleted."
        }
    @api_error_handler
    async def reset(self) -> Dict[str, str]:
        """Delete every entity (and all memories) for this client."""
        await self.delete_users()
        capture_client_event("client.reset", self, {"sync_type": "async"})
        return {"message": "Client reset successful. All users and memories deleted."}
    @api_error_handler
    async def batch_update(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Update multiple memories in a single request."""
        response = await self.async_client.put("/v1/batch/", json={"memories": memories})
        response.raise_for_status()
        capture_client_event("client.batch_update", self, {"sync_type": "async"})
        return response.json()
    @api_error_handler
    async def batch_delete(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Delete multiple memories in a single request.

        Uses ``request("DELETE", ...)`` because the ``delete`` shortcut does
        not accept a JSON body.
        """
        response = await self.async_client.request("DELETE", "/v1/batch/", json={"memories": memories})
        response.raise_for_status()
        capture_client_event("client.batch_delete", self, {"sync_type": "async"})
        return response.json()
    @api_error_handler
    async def create_memory_export(self, schema: str, **kwargs) -> Dict[str, Any]:
        """Start a memory export job using the given schema."""
        response = await self.async_client.post("/v1/exports/", json={"schema": schema, **self._prepare_params(kwargs)})
        response.raise_for_status()
        capture_client_event(
            "client.create_memory_export", self, {"schema": schema, "keys": list(kwargs.keys()), "sync_type": "async"}
        )
        return response.json()
    @api_error_handler
    async def get_memory_export(self, **kwargs) -> Dict[str, Any]:
        """Retrieve a previously created memory export."""
        response = await self.async_client.post("/v1/exports/get/", json=self._prepare_params(kwargs))
        response.raise_for_status()
        capture_client_event("client.get_memory_export", self, {"keys": list(kwargs.keys()), "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def get_summary(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Fetch a summary of memories, optionally narrowed by ``filters``."""
        response = await self.async_client.post("/v1/summary/", json=self._prepare_params({"filters": filters}))
        response.raise_for_status()
        capture_client_event("client.get_summary", self, {"sync_type": "async"})
        return response.json()
    @api_error_handler
    async def get_project(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """Fetch project settings (deprecated; use ``client.project.get()``).

        Raises:
            ValueError: If org_id and project_id are not both set.
        """
        logger.warning(
            "get_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.get() method instead."
        )
        if not (self.org_id and self.project_id):
            raise ValueError("org_id and project_id must be set to access instructions or categories")
        params = self._prepare_params({"fields": fields})
        response = await self.async_client.get(
            f"/api/v1/orgs/organizations/{self.org_id}/projects/{self.project_id}/",
            params=params,
        )
        response.raise_for_status()
        capture_client_event("client.get_project", self, {"fields": fields, "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def update_project(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
        version: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Patch project settings (deprecated; use ``client.project.update()``).

        NOTE(review): supports fewer fields than the sync
        ``MemoryClient.update_project`` (no inclusion/exclusion prompts,
        memory_depth, usecase_setting) -- confirm whether this is intentional.

        Raises:
            ValueError: If org_id/project_id are missing, or no field is given.
        """
        logger.warning(
            "update_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.update() method instead."
        )
        if not (self.org_id and self.project_id):
            raise ValueError("org_id and project_id must be set to update instructions or categories")
        if (
            custom_instructions is None
            and custom_categories is None
            and retrieval_criteria is None
            and enable_graph is None
            and version is None
        ):
            raise ValueError(
                "Currently we only support updating custom_instructions or custom_categories or retrieval_criteria, so you must provide at least one of them"
            )
        payload = self._prepare_params(
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "version": version,
            }
        )
        response = await self.async_client.patch(
            f"/api/v1/orgs/organizations/{self.org_id}/projects/{self.project_id}/",
            json=payload,
        )
        response.raise_for_status()
        capture_client_event(
            "client.update_project",
            self,
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "version": version,
                "sync_type": "async",
            },
        )
        return response.json()
    async def chat(self):
        """Chat endpoint placeholder; not implemented yet."""
        raise NotImplementedError("Chat is not implemented yet")
    @api_error_handler
    async def get_webhooks(self, project_id: str) -> Dict[str, Any]:
        """List webhooks configured for a project."""
        # NOTE: path has no leading slash; httpx resolves it against base_url
        response = await self.async_client.get(f"api/v1/webhooks/projects/{project_id}/")
        response.raise_for_status()
        capture_client_event("client.get_webhook", self, {"sync_type": "async"})
        return response.json()
    @api_error_handler
    async def create_webhook(self, url: str, name: str, project_id: str, event_types: List[str]) -> Dict[str, Any]:
        """Create a webhook on a project for the given event types."""
        payload = {"url": url, "name": name, "event_types": event_types}
        response = await self.async_client.post(f"api/v1/webhooks/projects/{project_id}/", json=payload)
        response.raise_for_status()
        capture_client_event("client.create_webhook", self, {"sync_type": "async"})
        return response.json()
    @api_error_handler
    async def update_webhook(
        self,
        webhook_id: int,
        name: Optional[str] = None,
        url: Optional[str] = None,
        event_types: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Update a webhook; only non-None fields are sent."""
        payload = {k: v for k, v in {"name": name, "url": url, "event_types": event_types}.items() if v is not None}
        response = await self.async_client.put(f"api/v1/webhooks/{webhook_id}/", json=payload)
        response.raise_for_status()
        capture_client_event("client.update_webhook", self, {"webhook_id": webhook_id, "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def delete_webhook(self, webhook_id: int) -> Dict[str, str]:
        """Delete a webhook by its ID."""
        response = await self.async_client.delete(f"api/v1/webhooks/{webhook_id}/")
        response.raise_for_status()
        capture_client_event("client.delete_webhook", self, {"webhook_id": webhook_id, "sync_type": "async"})
        return response.json()
    @api_error_handler
    async def feedback(
        self, memory_id: str, feedback: Optional[str] = None, feedback_reason: Optional[str] = None
    ) -> Dict[str, str]:
        """Submit feedback (POSITIVE/NEGATIVE/VERY_NEGATIVE) for a memory.

        Raises:
            ValueError: If ``feedback`` is not one of the allowed values.
        """
        VALID_FEEDBACK_VALUES = {"POSITIVE", "NEGATIVE", "VERY_NEGATIVE"}
        feedback = feedback.upper() if feedback else None
        if feedback is not None and feedback not in VALID_FEEDBACK_VALUES:
            raise ValueError(f"feedback must be one of {', '.join(VALID_FEEDBACK_VALUES)} or None")
        data = {"memory_id": memory_id, "feedback": feedback, "feedback_reason": feedback_reason}
        response = await self.async_client.post("/v1/feedback/", json=data)
        response.raise_for_status()
        # NOTE(review): passes two dicts where other call sites pass one --
        # confirm capture_client_event accepts this extra positional argument.
        capture_client_event("client.feedback", self, data, {"sync_type": "async"})
        return response.json()
class MemoryClient:
+ """Client for interacting with the Mem0 API.
+
+ This class provides methods to create, retrieve, search, and delete
+ memories using the Mem0 API.
+
+ Attributes:
+ api_key (str): The API key for authenticating with the Mem0 API.
+ host (str): The base URL for the Mem0 API.
+ client (httpx.Client): The HTTP client used for making API requests.
+ org_id (str, optional): Organization ID.
+ project_id (str, optional): Project ID.
+ user_id (str): Unique identifier for the user.
+ """
def __init__(
self,
@@ -31,6 +44,23 @@ project_id: Optional[str] = None,
client: Optional[httpx.Client] = None,
):
+ """Initialize the MemoryClient.
+
+ Args:
+ api_key: The API key for authenticating with the Mem0 API. If not
+ provided, it will attempt to use the MEM0_API_KEY
+ environment variable.
+ host: The base URL for the Mem0 API. Defaults to
+ "https://api.mem0.ai".
+ org_id: The ID of the organization.
+ project_id: The ID of the project.
+ client: A custom httpx.Client instance. If provided, it will be
+ used instead of creating a new one. Note that base_url and
+ headers will be set/overridden as needed.
+
+ Raises:
+ ValueError: If no API key is provided or found in the environment.
+ """
self.api_key = api_key or os.getenv("MEM0_API_KEY")
self.host = host or "https://api.mem0.ai"
self.org_id = org_id
@@ -75,6 +105,7 @@ capture_client_event("client.init", self, {"sync_type": "sync"})
def _validate_api_key(self):
+ """Validate the API key by making a test request."""
try:
params = self._prepare_params()
response = self.client.get("/v1/ping/", params=params)
@@ -98,6 +129,26 @@
@api_error_handler
def add(self, messages, **kwargs) -> Dict[str, Any]:
+ """Add a new memory.
+
+ Args:
+ messages: A list of message dictionaries, a single message dictionary,
+ or a string. If a string is provided, it will be converted to
+ a user message.
+ **kwargs: Additional parameters such as user_id, agent_id, app_id,
+ metadata, filters, async_mode.
+
+ Returns:
+ A dictionary containing the API response in v1.1 format.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
# Handle different message input formats (align with OSS behavior)
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
@@ -126,6 +177,22 @@
@api_error_handler
def get(self, memory_id: str) -> Dict[str, Any]:
+ """Retrieve a specific memory by ID.
+
+ Args:
+ memory_id: The ID of the memory to retrieve.
+
+ Returns:
+ A dictionary containing the memory data.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params()
response = self.client.get(f"/v1/memories/{memory_id}/", params=params)
response.raise_for_status()
@@ -134,6 +201,23 @@
@api_error_handler
def get_all(self, **kwargs) -> Dict[str, Any]:
+ """Retrieve all memories, with optional filtering.
+
+ Args:
+ **kwargs: Optional parameters for filtering (user_id, agent_id,
+ app_id, top_k, page, page_size).
+
+ Returns:
+ A dictionary containing memories in v1.1 format: {"results": [...]}
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params(kwargs)
params.pop("async_mode", None)
@@ -166,6 +250,24 @@
@api_error_handler
def search(self, query: str, **kwargs) -> Dict[str, Any]:
+ """Search memories based on a query.
+
+ Args:
+ query: The search query string.
+ **kwargs: Additional parameters such as user_id, agent_id, app_id,
+ top_k, filters.
+
+ Returns:
+ A dictionary containing search results in v1.1 format: {"results": [...]}
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
payload = {"query": query}
params = self._prepare_params(kwargs)
params.pop("async_mode", None)
@@ -200,6 +302,22 @@ metadata: Optional[Dict[str, Any]] = None,
timestamp: Optional[Union[int, float, str]] = None,
) -> Dict[str, Any]:
+ """
+ Update a memory by ID.
+
+ Args:
+ memory_id (str): Memory ID.
+ text (str, optional): New content to update the memory with.
+ metadata (dict, optional): Metadata to update in the memory.
+ timestamp (int, float, or str, optional): Unix epoch timestamp or ISO 8601 string.
+
+ Returns:
+ Dict[str, Any]: The response from the server.
+
+ Example:
+ >>> client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
+ >>> client.update(memory_id="mem_123", timestamp="2025-01-15T12:00:00Z")
+ """
if text is None and metadata is None and timestamp is None:
raise ValueError("At least one of text, metadata, or timestamp must be provided for update.")
@@ -219,6 +337,22 @@
@api_error_handler
def delete(self, memory_id: str) -> Dict[str, Any]:
+ """Delete a specific memory by ID.
+
+ Args:
+ memory_id: The ID of the memory to delete.
+
+ Returns:
+ A dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params()
response = self.client.delete(f"/v1/memories/{memory_id}/", params=params)
response.raise_for_status()
@@ -227,6 +361,23 @@
@api_error_handler
def delete_all(self, **kwargs) -> Dict[str, str]:
+ """Delete all memories, with optional filtering.
+
+ Args:
+ **kwargs: Optional parameters for filtering (user_id, agent_id,
+ app_id).
+
+ Returns:
+ A dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params(kwargs)
response = self.client.delete("/v1/memories/", params=params)
response.raise_for_status()
@@ -239,6 +390,22 @@
@api_error_handler
def history(self, memory_id: str) -> List[Dict[str, Any]]:
+ """Retrieve the history of a specific memory.
+
+ Args:
+ memory_id: The ID of the memory to retrieve history for.
+
+ Returns:
+ A list of dictionaries containing the memory history.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params()
response = self.client.get(f"/v1/memories/{memory_id}/history/", params=params)
response.raise_for_status()
@@ -247,6 +414,7 @@
@api_error_handler
def users(self) -> Dict[str, Any]:
+ """Get all users, agents, and sessions for which memories exist."""
params = self._prepare_params()
response = self.client.get("/v1/entities/", params=params)
response.raise_for_status()
@@ -261,6 +429,24 @@ app_id: Optional[str] = None,
run_id: Optional[str] = None,
) -> Dict[str, str]:
+ """Delete specific entities or all entities if no filters provided.
+
+ Args:
+ user_id: Optional user ID to delete specific user
+ agent_id: Optional agent ID to delete specific agent
+ app_id: Optional app ID to delete specific app
+ run_id: Optional run ID to delete specific run
+
+ Returns:
+ Dict with success message
+
+ Raises:
+ ValueError: If specified entity not found
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ MemoryNotFoundError: If the entity doesn't exist.
+ NetworkError: If network connectivity issues occur.
+ """
if user_id:
to_delete = [{"type": "user", "name": user_id}]
@@ -304,6 +490,22 @@
@api_error_handler
def reset(self) -> Dict[str, str]:
+ """Reset the client by deleting all users and memories.
+
+ This method deletes all users, agents, sessions, and memories
+ associated with the client.
+
+ Returns:
+ Dict[str, str]: Message client reset successful.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
self.delete_users()
capture_client_event("client.reset", self, {"sync_type": "sync"})
@@ -311,6 +513,25 @@
@api_error_handler
def batch_update(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """Batch update memories.
+
+ Args:
+ memories: List of memory dictionaries to update. Each dictionary must contain:
+ - memory_id (str): ID of the memory to update
+ - text (str, optional): New text content for the memory
+ - metadata (dict, optional): New metadata for the memory
+
+ Returns:
+ Dict[str, Any]: The response from the server.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
response = self.client.put("/v1/batch/", json={"memories": memories})
response.raise_for_status()
@@ -319,6 +540,24 @@
@api_error_handler
def batch_delete(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """Batch delete memories.
+
+ Args:
+ memories: List of memory dictionaries to delete. Each dictionary
+ must contain:
+ - memory_id (str): ID of the memory to delete
+
+ Returns:
+ Dict[str, Any]: Message indicating the success of the batch deletion.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
response = self.client.request("DELETE", "/v1/batch/", json={"memories": memories})
response.raise_for_status()
@@ -327,6 +566,15 @@
@api_error_handler
def create_memory_export(self, schema: str, **kwargs) -> Dict[str, Any]:
+ """Create a memory export with the provided schema.
+
+ Args:
+ schema: JSON schema defining the export structure
+ **kwargs: Optional filters like user_id, run_id, etc.
+
+ Returns:
+ Dict containing export request ID and status message
+ """
response = self.client.post(
"/v1/exports/",
json={"schema": schema, **self._prepare_params(kwargs)},
@@ -345,6 +593,14 @@
@api_error_handler
def get_memory_export(self, **kwargs) -> Dict[str, Any]:
+ """Get a memory export.
+
+ Args:
+ **kwargs: Filters like user_id to get specific export
+
+ Returns:
+ Dict containing the exported data
+ """
response = self.client.post("/v1/exports/get/", json=self._prepare_params(kwargs))
response.raise_for_status()
capture_client_event(
@@ -356,6 +612,14 @@
@api_error_handler
def get_summary(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ """Get the summary of a memory export.
+
+ Args:
+ filters: Optional filters to apply to the summary request
+
+ Returns:
+ Dict containing the export status and summary data
+ """
response = self.client.post("/v1/summary/", json=self._prepare_params({"filters": filters}))
response.raise_for_status()
@@ -364,6 +628,23 @@
@api_error_handler
def get_project(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
+ """Get instructions or categories for the current project.
+
+ Args:
+ fields: List of fields to retrieve
+
+ Returns:
+ Dictionary containing the requested fields.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If org_id or project_id are not set.
+ """
logger.warning(
"get_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.get() method instead."
)
@@ -396,6 +677,31 @@ memory_depth: Optional[str] = None,
usecase_setting: Optional[str] = None,
) -> Dict[str, Any]:
+ """Update the project settings.
+
+ Args:
+ custom_instructions: New instructions for the project
+ custom_categories: New categories for the project
+ retrieval_criteria: New retrieval criteria for the project
+ enable_graph: Enable or disable the graph for the project
+ version: Version of the project
+ inclusion_prompt: Inclusion prompt for the project
+ exclusion_prompt: Exclusion prompt for the project
+ memory_depth: Memory depth for the project
+ usecase_setting: Usecase setting for the project
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If org_id or project_id are not set.
+ """
logger.warning(
"update_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.update() method instead."
)
@@ -456,10 +762,32 @@ return response.json()
def chat(self):
+ """Start a chat with the Mem0 AI. (Not implemented)
+
+ Raises:
+ NotImplementedError: This method is not implemented yet.
+ """
raise NotImplementedError("Chat is not implemented yet")
@api_error_handler
def get_webhooks(self, project_id: str) -> Dict[str, Any]:
+ """Get webhooks configuration for the project.
+
+ Args:
+ project_id: The ID of the project to get webhooks for.
+
+ Returns:
+ Dictionary containing webhook details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If project_id is not set.
+ """
response = self.client.get(f"api/v1/webhooks/projects/{project_id}/")
response.raise_for_status()
@@ -468,6 +796,25 @@
@api_error_handler
def create_webhook(self, url: str, name: str, project_id: str, event_types: List[str]) -> Dict[str, Any]:
+ """Create a webhook for the current project.
+
+ Args:
+ url: The URL to send the webhook to.
+ name: The name of the webhook.
+ event_types: List of event types to trigger the webhook for.
+
+ Returns:
+ Dictionary containing the created webhook details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If project_id is not set.
+ """
payload = {"url": url, "name": name, "event_types": event_types}
response = self.client.post(f"api/v1/webhooks/projects/{project_id}/", json=payload)
@@ -483,6 +830,25 @@ url: Optional[str] = None,
event_types: Optional[List[str]] = None,
) -> Dict[str, Any]:
+ """Update a webhook configuration.
+
+ Args:
+ webhook_id: ID of the webhook to update
+ name: Optional new name for the webhook
+ url: Optional new URL for the webhook
+ event_types: Optional list of event types to trigger the webhook for.
+
+ Returns:
+ Dictionary containing the updated webhook details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
payload = {k: v for k, v in {"name": name, "url": url, "event_types": event_types}.items() if v is not None}
response = self.client.put(f"api/v1/webhooks/{webhook_id}/", json=payload)
@@ -492,6 +858,22 @@
@api_error_handler
def delete_webhook(self, webhook_id: int) -> Dict[str, str]:
+ """Delete a webhook configuration.
+
+ Args:
+ webhook_id: ID of the webhook to delete
+
+ Returns:
+ Dictionary containing success message.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
response = self.client.delete(f"api/v1/webhooks/{webhook_id}/")
response.raise_for_status()
@@ -527,6 +909,15 @@ return response.json()
def _prepare_payload(self, messages: List[Dict[str, str]], kwargs: Dict[str, Any]) -> Dict[str, Any]:
+ """Prepare the payload for API requests.
+
+ Args:
+ messages: The messages to include in the payload.
+ kwargs: Additional keyword arguments to include in the payload.
+
+ Returns:
+ A dictionary containing the prepared payload.
+ """
payload = {}
payload["messages"] = messages
@@ -534,6 +925,17 @@ return payload
def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ """Prepare query parameters for API requests.
+
+ Args:
+ kwargs: Keyword arguments to include in the parameters.
+
+ Returns:
+ A dictionary containing the prepared parameters.
+
+ Raises:
+ ValueError: If either org_id or project_id is provided but not both.
+ """
if kwargs is None:
kwargs = {}
@@ -549,6 +951,11 @@
class AsyncMemoryClient:
+ """Asynchronous client for interacting with the Mem0 API.
+
+ This class provides asynchronous versions of all MemoryClient methods.
+ It uses httpx.AsyncClient for making non-blocking API requests.
+ """
def __init__(
self,
@@ -558,6 +965,23 @@ project_id: Optional[str] = None,
client: Optional[httpx.AsyncClient] = None,
):
+ """Initialize the AsyncMemoryClient.
+
+ Args:
+ api_key: The API key for authenticating with the Mem0 API. If not
+ provided, it will attempt to use the MEM0_API_KEY
+ environment variable.
+ host: The base URL for the Mem0 API. Defaults to
+ "https://api.mem0.ai".
+ org_id: The ID of the organization.
+ project_id: The ID of the project.
+ client: A custom httpx.AsyncClient instance. If provided, it will
+ be used instead of creating a new one. Note that base_url
+ and headers will be set/overridden as needed.
+
+ Raises:
+ ValueError: If no API key is provided or found in the environment.
+ """
self.api_key = api_key or os.getenv("MEM0_API_KEY")
self.host = host or "https://api.mem0.ai"
self.org_id = org_id
@@ -603,6 +1027,7 @@ capture_client_event("client.init", self, {"sync_type": "async"})
def _validate_api_key(self):
+ """Validate the API key by making a test request."""
try:
params = self._prepare_params()
response = requests.get(
@@ -632,6 +1057,15 @@ raise ValueError(f"Error: {error_message}")
def _prepare_payload(self, messages: List[Dict[str, str]], kwargs: Dict[str, Any]) -> Dict[str, Any]:
+ """Prepare the payload for API requests.
+
+ Args:
+ messages: The messages to include in the payload.
+ kwargs: Additional keyword arguments to include in the payload.
+
+ Returns:
+ A dictionary containing the prepared payload.
+ """
payload = {}
payload["messages"] = messages
@@ -639,6 +1073,17 @@ return payload
def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ """Prepare query parameters for API requests.
+
+ Args:
+ kwargs: Keyword arguments to include in the parameters.
+
+ Returns:
+ A dictionary containing the prepared parameters.
+
+ Raises:
+ ValueError: If either org_id or project_id is provided but not both.
+ """
if kwargs is None:
kwargs = {}
@@ -762,6 +1207,22 @@ metadata: Optional[Dict[str, Any]] = None,
timestamp: Optional[Union[int, float, str]] = None,
) -> Dict[str, Any]:
+ """
+ Update a memory by ID asynchronously.
+
+ Args:
+ memory_id (str): Memory ID.
+ text (str, optional): New content to update the memory with.
+ metadata (dict, optional): Metadata to update in the memory.
+ timestamp (int, float, or str, optional): Unix epoch timestamp or ISO 8601 string.
+
+ Returns:
+ Dict[str, Any]: The response from the server.
+
+ Example:
+ >>> await client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
+ >>> await client.update(memory_id="mem_123", timestamp="2025-01-15T12:00:00Z")
+ """
if text is None and metadata is None and timestamp is None:
raise ValueError("At least one of text, metadata, or timestamp must be provided for update.")
@@ -781,6 +1242,22 @@
@api_error_handler
async def delete(self, memory_id: str) -> Dict[str, Any]:
+ """Delete a specific memory by ID.
+
+ Args:
+ memory_id: The ID of the memory to delete.
+
+ Returns:
+ A dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params()
response = await self.async_client.delete(f"/v1/memories/{memory_id}/", params=params)
response.raise_for_status()
@@ -789,6 +1266,22 @@
@api_error_handler
async def delete_all(self, **kwargs) -> Dict[str, str]:
+ """Delete all memories, with optional filtering.
+
+ Args:
+ **kwargs: Optional parameters for filtering (user_id, agent_id, app_id).
+
+ Returns:
+ A dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params(kwargs)
response = await self.async_client.delete("/v1/memories/", params=params)
response.raise_for_status()
@@ -797,6 +1290,22 @@
@api_error_handler
async def history(self, memory_id: str) -> List[Dict[str, Any]]:
+ """Retrieve the history of a specific memory.
+
+ Args:
+ memory_id: The ID of the memory to retrieve history for.
+
+ Returns:
+ A list of dictionaries containing the memory history.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
params = self._prepare_params()
response = await self.async_client.get(f"/v1/memories/{memory_id}/history/", params=params)
response.raise_for_status()
@@ -805,6 +1314,7 @@
@api_error_handler
async def users(self) -> Dict[str, Any]:
+ """Get all users, agents, and sessions for which memories exist."""
params = self._prepare_params()
response = await self.async_client.get("/v1/entities/", params=params)
response.raise_for_status()
@@ -819,6 +1329,24 @@ app_id: Optional[str] = None,
run_id: Optional[str] = None,
) -> Dict[str, str]:
+ """Delete specific entities or all entities if no filters provided.
+
+ Args:
+ user_id: Optional user ID to delete specific user
+ agent_id: Optional agent ID to delete specific agent
+ app_id: Optional app ID to delete specific app
+ run_id: Optional run ID to delete specific run
+
+ Returns:
+ Dict with success message
+
+ Raises:
+ ValueError: If specified entity not found
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ MemoryNotFoundError: If the entity doesn't exist.
+ NetworkError: If network connectivity issues occur.
+ """
if user_id:
to_delete = [{"type": "user", "name": user_id}]
@@ -862,12 +1390,47 @@
@api_error_handler
async def reset(self) -> Dict[str, str]:
+ """Reset the client by deleting all users and memories.
+
+ This method deletes all users, agents, sessions, and memories
+ associated with the client.
+
+ Returns:
+ Dict[str, str]: Message client reset successful.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
await self.delete_users()
capture_client_event("client.reset", self, {"sync_type": "async"})
return {"message": "Client reset successful. All users and memories deleted."}
@api_error_handler
async def batch_update(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """Batch update memories.
+
+ Args:
+ memories: List of memory dictionaries to update. Each dictionary must contain:
+ - memory_id (str): ID of the memory to update
+ - text (str, optional): New text content for the memory
+ - metadata (dict, optional): New metadata for the memory
+
+ Returns:
+ Dict[str, Any]: The response from the server.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
response = await self.async_client.put("/v1/batch/", json={"memories": memories})
response.raise_for_status()
@@ -876,6 +1439,24 @@
@api_error_handler
async def batch_delete(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """Batch delete memories.
+
+ Args:
+ memories: List of memory dictionaries to delete. Each dictionary
+ must contain:
+ - memory_id (str): ID of the memory to delete
+
+ Returns:
+ Dict[str, Any]: Message indicating the success of the batch deletion.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
response = await self.async_client.request("DELETE", "/v1/batch/", json={"memories": memories})
response.raise_for_status()
@@ -884,6 +1465,15 @@
@api_error_handler
async def create_memory_export(self, schema: str, **kwargs) -> Dict[str, Any]:
+ """Create a memory export with the provided schema.
+
+ Args:
+ schema: JSON schema defining the export structure
+ **kwargs: Optional filters like user_id, run_id, etc.
+
+ Returns:
+ Dict containing export request ID and status message
+ """
response = await self.async_client.post("/v1/exports/", json={"schema": schema, **self._prepare_params(kwargs)})
response.raise_for_status()
capture_client_event(
@@ -893,6 +1483,14 @@
@api_error_handler
async def get_memory_export(self, **kwargs) -> Dict[str, Any]:
+ """Get a memory export.
+
+ Args:
+ **kwargs: Filters like user_id to get specific export
+
+ Returns:
+ Dict containing the exported data
+ """
response = await self.async_client.post("/v1/exports/get/", json=self._prepare_params(kwargs))
response.raise_for_status()
capture_client_event("client.get_memory_export", self, {"keys": list(kwargs.keys()), "sync_type": "async"})
@@ -900,6 +1498,14 @@
@api_error_handler
async def get_summary(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ """Get the summary of a memory export.
+
+ Args:
+ filters: Optional filters to apply to the summary request
+
+ Returns:
+ Dict containing the export status and summary data
+ """
response = await self.async_client.post("/v1/summary/", json=self._prepare_params({"filters": filters}))
response.raise_for_status()
@@ -908,6 +1514,23 @@
@api_error_handler
async def get_project(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
+ """Get instructions or categories for the current project.
+
+ Args:
+ fields: List of fields to retrieve
+
+ Returns:
+ Dictionary containing the requested fields.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If org_id or project_id are not set.
+ """
logger.warning(
"get_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.get() method instead."
)
@@ -932,6 +1555,27 @@ enable_graph: Optional[bool] = None,
version: Optional[str] = None,
) -> Dict[str, Any]:
+ """Update the project settings.
+
+ Args:
+ custom_instructions: New instructions for the project
+ custom_categories: New categories for the project
+ retrieval_criteria: New retrieval criteria for the project
+ enable_graph: Enable or disable the graph for the project
+ version: Version of the project
+
+ Returns:
+ Dictionary containing the API response.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If org_id or project_id are not set.
+ """
logger.warning(
"update_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.update() method instead."
)
@@ -978,10 +1622,32 @@ return response.json()
async def chat(self):
+ """Start a chat with the Mem0 AI. (Not implemented)
+
+ Raises:
+ NotImplementedError: This method is not implemented yet.
+ """
raise NotImplementedError("Chat is not implemented yet")
@api_error_handler
async def get_webhooks(self, project_id: str) -> Dict[str, Any]:
+ """Get webhooks configuration for the project.
+
+ Args:
+ project_id: The ID of the project to get webhooks for.
+
+ Returns:
+ Dictionary containing webhook details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If project_id is not set.
+ """
response = await self.async_client.get(f"api/v1/webhooks/projects/{project_id}/")
response.raise_for_status()
@@ -990,6 +1656,25 @@
@api_error_handler
async def create_webhook(self, url: str, name: str, project_id: str, event_types: List[str]) -> Dict[str, Any]:
+ """Create a webhook for the current project.
+
+ Args:
+ url: The URL to send the webhook to.
+ name: The name of the webhook.
+ event_types: List of event types to trigger the webhook for.
+
+ Returns:
+ Dictionary containing the created webhook details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ ValueError: If project_id is not set.
+ """
payload = {"url": url, "name": name, "event_types": event_types}
response = await self.async_client.post(f"api/v1/webhooks/projects/{project_id}/", json=payload)
@@ -1005,6 +1690,25 @@ url: Optional[str] = None,
event_types: Optional[List[str]] = None,
) -> Dict[str, Any]:
+ """Update a webhook configuration.
+
+ Args:
+ webhook_id: ID of the webhook to update
+ name: Optional new name for the webhook
+ url: Optional new URL for the webhook
+ event_types: Optional list of event types to trigger the webhook for.
+
+ Returns:
+ Dictionary containing the updated webhook details.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
payload = {k: v for k, v in {"name": name, "url": url, "event_types": event_types}.items() if v is not None}
response = await self.async_client.put(f"api/v1/webhooks/{webhook_id}/", json=payload)
@@ -1014,6 +1718,22 @@
@api_error_handler
async def delete_webhook(self, webhook_id: int) -> Dict[str, str]:
+ """Delete a webhook configuration.
+
+ Args:
+ webhook_id: ID of the webhook to delete
+
+ Returns:
+ Dictionary containing success message.
+
+ Raises:
+ ValidationError: If the input data is invalid.
+ AuthenticationError: If authentication fails.
+ RateLimitError: If rate limits are exceeded.
+ MemoryQuotaExceededError: If memory quota is exceeded.
+ NetworkError: If network connectivity issues occur.
+ MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
+ """
response = await self.async_client.delete(f"api/v1/webhooks/{webhook_id}/")
response.raise_for_status()
@@ -1035,4 +1755,4 @@ response = await self.async_client.post("/v1/feedback/", json=data)
response.raise_for_status()
capture_client_event("client.feedback", self, data, {"sync_type": "async"})
- return response.json()+ return response.json()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/client/main.py |
Replace inline comments with docstrings | from typing import Optional
from mem0.configs.llms.base import BaseLlmConfig
class AnthropicConfig(BaseLlmConfig):
def __init__(
self,
# Base parameters
model: Optional[str] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[dict] = None,
# Anthropic-specific parameters
anthropic_base_url: Optional[str] = None,
):
# Initialize base parameters
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
enable_vision=enable_vision,
vision_details=vision_details,
http_client_proxies=http_client_proxies,
)
# Anthropic-specific parameters
self.anthropic_base_url = anthropic_base_url | --- +++ @@ -4,6 +4,10 @@
class AnthropicConfig(BaseLlmConfig):
+ """
+ Configuration class for Anthropic-specific parameters.
+ Inherits from BaseLlmConfig and adds Anthropic-specific settings.
+ """
def __init__(
self,
@@ -20,6 +24,21 @@ # Anthropic-specific parameters
anthropic_base_url: Optional[str] = None,
):
+ """
+ Initialize Anthropic configuration.
+
+ Args:
+ model: Anthropic model to use, defaults to None
+ temperature: Controls randomness, defaults to 0.1
+ api_key: Anthropic API key, defaults to None
+ max_tokens: Maximum tokens to generate, defaults to 2000
+ top_p: Nucleus sampling parameter, defaults to 0.1
+ top_k: Top-k sampling parameter, defaults to 1
+ enable_vision: Enable vision capabilities, defaults to False
+ vision_details: Vision detail level, defaults to "auto"
+ http_client_proxies: HTTP client proxy settings, defaults to None
+ anthropic_base_url: Anthropic API base URL, defaults to None
+ """
# Initialize base parameters
super().__init__(
model=model,
@@ -34,4 +53,4 @@ )
# Anthropic-specific parameters
- self.anthropic_base_url = anthropic_base_url+ self.anthropic_base_url = anthropic_base_url
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/anthropic.py |
Turn comments into proper docstrings | from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union
from mem0.configs.llms.base import BaseLlmConfig
class LLMBase(ABC):
def __init__(self, config: Optional[Union[BaseLlmConfig, Dict]] = None):
if config is None:
self.config = BaseLlmConfig()
elif isinstance(config, dict):
# Handle dict-based configuration (backward compatibility)
self.config = BaseLlmConfig(**config)
else:
self.config = config
# Validate configuration
self._validate_config()
def _validate_config(self):
if not hasattr(self.config, "model"):
raise ValueError("Configuration must have a 'model' attribute")
if not hasattr(self.config, "api_key") and not hasattr(self.config, "api_key"):
# Check if API key is available via environment variable
# This will be handled by individual providers
pass
def _is_reasoning_model(self, model: str) -> bool:
reasoning_models = {
"o1", "o1-preview", "o3-mini", "o3",
"gpt-5", "gpt-5o", "gpt-5o-mini", "gpt-5o-micro",
}
if model.lower() in reasoning_models:
return True
model_lower = model.lower()
if any(reasoning_model in model_lower for reasoning_model in ["gpt-5", "o1", "o3"]):
return True
return False
def _get_supported_params(self, **kwargs) -> Dict:
model = getattr(self.config, 'model', '')
if self._is_reasoning_model(model):
supported_params = {}
if "messages" in kwargs:
supported_params["messages"] = kwargs["messages"]
if "response_format" in kwargs:
supported_params["response_format"] = kwargs["response_format"]
if "tools" in kwargs:
supported_params["tools"] = kwargs["tools"]
if "tool_choice" in kwargs:
supported_params["tool_choice"] = kwargs["tool_choice"]
return supported_params
else:
# For regular models, include all common parameters
return self._get_common_params(**kwargs)
@abstractmethod
def generate_response(
self, messages: List[Dict[str, str]], tools: Optional[List[Dict]] = None, tool_choice: str = "auto", **kwargs
):
pass
def _get_common_params(self, **kwargs) -> Dict:
params = {
"temperature": self.config.temperature,
"max_tokens": self.config.max_tokens,
"top_p": self.config.top_p,
}
# Add provider-specific parameters from kwargs
params.update(kwargs)
return params | --- +++ @@ -5,8 +5,17 @@
class LLMBase(ABC):
+ """
+ Base class for all LLM providers.
+ Handles common functionality and delegates provider-specific logic to subclasses.
+ """
def __init__(self, config: Optional[Union[BaseLlmConfig, Dict]] = None):
+ """Initialize a base LLM class
+
+ :param config: LLM configuration option class or dict, defaults to None
+ :type config: Optional[Union[BaseLlmConfig, Dict]], optional
+ """
if config is None:
self.config = BaseLlmConfig()
elif isinstance(config, dict):
@@ -19,6 +28,10 @@ self._validate_config()
def _validate_config(self):
+ """
+ Validate the configuration.
+ Override in subclasses to add provider-specific validation.
+ """
if not hasattr(self.config, "model"):
raise ValueError("Configuration must have a 'model' attribute")
@@ -28,6 +41,15 @@ pass
def _is_reasoning_model(self, model: str) -> bool:
+ """
+ Check if the model is a reasoning model or GPT-5 series that doesn't support certain parameters.
+
+ Args:
+ model: The model name to check
+
+ Returns:
+ bool: True if the model is a reasoning model or GPT-5 series
+ """
reasoning_models = {
"o1", "o1-preview", "o3-mini", "o3",
"gpt-5", "gpt-5o", "gpt-5o-mini", "gpt-5o-micro",
@@ -43,6 +65,16 @@ return False
def _get_supported_params(self, **kwargs) -> Dict:
+ """
+ Get parameters that are supported by the current model.
+ Filters out unsupported parameters for reasoning models and GPT-5 series.
+
+ Args:
+ **kwargs: Additional parameters to include
+
+ Returns:
+ Dict: Filtered parameters dictionary
+ """
model = getattr(self.config, 'model', '')
if self._is_reasoning_model(model):
@@ -66,9 +98,27 @@ def generate_response(
self, messages: List[Dict[str, str]], tools: Optional[List[Dict]] = None, tool_choice: str = "auto", **kwargs
):
+ """
+ Generate a response based on the given messages.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+ **kwargs: Additional provider-specific parameters.
+
+ Returns:
+ str or dict: The generated response.
+ """
pass
def _get_common_params(self, **kwargs) -> Dict:
+ """
+ Get common parameters that most providers use.
+
+ Returns:
+ Dict: Common parameters dictionary.
+ """
params = {
"temperature": self.config.temperature,
"max_tokens": self.config.max_tokens,
@@ -78,4 +128,4 @@ # Add provider-specific parameters from kwargs
params.update(kwargs)
- return params+ return params
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/base.py |
Add docstrings to meet PEP guidelines | import json
import os
from typing import Literal, Optional
try:
import boto3
except ImportError:
raise ImportError("The 'boto3' library is required. Please install it using 'pip install boto3'.")
import numpy as np
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class AWSBedrockEmbedding(EmbeddingBase):
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
super().__init__(config)
self.config.model = self.config.model or "amazon.titan-embed-text-v1"
# Get AWS config from environment variables or use defaults
aws_access_key = os.environ.get("AWS_ACCESS_KEY_ID", "")
aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
aws_session_token = os.environ.get("AWS_SESSION_TOKEN", "")
# Check if AWS config is provided in the config
if hasattr(self.config, "aws_access_key_id"):
aws_access_key = self.config.aws_access_key_id
if hasattr(self.config, "aws_secret_access_key"):
aws_secret_key = self.config.aws_secret_access_key
# AWS region is always set in config - see BaseEmbedderConfig
aws_region = self.config.aws_region or "us-west-2"
self.client = boto3.client(
"bedrock-runtime",
region_name=aws_region,
aws_access_key_id=aws_access_key if aws_access_key else None,
aws_secret_access_key=aws_secret_key if aws_secret_key else None,
aws_session_token=aws_session_token if aws_session_token else None,
)
def _normalize_vector(self, embeddings):
emb = np.array(embeddings)
norm_emb = emb / np.linalg.norm(emb)
return norm_emb.tolist()
def _get_embedding(self, text):
# Format input body based on the provider
provider = self.config.model.split(".")[0]
input_body = {}
if provider == "cohere":
input_body["input_type"] = "search_document"
input_body["texts"] = [text]
else:
# Amazon and other providers
input_body["inputText"] = text
body = json.dumps(input_body)
try:
response = self.client.invoke_model(
body=body,
modelId=self.config.model,
accept="application/json",
contentType="application/json",
)
response_body = json.loads(response.get("body").read())
if provider == "cohere":
embeddings = response_body.get("embeddings")[0]
else:
embeddings = response_body.get("embedding")
return embeddings
except Exception as e:
raise ValueError(f"Error getting embedding from AWS Bedrock: {e}")
def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
return self._get_embedding(text) | --- +++ @@ -14,6 +14,10 @@
class AWSBedrockEmbedding(EmbeddingBase):
+ """AWS Bedrock embedding implementation.
+
+ This class uses AWS Bedrock's embedding models.
+ """
def __init__(self, config: Optional[BaseEmbedderConfig] = None):
super().__init__(config)
@@ -43,11 +47,13 @@ )
def _normalize_vector(self, embeddings):
+ """Normalize the embedding to a unit vector."""
emb = np.array(embeddings)
norm_emb = emb / np.linalg.norm(emb)
return norm_emb.tolist()
def _get_embedding(self, text):
+ """Call out to Bedrock embedding endpoint."""
# Format input body based on the provider
provider = self.config.model.split(".")[0]
@@ -82,4 +88,13 @@ raise ValueError(f"Error getting embedding from AWS Bedrock: {e}")
def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
- return self._get_embedding(text)+ """
+ Get the embedding for the given text using AWS Bedrock.
+
+ Args:
+ text (str): The text to embed.
+ memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+ Returns:
+ list: The embedding vector.
+ """
+ return self._get_embedding(text)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/embeddings/aws_bedrock.py |
Fully document this Python code with docstrings | import logging
from .base import NeptuneBase
try:
from langchain_aws import NeptuneAnalyticsGraph
from botocore.config import Config
except ImportError:
raise ImportError("langchain_aws is not installed. Please install it using 'make install_all'.")
logger = logging.getLogger(__name__)
class MemoryGraph(NeptuneBase):
def __init__(self, config):
self.config = config
self.graph = None
endpoint = self.config.graph_store.config.endpoint
app_id = self.config.graph_store.config.app_id
if endpoint and endpoint.startswith("neptune-graph://"):
graph_identifier = endpoint.replace("neptune-graph://", "")
self.graph = NeptuneAnalyticsGraph(graph_identifier = graph_identifier,
config = Config(user_agent_appid=app_id))
if not self.graph:
raise ValueError("Unable to create a Neptune client: missing 'endpoint' in config")
self.node_label = ":`__Entity__`" if self.config.graph_store.config.base_label else ""
self.embedding_model = NeptuneBase._create_embedding_model(self.config)
# Default to openai if no specific provider is configured
self.llm_provider = "openai"
if self.config.llm.provider:
self.llm_provider = self.config.llm.provider
if self.config.graph_store.llm:
self.llm_provider = self.config.graph_store.llm.provider
self.llm = NeptuneBase._create_llm(self.config, self.llm_provider)
self.user_id = None
# Use threshold from graph_store config, default to 0.7 for backward compatibility
self.threshold = self.config.graph_store.threshold if hasattr(self.config.graph_store, 'threshold') else 0.7
def _delete_entities_cypher(self, source, destination, relationship, user_id):
cypher = f"""
MATCH (n {self.node_label} {{name: $source_name, user_id: $user_id}})
-[r:{relationship}]->
(m {self.node_label} {{name: $dest_name, user_id: $user_id}})
DELETE r
RETURN
n.name AS source,
m.name AS target,
type(r) AS relationship
"""
params = {
"source_name": source,
"dest_name": destination,
"user_id": user_id,
}
logger.debug(f"_delete_entities\n query={cypher}")
return cypher, params
def _add_entities_by_source_cypher(
self,
source_node_list,
destination,
dest_embedding,
destination_type,
relationship,
user_id,
):
destination_label = self.node_label if self.node_label else f":`{destination_type}`"
destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
cypher = f"""
MATCH (source {{user_id: $user_id}})
WHERE id(source) = $source_id
SET source.mentions = coalesce(source.mentions, 0) + 1
WITH source
MERGE (destination {destination_label} {{name: $destination_name, user_id: $user_id}})
ON CREATE SET
destination.created = timestamp(),
destination.updated = timestamp(),
destination.mentions = 1
{destination_extra_set}
ON MATCH SET
destination.mentions = coalesce(destination.mentions, 0) + 1,
destination.updated = timestamp()
WITH source, destination, $dest_embedding as dest_embedding
CALL neptune.algo.vectors.upsert(destination, dest_embedding)
WITH source, destination
MERGE (source)-[r:{relationship}]->(destination)
ON CREATE SET
r.created = timestamp(),
r.updated = timestamp(),
r.mentions = 1
ON MATCH SET
r.mentions = coalesce(r.mentions, 0) + 1,
r.updated = timestamp()
RETURN source.name AS source, type(r) AS relationship, destination.name AS target
"""
params = {
"source_id": source_node_list[0]["id(source_candidate)"],
"destination_name": destination,
"dest_embedding": dest_embedding,
"user_id": user_id,
}
logger.debug(
f"_add_entities:\n source_node_search_result={source_node_list[0]}\n query={cypher}"
)
return cypher, params
def _add_entities_by_destination_cypher(
self,
source,
source_embedding,
source_type,
destination_node_list,
relationship,
user_id,
):
source_label = self.node_label if self.node_label else f":`{source_type}`"
source_extra_set = f", source:`{source_type}`" if self.node_label else ""
cypher = f"""
MATCH (destination {{user_id: $user_id}})
WHERE id(destination) = $destination_id
SET
destination.mentions = coalesce(destination.mentions, 0) + 1,
destination.updated = timestamp()
WITH destination
MERGE (source {source_label} {{name: $source_name, user_id: $user_id}})
ON CREATE SET
source.created = timestamp(),
source.updated = timestamp(),
source.mentions = 1
{source_extra_set}
ON MATCH SET
source.mentions = coalesce(source.mentions, 0) + 1,
source.updated = timestamp()
WITH source, destination, $source_embedding as source_embedding
CALL neptune.algo.vectors.upsert(source, source_embedding)
WITH source, destination
MERGE (source)-[r:{relationship}]->(destination)
ON CREATE SET
r.created = timestamp(),
r.updated = timestamp(),
r.mentions = 1
ON MATCH SET
r.mentions = coalesce(r.mentions, 0) + 1,
r.updated = timestamp()
RETURN source.name AS source, type(r) AS relationship, destination.name AS target
"""
params = {
"destination_id": destination_node_list[0]["id(destination_candidate)"],
"source_name": source,
"source_embedding": source_embedding,
"user_id": user_id,
}
logger.debug(
f"_add_entities:\n destination_node_search_result={destination_node_list[0]}\n query={cypher}"
)
return cypher, params
def _add_relationship_entities_cypher(
self,
source_node_list,
destination_node_list,
relationship,
user_id,
):
cypher = f"""
MATCH (source {{user_id: $user_id}})
WHERE id(source) = $source_id
SET
source.mentions = coalesce(source.mentions, 0) + 1,
source.updated = timestamp()
WITH source
MATCH (destination {{user_id: $user_id}})
WHERE id(destination) = $destination_id
SET
destination.mentions = coalesce(destination.mentions) + 1,
destination.updated = timestamp()
MERGE (source)-[r:{relationship}]->(destination)
ON CREATE SET
r.created_at = timestamp(),
r.updated_at = timestamp(),
r.mentions = 1
ON MATCH SET r.mentions = coalesce(r.mentions, 0) + 1
RETURN source.name AS source, type(r) AS relationship, destination.name AS target
"""
params = {
"source_id": source_node_list[0]["id(source_candidate)"],
"destination_id": destination_node_list[0]["id(destination_candidate)"],
"user_id": user_id,
}
logger.debug(
f"_add_entities:\n destination_node_search_result={destination_node_list[0]}\n source_node_search_result={source_node_list[0]}\n query={cypher}"
)
return cypher, params
def _add_new_entities_cypher(
self,
source,
source_embedding,
source_type,
destination,
dest_embedding,
destination_type,
relationship,
user_id,
):
source_label = self.node_label if self.node_label else f":`{source_type}`"
source_extra_set = f", source:`{source_type}`" if self.node_label else ""
destination_label = self.node_label if self.node_label else f":`{destination_type}`"
destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
cypher = f"""
MERGE (n {source_label} {{name: $source_name, user_id: $user_id}})
ON CREATE SET n.created = timestamp(),
n.updated = timestamp(),
n.mentions = 1
{source_extra_set}
ON MATCH SET
n.mentions = coalesce(n.mentions, 0) + 1,
n.updated = timestamp()
WITH n, $source_embedding as source_embedding
CALL neptune.algo.vectors.upsert(n, source_embedding)
WITH n
MERGE (m {destination_label} {{name: $dest_name, user_id: $user_id}})
ON CREATE SET
m.created = timestamp(),
m.updated = timestamp(),
m.mentions = 1
{destination_extra_set}
ON MATCH SET
m.updated = timestamp(),
m.mentions = coalesce(m.mentions, 0) + 1
WITH n, m, $dest_embedding as dest_embedding
CALL neptune.algo.vectors.upsert(m, dest_embedding)
WITH n, m
MERGE (n)-[rel:{relationship}]->(m)
ON CREATE SET
rel.created = timestamp(),
rel.updated = timestamp(),
rel.mentions = 1
ON MATCH SET
rel.updated = timestamp(),
rel.mentions = coalesce(rel.mentions, 0) + 1
RETURN n.name AS source, type(rel) AS relationship, m.name AS target
"""
params = {
"source_name": source,
"dest_name": destination,
"source_embedding": source_embedding,
"dest_embedding": dest_embedding,
"user_id": user_id,
}
logger.debug(
f"_add_new_entities_cypher:\n query={cypher}"
)
return cypher, params
def _search_source_node_cypher(self, source_embedding, user_id, threshold):
cypher = f"""
MATCH (source_candidate {self.node_label})
WHERE source_candidate.user_id = $user_id
WITH source_candidate, $source_embedding as v_embedding
CALL neptune.algo.vectors.distanceByEmbedding(
v_embedding,
source_candidate,
{{metric:"CosineSimilarity"}}
) YIELD distance
WITH source_candidate, distance AS cosine_similarity
WHERE cosine_similarity >= $threshold
WITH source_candidate, cosine_similarity
ORDER BY cosine_similarity DESC
LIMIT 1
RETURN id(source_candidate), cosine_similarity
"""
params = {
"source_embedding": source_embedding,
"user_id": user_id,
"threshold": threshold,
}
logger.debug(f"_search_source_node\n query={cypher}")
return cypher, params
def _search_destination_node_cypher(self, destination_embedding, user_id, threshold):
cypher = f"""
MATCH (destination_candidate {self.node_label})
WHERE destination_candidate.user_id = $user_id
WITH destination_candidate, $destination_embedding as v_embedding
CALL neptune.algo.vectors.distanceByEmbedding(
v_embedding,
destination_candidate,
{{metric:"CosineSimilarity"}}
) YIELD distance
WITH destination_candidate, distance AS cosine_similarity
WHERE cosine_similarity >= $threshold
WITH destination_candidate, cosine_similarity
ORDER BY cosine_similarity DESC
LIMIT 1
RETURN id(destination_candidate), cosine_similarity
"""
params = {
"destination_embedding": destination_embedding,
"user_id": user_id,
"threshold": threshold,
}
logger.debug(f"_search_destination_node\n query={cypher}")
return cypher, params
def _delete_all_cypher(self, filters):
cypher = f"""
MATCH (n {self.node_label} {{user_id: $user_id}})
DETACH DELETE n
"""
params = {"user_id": filters["user_id"]}
logger.debug(f"delete_all query={cypher}")
return cypher, params
def _get_all_cypher(self, filters, limit):
cypher = f"""
MATCH (n {self.node_label} {{user_id: $user_id}})-[r]->(m {self.node_label} {{user_id: $user_id}})
RETURN n.name AS source, type(r) AS relationship, m.name AS target
LIMIT $limit
"""
params = {"user_id": filters["user_id"], "limit": limit}
return cypher, params
def _search_graph_db_cypher(self, n_embedding, filters, limit):
cypher_query = f"""
MATCH (n {self.node_label})
WHERE n.user_id = $user_id
WITH n, $n_embedding as n_embedding
CALL neptune.algo.vectors.distanceByEmbedding(
n_embedding,
n,
{{metric:"CosineSimilarity"}}
) YIELD distance
WITH n, distance as similarity
WHERE similarity >= $threshold
CALL {{
WITH n
MATCH (n)-[r]->(m)
RETURN n.name AS source, id(n) AS source_id, type(r) AS relationship, id(r) AS relation_id, m.name AS destination, id(m) AS destination_id
UNION ALL
WITH n
MATCH (m)-[r]->(n)
RETURN m.name AS source, id(m) AS source_id, type(r) AS relationship, id(r) AS relation_id, n.name AS destination, id(n) AS destination_id
}}
WITH distinct source, source_id, relationship, relation_id, destination, destination_id, similarity
RETURN source, source_id, relationship, relation_id, destination, destination_id, similarity
ORDER BY similarity DESC
LIMIT $limit
"""
params = {
"n_embedding": n_embedding,
"threshold": self.threshold,
"user_id": filters["user_id"],
"limit": limit,
}
logger.debug(f"_search_graph_db\n query={cypher_query}")
return cypher_query, params | --- +++ @@ -43,6 +43,15 @@ self.threshold = self.config.graph_store.threshold if hasattr(self.config.graph_store, 'threshold') else 0.7
def _delete_entities_cypher(self, source, destination, relationship, user_id):
+ """
+ Returns the OpenCypher query and parameters for deleting entities in the graph DB
+
+ :param source: source node
+ :param destination: destination node
+ :param relationship: relationship label
+ :param user_id: user_id to use
+ :return: str, dict
+ """
cypher = f"""
MATCH (n {self.node_label} {{name: $source_name, user_id: $user_id}})
@@ -71,6 +80,17 @@ relationship,
user_id,
):
+ """
+ Returns the OpenCypher query and parameters for adding entities in the graph DB
+
+ :param source_node_list: list of source nodes
+ :param destination: destination name
+ :param dest_embedding: destination embedding
+ :param destination_type: destination node label
+ :param relationship: relationship label
+ :param user_id: user id to use
+ :return: str, dict
+ """
destination_label = self.node_label if self.node_label else f":`{destination_type}`"
destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""
@@ -123,6 +143,17 @@ relationship,
user_id,
):
+ """
+ Returns the OpenCypher query and parameters for adding entities in the graph DB
+
+ :param source: source node name
+ :param source_embedding: source node embedding
+ :param source_type: source node label
+ :param destination_node_list: list of dest nodes
+ :param relationship: relationship label
+ :param user_id: user id to use
+ :return: str, dict
+ """
source_label = self.node_label if self.node_label else f":`{source_type}`"
source_extra_set = f", source:`{source_type}`" if self.node_label else ""
@@ -175,6 +206,15 @@ relationship,
user_id,
):
+ """
+ Returns the OpenCypher query and parameters for adding entities in the graph DB
+
+ :param source_node_list: list of source node ids
+ :param destination_node_list: list of dest node ids
+ :param relationship: relationship label
+ :param user_id: user id to use
+ :return: str, dict
+ """
cypher = f"""
MATCH (source {{user_id: $user_id}})
@@ -217,6 +257,19 @@ relationship,
user_id,
):
+ """
+ Returns the OpenCypher query and parameters for adding entities in the graph DB
+
+ :param source: source node name
+ :param source_embedding: source node embedding
+ :param source_type: source node label
+ :param destination: destination name
+ :param dest_embedding: destination embedding
+ :param destination_type: destination node label
+ :param relationship: relationship label
+ :param user_id: user id to use
+ :return: str, dict
+ """
source_label = self.node_label if self.node_label else f":`{source_type}`"
source_extra_set = f", source:`{source_type}`" if self.node_label else ""
@@ -270,6 +323,14 @@ return cypher, params
def _search_source_node_cypher(self, source_embedding, user_id, threshold):
+ """
+ Returns the OpenCypher query and parameters to search for source nodes
+
+ :param source_embedding: source vector
+ :param user_id: user_id to use
+ :param threshold: the threshold for similarity
+ :return: str, dict
+ """
cypher = f"""
MATCH (source_candidate {self.node_label})
WHERE source_candidate.user_id = $user_id
@@ -299,6 +360,14 @@ return cypher, params
def _search_destination_node_cypher(self, destination_embedding, user_id, threshold):
+ """
+ Returns the OpenCypher query and parameters to search for destination nodes
+
+ :param source_embedding: source vector
+ :param user_id: user_id to use
+ :param threshold: the threshold for similarity
+ :return: str, dict
+ """
cypher = f"""
MATCH (destination_candidate {self.node_label})
WHERE destination_candidate.user_id = $user_id
@@ -328,6 +397,12 @@ return cypher, params
def _delete_all_cypher(self, filters):
+ """
+ Returns the OpenCypher query and parameters to delete all edges/nodes in the memory store
+
+ :param filters: search filters
+ :return: str, dict
+ """
cypher = f"""
MATCH (n {self.node_label} {{user_id: $user_id}})
DETACH DELETE n
@@ -338,6 +413,13 @@ return cypher, params
def _get_all_cypher(self, filters, limit):
+ """
+ Returns the OpenCypher query and parameters to get all edges/nodes in the memory store
+
+ :param filters: search filters
+ :param limit: return limit
+ :return: str, dict
+ """
cypher = f"""
MATCH (n {self.node_label} {{user_id: $user_id}})-[r]->(m {self.node_label} {{user_id: $user_id}})
@@ -348,6 +430,14 @@ return cypher, params
def _search_graph_db_cypher(self, n_embedding, filters, limit):
+ """
+ Returns the OpenCypher query and parameters to search for similar nodes in the memory store
+
+ :param n_embedding: node vector
+ :param filters: search filters
+ :param limit: return limit
+ :return: str, dict
+ """
cypher_query = f"""
MATCH (n {self.node_label})
@@ -382,4 +472,4 @@ }
logger.debug(f"_search_graph_db\n query={cypher_query}")
- return cypher_query, params+ return cypher_query, params
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/graphs/neptune/neptunegraph.py |
Document this module using docstrings | import json
import os
from typing import Dict, List, Optional, Union
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI
from mem0.configs.llms.azure import AzureOpenAIConfig
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
SCOPE = "https://cognitiveservices.azure.com/.default"
class AzureOpenAILLM(LLMBase):
def __init__(self, config: Optional[Union[BaseLlmConfig, AzureOpenAIConfig, Dict]] = None):
# Convert to AzureOpenAIConfig if needed
if config is None:
config = AzureOpenAIConfig()
elif isinstance(config, dict):
config = AzureOpenAIConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, AzureOpenAIConfig):
# Convert BaseLlmConfig to AzureOpenAIConfig
config = AzureOpenAIConfig(
model=config.model,
temperature=config.temperature,
api_key=config.api_key,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=config.enable_vision,
vision_details=config.vision_details,
http_client_proxies=config.http_client,
)
super().__init__(config)
# Model name should match the custom deployment name chosen for it.
if not self.config.model:
self.config.model = "gpt-4.1-nano-2025-04-14"
api_key = self.config.azure_kwargs.api_key or os.getenv("LLM_AZURE_OPENAI_API_KEY")
azure_deployment = self.config.azure_kwargs.azure_deployment or os.getenv("LLM_AZURE_DEPLOYMENT")
azure_endpoint = self.config.azure_kwargs.azure_endpoint or os.getenv("LLM_AZURE_ENDPOINT")
api_version = self.config.azure_kwargs.api_version or os.getenv("LLM_AZURE_API_VERSION")
default_headers = self.config.azure_kwargs.default_headers
# If the API key is not provided or is a placeholder, use DefaultAzureCredential.
if api_key is None or api_key == "" or api_key == "your-api-key":
self.credential = DefaultAzureCredential()
azure_ad_token_provider = get_bearer_token_provider(
self.credential,
SCOPE,
)
api_key = None
else:
azure_ad_token_provider = None
self.client = AzureOpenAI(
azure_deployment=azure_deployment,
azure_endpoint=azure_endpoint,
azure_ad_token_provider=azure_ad_token_provider,
api_version=api_version,
api_key=api_key,
http_client=self.config.http_client,
default_headers=default_headers,
)
def _parse_response(self, response, tools):
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
user_prompt = messages[-1]["content"]
user_prompt = user_prompt.replace("assistant", "ai")
messages[-1]["content"] = user_prompt
params = self._get_supported_params(messages=messages, **kwargs)
# Add model and messages
params.update({
"model": self.config.model,
"messages": messages,
})
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | --- +++ @@ -68,6 +68,16 @@ )
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
if tools:
processed_response = {
"content": response.choices[0].message.content,
@@ -95,6 +105,19 @@ tool_choice: str = "auto",
**kwargs,
):
+ """
+ Generate a response based on the given messages using Azure OpenAI.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+ **kwargs: Additional Azure OpenAI-specific parameters.
+
+ Returns:
+ str: The generated response.
+ """
user_prompt = messages[-1]["content"]
@@ -115,4 +138,4 @@ params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
- return self._parse_response(response, tools)+ return self._parse_response(response, tools)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/azure_openai.py |
Add professional docstrings to my codebase | import json
import os
from typing import Dict, List, Optional, Union
from openai import OpenAI
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.vllm import VllmConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class VllmLLM(LLMBase):
def __init__(self, config: Optional[Union[BaseLlmConfig, VllmConfig, Dict]] = None):
# Convert to VllmConfig if needed
if config is None:
config = VllmConfig()
elif isinstance(config, dict):
config = VllmConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, VllmConfig):
# Convert BaseLlmConfig to VllmConfig
config = VllmConfig(
model=config.model,
temperature=config.temperature,
api_key=config.api_key,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=config.enable_vision,
vision_details=config.vision_details,
http_client_proxies=config.http_client,
)
super().__init__(config)
if not self.config.model:
self.config.model = "Qwen/Qwen2.5-32B-Instruct"
self.config.api_key = self.config.api_key or os.getenv("VLLM_API_KEY") or "vllm-api-key"
base_url = self.config.vllm_base_url or os.getenv("VLLM_BASE_URL")
self.client = OpenAI(api_key=self.config.api_key, base_url=base_url)
def _parse_response(self, response, tools):
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
params = self._get_supported_params(messages=messages, **kwargs)
params.update(
{
"model": self.config.model,
"messages": messages,
}
)
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | --- +++ @@ -41,6 +41,16 @@ self.client = OpenAI(api_key=self.config.api_key, base_url=base_url)
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
if tools:
processed_response = {
"content": response.choices[0].message.content,
@@ -68,6 +78,19 @@ tool_choice: str = "auto",
**kwargs,
):
+ """
+ Generate a response based on the given messages using vLLM.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+ **kwargs: Additional vLLM-specific parameters.
+
+ Returns:
+ str: The generated response.
+ """
params = self._get_supported_params(messages=messages, **kwargs)
params.update(
{
@@ -81,4 +104,4 @@ params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
- return self._parse_response(response, tools)+ return self._parse_response(response, tools)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/vllm.py |
Add docstrings explaining edge cases | import json
from typing import Dict, List, Optional, Union
from openai import OpenAI
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.lmstudio import LMStudioConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class LMStudioLLM(LLMBase):
def __init__(self, config: Optional[Union[BaseLlmConfig, LMStudioConfig, Dict]] = None):
# Convert to LMStudioConfig if needed
if config is None:
config = LMStudioConfig()
elif isinstance(config, dict):
config = LMStudioConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, LMStudioConfig):
# Convert BaseLlmConfig to LMStudioConfig
config = LMStudioConfig(
model=config.model,
temperature=config.temperature,
api_key=config.api_key,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=config.enable_vision,
vision_details=config.vision_details,
http_client_proxies=config.http_client,
)
super().__init__(config)
self.config.model = (
self.config.model
or "lmstudio-community/Meta-Llama-3.1-70B-Instruct-GGUF/Meta-Llama-3.1-70B-Instruct-IQ2_M.gguf"
)
self.config.api_key = self.config.api_key or "lm-studio"
self.client = OpenAI(base_url=self.config.lmstudio_base_url, api_key=self.config.api_key)
def _parse_response(self, response, tools):
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
params = self._get_supported_params(messages=messages, **kwargs)
params.update(
{
"model": self.config.model,
"messages": messages,
}
)
if self.config.lmstudio_response_format:
params["response_format"] = self.config.lmstudio_response_format
elif response_format:
params["response_format"] = response_format
else:
params["response_format"] = {"type": "json_object"}
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | --- +++ @@ -41,6 +41,16 @@ self.client = OpenAI(base_url=self.config.lmstudio_base_url, api_key=self.config.api_key)
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
if tools:
processed_response = {
"content": response.choices[0].message.content,
@@ -68,6 +78,19 @@ tool_choice: str = "auto",
**kwargs,
):
+ """
+ Generate a response based on the given messages using LM Studio.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+ **kwargs: Additional LM Studio-specific parameters.
+
+ Returns:
+ str: The generated response.
+ """
params = self._get_supported_params(messages=messages, **kwargs)
params.update(
{
@@ -88,4 +111,4 @@ params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
- return self._parse_response(response, tools)+ return self._parse_response(response, tools)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/lmstudio.py |
Add clean documentation to messy code | import json
import logging
import re
from typing import Any, Dict, List, Optional, Union
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
except ImportError:
raise ImportError("The 'boto3' library is required. Please install it using 'pip install boto3'.")
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.aws_bedrock import AWSBedrockConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
logger = logging.getLogger(__name__)
PROVIDERS = [
"ai21", "amazon", "anthropic", "cohere", "meta", "mistral", "stability", "writer",
"deepseek", "gpt-oss", "perplexity", "snowflake", "titan", "command", "j2", "llama"
]
def extract_provider(model: str) -> str:
for provider in PROVIDERS:
if re.search(rf"\b{re.escape(provider)}\b", model):
return provider
raise ValueError(f"Unknown provider in model: {model}")
class AWSBedrockLLM(LLMBase):
def __init__(self, config: Optional[Union[AWSBedrockConfig, BaseLlmConfig, Dict]] = None):
# Convert to AWSBedrockConfig if needed
if config is None:
config = AWSBedrockConfig()
elif isinstance(config, dict):
config = AWSBedrockConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, AWSBedrockConfig):
# Convert BaseLlmConfig to AWSBedrockConfig
config = AWSBedrockConfig(
model=config.model,
temperature=config.temperature,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=getattr(config, "enable_vision", False),
)
super().__init__(config)
self.config = config
# Initialize AWS client
self._initialize_aws_client()
# Get model configuration
self.model_config = self.config.get_model_config()
self.provider = extract_provider(self.config.model)
# Initialize provider-specific settings
self._initialize_provider_settings()
def _initialize_aws_client(self):
try:
aws_config = self.config.get_aws_config()
# Create Bedrock runtime client
self.client = boto3.client("bedrock-runtime", **aws_config)
# Test connection
self._test_connection()
except NoCredentialsError:
raise ValueError(
"AWS credentials not found. Please set AWS_ACCESS_KEY_ID, "
"AWS_SECRET_ACCESS_KEY, and AWS_REGION environment variables, "
"or provide them in the config."
)
except ClientError as e:
if e.response["Error"]["Code"] == "UnauthorizedOperation":
raise ValueError(
f"Unauthorized access to Bedrock. Please ensure your AWS credentials "
f"have permission to access Bedrock in region {self.config.aws_region}."
)
else:
raise ValueError(f"AWS Bedrock error: {e}")
def _test_connection(self):
try:
# List available models to test connection
bedrock_client = boto3.client("bedrock", **self.config.get_aws_config())
response = bedrock_client.list_foundation_models()
self.available_models = [model["modelId"] for model in response["modelSummaries"]]
# Check if our model is available
if self.config.model not in self.available_models:
logger.warning(f"Model {self.config.model} may not be available in region {self.config.aws_region}")
logger.info(f"Available models: {', '.join(self.available_models[:5])}...")
except Exception as e:
logger.warning(f"Could not verify model availability: {e}")
self.available_models = []
def _initialize_provider_settings(self):
# Determine capabilities based on provider and model
self.supports_tools = self.provider in ["anthropic", "cohere", "amazon"]
self.supports_vision = self.provider in ["anthropic", "amazon", "meta", "mistral"]
self.supports_streaming = self.provider in ["anthropic", "cohere", "mistral", "amazon", "meta"]
# Set message formatting method
if self.provider == "anthropic":
self._format_messages = self._format_messages_anthropic
elif self.provider == "cohere":
self._format_messages = self._format_messages_cohere
elif self.provider == "amazon":
self._format_messages = self._format_messages_amazon
elif self.provider == "meta":
self._format_messages = self._format_messages_meta
elif self.provider == "mistral":
self._format_messages = self._format_messages_mistral
else:
self._format_messages = self._format_messages_generic
def _format_messages_anthropic(self, messages: List[Dict[str, str]]) -> tuple[List[Dict[str, Any]], Optional[str]]:
formatted_messages = []
system_message = None
for message in messages:
role = message["role"]
content = message["content"]
if role == "system":
# Anthropic supports system messages as a separate parameter
# see: https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/system-prompts
system_message = content
elif role == "user":
# Use Converse API format
formatted_messages.append({"role": "user", "content": [{"text": content}]})
elif role == "assistant":
# Use Converse API format
formatted_messages.append({"role": "assistant", "content": [{"text": content}]})
return formatted_messages, system_message
def _format_messages_cohere(self, messages: List[Dict[str, str]]) -> str:
formatted_messages = []
for message in messages:
role = message["role"].capitalize()
content = message["content"]
formatted_messages.append(f"{role}: {content}")
return "\n".join(formatted_messages)
def _format_messages_amazon(self, messages: List[Dict[str, str]]) -> List[Dict[str, Any]]:
formatted_messages = []
for message in messages:
role = message["role"]
content = message["content"]
if role == "system":
# Amazon models support system messages
formatted_messages.append({"role": "system", "content": content})
elif role == "user":
formatted_messages.append({"role": "user", "content": content})
elif role == "assistant":
formatted_messages.append({"role": "assistant", "content": content})
return formatted_messages
def _format_messages_meta(self, messages: List[Dict[str, str]]) -> str:
formatted_messages = []
for message in messages:
role = message["role"].capitalize()
content = message["content"]
formatted_messages.append(f"{role}: {content}")
return "\n".join(formatted_messages)
def _format_messages_mistral(self, messages: List[Dict[str, str]]) -> List[Dict[str, Any]]:
formatted_messages = []
for message in messages:
role = message["role"]
content = message["content"]
if role == "system":
# Mistral supports system messages
formatted_messages.append({"role": "system", "content": content})
elif role == "user":
formatted_messages.append({"role": "user", "content": content})
elif role == "assistant":
formatted_messages.append({"role": "assistant", "content": content})
return formatted_messages
def _format_messages_generic(self, messages: List[Dict[str, str]]) -> str:
formatted_messages = []
for message in messages:
role = message["role"].capitalize()
content = message["content"]
formatted_messages.append(f"\n\n{role}: {content}")
return "\n\nHuman: " + "".join(formatted_messages) + "\n\nAssistant:"
def _prepare_input(self, prompt: str) -> Dict[str, Any]:
# Base configuration
input_body = {"prompt": prompt}
# Provider-specific parameter mappings
provider_mappings = {
"meta": {"max_tokens": "max_gen_len"},
"ai21": {"max_tokens": "maxTokens", "top_p": "topP"},
"mistral": {"max_tokens": "max_tokens"},
"cohere": {"max_tokens": "max_tokens", "top_p": "p"},
"amazon": {"max_tokens": "maxTokenCount", "top_p": "topP"},
"anthropic": {"max_tokens": "max_tokens", "top_p": "top_p"},
}
# Apply provider mappings
if self.provider in provider_mappings:
for old_key, new_key in provider_mappings[self.provider].items():
if old_key in self.model_config:
input_body[new_key] = self.model_config[old_key]
# Special handling for specific providers
if self.provider == "cohere" and "cohere.command" in self.config.model:
input_body["message"] = input_body.pop("prompt")
elif self.provider == "amazon":
# Amazon Nova and other Amazon models
if "nova" in self.config.model.lower():
# Nova models use the converse API format
input_body = {
"messages": [{"role": "user", "content": prompt}],
"max_tokens": self.model_config.get("max_tokens", 5000),
"temperature": self.model_config.get("temperature", 0.1),
"top_p": self.model_config.get("top_p", 0.9),
}
else:
# Legacy Amazon models
input_body = {
"inputText": prompt,
"textGenerationConfig": {
"maxTokenCount": self.model_config.get("max_tokens", 5000),
"topP": self.model_config.get("top_p", 0.9),
"temperature": self.model_config.get("temperature", 0.1),
},
}
# Remove None values
input_body["textGenerationConfig"] = {
k: v for k, v in input_body["textGenerationConfig"].items() if v is not None
}
elif self.provider == "anthropic":
input_body = {
"messages": [{"role": "user", "content": [{"type": "text", "text": prompt}]}],
"max_tokens": self.model_config.get("max_tokens", 2000),
"temperature": self.model_config.get("temperature", 0.1),
"top_p": self.model_config.get("top_p", 0.9),
"anthropic_version": "bedrock-2023-05-31",
}
elif self.provider == "meta":
input_body = {
"prompt": prompt,
"max_gen_len": self.model_config.get("max_tokens", 5000),
"temperature": self.model_config.get("temperature", 0.1),
"top_p": self.model_config.get("top_p", 0.9),
}
elif self.provider == "mistral":
input_body = {
"prompt": prompt,
"max_tokens": self.model_config.get("max_tokens", 5000),
"temperature": self.model_config.get("temperature", 0.1),
"top_p": self.model_config.get("top_p", 0.9),
}
else:
# Generic case - add all model config parameters
input_body.update(self.model_config)
return input_body
def _convert_tool_format(self, original_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
new_tools = []
for tool in original_tools:
if tool["type"] == "function":
function = tool["function"]
new_tool = {
"toolSpec": {
"name": function["name"],
"description": function.get("description", ""),
"inputSchema": {
"json": {
"type": "object",
"properties": {},
"required": function["parameters"].get("required", []),
}
},
}
}
# Add properties
for prop, details in function["parameters"].get("properties", {}).items():
new_tool["toolSpec"]["inputSchema"]["json"]["properties"][prop] = details
new_tools.append(new_tool)
return new_tools
def _parse_response(
self, response: Dict[str, Any], tools: Optional[List[Dict]] = None
) -> Union[str, Dict[str, Any]]:
if tools:
# Handle tool-enabled responses
processed_response = {"tool_calls": []}
if response.get("output", {}).get("message", {}).get("content"):
for item in response["output"]["message"]["content"]:
if "toolUse" in item:
processed_response["tool_calls"].append(
{
"name": item["toolUse"]["name"],
"arguments": json.loads(extract_json(json.dumps(item["toolUse"]["input"]))),
}
)
return processed_response
# Handle regular text responses
try:
response_body = response.get("body").read().decode()
response_json = json.loads(response_body)
# Provider-specific response parsing
if self.provider == "anthropic":
return response_json.get("content", [{"text": ""}])[0].get("text", "")
elif self.provider == "amazon":
# Handle both Nova and legacy Amazon models
if "nova" in self.config.model.lower():
# Nova models return content in a different format
if "content" in response_json:
return response_json["content"][0]["text"]
elif "completion" in response_json:
return response_json["completion"]
else:
# Legacy Amazon models
return response_json.get("completion", "")
elif self.provider == "meta":
return response_json.get("generation", "")
elif self.provider == "mistral":
return response_json.get("outputs", [{"text": ""}])[0].get("text", "")
elif self.provider == "cohere":
return response_json.get("generations", [{"text": ""}])[0].get("text", "")
elif self.provider == "ai21":
return response_json.get("completions", [{"data", {"text": ""}}])[0].get("data", {}).get("text", "")
else:
# Generic parsing - try common response fields
for field in ["content", "text", "completion", "generation"]:
if field in response_json:
if isinstance(response_json[field], list) and response_json[field]:
return response_json[field][0].get("text", "")
elif isinstance(response_json[field], str):
return response_json[field]
# Fallback
return str(response_json)
except Exception as e:
logger.warning(f"Could not parse response: {e}")
return "Error parsing response"
def generate_response(
self,
messages: List[Dict[str, str]],
response_format: Optional[str] = None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
stream: bool = False,
**kwargs,
) -> Union[str, Dict[str, Any]]:
try:
if tools and self.supports_tools:
# Use converse method for tool-enabled models
return self._generate_with_tools(messages, tools, stream)
else:
# Use standard invoke_model method
return self._generate_standard(messages, stream)
except Exception as e:
logger.error(f"Failed to generate response: {e}")
raise RuntimeError(f"Failed to generate response: {e}")
@staticmethod
def _convert_tools_to_converse_format(tools: List[Dict]) -> List[Dict]:
if not tools:
return []
converse_tools = []
for tool in tools:
if tool.get("type") == "function" and "function" in tool:
func = tool["function"]
converse_tool = {
"toolSpec": {
"name": func["name"],
"description": func.get("description", ""),
"inputSchema": {
"json": func.get("parameters", {})
}
}
}
converse_tools.append(converse_tool)
return converse_tools
def _generate_with_tools(self, messages: List[Dict[str, str]], tools: List[Dict], stream: bool = False) -> Dict[str, Any]:
# Format messages for tool-enabled models
system_message = None
if self.provider == "anthropic":
formatted_messages, system_message = self._format_messages_anthropic(messages)
elif self.provider == "amazon":
formatted_messages = self._format_messages_amazon(messages)
else:
formatted_messages = [{"role": "user", "content": [{"text": messages[-1]["content"]}]}]
# Prepare tool configuration in Converse API format
tool_config = None
if tools:
converse_tools = self._convert_tools_to_converse_format(tools)
if converse_tools:
tool_config = {"tools": converse_tools}
# Prepare converse parameters
converse_params = {
"modelId": self.config.model,
"messages": formatted_messages,
"inferenceConfig": {
"maxTokens": self.model_config.get("max_tokens", 2000),
"temperature": self.model_config.get("temperature", 0.1),
"topP": self.model_config.get("top_p", 0.9),
}
}
# Add system message if present (for Anthropic)
if system_message:
converse_params["system"] = [{"text": system_message}]
# Add tool config if present
if tool_config:
converse_params["toolConfig"] = tool_config
# Make API call
response = self.client.converse(**converse_params)
return self._parse_response(response, tools)
def _generate_standard(self, messages: List[Dict[str, str]], stream: bool = False) -> str:
# For Anthropic models, always use Converse API
if self.provider == "anthropic":
formatted_messages, system_message = self._format_messages_anthropic(messages)
# Prepare converse parameters
converse_params = {
"modelId": self.config.model,
"messages": formatted_messages,
"inferenceConfig": {
"maxTokens": self.model_config.get("max_tokens", 2000),
"temperature": self.model_config.get("temperature", 0.1),
"topP": self.model_config.get("top_p", 0.9),
}
}
# Add system message if present
if system_message:
converse_params["system"] = [{"text": system_message}]
# Use converse API for Anthropic models
response = self.client.converse(**converse_params)
# Parse Converse API response
if hasattr(response, 'output') and hasattr(response.output, 'message'):
return response.output.message.content[0].text
elif 'output' in response and 'message' in response['output']:
return response['output']['message']['content'][0]['text']
else:
return str(response)
elif self.provider == "amazon" and "nova" in self.config.model.lower():
# Nova models use converse API even without tools
formatted_messages = self._format_messages_amazon(messages)
input_body = {
"messages": formatted_messages,
"max_tokens": self.model_config.get("max_tokens", 5000),
"temperature": self.model_config.get("temperature", 0.1),
"top_p": self.model_config.get("top_p", 0.9),
}
# Use converse API for Nova models
response = self.client.converse(
modelId=self.config.model,
messages=input_body["messages"],
inferenceConfig={
"maxTokens": input_body["max_tokens"],
"temperature": input_body["temperature"],
"topP": input_body["top_p"],
}
)
return self._parse_response(response)
else:
# For other providers and legacy Amazon models (like Titan)
if self.provider == "amazon":
# Legacy Amazon models need string formatting, not array formatting
prompt = self._format_messages_generic(messages)
else:
prompt = self._format_messages(messages)
input_body = self._prepare_input(prompt)
# Convert to JSON
body = json.dumps(input_body)
# Make API call
response = self.client.invoke_model(
body=body,
modelId=self.config.model,
accept="application/json",
contentType="application/json",
)
return self._parse_response(response)
def list_available_models(self) -> List[Dict[str, Any]]:
try:
bedrock_client = boto3.client("bedrock", **self.config.get_aws_config())
response = bedrock_client.list_foundation_models()
models = []
for model in response["modelSummaries"]:
provider = extract_provider(model["modelId"])
models.append(
{
"model_id": model["modelId"],
"provider": provider,
"model_name": model["modelId"].split(".", 1)[1]
if "." in model["modelId"]
else model["modelId"],
"modelArn": model.get("modelArn", ""),
"providerName": model.get("providerName", ""),
"inputModalities": model.get("inputModalities", []),
"outputModalities": model.get("outputModalities", []),
"responseStreamingSupported": model.get("responseStreamingSupported", False),
}
)
return models
except Exception as e:
logger.warning(f"Could not list models: {e}")
return []
def get_model_capabilities(self) -> Dict[str, Any]:
return {
"model_id": self.config.model,
"provider": self.provider,
"model_name": self.config.model_name,
"supports_tools": self.supports_tools,
"supports_vision": self.supports_vision,
"supports_streaming": self.supports_streaming,
"max_tokens": self.model_config.get("max_tokens", 2000),
}
def validate_model_access(self) -> bool:
try:
# Try to invoke the model with a minimal request
if self.provider == "amazon" and "nova" in self.config.model.lower():
# Test Nova model with converse API
test_messages = [{"role": "user", "content": "test"}]
self.client.converse(
modelId=self.config.model,
messages=test_messages,
inferenceConfig={"maxTokens": 10}
)
else:
# Test other models with invoke_model
test_body = json.dumps({"prompt": "test"})
self.client.invoke_model(
body=test_body,
modelId=self.config.model,
accept="application/json",
contentType="application/json",
)
return True
except Exception:
return False | --- +++ @@ -23,6 +23,7 @@
def extract_provider(model: str) -> str:
+ """Extract provider from model identifier."""
for provider in PROVIDERS:
if re.search(rf"\b{re.escape(provider)}\b", model):
return provider
@@ -30,8 +31,19 @@
class AWSBedrockLLM(LLMBase):
+ """
+ AWS Bedrock LLM integration for Mem0.
+
+ Supports all available Bedrock models with automatic provider detection.
+ """
def __init__(self, config: Optional[Union[AWSBedrockConfig, BaseLlmConfig, Dict]] = None):
+ """
+ Initialize AWS Bedrock LLM.
+
+ Args:
+ config: AWS Bedrock configuration object
+ """
# Convert to AWSBedrockConfig if needed
if config is None:
config = AWSBedrockConfig()
@@ -62,6 +74,7 @@ self._initialize_provider_settings()
def _initialize_aws_client(self):
+ """Initialize AWS Bedrock client with proper credentials."""
try:
aws_config = self.config.get_aws_config()
@@ -87,6 +100,7 @@ raise ValueError(f"AWS Bedrock error: {e}")
def _test_connection(self):
+ """Test connection to AWS Bedrock service."""
try:
# List available models to test connection
bedrock_client = boto3.client("bedrock", **self.config.get_aws_config())
@@ -103,6 +117,7 @@ self.available_models = []
def _initialize_provider_settings(self):
+ """Initialize provider-specific settings and capabilities."""
# Determine capabilities based on provider and model
self.supports_tools = self.provider in ["anthropic", "cohere", "amazon"]
self.supports_vision = self.provider in ["anthropic", "amazon", "meta", "mistral"]
@@ -123,6 +138,7 @@ self._format_messages = self._format_messages_generic
def _format_messages_anthropic(self, messages: List[Dict[str, str]]) -> tuple[List[Dict[str, Any]], Optional[str]]:
+ """Format messages for Anthropic models."""
formatted_messages = []
system_message = None
@@ -144,6 +160,7 @@ return formatted_messages, system_message
def _format_messages_cohere(self, messages: List[Dict[str, str]]) -> str:
+ """Format messages for Cohere models."""
formatted_messages = []
for message in messages:
@@ -154,6 +171,7 @@ return "\n".join(formatted_messages)
def _format_messages_amazon(self, messages: List[Dict[str, str]]) -> List[Dict[str, Any]]:
+ """Format messages for Amazon models (including Nova)."""
formatted_messages = []
for message in messages:
@@ -171,6 +189,7 @@ return formatted_messages
def _format_messages_meta(self, messages: List[Dict[str, str]]) -> str:
+ """Format messages for Meta models."""
formatted_messages = []
for message in messages:
@@ -181,6 +200,7 @@ return "\n".join(formatted_messages)
def _format_messages_mistral(self, messages: List[Dict[str, str]]) -> List[Dict[str, Any]]:
+ """Format messages for Mistral models."""
formatted_messages = []
for message in messages:
@@ -198,6 +218,7 @@ return formatted_messages
def _format_messages_generic(self, messages: List[Dict[str, str]]) -> str:
+ """Generic message formatting for other providers."""
formatted_messages = []
for message in messages:
@@ -208,6 +229,15 @@ return "\n\nHuman: " + "".join(formatted_messages) + "\n\nAssistant:"
def _prepare_input(self, prompt: str) -> Dict[str, Any]:
+ """
+ Prepare input for the current provider's model.
+
+ Args:
+ prompt: Text prompt to process
+
+ Returns:
+ Prepared input dictionary
+ """
# Base configuration
input_body = {"prompt": prompt}
@@ -283,6 +313,15 @@ return input_body
def _convert_tool_format(self, original_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ """
+ Convert tools to Bedrock-compatible format.
+
+ Args:
+ original_tools: List of tool definitions
+
+ Returns:
+ Converted tools in Bedrock format
+ """
new_tools = []
for tool in original_tools:
@@ -313,6 +352,16 @@ def _parse_response(
self, response: Dict[str, Any], tools: Optional[List[Dict]] = None
) -> Union[str, Dict[str, Any]]:
+ """
+ Parse response from Bedrock API.
+
+ Args:
+ response: Raw API response
+ tools: List of tools if used
+
+ Returns:
+ Parsed response
+ """
if tools:
# Handle tool-enabled responses
processed_response = {"tool_calls": []}
@@ -381,6 +430,20 @@ stream: bool = False,
**kwargs,
) -> Union[str, Dict[str, Any]]:
+ """
+ Generate response using AWS Bedrock.
+
+ Args:
+ messages: List of message dictionaries
+ response_format: Response format specification
+ tools: List of tools for function calling
+ tool_choice: Tool choice method
+ stream: Whether to stream the response
+ **kwargs: Additional parameters
+
+ Returns:
+ Generated response
+ """
try:
if tools and self.supports_tools:
# Use converse method for tool-enabled models
@@ -395,6 +458,7 @@
@staticmethod
def _convert_tools_to_converse_format(tools: List[Dict]) -> List[Dict]:
+ """Convert OpenAI-style tools to Converse API format."""
if not tools:
return []
@@ -416,6 +480,7 @@ return converse_tools
def _generate_with_tools(self, messages: List[Dict[str, str]], tools: List[Dict], stream: bool = False) -> Dict[str, Any]:
+ """Generate response with tool calling support using correct message format."""
# Format messages for tool-enabled models
system_message = None
if self.provider == "anthropic":
@@ -457,6 +522,7 @@ return self._parse_response(response, tools)
def _generate_standard(self, messages: List[Dict[str, str]], stream: bool = False) -> str:
+ """Generate standard text response using Converse API for Anthropic models."""
# For Anthropic models, always use Converse API
if self.provider == "anthropic":
formatted_messages, system_message = self._format_messages_anthropic(messages)
@@ -532,6 +598,7 @@ return self._parse_response(response)
def list_available_models(self) -> List[Dict[str, Any]]:
+ """List all available models in the current region."""
try:
bedrock_client = boto3.client("bedrock", **self.config.get_aws_config())
response = bedrock_client.list_foundation_models()
@@ -561,6 +628,7 @@ return []
def get_model_capabilities(self) -> Dict[str, Any]:
+ """Get capabilities of the current model."""
return {
"model_id": self.config.model,
"provider": self.provider,
@@ -572,6 +640,7 @@ }
def validate_model_access(self) -> bool:
+ """Validate if the model is accessible."""
try:
# Try to invoke the model with a minimal request
if self.provider == "amazon" and "nova" in self.config.model.lower():
@@ -593,4 +662,4 @@ )
return True
except Exception:
- return False+ return False
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/aws_bedrock.py |
Write reusable docstrings | import os
from typing import Any, Dict, List, Optional
from mem0.configs.llms.base import BaseLlmConfig
class AWSBedrockConfig(BaseLlmConfig):
def __init__(
self,
model: Optional[str] = None,
temperature: float = 0.1,
max_tokens: int = 2000,
top_p: float = 0.9,
top_k: int = 1,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_region: str = "",
aws_session_token: Optional[str] = None,
aws_profile: Optional[str] = None,
model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
):
super().__init__(
model=model or "anthropic.claude-3-5-sonnet-20240620-v1:0",
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
top_k=top_k,
**kwargs,
)
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_region = aws_region or os.getenv("AWS_REGION", "us-west-2")
self.aws_session_token = aws_session_token
self.aws_profile = aws_profile
self.model_kwargs = model_kwargs or {}
@property
def provider(self) -> str:
if not self.model or "." not in self.model:
return "unknown"
return self.model.split(".")[0]
@property
def model_name(self) -> str:
if not self.model or "." not in self.model:
return self.model
return ".".join(self.model.split(".")[1:])
def get_model_config(self) -> Dict[str, Any]:
base_config = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"top_k": self.top_k,
}
# Add custom model kwargs
base_config.update(self.model_kwargs)
return base_config
def get_aws_config(self) -> Dict[str, Any]:
config = {
"region_name": self.aws_region,
}
if self.aws_access_key_id:
config["aws_access_key_id"] = self.aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
if self.aws_secret_access_key:
config["aws_secret_access_key"] = self.aws_secret_access_key or os.getenv("AWS_SECRET_ACCESS_KEY")
if self.aws_session_token:
config["aws_session_token"] = self.aws_session_token or os.getenv("AWS_SESSION_TOKEN")
if self.aws_profile:
config["profile_name"] = self.aws_profile or os.getenv("AWS_PROFILE")
return config
def validate_model_format(self) -> bool:
if not self.model:
return False
# Check if model follows provider.model-name format
if "." not in self.model:
return False
provider, model_name = self.model.split(".", 1)
# Validate provider
valid_providers = [
"ai21", "amazon", "anthropic", "cohere", "meta", "mistral",
"stability", "writer", "deepseek", "gpt-oss", "perplexity",
"snowflake", "titan", "command", "j2", "llama"
]
if provider not in valid_providers:
return False
# Validate model name is not empty
if not model_name:
return False
return True
def get_supported_regions(self) -> List[str]:
return [
"us-east-1",
"us-west-2",
"us-east-2",
"eu-west-1",
"ap-southeast-1",
"ap-northeast-1",
]
def get_model_capabilities(self) -> Dict[str, Any]:
capabilities = {
"supports_tools": False,
"supports_vision": False,
"supports_streaming": False,
"supports_multimodal": False,
}
if self.provider == "anthropic":
capabilities.update({
"supports_tools": True,
"supports_vision": True,
"supports_streaming": True,
"supports_multimodal": True,
})
elif self.provider == "amazon":
capabilities.update({
"supports_tools": True,
"supports_vision": True,
"supports_streaming": True,
"supports_multimodal": True,
})
elif self.provider == "cohere":
capabilities.update({
"supports_tools": True,
"supports_streaming": True,
})
elif self.provider == "meta":
capabilities.update({
"supports_vision": True,
"supports_streaming": True,
})
elif self.provider == "mistral":
capabilities.update({
"supports_vision": True,
"supports_streaming": True,
})
return capabilities | --- +++ @@ -5,6 +5,11 @@
class AWSBedrockConfig(BaseLlmConfig):
+ """
+ Configuration class for AWS Bedrock LLM integration.
+
+ Supports all available Bedrock models with automatic provider detection.
+ """
def __init__(
self,
@@ -21,6 +26,23 @@ model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
):
+ """
+ Initialize AWS Bedrock configuration.
+
+ Args:
+ model: Bedrock model identifier (e.g., "amazon.nova-3-mini-20241119-v1:0")
+ temperature: Controls randomness (0.0 to 2.0)
+ max_tokens: Maximum tokens to generate
+ top_p: Nucleus sampling parameter (0.0 to 1.0)
+ top_k: Top-k sampling parameter (1 to 40)
+ aws_access_key_id: AWS access key (optional, uses env vars if not provided)
+ aws_secret_access_key: AWS secret key (optional, uses env vars if not provided)
+ aws_region: AWS region for Bedrock service
+ aws_session_token: AWS session token for temporary credentials
+ aws_profile: AWS profile name for credentials
+ model_kwargs: Additional model-specific parameters
+ **kwargs: Additional arguments passed to base class
+ """
super().__init__(
model=model or "anthropic.claude-3-5-sonnet-20240620-v1:0",
temperature=temperature,
@@ -39,17 +61,20 @@
@property
def provider(self) -> str:
+ """Get the provider from the model identifier."""
if not self.model or "." not in self.model:
return "unknown"
return self.model.split(".")[0]
@property
def model_name(self) -> str:
+ """Get the model name without provider prefix."""
if not self.model or "." not in self.model:
return self.model
return ".".join(self.model.split(".")[1:])
def get_model_config(self) -> Dict[str, Any]:
+ """Get model-specific configuration parameters."""
base_config = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
@@ -63,6 +88,7 @@ return base_config
def get_aws_config(self) -> Dict[str, Any]:
+ """Get AWS configuration parameters."""
config = {
"region_name": self.aws_region,
}
@@ -82,6 +108,12 @@ return config
def validate_model_format(self) -> bool:
+ """
+ Validate that the model identifier follows Bedrock naming convention.
+
+ Returns:
+ True if valid, False otherwise
+ """
if not self.model:
return False
@@ -108,6 +140,7 @@ return True
def get_supported_regions(self) -> List[str]:
+ """Get list of AWS regions that support Bedrock."""
return [
"us-east-1",
"us-west-2",
@@ -118,6 +151,7 @@ ]
def get_model_capabilities(self) -> Dict[str, Any]:
+ """Get model capabilities based on provider."""
capabilities = {
"supports_tools": False,
"supports_vision": False,
@@ -155,4 +189,4 @@ "supports_streaming": True,
})
- return capabilities+ return capabilities
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/aws_bedrock.py |
Write docstrings for backend logic | import json
import logging
import os
from typing import Dict, List, Optional, Union
from openai import OpenAI
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.openai import OpenAIConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class OpenAILLM(LLMBase):
def __init__(self, config: Optional[Union[BaseLlmConfig, OpenAIConfig, Dict]] = None):
# Convert to OpenAIConfig if needed
if config is None:
config = OpenAIConfig()
elif isinstance(config, dict):
config = OpenAIConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, OpenAIConfig):
# Convert BaseLlmConfig to OpenAIConfig
config = OpenAIConfig(
model=config.model,
temperature=config.temperature,
api_key=config.api_key,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=config.enable_vision,
vision_details=config.vision_details,
http_client_proxies=config.http_client,
)
super().__init__(config)
if not self.config.model:
self.config.model = "gpt-4.1-nano-2025-04-14"
if os.environ.get("OPENROUTER_API_KEY"): # Use OpenRouter
self.client = OpenAI(
api_key=os.environ.get("OPENROUTER_API_KEY"),
base_url=self.config.openrouter_base_url
or os.getenv("OPENROUTER_API_BASE")
or "https://openrouter.ai/api/v1",
)
else:
api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
base_url = self.config.openai_base_url or os.getenv("OPENAI_BASE_URL") or "https://api.openai.com/v1"
self.client = OpenAI(api_key=api_key, base_url=base_url)
def _parse_response(self, response, tools):
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
params = self._get_supported_params(messages=messages, **kwargs)
params.update({
"model": self.config.model,
"messages": messages,
})
if os.getenv("OPENROUTER_API_KEY"):
openrouter_params = {}
if self.config.models:
openrouter_params["models"] = self.config.models
openrouter_params["route"] = self.config.route
params.pop("model")
if self.config.site_url and self.config.app_name:
extra_headers = {
"HTTP-Referer": self.config.site_url,
"X-Title": self.config.app_name,
}
openrouter_params["extra_headers"] = extra_headers
params.update(**openrouter_params)
else:
openai_specific_generation_params = ["store"]
for param in openai_specific_generation_params:
if hasattr(self.config, param):
params[param] = getattr(self.config, param)
if response_format:
params["response_format"] = response_format
if tools: # TODO: Remove tools if no issues found with new memory addition logic
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
parsed_response = self._parse_response(response, tools)
if self.config.response_callback:
try:
self.config.response_callback(self, response, params)
except Exception as e:
# Log error but don't propagate
logging.error(f"Error due to callback: {e}")
pass
return parsed_response | --- +++ @@ -51,6 +51,16 @@ self.client = OpenAI(api_key=api_key, base_url=base_url)
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
if tools:
processed_response = {
"content": response.choices[0].message.content,
@@ -78,6 +88,19 @@ tool_choice: str = "auto",
**kwargs,
):
+ """
+ Generate a JSON response based on the given messages using OpenAI.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+ **kwargs: Additional OpenAI-specific parameters.
+
+ Returns:
+ json: The generated response.
+ """
params = self._get_supported_params(messages=messages, **kwargs)
params.update({
@@ -121,4 +144,4 @@ # Log error but don't propagate
logging.error(f"Error due to callback: {e}")
pass
- return parsed_response+ return parsed_response
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/openai.py |
Add docstrings with type hints explained | from abc import ABC
from typing import Dict, Optional, Union
import httpx
class BaseLlmConfig(ABC):
def __init__(
self,
model: Optional[Union[str, Dict]] = None,
temperature: float = 0.1,
api_key: Optional[str] = None,
max_tokens: int = 2000,
top_p: float = 0.1,
top_k: int = 1,
enable_vision: bool = False,
vision_details: Optional[str] = "auto",
http_client_proxies: Optional[Union[Dict, str]] = None,
):
self.model = model
self.temperature = temperature
self.api_key = api_key
self.max_tokens = max_tokens
self.top_p = top_p
self.top_k = top_k
self.enable_vision = enable_vision
self.vision_details = vision_details
self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None | --- +++ @@ -5,6 +5,13 @@
class BaseLlmConfig(ABC):
+ """
+ Base configuration for LLMs with only common parameters.
+ Provider-specific configurations should be handled by separate config classes.
+
+ This class contains only the parameters that are common across all LLM providers.
+ For provider-specific parameters, use the appropriate provider config class.
+ """
def __init__(
self,
@@ -18,6 +25,32 @@ vision_details: Optional[str] = "auto",
http_client_proxies: Optional[Union[Dict, str]] = None,
):
+ """
+ Initialize a base configuration class instance for the LLM.
+
+ Args:
+ model: The model identifier to use (e.g., "gpt-4.1-nano-2025-04-14", "claude-3-5-sonnet-20240620")
+ Defaults to None (will be set by provider-specific configs)
+ temperature: Controls the randomness of the model's output.
+ Higher values (closer to 1) make output more random, lower values make it more deterministic.
+ Range: 0.0 to 2.0. Defaults to 0.1
+ api_key: API key for the LLM provider. If None, will try to get from environment variables.
+ Defaults to None
+ max_tokens: Maximum number of tokens to generate in the response.
+ Range: 1 to 4096 (varies by model). Defaults to 2000
+ top_p: Nucleus sampling parameter. Controls diversity via nucleus sampling.
+ Higher values (closer to 1) make word selection more diverse.
+ Range: 0.0 to 1.0. Defaults to 0.1
+ top_k: Top-k sampling parameter. Limits the number of tokens considered for each step.
+ Higher values make word selection more diverse.
+ Range: 1 to 40. Defaults to 1
+ enable_vision: Whether to enable vision capabilities for the model.
+ Only applicable to vision-enabled models. Defaults to False
+ vision_details: Level of detail for vision processing.
+ Options: "low", "high", "auto". Defaults to "auto"
+ http_client_proxies: Proxy settings for HTTP client.
+ Can be a dict or string. Defaults to None
+ """
self.model = model
self.temperature = temperature
self.api_key = api_key
@@ -26,4 +59,4 @@ self.top_k = top_k
self.enable_vision = enable_vision
self.vision_details = vision_details
- self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None+ self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/configs/llms/base.py |
Write beginner-friendly docstrings | import json
import os
from typing import Dict, List, Optional
try:
from together import Together
except ImportError:
raise ImportError("The 'together' library is required. Please install it using 'pip install together'.")
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class TogetherLLM(LLMBase):
def __init__(self, config: Optional[BaseLlmConfig] = None):
super().__init__(config)
if not self.config.model:
self.config.model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
api_key = self.config.api_key or os.getenv("TOGETHER_API_KEY")
self.client = Together(api_key=api_key)
def _parse_response(self, response, tools):
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
):
params = {
"model": self.config.model,
"messages": messages,
"temperature": self.config.temperature,
"max_tokens": self.config.max_tokens,
"top_p": self.config.top_p,
}
if response_format:
params["response_format"] = response_format
if tools: # TODO: Remove tools if no issues found with new memory addition logic
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | --- +++ @@ -23,6 +23,16 @@ self.client = Together(api_key=api_key)
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
if tools:
processed_response = {
"content": response.choices[0].message.content,
@@ -49,6 +59,18 @@ tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
):
+ """
+ Generate a response based on the given messages using TogetherAI.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+ Returns:
+ str: The generated response.
+ """
params = {
"model": self.config.model,
"messages": messages,
@@ -63,4 +85,4 @@ params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
- return self._parse_response(response, tools)+ return self._parse_response(response, tools)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/together.py |
Add documentation for all methods | from typing import List, Dict, Any, Union
import numpy as np
from mem0.reranker.base import BaseReranker
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.huggingface import HuggingFaceRerankerConfig
try:
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
TRANSFORMERS_AVAILABLE = True
except ImportError:
TRANSFORMERS_AVAILABLE = False
class HuggingFaceReranker(BaseReranker):
def __init__(self, config: Union[BaseRerankerConfig, HuggingFaceRerankerConfig, Dict]):
if not TRANSFORMERS_AVAILABLE:
raise ImportError("transformers package is required for HuggingFaceReranker. Install with: pip install transformers torch")
# Convert to HuggingFaceRerankerConfig if needed
if isinstance(config, dict):
config = HuggingFaceRerankerConfig(**config)
elif isinstance(config, BaseRerankerConfig) and not isinstance(config, HuggingFaceRerankerConfig):
# Convert BaseRerankerConfig to HuggingFaceRerankerConfig with defaults
config = HuggingFaceRerankerConfig(
provider=getattr(config, 'provider', 'huggingface'),
model=getattr(config, 'model', 'BAAI/bge-reranker-base'),
api_key=getattr(config, 'api_key', None),
top_k=getattr(config, 'top_k', None),
device=None, # Will auto-detect
batch_size=32, # Default
max_length=512, # Default
normalize=True, # Default
)
self.config = config
# Set device
if self.config.device is None:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
else:
self.device = self.config.device
# Load model and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.config.model)
self.model = AutoModelForSequenceClassification.from_pretrained(self.config.model)
self.model.to(self.device)
self.model.eval()
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
scores = []
# Process documents in batches
for i in range(0, len(doc_texts), self.config.batch_size):
batch_docs = doc_texts[i:i + self.config.batch_size]
batch_pairs = [[query, doc] for doc in batch_docs]
# Tokenize batch
inputs = self.tokenizer(
batch_pairs,
padding=True,
truncation=True,
max_length=self.config.max_length,
return_tensors="pt"
).to(self.device)
# Get scores
with torch.no_grad():
outputs = self.model(**inputs)
batch_scores = outputs.logits.squeeze(-1).cpu().numpy()
# Handle single item case
if batch_scores.ndim == 0:
batch_scores = [float(batch_scores)]
else:
batch_scores = batch_scores.tolist()
scores.extend(batch_scores)
# Normalize scores if requested
if self.config.normalize:
scores = np.array(scores)
scores = (scores - scores.min()) / (scores.max() - scores.min() + 1e-8)
scores = scores.tolist()
# Combine documents with scores
doc_score_pairs = list(zip(documents, scores))
# Sort by score (descending)
doc_score_pairs.sort(key=lambda x: x[1], reverse=True)
# Apply top_k limit
final_top_k = top_k or self.config.top_k
if final_top_k:
doc_score_pairs = doc_score_pairs[:final_top_k]
# Create reranked results
reranked_docs = []
for doc, score in doc_score_pairs:
reranked_doc = doc.copy()
reranked_doc['rerank_score'] = float(score)
reranked_docs.append(reranked_doc)
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
final_top_k = top_k or self.config.top_k
return documents[:final_top_k] if final_top_k else documents | --- +++ @@ -14,8 +14,15 @@
class HuggingFaceReranker(BaseReranker):
+ """HuggingFace Transformers based reranker implementation."""
def __init__(self, config: Union[BaseRerankerConfig, HuggingFaceRerankerConfig, Dict]):
+ """
+ Initialize HuggingFace reranker.
+
+ Args:
+ config: Configuration object with reranker parameters
+ """
if not TRANSFORMERS_AVAILABLE:
raise ImportError("transformers package is required for HuggingFaceReranker. Install with: pip install transformers torch")
@@ -50,6 +57,17 @@ self.model.eval()
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
+ """
+ Rerank documents using HuggingFace cross-encoder model.
+
+ Args:
+ query: The search query
+ documents: List of documents to rerank
+ top_k: Number of top documents to return
+
+ Returns:
+ List of reranked documents with rerank_score
+ """
if not documents:
return documents
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/reranker/huggingface_reranker.py |
Write reusable docstrings | import re
from typing import List, Dict, Any, Union
from mem0.reranker.base import BaseReranker
from mem0.utils.factory import LlmFactory
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.llm import LLMRerankerConfig
class LLMReranker(BaseReranker):
def __init__(self, config: Union[BaseRerankerConfig, LLMRerankerConfig, Dict]):
# Convert to LLMRerankerConfig if needed
if isinstance(config, dict):
config = LLMRerankerConfig(**config)
elif isinstance(config, BaseRerankerConfig) and not isinstance(config, LLMRerankerConfig):
# Convert BaseRerankerConfig to LLMRerankerConfig with defaults
config = LLMRerankerConfig(
provider=getattr(config, 'provider', 'openai'),
model=getattr(config, 'model', 'gpt-4o-mini'),
api_key=getattr(config, 'api_key', None),
top_k=getattr(config, 'top_k', None),
temperature=0.0, # Default for reranking
max_tokens=100, # Default for reranking
)
self.config = config
# Create LLM configuration for the factory
llm_config = {
"model": self.config.model,
"temperature": self.config.temperature,
"max_tokens": self.config.max_tokens,
}
# Add API key if provided
if self.config.api_key:
llm_config["api_key"] = self.config.api_key
# Initialize LLM using the factory
self.llm = LlmFactory.create(self.config.provider, llm_config)
# Default scoring prompt
self.scoring_prompt = getattr(self.config, 'scoring_prompt', None) or self._get_default_prompt()
def _get_default_prompt(self) -> str:
return """You are a relevance scoring assistant. Given a query and a document, you need to score how relevant the document is to the query.
Score the relevance on a scale from 0.0 to 1.0, where:
- 1.0 = Perfectly relevant and directly answers the query
- 0.8-0.9 = Highly relevant with good information
- 0.6-0.7 = Moderately relevant with some useful information
- 0.4-0.5 = Slightly relevant with limited useful information
- 0.0-0.3 = Not relevant or no useful information
Query: "{query}"
Document: "{document}"
Provide only a single numerical score between 0.0 and 1.0. Do not include any explanation or additional text."""
def _extract_score(self, response_text: str) -> float:
# Look for decimal numbers between 0.0 and 1.0
pattern = r'\b([01](?:\.\d+)?)\b'
matches = re.findall(pattern, response_text)
if matches:
score = float(matches[0])
return min(max(score, 0.0), 1.0) # Clamp between 0.0 and 1.0
# Fallback: return 0.5 if no valid score found
return 0.5
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
if not documents:
return documents
scored_docs = []
for doc in documents:
# Extract text content
if 'memory' in doc:
doc_text = doc['memory']
elif 'text' in doc:
doc_text = doc['text']
elif 'content' in doc:
doc_text = doc['content']
else:
doc_text = str(doc)
try:
# Generate scoring prompt
prompt = self.scoring_prompt.format(query=query, document=doc_text)
# Get LLM response
response = self.llm.generate_response(
messages=[{"role": "user", "content": prompt}]
)
# Extract score from response
score = self._extract_score(response)
# Create scored document
scored_doc = doc.copy()
scored_doc['rerank_score'] = score
scored_docs.append(scored_doc)
except Exception:
# Fallback: assign neutral score if scoring fails
scored_doc = doc.copy()
scored_doc['rerank_score'] = 0.5
scored_docs.append(scored_doc)
# Sort by relevance score in descending order
scored_docs.sort(key=lambda x: x['rerank_score'], reverse=True)
# Apply top_k limit
if top_k:
scored_docs = scored_docs[:top_k]
elif self.config.top_k:
scored_docs = scored_docs[:self.config.top_k]
return scored_docs | --- +++ @@ -8,8 +8,15 @@
class LLMReranker(BaseReranker):
+ """LLM-based reranker implementation."""
def __init__(self, config: Union[BaseRerankerConfig, LLMRerankerConfig, Dict]):
+ """
+ Initialize LLM reranker.
+
+ Args:
+ config: Configuration object with reranker parameters
+ """
# Convert to LLMRerankerConfig if needed
if isinstance(config, dict):
config = LLMRerankerConfig(**config)
@@ -44,6 +51,7 @@ self.scoring_prompt = getattr(self.config, 'scoring_prompt', None) or self._get_default_prompt()
def _get_default_prompt(self) -> str:
+ """Get the default scoring prompt template."""
return """You are a relevance scoring assistant. Given a query and a document, you need to score how relevant the document is to the query.
Score the relevance on a scale from 0.0 to 1.0, where:
@@ -59,6 +67,7 @@ Provide only a single numerical score between 0.0 and 1.0. Do not include any explanation or additional text."""
def _extract_score(self, response_text: str) -> float:
+ """Extract numerical score from LLM response."""
# Look for decimal numbers between 0.0 and 1.0
pattern = r'\b([01](?:\.\d+)?)\b'
matches = re.findall(pattern, response_text)
@@ -71,6 +80,17 @@ return 0.5
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
+ """
+ Rerank documents using LLM scoring.
+
+ Args:
+ query: The search query
+ documents: List of documents to rerank
+ top_k: Number of top documents to return
+
+ Returns:
+ List of reranked documents with rerank_score
+ """
if not documents:
return documents
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/reranker/llm_reranker.py |
Help me document legacy Python code | import hashlib
import logging
import re
from mem0.configs.prompts import (
AGENT_MEMORY_EXTRACTION_PROMPT,
FACT_RETRIEVAL_PROMPT,
USER_MEMORY_EXTRACTION_PROMPT,
)
logger = logging.getLogger(__name__)
def get_fact_retrieval_messages(message, is_agent_memory=False):
if is_agent_memory:
return AGENT_MEMORY_EXTRACTION_PROMPT, f"Input:\n{message}"
else:
return USER_MEMORY_EXTRACTION_PROMPT, f"Input:\n{message}"
def get_fact_retrieval_messages_legacy(message):
return FACT_RETRIEVAL_PROMPT, f"Input:\n{message}"
def ensure_json_instruction(system_prompt, user_prompt):
combined = (system_prompt + user_prompt).lower()
if "json" not in combined:
system_prompt += (
"\n\nYou must return your response in valid JSON format "
"with a 'facts' key containing an array of strings."
)
return system_prompt, user_prompt
def parse_messages(messages):
response = ""
for msg in messages:
if msg["role"] == "system":
response += f"system: {msg['content']}\n"
if msg["role"] == "user":
response += f"user: {msg['content']}\n"
if msg["role"] == "assistant":
response += f"assistant: {msg['content']}\n"
return response
def format_entities(entities):
if not entities:
return ""
formatted_lines = []
for entity in entities:
simplified = f"{entity['source']} -- {entity['relationship']} -- {entity['destination']}"
formatted_lines.append(simplified)
return "\n".join(formatted_lines)
def normalize_facts(raw_facts):
if not raw_facts:
return []
normalized = []
for item in raw_facts:
if isinstance(item, str):
fact = item
elif isinstance(item, dict):
fact = item.get("fact") or item.get("text")
if fact is None:
logger.warning("Unexpected fact shape from LLM, skipping: %s", item)
continue
else:
fact = str(item)
if fact:
normalized.append(fact)
return normalized
def remove_code_blocks(content: str) -> str:
pattern = r"^```[a-zA-Z0-9]*\n([\s\S]*?)\n```$"
match = re.match(pattern, content.strip())
match_res=match.group(1).strip() if match else content.strip()
return re.sub(r"<think>.*?</think>", "", match_res, flags=re.DOTALL).strip()
def extract_json(text):
text = text.strip()
match = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
if match:
json_str = match.group(1)
else:
json_str = text # assume it's raw JSON
return json_str
def get_image_description(image_obj, llm, vision_details):
if isinstance(image_obj, str):
messages = [
{
"role": "user",
"content": [
{
"type": "text",
"text": "A user is providing an image. Provide a high level description of the image and do not include any additional text.",
},
{"type": "image_url", "image_url": {"url": image_obj, "detail": vision_details}},
],
},
]
else:
messages = [image_obj]
response = llm.generate_response(messages=messages)
return response
def parse_vision_messages(messages, llm=None, vision_details="auto"):
returned_messages = []
for msg in messages:
if msg["role"] == "system":
returned_messages.append(msg)
continue
# Handle message content
if isinstance(msg["content"], list):
# Multiple image URLs in content
description = get_image_description(msg, llm, vision_details)
returned_messages.append({"role": msg["role"], "content": description})
elif isinstance(msg["content"], dict) and msg["content"].get("type") == "image_url":
# Single image content
image_url = msg["content"]["image_url"]["url"]
try:
description = get_image_description(image_url, llm, vision_details)
returned_messages.append({"role": msg["role"], "content": description})
except Exception:
raise Exception(f"Error while downloading {image_url}.")
else:
# Regular text content
returned_messages.append(msg)
return returned_messages
def process_telemetry_filters(filters):
if filters is None:
return {}
encoded_ids = {}
if "user_id" in filters:
encoded_ids["user_id"] = hashlib.md5(filters["user_id"].encode()).hexdigest()
if "agent_id" in filters:
encoded_ids["agent_id"] = hashlib.md5(filters["agent_id"].encode()).hexdigest()
if "run_id" in filters:
encoded_ids["run_id"] = hashlib.md5(filters["run_id"].encode()).hexdigest()
return list(filters.keys()), encoded_ids
def sanitize_relationship_for_cypher(relationship) -> str:
char_map = {
"...": "_ellipsis_",
"…": "_ellipsis_",
"。": "_period_",
",": "_comma_",
";": "_semicolon_",
":": "_colon_",
"!": "_exclamation_",
"?": "_question_",
"(": "_lparen_",
")": "_rparen_",
"【": "_lbracket_",
"】": "_rbracket_",
"《": "_langle_",
"》": "_rangle_",
"'": "_apostrophe_",
'"': "_quote_",
"\\": "_backslash_",
"/": "_slash_",
"|": "_pipe_",
"&": "_ampersand_",
"=": "_equals_",
"+": "_plus_",
"*": "_asterisk_",
"^": "_caret_",
"%": "_percent_",
"$": "_dollar_",
"#": "_hash_",
"@": "_at_",
"!": "_bang_",
"?": "_question_",
"(": "_lparen_",
")": "_rparen_",
"[": "_lbracket_",
"]": "_rbracket_",
"{": "_lbrace_",
"}": "_rbrace_",
"<": "_langle_",
">": "_rangle_",
}
# Apply replacements and clean up
sanitized = relationship
for old, new in char_map.items():
sanitized = sanitized.replace(old, new)
return re.sub(r"_+", "_", sanitized).strip("_")
| --- +++ @@ -12,6 +12,15 @@
def get_fact_retrieval_messages(message, is_agent_memory=False):
+ """Get fact retrieval messages based on the memory type.
+
+ Args:
+ message: The message content to extract facts from
+ is_agent_memory: If True, use agent memory extraction prompt, else use user memory extraction prompt
+
+ Returns:
+ tuple: (system_prompt, user_prompt)
+ """
if is_agent_memory:
return AGENT_MEMORY_EXTRACTION_PROMPT, f"Input:\n{message}"
else:
@@ -19,10 +28,26 @@
def get_fact_retrieval_messages_legacy(message):
+ """Legacy function for backward compatibility."""
return FACT_RETRIEVAL_PROMPT, f"Input:\n{message}"
def ensure_json_instruction(system_prompt, user_prompt):
+ """Ensure the word 'json' appears in the prompts when using json_object response format.
+
+ OpenAI's API requires the word 'json' to appear in the messages when
+ response_format is set to {"type": "json_object"}. When users provide a
+ custom_fact_extraction_prompt that doesn't include 'json', this causes a
+ 400 error. This function appends a JSON format instruction to the system
+ prompt if 'json' is not already present in either prompt.
+
+ Args:
+ system_prompt: The system prompt string
+ user_prompt: The user prompt string
+
+ Returns:
+ tuple: (system_prompt, user_prompt) with JSON instruction added if needed
+ """
combined = (system_prompt + user_prompt).lower()
if "json" not in combined:
system_prompt += (
@@ -56,6 +81,12 @@ return "\n".join(formatted_lines)
def normalize_facts(raw_facts):
+ """Normalize LLM-extracted facts to a list of strings.
+
+ Smaller LLMs (e.g. llama3.1:8b) sometimes return facts as objects
+ like {"fact": "..."} or {"text": "..."} instead of plain strings.
+ This mirrors the TypeScript FactRetrievalSchema validation.
+ """
if not raw_facts:
return []
normalized = []
@@ -75,6 +106,14 @@
def remove_code_blocks(content: str) -> str:
+ """
+ Removes enclosing code block markers ```[language] and ``` from a given string.
+
+ Remarks:
+ - The function uses a regex pattern to match code blocks that may start with ``` followed by an optional language tag (letters or numbers) and end with ```.
+ - If a code block is detected, it returns only the inner content, stripping out the markers.
+ - If no code block markers are found, the original content is returned as-is.
+ """
pattern = r"^```[a-zA-Z0-9]*\n([\s\S]*?)\n```$"
match = re.match(pattern, content.strip())
match_res=match.group(1).strip() if match else content.strip()
@@ -83,6 +122,10 @@
def extract_json(text):
+ """
+ Extracts JSON content from a string, removing enclosing triple backticks and optional 'json' tag if present.
+ If no code block is found, returns the text as-is.
+ """
text = text.strip()
match = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
if match:
@@ -93,6 +136,9 @@
def get_image_description(image_obj, llm, vision_details):
+ """
+ Get the description of the image
+ """
if isinstance(image_obj, str):
messages = [
@@ -115,6 +161,9 @@
def parse_vision_messages(messages, llm=None, vision_details="auto"):
+ """
+ Parse the vision messages from the messages
+ """
returned_messages = []
for msg in messages:
if msg["role"] == "system":
@@ -142,6 +191,9 @@
def process_telemetry_filters(filters):
+ """
+ Process the telemetry filters
+ """
if filters is None:
return {}
@@ -157,6 +209,7 @@
def sanitize_relationship_for_cypher(relationship) -> str:
+ """Sanitize relationship text for Cypher queries by replacing problematic characters."""
char_map = {
"...": "_ellipsis_",
"…": "_ellipsis_",
@@ -204,3 +257,4 @@ sanitized = sanitized.replace(old, new)
return re.sub(r"_+", "_", sanitized).strip("_")
+
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/memory/utils.py |
Write docstrings for this repository | import os
from typing import List, Dict, Any
from mem0.reranker.base import BaseReranker
try:
import cohere
COHERE_AVAILABLE = True
except ImportError:
COHERE_AVAILABLE = False
class CohereReranker(BaseReranker):
def __init__(self, config):
if not COHERE_AVAILABLE:
raise ImportError("cohere package is required for CohereReranker. Install with: pip install cohere")
self.config = config
self.api_key = config.api_key or os.getenv("COHERE_API_KEY")
if not self.api_key:
raise ValueError("Cohere API key is required. Set COHERE_API_KEY environment variable or pass api_key in config.")
self.model = config.model
self.client = cohere.Client(self.api_key)
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
# Call Cohere rerank API
response = self.client.rerank(
model=self.model,
query=query,
documents=doc_texts,
top_n=top_k or self.config.top_k or len(documents),
return_documents=self.config.return_documents,
max_chunks_per_doc=self.config.max_chunks_per_doc,
)
# Create reranked results
reranked_docs = []
for result in response.results:
original_doc = documents[result.index].copy()
original_doc['rerank_score'] = result.relevance_score
reranked_docs.append(original_doc)
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
return documents[:top_k] if top_k else documents | --- +++ @@ -11,8 +11,15 @@
class CohereReranker(BaseReranker):
+ """Cohere-based reranker implementation."""
def __init__(self, config):
+ """
+ Initialize Cohere reranker.
+
+ Args:
+ config: CohereRerankerConfig object with configuration parameters
+ """
if not COHERE_AVAILABLE:
raise ImportError("cohere package is required for CohereReranker. Install with: pip install cohere")
@@ -25,6 +32,17 @@ self.client = cohere.Client(self.api_key)
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
+ """
+ Rerank documents using Cohere's rerank API.
+
+ Args:
+ query: The search query
+ documents: List of documents to rerank
+ top_k: Number of top documents to return
+
+ Returns:
+ List of reranked documents with rerank_score
+ """
if not documents:
return documents
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/reranker/cohere_reranker.py |
Add docstrings including usage examples | import logging
import sqlite3
import threading
import uuid
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
class SQLiteManager:
def __init__(self, db_path: str = ":memory:"):
self.db_path = db_path
self.connection = sqlite3.connect(self.db_path, check_same_thread=False)
self._lock = threading.Lock()
self._migrate_history_table()
self._create_history_table()
def _migrate_history_table(self) -> None:
with self._lock:
try:
# Start a transaction
self.connection.execute("BEGIN")
cur = self.connection.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='history'")
if cur.fetchone() is None:
self.connection.execute("COMMIT")
return # nothing to migrate
cur.execute("PRAGMA table_info(history)")
old_cols = {row[1] for row in cur.fetchall()}
expected_cols = {
"id",
"memory_id",
"old_memory",
"new_memory",
"event",
"created_at",
"updated_at",
"is_deleted",
"actor_id",
"role",
}
if old_cols == expected_cols:
self.connection.execute("COMMIT")
return
logger.info("Migrating history table to new schema (no convo columns).")
# Clean up any existing history_old table from previous failed migration
cur.execute("DROP TABLE IF EXISTS history_old")
# Rename the current history table
cur.execute("ALTER TABLE history RENAME TO history_old")
# Create the new history table with updated schema
cur.execute(
"""
CREATE TABLE history (
id TEXT PRIMARY KEY,
memory_id TEXT,
old_memory TEXT,
new_memory TEXT,
event TEXT,
created_at DATETIME,
updated_at DATETIME,
is_deleted INTEGER,
actor_id TEXT,
role TEXT
)
"""
)
# Copy data from old table to new table
intersecting = list(expected_cols & old_cols)
if intersecting:
cols_csv = ", ".join(intersecting)
cur.execute(f"INSERT INTO history ({cols_csv}) SELECT {cols_csv} FROM history_old")
# Drop the old table
cur.execute("DROP TABLE history_old")
# Commit the transaction
self.connection.execute("COMMIT")
logger.info("History table migration completed successfully.")
except Exception as e:
# Rollback the transaction on any error
self.connection.execute("ROLLBACK")
logger.error(f"History table migration failed: {e}")
raise
def _create_history_table(self) -> None:
with self._lock:
try:
self.connection.execute("BEGIN")
self.connection.execute(
"""
CREATE TABLE IF NOT EXISTS history (
id TEXT PRIMARY KEY,
memory_id TEXT,
old_memory TEXT,
new_memory TEXT,
event TEXT,
created_at DATETIME,
updated_at DATETIME,
is_deleted INTEGER,
actor_id TEXT,
role TEXT
)
"""
)
self.connection.execute("COMMIT")
except Exception as e:
self.connection.execute("ROLLBACK")
logger.error(f"Failed to create history table: {e}")
raise
def add_history(
self,
memory_id: str,
old_memory: Optional[str],
new_memory: Optional[str],
event: str,
*,
created_at: Optional[str] = None,
updated_at: Optional[str] = None,
is_deleted: int = 0,
actor_id: Optional[str] = None,
role: Optional[str] = None,
) -> None:
with self._lock:
try:
self.connection.execute("BEGIN")
self.connection.execute(
"""
INSERT INTO history (
id, memory_id, old_memory, new_memory, event,
created_at, updated_at, is_deleted, actor_id, role
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
str(uuid.uuid4()),
memory_id,
old_memory,
new_memory,
event,
created_at,
updated_at,
is_deleted,
actor_id,
role,
),
)
self.connection.execute("COMMIT")
except Exception as e:
self.connection.execute("ROLLBACK")
logger.error(f"Failed to add history record: {e}")
raise
def get_history(self, memory_id: str) -> List[Dict[str, Any]]:
with self._lock:
cur = self.connection.execute(
"""
SELECT id, memory_id, old_memory, new_memory, event,
created_at, updated_at, is_deleted, actor_id, role
FROM history
WHERE memory_id = ?
ORDER BY created_at ASC, DATETIME(updated_at) ASC
""",
(memory_id,),
)
rows = cur.fetchall()
return [
{
"id": r[0],
"memory_id": r[1],
"old_memory": r[2],
"new_memory": r[3],
"event": r[4],
"created_at": r[5],
"updated_at": r[6],
"is_deleted": bool(r[7]),
"actor_id": r[8],
"role": r[9],
}
for r in rows
]
def reset(self) -> None:
with self._lock:
try:
self.connection.execute("BEGIN")
self.connection.execute("DROP TABLE IF EXISTS history")
self.connection.execute("COMMIT")
self._create_history_table()
except Exception as e:
self.connection.execute("ROLLBACK")
logger.error(f"Failed to reset history table: {e}")
raise
def close(self) -> None:
if self.connection:
self.connection.close()
self.connection = None
def __del__(self):
self.close() | --- +++ @@ -16,6 +16,11 @@ self._create_history_table()
def _migrate_history_table(self) -> None:
+ """
+ If a pre-existing history table had the old group-chat columns,
+ rename it, create the new schema, copy the intersecting data, then
+ drop the old table.
+ """
with self._lock:
try:
# Start a transaction
@@ -192,6 +197,7 @@ ]
def reset(self) -> None:
+ """Drop and recreate the history table."""
with self._lock:
try:
self.connection.execute("BEGIN")
@@ -209,4 +215,4 @@ self.connection = None
def __del__(self):
- self.close()+ self.close()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/memory/storage.py |
Generate consistent documentation across files | from typing import List, Dict, Any, Union
import numpy as np
from mem0.reranker.base import BaseReranker
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.sentence_transformer import SentenceTransformerRerankerConfig
try:
from sentence_transformers import SentenceTransformer
SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
SENTENCE_TRANSFORMERS_AVAILABLE = False
class SentenceTransformerReranker(BaseReranker):
def __init__(self, config: Union[BaseRerankerConfig, SentenceTransformerRerankerConfig, Dict]):
if not SENTENCE_TRANSFORMERS_AVAILABLE:
raise ImportError("sentence-transformers package is required for SentenceTransformerReranker. Install with: pip install sentence-transformers")
# Convert to SentenceTransformerRerankerConfig if needed
if isinstance(config, dict):
config = SentenceTransformerRerankerConfig(**config)
elif isinstance(config, BaseRerankerConfig) and not isinstance(config, SentenceTransformerRerankerConfig):
# Convert BaseRerankerConfig to SentenceTransformerRerankerConfig with defaults
config = SentenceTransformerRerankerConfig(
provider=getattr(config, 'provider', 'sentence_transformer'),
model=getattr(config, 'model', 'cross-encoder/ms-marco-MiniLM-L-6-v2'),
api_key=getattr(config, 'api_key', None),
top_k=getattr(config, 'top_k', None),
device=None, # Will auto-detect
batch_size=32, # Default
show_progress_bar=False, # Default
)
self.config = config
self.model = SentenceTransformer(self.config.model, device=self.config.device)
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
# Create query-document pairs
pairs = [[query, doc_text] for doc_text in doc_texts]
# Get similarity scores
scores = self.model.predict(pairs)
if isinstance(scores, np.ndarray):
scores = scores.tolist()
# Combine documents with scores
doc_score_pairs = list(zip(documents, scores))
# Sort by score (descending)
doc_score_pairs.sort(key=lambda x: x[1], reverse=True)
# Apply top_k limit
final_top_k = top_k or self.config.top_k
if final_top_k:
doc_score_pairs = doc_score_pairs[:final_top_k]
# Create reranked results
reranked_docs = []
for doc, score in doc_score_pairs:
reranked_doc = doc.copy()
reranked_doc['rerank_score'] = float(score)
reranked_docs.append(reranked_doc)
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
final_top_k = top_k or self.config.top_k
return documents[:final_top_k] if final_top_k else documents | --- +++ @@ -13,8 +13,15 @@
class SentenceTransformerReranker(BaseReranker):
+ """Sentence Transformer based reranker implementation."""
def __init__(self, config: Union[BaseRerankerConfig, SentenceTransformerRerankerConfig, Dict]):
+ """
+ Initialize Sentence Transformer reranker.
+
+ Args:
+ config: Configuration object with reranker parameters
+ """
if not SENTENCE_TRANSFORMERS_AVAILABLE:
raise ImportError("sentence-transformers package is required for SentenceTransformerReranker. Install with: pip install sentence-transformers")
@@ -37,6 +44,17 @@ self.model = SentenceTransformer(self.config.model, device=self.config.device)
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
+ """
+ Rerank documents using sentence transformer cross-encoder.
+
+ Args:
+ query: The search query
+ documents: List of documents to rerank
+ top_k: Number of top documents to return
+
+ Returns:
+ List of reranked documents with rerank_score
+ """
if not documents:
return documents
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/reranker/sentence_transformer_reranker.py |
Can you add docstrings to this Python file? | import os
from typing import List, Dict, Any
from mem0.reranker.base import BaseReranker
try:
from zeroentropy import ZeroEntropy
ZERO_ENTROPY_AVAILABLE = True
except ImportError:
ZERO_ENTROPY_AVAILABLE = False
class ZeroEntropyReranker(BaseReranker):
def __init__(self, config):
if not ZERO_ENTROPY_AVAILABLE:
raise ImportError("zeroentropy package is required for ZeroEntropyReranker. Install with: pip install zeroentropy")
self.config = config
self.api_key = config.api_key or os.getenv("ZERO_ENTROPY_API_KEY")
if not self.api_key:
raise ValueError("Zero Entropy API key is required. Set ZERO_ENTROPY_API_KEY environment variable or pass api_key in config.")
self.model = config.model or "zerank-1"
# Initialize Zero Entropy client
if self.api_key:
self.client = ZeroEntropy(api_key=self.api_key)
else:
self.client = ZeroEntropy() # Will use ZERO_ENTROPY_API_KEY from environment
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
if not documents:
return documents
# Extract text content for reranking
doc_texts = []
for doc in documents:
if 'memory' in doc:
doc_texts.append(doc['memory'])
elif 'text' in doc:
doc_texts.append(doc['text'])
elif 'content' in doc:
doc_texts.append(doc['content'])
else:
doc_texts.append(str(doc))
try:
# Call Zero Entropy rerank API
response = self.client.models.rerank(
model=self.model,
query=query,
documents=doc_texts,
)
# Create reranked results
reranked_docs = []
for result in response.results:
original_doc = documents[result.index].copy()
original_doc['rerank_score'] = result.relevance_score
reranked_docs.append(original_doc)
# Sort by relevance score in descending order
reranked_docs.sort(key=lambda x: x['rerank_score'], reverse=True)
# Apply top_k limit
if top_k:
reranked_docs = reranked_docs[:top_k]
elif self.config.top_k:
reranked_docs = reranked_docs[:self.config.top_k]
return reranked_docs
except Exception:
# Fallback to original order if reranking fails
for doc in documents:
doc['rerank_score'] = 0.0
return documents[:top_k] if top_k else documents | --- +++ @@ -11,8 +11,15 @@
class ZeroEntropyReranker(BaseReranker):
+ """Zero Entropy-based reranker implementation."""
def __init__(self, config):
+ """
+ Initialize Zero Entropy reranker.
+
+ Args:
+ config: ZeroEntropyRerankerConfig object with configuration parameters
+ """
if not ZERO_ENTROPY_AVAILABLE:
raise ImportError("zeroentropy package is required for ZeroEntropyReranker. Install with: pip install zeroentropy")
@@ -30,6 +37,17 @@ self.client = ZeroEntropy() # Will use ZERO_ENTROPY_API_KEY from environment
def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = None) -> List[Dict[str, Any]]:
+ """
+ Rerank documents using Zero Entropy's rerank API.
+
+ Args:
+ query: The search query
+ documents: List of documents to rerank
+ top_k: Number of top documents to return
+
+ Returns:
+ List of reranked documents with rerank_score
+ """
if not documents:
return documents
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/reranker/zero_entropy_reranker.py |
Improve documentation using docstrings | import json
import logging
from contextlib import contextmanager
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
try:
import pymysql
from pymysql.cursors import DictCursor
from dbutils.pooled_db import PooledDB
except ImportError:
raise ImportError(
"Azure MySQL vector store requires PyMySQL and DBUtils. "
"Please install them using 'pip install pymysql dbutils'"
)
try:
from azure.identity import DefaultAzureCredential
AZURE_IDENTITY_AVAILABLE = True
except ImportError:
AZURE_IDENTITY_AVAILABLE = False
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str]
score: Optional[float]
payload: Optional[dict]
class AzureMySQL(VectorStoreBase):
def __init__(
self,
host: str,
port: int,
user: str,
password: Optional[str],
database: str,
collection_name: str,
embedding_model_dims: int,
use_azure_credential: bool = False,
ssl_ca: Optional[str] = None,
ssl_disabled: bool = False,
minconn: int = 1,
maxconn: int = 5,
connection_pool: Optional[Any] = None,
):
self.host = host
self.port = port
self.user = user
self.password = password
self.database = database
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.use_azure_credential = use_azure_credential
self.ssl_ca = ssl_ca
self.ssl_disabled = ssl_disabled
self.connection_pool = connection_pool
# Handle Azure authentication
if use_azure_credential:
if not AZURE_IDENTITY_AVAILABLE:
raise ImportError(
"Azure Identity is required for Azure credential authentication. "
"Please install it using 'pip install azure-identity'"
)
self._setup_azure_auth()
# Setup connection pool
if self.connection_pool is None:
self._setup_connection_pool(minconn, maxconn)
# Create collection if it doesn't exist
collections = self.list_cols()
if collection_name not in collections:
self.create_col(name=collection_name, vector_size=embedding_model_dims, distance="cosine")
def _setup_azure_auth(self):
try:
credential = DefaultAzureCredential()
# Get access token for Azure Database for MySQL
token = credential.get_token("https://ossrdbms-aad.database.windows.net/.default")
# Use token as password
self.password = token.token
logger.info("Successfully authenticated using Azure DefaultAzureCredential")
except Exception as e:
logger.error(f"Failed to authenticate with Azure: {e}")
raise
def _setup_connection_pool(self, minconn: int, maxconn: int):
connect_kwargs = {
"host": self.host,
"port": self.port,
"user": self.user,
"password": self.password,
"database": self.database,
"charset": "utf8mb4",
"cursorclass": DictCursor,
"autocommit": False,
}
# SSL configuration
if not self.ssl_disabled:
ssl_config = {"ssl_verify_cert": True}
if self.ssl_ca:
ssl_config["ssl_ca"] = self.ssl_ca
connect_kwargs["ssl"] = ssl_config
try:
self.connection_pool = PooledDB(
creator=pymysql,
mincached=minconn,
maxcached=maxconn,
maxconnections=maxconn,
blocking=True,
**connect_kwargs
)
logger.info("Successfully created MySQL connection pool")
except Exception as e:
logger.error(f"Failed to create connection pool: {e}")
raise
@contextmanager
def _get_cursor(self, commit: bool = False):
conn = self.connection_pool.connection()
cur = conn.cursor()
try:
yield cur
if commit:
conn.commit()
except Exception as exc:
conn.rollback()
logger.error(f"Database error: {exc}", exc_info=True)
raise
finally:
cur.close()
conn.close()
def create_col(self, name: str = None, vector_size: int = None, distance: str = "cosine"):
table_name = name or self.collection_name
dims = vector_size or self.embedding_model_dims
with self._get_cursor(commit=True) as cur:
# Create table with vector column
cur.execute(f"""
CREATE TABLE IF NOT EXISTS `{table_name}` (
id VARCHAR(255) PRIMARY KEY,
vector JSON,
payload JSON,
INDEX idx_payload_keys ((CAST(payload AS CHAR(255)) ARRAY))
)
""")
logger.info(f"Created collection '{table_name}' with vector dimension {dims}")
def insert(self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None):
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
if payloads is None:
payloads = [{}] * len(vectors)
if ids is None:
import uuid
ids = [str(uuid.uuid4()) for _ in range(len(vectors))]
data = []
for vector, payload, vec_id in zip(vectors, payloads, ids):
data.append((vec_id, json.dumps(vector), json.dumps(payload)))
with self._get_cursor(commit=True) as cur:
cur.executemany(
f"INSERT INTO `{self.collection_name}` (id, vector, payload) VALUES (%s, %s, %s) "
f"ON DUPLICATE KEY UPDATE vector = VALUES(vector), payload = VALUES(payload)",
data
)
def _cosine_distance(self, vec1_json: str, vec2: List[float]) -> str:
# For MySQL, we need to calculate cosine similarity manually
# This is a simplified version - in production, you'd use stored procedures or UDFs
return """
1 - (
(SELECT SUM(a.val * b.val) /
(SQRT(SUM(a.val * a.val)) * SQRT(SUM(b.val * b.val))))
FROM (
SELECT JSON_EXTRACT(vector, CONCAT('$[', idx, ']')) as val
FROM (SELECT @row := @row + 1 as idx FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t1, (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t2) indices
WHERE idx < JSON_LENGTH(vector)
) a,
(
SELECT JSON_EXTRACT(%s, CONCAT('$[', idx, ']')) as val
FROM (SELECT @row := @row + 1 as idx FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t1, (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) t2) indices
WHERE idx < JSON_LENGTH(%s)
) b
WHERE a.idx = b.idx
)
"""
def search(
self,
query: str,
vectors: List[float],
limit: int = 5,
filters: Optional[Dict] = None,
) -> List[OutputData]:
filter_conditions = []
filter_params = []
if filters:
for k, v in filters.items():
filter_conditions.append("JSON_EXTRACT(payload, %s) = %s")
filter_params.extend([f"$.{k}", json.dumps(v)])
filter_clause = "WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
# For simplicity, we'll compute cosine similarity in Python
# In production, you'd want to use MySQL stored procedures or UDFs
with self._get_cursor() as cur:
query_sql = f"""
SELECT id, vector, payload
FROM `{self.collection_name}`
{filter_clause}
"""
cur.execute(query_sql, filter_params)
results = cur.fetchall()
# Calculate cosine similarity in Python
import numpy as np
query_vec = np.array(vectors)
scored_results = []
for row in results:
vec = np.array(json.loads(row['vector']))
# Cosine similarity
similarity = np.dot(query_vec, vec) / (np.linalg.norm(query_vec) * np.linalg.norm(vec))
distance = 1 - similarity
scored_results.append((row['id'], distance, row['payload']))
# Sort by distance and limit
scored_results.sort(key=lambda x: x[1])
scored_results = scored_results[:limit]
return [
OutputData(id=r[0], score=float(r[1]), payload=json.loads(r[2]) if isinstance(r[2], str) else r[2])
for r in scored_results
]
def delete(self, vector_id: str):
with self._get_cursor(commit=True) as cur:
cur.execute(f"DELETE FROM `{self.collection_name}` WHERE id = %s", (vector_id,))
def update(
self,
vector_id: str,
vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
with self._get_cursor(commit=True) as cur:
if vector is not None:
cur.execute(
f"UPDATE `{self.collection_name}` SET vector = %s WHERE id = %s",
(json.dumps(vector), vector_id),
)
if payload is not None:
cur.execute(
f"UPDATE `{self.collection_name}` SET payload = %s WHERE id = %s",
(json.dumps(payload), vector_id),
)
def get(self, vector_id: str) -> Optional[OutputData]:
with self._get_cursor() as cur:
cur.execute(
f"SELECT id, vector, payload FROM `{self.collection_name}` WHERE id = %s",
(vector_id,),
)
result = cur.fetchone()
if not result:
return None
return OutputData(
id=result['id'],
score=None,
payload=json.loads(result['payload']) if isinstance(result['payload'], str) else result['payload']
)
def list_cols(self) -> List[str]:
with self._get_cursor() as cur:
cur.execute("SHOW TABLES")
return [row[f"Tables_in_{self.database}"] for row in cur.fetchall()]
def delete_col(self):
with self._get_cursor(commit=True) as cur:
cur.execute(f"DROP TABLE IF EXISTS `{self.collection_name}`")
logger.info(f"Deleted collection '{self.collection_name}'")
def col_info(self) -> Dict[str, Any]:
with self._get_cursor() as cur:
cur.execute("""
SELECT
TABLE_NAME as name,
TABLE_ROWS as count,
ROUND(((DATA_LENGTH + INDEX_LENGTH) / 1024 / 1024), 2) as size_mb
FROM information_schema.TABLES
WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
""", (self.database, self.collection_name))
result = cur.fetchone()
if result:
return {
"name": result['name'],
"count": result['count'],
"size": f"{result['size_mb']} MB"
}
return {}
def list(
self,
filters: Optional[Dict] = None,
limit: int = 100
) -> List[List[OutputData]]:
filter_conditions = []
filter_params = []
if filters:
for k, v in filters.items():
filter_conditions.append("JSON_EXTRACT(payload, %s) = %s")
filter_params.extend([f"$.{k}", json.dumps(v)])
filter_clause = "WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
with self._get_cursor() as cur:
cur.execute(
f"""
SELECT id, vector, payload
FROM `{self.collection_name}`
{filter_clause}
LIMIT %s
""",
(*filter_params, limit)
)
results = cur.fetchall()
return [[
OutputData(
id=r['id'],
score=None,
payload=json.loads(r['payload']) if isinstance(r['payload'], str) else r['payload']
) for r in results
]]
def reset(self):
logger.warning(f"Resetting collection {self.collection_name}...")
self.delete_col()
self.create_col(name=self.collection_name, vector_size=self.embedding_model_dims)
def __del__(self):
try:
if hasattr(self, 'connection_pool') and self.connection_pool:
self.connection_pool.close()
except Exception:
pass | --- +++ @@ -49,6 +49,24 @@ maxconn: int = 5,
connection_pool: Optional[Any] = None,
):
+ """
+ Initialize the Azure MySQL vector store.
+
+ Args:
+ host (str): MySQL server host
+ port (int): MySQL server port
+ user (str): Database user
+ password (str, optional): Database password (not required if using Azure credential)
+ database (str): Database name
+ collection_name (str): Collection/table name
+ embedding_model_dims (int): Dimension of the embedding vector
+ use_azure_credential (bool): Use Azure DefaultAzureCredential for authentication
+ ssl_ca (str, optional): Path to SSL CA certificate
+ ssl_disabled (bool): Disable SSL connection
+ minconn (int): Minimum number of connections in the pool
+ maxconn (int): Maximum number of connections in the pool
+ connection_pool (Any, optional): Pre-configured connection pool
+ """
self.host = host
self.port = port
self.user = user
@@ -80,6 +98,7 @@ self.create_col(name=collection_name, vector_size=embedding_model_dims, distance="cosine")
def _setup_azure_auth(self):
+ """Setup Azure authentication using DefaultAzureCredential."""
try:
credential = DefaultAzureCredential()
# Get access token for Azure Database for MySQL
@@ -92,6 +111,7 @@ raise
def _setup_connection_pool(self, minconn: int, maxconn: int):
+ """Setup MySQL connection pool."""
connect_kwargs = {
"host": self.host,
"port": self.port,
@@ -126,6 +146,10 @@
@contextmanager
def _get_cursor(self, commit: bool = False):
+ """
+ Context manager to get a cursor from the connection pool.
+ Auto-commits or rolls back based on exception.
+ """
conn = self.connection_pool.connection()
cur = conn.cursor()
try:
@@ -141,6 +165,15 @@ conn.close()
def create_col(self, name: str = None, vector_size: int = None, distance: str = "cosine"):
+ """
+ Create a new collection (table in MySQL).
+ Enables vector extension and creates appropriate indexes.
+
+ Args:
+ name (str, optional): Collection name (uses self.collection_name if not provided)
+ vector_size (int, optional): Vector dimension (uses self.embedding_model_dims if not provided)
+ distance (str): Distance metric (cosine, euclidean, dot_product)
+ """
table_name = name or self.collection_name
dims = vector_size or self.embedding_model_dims
@@ -157,6 +190,14 @@ logger.info(f"Created collection '{table_name}' with vector dimension {dims}")
def insert(self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None):
+ """
+ Insert vectors into the collection.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert
+ payloads (List[Dict], optional): List of payloads corresponding to vectors
+ ids (List[str], optional): List of IDs corresponding to vectors
+ """
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
if payloads is None:
@@ -177,6 +218,7 @@ )
def _cosine_distance(self, vec1_json: str, vec2: List[float]) -> str:
+ """Generate SQL for cosine distance calculation."""
# For MySQL, we need to calculate cosine similarity manually
# This is a simplified version - in production, you'd use stored procedures or UDFs
return """
@@ -204,6 +246,18 @@ limit: int = 5,
filters: Optional[Dict] = None,
) -> List[OutputData]:
+ """
+ Search for similar vectors using cosine similarity.
+
+ Args:
+ query (str): Query string (not used in vector search)
+ vectors (List[float]): Query vector
+ limit (int): Number of results to return
+ filters (Dict, optional): Filters to apply to the search
+
+ Returns:
+ List[OutputData]: Search results
+ """
filter_conditions = []
filter_params = []
@@ -247,6 +301,12 @@ ]
def delete(self, vector_id: str):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete
+ """
with self._get_cursor(commit=True) as cur:
cur.execute(f"DELETE FROM `{self.collection_name}` WHERE id = %s", (vector_id,))
@@ -256,6 +316,14 @@ vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update
+ vector (List[float], optional): Updated vector
+ payload (Dict, optional): Updated payload
+ """
with self._get_cursor(commit=True) as cur:
if vector is not None:
cur.execute(
@@ -269,6 +337,15 @@ )
def get(self, vector_id: str) -> Optional[OutputData]:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve
+
+ Returns:
+ OutputData: Retrieved vector or None if not found
+ """
with self._get_cursor() as cur:
cur.execute(
f"SELECT id, vector, payload FROM `{self.collection_name}` WHERE id = %s",
@@ -284,16 +361,29 @@ )
def list_cols(self) -> List[str]:
+ """
+ List all collections (tables).
+
+ Returns:
+ List[str]: List of collection names
+ """
with self._get_cursor() as cur:
cur.execute("SHOW TABLES")
return [row[f"Tables_in_{self.database}"] for row in cur.fetchall()]
def delete_col(self):
+ """Delete the collection (table)."""
with self._get_cursor(commit=True) as cur:
cur.execute(f"DROP TABLE IF EXISTS `{self.collection_name}`")
logger.info(f"Deleted collection '{self.collection_name}'")
def col_info(self) -> Dict[str, Any]:
+ """
+ Get information about the collection.
+
+ Returns:
+ Dict[str, Any]: Collection information
+ """
with self._get_cursor() as cur:
cur.execute("""
SELECT
@@ -318,6 +408,16 @@ filters: Optional[Dict] = None,
limit: int = 100
) -> List[List[OutputData]]:
+ """
+ List all vectors in the collection.
+
+ Args:
+ filters (Dict, optional): Filters to apply
+ limit (int): Number of vectors to return
+
+ Returns:
+ List[List[OutputData]]: List of vectors
+ """
filter_conditions = []
filter_params = []
@@ -349,13 +449,15 @@ ]]
def reset(self):
+ """Reset the collection by deleting and recreating it."""
logger.warning(f"Resetting collection {self.collection_name}...")
self.delete_col()
self.create_col(name=self.collection_name, vector_size=self.embedding_model_dims)
def __del__(self):
+ """Close the connection pool when the object is deleted."""
try:
if hasattr(self, 'connection_pool') and self.connection_pool:
self.connection_pool.close()
except Exception:
- pass+ pass
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/azure_mysql.py |
Write clean docstrings for readability | from abc import ABC, abstractmethod
class VectorStoreBase(ABC):
@abstractmethod
def create_col(self, name, vector_size, distance):
pass
@abstractmethod
def insert(self, vectors, payloads=None, ids=None):
pass
@abstractmethod
def search(self, query, vectors, limit=5, filters=None):
pass
@abstractmethod
def delete(self, vector_id):
pass
@abstractmethod
def update(self, vector_id, vector=None, payload=None):
pass
@abstractmethod
def get(self, vector_id):
pass
@abstractmethod
def list_cols(self):
pass
@abstractmethod
def delete_col(self):
pass
@abstractmethod
def col_info(self):
pass
@abstractmethod
def list(self, filters=None, limit=None):
pass
@abstractmethod
def reset(self):
pass | --- +++ @@ -4,44 +4,55 @@ class VectorStoreBase(ABC):
@abstractmethod
def create_col(self, name, vector_size, distance):
+ """Create a new collection."""
pass
@abstractmethod
def insert(self, vectors, payloads=None, ids=None):
+ """Insert vectors into a collection."""
pass
@abstractmethod
def search(self, query, vectors, limit=5, filters=None):
+ """Search for similar vectors."""
pass
@abstractmethod
def delete(self, vector_id):
+ """Delete a vector by ID."""
pass
@abstractmethod
def update(self, vector_id, vector=None, payload=None):
+ """Update a vector and its payload."""
pass
@abstractmethod
def get(self, vector_id):
+ """Retrieve a vector by ID."""
pass
@abstractmethod
def list_cols(self):
+ """List all collections."""
pass
@abstractmethod
def delete_col(self):
+ """Delete a collection."""
pass
@abstractmethod
def col_info(self):
+ """Get information about a collection."""
pass
@abstractmethod
def list(self, filters=None, limit=None):
+ """List all memories."""
pass
@abstractmethod
def reset(self):
- pass+ """Reset by delete the collection and recreate it."""
+ pass
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/base.py |
Add docstrings for utility scripts | import os
import json
from typing import Optional, Dict, Any
try:
from google.oauth2 import service_account
from google.auth import default
import google.auth.credentials
except ImportError:
raise ImportError("google-auth is required for GCP authentication. Install with: pip install google-auth")
class GCPAuthenticator:
@staticmethod
def get_credentials(
service_account_json: Optional[Dict[str, Any]] = None,
credentials_path: Optional[str] = None,
scopes: Optional[list] = None
) -> tuple[google.auth.credentials.Credentials, Optional[str]]:
credentials = None
project_id = None
# Method 1: Service account JSON (in-memory)
if service_account_json:
credentials = service_account.Credentials.from_service_account_info(
service_account_json, scopes=scopes
)
project_id = service_account_json.get("project_id")
# Method 2: Service account file path
elif credentials_path and os.path.isfile(credentials_path):
credentials = service_account.Credentials.from_service_account_file(
credentials_path, scopes=scopes
)
# Extract project_id from the file
with open(credentials_path, 'r') as f:
cred_data = json.load(f)
project_id = cred_data.get("project_id")
# Method 3: Environment variable path
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
env_path = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if os.path.isfile(env_path):
credentials = service_account.Credentials.from_service_account_file(
env_path, scopes=scopes
)
# Extract project_id from the file
with open(env_path, 'r') as f:
cred_data = json.load(f)
project_id = cred_data.get("project_id")
# Method 4: Default credentials (GCE, Cloud Run, etc.)
if not credentials:
try:
credentials, project_id = default(scopes=scopes)
except Exception as e:
raise ValueError(
f"No valid GCP credentials found. Please provide one of:\n"
f"1. service_account_json parameter (dict)\n"
f"2. credentials_path parameter (file path)\n"
f"3. GOOGLE_APPLICATION_CREDENTIALS environment variable\n"
f"4. Default credentials (if running on GCP)\n"
f"Error: {e}"
)
return credentials, project_id
@staticmethod
def setup_vertex_ai(
service_account_json: Optional[Dict[str, Any]] = None,
credentials_path: Optional[str] = None,
project_id: Optional[str] = None,
location: str = "us-central1"
) -> str:
try:
import vertexai
except ImportError:
raise ImportError("google-cloud-aiplatform is required for Vertex AI. Install with: pip install google-cloud-aiplatform")
credentials, detected_project_id = GCPAuthenticator.get_credentials(
service_account_json=service_account_json,
credentials_path=credentials_path,
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
# Use provided project_id or fall back to detected one
final_project_id = project_id or detected_project_id or os.getenv("GOOGLE_CLOUD_PROJECT")
if not final_project_id:
raise ValueError("Project ID could not be determined. Please provide project_id parameter or set GOOGLE_CLOUD_PROJECT environment variable.")
vertexai.init(project=final_project_id, location=location, credentials=credentials)
return final_project_id
@staticmethod
def get_genai_client(
service_account_json: Optional[Dict[str, Any]] = None,
credentials_path: Optional[str] = None,
api_key: Optional[str] = None
):
try:
from google.genai import Client as GenAIClient
except ImportError:
raise ImportError("google-genai is required. Install with: pip install google-genai")
# If API key is provided, use it directly
if api_key:
return GenAIClient(api_key=api_key)
# Otherwise, try service account authentication
credentials, _ = GCPAuthenticator.get_credentials(
service_account_json=service_account_json,
credentials_path=credentials_path,
scopes=["https://www.googleapis.com/auth/generative-language"]
)
return GenAIClient(credentials=credentials) | --- +++ @@ -11,6 +11,15 @@
class GCPAuthenticator:
+ """
+ Centralized GCP authentication handler that supports multiple credential methods.
+
+ Priority order:
+ 1. service_account_json (dict) - In-memory service account credentials
+ 2. credentials_path (str) - Path to service account JSON file
+ 3. Environment variables (GOOGLE_APPLICATION_CREDENTIALS)
+ 4. Default credentials (for environments like GCE, Cloud Run, etc.)
+ """
@staticmethod
def get_credentials(
@@ -18,6 +27,20 @@ credentials_path: Optional[str] = None,
scopes: Optional[list] = None
) -> tuple[google.auth.credentials.Credentials, Optional[str]]:
+ """
+ Get Google credentials using the priority order defined above.
+
+ Args:
+ service_account_json: Service account credentials as a dictionary
+ credentials_path: Path to service account JSON file
+ scopes: List of OAuth scopes (optional)
+
+ Returns:
+ tuple: (credentials, project_id)
+
+ Raises:
+ ValueError: If no valid credentials are found
+ """
credentials = None
project_id = None
@@ -73,6 +96,21 @@ project_id: Optional[str] = None,
location: str = "us-central1"
) -> str:
+ """
+ Initialize Vertex AI with proper authentication.
+
+ Args:
+ service_account_json: Service account credentials as dict
+ credentials_path: Path to service account JSON file
+ project_id: GCP project ID (optional, will be auto-detected)
+ location: GCP location/region
+
+ Returns:
+ str: The project ID being used
+
+ Raises:
+ ValueError: If authentication fails
+ """
try:
import vertexai
except ImportError:
@@ -99,6 +137,17 @@ credentials_path: Optional[str] = None,
api_key: Optional[str] = None
):
+ """
+ Get a Google GenAI client with authentication.
+
+ Args:
+ service_account_json: Service account credentials as dict
+ credentials_path: Path to service account JSON file
+ api_key: API key (takes precedence over service account)
+
+ Returns:
+ Google GenAI client instance
+ """
try:
from google.genai import Client as GenAIClient
except ImportError:
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/utils/gcp_auth.py |
Write clean docstrings for readability | import importlib
from typing import Dict, Optional, Union
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.configs.llms.anthropic import AnthropicConfig
from mem0.configs.llms.azure import AzureOpenAIConfig
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.deepseek import DeepSeekConfig
from mem0.configs.llms.lmstudio import LMStudioConfig
from mem0.configs.llms.ollama import OllamaConfig
from mem0.configs.llms.openai import OpenAIConfig
from mem0.configs.llms.vllm import VllmConfig
from mem0.configs.rerankers.base import BaseRerankerConfig
from mem0.configs.rerankers.cohere import CohereRerankerConfig
from mem0.configs.rerankers.sentence_transformer import SentenceTransformerRerankerConfig
from mem0.configs.rerankers.zero_entropy import ZeroEntropyRerankerConfig
from mem0.configs.rerankers.llm import LLMRerankerConfig
from mem0.configs.rerankers.huggingface import HuggingFaceRerankerConfig
from mem0.embeddings.mock import MockEmbeddings
def load_class(class_type):
module_path, class_name = class_type.rsplit(".", 1)
module = importlib.import_module(module_path)
return getattr(module, class_name)
class LlmFactory:
# Provider mappings with their config classes
provider_to_class = {
"ollama": ("mem0.llms.ollama.OllamaLLM", OllamaConfig),
"openai": ("mem0.llms.openai.OpenAILLM", OpenAIConfig),
"groq": ("mem0.llms.groq.GroqLLM", BaseLlmConfig),
"together": ("mem0.llms.together.TogetherLLM", BaseLlmConfig),
"aws_bedrock": ("mem0.llms.aws_bedrock.AWSBedrockLLM", BaseLlmConfig),
"litellm": ("mem0.llms.litellm.LiteLLM", BaseLlmConfig),
"azure_openai": ("mem0.llms.azure_openai.AzureOpenAILLM", AzureOpenAIConfig),
"openai_structured": ("mem0.llms.openai_structured.OpenAIStructuredLLM", OpenAIConfig),
"anthropic": ("mem0.llms.anthropic.AnthropicLLM", AnthropicConfig),
"azure_openai_structured": ("mem0.llms.azure_openai_structured.AzureOpenAIStructuredLLM", AzureOpenAIConfig),
"gemini": ("mem0.llms.gemini.GeminiLLM", BaseLlmConfig),
"deepseek": ("mem0.llms.deepseek.DeepSeekLLM", DeepSeekConfig),
"xai": ("mem0.llms.xai.XAILLM", BaseLlmConfig),
"sarvam": ("mem0.llms.sarvam.SarvamLLM", BaseLlmConfig),
"lmstudio": ("mem0.llms.lmstudio.LMStudioLLM", LMStudioConfig),
"vllm": ("mem0.llms.vllm.VllmLLM", VllmConfig),
"langchain": ("mem0.llms.langchain.LangchainLLM", BaseLlmConfig),
}
@classmethod
def create(cls, provider_name: str, config: Optional[Union[BaseLlmConfig, Dict]] = None, **kwargs):
if provider_name not in cls.provider_to_class:
raise ValueError(f"Unsupported Llm provider: {provider_name}")
class_type, config_class = cls.provider_to_class[provider_name]
llm_class = load_class(class_type)
# Handle configuration
if config is None:
# Create default config with kwargs
config = config_class(**kwargs)
elif isinstance(config, dict):
# Merge dict config with kwargs
config.update(kwargs)
config = config_class(**config)
elif isinstance(config, BaseLlmConfig):
# Convert base config to provider-specific config if needed
if config_class != BaseLlmConfig:
# Convert to provider-specific config
config_dict = {
"model": config.model,
"temperature": config.temperature,
"api_key": config.api_key,
"max_tokens": config.max_tokens,
"top_p": config.top_p,
"top_k": config.top_k,
"enable_vision": config.enable_vision,
"vision_details": config.vision_details,
"http_client_proxies": config.http_client,
}
config_dict.update(kwargs)
config = config_class(**config_dict)
else:
# Use base config as-is
pass
else:
# Assume it's already the correct config type
pass
return llm_class(config)
@classmethod
def register_provider(cls, name: str, class_path: str, config_class=None):
if config_class is None:
config_class = BaseLlmConfig
cls.provider_to_class[name] = (class_path, config_class)
@classmethod
def get_supported_providers(cls) -> list:
return list(cls.provider_to_class.keys())
class EmbedderFactory:
provider_to_class = {
"openai": "mem0.embeddings.openai.OpenAIEmbedding",
"ollama": "mem0.embeddings.ollama.OllamaEmbedding",
"huggingface": "mem0.embeddings.huggingface.HuggingFaceEmbedding",
"azure_openai": "mem0.embeddings.azure_openai.AzureOpenAIEmbedding",
"gemini": "mem0.embeddings.gemini.GoogleGenAIEmbedding",
"vertexai": "mem0.embeddings.vertexai.VertexAIEmbedding",
"together": "mem0.embeddings.together.TogetherEmbedding",
"lmstudio": "mem0.embeddings.lmstudio.LMStudioEmbedding",
"langchain": "mem0.embeddings.langchain.LangchainEmbedding",
"aws_bedrock": "mem0.embeddings.aws_bedrock.AWSBedrockEmbedding",
"fastembed": "mem0.embeddings.fastembed.FastEmbedEmbedding",
}
@classmethod
def create(cls, provider_name, config, vector_config: Optional[dict]):
if provider_name == "upstash_vector" and vector_config and vector_config.enable_embeddings:
return MockEmbeddings()
class_type = cls.provider_to_class.get(provider_name)
if class_type:
embedder_instance = load_class(class_type)
base_config = BaseEmbedderConfig(**config)
return embedder_instance(base_config)
else:
raise ValueError(f"Unsupported Embedder provider: {provider_name}")
class VectorStoreFactory:
provider_to_class = {
"qdrant": "mem0.vector_stores.qdrant.Qdrant",
"chroma": "mem0.vector_stores.chroma.ChromaDB",
"pgvector": "mem0.vector_stores.pgvector.PGVector",
"milvus": "mem0.vector_stores.milvus.MilvusDB",
"upstash_vector": "mem0.vector_stores.upstash_vector.UpstashVector",
"azure_ai_search": "mem0.vector_stores.azure_ai_search.AzureAISearch",
"azure_mysql": "mem0.vector_stores.azure_mysql.AzureMySQL",
"pinecone": "mem0.vector_stores.pinecone.PineconeDB",
"mongodb": "mem0.vector_stores.mongodb.MongoDB",
"redis": "mem0.vector_stores.redis.RedisDB",
"valkey": "mem0.vector_stores.valkey.ValkeyDB",
"databricks": "mem0.vector_stores.databricks.Databricks",
"elasticsearch": "mem0.vector_stores.elasticsearch.ElasticsearchDB",
"vertex_ai_vector_search": "mem0.vector_stores.vertex_ai_vector_search.GoogleMatchingEngine",
"opensearch": "mem0.vector_stores.opensearch.OpenSearchDB",
"supabase": "mem0.vector_stores.supabase.Supabase",
"weaviate": "mem0.vector_stores.weaviate.Weaviate",
"faiss": "mem0.vector_stores.faiss.FAISS",
"langchain": "mem0.vector_stores.langchain.Langchain",
"s3_vectors": "mem0.vector_stores.s3_vectors.S3Vectors",
"baidu": "mem0.vector_stores.baidu.BaiduDB",
"cassandra": "mem0.vector_stores.cassandra.CassandraDB",
"neptune": "mem0.vector_stores.neptune_analytics.NeptuneAnalyticsVector",
}
@classmethod
def create(cls, provider_name, config):
class_type = cls.provider_to_class.get(provider_name)
if class_type:
if not isinstance(config, dict):
config = config.model_dump()
vector_store_instance = load_class(class_type)
return vector_store_instance(**config)
else:
raise ValueError(f"Unsupported VectorStore provider: {provider_name}")
@classmethod
def reset(cls, instance):
instance.reset()
return instance
class GraphStoreFactory:
provider_to_class = {
"memgraph": "mem0.memory.memgraph_memory.MemoryGraph",
"neptune": "mem0.graphs.neptune.neptunegraph.MemoryGraph",
"neptunedb": "mem0.graphs.neptune.neptunedb.MemoryGraph",
"kuzu": "mem0.memory.kuzu_memory.MemoryGraph",
"default": "mem0.memory.graph_memory.MemoryGraph",
}
@classmethod
def create(cls, provider_name, config):
class_type = cls.provider_to_class.get(provider_name, cls.provider_to_class["default"])
try:
GraphClass = load_class(class_type)
except (ImportError, AttributeError) as e:
raise ImportError(f"Could not import MemoryGraph for provider '{provider_name}': {e}")
return GraphClass(config)
class RerankerFactory:
# Provider mappings with their config classes
provider_to_class = {
"cohere": ("mem0.reranker.cohere_reranker.CohereReranker", CohereRerankerConfig),
"sentence_transformer": ("mem0.reranker.sentence_transformer_reranker.SentenceTransformerReranker", SentenceTransformerRerankerConfig),
"zero_entropy": ("mem0.reranker.zero_entropy_reranker.ZeroEntropyReranker", ZeroEntropyRerankerConfig),
"llm_reranker": ("mem0.reranker.llm_reranker.LLMReranker", LLMRerankerConfig),
"huggingface": ("mem0.reranker.huggingface_reranker.HuggingFaceReranker", HuggingFaceRerankerConfig),
}
@classmethod
def create(cls, provider_name: str, config: Optional[Union[BaseRerankerConfig, Dict]] = None, **kwargs):
if provider_name not in cls.provider_to_class:
raise ValueError(f"Unsupported reranker provider: {provider_name}")
class_path, config_class = cls.provider_to_class[provider_name]
# Handle configuration
if config is None:
config = config_class(**kwargs)
elif isinstance(config, dict):
config = config_class(**config, **kwargs)
elif not isinstance(config, BaseRerankerConfig):
raise ValueError(f"Config must be a {config_class.__name__} instance or dict")
# Import and create the reranker class
try:
reranker_class = load_class(class_path)
except (ImportError, AttributeError) as e:
raise ImportError(f"Could not import reranker for provider '{provider_name}': {e}")
return reranker_class(config) | --- +++ @@ -26,6 +26,10 @@
class LlmFactory:
+ """
+ Factory for creating LLM instances with appropriate configurations.
+ Supports both old-style BaseLlmConfig and new provider-specific configs.
+ """
# Provider mappings with their config classes
provider_to_class = {
@@ -50,6 +54,20 @@
@classmethod
def create(cls, provider_name: str, config: Optional[Union[BaseLlmConfig, Dict]] = None, **kwargs):
+ """
+ Create an LLM instance with the appropriate configuration.
+
+ Args:
+ provider_name (str): The provider name (e.g., 'openai', 'anthropic')
+ config: Configuration object or dict. If None, will create default config
+ **kwargs: Additional configuration parameters
+
+ Returns:
+ Configured LLM instance
+
+ Raises:
+ ValueError: If provider is not supported
+ """
if provider_name not in cls.provider_to_class:
raise ValueError(f"Unsupported Llm provider: {provider_name}")
@@ -92,12 +110,26 @@
@classmethod
def register_provider(cls, name: str, class_path: str, config_class=None):
+ """
+ Register a new provider.
+
+ Args:
+ name (str): Provider name
+ class_path (str): Full path to LLM class
+ config_class: Configuration class for the provider (defaults to BaseLlmConfig)
+ """
if config_class is None:
config_class = BaseLlmConfig
cls.provider_to_class[name] = (class_path, config_class)
@classmethod
def get_supported_providers(cls) -> list:
+ """
+ Get list of supported providers.
+
+ Returns:
+ list: List of supported provider names
+ """
return list(cls.provider_to_class.keys())
@@ -174,6 +206,10 @@
class GraphStoreFactory:
+ """
+ Factory for creating MemoryGraph instances for different graph store providers.
+ Usage: GraphStoreFactory.create(provider_name, config)
+ """
provider_to_class = {
"memgraph": "mem0.memory.memgraph_memory.MemoryGraph",
@@ -194,6 +230,10 @@
class RerankerFactory:
+ """
+ Factory for creating reranker instances with appropriate configurations.
+ Supports provider-specific configs following the same pattern as other factories.
+ """
# Provider mappings with their config classes
provider_to_class = {
@@ -206,6 +246,21 @@
@classmethod
def create(cls, provider_name: str, config: Optional[Union[BaseRerankerConfig, Dict]] = None, **kwargs):
+ """
+ Create a reranker instance based on the provider and configuration.
+
+ Args:
+ provider_name: The reranker provider (e.g., 'cohere', 'sentence_transformer')
+ config: Configuration object or dictionary
+ **kwargs: Additional configuration parameters
+
+ Returns:
+ Reranker instance configured for the specified provider
+
+ Raises:
+ ImportError: If the provider class cannot be imported
+ ValueError: If the provider is not supported
+ """
if provider_name not in cls.provider_to_class:
raise ValueError(f"Unsupported reranker provider: {provider_name}")
@@ -225,4 +280,4 @@ except (ImportError, AttributeError) as e:
raise ImportError(f"Could not import reranker for provider '{provider_name}': {e}")
- return reranker_class(config)+ return reranker_class(config)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/utils/factory.py |
Add docstrings for internal functions | import logging
from typing import Dict, List, Optional
from pydantic import BaseModel
try:
import chromadb
from chromadb.config import Settings
except ImportError:
raise ImportError("The 'chromadb' library is required. Please install it using 'pip install chromadb'.")
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # distance
payload: Optional[Dict] # metadata
class ChromaDB(VectorStoreBase):
def __init__(
self,
collection_name: str,
client: Optional[chromadb.Client] = None,
host: Optional[str] = None,
port: Optional[int] = None,
path: Optional[str] = None,
api_key: Optional[str] = None,
tenant: Optional[str] = None,
):
if client:
self.client = client
elif api_key and tenant:
# Initialize ChromaDB Cloud client
logger.info("Initializing ChromaDB Cloud client")
self.client = chromadb.CloudClient(
api_key=api_key,
tenant=tenant,
database="mem0" # Use fixed database name for cloud
)
else:
# Initialize local or server client
self.settings = Settings(anonymized_telemetry=False)
if host and port:
self.settings.chroma_server_host = host
self.settings.chroma_server_http_port = port
self.settings.chroma_api_impl = "chromadb.api.fastapi.FastAPI"
else:
if path is None:
path = "db"
self.settings.persist_directory = path
self.settings.is_persistent = True
self.client = chromadb.Client(self.settings)
self.collection_name = collection_name
self.collection = self.create_col(collection_name)
def _parse_output(self, data: Dict) -> List[OutputData]:
keys = ["ids", "distances", "metadatas"]
values = []
for key in keys:
value = data.get(key, [])
if isinstance(value, list) and value and isinstance(value[0], list):
value = value[0]
values.append(value)
ids, distances, metadatas = values
max_length = max(len(v) for v in values if isinstance(v, list) and v is not None)
result = []
for i in range(max_length):
entry = OutputData(
id=ids[i] if isinstance(ids, list) and ids and i < len(ids) else None,
score=(distances[i] if isinstance(distances, list) and distances and i < len(distances) else None),
payload=(metadatas[i] if isinstance(metadatas, list) and metadatas and i < len(metadatas) else None),
)
result.append(entry)
return result
def create_col(self, name: str, embedding_fn: Optional[callable] = None):
collection = self.client.get_or_create_collection(
name=name,
embedding_function=embedding_fn,
)
return collection
def insert(
self,
vectors: List[list],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
):
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
self.collection.add(ids=ids, embeddings=vectors, metadatas=payloads)
def search(
self, query: str, vectors: List[list], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
where_clause = self._generate_where_clause(filters) if filters else None
results = self.collection.query(query_embeddings=vectors, where=where_clause, n_results=limit)
final_results = self._parse_output(results)
return final_results
def delete(self, vector_id: str):
self.collection.delete(ids=vector_id)
def update(
self,
vector_id: str,
vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
self.collection.update(ids=vector_id, embeddings=vector, metadatas=payload)
def get(self, vector_id: str) -> OutputData:
result = self.collection.get(ids=[vector_id])
return self._parse_output(result)[0]
def list_cols(self) -> List[chromadb.Collection]:
return self.client.list_collections()
def delete_col(self):
self.client.delete_collection(name=self.collection_name)
def col_info(self) -> Dict:
return self.client.get_collection(name=self.collection_name)
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
where_clause = self._generate_where_clause(filters) if filters else None
results = self.collection.get(where=where_clause, limit=limit)
return [self._parse_output(results)]
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.collection = self.create_col(self.collection_name)
@staticmethod
def _generate_where_clause(where: dict[str, any]) -> dict[str, any]:
if where is None:
return {}
def convert_condition(key: str, value: any) -> dict:
if value == "*":
# Wildcard - match any value (ChromaDB doesn't have direct wildcard, so we skip this filter)
return None
elif isinstance(value, dict):
# Handle comparison operators
chroma_condition = {}
for op, val in value.items():
if op == "eq":
chroma_condition[key] = {"$eq": val}
elif op == "ne":
chroma_condition[key] = {"$ne": val}
elif op == "gt":
chroma_condition[key] = {"$gt": val}
elif op == "gte":
chroma_condition[key] = {"$gte": val}
elif op == "lt":
chroma_condition[key] = {"$lt": val}
elif op == "lte":
chroma_condition[key] = {"$lte": val}
elif op == "in":
chroma_condition[key] = {"$in": val}
elif op == "nin":
chroma_condition[key] = {"$nin": val}
elif op in ["contains", "icontains"]:
# ChromaDB doesn't support contains, fallback to equality
chroma_condition[key] = {"$eq": val}
else:
# Unknown operator, treat as equality
chroma_condition[key] = {"$eq": val}
return chroma_condition
else:
# Simple equality
return {key: {"$eq": value}}
processed_filters = []
for key, value in where.items():
if key == "$or":
# Handle OR conditions
or_conditions = []
for condition in value:
or_condition = {}
for sub_key, sub_value in condition.items():
converted = convert_condition(sub_key, sub_value)
if converted:
or_condition.update(converted)
if or_condition:
or_conditions.append(or_condition)
if len(or_conditions) > 1:
processed_filters.append({"$or": or_conditions})
elif len(or_conditions) == 1:
processed_filters.append(or_conditions[0])
elif key == "$not":
# Handle NOT conditions - ChromaDB doesn't have direct NOT, so we'll skip for now
continue
else:
# Regular condition
converted = convert_condition(key, value)
if converted:
processed_filters.append(converted)
# Return appropriate format based on number of conditions
if len(processed_filters) == 0:
return {}
elif len(processed_filters) == 1:
return processed_filters[0]
else:
return {"$and": processed_filters} | --- +++ @@ -31,6 +31,18 @@ api_key: Optional[str] = None,
tenant: Optional[str] = None,
):
+ """
+ Initialize the Chromadb vector store.
+
+ Args:
+ collection_name (str): Name of the collection.
+ client (chromadb.Client, optional): Existing chromadb client instance. Defaults to None.
+ host (str, optional): Host address for chromadb server. Defaults to None.
+ port (int, optional): Port for chromadb server. Defaults to None.
+ path (str, optional): Path for local chromadb database. Defaults to None.
+ api_key (str, optional): ChromaDB Cloud API key. Defaults to None.
+ tenant (str, optional): ChromaDB Cloud tenant ID. Defaults to None.
+ """
if client:
self.client = client
elif api_key and tenant:
@@ -62,6 +74,15 @@ self.collection = self.create_col(collection_name)
def _parse_output(self, data: Dict) -> List[OutputData]:
+ """
+ Parse the output data.
+
+ Args:
+ data (Dict): Output data.
+
+ Returns:
+ List[OutputData]: Parsed output data.
+ """
keys = ["ids", "distances", "metadatas"]
values = []
@@ -86,6 +107,16 @@ return result
def create_col(self, name: str, embedding_fn: Optional[callable] = None):
+ """
+ Create a new collection.
+
+ Args:
+ name (str): Name of the collection.
+ embedding_fn (Optional[callable]): Embedding function to use. Defaults to None.
+
+ Returns:
+ chromadb.Collection: The created or retrieved collection.
+ """
collection = self.client.get_or_create_collection(
name=name,
embedding_function=embedding_fn,
@@ -98,18 +129,44 @@ payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
):
+ """
+ Insert vectors into a collection.
+
+ Args:
+ vectors (List[list]): List of vectors to insert.
+ payloads (Optional[List[Dict]], optional): List of payloads corresponding to vectors. Defaults to None.
+ ids (Optional[List[str]], optional): List of IDs corresponding to vectors. Defaults to None.
+ """
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
self.collection.add(ids=ids, embeddings=vectors, metadatas=payloads)
def search(
self, query: str, vectors: List[list], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query.
+ vectors (List[list]): List of vectors to search.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Optional[Dict], optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ List[OutputData]: Search results.
+ """
where_clause = self._generate_where_clause(filters) if filters else None
results = self.collection.query(query_embeddings=vectors, where=where_clause, n_results=limit)
final_results = self._parse_output(results)
return final_results
def delete(self, vector_id: str):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
self.collection.delete(ids=vector_id)
def update(
@@ -118,37 +175,90 @@ vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (Optional[List[float]], optional): Updated vector. Defaults to None.
+ payload (Optional[Dict], optional): Updated payload. Defaults to None.
+ """
self.collection.update(ids=vector_id, embeddings=vector, metadatas=payload)
def get(self, vector_id: str) -> OutputData:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Retrieved vector.
+ """
result = self.collection.get(ids=[vector_id])
return self._parse_output(result)[0]
def list_cols(self) -> List[chromadb.Collection]:
+ """
+ List all collections.
+
+ Returns:
+ List[chromadb.Collection]: List of collections.
+ """
return self.client.list_collections()
def delete_col(self):
+ """
+ Delete a collection.
+ """
self.client.delete_collection(name=self.collection_name)
def col_info(self) -> Dict:
+ """
+ Get information about a collection.
+
+ Returns:
+ Dict: Collection information.
+ """
return self.client.get_collection(name=self.collection_name)
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
+ """
+ List all vectors in a collection.
+
+ Args:
+ filters (Optional[Dict], optional): Filters to apply to the list. Defaults to None.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
where_clause = self._generate_where_clause(filters) if filters else None
results = self.collection.get(where=where_clause, limit=limit)
return [self._parse_output(results)]
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.collection = self.create_col(self.collection_name)
@staticmethod
def _generate_where_clause(where: dict[str, any]) -> dict[str, any]:
+ """
+ Generate a properly formatted where clause for ChromaDB.
+
+ Args:
+ where (dict[str, any]): The filter conditions.
+
+ Returns:
+ dict[str, any]: Properly formatted where clause for ChromaDB.
+ """
if where is None:
return {}
def convert_condition(key: str, value: any) -> dict:
+ """Convert universal filter format to ChromaDB format."""
if value == "*":
# Wildcard - match any value (ChromaDB doesn't have direct wildcard, so we skip this filter)
return None
@@ -219,4 +329,4 @@ elif len(processed_filters) == 1:
return processed_filters[0]
else:
- return {"$and": processed_filters}+ return {"$and": processed_filters}
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/chroma.py |
Replace inline comments with docstrings | import logging
import time
from typing import Any, Dict, List, Optional
try:
from opensearchpy import OpenSearch, RequestsHttpConnection
except ImportError:
raise ImportError("OpenSearch requires extra dependencies. Install with `pip install opensearch-py`") from None
from pydantic import BaseModel
from mem0.configs.vector_stores.opensearch import OpenSearchConfig
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: str
score: float
payload: Dict
class OpenSearchDB(VectorStoreBase):
    def __init__(self, **kwargs):
        """Build the OpenSearch client from a validated config and ensure the index exists.

        Auth precedence: an explicit ``http_auth`` object wins over a
        (user, password) basic-auth pair; with neither, no auth is sent.
        """
        config = OpenSearchConfig(**kwargs)
        # Initialize OpenSearch client
        self.client = OpenSearch(
            hosts=[{"host": config.host, "port": config.port or 9200}],
            # Prefer the explicit http_auth object; fall back to basic auth.
            http_auth=config.http_auth
            if config.http_auth
            else ((config.user, config.password) if (config.user and config.password) else None),
            use_ssl=config.use_ssl,
            verify_certs=config.verify_certs,
            connection_class=RequestsHttpConnection,
            pool_maxsize=20,
        )
        self.collection_name = config.collection_name
        self.embedding_model_dims = config.embedding_model_dims
        # Eagerly create the backing index (blocks until it is searchable).
        self.create_col(self.collection_name, self.embedding_model_dims)
def create_index(self) -> None:
index_settings = {
"settings": {
"index": {"number_of_replicas": 1, "number_of_shards": 5, "refresh_interval": "10s", "knn": True}
},
"mappings": {
"properties": {
"text": {"type": "text"},
"vector_field": {
"type": "knn_vector",
"dimension": self.embedding_model_dims,
"method": {"engine": "nmslib", "name": "hnsw", "space_type": "cosinesimil"},
},
"metadata": {"type": "object", "properties": {"user_id": {"type": "keyword"}}},
}
},
}
if not self.client.indices.exists(index=self.collection_name):
self.client.indices.create(index=self.collection_name, body=index_settings)
logger.info(f"Created index {self.collection_name}")
else:
logger.info(f"Index {self.collection_name} already exists")
def create_col(self, name: str, vector_size: int) -> None:
index_settings = {
"settings": {"index.knn": True},
"mappings": {
"properties": {
"vector_field": {
"type": "knn_vector",
"dimension": vector_size,
"method": {"engine": "nmslib", "name": "hnsw", "space_type": "cosinesimil"},
},
"payload": {"type": "object"},
"id": {"type": "keyword"},
}
},
}
if not self.client.indices.exists(index=name):
logger.warning(f"Creating index {name}, it might take 1-2 minutes...")
self.client.indices.create(index=name, body=index_settings)
# Wait for index to be ready
max_retries = 180 # 3 minutes timeout
retry_count = 0
while retry_count < max_retries:
try:
# Check if index is ready by attempting a simple search
self.client.search(index=name, body={"query": {"match_all": {}}})
time.sleep(1)
logger.info(f"Index {name} is ready")
return
except Exception:
retry_count += 1
if retry_count == max_retries:
raise TimeoutError(f"Index {name} creation timed out after {max_retries} seconds")
time.sleep(0.5)
def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> List[OutputData]:
if not ids:
ids = [str(i) for i in range(len(vectors))]
if payloads is None:
payloads = [{} for _ in range(len(vectors))]
results = []
for i, (vec, id_) in enumerate(zip(vectors, ids)):
body = {
"vector_field": vec,
"payload": payloads[i],
"id": id_,
}
try:
self.client.index(index=self.collection_name, body=body)
# Force refresh to make documents immediately searchable for tests
self.client.indices.refresh(index=self.collection_name)
results.append(OutputData(
id=id_,
score=1.0, # No score for inserts
payload=payloads[i]
))
except Exception as e:
logger.error(f"Error inserting vector {id_}: {e}")
raise
return results
    def search(
        self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
    ) -> List[OutputData]:
        """Run a k-NN search with optional identity filters.

        Over-fetches 2x candidates (k = limit * 2), then trims the hit list to
        ``limit``. Only user_id / run_id / agent_id keys of ``filters`` are
        honored. Returns an empty list if the search call fails.
        """
        # Base KNN query
        knn_query = {
            "knn": {
                "vector_field": {
                    "vector": vectors,
                    "k": limit * 2,
                }
            }
        }
        # Start building the full query
        query_body = {"size": limit * 2, "query": None}
        # Prepare filter conditions if applicable
        filter_clauses = []
        if filters:
            for key in ["user_id", "run_id", "agent_id"]:
                value = filters.get(key)
                if value:
                    filter_clauses.append({"term": {f"payload.{key}.keyword": value}})
        # Combine knn with filters if needed
        if filter_clauses:
            query_body["query"] = {"bool": {"must": knn_query, "filter": filter_clauses}}
        else:
            query_body["query"] = knn_query
        try:
            # Execute search
            response = self.client.search(index=self.collection_name, body=query_body)
            hits = response["hits"]["hits"]
            results = [
                OutputData(id=hit["_source"].get("id"), score=hit["_score"], payload=hit["_source"].get("payload", {}))
                for hit in hits[:limit]  # Ensure we don't exceed limit
            ]
            return results
        except Exception as e:
            logger.error(f"Error during search: {e}")
            return []
def delete(self, vector_id: str) -> None:
# First, find the document by custom ID
search_query = {"query": {"term": {"id": vector_id}}}
response = self.client.search(index=self.collection_name, body=search_query)
hits = response.get("hits", {}).get("hits", [])
if not hits:
return
opensearch_id = hits[0]["_id"]
# Delete using the actual document ID
self.client.delete(index=self.collection_name, id=opensearch_id)
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
# First, find the document by custom ID
search_query = {"query": {"term": {"id": vector_id}}}
response = self.client.search(index=self.collection_name, body=search_query)
hits = response.get("hits", {}).get("hits", [])
if not hits:
return
opensearch_id = hits[0]["_id"] # The actual document ID in OpenSearch
# Prepare updated fields
doc = {}
if vector is not None:
doc["vector_field"] = vector
if payload is not None:
doc["payload"] = payload
if doc:
try:
response = self.client.update(index=self.collection_name, id=opensearch_id, body={"doc": doc})
except Exception:
pass
def get(self, vector_id: str) -> Optional[OutputData]:
try:
search_query = {"query": {"term": {"id": vector_id}}}
response = self.client.search(index=self.collection_name, body=search_query)
hits = response["hits"]["hits"]
if not hits:
return None
return OutputData(id=hits[0]["_source"].get("id"), score=1.0, payload=hits[0]["_source"].get("payload", {}))
except Exception as e:
logger.error(f"Error retrieving vector {vector_id}: {str(e)}")
return None
def list_cols(self) -> List[str]:
return list(self.client.indices.get_alias().keys())
    def delete_col(self) -> None:
        """Drop the index backing this collection."""
        self.client.indices.delete(index=self.collection_name)
    def col_info(self, name: str) -> Any:
        """Return the raw index metadata (settings/mappings) for ``name``."""
        return self.client.indices.get(index=name)
def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[OutputData]:
try:
"""List all memories with optional filters."""
query: Dict = {"query": {"match_all": {}}}
filter_clauses = []
if filters:
for key in ["user_id", "run_id", "agent_id"]:
value = filters.get(key)
if value:
filter_clauses.append({"term": {f"payload.{key}.keyword": value}})
if filter_clauses:
query["query"] = {"bool": {"filter": filter_clauses}}
if limit:
query["size"] = limit
response = self.client.search(index=self.collection_name, body=query)
hits = response["hits"]["hits"]
# Return a flat list, not a nested array
results = [
OutputData(id=hit["_source"].get("id"), score=1.0, payload=hit["_source"].get("payload", {}))
for hit in hits
]
return [results] # VectorStore expects tuple/list format
except Exception as e:
logger.error(f"Error listing vectors: {e}")
return []
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_col(self.collection_name, self.embedding_model_dims) | --- +++ @@ -42,6 +42,7 @@ self.create_col(self.collection_name, self.embedding_model_dims)
def create_index(self) -> None:
+ """Create OpenSearch index with proper mappings if it doesn't exist."""
index_settings = {
"settings": {
"index": {"number_of_replicas": 1, "number_of_shards": 5, "refresh_interval": "10s", "knn": True}
@@ -66,6 +67,7 @@ logger.info(f"Index {self.collection_name} already exists")
def create_col(self, name: str, vector_size: int) -> None:
+ """Create a new collection (index in OpenSearch)."""
index_settings = {
"settings": {"index.knn": True},
"mappings": {
@@ -104,6 +106,7 @@ def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> List[OutputData]:
+ """Insert vectors into the index."""
if not ids:
ids = [str(i) for i in range(len(vectors))]
@@ -136,6 +139,7 @@ def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """Search for similar vectors using OpenSearch k-NN search with optional filters."""
# Base KNN query
knn_query = {
@@ -179,6 +183,7 @@ return []
def delete(self, vector_id: str) -> None:
+ """Delete a vector by custom ID."""
# First, find the document by custom ID
search_query = {"query": {"term": {"id": vector_id}}}
@@ -194,6 +199,7 @@ self.client.delete(index=self.collection_name, id=opensearch_id)
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
+ """Update a vector and its payload using the custom 'id' field."""
# First, find the document by custom ID
search_query = {"query": {"term": {"id": vector_id}}}
@@ -220,6 +226,7 @@ pass
def get(self, vector_id: str) -> Optional[OutputData]:
+ """Retrieve a vector by ID."""
try:
search_query = {"query": {"term": {"id": vector_id}}}
response = self.client.search(index=self.collection_name, body=search_query)
@@ -235,12 +242,15 @@ return None
def list_cols(self) -> List[str]:
+ """List all collections (indices)."""
return list(self.client.indices.get_alias().keys())
def delete_col(self) -> None:
+ """Delete a collection (index)."""
self.client.indices.delete(index=self.collection_name)
def col_info(self, name: str) -> Any:
+ """Get information about a collection (index)."""
return self.client.indices.get(index=name)
def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[OutputData]:
@@ -276,6 +286,7 @@
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_col(self.collection_name, self.embedding_model_dims)+ self.create_col(self.collection_name, self.embedding_model_dims)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/opensearch.py |
Add detailed docstrings explaining each function | import json
import logging
import uuid
from typing import Any, Dict, List, Optional
import numpy as np
from pydantic import BaseModel
try:
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
except ImportError:
raise ImportError(
"Apache Cassandra vector store requires cassandra-driver. "
"Please install it using 'pip install cassandra-driver'"
)
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str]
score: Optional[float]
payload: Optional[dict]
class CassandraDB(VectorStoreBase):
    def __init__(
        self,
        contact_points: List[str],
        port: int = 9042,
        username: Optional[str] = None,
        password: Optional[str] = None,
        keyspace: str = "mem0",
        collection_name: str = "memories",
        embedding_model_dims: int = 1536,
        secure_connect_bundle: Optional[str] = None,
        protocol_version: int = 4,
        load_balancing_policy: Optional[Any] = None,
    ):
        """Connect to a Cassandra cluster (or Astra DB) and ensure keyspace/table exist.

        Args:
            contact_points: Cluster contact point addresses (e.g. ["127.0.0.1"]).
            port: CQL native transport port.
            username: Optional username for PlainText authentication.
            password: Optional password for PlainText authentication.
            keyspace: Keyspace to create/use.
            collection_name: Table that stores the vectors.
            embedding_model_dims: Dimension of stored embedding vectors.
            secure_connect_bundle: Path to an Astra DB secure connect bundle;
                when set it takes precedence over contact_points/port.
            protocol_version: CQL protocol version.
            load_balancing_policy: Optional driver load-balancing policy.
        """
        self.contact_points = contact_points
        self.port = port
        self.username = username
        self.password = password
        self.keyspace = keyspace
        self.collection_name = collection_name
        self.embedding_model_dims = embedding_model_dims
        self.secure_connect_bundle = secure_connect_bundle
        self.protocol_version = protocol_version
        self.load_balancing_policy = load_balancing_policy
        # Initialize connection
        self.cluster = None
        self.session = None
        self._setup_connection()
        # Create keyspace and table if they don't exist
        self._create_keyspace()
        self._create_table()
    def _setup_connection(self):
        """Build the Cluster (Astra bundle or contact points) and open a session."""
        try:
            # Setup authentication
            auth_provider = None
            if self.username and self.password:
                auth_provider = PlainTextAuthProvider(
                    username=self.username,
                    password=self.password
                )
            # Connect to Astra DB using secure connect bundle
            if self.secure_connect_bundle:
                self.cluster = Cluster(
                    cloud={'secure_connect_bundle': self.secure_connect_bundle},
                    auth_provider=auth_provider,
                    protocol_version=self.protocol_version
                )
            else:
                # Connect to standard Cassandra cluster
                cluster_kwargs = {
                    'contact_points': self.contact_points,
                    'port': self.port,
                    'protocol_version': self.protocol_version
                }
                # Only pass optional kwargs when actually configured.
                if auth_provider:
                    cluster_kwargs['auth_provider'] = auth_provider
                if self.load_balancing_policy:
                    cluster_kwargs['load_balancing_policy'] = self.load_balancing_policy
                self.cluster = Cluster(**cluster_kwargs)
            self.session = self.cluster.connect()
            logger.info("Successfully connected to Cassandra cluster")
        except Exception as e:
            logger.error(f"Failed to connect to Cassandra: {e}")
            raise
    def _create_keyspace(self):
        """Create the keyspace if needed and make it the session default."""
        try:
            # Use SimpleStrategy for single datacenter, NetworkTopologyStrategy for production
            query = f"""
                CREATE KEYSPACE IF NOT EXISTS {self.keyspace}
                WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}
            """
            self.session.execute(query)
            self.session.set_keyspace(self.keyspace)
            logger.info(f"Keyspace '{self.keyspace}' is ready")
        except Exception as e:
            logger.error(f"Failed to create keyspace: {e}")
            raise
    def _create_table(self):
        """Create the vector table: id text PK, vector list<float>, payload as JSON text."""
        try:
            # Create table with vector stored as list<float> and payload as text (JSON)
            query = f"""
                CREATE TABLE IF NOT EXISTS {self.keyspace}.{self.collection_name} (
                    id text PRIMARY KEY,
                    vector list<float>,
                    payload text
                )
            """
            self.session.execute(query)
            logger.info(f"Table '{self.collection_name}' is ready")
        except Exception as e:
            logger.error(f"Failed to create table: {e}")
            raise
    def create_col(self, name: str = None, vector_size: int = None, distance: str = "cosine"):
        """Create a collection (table) if it does not already exist.

        NOTE(review): ``distance`` and ``vector_size`` are accepted for interface
        compatibility but the schema does not encode either — the vector is a
        plain list<float> and the metric is applied at search time. Confirm this
        is intended before relying on non-default values.
        """
        table_name = name or self.collection_name
        dims = vector_size or self.embedding_model_dims
        try:
            query = f"""
                CREATE TABLE IF NOT EXISTS {self.keyspace}.{table_name} (
                    id text PRIMARY KEY,
                    vector list<float>,
                    payload text
                )
            """
            self.session.execute(query)
            logger.info(f"Created collection '{table_name}' with vector dimension {dims}")
        except Exception as e:
            logger.error(f"Failed to create collection: {e}")
            raise
    def insert(
        self,
        vectors: List[List[float]],
        payloads: Optional[List[Dict]] = None,
        ids: Optional[List[str]] = None
    ):
        """Insert vectors with optional payloads/ids; missing ids become random UUIDs."""
        logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
        # The repeated {} shares one dict instance; safe here because payloads
        # are only serialized below, never mutated.
        if payloads is None:
            payloads = [{}] * len(vectors)
        if ids is None:
            ids = [str(uuid.uuid4()) for _ in range(len(vectors))]
        try:
            # '?' placeholders are the prepared-statement syntax in the driver.
            query = f"""
                INSERT INTO {self.keyspace}.{self.collection_name} (id, vector, payload)
                VALUES (?, ?, ?)
            """
            prepared = self.session.prepare(query)
            for vector, payload, vec_id in zip(vectors, payloads, ids):
                # Payloads are serialized to JSON text for the text column.
                self.session.execute(
                    prepared,
                    (vec_id, vector, json.dumps(payload))
                )
        except Exception as e:
            logger.error(f"Failed to insert vectors: {e}")
            raise
def search(
self,
query: str,
vectors: List[float],
limit: int = 5,
filters: Optional[Dict] = None,
) -> List[OutputData]:
try:
# Fetch all vectors (in production, you'd want pagination or filtering)
query_cql = f"""
SELECT id, vector, payload
FROM {self.keyspace}.{self.collection_name}
"""
rows = self.session.execute(query_cql)
# Calculate cosine similarity in Python
query_vec = np.array(vectors)
scored_results = []
for row in rows:
if not row.vector:
continue
vec = np.array(row.vector)
# Cosine similarity
similarity = np.dot(query_vec, vec) / (np.linalg.norm(query_vec) * np.linalg.norm(vec))
distance = 1 - similarity
# Apply filters if provided
if filters:
try:
payload = json.loads(row.payload) if row.payload else {}
match = all(payload.get(k) == v for k, v in filters.items())
if not match:
continue
except json.JSONDecodeError:
continue
scored_results.append((row.id, distance, row.payload))
# Sort by distance and limit
scored_results.sort(key=lambda x: x[1])
scored_results = scored_results[:limit]
return [
OutputData(
id=r[0],
score=float(r[1]),
payload=json.loads(r[2]) if r[2] else {}
)
for r in scored_results
]
except Exception as e:
logger.error(f"Search failed: {e}")
raise
def delete(self, vector_id: str):
try:
query = f"""
DELETE FROM {self.keyspace}.{self.collection_name}
WHERE id = ?
"""
prepared = self.session.prepare(query)
self.session.execute(prepared, (vector_id,))
logger.info(f"Deleted vector with id: {vector_id}")
except Exception as e:
logger.error(f"Failed to delete vector: {e}")
raise
    def update(
        self,
        vector_id: str,
        vector: Optional[List[float]] = None,
        payload: Optional[Dict] = None,
    ):
        """Update the vector and/or payload columns of the row with the given id.

        Each provided field is written with its own prepared UPDATE; omitted
        fields are left untouched.
        """
        try:
            if vector is not None:
                query = f"""
                    UPDATE {self.keyspace}.{self.collection_name}
                    SET vector = ?
                    WHERE id = ?
                """
                prepared = self.session.prepare(query)
                self.session.execute(prepared, (vector, vector_id))
            if payload is not None:
                query = f"""
                    UPDATE {self.keyspace}.{self.collection_name}
                    SET payload = ?
                    WHERE id = ?
                """
                prepared = self.session.prepare(query)
                self.session.execute(prepared, (json.dumps(payload), vector_id))
            logger.info(f"Updated vector with id: {vector_id}")
        except Exception as e:
            logger.error(f"Failed to update vector: {e}")
            raise
    def get(self, vector_id: str) -> Optional[OutputData]:
        """Fetch one row by primary key; returns None when absent or on error."""
        try:
            query = f"""
                SELECT id, vector, payload
                FROM {self.keyspace}.{self.collection_name}
                WHERE id = ?
            """
            prepared = self.session.prepare(query)
            row = self.session.execute(prepared, (vector_id,)).one()
            if not row:
                return None
            # Point lookups have no similarity score.
            return OutputData(
                id=row.id,
                score=None,
                payload=json.loads(row.payload) if row.payload else {}
            )
        except Exception as e:
            logger.error(f"Failed to get vector: {e}")
            return None
    def list_cols(self) -> List[str]:
        """List table names in this keyspace via the system_schema metadata tables."""
        try:
            query = f"""
                SELECT table_name
                FROM system_schema.tables
                WHERE keyspace_name = '{self.keyspace}'
            """
            rows = self.session.execute(query)
            return [row.table_name for row in rows]
        except Exception as e:
            logger.error(f"Failed to list collections: {e}")
            return []
def delete_col(self):
try:
query = f"""
DROP TABLE IF EXISTS {self.keyspace}.{self.collection_name}
"""
self.session.execute(query)
logger.info(f"Deleted collection '{self.collection_name}'")
except Exception as e:
logger.error(f"Failed to delete collection: {e}")
raise
    def col_info(self) -> Dict[str, Any]:
        """Return name, keyspace, (approximate) row count and vector dims; {} on error."""
        try:
            # Get row count (approximate)
            # NOTE(review): COUNT(*) in CQL scans the table — confirm this is
            # acceptable for the expected table sizes.
            query = f"""
                SELECT COUNT(*) as count
                FROM {self.keyspace}.{self.collection_name}
            """
            row = self.session.execute(query).one()
            count = row.count if row else 0
            return {
                "name": self.collection_name,
                "keyspace": self.keyspace,
                "count": count,
                "vector_dims": self.embedding_model_dims
            }
        except Exception as e:
            logger.error(f"Failed to get collection info: {e}")
            return {}
    def list(
        self,
        filters: Optional[Dict] = None,
        limit: int = 100
    ) -> List[List[OutputData]]:
        """Return up to ``limit`` rows, wrapped in a single-element list.

        NOTE(review): the CQL LIMIT is applied before the Python-side filter
        loop, so with filters the result may hold fewer than ``limit`` matches
        even when more exist in the table — confirm this is intended.
        """
        try:
            query = f"""
                SELECT id, vector, payload
                FROM {self.keyspace}.{self.collection_name}
                LIMIT {limit}
            """
            rows = self.session.execute(query)
            results = []
            for row in rows:
                # Apply filters if provided
                if filters:
                    try:
                        payload = json.loads(row.payload) if row.payload else {}
                        match = all(payload.get(k) == v for k, v in filters.items())
                        if not match:
                            continue
                    except json.JSONDecodeError:
                        continue
                results.append(
                    OutputData(
                        id=row.id,
                        score=None,
                        payload=json.loads(row.payload) if row.payload else {}
                    )
                )
            return [results]
        except Exception as e:
            logger.error(f"Failed to list vectors: {e}")
            return [[]]
def reset(self):
try:
logger.warning(f"Resetting collection {self.collection_name}...")
query = f"""
TRUNCATE TABLE {self.keyspace}.{self.collection_name}
"""
self.session.execute(query)
logger.info(f"Collection '{self.collection_name}' has been reset")
except Exception as e:
logger.error(f"Failed to reset collection: {e}")
raise
    def __del__(self):
        """Best-effort shutdown of the cluster connection at garbage collection."""
        try:
            if self.cluster:
                self.cluster.shutdown()
                logger.info("Cassandra cluster connection closed")
        except Exception:
            # Never raise from __del__ — the interpreter may be tearing down.
            pass
| --- +++ @@ -40,6 +40,21 @@ protocol_version: int = 4,
load_balancing_policy: Optional[Any] = None,
):
+ """
+ Initialize the Apache Cassandra vector store.
+
+ Args:
+ contact_points (List[str]): List of contact point addresses (e.g., ['127.0.0.1'])
+ port (int): Cassandra port (default: 9042)
+ username (str, optional): Database username
+ password (str, optional): Database password
+ keyspace (str): Keyspace name (default: "mem0")
+ collection_name (str): Table name (default: "memories")
+ embedding_model_dims (int): Dimension of the embedding vector (default: 1536)
+ secure_connect_bundle (str, optional): Path to secure connect bundle for Astra DB
+ protocol_version (int): CQL protocol version (default: 4)
+ load_balancing_policy (Any, optional): Custom load balancing policy
+ """
self.contact_points = contact_points
self.port = port
self.username = username
@@ -61,6 +76,7 @@ self._create_table()
def _setup_connection(self):
+ """Setup Cassandra cluster connection."""
try:
# Setup authentication
auth_provider = None
@@ -100,6 +116,7 @@ raise
def _create_keyspace(self):
+ """Create keyspace if it doesn't exist."""
try:
# Use SimpleStrategy for single datacenter, NetworkTopologyStrategy for production
query = f"""
@@ -114,6 +131,7 @@ raise
def _create_table(self):
+ """Create table with vector column if it doesn't exist."""
try:
# Create table with vector stored as list<float> and payload as text (JSON)
query = f"""
@@ -130,6 +148,14 @@ raise
def create_col(self, name: str = None, vector_size: int = None, distance: str = "cosine"):
+ """
+ Create a new collection (table in Cassandra).
+
+ Args:
+ name (str, optional): Collection name (uses self.collection_name if not provided)
+ vector_size (int, optional): Vector dimension (uses self.embedding_model_dims if not provided)
+ distance (str): Distance metric (cosine, euclidean, dot_product)
+ """
table_name = name or self.collection_name
dims = vector_size or self.embedding_model_dims
@@ -153,6 +179,14 @@ payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None
):
+ """
+ Insert vectors into the collection.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert
+ payloads (List[Dict], optional): List of payloads corresponding to vectors
+ ids (List[str], optional): List of IDs corresponding to vectors
+ """
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
if payloads is None:
@@ -183,6 +217,18 @@ limit: int = 5,
filters: Optional[Dict] = None,
) -> List[OutputData]:
+ """
+ Search for similar vectors using cosine similarity.
+
+ Args:
+ query (str): Query string (not used in vector search)
+ vectors (List[float]): Query vector
+ limit (int): Number of results to return
+ filters (Dict, optional): Filters to apply to the search
+
+ Returns:
+ List[OutputData]: Search results
+ """
try:
# Fetch all vectors (in production, you'd want pagination or filtering)
query_cql = f"""
@@ -234,6 +280,12 @@ raise
def delete(self, vector_id: str):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete
+ """
try:
query = f"""
DELETE FROM {self.keyspace}.{self.collection_name}
@@ -252,6 +304,14 @@ vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update
+ vector (List[float], optional): Updated vector
+ payload (Dict, optional): Updated payload
+ """
try:
if vector is not None:
query = f"""
@@ -277,6 +337,15 @@ raise
def get(self, vector_id: str) -> Optional[OutputData]:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve
+
+ Returns:
+ OutputData: Retrieved vector or None if not found
+ """
try:
query = f"""
SELECT id, vector, payload
@@ -299,6 +368,12 @@ return None
def list_cols(self) -> List[str]:
+ """
+ List all collections (tables in the keyspace).
+
+ Returns:
+ List[str]: List of collection names
+ """
try:
query = f"""
SELECT table_name
@@ -312,6 +387,7 @@ return []
def delete_col(self):
+ """Delete the collection (table)."""
try:
query = f"""
DROP TABLE IF EXISTS {self.keyspace}.{self.collection_name}
@@ -323,6 +399,12 @@ raise
def col_info(self) -> Dict[str, Any]:
+ """
+ Get information about the collection.
+
+ Returns:
+ Dict[str, Any]: Collection information
+ """
try:
# Get row count (approximate)
query = f"""
@@ -347,6 +429,16 @@ filters: Optional[Dict] = None,
limit: int = 100
) -> List[List[OutputData]]:
+ """
+ List all vectors in the collection.
+
+ Args:
+ filters (Dict, optional): Filters to apply
+ limit (int): Number of vectors to return
+
+ Returns:
+ List[List[OutputData]]: List of vectors
+ """
try:
query = f"""
SELECT id, vector, payload
@@ -381,6 +473,7 @@ return [[]]
def reset(self):
+ """Reset the collection by truncating it."""
try:
logger.warning(f"Resetting collection {self.collection_name}...")
query = f"""
@@ -393,9 +486,11 @@ raise
def __del__(self):
+ """Close the cluster connection when the object is deleted."""
try:
if self.cluster:
self.cluster.shutdown()
logger.info("Cassandra cluster connection closed")
except Exception:
pass
+
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/cassandra.py |
Add docstrings for better understanding | import logging
from typing import Any, Dict, List, Optional
try:
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError("Elasticsearch requires extra dependencies. Install with `pip install elasticsearch`") from None
from pydantic import BaseModel
from mem0.configs.vector_stores.elasticsearch import ElasticsearchConfig
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: str
score: float
payload: Dict
class ElasticsearchDB(VectorStoreBase):
    def __init__(self, **kwargs):
        """Create the Elasticsearch client (Elastic Cloud or host-based) from config.

        ``auto_create_index`` controls eager index creation; ``custom_search_query``
        lets callers override search request construction entirely.
        """
        config = ElasticsearchConfig(**kwargs)
        # Initialize Elasticsearch client
        if config.cloud_id:
            # Elastic Cloud deployments authenticate via cloud_id + API key.
            self.client = Elasticsearch(
                cloud_id=config.cloud_id,
                api_key=config.api_key,
                verify_certs=config.verify_certs,
                headers= config.headers or {},
            )
        else:
            self.client = Elasticsearch(
                hosts=[f"{config.host}" if config.port is None else f"{config.host}:{config.port}"],
                basic_auth=(config.user, config.password) if (config.user and config.password) else None,
                verify_certs=config.verify_certs,
                headers= config.headers or {},
            )
        self.collection_name = config.collection_name
        self.embedding_model_dims = config.embedding_model_dims
        # Create index only if auto_create_index is True
        if config.auto_create_index:
            self.create_index()
        if config.custom_search_query:
            self.custom_search_query = config.custom_search_query
        else:
            self.custom_search_query = None
    def create_index(self) -> None:
        """Create the default index (text + cosine dense_vector + metadata) if missing."""
        index_settings = {
            "settings": {"index": {"number_of_replicas": 1, "number_of_shards": 5, "refresh_interval": "1s"}},
            "mappings": {
                "properties": {
                    "text": {"type": "text"},
                    "vector": {
                        "type": "dense_vector",
                        "dims": self.embedding_model_dims,
                        "index": True,
                        "similarity": "cosine",
                    },
                    "metadata": {"type": "object", "properties": {"user_id": {"type": "keyword"}}},
                }
            },
        }
        if not self.client.indices.exists(index=self.collection_name):
            self.client.indices.create(index=self.collection_name, body=index_settings)
            logger.info(f"Created index {self.collection_name}")
        else:
            logger.info(f"Index {self.collection_name} already exists")
def create_col(self, name: str, vector_size: int, distance: str = "cosine") -> None:
index_settings = {
"mappings": {
"properties": {
"vector": {"type": "dense_vector", "dims": vector_size, "index": True, "similarity": "cosine"},
"payload": {"type": "object"},
"id": {"type": "keyword"},
}
}
}
if not self.client.indices.exists(index=name):
self.client.indices.create(index=name, body=index_settings)
logger.info(f"Created index {name}")
def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> List[OutputData]:
if not ids:
ids = [str(i) for i in range(len(vectors))]
if payloads is None:
payloads = [{} for _ in range(len(vectors))]
actions = []
for i, (vec, id_) in enumerate(zip(vectors, ids)):
action = {
"_index": self.collection_name,
"_id": id_,
"_source": {
"vector": vec,
"metadata": payloads[i], # Store all metadata in the metadata field
},
}
actions.append(action)
bulk(self.client, actions)
results = []
for i, id_ in enumerate(ids):
results.append(
OutputData(
id=id_,
score=1.0, # Default score for inserts
payload=payloads[i],
)
)
return results
def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
if self.custom_search_query:
search_query = self.custom_search_query(vectors, limit, filters)
else:
search_query = {
"knn": {"field": "vector", "query_vector": vectors, "k": limit, "num_candidates": limit * 2}
}
if filters:
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({"term": {f"metadata.{key}": value}})
search_query["knn"]["filter"] = {"bool": {"must": filter_conditions}}
response = self.client.search(index=self.collection_name, body=search_query)
results = []
for hit in response["hits"]["hits"]:
results.append(
OutputData(id=hit["_id"], score=hit["_score"], payload=hit.get("_source", {}).get("metadata", {}))
)
return results
def delete(self, vector_id: str) -> None:
self.client.delete(index=self.collection_name, id=vector_id)
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
doc = {}
if vector is not None:
doc["vector"] = vector
if payload is not None:
doc["metadata"] = payload
self.client.update(index=self.collection_name, id=vector_id, body={"doc": doc})
def get(self, vector_id: str) -> Optional[OutputData]:
try:
response = self.client.get(index=self.collection_name, id=vector_id)
return OutputData(
id=response["_id"],
score=1.0, # Default score for direct get
payload=response["_source"].get("metadata", {}),
)
except KeyError as e:
logger.warning(f"Missing key in Elasticsearch response: {e}")
return None
except TypeError as e:
logger.warning(f"Invalid response type from Elasticsearch: {e}")
return None
except Exception as e:
logger.error(f"Unexpected error while parsing Elasticsearch response: {e}")
return None
def list_cols(self) -> List[str]:
return list(self.client.indices.get_alias().keys())
def delete_col(self) -> None:
self.client.indices.delete(index=self.collection_name)
def col_info(self, name: str) -> Any:
return self.client.indices.get(index=name)
def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[List[OutputData]]:
query: Dict[str, Any] = {"query": {"match_all": {}}}
if filters:
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({"term": {f"metadata.{key}": value}})
query["query"] = {"bool": {"must": filter_conditions}}
if limit:
query["size"] = limit
response = self.client.search(index=self.collection_name, body=query)
results = []
for hit in response["hits"]["hits"]:
results.append(
OutputData(
id=hit["_id"],
score=1.0, # Default score for list operation
payload=hit.get("_source", {}).get("metadata", {}),
)
)
return [results]
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_index() | --- +++ @@ -54,6 +54,7 @@ self.custom_search_query = None
def create_index(self) -> None:
+ """Create Elasticsearch index with proper mappings if it doesn't exist"""
index_settings = {
"settings": {"index": {"number_of_replicas": 1, "number_of_shards": 5, "refresh_interval": "1s"}},
"mappings": {
@@ -77,6 +78,7 @@ logger.info(f"Index {self.collection_name} already exists")
def create_col(self, name: str, vector_size: int, distance: str = "cosine") -> None:
+ """Create a new collection (index in Elasticsearch)."""
index_settings = {
"mappings": {
"properties": {
@@ -94,6 +96,7 @@ def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> List[OutputData]:
+ """Insert vectors into the index."""
if not ids:
ids = [str(i) for i in range(len(vectors))]
@@ -128,6 +131,11 @@ def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """
+ Search with two options:
+ 1. Use custom search query if provided
+ 2. Use KNN search on vectors with pre-filtering if no custom search query is provided
+ """
if self.custom_search_query:
search_query = self.custom_search_query(vectors, limit, filters)
else:
@@ -151,9 +159,11 @@ return results
def delete(self, vector_id: str) -> None:
+ """Delete a vector by ID."""
self.client.delete(index=self.collection_name, id=vector_id)
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
+ """Update a vector and its payload."""
doc = {}
if vector is not None:
doc["vector"] = vector
@@ -163,6 +173,7 @@ self.client.update(index=self.collection_name, id=vector_id, body={"doc": doc})
def get(self, vector_id: str) -> Optional[OutputData]:
+ """Retrieve a vector by ID."""
try:
response = self.client.get(index=self.collection_name, id=vector_id)
return OutputData(
@@ -181,15 +192,19 @@ return None
def list_cols(self) -> List[str]:
+ """List all collections (indices)."""
return list(self.client.indices.get_alias().keys())
def delete_col(self) -> None:
+ """Delete a collection (index)."""
self.client.indices.delete(index=self.collection_name)
def col_info(self, name: str) -> Any:
+ """Get information about a collection (index)."""
return self.client.indices.get(index=name)
def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[List[OutputData]]:
+ """List all memories."""
query: Dict[str, Any] = {"query": {"match_all": {}}}
if filters:
@@ -216,6 +231,7 @@ return [results]
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_index()+ self.create_index()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/elasticsearch.py |
Expand my code with proper documentation strings | import json
import logging
import uuid
from typing import Optional, List
from datetime import datetime, date
from databricks.sdk.service.catalog import ColumnInfo, ColumnTypeName, TableType, DataSourceFormat
from databricks.sdk.service.catalog import TableConstraint, PrimaryKeyConstraint
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.vectorsearch import (
VectorIndexType,
DeltaSyncVectorIndexSpecRequest,
DirectAccessVectorIndexSpec,
EmbeddingSourceColumn,
EmbeddingVectorColumn,
)
from pydantic import BaseModel
from mem0.memory.utils import extract_json
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class MemoryResult(BaseModel):
id: Optional[str] = None
score: Optional[float] = None
payload: Optional[dict] = None
excluded_keys = {"user_id", "agent_id", "run_id", "hash", "data", "created_at", "updated_at"}
class Databricks(VectorStoreBase):
def __init__(
self,
workspace_url: str,
access_token: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
azure_client_id: Optional[str] = None,
azure_client_secret: Optional[str] = None,
endpoint_name: str = None,
catalog: str = None,
schema: str = None,
table_name: str = None,
collection_name: str = "mem0",
index_type: str = "DELTA_SYNC",
embedding_model_endpoint_name: Optional[str] = None,
embedding_dimension: int = 1536,
endpoint_type: str = "STANDARD",
pipeline_type: str = "TRIGGERED",
warehouse_name: Optional[str] = None,
query_type: str = "ANN",
):
# Basic identifiers
self.workspace_url = workspace_url
self.endpoint_name = endpoint_name
self.catalog = catalog
self.schema = schema
self.table_name = table_name
self.fully_qualified_table_name = f"{self.catalog}.{self.schema}.{self.table_name}"
self.index_name = collection_name
self.fully_qualified_index_name = f"{self.catalog}.{self.schema}.{self.index_name}"
# Configuration
self.index_type = index_type
self.embedding_model_endpoint_name = embedding_model_endpoint_name
self.embedding_dimension = embedding_dimension
self.endpoint_type = endpoint_type
self.pipeline_type = pipeline_type
self.query_type = query_type
# Schema
self.columns = [
ColumnInfo(
name="memory_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
nullable=False,
comment="Primary key",
position=0,
),
ColumnInfo(
name="hash",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="Hash of the memory content",
position=1,
),
ColumnInfo(
name="agent_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="ID of the agent",
position=2,
),
ColumnInfo(
name="run_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="ID of the run",
position=3,
),
ColumnInfo(
name="user_id",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="ID of the user",
position=4,
),
ColumnInfo(
name="memory",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="Memory content",
position=5,
),
ColumnInfo(
name="metadata",
type_name=ColumnTypeName.STRING,
type_text="string",
type_json='{"type":"string"}',
comment="Additional metadata",
position=6,
),
ColumnInfo(
name="created_at",
type_name=ColumnTypeName.TIMESTAMP,
type_text="timestamp",
type_json='{"type":"timestamp"}',
comment="Creation timestamp",
position=7,
),
ColumnInfo(
name="updated_at",
type_name=ColumnTypeName.TIMESTAMP,
type_text="timestamp",
type_json='{"type":"timestamp"}',
comment="Last update timestamp",
position=8,
),
]
if self.index_type == VectorIndexType.DIRECT_ACCESS:
self.columns.append(
ColumnInfo(
name="embedding",
type_name=ColumnTypeName.ARRAY,
type_text="array<float>",
type_json='{"type":"array","element":"float","element_nullable":false}',
nullable=True,
comment="Embedding vector",
position=9,
)
)
self.column_names = [col.name for col in self.columns]
# Initialize Databricks workspace client
client_config = {}
if client_id and client_secret:
client_config.update(
{
"host": workspace_url,
"client_id": client_id,
"client_secret": client_secret,
}
)
elif azure_client_id and azure_client_secret:
client_config.update(
{
"host": workspace_url,
"azure_client_id": azure_client_id,
"azure_client_secret": azure_client_secret,
}
)
elif access_token:
client_config.update({"host": workspace_url, "token": access_token})
else:
# Try automatic authentication
client_config["host"] = workspace_url
try:
self.client = WorkspaceClient(**client_config)
logger.info("Initialized Databricks workspace client")
except Exception as e:
logger.error(f"Failed to initialize Databricks workspace client: {e}")
raise
# Get the warehouse ID by name
self.warehouse_id = next((w.id for w in self.client.warehouses.list() if w.name == warehouse_name), None)
# Initialize endpoint (required in Databricks)
self._ensure_endpoint_exists()
# Check if index exists and create if needed
collections = self.list_cols()
if self.fully_qualified_index_name not in collections:
self.create_col()
def _ensure_endpoint_exists(self):
try:
self.client.vector_search_endpoints.get_endpoint(endpoint_name=self.endpoint_name)
logger.info(f"Vector search endpoint '{self.endpoint_name}' already exists")
except Exception:
# Endpoint doesn't exist, create it
try:
logger.info(f"Creating vector search endpoint '{self.endpoint_name}' with type '{self.endpoint_type}'")
self.client.vector_search_endpoints.create_endpoint_and_wait(
name=self.endpoint_name, endpoint_type=self.endpoint_type
)
logger.info(f"Successfully created vector search endpoint '{self.endpoint_name}'")
except Exception as e:
logger.error(f"Failed to create vector search endpoint '{self.endpoint_name}': {e}")
raise
def _ensure_source_table_exists(self):
check = self.client.tables.exists(self.fully_qualified_table_name)
if check.table_exists:
logger.info(f"Source table '{self.fully_qualified_table_name}' already exists")
else:
logger.info(f"Source table '{self.fully_qualified_table_name}' does not exist, creating it...")
self.client.tables.create(
name=self.table_name,
catalog_name=self.catalog,
schema_name=self.schema,
table_type=TableType.MANAGED,
data_source_format=DataSourceFormat.DELTA,
storage_location=None, # Use default storage location
columns=self.columns,
properties={"delta.enableChangeDataFeed": "true"},
)
logger.info(f"Successfully created source table '{self.fully_qualified_table_name}'")
self.client.table_constraints.create(
full_name_arg="logistics_dev.ai.dev_memory",
constraint=TableConstraint(
primary_key_constraint=PrimaryKeyConstraint(
name="pk_dev_memory", # Name of the primary key constraint
child_columns=["memory_id"], # Columns that make up the primary key
)
),
)
logger.info(
f"Successfully created primary key constraint on 'memory_id' for table '{self.fully_qualified_table_name}'"
)
def create_col(self, name=None, vector_size=None, distance=None):
# Determine index configuration
embedding_dims = vector_size or self.embedding_dimension
embedding_source_columns = [
EmbeddingSourceColumn(
name="memory",
embedding_model_endpoint_name=self.embedding_model_endpoint_name,
)
]
logger.info(f"Creating vector search index '{self.fully_qualified_index_name}'")
# First, ensure the source Delta table exists
self._ensure_source_table_exists()
if self.index_type not in [VectorIndexType.DELTA_SYNC, VectorIndexType.DIRECT_ACCESS]:
raise ValueError("index_type must be either 'DELTA_SYNC' or 'DIRECT_ACCESS'")
try:
if self.index_type == VectorIndexType.DELTA_SYNC:
index = self.client.vector_search_indexes.create_index(
name=self.fully_qualified_index_name,
endpoint_name=self.endpoint_name,
primary_key="memory_id",
index_type=self.index_type,
delta_sync_index_spec=DeltaSyncVectorIndexSpecRequest(
source_table=self.fully_qualified_table_name,
pipeline_type=self.pipeline_type,
columns_to_sync=self.column_names,
embedding_source_columns=embedding_source_columns,
),
)
logger.info(
f"Successfully created vector search index '{self.fully_qualified_index_name}' with DELTA_SYNC type"
)
return index
elif self.index_type == VectorIndexType.DIRECT_ACCESS:
index = self.client.vector_search_indexes.create_index(
name=self.fully_qualified_index_name,
endpoint_name=self.endpoint_name,
primary_key="memory_id",
index_type=self.index_type,
direct_access_index_spec=DirectAccessVectorIndexSpec(
embedding_source_columns=embedding_source_columns,
embedding_vector_columns=[
EmbeddingVectorColumn(name="embedding", embedding_dimension=embedding_dims)
],
),
)
logger.info(
f"Successfully created vector search index '{self.fully_qualified_index_name}' with DIRECT_ACCESS type"
)
return index
except Exception as e:
logger.error(f"Error making index_type: {self.index_type} for index {self.fully_qualified_index_name}: {e}")
def _format_sql_value(self, v):
if v is None:
return "NULL"
if isinstance(v, bool):
return "TRUE" if v else "FALSE"
if isinstance(v, (int, float)):
return str(v)
if isinstance(v, (datetime, date)):
return f"'{v.isoformat()}'"
if isinstance(v, list):
# Render arrays (assume numeric or string elements)
elems = []
for x in v:
if x is None:
elems.append("NULL")
elif isinstance(x, (int, float)):
elems.append(str(x))
else:
s = str(x).replace("'", "''")
elems.append(f"'{s}'")
return f"array({', '.join(elems)})"
if isinstance(v, dict):
try:
s = json.dumps(v)
except Exception:
s = str(v)
s = s.replace("'", "''")
return f"'{s}'"
# Fallback: treat as string
s = str(v).replace("'", "''")
return f"'{s}'"
def insert(self, vectors: list, payloads: list = None, ids: list = None):
# Determine the number of items to process
num_items = len(payloads) if payloads else len(vectors) if vectors else 0
value_tuples = []
for i in range(num_items):
values = []
for col in self.columns:
if col.name == "memory_id":
val = ids[i] if ids and i < len(ids) else str(uuid.uuid4())
elif col.name == "embedding":
val = vectors[i] if vectors and i < len(vectors) else []
elif col.name == "memory":
val = payloads[i].get("data") if payloads and i < len(payloads) else None
else:
val = payloads[i].get(col.name) if payloads and i < len(payloads) else None
values.append(val)
formatted = [self._format_sql_value(v) for v in values]
value_tuples.append(f"({', '.join(formatted)})")
insert_sql = f"INSERT INTO {self.fully_qualified_table_name} ({', '.join(self.column_names)}) VALUES {', '.join(value_tuples)}"
# Execute the insert
try:
response = self.client.statement_execution.execute_statement(
statement=insert_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if response.status.state.value == "SUCCEEDED":
logger.info(
f"Successfully inserted {num_items} items into Delta table {self.fully_qualified_table_name}"
)
return
else:
logger.error(f"Failed to insert items: {response.status.error}")
raise Exception(f"Insert operation failed: {response.status.error}")
except Exception as e:
logger.error(f"Insert operation failed: {e}")
raise
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> List[MemoryResult]:
try:
filters_json = json.dumps(filters) if filters else None
# Choose query type
if self.index_type == VectorIndexType.DELTA_SYNC and query:
# Text-based search
sdk_results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=self.column_names,
query_text=query,
num_results=limit,
query_type=self.query_type,
filters_json=filters_json,
)
elif self.index_type == VectorIndexType.DIRECT_ACCESS and vectors:
# Vector-based search
sdk_results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=self.column_names,
query_vector=vectors,
num_results=limit,
query_type=self.query_type,
filters_json=filters_json,
)
else:
raise ValueError("Must provide query text for DELTA_SYNC or vectors for DIRECT_ACCESS.")
# Parse results
result_data = sdk_results.result if hasattr(sdk_results, "result") else sdk_results
data_array = result_data.data_array if getattr(result_data, "data_array", None) else []
memory_results = []
for row in data_array:
# Map columns to values
row_dict = dict(zip(self.column_names, row)) if isinstance(row, (list, tuple)) else row
score = row_dict.get("score") or (
row[-1] if isinstance(row, (list, tuple)) and len(row) > len(self.column_names) else None
)
payload = {k: row_dict.get(k) for k in self.column_names}
payload["data"] = payload.get("memory", "")
memory_id = row_dict.get("memory_id") or row_dict.get("id")
memory_results.append(MemoryResult(id=memory_id, score=score, payload=payload))
return memory_results
except Exception as e:
logger.error(f"Search failed: {e}")
raise
def delete(self, vector_id):
try:
logger.info(f"Deleting vector with ID {vector_id} from Delta table {self.fully_qualified_table_name}")
delete_sql = f"DELETE FROM {self.fully_qualified_table_name} WHERE memory_id = '{vector_id}'"
response = self.client.statement_execution.execute_statement(
statement=delete_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if response.status.state.value == "SUCCEEDED":
logger.info(f"Successfully deleted vector with ID {vector_id}")
else:
logger.error(f"Failed to delete vector with ID {vector_id}: {response.status.error}")
except Exception as e:
logger.error(f"Delete operation failed for vector ID {vector_id}: {e}")
raise
def update(self, vector_id=None, vector=None, payload=None):
update_sql = f"UPDATE {self.fully_qualified_table_name} SET "
set_clauses = []
if not vector_id:
logger.error("vector_id is required for update operation")
return
if vector is not None:
if not isinstance(vector, list):
logger.error("vector must be a list of float values")
return
set_clauses.append(f"embedding = {vector}")
if payload:
if not isinstance(payload, dict):
logger.error("payload must be a dictionary")
return
for key, value in payload.items():
if key not in excluded_keys:
set_clauses.append(f"{key} = '{value}'")
if not set_clauses:
logger.error("No fields to update")
return
update_sql += ", ".join(set_clauses)
update_sql += f" WHERE memory_id = '{vector_id}'"
try:
logger.info(f"Updating vector with ID {vector_id} in Delta table {self.fully_qualified_table_name}")
response = self.client.statement_execution.execute_statement(
statement=update_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if response.status.state.value == "SUCCEEDED":
logger.info(f"Successfully updated vector with ID {vector_id}")
else:
logger.error(f"Failed to update vector with ID {vector_id}: {response.status.error}")
except Exception as e:
logger.error(f"Update operation failed for vector ID {vector_id}: {e}")
raise
def get(self, vector_id) -> MemoryResult:
try:
# Use query with ID filter to retrieve the specific vector
filters = {"memory_id": vector_id}
filters_json = json.dumps(filters)
results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=self.column_names,
query_text=" ", # Empty query, rely on filters
num_results=1,
query_type=self.query_type,
filters_json=filters_json,
)
# Process results
result_data = results.result if hasattr(results, "result") else results
data_array = result_data.data_array if hasattr(result_data, "data_array") else []
if not data_array:
raise KeyError(f"Vector with ID {vector_id} not found")
result = data_array[0]
columns = columns = [col.name for col in results.manifest.columns] if results.manifest and results.manifest.columns else []
row_data = dict(zip(columns, result))
# Build payload following the standard schema
payload = {
"hash": row_data.get("hash", "unknown"),
"data": row_data.get("memory", row_data.get("data", "unknown")),
"created_at": row_data.get("created_at"),
}
# Add updated_at if available
if "updated_at" in row_data:
payload["updated_at"] = row_data.get("updated_at")
# Add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if field in row_data:
payload[field] = row_data[field]
# Add metadata
if "metadata" in row_data and row_data.get('metadata'):
try:
metadata = json.loads(extract_json(row_data["metadata"]))
payload.update(metadata)
except (json.JSONDecodeError, TypeError):
logger.warning(f"Failed to parse metadata: {row_data.get('metadata')}")
memory_id = row_data.get("memory_id", row_data.get("memory_id", vector_id))
return MemoryResult(id=memory_id, payload=payload)
except Exception as e:
logger.error(f"Failed to get vector with ID {vector_id}: {e}")
raise
def list_cols(self) -> List[str]:
try:
indexes = self.client.vector_search_indexes.list_indexes(endpoint_name=self.endpoint_name)
return [idx.name for idx in indexes]
except Exception as e:
logger.error(f"Failed to list collections: {e}")
raise
def delete_col(self):
try:
# Try fully qualified first
try:
self.client.vector_search_indexes.delete_index(index_name=self.fully_qualified_index_name)
logger.info(f"Successfully deleted index '{self.fully_qualified_index_name}'")
except Exception:
self.client.vector_search_indexes.delete_index(index_name=self.index_name)
logger.info(f"Successfully deleted index '{self.index_name}' (short name)")
except Exception as e:
logger.error(f"Failed to delete index '{self.index_name}': {e}")
raise
def col_info(self, name=None):
try:
index_name = name or self.index_name
index = self.client.vector_search_indexes.get_index(index_name=index_name)
return {"name": index.name, "fields": self.columns}
except Exception as e:
logger.error(f"Failed to get info for index '{name or self.index_name}': {e}")
raise
def list(self, filters: dict = None, limit: int = None) -> list[MemoryResult]:
try:
filters_json = json.dumps(filters) if filters else None
num_results = limit or 100
columns = self.column_names
sdk_results = self.client.vector_search_indexes.query_index(
index_name=self.fully_qualified_index_name,
columns=columns,
query_text=" ",
num_results=num_results,
query_type=self.query_type,
filters_json=filters_json,
)
result_data = sdk_results.result if hasattr(sdk_results, "result") else sdk_results
data_array = result_data.data_array if hasattr(result_data, "data_array") else []
memory_results = []
for row in data_array:
row_dict = dict(zip(columns, row)) if isinstance(row, (list, tuple)) else row
payload = {k: row_dict.get(k) for k in columns}
# Parse metadata if present
if "metadata" in payload and payload["metadata"]:
try:
payload.update(json.loads(payload["metadata"]))
except Exception:
pass
memory_id = row_dict.get("memory_id") or row_dict.get("id")
payload['data'] = payload['memory']
memory_results.append(MemoryResult(id=memory_id, payload=payload))
return [memory_results]
except Exception as e:
logger.error(f"Failed to list memories: {e}")
return []
def reset(self):
fq_index = self.fully_qualified_index_name
logger.warning(f"Resetting Databricks vector search index '{fq_index}'...")
try:
# Try deleting via fully qualified name first
try:
self.client.vector_search_indexes.delete_index(index_name=fq_index)
logger.info(f"Deleted index '{fq_index}'")
except Exception as e_fq:
logger.debug(f"Failed deleting fully qualified index name '{fq_index}': {e_fq}. Trying short name...")
try:
# Fallback to existing helper which may use short name
self.delete_col()
except Exception as e_short:
logger.debug(f"Failed deleting short index name '{self.index_name}': {e_short}")
# Drop the backing table (if it exists)
try:
drop_sql = f"DROP TABLE IF EXISTS {self.fully_qualified_table_name}"
resp = self.client.statement_execution.execute_statement(
statement=drop_sql, warehouse_id=self.warehouse_id, wait_timeout="30s"
)
if getattr(resp.status, "state", None) == "SUCCEEDED":
logger.info(f"Dropped table '{self.fully_qualified_table_name}'")
else:
logger.warning(
f"Attempted to drop table '{self.fully_qualified_table_name}' but state was {getattr(resp.status, 'state', 'UNKNOWN')}: {getattr(resp.status, 'error', None)}"
)
except Exception as e_drop:
logger.warning(f"Failed to drop table '{self.fully_qualified_table_name}': {e_drop}")
# Recreate table & index
self._ensure_source_table_exists()
self.create_col()
logger.info(f"Successfully reset index '{fq_index}'")
except Exception as e:
logger.error(f"Error resetting index '{fq_index}': {e}")
raise | --- +++ @@ -51,6 +51,29 @@ warehouse_name: Optional[str] = None,
query_type: str = "ANN",
):
+ """
+ Initialize the Databricks Vector Search vector store.
+
+ Args:
+ workspace_url (str): Databricks workspace URL.
+ access_token (str, optional): Personal access token for authentication.
+ client_id (str, optional): Service principal client ID for authentication.
+ client_secret (str, optional): Service principal client secret for authentication.
+ azure_client_id (str, optional): Azure AD application client ID (for Azure Databricks).
+ azure_client_secret (str, optional): Azure AD application client secret (for Azure Databricks).
+ endpoint_name (str): Vector search endpoint name.
+ catalog (str): Unity Catalog catalog name.
+ schema (str): Unity Catalog schema name.
+ table_name (str): Source Delta table name.
+ index_name (str, optional): Vector search index name (default: "mem0").
+ index_type (str, optional): Index type, either "DELTA_SYNC" or "DIRECT_ACCESS" (default: "DELTA_SYNC").
+ embedding_model_endpoint_name (str, optional): Embedding model endpoint for Databricks-computed embeddings.
+ embedding_dimension (int, optional): Vector embedding dimensions (default: 1536).
+ endpoint_type (str, optional): Endpoint type, either "STANDARD" or "STORAGE_OPTIMIZED" (default: "STANDARD").
+ pipeline_type (str, optional): Sync pipeline type, either "TRIGGERED" or "CONTINUOUS" (default: "TRIGGERED").
+ warehouse_name (str, optional): Databricks SQL warehouse Name (if using SQL warehouse).
+ query_type (str, optional): Query type, either "ANN" or "HYBRID" (default: "ANN").
+ """
# Basic identifiers
self.workspace_url = workspace_url
self.endpoint_name = endpoint_name
@@ -202,6 +225,7 @@ self.create_col()
def _ensure_endpoint_exists(self):
+ """Ensure the vector search endpoint exists, create if it doesn't."""
try:
self.client.vector_search_endpoints.get_endpoint(endpoint_name=self.endpoint_name)
logger.info(f"Vector search endpoint '{self.endpoint_name}' already exists")
@@ -218,6 +242,7 @@ raise
def _ensure_source_table_exists(self):
+ """Ensure the source Delta table exists with the proper schema."""
check = self.client.tables.exists(self.fully_qualified_table_name)
if check.table_exists:
@@ -249,6 +274,17 @@ )
def create_col(self, name=None, vector_size=None, distance=None):
+ """
+ Create a new collection (index).
+
+ Args:
+ name (str, optional): Index name. If provided, will create a new index using the provided source_table_name.
+ vector_size (int, optional): Vector dimension size.
+ distance (str, optional): Distance metric (not directly applicable for Databricks).
+
+ Returns:
+ The index object.
+ """
# Determine index configuration
embedding_dims = vector_size or self.embedding_dimension
embedding_source_columns = [
@@ -306,6 +342,9 @@ logger.error(f"Error making index_type: {self.index_type} for index {self.fully_qualified_index_name}: {e}")
def _format_sql_value(self, v):
+ """
+ Format a Python value into a safe SQL literal for Databricks.
+ """
if v is None:
return "NULL"
if isinstance(v, bool):
@@ -338,6 +377,14 @@ return f"'{s}'"
def insert(self, vectors: list, payloads: list = None, ids: list = None):
+ """
+ Insert vectors into the index.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert.
+ payloads (List[Dict], optional): List of payloads corresponding to vectors.
+ ids (List[str], optional): List of IDs corresponding to vectors.
+ """
# Determine the number of items to process
num_items = len(payloads) if payloads else len(vectors) if vectors else 0
@@ -377,6 +424,18 @@ raise
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> List[MemoryResult]:
+ """
+ Search for similar vectors or text using the Databricks Vector Search index.
+
+ Args:
+ query (str): Search query text (for text-based search).
+ vectors (list): Query vector (for vector-based search).
+ limit (int): Maximum number of results.
+ filters (dict): Filters to apply.
+
+ Returns:
+ List of MemoryResult objects.
+ """
try:
filters_json = json.dumps(filters) if filters else None
@@ -426,6 +485,12 @@ raise
def delete(self, vector_id):
+ """
+ Delete a vector by ID from the Delta table.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
try:
logger.info(f"Deleting vector with ID {vector_id} from Delta table {self.fully_qualified_table_name}")
@@ -445,6 +510,14 @@ raise
def update(self, vector_id=None, vector=None, payload=None):
+ """
+ Update a vector and its payload in the Delta table.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (list, optional): New vector values.
+ payload (dict, optional): New payload data.
+ """
update_sql = f"UPDATE {self.fully_qualified_table_name} SET "
set_clauses = []
@@ -485,6 +558,15 @@ raise
def get(self, vector_id) -> MemoryResult:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ MemoryResult: The retrieved vector.
+ """
try:
# Use query with ID filter to retrieve the specific vector
filters = {"memory_id": vector_id}
@@ -542,6 +624,12 @@ raise
def list_cols(self) -> List[str]:
+ """
+ List all collections (indexes).
+
+ Returns:
+ List of index names.
+ """
try:
indexes = self.client.vector_search_indexes.list_indexes(endpoint_name=self.endpoint_name)
return [idx.name for idx in indexes]
@@ -550,6 +638,9 @@ raise
def delete_col(self):
+ """
+ Delete the current collection (index).
+ """
try:
# Try fully qualified first
try:
@@ -563,6 +654,15 @@ raise
def col_info(self, name=None):
+ """
+ Get information about a collection (index).
+
+ Args:
+ name (str, optional): Index name. Defaults to current index.
+
+ Returns:
+ Dict: Index information.
+ """
try:
index_name = name or self.index_name
index = self.client.vector_search_indexes.get_index(index_name=index_name)
@@ -572,6 +672,16 @@ raise
def list(self, filters: dict = None, limit: int = None) -> list[MemoryResult]:
+ """
+ List all recent created memories from the vector store.
+
+ Args:
+ filters (dict, optional): Filters to apply.
+ limit (int, optional): Maximum number of results.
+
+ Returns:
+ List containing list of MemoryResult objects.
+ """
try:
filters_json = json.dumps(filters) if filters else None
num_results = limit or 100
@@ -606,6 +716,12 @@ return []
def reset(self):
+ """Reset the vector search index and underlying source table.
+
+ This will attempt to delete the existing index (both fully qualified and short name forms
+ for robustness), drop the backing Delta table, recreate the table with the expected schema,
+ and finally recreate the index. Use with caution as all existing data will be removed.
+ """
fq_index = self.fully_qualified_index_name
logger.warning(f"Resetting Databricks vector search index '{fq_index}'...")
try:
@@ -642,4 +758,4 @@ logger.info(f"Successfully reset index '{fq_index}'")
except Exception as e:
logger.error(f"Error resetting index '{fq_index}': {e}")
- raise+ raise
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/databricks.py |
Add docstrings that explain logic | import logging
import time
import uuid
from typing import Dict, List, Optional
from pydantic import BaseModel
try:
from langchain_aws import NeptuneAnalyticsGraph
except ImportError:
raise ImportError("langchain_aws is not installed. Please install it using pip install langchain_aws")
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # distance
payload: Optional[Dict] # metadata
class NeptuneAnalyticsVector(VectorStoreBase):
_COLLECTION_PREFIX = "MEM0_VECTOR_"
_FIELD_N = 'n'
_FIELD_ID = '~id'
_FIELD_PROP = '~properties'
_FIELD_SCORE = 'score'
_FIELD_LABEL = 'label'
_TIMEZONE = "UTC"
def __init__(
self,
endpoint: str,
collection_name: str,
):
if not endpoint.startswith("neptune-graph://"):
raise ValueError("Please provide 'endpoint' with the format as 'neptune-graph://<graphid>'.")
graph_id = endpoint.replace("neptune-graph://", "")
self.graph = NeptuneAnalyticsGraph(graph_id)
self.collection_name = self._COLLECTION_PREFIX + collection_name
def create_col(self, name, vector_size, distance):
pass
def insert(self, vectors: List[list],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None):
para_list = []
for index, data_vector in enumerate(vectors):
if payloads:
payload = payloads[index]
payload[self._FIELD_LABEL] = self.collection_name
payload["updated_at"] = str(int(time.time()))
else:
payload = {}
para_list.append(dict(
node_id=ids[index] if ids else str(uuid.uuid4()),
properties=payload,
embedding=data_vector,
))
para_map_to_insert = {"rows": para_list}
query_string = (f"""
UNWIND $rows AS row
MERGE (n :{self.collection_name} {{`~id`: row.node_id}})
ON CREATE SET n = row.properties
ON MATCH SET n += row.properties
"""
)
self.execute_query(query_string, para_map_to_insert)
query_string_vector = (f"""
UNWIND $rows AS row
MATCH (n
:{self.collection_name}
{{`~id`: row.node_id}})
WITH n, row.embedding AS embedding
CALL neptune.algo.vectors.upsert(n, embedding)
YIELD success
RETURN success
"""
)
result = self.execute_query(query_string_vector, para_map_to_insert)
self._process_success_message(result, "Vector store - Insert")
def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
if not filters:
filters = {}
filters[self._FIELD_LABEL] = self.collection_name
filter_clause = self._get_node_filter_clause(filters)
query_string = f"""
CALL neptune.algo.vectors.topKByEmbeddingWithFiltering({{
topK: {limit},
embedding: {vectors}
{filter_clause}
}}
)
YIELD node, score
RETURN node as n, score
"""
query_response = self.execute_query(query_string)
if len(query_response) > 0:
return self._parse_query_responses(query_response, with_score=True)
else :
return []
def delete(self, vector_id: str):
params = dict(node_id=vector_id)
query_string = f"""
MATCH (n :{self.collection_name})
WHERE id(n) = $node_id
DETACH DELETE n
"""
self.execute_query(query_string, params)
def update(
self,
vector_id: str,
vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
if payload:
# Replace payload
payload[self._FIELD_LABEL] = self.collection_name
payload["updated_at"] = str(int(time.time()))
para_payload = {
"properties": payload,
"vector_id": vector_id
}
query_string_embedding = f"""
MATCH (n :{self.collection_name})
WHERE id(n) = $vector_id
SET n = $properties
"""
self.execute_query(query_string_embedding, para_payload)
if vector:
para_embedding = {
"embedding": vector,
"vector_id": vector_id
}
query_string_embedding = f"""
MATCH (n :{self.collection_name})
WHERE id(n) = $vector_id
WITH $embedding as embedding, n as n
CALL neptune.algo.vectors.upsert(n, embedding)
YIELD success
RETURN success
"""
self.execute_query(query_string_embedding, para_embedding)
def get(self, vector_id: str):
params = dict(node_id=vector_id)
query_string = f"""
MATCH (n :{self.collection_name})
WHERE id(n) = $node_id
RETURN n
"""
# Composite the query
result = self.execute_query(query_string, params)
if len(result) != 0:
return self._parse_query_responses(result)[0]
def list_cols(self):
query_string = f"""
CALL neptune.graph.pg_schema()
YIELD schema
RETURN [ label IN schema.nodeLabels WHERE label STARTS WITH '{self.collection_name}'] AS result
"""
result = self.execute_query(query_string)
if len(result) == 1 and "result" in result[0]:
return result[0]["result"]
else:
return []
def delete_col(self):
self.execute_query(f"MATCH (n :{self.collection_name}) DETACH DELETE n")
def col_info(self):
pass
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
where_clause = self._get_where_clause(filters) if filters else ""
para = {
"limit": limit,
}
query_string = f"""
MATCH (n :{self.collection_name})
{where_clause}
RETURN n
LIMIT $limit
"""
query_response = self.execute_query(query_string, para)
if len(query_response) > 0:
# Handle if there is no match.
return [self._parse_query_responses(query_response)]
return [[]]
def reset(self):
self.delete_col()
def _parse_query_responses(self, response: dict, with_score: bool = False):
result = []
# Handle if there is no match.
for item in response:
id = item[self._FIELD_N][self._FIELD_ID]
properties = item[self._FIELD_N][self._FIELD_PROP]
properties.pop("label", None)
if with_score:
score = item[self._FIELD_SCORE]
else:
score = None
result.append(OutputData(
id=id,
score=score,
payload=properties,
))
return result
def execute_query(self, query_string: str, params=None):
if params is None:
params = {}
logger.debug(f"Executing openCypher query:[{query_string}], with parameters:[{params}].")
return self.graph.query(query_string, params)
@staticmethod
def _get_where_clause(filters: dict):
where_clause = ""
for i, (k, v) in enumerate(filters.items()):
if i == 0:
where_clause += f"WHERE n.{k} = '{v}' "
else:
where_clause += f"AND n.{k} = '{v}' "
return where_clause
@staticmethod
def _get_node_filter_clause(filters: dict):
conditions = []
for k, v in filters.items():
conditions.append(f"{{equals:{{property: '{k}', value: '{v}'}}}}")
if len(conditions) == 1:
filter_clause = f", nodeFilter: {conditions[0]}"
else:
filter_clause = f"""
, nodeFilter: {{andAll: [ {", ".join(conditions)} ]}}
"""
return filter_clause
@staticmethod
def _process_success_message(response, context):
for success_message in response:
if "success" not in success_message:
logger.error(f"Query execution status is absent on action: [{context}]")
break
if success_message["success"] is not True:
logger.error(f"Abnormal response status on action: [{context}] with message: [{success_message['success']}] ")
break | --- +++ @@ -21,6 +21,12 @@
class NeptuneAnalyticsVector(VectorStoreBase):
+ """
+ Neptune Analytics vector store implementation for Mem0.
+
+ Provides vector storage and similarity search capabilities using Amazon Neptune Analytics,
+ a serverless graph analytics service that supports vector operations.
+ """
_COLLECTION_PREFIX = "MEM0_VECTOR_"
_FIELD_N = 'n'
@@ -35,6 +41,17 @@ endpoint: str,
collection_name: str,
):
+ """
+ Initialize the Neptune Analytics vector store.
+
+ Args:
+ endpoint (str): Neptune Analytics endpoint in format 'neptune-graph://<graphid>'.
+ collection_name (str): Name of the collection to store vectors.
+
+ Raises:
+ ValueError: If endpoint format is invalid.
+ ImportError: If langchain_aws is not installed.
+ """
if not endpoint.startswith("neptune-graph://"):
raise ValueError("Please provide 'endpoint' with the format as 'neptune-graph://<graphid>'.")
@@ -45,12 +62,34 @@
def create_col(self, name, vector_size, distance):
+ """
+ Create a collection (no-op for Neptune Analytics).
+
+ Neptune Analytics supports dynamic indices that are created implicitly
+ when vectors are inserted, so this method performs no operation.
+
+ Args:
+ name: Collection name (unused).
+ vector_size: Vector dimension (unused).
+ distance: Distance metric (unused).
+ """
pass
def insert(self, vectors: List[list],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None):
+ """
+ Insert vectors into the collection.
+
+ Creates or updates nodes in Neptune Analytics with vector embeddings and metadata.
+ Uses MERGE operation to handle both creation and updates.
+
+ Args:
+ vectors (List[list]): List of embedding vectors to insert.
+ payloads (Optional[List[Dict]]): Optional metadata for each vector.
+ ids (Optional[List[str]]): Optional IDs for vectors. Generated if not provided.
+ """
para_list = []
for index, data_vector in enumerate(vectors):
@@ -96,6 +135,21 @@ def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """
+ Search for similar vectors using embedding similarity.
+
+ Performs vector similarity search using Neptune Analytics' topKByEmbeddingWithFiltering
+ algorithm to find the most similar vectors.
+
+ Args:
+ query (str): Search query text (unused in vector search).
+ vectors (List[float]): Query embedding vector.
+ limit (int, optional): Maximum number of results to return. Defaults to 5.
+ filters (Optional[Dict]): Optional filters to apply to search results.
+
+ Returns:
+ List[OutputData]: List of similar vectors with scores and metadata.
+ """
if not filters:
filters = {}
@@ -121,6 +175,14 @@
def delete(self, vector_id: str):
+ """
+ Delete a vector by its ID.
+
+ Removes the node and all its relationships from the Neptune Analytics graph.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
params = dict(node_id=vector_id)
query_string = f"""
MATCH (n :{self.collection_name})
@@ -135,6 +197,17 @@ vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
+ """
+ Update a vector's embedding and/or metadata.
+
+ Updates the node properties and/or vector embedding for an existing vector.
+ Can update either the payload, the vector, or both.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (Optional[List[float]]): New embedding vector.
+ payload (Optional[Dict]): New metadata to replace existing payload.
+ """
if payload:
# Replace payload
@@ -169,6 +242,17 @@
def get(self, vector_id: str):
+ """
+ Retrieve a vector by its ID.
+
+ Fetches the node data including metadata for the specified vector ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Vector data with metadata, or None if not found.
+ """
params = dict(node_id=vector_id)
query_string = f"""
MATCH (n :{self.collection_name})
@@ -184,6 +268,15 @@
def list_cols(self):
+ """
+ List all collections with the Mem0 prefix.
+
+ Queries the Neptune Analytics schema to find all node labels that start
+ with the Mem0 collection prefix.
+
+ Returns:
+ List[str]: List of collection names.
+ """
query_string = f"""
CALL neptune.graph.pg_schema()
YIELD schema
@@ -197,14 +290,38 @@
def delete_col(self):
+ """
+ Delete the entire collection.
+
+ Removes all nodes with the collection label and their relationships
+ from the Neptune Analytics graph.
+ """
self.execute_query(f"MATCH (n :{self.collection_name}) DETACH DELETE n")
def col_info(self):
+ """
+ Get collection information (no-op for Neptune Analytics).
+
+ Collections are created dynamically in Neptune Analytics, so no
+ collection-specific metadata is available.
+ """
pass
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
+ """
+ List all vectors in the collection with optional filtering.
+
+ Retrieves vectors from the collection, optionally filtered by metadata properties.
+
+ Args:
+ filters (Optional[Dict]): Optional filters to apply based on metadata.
+ limit (int, optional): Maximum number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors with their metadata.
+ """
where_clause = self._get_where_clause(filters) if filters else ""
para = {
@@ -225,10 +342,25 @@
def reset(self):
+ """
+ Reset the collection by deleting all vectors.
+
+ Removes all vectors from the collection, effectively resetting it to empty state.
+ """
self.delete_col()
def _parse_query_responses(self, response: dict, with_score: bool = False):
+ """
+ Parse Neptune Analytics query responses into OutputData objects.
+
+ Args:
+ response (dict): Raw query response from Neptune Analytics.
+ with_score (bool, optional): Whether to include similarity scores. Defaults to False.
+
+ Returns:
+ List[OutputData]: Parsed response data.
+ """
result = []
# Handle if there is no match.
for item in response:
@@ -248,6 +380,19 @@
def execute_query(self, query_string: str, params=None):
+ """
+ Execute an openCypher query on Neptune Analytics.
+
+ This is a wrapper method around the Neptune Analytics graph query execution
+ that provides debug logging for query monitoring and troubleshooting.
+
+ Args:
+ query_string (str): The openCypher query string to execute.
+ params (dict): Parameters to bind to the query.
+
+ Returns:
+ Query result from Neptune Analytics graph execution.
+ """
if params is None:
params = {}
logger.debug(f"Executing openCypher query:[{query_string}], with parameters:[{params}].")
@@ -256,6 +401,15 @@
@staticmethod
def _get_where_clause(filters: dict):
+ """
+ Build WHERE clause for Cypher queries from filters.
+
+ Args:
+ filters (dict): Filter conditions as key-value pairs.
+
+ Returns:
+ str: Formatted WHERE clause for Cypher query.
+ """
where_clause = ""
for i, (k, v) in enumerate(filters.items()):
if i == 0:
@@ -266,6 +420,18 @@
@staticmethod
def _get_node_filter_clause(filters: dict):
+ """
+ Build node filter clause for vector search operations.
+
+ Creates filter conditions for Neptune Analytics vector search operations
+ using the nodeFilter parameter format.
+
+ Args:
+ filters (dict): Filter conditions as key-value pairs.
+
+ Returns:
+ str: Formatted node filter clause for vector search.
+ """
conditions = []
for k, v in filters.items():
conditions.append(f"{{equals:{{property: '{k}', value: '{v}'}}}}")
@@ -281,6 +447,16 @@
@staticmethod
def _process_success_message(response, context):
+ """
+ Process and validate success messages from Neptune Analytics operations.
+
+ Checks the response from vector operations (insert/update) to ensure they
+ completed successfully. Logs errors if operations fail.
+
+ Args:
+ response: Response from Neptune Analytics vector operation.
+ context (str): Context description for logging (e.g., "Vector store - Insert").
+ """
for success_message in response:
if "success" not in success_message:
logger.error(f"Query execution status is absent on action: [{context}]")
@@ -288,4 +464,4 @@
if success_message["success"] is not True:
logger.error(f"Abnormal response status on action: [{context}] with message: [{success_message['success']}] ")
- break+ break
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/neptune_analytics.py |
Expand my code with proper documentation strings | import logging
import time
from typing import Dict, Optional
from pydantic import BaseModel
from mem0.vector_stores.base import VectorStoreBase
try:
import pymochow
from pymochow.auth.bce_credentials import BceCredentials
from pymochow.configuration import Configuration
from pymochow.exception import ServerError
from pymochow.model.enum import (
FieldType,
IndexType,
MetricType,
ServerErrCode,
TableState,
)
from pymochow.model.schema import (
AutoBuildRowCountIncrement,
Field,
FilteringIndex,
HNSWParams,
Schema,
VectorIndex,
)
from pymochow.model.table import (
FloatVector,
Partition,
Row,
VectorSearchConfig,
VectorTopkSearchRequest,
)
except ImportError:
raise ImportError("The 'pymochow' library is required. Please install it using 'pip install pymochow'.")
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # distance
payload: Optional[Dict] # metadata
class BaiduDB(VectorStoreBase):
def __init__(
self,
endpoint: str,
account: str,
api_key: str,
database_name: str,
table_name: str,
embedding_model_dims: int,
metric_type: MetricType,
) -> None:
self.endpoint = endpoint
self.account = account
self.api_key = api_key
self.database_name = database_name
self.table_name = table_name
self.embedding_model_dims = embedding_model_dims
self.metric_type = metric_type
# Initialize Mochow client
config = Configuration(credentials=BceCredentials(account, api_key), endpoint=endpoint)
self.client = pymochow.MochowClient(config)
# Ensure database and table exist
self._create_database_if_not_exists()
self.create_col(
name=self.table_name,
vector_size=self.embedding_model_dims,
distance=self.metric_type,
)
def _create_database_if_not_exists(self):
try:
# Check if database exists
databases = self.client.list_databases()
db_exists = any(db.database_name == self.database_name for db in databases)
if not db_exists:
self._database = self.client.create_database(self.database_name)
logger.info(f"Created database: {self.database_name}")
else:
self._database = self.client.database(self.database_name)
logger.info(f"Database {self.database_name} already exists")
except Exception as e:
logger.error(f"Error creating database: {e}")
raise
def create_col(self, name, vector_size, distance):
# Check if table already exists
try:
tables = self._database.list_table()
table_exists = any(table.table_name == name for table in tables)
if table_exists:
logger.info(f"Table {name} already exists. Skipping creation.")
self._table = self._database.describe_table(name)
return
# Convert distance string to MetricType enum
metric_type = None
for k, v in MetricType.__members__.items():
if k == distance:
metric_type = v
if metric_type is None:
raise ValueError(f"Unsupported metric_type: {distance}")
# Define table schema
fields = [
Field(
"id", FieldType.STRING, primary_key=True, partition_key=True, auto_increment=False, not_null=True
),
Field("vector", FieldType.FLOAT_VECTOR, dimension=vector_size),
Field("metadata", FieldType.JSON),
]
# Create vector index
indexes = [
VectorIndex(
index_name="vector_idx",
index_type=IndexType.HNSW,
field="vector",
metric_type=metric_type,
params=HNSWParams(m=16, efconstruction=200),
auto_build=True,
auto_build_index_policy=AutoBuildRowCountIncrement(row_count_increment=10000),
),
FilteringIndex(index_name="metadata_filtering_idx", fields=["metadata"]),
]
schema = Schema(fields=fields, indexes=indexes)
# Create table
self._table = self._database.create_table(
table_name=name, replication=3, partition=Partition(partition_num=1), schema=schema
)
logger.info(f"Created table: {name}")
# Wait for table to be ready
while True:
time.sleep(2)
table = self._database.describe_table(name)
if table.state == TableState.NORMAL:
logger.info(f"Table {name} is ready.")
break
logger.info(f"Waiting for table {name} to be ready, current state: {table.state}")
self._table = table
except Exception as e:
logger.error(f"Error creating table: {e}")
raise
def insert(self, vectors, payloads=None, ids=None):
# Prepare data for insertion
for idx, vector, metadata in zip(ids, vectors, payloads):
row = Row(id=idx, vector=vector, metadata=metadata)
self._table.upsert(rows=[row])
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> list:
# Add filters if provided
search_filter = None
if filters:
search_filter = self._create_filter(filters)
# Create AnnSearch for vector search
request = VectorTopkSearchRequest(
vector_field="vector",
vector=FloatVector(vectors),
limit=limit,
filter=search_filter,
config=VectorSearchConfig(ef=200),
)
# Perform search
projections = ["id", "metadata"]
res = self._table.vector_search(request=request, projections=projections)
# Parse results
output = []
for row in res.rows:
row_data = row.get("row", {})
output_data = OutputData(
id=row_data.get("id"), score=row.get("score", 0.0), payload=row_data.get("metadata", {})
)
output.append(output_data)
return output
def delete(self, vector_id):
self._table.delete(primary_key={"id": vector_id})
def update(self, vector_id=None, vector=None, payload=None):
row = Row(id=vector_id, vector=vector, metadata=payload)
self._table.upsert(rows=[row])
def get(self, vector_id):
projections = ["id", "metadata"]
result = self._table.query(primary_key={"id": vector_id}, projections=projections)
row = result.row
return OutputData(id=row.get("id"), score=None, payload=row.get("metadata", {}))
def list_cols(self):
tables = self._database.list_table()
return [table.table_name for table in tables]
def delete_col(self):
try:
tables = self._database.list_table()
# skip drop table if table not exists
table_exists = any(table.table_name == self.table_name for table in tables)
if not table_exists:
logger.info(f"Table {self.table_name} does not exist, skipping deletion")
return
# Delete the table
self._database.drop_table(self.table_name)
logger.info(f"Initiated deletion of table {self.table_name}")
# Wait for table to be completely deleted
while True:
time.sleep(2)
try:
self._database.describe_table(self.table_name)
logger.info(f"Waiting for table {self.table_name} to be deleted...")
except ServerError as e:
if e.code == ServerErrCode.TABLE_NOT_EXIST:
logger.info(f"Table {self.table_name} has been completely deleted")
break
logger.error(f"Error checking table status: {e}")
raise
except Exception as e:
logger.error(f"Error deleting table: {e}")
raise
def col_info(self):
return self._table.stats()
def list(self, filters: dict = None, limit: int = 100) -> list:
projections = ["id", "metadata"]
list_filter = self._create_filter(filters) if filters else None
result = self._table.select(filter=list_filter, projections=projections, limit=limit)
memories = []
for row in result.rows:
obj = OutputData(id=row.get("id"), score=None, payload=row.get("metadata", {}))
memories.append(obj)
return [memories]
def reset(self):
logger.warning(f"Resetting table {self.table_name}...")
try:
self.delete_col()
self.create_col(
name=self.table_name,
vector_size=self.embedding_model_dims,
distance=self.metric_type,
)
except Exception as e:
logger.warning(f"Error resetting table: {e}")
raise
def _create_filter(self, filters: dict) -> str:
conditions = []
for key, value in filters.items():
if isinstance(value, str):
conditions.append(f'metadata["{key}"] = "{value}"')
else:
conditions.append(f'metadata["{key}"] = {value}')
return " AND ".join(conditions) | --- +++ @@ -56,6 +56,17 @@ embedding_model_dims: int,
metric_type: MetricType,
) -> None:
+ """Initialize the BaiduDB database.
+
+ Args:
+ endpoint (str): Endpoint URL for Baidu VectorDB.
+ account (str): Account for Baidu VectorDB.
+ api_key (str): API Key for Baidu VectorDB.
+ database_name (str): Name of the database.
+ table_name (str): Name of the table.
+ embedding_model_dims (int): Dimensions of the embedding model.
+ metric_type (MetricType): Metric type for similarity search.
+ """
self.endpoint = endpoint
self.account = account
self.api_key = api_key
@@ -77,6 +88,7 @@ )
def _create_database_if_not_exists(self):
+ """Create database if it doesn't exist."""
try:
# Check if database exists
databases = self.client.list_databases()
@@ -92,6 +104,13 @@ raise
def create_col(self, name, vector_size, distance):
+ """Create a new table.
+
+ Args:
+ name (str): Name of the table to create.
+ vector_size (int): Dimension of the vector.
+ distance (str): Metric type for similarity search.
+ """
# Check if table already exists
try:
tables = self._database.list_table()
@@ -154,12 +173,31 @@ raise
def insert(self, vectors, payloads=None, ids=None):
+ """Insert vectors into the table.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert.
+ payloads (List[Dict], optional): List of payloads corresponding to vectors.
+ ids (List[str], optional): List of IDs corresponding to vectors.
+ """
# Prepare data for insertion
for idx, vector, metadata in zip(ids, vectors, payloads):
row = Row(id=idx, vector=vector, metadata=metadata)
self._table.upsert(rows=[row])
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> list:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query string.
+ vectors (List[float]): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Dict, optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ list: Search results.
+ """
# Add filters if provided
search_filter = None
if filters:
@@ -190,23 +228,53 @@ return output
def delete(self, vector_id):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
self._table.delete(primary_key={"id": vector_id})
def update(self, vector_id=None, vector=None, payload=None):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (List[float], optional): Updated vector.
+ payload (Dict, optional): Updated payload.
+ """
row = Row(id=vector_id, vector=vector, metadata=payload)
self._table.upsert(rows=[row])
def get(self, vector_id):
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Retrieved vector.
+ """
projections = ["id", "metadata"]
result = self._table.query(primary_key={"id": vector_id}, projections=projections)
row = result.row
return OutputData(id=row.get("id"), score=None, payload=row.get("metadata", {}))
def list_cols(self):
+ """
+ List all tables (collections).
+
+ Returns:
+ List[str]: List of table names.
+ """
tables = self._database.list_table()
return [table.table_name for table in tables]
def delete_col(self):
+ """Delete the table."""
try:
tables = self._database.list_table()
@@ -237,9 +305,25 @@ raise
def col_info(self):
+ """
+ Get information about the table.
+
+ Returns:
+ Dict[str, Any]: Table information.
+ """
return self._table.stats()
def list(self, filters: dict = None, limit: int = 100) -> list:
+ """
+ List all vectors in the table.
+
+ Args:
+ filters (Dict, optional): Filters to apply to the list.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
projections = ["id", "metadata"]
list_filter = self._create_filter(filters) if filters else None
result = self._table.select(filter=list_filter, projections=projections, limit=limit)
@@ -252,6 +336,7 @@ return [memories]
def reset(self):
+ """Reset the table by deleting and recreating it."""
logger.warning(f"Resetting table {self.table_name}...")
try:
self.delete_col()
@@ -265,10 +350,19 @@ raise
def _create_filter(self, filters: dict) -> str:
+ """
+ Create filter expression for queries.
+
+ Args:
+ filters (dict): Filter conditions.
+
+ Returns:
+ str: Filter expression.
+ """
conditions = []
for key, value in filters.items():
if isinstance(value, str):
conditions.append(f'metadata["{key}"] = "{value}"')
else:
conditions.append(f'metadata["{key}"] = {value}')
- return " AND ".join(conditions)+ return " AND ".join(conditions)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/baidu.py |
Write Python docstrings for this snippet | import json
import logging
from datetime import datetime
from typing import Dict
import numpy as np
import pytz
import valkey
from pydantic import BaseModel
from valkey.exceptions import ResponseError
from mem0.memory.utils import extract_json
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
# Default fields for the Valkey index
DEFAULT_FIELDS = [
{"name": "memory_id", "type": "tag"},
{"name": "hash", "type": "tag"},
{"name": "agent_id", "type": "tag"},
{"name": "run_id", "type": "tag"},
{"name": "user_id", "type": "tag"},
{"name": "memory", "type": "tag"}, # Using TAG instead of TEXT for Valkey compatibility
{"name": "metadata", "type": "tag"}, # Using TAG instead of TEXT for Valkey compatibility
{"name": "created_at", "type": "numeric"},
{"name": "updated_at", "type": "numeric"},
{
"name": "embedding",
"type": "vector",
"attrs": {"distance_metric": "cosine", "algorithm": "flat", "datatype": "float32"},
},
]
excluded_keys = {"user_id", "agent_id", "run_id", "hash", "data", "created_at", "updated_at"}
class OutputData(BaseModel):
id: str
score: float
payload: Dict
class ValkeyDB(VectorStoreBase):
def __init__(
self,
valkey_url: str,
collection_name: str,
embedding_model_dims: int,
timezone: str = "UTC",
index_type: str = "hnsw",
hnsw_m: int = 16,
hnsw_ef_construction: int = 200,
hnsw_ef_runtime: int = 10,
):
self.embedding_model_dims = embedding_model_dims
self.collection_name = collection_name
self.prefix = f"mem0:{collection_name}"
self.timezone = timezone
self.index_type = index_type.lower()
self.hnsw_m = hnsw_m
self.hnsw_ef_construction = hnsw_ef_construction
self.hnsw_ef_runtime = hnsw_ef_runtime
# Validate index type
if self.index_type not in ["hnsw", "flat"]:
raise ValueError(f"Invalid index_type: {index_type}. Must be 'hnsw' or 'flat'")
# Connect to Valkey
try:
self.client = valkey.from_url(valkey_url)
logger.debug(f"Successfully connected to Valkey at {valkey_url}")
except Exception as e:
logger.exception(f"Failed to connect to Valkey at {valkey_url}: {e}")
raise
# Create the index schema
self._create_index(embedding_model_dims)
def _build_index_schema(self, collection_name, embedding_dims, distance_metric, prefix):
# Build the vector field configuration based on index type
if self.index_type == "hnsw":
vector_config = [
"embedding",
"VECTOR",
"HNSW",
"12", # Attribute count: TYPE, FLOAT32, DIM, dims, DISTANCE_METRIC, metric, M, m, EF_CONSTRUCTION, ef_construction, EF_RUNTIME, ef_runtime
"TYPE",
"FLOAT32",
"DIM",
str(embedding_dims),
"DISTANCE_METRIC",
distance_metric,
"M",
str(self.hnsw_m),
"EF_CONSTRUCTION",
str(self.hnsw_ef_construction),
"EF_RUNTIME",
str(self.hnsw_ef_runtime),
]
elif self.index_type == "flat":
vector_config = [
"embedding",
"VECTOR",
"FLAT",
"6", # Attribute count: TYPE, FLOAT32, DIM, dims, DISTANCE_METRIC, metric
"TYPE",
"FLOAT32",
"DIM",
str(embedding_dims),
"DISTANCE_METRIC",
distance_metric,
]
else:
# This should never happen due to constructor validation, but be defensive
raise ValueError(f"Unsupported index_type: {self.index_type}. Must be 'hnsw' or 'flat'")
# Build the complete command (comma is default separator for TAG fields)
cmd = [
"FT.CREATE",
collection_name,
"ON",
"HASH",
"PREFIX",
"1",
prefix,
"SCHEMA",
"memory_id",
"TAG",
"hash",
"TAG",
"agent_id",
"TAG",
"run_id",
"TAG",
"user_id",
"TAG",
"memory",
"TAG",
"metadata",
"TAG",
"created_at",
"NUMERIC",
"updated_at",
"NUMERIC",
] + vector_config
return cmd
def _create_index(self, embedding_model_dims):
# Check if the search module is available
try:
# Try to execute a search command
self.client.execute_command("FT._LIST")
except ResponseError as e:
if "unknown command" in str(e).lower():
raise ValueError(
"Valkey search module is not available. Please ensure Valkey is running with the search module enabled. "
"The search module can be loaded using the --loadmodule option with the valkey-search library. "
"For installation and setup instructions, refer to the Valkey Search documentation."
)
else:
logger.exception(f"Error checking search module: {e}")
raise
# Check if the index already exists
try:
self.client.ft(self.collection_name).info()
return
except ResponseError as e:
if "not found" not in str(e).lower():
logger.exception(f"Error checking index existence: {e}")
raise
# Build and execute the index creation command
cmd = self._build_index_schema(
self.collection_name,
embedding_model_dims,
"COSINE", # Fixed distance metric for initialization
self.prefix,
)
try:
self.client.execute_command(*cmd)
logger.info(f"Successfully created {self.index_type.upper()} index {self.collection_name}")
except Exception as e:
logger.exception(f"Error creating index {self.collection_name}: {e}")
raise
def create_col(self, name=None, vector_size=None, distance=None):
# Use provided parameters or fall back to instance attributes
collection_name = name or self.collection_name
embedding_dims = vector_size or self.embedding_model_dims
distance_metric = distance or "COSINE"
prefix = f"mem0:{collection_name}"
# Try to drop the index if it exists (cleanup before creation)
self._drop_index(collection_name, log_level="silent")
# Build and execute the index creation command
cmd = self._build_index_schema(
collection_name,
embedding_dims,
distance_metric, # Configurable distance metric
prefix,
)
try:
self.client.execute_command(*cmd)
logger.info(f"Successfully created {self.index_type.upper()} index {collection_name}")
# Update instance attributes if creating a new collection
if name:
self.collection_name = collection_name
self.prefix = prefix
return self.client.ft(collection_name)
except Exception as e:
logger.exception(f"Error creating collection {collection_name}: {e}")
raise
def insert(self, vectors: list, payloads: list = None, ids: list = None):
for vector, payload, id in zip(vectors, payloads, ids):
try:
# Create the key for the hash
key = f"{self.prefix}:{id}"
# Check for required fields and provide defaults if missing
if "data" not in payload:
# Silently use default value for missing 'data' field
pass
# Ensure created_at is present
if "created_at" not in payload:
payload["created_at"] = datetime.now(pytz.timezone(self.timezone)).isoformat()
# Prepare the hash data
hash_data = {
"memory_id": id,
"hash": payload.get("hash", f"hash_{id}"), # Use a default hash if not provided
"memory": payload.get("data", f"data_{id}"), # Use a default data if not provided
"created_at": int(datetime.fromisoformat(payload["created_at"]).timestamp()),
"embedding": np.array(vector, dtype=np.float32).tobytes(),
}
# Add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if field in payload:
hash_data[field] = payload[field]
# Add metadata
hash_data["metadata"] = json.dumps({k: v for k, v in payload.items() if k not in excluded_keys})
# Store in Valkey
self.client.hset(key, mapping=hash_data)
logger.debug(f"Successfully inserted vector with ID {id}")
except KeyError as e:
logger.error(f"Error inserting vector with ID {id}: Missing required field {e}")
except Exception as e:
logger.exception(f"Error inserting vector with ID {id}: {e}")
raise
def _build_search_query(self, knn_part, filters=None):
# No filters, just use the KNN search
if not filters or not any(value is not None for key, value in filters.items()):
return f"*=>{knn_part}"
# Build filter expression
filter_parts = []
for key, value in filters.items():
if value is not None:
# Use the correct filter syntax for Valkey
filter_parts.append(f"@{key}:{{{value}}}")
# No valid filter parts
if not filter_parts:
return f"*=>{knn_part}"
# Combine filter parts with proper syntax
filter_expr = " ".join(filter_parts)
return f"{filter_expr} =>{knn_part}"
def _execute_search(self, query, params):
try:
return self.client.ft(self.collection_name).search(query, query_params=params)
except ResponseError as e:
logger.error(f"Search failed with query '{query}': {e}")
raise
def _process_search_results(self, results):
memory_results = []
for doc in results.docs:
# Extract the score
score = float(doc.vector_score) if hasattr(doc, "vector_score") else None
# Create the payload
payload = {
"hash": doc.hash,
"data": doc.memory,
"created_at": self._format_timestamp(int(doc.created_at), self.timezone),
}
# Add updated_at if available
if hasattr(doc, "updated_at"):
payload["updated_at"] = self._format_timestamp(int(doc.updated_at), self.timezone)
# Add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if hasattr(doc, field):
payload[field] = getattr(doc, field)
# Add metadata
if hasattr(doc, "metadata"):
try:
metadata = json.loads(extract_json(doc.metadata))
payload.update(metadata)
except (json.JSONDecodeError, TypeError) as e:
logger.warning(f"Failed to parse metadata: {e}")
# Create the result
memory_results.append(OutputData(id=doc.memory_id, score=score, payload=payload))
return memory_results
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None, ef_runtime: int = None):
# Convert the vector to bytes
vector_bytes = np.array(vectors, dtype=np.float32).tobytes()
# Build the KNN part with optional EF_RUNTIME for HNSW
if self.index_type == "hnsw" and ef_runtime is not None:
knn_part = f"[KNN {limit} @embedding $vec_param EF_RUNTIME {ef_runtime} AS vector_score]"
else:
# For FLAT indexes or when ef_runtime is None, use basic KNN
knn_part = f"[KNN {limit} @embedding $vec_param AS vector_score]"
# Build the complete query
q = self._build_search_query(knn_part, filters)
# Log the query for debugging (only in debug mode)
logger.debug(f"Valkey search query: {q}")
# Set up the query parameters
params = {"vec_param": vector_bytes}
# Execute the search
results = self._execute_search(q, params)
# Process the results
return self._process_search_results(results)
def delete(self, vector_id):
try:
key = f"{self.prefix}:{vector_id}"
self.client.delete(key)
logger.debug(f"Successfully deleted vector with ID {vector_id}")
except Exception as e:
logger.exception(f"Error deleting vector with ID {vector_id}: {e}")
raise
def update(self, vector_id=None, vector=None, payload=None):
try:
key = f"{self.prefix}:{vector_id}"
# Check for required fields and provide defaults if missing
if "data" not in payload:
# Silently use default value for missing 'data' field
pass
# Ensure created_at is present
if "created_at" not in payload:
payload["created_at"] = datetime.now(pytz.timezone(self.timezone)).isoformat()
# Prepare the hash data
hash_data = {
"memory_id": vector_id,
"hash": payload.get("hash", f"hash_{vector_id}"), # Use a default hash if not provided
"memory": payload.get("data", f"data_{vector_id}"), # Use a default data if not provided
"created_at": int(datetime.fromisoformat(payload["created_at"]).timestamp()),
"embedding": np.array(vector, dtype=np.float32).tobytes(),
}
# Add updated_at if available
if "updated_at" in payload:
hash_data["updated_at"] = int(datetime.fromisoformat(payload["updated_at"]).timestamp())
# Add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if field in payload:
hash_data[field] = payload[field]
# Add metadata
hash_data["metadata"] = json.dumps({k: v for k, v in payload.items() if k not in excluded_keys})
# Update in Valkey
self.client.hset(key, mapping=hash_data)
logger.debug(f"Successfully updated vector with ID {vector_id}")
except KeyError as e:
logger.error(f"Error updating vector with ID {vector_id}: Missing required field {e}")
except Exception as e:
logger.exception(f"Error updating vector with ID {vector_id}: {e}")
raise
def _format_timestamp(self, timestamp, timezone=None):
# Use UTC as default timezone if not specified
tz = pytz.timezone(timezone or "UTC")
return datetime.fromtimestamp(timestamp, tz=tz).isoformat(timespec="microseconds")
def _process_document_fields(self, result, vector_id):
# Create the payload with error handling
payload = {}
# Convert bytes to string for text fields
for k in result:
if k not in ["embedding"]:
if isinstance(result[k], bytes):
try:
result[k] = result[k].decode("utf-8")
except UnicodeDecodeError:
# If decoding fails, keep the bytes
pass
# Add required fields with error handling
for field in ["hash", "memory", "created_at"]:
if field in result:
if field == "created_at":
try:
payload[field] = self._format_timestamp(int(result[field]), self.timezone)
except (ValueError, TypeError):
payload[field] = result[field]
else:
payload[field] = result[field]
else:
# Use default values for missing fields
if field == "hash":
payload[field] = "unknown"
elif field == "memory":
payload[field] = "unknown"
elif field == "created_at":
payload[field] = self._format_timestamp(
int(datetime.now(tz=pytz.timezone(self.timezone)).timestamp()), self.timezone
)
# Rename memory to data for consistency
if "memory" in payload:
payload["data"] = payload.pop("memory")
# Add updated_at if available
if "updated_at" in result:
try:
payload["updated_at"] = self._format_timestamp(int(result["updated_at"]), self.timezone)
except (ValueError, TypeError):
payload["updated_at"] = result["updated_at"]
# Add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if field in result:
payload[field] = result[field]
# Add metadata
if "metadata" in result:
try:
metadata = json.loads(extract_json(result["metadata"]))
payload.update(metadata)
except (json.JSONDecodeError, TypeError):
logger.warning(f"Failed to parse metadata: {result.get('metadata')}")
# Use memory_id from result if available, otherwise use vector_id
memory_id = result.get("memory_id", vector_id)
return payload, memory_id
def _convert_bytes(self, data):
if isinstance(data, bytes):
try:
return data.decode("utf-8")
except UnicodeDecodeError:
return data
if isinstance(data, dict):
return {self._convert_bytes(key): self._convert_bytes(value) for key, value in data.items()}
if isinstance(data, list):
return [self._convert_bytes(item) for item in data]
if isinstance(data, tuple):
return tuple(self._convert_bytes(item) for item in data)
return data
def get(self, vector_id):
try:
key = f"{self.prefix}:{vector_id}"
result = self.client.hgetall(key)
if not result:
raise KeyError(f"Vector with ID {vector_id} not found")
# Convert bytes keys/values to strings
result = self._convert_bytes(result)
logger.debug(f"Retrieved result keys: {result.keys()}")
# Process the document fields
payload, memory_id = self._process_document_fields(result, vector_id)
return OutputData(id=memory_id, payload=payload, score=0.0)
except KeyError:
raise
except Exception as e:
logger.exception(f"Error getting vector with ID {vector_id}: {e}")
raise
def list_cols(self):
try:
# Use the FT._LIST command to list all indices
return self.client.execute_command("FT._LIST")
except Exception as e:
logger.exception(f"Error listing collections: {e}")
raise
def _drop_index(self, collection_name, log_level="error"):
try:
self.client.execute_command("FT.DROPINDEX", collection_name)
logger.info(f"Successfully deleted index {collection_name}")
return True
except ResponseError as e:
if "Unknown index name" in str(e):
# Index doesn't exist - handle based on context
if log_level == "silent":
pass # No logging in situations where this is expected such as initial index creation
elif log_level == "info":
logger.info(f"Index {collection_name} doesn't exist, skipping deletion")
return False
else:
# Real error - always log and raise
logger.error(f"Error deleting index {collection_name}: {e}")
raise
except Exception as e:
# Non-ResponseError exceptions - always log and raise
logger.error(f"Error deleting index {collection_name}: {e}")
raise
def delete_col(self):
return self._drop_index(self.collection_name, log_level="info")
def col_info(self, name=None):
try:
collection_name = name or self.collection_name
return self.client.ft(collection_name).info()
except Exception as e:
logger.exception(f"Error getting collection info for {collection_name}: {e}")
raise
def reset(self):
try:
collection_name = self.collection_name
logger.warning(f"Resetting index {collection_name}...")
# Delete the index
self.delete_col()
# Recreate the index
self._create_index(self.embedding_model_dims)
return True
except Exception as e:
logger.exception(f"Error resetting index {self.collection_name}: {e}")
raise
def _build_list_query(self, filters=None):
# Default query
q = "*"
# Add filters if provided
if filters and any(value is not None for key, value in filters.items()):
filter_conditions = []
for key, value in filters.items():
if value is not None:
filter_conditions.append(f"@{key}:{{{value}}}")
if filter_conditions:
q = " ".join(filter_conditions)
return q
def list(self, filters: dict = None, limit: int = None) -> list:
try:
# Since Valkey search requires vector format, use a dummy vector search
# that returns all documents by using a zero vector and large K
dummy_vector = [0.0] * self.embedding_model_dims
search_limit = limit if limit is not None else 1000 # Large default
# Use the existing search method which handles filters properly
search_results = self.search("", dummy_vector, limit=search_limit, filters=filters)
# Convert search results to list format (match Redis format)
class MemoryResult:
def __init__(self, id: str, payload: dict, score: float = None):
self.id = id
self.payload = payload
self.score = score
memory_results = []
for result in search_results:
# Create payload in the expected format
payload = {
"hash": result.payload.get("hash", ""),
"data": result.payload.get("data", ""),
"created_at": result.payload.get("created_at"),
"updated_at": result.payload.get("updated_at"),
}
# Add metadata (exclude system fields)
for key, value in result.payload.items():
if key not in ["data", "hash", "created_at", "updated_at"]:
payload[key] = value
# Create MemoryResult object (matching Redis format)
memory_results.append(MemoryResult(id=result.id, payload=payload))
# Return nested list format like Redis
return [memory_results]
except Exception as e:
logger.exception(f"Error in list method: {e}")
return [[]] # Return empty result on error | --- +++ @@ -53,6 +53,19 @@ hnsw_ef_construction: int = 200,
hnsw_ef_runtime: int = 10,
):
+ """
+ Initialize the Valkey vector store.
+
+ Args:
+ valkey_url (str): Valkey URL.
+ collection_name (str): Collection name.
+ embedding_model_dims (int): Embedding model dimensions.
+ timezone (str, optional): Timezone for timestamps. Defaults to "UTC".
+ index_type (str, optional): Index type ('hnsw' or 'flat'). Defaults to "hnsw".
+ hnsw_m (int, optional): HNSW M parameter (connections per node). Defaults to 16.
+ hnsw_ef_construction (int, optional): HNSW ef_construction parameter. Defaults to 200.
+ hnsw_ef_runtime (int, optional): HNSW ef_runtime parameter. Defaults to 10.
+ """
self.embedding_model_dims = embedding_model_dims
self.collection_name = collection_name
self.prefix = f"mem0:{collection_name}"
@@ -78,6 +91,18 @@ self._create_index(embedding_model_dims)
def _build_index_schema(self, collection_name, embedding_dims, distance_metric, prefix):
+ """
+ Build the FT.CREATE command for index creation.
+
+ Args:
+ collection_name (str): Name of the collection/index
+ embedding_dims (int): Vector embedding dimensions
+ distance_metric (str): Distance metric (e.g., "COSINE", "L2", "IP")
+ prefix (str): Key prefix for the index
+
+ Returns:
+ list: Complete FT.CREATE command as list of arguments
+ """
# Build the vector field configuration based on index type
if self.index_type == "hnsw":
vector_config = [
@@ -148,6 +173,16 @@ return cmd
def _create_index(self, embedding_model_dims):
+ """
+ Create the search index with the specified schema.
+
+ Args:
+ embedding_model_dims (int): Dimensions for the vector embeddings.
+
+ Raises:
+ ValueError: If the search module is not available.
+ Exception: For other errors during index creation.
+ """
# Check if the search module is available
try:
# Try to execute a search command
@@ -188,6 +223,17 @@ raise
def create_col(self, name=None, vector_size=None, distance=None):
+ """
+ Create a new collection (index) in Valkey.
+
+ Args:
+ name (str, optional): Name for the collection. Defaults to None, which uses the current collection_name.
+ vector_size (int, optional): Size of the vector embeddings. Defaults to None, which uses the current embedding_model_dims.
+ distance (str, optional): Distance metric to use. Defaults to None, which uses 'cosine'.
+
+ Returns:
+ The created index object.
+ """
# Use provided parameters or fall back to instance attributes
collection_name = name or self.collection_name
embedding_dims = vector_size or self.embedding_model_dims
@@ -220,6 +266,14 @@ raise
def insert(self, vectors: list, payloads: list = None, ids: list = None):
+ """
+ Insert vectors and their payloads into the index.
+
+ Args:
+ vectors (list): List of vectors to insert.
+ payloads (list, optional): List of payloads corresponding to the vectors.
+ ids (list, optional): List of IDs for the vectors.
+ """
for vector, payload, id in zip(vectors, payloads, ids):
try:
# Create the key for the hash
@@ -261,6 +315,21 @@ raise
def _build_search_query(self, knn_part, filters=None):
+ """
+ Build a search query string with filters.
+
+ Args:
+ knn_part (str): The KNN part of the query.
+ filters (dict, optional): Filters to apply to the search. Each key-value pair
+ becomes a tag filter (@key:{value}). None values are ignored.
+ Values are used as-is (no validation) - wildcards, lists, etc. are
+ passed through literally to Valkey search. Multiple filters are
+ combined with AND logic (space-separated).
+
+ Returns:
+ str: The complete search query string in format "filter_expr =>[KNN...]"
+ or "*=>[KNN...]" if no valid filters.
+ """
# No filters, just use the KNN search
if not filters or not any(value is not None for key, value in filters.items()):
return f"*=>{knn_part}"
@@ -281,6 +350,16 @@ return f"{filter_expr} =>{knn_part}"
def _execute_search(self, query, params):
+ """
+ Execute a search query.
+
+ Args:
+ query (str): The search query to execute.
+ params (dict): The query parameters.
+
+ Returns:
+ The search results.
+ """
try:
return self.client.ft(self.collection_name).search(query, query_params=params)
except ResponseError as e:
@@ -288,6 +367,15 @@ raise
def _process_search_results(self, results):
+ """
+ Process search results into OutputData objects.
+
+ Args:
+ results: The search results from Valkey.
+
+ Returns:
+ list: List of OutputData objects.
+ """
memory_results = []
for doc in results.docs:
# Extract the score
@@ -323,6 +411,19 @@ return memory_results
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None, ef_runtime: int = None):
+ """
+ Search for similar vectors in the index.
+
+ Args:
+ query (str): The search query.
+ vectors (list): The vector to search for.
+ limit (int, optional): Maximum number of results to return. Defaults to 5.
+ filters (dict, optional): Filters to apply to the search. Defaults to None.
+ ef_runtime (int, optional): HNSW ef_runtime parameter for this query. Only used with HNSW index. Defaults to None.
+
+ Returns:
+ list: List of OutputData objects.
+ """
# Convert the vector to bytes
vector_bytes = np.array(vectors, dtype=np.float32).tobytes()
@@ -349,6 +450,12 @@ return self._process_search_results(results)
def delete(self, vector_id):
+ """
+ Delete a vector from the index.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
try:
key = f"{self.prefix}:{vector_id}"
self.client.delete(key)
@@ -358,6 +465,14 @@ raise
def update(self, vector_id=None, vector=None, payload=None):
+ """
+ Update a vector in the index.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (list, optional): New vector data.
+ payload (dict, optional): New payload data.
+ """
try:
key = f"{self.prefix}:{vector_id}"
@@ -401,11 +516,32 @@ raise
def _format_timestamp(self, timestamp, timezone=None):
+ """
+ Format a timestamp with the specified timezone.
+
+ Args:
+ timestamp (int): The timestamp to format.
+ timezone (str, optional): The timezone to use. Defaults to UTC.
+
+ Returns:
+ str: The formatted timestamp.
+ """
# Use UTC as default timezone if not specified
tz = pytz.timezone(timezone or "UTC")
return datetime.fromtimestamp(timestamp, tz=tz).isoformat(timespec="microseconds")
def _process_document_fields(self, result, vector_id):
+ """
+ Process document fields from a Valkey hash result.
+
+ Args:
+ result (dict): The hash result from Valkey.
+ vector_id (str): The vector ID.
+
+ Returns:
+ dict: The processed payload.
+ str: The memory ID.
+ """
# Create the payload with error handling
payload = {}
@@ -470,6 +606,7 @@ return payload, memory_id
def _convert_bytes(self, data):
+ """Convert bytes data back to string"""
if isinstance(data, bytes):
try:
return data.decode("utf-8")
@@ -484,6 +621,15 @@ return data
def get(self, vector_id):
+ """
+ Get a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to get.
+
+ Returns:
+ OutputData: The retrieved vector.
+ """
try:
key = f"{self.prefix}:{vector_id}"
result = self.client.hgetall(key)
@@ -507,6 +653,12 @@ raise
def list_cols(self):
+ """
+ List all collections (indices) in Valkey.
+
+ Returns:
+ list: List of collection names.
+ """
try:
# Use the FT._LIST command to list all indices
return self.client.execute_command("FT._LIST")
@@ -515,6 +667,13 @@ raise
def _drop_index(self, collection_name, log_level="error"):
+ """
+ Drop an index by name using the documented FT.DROPINDEX command.
+
+ Args:
+ collection_name (str): Name of the index to drop.
+ log_level (str): Logging level for missing index ("silent", "info", "error").
+ """
try:
self.client.execute_command("FT.DROPINDEX", collection_name)
logger.info(f"Successfully deleted index {collection_name}")
@@ -537,9 +696,21 @@ raise
def delete_col(self):
+ """
+ Delete the current collection (index).
+ """
return self._drop_index(self.collection_name, log_level="info")
def col_info(self, name=None):
+ """
+ Get information about a collection (index).
+
+ Args:
+ name (str, optional): Name of the collection. Defaults to None, which uses the current collection_name.
+
+ Returns:
+ dict: Information about the collection.
+ """
try:
collection_name = name or self.collection_name
return self.client.ft(collection_name).info()
@@ -548,6 +719,9 @@ raise
def reset(self):
+ """
+ Reset the index by deleting and recreating it.
+ """
try:
collection_name = self.collection_name
logger.warning(f"Resetting index {collection_name}...")
@@ -564,6 +738,18 @@ raise
def _build_list_query(self, filters=None):
+ """
+ Build a query for listing vectors.
+
+ Args:
+ filters (dict, optional): Filters to apply to the list. Each key-value pair
+ becomes a tag filter (@key:{value}). None values are ignored.
+ Values are used as-is (no validation) - wildcards, lists, etc. are
+ passed through literally to Valkey search.
+
+ Returns:
+ str: The query string. Returns "*" if no valid filters provided.
+ """
# Default query
q = "*"
@@ -580,6 +766,22 @@ return q
def list(self, filters: dict = None, limit: int = None) -> list:
+ """
+ List all recent created memories from the vector store.
+
+ Args:
+ filters (dict, optional): Filters to apply to the list. Each key-value pair
+ becomes a tag filter (@key:{value}). None values are ignored.
+ Values are used as-is without validation - wildcards, special characters,
+ lists, etc. are passed through literally to Valkey search.
+ Multiple filters are combined with AND logic.
+ limit (int, optional): Maximum number of results to return. Defaults to 1000
+ if not specified.
+
+ Returns:
+ list: Nested list format [[MemoryResult(), ...]] matching Redis implementation.
+ Each MemoryResult contains id and payload with hash, data, timestamps, etc.
+ """
try:
# Since Valkey search requires vector format, use a dummy vector search
# that returns all documents by using a zero vector and large K
@@ -619,4 +821,4 @@
except Exception as e:
logger.exception(f"Error in list method: {e}")
- return [[]] # Return empty result on error+ return [[]] # Return empty result on error
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/valkey.py |
Generate docstrings with parameter types | import json
import logging
from contextlib import contextmanager
from typing import Any, List, Optional
from pydantic import BaseModel
# Try to import psycopg (psycopg3) first, then fall back to psycopg2
try:
from psycopg.types.json import Json
from psycopg_pool import ConnectionPool
PSYCOPG_VERSION = 3
logger = logging.getLogger(__name__)
logger.info("Using psycopg (psycopg3) with ConnectionPool for PostgreSQL connections")
except ImportError:
try:
from psycopg2.extras import Json, execute_values
from psycopg2.pool import ThreadedConnectionPool as ConnectionPool
PSYCOPG_VERSION = 2
logger = logging.getLogger(__name__)
logger.info("Using psycopg2 with ThreadedConnectionPool for PostgreSQL connections")
except ImportError:
raise ImportError(
"Neither 'psycopg' nor 'psycopg2' library is available. "
"Please install one of them using 'pip install psycopg[pool]' or 'pip install psycopg2'"
)
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str]
score: Optional[float]
payload: Optional[dict]
class PGVector(VectorStoreBase):
def __init__(
self,
dbname,
collection_name,
embedding_model_dims,
user,
password,
host,
port,
diskann,
hnsw,
minconn=1,
maxconn=5,
sslmode=None,
connection_string=None,
connection_pool=None,
):
self.collection_name = collection_name
self.use_diskann = diskann
self.use_hnsw = hnsw
self.embedding_model_dims = embedding_model_dims
self.connection_pool = None
# Connection setup with priority: connection_pool > connection_string > individual parameters
if connection_pool is not None:
# Use provided connection pool
self.connection_pool = connection_pool
elif connection_string:
if sslmode:
# Append sslmode to connection string if provided
if 'sslmode=' in connection_string:
# Replace existing sslmode
import re
connection_string = re.sub(r'sslmode=[^ ]*', f'sslmode={sslmode}', connection_string)
else:
# Add sslmode to connection string
connection_string = f"{connection_string} sslmode={sslmode}"
else:
connection_string = f"postgresql://{user}:{password}@{host}:{port}/{dbname}"
if sslmode:
connection_string = f"{connection_string} sslmode={sslmode}"
if self.connection_pool is None:
if PSYCOPG_VERSION == 3:
# psycopg3 ConnectionPool
self.connection_pool = ConnectionPool(conninfo=connection_string, min_size=minconn, max_size=maxconn, open=True)
else:
# psycopg2 ThreadedConnectionPool
self.connection_pool = ConnectionPool(minconn=minconn, maxconn=maxconn, dsn=connection_string)
collections = self.list_cols()
if collection_name not in collections:
self.create_col()
@contextmanager
def _get_cursor(self, commit: bool = False):
if PSYCOPG_VERSION == 3:
# psycopg3 auto-manages commit/rollback and pool return
with self.connection_pool.connection() as conn:
with conn.cursor() as cur:
try:
yield cur
if commit:
conn.commit()
except Exception:
conn.rollback()
logger.error("Error in cursor context (psycopg3)", exc_info=True)
raise
else:
# psycopg2 manual getconn/putconn
conn = self.connection_pool.getconn()
cur = conn.cursor()
try:
yield cur
if commit:
conn.commit()
except Exception as exc:
conn.rollback()
logger.error(f"Error occurred: {exc}")
raise exc
finally:
cur.close()
self.connection_pool.putconn(conn)
def create_col(self) -> None:
with self._get_cursor(commit=True) as cur:
cur.execute("CREATE EXTENSION IF NOT EXISTS vector")
cur.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.collection_name} (
id UUID PRIMARY KEY,
vector vector({self.embedding_model_dims}),
payload JSONB
);
"""
)
if self.use_diskann and self.embedding_model_dims < 2000:
cur.execute("SELECT * FROM pg_extension WHERE extname = 'vectorscale'")
if cur.fetchone():
# Create DiskANN index if extension is installed for faster search
cur.execute(
f"""
CREATE INDEX IF NOT EXISTS {self.collection_name}_diskann_idx
ON {self.collection_name}
USING diskann (vector);
"""
)
elif self.use_hnsw:
cur.execute(
f"""
CREATE INDEX IF NOT EXISTS {self.collection_name}_hnsw_idx
ON {self.collection_name}
USING hnsw (vector vector_cosine_ops)
"""
)
def insert(self, vectors: list[list[float]], payloads=None, ids=None) -> None:
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
json_payloads = [json.dumps(payload) for payload in payloads]
data = [(id, vector, payload) for id, vector, payload in zip(ids, vectors, json_payloads)]
if PSYCOPG_VERSION == 3:
with self._get_cursor(commit=True) as cur:
cur.executemany(
f"INSERT INTO {self.collection_name} (id, vector, payload) VALUES (%s, %s, %s)",
data,
)
else:
with self._get_cursor(commit=True) as cur:
execute_values(
cur,
f"INSERT INTO {self.collection_name} (id, vector, payload) VALUES %s",
data,
)
def search(
self,
query: str,
vectors: list[float],
limit: Optional[int] = 5,
filters: Optional[dict] = None,
) -> List[OutputData]:
filter_conditions = []
filter_params = []
if filters:
for k, v in filters.items():
filter_conditions.append("payload->>%s = %s")
filter_params.extend([k, str(v)])
filter_clause = "WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
with self._get_cursor() as cur:
cur.execute(
f"""
SELECT id, vector <=> %s::vector AS distance, payload
FROM {self.collection_name}
{filter_clause}
ORDER BY distance
LIMIT %s
""",
(vectors, *filter_params, limit),
)
results = cur.fetchall()
return [OutputData(id=str(r[0]), score=float(r[1]), payload=r[2]) for r in results]
def delete(self, vector_id: str) -> None:
with self._get_cursor(commit=True) as cur:
cur.execute(f"DELETE FROM {self.collection_name} WHERE id = %s", (vector_id,))
def update(
self,
vector_id: str,
vector: Optional[list[float]] = None,
payload: Optional[dict] = None,
) -> None:
with self._get_cursor(commit=True) as cur:
if vector:
cur.execute(
f"UPDATE {self.collection_name} SET vector = %s WHERE id = %s",
(vector, vector_id),
)
if payload:
# Handle JSON serialization based on psycopg version
if PSYCOPG_VERSION == 3:
# psycopg3 uses psycopg.types.json.Json
cur.execute(
f"UPDATE {self.collection_name} SET payload = %s WHERE id = %s",
(Json(payload), vector_id),
)
else:
# psycopg2 uses psycopg2.extras.Json
cur.execute(
f"UPDATE {self.collection_name} SET payload = %s WHERE id = %s",
(Json(payload), vector_id),
)
def get(self, vector_id: str) -> OutputData:
with self._get_cursor() as cur:
cur.execute(
f"SELECT id, vector, payload FROM {self.collection_name} WHERE id = %s",
(vector_id,),
)
result = cur.fetchone()
if not result:
return None
return OutputData(id=str(result[0]), score=None, payload=result[2])
def list_cols(self) -> List[str]:
with self._get_cursor() as cur:
cur.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'")
return [row[0] for row in cur.fetchall()]
def delete_col(self) -> None:
with self._get_cursor(commit=True) as cur:
cur.execute(f"DROP TABLE IF EXISTS {self.collection_name}")
def col_info(self) -> dict[str, Any]:
with self._get_cursor() as cur:
cur.execute(
f"""
SELECT
table_name,
(SELECT COUNT(*) FROM {self.collection_name}) as row_count,
(SELECT pg_size_pretty(pg_total_relation_size('{self.collection_name}'))) as total_size
FROM information_schema.tables
WHERE table_schema = 'public' AND table_name = %s
""",
(self.collection_name,),
)
result = cur.fetchone()
return {"name": result[0], "count": result[1], "size": result[2]}
def list(
self,
filters: Optional[dict] = None,
limit: Optional[int] = 100
) -> List[OutputData]:
filter_conditions = []
filter_params = []
if filters:
for k, v in filters.items():
filter_conditions.append("payload->>%s = %s")
filter_params.extend([k, str(v)])
filter_clause = "WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
query = f"""
SELECT id, vector, payload
FROM {self.collection_name}
{filter_clause}
LIMIT %s
"""
with self._get_cursor() as cur:
cur.execute(query, (*filter_params, limit))
results = cur.fetchall()
return [[OutputData(id=str(r[0]), score=None, payload=r[2]) for r in results]]
def __del__(self) -> None:
try:
# Close pool appropriately
if PSYCOPG_VERSION == 3:
self.connection_pool.close()
else:
self.connection_pool.closeall()
except Exception:
pass
def reset(self) -> None:
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_col() | --- +++ @@ -54,6 +54,25 @@ connection_string=None,
connection_pool=None,
):
+ """
+ Initialize the PGVector database.
+
+ Args:
+ dbname (str): Database name
+ collection_name (str): Collection name
+ embedding_model_dims (int): Dimension of the embedding vector
+ user (str): Database user
+ password (str): Database password
+ host (str, optional): Database host
+ port (int, optional): Database port
+ diskann (bool, optional): Use DiskANN for faster search
+ hnsw (bool, optional): Use HNSW for faster search
+ minconn (int): Minimum number of connections to keep in the connection pool
+ maxconn (int): Maximum number of connections allowed in the connection pool
+ sslmode (str, optional): SSL mode for PostgreSQL connection (e.g., 'require', 'prefer', 'disable')
+ connection_string (str, optional): PostgreSQL connection string (overrides individual connection parameters)
+ connection_pool (Any, optional): psycopg2 connection pool object (overrides connection string and individual parameters)
+ """
self.collection_name = collection_name
self.use_diskann = diskann
self.use_hnsw = hnsw
@@ -93,6 +112,10 @@
@contextmanager
def _get_cursor(self, commit: bool = False):
+ """
+ Unified context manager to get a cursor from the appropriate pool.
+ Auto-commits or rolls back based on exception, and returns the connection to the pool.
+ """
if PSYCOPG_VERSION == 3:
# psycopg3 auto-manages commit/rollback and pool return
with self.connection_pool.connection() as conn:
@@ -122,6 +145,10 @@ self.connection_pool.putconn(conn)
def create_col(self) -> None:
+ """
+ Create a new collection (table in PostgreSQL).
+ Will also initialize vector search index if specified.
+ """
with self._get_cursor(commit=True) as cur:
cur.execute("CREATE EXTENSION IF NOT EXISTS vector")
cur.execute(
@@ -179,6 +206,18 @@ limit: Optional[int] = 5,
filters: Optional[dict] = None,
) -> List[OutputData]:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query.
+ vectors (List[float]): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Dict, optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ list: Search results.
+ """
filter_conditions = []
filter_params = []
@@ -205,6 +244,12 @@ return [OutputData(id=str(r[0]), score=float(r[1]), payload=r[2]) for r in results]
def delete(self, vector_id: str) -> None:
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
with self._get_cursor(commit=True) as cur:
cur.execute(f"DELETE FROM {self.collection_name} WHERE id = %s", (vector_id,))
@@ -214,6 +259,14 @@ vector: Optional[list[float]] = None,
payload: Optional[dict] = None,
) -> None:
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (List[float], optional): Updated vector.
+ payload (Dict, optional): Updated payload.
+ """
with self._get_cursor(commit=True) as cur:
if vector:
cur.execute(
@@ -237,6 +290,15 @@
def get(self, vector_id: str) -> OutputData:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Retrieved vector.
+ """
with self._get_cursor() as cur:
cur.execute(
f"SELECT id, vector, payload FROM {self.collection_name} WHERE id = %s",
@@ -248,15 +310,28 @@ return OutputData(id=str(result[0]), score=None, payload=result[2])
def list_cols(self) -> List[str]:
+ """
+ List all collections.
+
+ Returns:
+ List[str]: List of collection names.
+ """
with self._get_cursor() as cur:
cur.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'")
return [row[0] for row in cur.fetchall()]
def delete_col(self) -> None:
+ """Delete a collection."""
with self._get_cursor(commit=True) as cur:
cur.execute(f"DROP TABLE IF EXISTS {self.collection_name}")
def col_info(self) -> dict[str, Any]:
+ """
+ Get information about a collection.
+
+ Returns:
+ Dict[str, Any]: Collection information.
+ """
with self._get_cursor() as cur:
cur.execute(
f"""
@@ -277,6 +352,16 @@ filters: Optional[dict] = None,
limit: Optional[int] = 100
) -> List[OutputData]:
+ """
+ List all vectors in a collection.
+
+ Args:
+ filters (Dict, optional): Filters to apply to the list.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
filter_conditions = []
filter_params = []
@@ -300,6 +385,9 @@ return [[OutputData(id=str(r[0]), score=None, payload=r[2]) for r in results]]
def __del__(self) -> None:
+ """
+ Close the database connection pool when the object is deleted.
+ """
try:
# Close pool appropriately
if PSYCOPG_VERSION == 3:
@@ -310,6 +398,7 @@ pass
def reset(self) -> None:
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_col()+ self.create_col()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/pgvector.py |
Create structured documentation for my script | import logging
import os
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
try:
from pinecone import Pinecone, PodSpec, ServerlessSpec, Vector
except ImportError:
raise ImportError(
"Pinecone requires extra dependencies. Install with `pip install pinecone pinecone-text`"
) from None
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # distance
payload: Optional[Dict] # metadata
class PineconeDB(VectorStoreBase):
def __init__(
self,
collection_name: str,
embedding_model_dims: int,
client: Optional["Pinecone"],
api_key: Optional[str],
environment: Optional[str],
serverless_config: Optional[Dict[str, Any]],
pod_config: Optional[Dict[str, Any]],
hybrid_search: bool,
metric: str,
batch_size: int,
extra_params: Optional[Dict[str, Any]],
namespace: Optional[str] = None,
):
if client:
self.client = client
else:
api_key = api_key or os.environ.get("PINECONE_API_KEY")
if not api_key:
raise ValueError(
"Pinecone API key must be provided either as a parameter or as an environment variable"
)
params = extra_params or {}
self.client = Pinecone(api_key=api_key, **params)
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.environment = environment
self.serverless_config = serverless_config
self.pod_config = pod_config
self.hybrid_search = hybrid_search
self.metric = metric
self.batch_size = batch_size
self.namespace = namespace
self.sparse_encoder = None
if self.hybrid_search:
try:
from pinecone_text.sparse import BM25Encoder
logger.info("Initializing BM25Encoder for sparse vectors...")
self.sparse_encoder = BM25Encoder.default()
except ImportError:
logger.warning("pinecone-text not installed. Hybrid search will be disabled.")
self.hybrid_search = False
self.create_col(embedding_model_dims, metric)
def create_col(self, vector_size: int, metric: str = "cosine"):
existing_indexes = self.list_cols().names()
if self.collection_name in existing_indexes:
logger.debug(f"Index {self.collection_name} already exists. Skipping creation.")
self.index = self.client.Index(self.collection_name)
return
if self.serverless_config:
spec = ServerlessSpec(**self.serverless_config)
elif self.pod_config:
spec = PodSpec(**self.pod_config)
else:
spec = ServerlessSpec(cloud="aws", region="us-west-2")
self.client.create_index(
name=self.collection_name,
dimension=vector_size,
metric=metric,
spec=spec,
)
self.index = self.client.Index(self.collection_name)
def insert(
self,
vectors: List[List[float]],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[Union[str, int]]] = None,
):
logger.info(f"Inserting {len(vectors)} vectors into index {self.collection_name}")
items = []
for idx, vector in enumerate(vectors):
item_id = str(ids[idx]) if ids is not None else str(idx)
payload = payloads[idx] if payloads else {}
vector_record = {"id": item_id, "values": vector, "metadata": payload}
if self.hybrid_search and self.sparse_encoder and "text" in payload:
sparse_vector = self.sparse_encoder.encode_documents(payload["text"])
vector_record["sparse_values"] = sparse_vector
items.append(vector_record)
if len(items) >= self.batch_size:
self.index.upsert(vectors=items, namespace=self.namespace)
items = []
if items:
self.index.upsert(vectors=items, namespace=self.namespace)
def _parse_output(self, data: Dict) -> List[OutputData]:
if isinstance(data, Vector):
result = OutputData(
id=data.id,
score=0.0,
payload=data.metadata,
)
return result
else:
result = []
for match in data:
entry = OutputData(
id=match.get("id"),
score=match.get("score"),
payload=match.get("metadata"),
)
result.append(entry)
return result
def _create_filter(self, filters: Optional[Dict]) -> Dict:
if not filters:
return {}
pinecone_filter = {}
for key, value in filters.items():
if isinstance(value, dict) and "gte" in value and "lte" in value:
pinecone_filter[key] = {"$gte": value["gte"], "$lte": value["lte"]}
else:
pinecone_filter[key] = {"$eq": value}
return pinecone_filter
def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
filter_dict = self._create_filter(filters) if filters else None
query_params = {
"vector": vectors,
"top_k": limit,
"include_metadata": True,
"include_values": False,
}
if filter_dict:
query_params["filter"] = filter_dict
if self.hybrid_search and self.sparse_encoder and "text" in filters:
query_text = filters.get("text")
if query_text:
sparse_vector = self.sparse_encoder.encode_queries(query_text)
query_params["sparse_vector"] = sparse_vector
response = self.index.query(**query_params, namespace=self.namespace)
results = self._parse_output(response.matches)
return results
def delete(self, vector_id: Union[str, int]):
self.index.delete(ids=[str(vector_id)], namespace=self.namespace)
def update(self, vector_id: Union[str, int], vector: Optional[List[float]] = None, payload: Optional[Dict] = None):
item = {
"id": str(vector_id),
}
if vector is not None:
item["values"] = vector
if payload is not None:
item["metadata"] = payload
if self.hybrid_search and self.sparse_encoder and "text" in payload:
sparse_vector = self.sparse_encoder.encode_documents(payload["text"])
item["sparse_values"] = sparse_vector
self.index.upsert(vectors=[item], namespace=self.namespace)
def get(self, vector_id: Union[str, int]) -> OutputData:
try:
response = self.index.fetch(ids=[str(vector_id)], namespace=self.namespace)
if str(vector_id) in response.vectors:
return self._parse_output(response.vectors[str(vector_id)])
return None
except Exception as e:
logger.error(f"Error retrieving vector {vector_id}: {e}")
return None
def list_cols(self):
return self.client.list_indexes()
def delete_col(self):
try:
self.client.delete_index(self.collection_name)
logger.info(f"Index {self.collection_name} deleted successfully")
except Exception as e:
logger.error(f"Error deleting index {self.collection_name}: {e}")
def col_info(self) -> Dict:
return self.client.describe_index(self.collection_name)
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
filter_dict = self._create_filter(filters) if filters else None
stats = self.index.describe_index_stats()
dimension = stats.dimension
zero_vector = [0.0] * dimension
query_params = {
"vector": zero_vector,
"top_k": limit,
"include_metadata": True,
"include_values": True,
}
if filter_dict:
query_params["filter"] = filter_dict
try:
response = self.index.query(**query_params, namespace=self.namespace)
response = response.to_dict()
results = self._parse_output(response["matches"])
return [results]
except Exception as e:
logger.error(f"Error listing vectors: {e}")
return {"points": [], "next_page_token": None}
def count(self) -> int:
stats = self.index.describe_index_stats()
if self.namespace:
# Safely get the namespace stats and return vector_count, defaulting to 0 if not found
namespace_summary = (stats.namespaces or {}).get(self.namespace)
if namespace_summary:
return namespace_summary.vector_count or 0
return 0
return stats.total_vector_count or 0
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_col(self.embedding_model_dims, self.metric) | --- +++ @@ -38,6 +38,23 @@ extra_params: Optional[Dict[str, Any]],
namespace: Optional[str] = None,
):
+ """
+ Initialize the Pinecone vector store.
+
+ Args:
+ collection_name (str): Name of the index/collection.
+ embedding_model_dims (int): Dimensions of the embedding model.
+ client (Pinecone, optional): Existing Pinecone client instance. Defaults to None.
+ api_key (str, optional): API key for Pinecone. Defaults to None.
+ environment (str, optional): Pinecone environment. Defaults to None.
+ serverless_config (Dict, optional): Configuration for serverless deployment. Defaults to None.
+ pod_config (Dict, optional): Configuration for pod-based deployment. Defaults to None.
+ hybrid_search (bool, optional): Whether to enable hybrid search. Defaults to False.
+ metric (str, optional): Distance metric for vector similarity. Defaults to "cosine".
+ batch_size (int, optional): Batch size for operations. Defaults to 100.
+ extra_params (Dict, optional): Additional parameters for Pinecone client. Defaults to None.
+ namespace (str, optional): Namespace for the collection. Defaults to None.
+ """
if client:
self.client = client
else:
@@ -74,6 +91,13 @@ self.create_col(embedding_model_dims, metric)
def create_col(self, vector_size: int, metric: str = "cosine"):
+ """
+ Create a new index/collection.
+
+ Args:
+ vector_size (int): Size of the vectors to be stored.
+ metric (str, optional): Distance metric for vector similarity. Defaults to "cosine".
+ """
existing_indexes = self.list_cols().names()
if self.collection_name in existing_indexes:
@@ -103,6 +127,14 @@ payloads: Optional[List[Dict]] = None,
ids: Optional[List[Union[str, int]]] = None,
):
+ """
+ Insert vectors into an index.
+
+ Args:
+ vectors (list): List of vectors to insert.
+ payloads (list, optional): List of payloads corresponding to vectors. Defaults to None.
+ ids (list, optional): List of IDs corresponding to vectors. Defaults to None.
+ """
logger.info(f"Inserting {len(vectors)} vectors into index {self.collection_name}")
items = []
@@ -126,6 +158,15 @@ self.index.upsert(vectors=items, namespace=self.namespace)
def _parse_output(self, data: Dict) -> List[OutputData]:
+ """
+ Parse the output data from Pinecone search results.
+
+ Args:
+ data (Dict): Output data from Pinecone query.
+
+ Returns:
+ List[OutputData]: Parsed output data.
+ """
if isinstance(data, Vector):
result = OutputData(
id=data.id,
@@ -146,6 +187,9 @@ return result
def _create_filter(self, filters: Optional[Dict]) -> Dict:
+ """
+ Create a filter dictionary from the provided filters.
+ """
if not filters:
return {}
@@ -162,6 +206,18 @@ def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query.
+ vectors (list): List of vectors to search.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (dict, optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ list: Search results.
+ """
filter_dict = self._create_filter(filters) if filters else None
query_params = {
@@ -186,9 +242,23 @@ return results
def delete(self, vector_id: Union[str, int]):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (Union[str, int]): ID of the vector to delete.
+ """
self.index.delete(ids=[str(vector_id)], namespace=self.namespace)
def update(self, vector_id: Union[str, int], vector: Optional[List[float]] = None, payload: Optional[Dict] = None):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (Union[str, int]): ID of the vector to update.
+ vector (list, optional): Updated vector. Defaults to None.
+ payload (dict, optional): Updated payload. Defaults to None.
+ """
item = {
"id": str(vector_id),
}
@@ -206,6 +276,15 @@ self.index.upsert(vectors=[item], namespace=self.namespace)
def get(self, vector_id: Union[str, int]) -> OutputData:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (Union[str, int]): ID of the vector to retrieve.
+
+ Returns:
+ dict: Retrieved vector or None if not found.
+ """
try:
response = self.index.fetch(ids=[str(vector_id)], namespace=self.namespace)
if str(vector_id) in response.vectors:
@@ -216,9 +295,16 @@ return None
def list_cols(self):
+ """
+ List all indexes/collections.
+
+ Returns:
+ list: List of index information.
+ """
return self.client.list_indexes()
def delete_col(self):
+ """Delete an index/collection."""
try:
self.client.delete_index(self.collection_name)
logger.info(f"Index {self.collection_name} deleted successfully")
@@ -226,9 +312,25 @@ logger.error(f"Error deleting index {self.collection_name}: {e}")
def col_info(self) -> Dict:
+ """
+ Get information about an index/collection.
+
+ Returns:
+ dict: Index information.
+ """
return self.client.describe_index(self.collection_name)
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
+ """
+ List vectors in an index with optional filtering.
+
+ Args:
+ filters (dict, optional): Filters to apply to the list. Defaults to None.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ dict: List of vectors with their metadata.
+ """
filter_dict = self._create_filter(filters) if filters else None
stats = self.index.describe_index_stats()
@@ -256,6 +358,12 @@ return {"points": [], "next_page_token": None}
def count(self) -> int:
+ """
+ Count number of vectors in the index.
+
+ Returns:
+ int: Total number of vectors.
+ """
stats = self.index.describe_index_stats()
if self.namespace:
# Safely get the namespace stats and return vector_count, defaulting to 0 if not found
@@ -266,6 +374,9 @@ return stats.total_vector_count or 0
def reset(self):
+ """
+ Reset the index by deleting and recreating it.
+ """
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_col(self.embedding_model_dims, self.metric)+ self.create_col(self.embedding_model_dims, self.metric)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/pinecone.py |
Help me document legacy Python code | import logging
from importlib.metadata import version
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
try:
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
from pymongo.errors import PyMongoError
from pymongo.operations import SearchIndexModel
except ImportError:
raise ImportError("The 'pymongo' library is required. Please install it using 'pip install pymongo'.")
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
_DRIVER_METADATA = DriverInfo(name="Mem0", version=version("mem0ai"))
class OutputData(BaseModel):
id: Optional[str]
score: Optional[float]
payload: Optional[dict]
class MongoDB(VectorStoreBase):
VECTOR_TYPE = "knnVector"
SIMILARITY_METRIC = "cosine"
def __init__(self, db_name: str, collection_name: str, embedding_model_dims: int, mongo_uri: str):
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.db_name = db_name
self.client = MongoClient(mongo_uri, driver=_DRIVER_METADATA)
self.db = self.client[db_name]
self.collection = self.create_col()
def create_col(self):
try:
database = self.client[self.db_name]
collection_names = database.list_collection_names()
if self.collection_name not in collection_names:
logger.info(f"Collection '{self.collection_name}' does not exist. Creating it now.")
collection = database[self.collection_name]
# Insert and remove a placeholder document to create the collection
collection.insert_one({"_id": 0, "placeholder": True})
collection.delete_one({"_id": 0})
logger.info(f"Collection '{self.collection_name}' created successfully.")
else:
collection = database[self.collection_name]
self.index_name = f"{self.collection_name}_vector_index"
found_indexes = list(collection.list_search_indexes(name=self.index_name))
if found_indexes:
logger.info(f"Search index '{self.index_name}' already exists in collection '{self.collection_name}'.")
else:
search_index_model = SearchIndexModel(
name=self.index_name,
definition={
"mappings": {
"dynamic": False,
"fields": {
"embedding": {
"type": self.VECTOR_TYPE,
"dimensions": self.embedding_model_dims,
"similarity": self.SIMILARITY_METRIC,
}
},
}
},
)
collection.create_search_index(search_index_model)
logger.info(
f"Search index '{self.index_name}' created successfully for collection '{self.collection_name}'."
)
return collection
except PyMongoError as e:
logger.error(f"Error creating collection and search index: {e}")
return None
def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> None:
logger.info(f"Inserting {len(vectors)} vectors into collection '{self.collection_name}'.")
data = []
for vector, payload, _id in zip(vectors, payloads or [{}] * len(vectors), ids or [None] * len(vectors)):
document = {"_id": _id, "embedding": vector, "payload": payload}
data.append(document)
try:
self.collection.insert_many(data)
logger.info(f"Inserted {len(data)} documents into '{self.collection_name}'.")
except PyMongoError as e:
logger.error(f"Error inserting data: {e}")
def search(self, query: str, vectors: List[float], limit=5, filters: Optional[Dict] = None) -> List[OutputData]:
found_indexes = list(self.collection.list_search_indexes(name=self.index_name))
if not found_indexes:
logger.error(f"Index '{self.index_name}' does not exist.")
return []
results = []
try:
collection = self.client[self.db_name][self.collection_name]
pipeline = [
{
"$vectorSearch": {
"index": self.index_name,
"limit": limit,
"numCandidates": limit,
"queryVector": vectors,
"path": "embedding",
}
},
{"$set": {"score": {"$meta": "vectorSearchScore"}}},
{"$project": {"embedding": 0}},
]
# Add filter stage if filters are provided
if filters:
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({"payload." + key: value})
if filter_conditions:
# Add a $match stage after vector search to apply filters
pipeline.insert(1, {"$match": {"$and": filter_conditions}})
results = list(collection.aggregate(pipeline))
logger.info(f"Vector search completed. Found {len(results)} documents.")
except Exception as e:
logger.error(f"Error during vector search for query {query}: {e}")
return []
output = [OutputData(id=str(doc["_id"]), score=doc.get("score"), payload=doc.get("payload")) for doc in results]
return output
def delete(self, vector_id: str) -> None:
try:
result = self.collection.delete_one({"_id": vector_id})
if result.deleted_count > 0:
logger.info(f"Deleted document with ID '{vector_id}'.")
else:
logger.warning(f"No document found with ID '{vector_id}' to delete.")
except PyMongoError as e:
logger.error(f"Error deleting document: {e}")
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
update_fields = {}
if vector is not None:
update_fields["embedding"] = vector
if payload is not None:
update_fields["payload"] = payload
if update_fields:
try:
result = self.collection.update_one({"_id": vector_id}, {"$set": update_fields})
if result.matched_count > 0:
logger.info(f"Updated document with ID '{vector_id}'.")
else:
logger.warning(f"No document found with ID '{vector_id}' to update.")
except PyMongoError as e:
logger.error(f"Error updating document: {e}")
def get(self, vector_id: str) -> Optional[OutputData]:
try:
doc = self.collection.find_one({"_id": vector_id})
if doc:
logger.info(f"Retrieved document with ID '{vector_id}'.")
return OutputData(id=str(doc["_id"]), score=None, payload=doc.get("payload"))
else:
logger.warning(f"Document with ID '{vector_id}' not found.")
return None
except PyMongoError as e:
logger.error(f"Error retrieving document: {e}")
return None
def list_cols(self) -> List[str]:
try:
collections = self.db.list_collection_names()
logger.info(f"Listing collections in database '{self.db_name}': {collections}")
return collections
except PyMongoError as e:
logger.error(f"Error listing collections: {e}")
return []
def delete_col(self) -> None:
try:
self.collection.drop()
logger.info(f"Deleted collection '{self.collection_name}'.")
except PyMongoError as e:
logger.error(f"Error deleting collection: {e}")
def col_info(self) -> Dict[str, Any]:
try:
stats = self.db.command("collstats", self.collection_name)
info = {"name": self.collection_name, "count": stats.get("count"), "size": stats.get("size")}
logger.info(f"Collection info: {info}")
return info
except PyMongoError as e:
logger.error(f"Error getting collection info: {e}")
return {}
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
try:
query = {}
if filters:
# Apply filters to the payload field
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({"payload." + key: value})
if filter_conditions:
query = {"$and": filter_conditions}
cursor = self.collection.find(query).limit(limit)
results = [OutputData(id=str(doc["_id"]), score=None, payload=doc.get("payload")) for doc in cursor]
logger.info(f"Retrieved {len(results)} documents from collection '{self.collection_name}'.")
return results
except PyMongoError as e:
logger.error(f"Error listing documents: {e}")
return []
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.collection = self.create_col(self.collection_name)
def __del__(self) -> None:
if hasattr(self, "client"):
self.client.close()
logger.info("MongoClient connection closed.") | --- +++ @@ -30,6 +30,15 @@ SIMILARITY_METRIC = "cosine"
def __init__(self, db_name: str, collection_name: str, embedding_model_dims: int, mongo_uri: str):
+ """
+ Initialize the MongoDB vector store with vector search capabilities.
+
+ Args:
+ db_name (str): Database name
+ collection_name (str): Collection name
+ embedding_model_dims (int): Dimension of the embedding vector
+ mongo_uri (str): MongoDB connection URI
+ """
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.db_name = db_name
@@ -39,6 +48,7 @@ self.collection = self.create_col()
def create_col(self):
+ """Create new collection with vector search index."""
try:
database = self.client[self.db_name]
collection_names = database.list_collection_names()
@@ -84,6 +94,14 @@ def insert(
self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
) -> None:
+ """
+ Insert vectors into the collection.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert.
+ payloads (List[Dict], optional): List of payloads corresponding to vectors.
+ ids (List[str], optional): List of IDs corresponding to vectors.
+ """
logger.info(f"Inserting {len(vectors)} vectors into collection '{self.collection_name}'.")
data = []
@@ -97,6 +115,18 @@ logger.error(f"Error inserting data: {e}")
def search(self, query: str, vectors: List[float], limit=5, filters: Optional[Dict] = None) -> List[OutputData]:
+ """
+ Search for similar vectors using the vector search index.
+
+ Args:
+ query (str): Query string
+ vectors (List[float]): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Dict, optional): Filters to apply to the search.
+
+ Returns:
+ List[OutputData]: Search results.
+ """
found_indexes = list(self.collection.list_search_indexes(name=self.index_name))
if not found_indexes:
@@ -140,6 +170,12 @@ return output
def delete(self, vector_id: str) -> None:
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
try:
result = self.collection.delete_one({"_id": vector_id})
if result.deleted_count > 0:
@@ -150,6 +186,14 @@ logger.error(f"Error deleting document: {e}")
def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (List[float], optional): Updated vector.
+ payload (Dict, optional): Updated payload.
+ """
update_fields = {}
if vector is not None:
update_fields["embedding"] = vector
@@ -167,6 +211,15 @@ logger.error(f"Error updating document: {e}")
def get(self, vector_id: str) -> Optional[OutputData]:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ Optional[OutputData]: Retrieved vector or None if not found.
+ """
try:
doc = self.collection.find_one({"_id": vector_id})
if doc:
@@ -180,6 +233,12 @@ return None
def list_cols(self) -> List[str]:
+ """
+ List all collections in the database.
+
+ Returns:
+ List[str]: List of collection names.
+ """
try:
collections = self.db.list_collection_names()
logger.info(f"Listing collections in database '{self.db_name}': {collections}")
@@ -189,6 +248,7 @@ return []
def delete_col(self) -> None:
+ """Delete the collection."""
try:
self.collection.drop()
logger.info(f"Deleted collection '{self.collection_name}'.")
@@ -196,6 +256,12 @@ logger.error(f"Error deleting collection: {e}")
def col_info(self) -> Dict[str, Any]:
+ """
+ Get information about the collection.
+
+ Returns:
+ Dict[str, Any]: Collection information.
+ """
try:
stats = self.db.command("collstats", self.collection_name)
info = {"name": self.collection_name, "count": stats.get("count"), "size": stats.get("size")}
@@ -206,6 +272,16 @@ return {}
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
+ """
+ List vectors in the collection.
+
+ Args:
+ filters (Dict, optional): Filters to apply to the list.
+ limit (int, optional): Number of vectors to return.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
try:
query = {}
if filters:
@@ -225,11 +301,13 @@ return []
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.collection = self.create_col(self.collection_name)
def __del__(self) -> None:
+ """Close the database connection when the object is deleted."""
if hasattr(self, "client"):
self.client.close()
- logger.info("MongoClient connection closed.")+ logger.info("MongoClient connection closed.")
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/mongodb.py |
Help me document legacy Python code | import logging
from typing import Dict, List, Optional
from pydantic import BaseModel
from mem0.vector_stores.base import VectorStoreBase
try:
from upstash_vector import Index
except ImportError:
raise ImportError("The 'upstash_vector' library is required. Please install it using 'pip install upstash_vector'.")
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # is None for `get` method
payload: Optional[Dict] # metadata
class UpstashVector(VectorStoreBase):
def __init__(
self,
collection_name: str,
url: Optional[str] = None,
token: Optional[str] = None,
client: Optional[Index] = None,
enable_embeddings: bool = False,
):
if client:
self.client = client
elif url and token:
self.client = Index(url, token)
else:
raise ValueError("Either a client or URL and token must be provided.")
self.collection_name = collection_name
self.enable_embeddings = enable_embeddings
def insert(
self,
vectors: List[list],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
):
logger.info(f"Inserting {len(vectors)} vectors into namespace {self.collection_name}")
if self.enable_embeddings:
if not payloads or any("data" not in m or m["data"] is None for m in payloads):
raise ValueError("When embeddings are enabled, all payloads must contain a 'data' field.")
processed_vectors = [
{
"id": ids[i] if ids else None,
"data": payloads[i]["data"],
"metadata": payloads[i],
}
for i, v in enumerate(vectors)
]
else:
processed_vectors = [
{
"id": ids[i] if ids else None,
"vector": vectors[i],
"metadata": payloads[i] if payloads else None,
}
for i, v in enumerate(vectors)
]
self.client.upsert(
vectors=processed_vectors,
namespace=self.collection_name,
)
def _stringify(self, x):
return f'"{x}"' if isinstance(x, str) else x
def search(
self,
query: str,
vectors: List[list],
limit: int = 5,
filters: Optional[Dict] = None,
) -> List[OutputData]:
filters_str = " AND ".join([f"{k} = {self._stringify(v)}" for k, v in filters.items()]) if filters else None
response = []
if self.enable_embeddings:
response = self.client.query(
data=query,
top_k=limit,
filter=filters_str or "",
include_metadata=True,
namespace=self.collection_name,
)
else:
queries = [
{
"vector": v,
"top_k": limit,
"filter": filters_str or "",
"include_metadata": True,
"namespace": self.collection_name,
}
for v in vectors
]
responses = self.client.query_many(queries=queries)
# flatten
response = [res for res_list in responses for res in res_list]
return [
OutputData(
id=res.id,
score=res.score,
payload=res.metadata,
)
for res in response
]
def delete(self, vector_id: int):
self.client.delete(
ids=[str(vector_id)],
namespace=self.collection_name,
)
def update(
self,
vector_id: int,
vector: Optional[list] = None,
payload: Optional[dict] = None,
):
self.client.update(
id=str(vector_id),
vector=vector,
data=payload.get("data") if payload else None,
metadata=payload,
namespace=self.collection_name,
)
def get(self, vector_id: int) -> Optional[OutputData]:
response = self.client.fetch(
ids=[str(vector_id)],
namespace=self.collection_name,
include_metadata=True,
)
if len(response) == 0:
return None
vector = response[0]
if not vector:
return None
return OutputData(id=vector.id, score=None, payload=vector.metadata)
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[List[OutputData]]:
filters_str = " AND ".join([f"{k} = {self._stringify(v)}" for k, v in filters.items()]) if filters else None
info = self.client.info()
ns_info = info.namespaces.get(self.collection_name)
if not ns_info or ns_info.vector_count == 0:
return [[]]
random_vector = [1.0] * self.client.info().dimension
results, query = self.client.resumable_query(
vector=random_vector,
filter=filters_str or "",
include_metadata=True,
namespace=self.collection_name,
top_k=100,
)
with query:
while True:
if len(results) >= limit:
break
res = query.fetch_next(100)
if not res:
break
results.extend(res)
parsed_result = [
OutputData(
id=res.id,
score=res.score,
payload=res.metadata,
)
for res in results
]
return [parsed_result]
def create_col(self, name, vector_size, distance):
pass
def list_cols(self) -> List[str]:
return self.client.list_namespaces()
def delete_col(self):
self.client.reset(namespace=self.collection_name)
pass
def col_info(self):
return self.client.info()
def reset(self):
self.delete_col() | --- +++ @@ -29,6 +29,15 @@ client: Optional[Index] = None,
enable_embeddings: bool = False,
):
+ """
+ Initialize the UpstashVector vector store.
+
+ Args:
+ url (str, optional): URL for Upstash Vector index. Defaults to None.
+ token (int, optional): Token for Upstash Vector index. Defaults to None.
+ client (Index, optional): Existing `upstash_vector.Index` client instance. Defaults to None.
+ namespace (str, optional): Default namespace for the index. Defaults to None.
+ """
if client:
self.client = client
elif url and token:
@@ -46,6 +55,14 @@ payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
):
+ """
+ Insert vectors
+
+ Args:
+ vectors (list): List of vectors to insert.
+ payloads (list, optional): List of payloads corresponding to vectors. These will be passed as metadatas to the Upstash Vector client. Defaults to None.
+ ids (list, optional): List of IDs corresponding to vectors. Defaults to None.
+ """
logger.info(f"Inserting {len(vectors)} vectors into namespace {self.collection_name}")
if self.enable_embeddings:
@@ -84,6 +101,17 @@ limit: int = 5,
filters: Optional[Dict] = None,
) -> List[OutputData]:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (list): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Dict, optional): Filters to apply to the search.
+
+ Returns:
+ List[OutputData]: Search results.
+ """
filters_str = " AND ".join([f"{k} = {self._stringify(v)}" for k, v in filters.items()]) if filters else None
@@ -122,6 +150,12 @@ ]
def delete(self, vector_id: int):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (int): ID of the vector to delete.
+ """
self.client.delete(
ids=[str(vector_id)],
namespace=self.collection_name,
@@ -133,6 +167,14 @@ vector: Optional[list] = None,
payload: Optional[dict] = None,
):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (int): ID of the vector to update.
+ vector (list, optional): Updated vector. Defaults to None.
+ payload (dict, optional): Updated payload. Defaults to None.
+ """
self.client.update(
id=str(vector_id),
vector=vector,
@@ -142,6 +184,15 @@ )
def get(self, vector_id: int) -> Optional[OutputData]:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (int): ID of the vector to retrieve.
+
+ Returns:
+ dict: Retrieved vector.
+ """
response = self.client.fetch(
ids=[str(vector_id)],
namespace=self.collection_name,
@@ -155,6 +206,14 @@ return OutputData(id=vector.id, score=None, payload=vector.metadata)
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[List[OutputData]]:
+ """
+ List all memories.
+ Args:
+ filters (Dict, optional): Filters to apply to the search. Defaults to None.
+ limit (int, optional): Number of results to return. Defaults to 100.
+ Returns:
+ List[OutputData]: Search results.
+ """
filters_str = " AND ".join([f"{k} = {self._stringify(v)}" for k, v in filters.items()]) if filters else None
info = self.client.info()
@@ -192,17 +251,43 @@ return [parsed_result]
def create_col(self, name, vector_size, distance):
+ """
+ Upstash Vector has namespaces instead of collections. A namespace is created when the first vector is inserted.
+
+ This method is a placeholder to maintain the interface.
+ """
pass
def list_cols(self) -> List[str]:
+ """
+ Lists all namespaces in the Upstash Vector index.
+ Returns:
+ List[str]: List of namespaces.
+ """
return self.client.list_namespaces()
def delete_col(self):
+ """
+ Delete the namespace and all vectors in it.
+ """
self.client.reset(namespace=self.collection_name)
pass
def col_info(self):
+ """
+ Return general information about the Upstash Vector index.
+
+ - Total number of vectors across all namespaces
+ - Total number of vectors waiting to be indexed across all namespaces
+ - Total size of the index on disk in bytes
+ - Vector dimension
+ - Similarity function used
+ - Per-namespace vector and pending vector counts
+ """
return self.client.info()
def reset(self):
- self.delete_col()+ """
+ Reset the Upstash Vector index.
+ """
+ self.delete_col()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/upstash_vector.py |
Generate docstrings with parameter types | import json
import logging
from datetime import datetime
from functools import reduce
import numpy as np
import pytz
import redis
from redis.commands.search.query import Query
from redisvl.index import SearchIndex
from redisvl.query import VectorQuery
from redisvl.query.filter import Tag
from mem0.memory.utils import extract_json
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
# TODO: Improve as these are not the best fields for the Redis's perspective. Might do away with them.
DEFAULT_FIELDS = [
{"name": "memory_id", "type": "tag"},
{"name": "hash", "type": "tag"},
{"name": "agent_id", "type": "tag"},
{"name": "run_id", "type": "tag"},
{"name": "user_id", "type": "tag"},
{"name": "memory", "type": "text"},
{"name": "metadata", "type": "text"},
# TODO: Although it is numeric but also accepts string
{"name": "created_at", "type": "numeric"},
{"name": "updated_at", "type": "numeric"},
{
"name": "embedding",
"type": "vector",
"attrs": {"distance_metric": "cosine", "algorithm": "flat", "datatype": "float32"},
},
]
excluded_keys = {"user_id", "agent_id", "run_id", "hash", "data", "created_at", "updated_at"}
class MemoryResult:
def __init__(self, id: str, payload: dict, score: float = None):
self.id = id
self.payload = payload
self.score = score
class RedisDB(VectorStoreBase):
def __init__(
self,
redis_url: str,
collection_name: str,
embedding_model_dims: int,
):
self.embedding_model_dims = embedding_model_dims
index_schema = {
"name": collection_name,
"prefix": f"mem0:{collection_name}",
}
fields = DEFAULT_FIELDS.copy()
fields[-1]["attrs"]["dims"] = embedding_model_dims
self.schema = {"index": index_schema, "fields": fields}
self.client = redis.Redis.from_url(redis_url)
self.index = SearchIndex.from_dict(self.schema)
self.index.set_client(self.client)
self.index.create(overwrite=True)
def create_col(self, name=None, vector_size=None, distance=None):
# Use provided parameters or fall back to instance attributes
collection_name = name or self.schema["index"]["name"]
embedding_dims = vector_size or self.embedding_model_dims
distance_metric = distance or "cosine"
# Create a new schema with the specified parameters
index_schema = {
"name": collection_name,
"prefix": f"mem0:{collection_name}",
}
# Copy the default fields and update the vector field with the specified dimensions
fields = DEFAULT_FIELDS.copy()
fields[-1]["attrs"]["dims"] = embedding_dims
fields[-1]["attrs"]["distance_metric"] = distance_metric
# Create the schema
schema = {"index": index_schema, "fields": fields}
# Create the index
index = SearchIndex.from_dict(schema)
index.set_client(self.client)
index.create(overwrite=True)
# Update instance attributes if creating a new collection
if name:
self.schema = schema
self.index = index
return index
def insert(self, vectors: list, payloads: list = None, ids: list = None):
data = []
for vector, payload, id in zip(vectors, payloads, ids):
# Start with required fields
entry = {
"memory_id": id,
"hash": payload["hash"],
"memory": payload["data"],
"created_at": int(datetime.fromisoformat(payload["created_at"]).timestamp()),
"embedding": np.array(vector, dtype=np.float32).tobytes(),
}
# Conditionally add optional fields
for field in ["agent_id", "run_id", "user_id"]:
if field in payload:
entry[field] = payload[field]
# Add metadata excluding specific keys
entry["metadata"] = json.dumps({k: v for k, v in payload.items() if k not in excluded_keys})
data.append(entry)
self.index.load(data, id_field="memory_id")
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None):
conditions = [Tag(key) == value for key, value in filters.items() if value is not None]
filter = reduce(lambda x, y: x & y, conditions)
v = VectorQuery(
vector=np.array(vectors, dtype=np.float32).tobytes(),
vector_field_name="embedding",
return_fields=["memory_id", "hash", "agent_id", "run_id", "user_id", "memory", "metadata", "created_at"],
filter_expression=filter,
num_results=limit,
)
results = self.index.query(v)
return [
MemoryResult(
id=result["memory_id"],
score=float(result["vector_distance"]),
payload={
"hash": result["hash"],
"data": result["memory"],
"created_at": datetime.fromtimestamp(
int(result["created_at"]), tz=pytz.timezone("US/Pacific")
).isoformat(timespec="microseconds"),
**(
{
"updated_at": datetime.fromtimestamp(
int(result["updated_at"]), tz=pytz.timezone("US/Pacific")
).isoformat(timespec="microseconds")
}
if "updated_at" in result
else {}
),
**{field: result[field] for field in ["agent_id", "run_id", "user_id"] if field in result},
**{k: v for k, v in json.loads(extract_json(result["metadata"])).items()},
},
)
for result in results
]
def delete(self, vector_id):
self.index.drop_keys(f"{self.schema['index']['prefix']}:{vector_id}")
def update(self, vector_id=None, vector=None, payload=None):
data = {
"memory_id": vector_id,
"hash": payload["hash"],
"memory": payload["data"],
"created_at": int(datetime.fromisoformat(payload["created_at"]).timestamp()),
"updated_at": int(datetime.fromisoformat(payload["updated_at"]).timestamp()),
"embedding": np.array(vector, dtype=np.float32).tobytes(),
}
for field in ["agent_id", "run_id", "user_id"]:
if field in payload:
data[field] = payload[field]
data["metadata"] = json.dumps({k: v for k, v in payload.items() if k not in excluded_keys})
self.index.load(data=[data], keys=[f"{self.schema['index']['prefix']}:{vector_id}"], id_field="memory_id")
def get(self, vector_id):
result = self.index.fetch(vector_id)
payload = {
"hash": result["hash"],
"data": result["memory"],
"created_at": datetime.fromtimestamp(int(result["created_at"]), tz=pytz.timezone("US/Pacific")).isoformat(
timespec="microseconds"
),
**(
{
"updated_at": datetime.fromtimestamp(
int(result["updated_at"]), tz=pytz.timezone("US/Pacific")
).isoformat(timespec="microseconds")
}
if "updated_at" in result
else {}
),
**{field: result[field] for field in ["agent_id", "run_id", "user_id"] if field in result},
**{k: v for k, v in json.loads(extract_json(result["metadata"])).items()},
}
return MemoryResult(id=result["memory_id"], payload=payload)
def list_cols(self):
return self.index.listall()
def delete_col(self):
self.index.delete()
def col_info(self, name):
return self.index.info()
def reset(self):
collection_name = self.schema["index"]["name"]
logger.warning(f"Resetting index {collection_name}...")
self.delete_col()
self.index = SearchIndex.from_dict(self.schema)
self.index.set_client(self.client)
self.index.create(overwrite=True)
# or use
# self.create_col(collection_name, self.embedding_model_dims)
# Recreate the index with the same parameters
self.create_col(collection_name, self.embedding_model_dims)
def list(self, filters: dict = None, limit: int = None) -> list:
conditions = [Tag(key) == value for key, value in filters.items() if value is not None]
filter = reduce(lambda x, y: x & y, conditions)
query = Query(str(filter)).sort_by("created_at", asc=False)
if limit is not None:
query = Query(str(filter)).sort_by("created_at", asc=False).paging(0, limit)
results = self.index.search(query)
return [
[
MemoryResult(
id=result["memory_id"],
payload={
"hash": result["hash"],
"data": result["memory"],
"created_at": datetime.fromtimestamp(
int(result["created_at"]), tz=pytz.timezone("US/Pacific")
).isoformat(timespec="microseconds"),
**(
{
"updated_at": datetime.fromtimestamp(
int(result["updated_at"]), tz=pytz.timezone("US/Pacific")
).isoformat(timespec="microseconds")
}
if result.__dict__.get("updated_at")
else {}
),
**{
field: result[field]
for field in ["agent_id", "run_id", "user_id"]
if field in result.__dict__
},
**{k: v for k, v in json.loads(extract_json(result["metadata"])).items()},
},
)
for result in results.docs
]
] | --- +++ @@ -52,6 +52,14 @@ collection_name: str,
embedding_model_dims: int,
):
+ """
+ Initialize the Redis vector store.
+
+ Args:
+ redis_url (str): Redis URL.
+ collection_name (str): Collection name.
+ embedding_model_dims (int): Embedding model dimensions.
+ """
self.embedding_model_dims = embedding_model_dims
index_schema = {
"name": collection_name,
@@ -69,6 +77,17 @@ self.index.create(overwrite=True)
def create_col(self, name=None, vector_size=None, distance=None):
+ """
+ Create a new collection (index) in Redis.
+
+ Args:
+ name (str, optional): Name for the collection. Defaults to None, which uses the current collection_name.
+ vector_size (int, optional): Size of the vector embeddings. Defaults to None, which uses the current embedding_model_dims.
+ distance (str, optional): Distance metric to use. Defaults to None, which uses 'cosine'.
+
+ Returns:
+ The created index object.
+ """
# Use provided parameters or fall back to instance attributes
collection_name = name or self.schema["index"]["name"]
embedding_dims = vector_size or self.embedding_model_dims
@@ -216,6 +235,9 @@ return self.index.info()
def reset(self):
+ """
+ Reset the index by deleting and recreating it.
+ """
collection_name = self.schema["index"]["name"]
logger.warning(f"Resetting index {collection_name}...")
self.delete_col()
@@ -231,6 +253,9 @@ self.create_col(collection_name, self.embedding_model_dims)
def list(self, filters: dict = None, limit: int = None) -> list:
+ """
+ List all recent created memories from the vector store.
+ """
conditions = [Tag(key) == value for key, value in filters.items() if value is not None]
filter = reduce(lambda x, y: x & y, conditions)
query = Query(str(filter)).sort_by("created_at", asc=False)
@@ -267,4 +292,4 @@ )
for result in results.docs
]
- ]+ ]
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/redis.py |
Document all public functions with docstrings | import logging
from typing import Dict, Optional
from pydantic import BaseModel
from mem0.configs.vector_stores.milvus import MetricType
from mem0.vector_stores.base import VectorStoreBase
try:
import pymilvus # noqa: F401
except ImportError:
raise ImportError("The 'pymilvus' library is required. Please install it using 'pip install pymilvus'.")
from pymilvus import CollectionSchema, DataType, FieldSchema, MilvusClient
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # distance
payload: Optional[Dict] # metadata
class MilvusDB(VectorStoreBase):
def __init__(
self,
url: str,
token: str,
collection_name: str,
embedding_model_dims: int,
metric_type: MetricType,
db_name: str,
) -> None:
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.metric_type = metric_type
self.client = MilvusClient(uri=url, token=token, db_name=db_name)
self.create_col(
collection_name=self.collection_name,
vector_size=self.embedding_model_dims,
metric_type=self.metric_type,
)
def create_col(
self,
collection_name: str,
vector_size: int,
metric_type: MetricType = MetricType.COSINE,
) -> None:
if self.client.has_collection(collection_name):
logger.info(f"Collection {collection_name} already exists. Skipping creation.")
else:
fields = [
FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=512),
FieldSchema(name="vectors", dtype=DataType.FLOAT_VECTOR, dim=vector_size),
FieldSchema(name="metadata", dtype=DataType.JSON),
]
schema = CollectionSchema(fields, enable_dynamic_field=True)
index = self.client.prepare_index_params(
field_name="vectors", metric_type=metric_type, index_type="AUTOINDEX", index_name="vector_index"
)
self.client.create_collection(collection_name=collection_name, schema=schema, index_params=index)
def insert(self, ids, vectors, payloads, **kwargs: Optional[dict[str, any]]):
# Batch insert all records at once for better performance and consistency
data = [
{"id": idx, "vectors": embedding, "metadata": metadata}
for idx, embedding, metadata in zip(ids, vectors, payloads)
]
self.client.insert(collection_name=self.collection_name, data=data, **kwargs)
def _create_filter(self, filters: dict):
operands = []
for key, value in filters.items():
if isinstance(value, str):
operands.append(f'(metadata["{key}"] == "{value}")')
else:
operands.append(f'(metadata["{key}"] == {value})')
return " and ".join(operands)
def _parse_output(self, data: list):
memory = []
for value in data:
uid, score, metadata = (
value.get("id"),
value.get("distance"),
value.get("entity", {}).get("metadata"),
)
memory_obj = OutputData(id=uid, score=score, payload=metadata)
memory.append(memory_obj)
return memory
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> list:
query_filter = self._create_filter(filters) if filters else None
hits = self.client.search(
collection_name=self.collection_name,
data=[vectors],
limit=limit,
filter=query_filter,
output_fields=["*"],
)
result = self._parse_output(data=hits[0])
return result
def delete(self, vector_id):
self.client.delete(collection_name=self.collection_name, ids=vector_id)
def update(self, vector_id=None, vector=None, payload=None):
schema = {"id": vector_id, "vectors": vector, "metadata": payload}
self.client.upsert(collection_name=self.collection_name, data=schema)
def get(self, vector_id):
result = self.client.get(collection_name=self.collection_name, ids=vector_id)
output = OutputData(
id=result[0].get("id", None),
score=None,
payload=result[0].get("metadata", None),
)
return output
def list_cols(self):
return self.client.list_collections()
def delete_col(self):
return self.client.drop_collection(collection_name=self.collection_name)
def col_info(self):
return self.client.get_collection_stats(collection_name=self.collection_name)
def list(self, filters: dict = None, limit: int = 100) -> list:
query_filter = self._create_filter(filters) if filters else None
result = self.client.query(collection_name=self.collection_name, filter=query_filter, limit=limit)
memories = []
for data in result:
obj = OutputData(id=data.get("id"), score=None, payload=data.get("metadata"))
memories.append(obj)
return [memories]
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_col(self.collection_name, self.embedding_model_dims, self.metric_type) | --- +++ @@ -32,6 +32,16 @@ metric_type: MetricType,
db_name: str,
) -> None:
+ """Initialize the MilvusDB database.
+
+ Args:
+ url (str): Full URL for Milvus/Zilliz server.
+ token (str): Token/api_key for Zilliz server / for local setup defaults to None.
+ collection_name (str): Name of the collection (defaults to mem0).
+ embedding_model_dims (int): Dimensions of the embedding model (defaults to 1536).
+ metric_type (MetricType): Metric type for similarity search (defaults to L2).
+ db_name (str): Name of the database (defaults to "").
+ """
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.metric_type = metric_type
@@ -48,6 +58,13 @@ vector_size: int,
metric_type: MetricType = MetricType.COSINE,
) -> None:
+ """Create a new collection with index_type AUTOINDEX.
+
+ Args:
+ collection_name (str): Name of the collection (defaults to mem0).
+ vector_size (int): Dimensions of the embedding model (defaults to 1536).
+ metric_type (MetricType, optional): etric type for similarity search. Defaults to MetricType.COSINE.
+ """
if self.client.has_collection(collection_name):
logger.info(f"Collection {collection_name} already exists. Skipping creation.")
@@ -66,6 +83,13 @@ self.client.create_collection(collection_name=collection_name, schema=schema, index_params=index)
def insert(self, ids, vectors, payloads, **kwargs: Optional[dict[str, any]]):
+ """Insert vectors into a collection.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert.
+ payloads (List[Dict], optional): List of payloads corresponding to vectors.
+ ids (List[str], optional): List of IDs corresponding to vectors.
+ """
# Batch insert all records at once for better performance and consistency
data = [
{"id": idx, "vectors": embedding, "metadata": metadata}
@@ -74,6 +98,14 @@ self.client.insert(collection_name=self.collection_name, data=data, **kwargs)
def _create_filter(self, filters: dict):
+ """Prepare filters for efficient query.
+
+ Args:
+ filters (dict): filters [user_id, agent_id, run_id]
+
+ Returns:
+ str: formated filter.
+ """
operands = []
for key, value in filters.items():
if isinstance(value, str):
@@ -84,6 +116,15 @@ return " and ".join(operands)
def _parse_output(self, data: list):
+ """
+ Parse the output data.
+
+ Args:
+ data (Dict): Output data.
+
+ Returns:
+ List[OutputData]: Parsed output data.
+ """
memory = []
for value in data:
@@ -99,6 +140,18 @@ return memory
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> list:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query.
+ vectors (List[float]): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Dict, optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ list: Search results.
+ """
query_filter = self._create_filter(filters) if filters else None
hits = self.client.search(
collection_name=self.collection_name,
@@ -111,13 +164,36 @@ return result
def delete(self, vector_id):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
self.client.delete(collection_name=self.collection_name, ids=vector_id)
def update(self, vector_id=None, vector=None, payload=None):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (List[float], optional): Updated vector.
+ payload (Dict, optional): Updated payload.
+ """
schema = {"id": vector_id, "vectors": vector, "metadata": payload}
self.client.upsert(collection_name=self.collection_name, data=schema)
def get(self, vector_id):
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Retrieved vector.
+ """
result = self.client.get(collection_name=self.collection_name, ids=vector_id)
output = OutputData(
id=result[0].get("id", None),
@@ -127,15 +203,38 @@ return output
def list_cols(self):
+ """
+ List all collections.
+
+ Returns:
+ List[str]: List of collection names.
+ """
return self.client.list_collections()
def delete_col(self):
+ """Delete a collection."""
return self.client.drop_collection(collection_name=self.collection_name)
def col_info(self):
+ """
+ Get information about a collection.
+
+ Returns:
+ Dict[str, Any]: Collection information.
+ """
return self.client.get_collection_stats(collection_name=self.collection_name)
def list(self, filters: dict = None, limit: int = 100) -> list:
+ """
+ List all vectors in a collection.
+
+ Args:
+ filters (Dict, optional): Filters to apply to the list.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
query_filter = self._create_filter(filters) if filters else None
result = self.client.query(collection_name=self.collection_name, filter=query_filter, limit=limit)
memories = []
@@ -145,6 +244,7 @@ return [memories]
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_col(self.collection_name, self.embedding_model_dims, self.metric_type)+ self.create_col(self.collection_name, self.embedding_model_dims, self.metric_type)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/milvus.py |
Add docstrings following best practices | import logging
import os
import pickle
import uuid
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from pydantic import BaseModel
import warnings
try:
    # Suppress SWIG deprecation warnings emitted when importing FAISS
    # (its Python bindings are SWIG-generated).
    warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*SwigPy.*")
    warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*swigvarlink.*")
    # Quiet FAISS's own loggers *before* importing so loader chatter is hidden too.
    logging.getLogger("faiss").setLevel(logging.WARNING)
    logging.getLogger("faiss.loader").setLevel(logging.WARNING)
    import faiss
except ImportError:
    # Re-raise with actionable install instructions instead of the bare error.
    raise ImportError(
        "Could not import faiss python package. "
        "Please install it with `pip install faiss-gpu` (for CUDA supported GPU) "
        "or `pip install faiss-cpu` (depending on Python version)."
    )
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
    """A single vector-store result: memory id, similarity score, and payload."""

    id: Optional[str]  # memory id
    score: Optional[float]  # distance (None for direct lookups such as `get`)
    payload: Optional[Dict]  # metadata
class FAISS(VectorStoreBase):
    def __init__(
        self,
        collection_name: str,
        path: Optional[str] = None,
        distance_strategy: str = "euclidean",
        normalize_L2: bool = False,
        embedding_model_dims: int = 1536,
    ):
        """
        Initialize the FAISS vector store.

        Args:
            collection_name (str): Name of the collection.
            path (str, optional): Directory used to persist the index and docstore.
                Defaults to ``/tmp/faiss/<collection_name>``.
            distance_strategy (str, optional): Distance metric to use
                ("euclidean", "inner_product" or "cosine"). Defaults to "euclidean".
            normalize_L2 (bool, optional): Whether to L2-normalize vectors before
                insert/search; only applied when distance_strategy is "euclidean".
                Defaults to False.
            embedding_model_dims (int, optional): Dimensions of the embedding model.
                Defaults to 1536.
        """
        self.collection_name = collection_name
        self.path = path or f"/tmp/faiss/{collection_name}"
        self.distance_strategy = distance_strategy
        self.normalize_L2 = normalize_L2
        self.embedding_model_dims = embedding_model_dims

        # Initialize storage structures:
        # index       - the FAISS index object (None until created/loaded)
        # docstore    - vector id -> payload dict
        # index_to_id - FAISS slot position -> vector id
        self.index = None
        self.docstore = {}
        self.index_to_id = {}

        # Create directory if it doesn't exist.
        # NOTE(review): this creates only the *parent* of self.path; self.path
        # itself is first created by _save() — confirm this is intended.
        if self.path:
            os.makedirs(os.path.dirname(self.path), exist_ok=True)

        # Reuse a previously persisted index when both files are present;
        # otherwise start a fresh (empty) collection.
        index_path = f"{self.path}/{collection_name}.faiss"
        docstore_path = f"{self.path}/{collection_name}.pkl"

        if os.path.exists(index_path) and os.path.exists(docstore_path):
            self._load(index_path, docstore_path)
        else:
            self.create_col(collection_name)
def _load(self, index_path: str, docstore_path: str):
try:
self.index = faiss.read_index(index_path)
with open(docstore_path, "rb") as f:
self.docstore, self.index_to_id = pickle.load(f)
logger.info(f"Loaded FAISS index from {index_path} with {self.index.ntotal} vectors")
except Exception as e:
logger.warning(f"Failed to load FAISS index: {e}")
self.docstore = {}
self.index_to_id = {}
def _save(self):
if not self.path or not self.index:
return
try:
os.makedirs(self.path, exist_ok=True)
index_path = f"{self.path}/{self.collection_name}.faiss"
docstore_path = f"{self.path}/{self.collection_name}.pkl"
faiss.write_index(self.index, index_path)
with open(docstore_path, "wb") as f:
pickle.dump((self.docstore, self.index_to_id), f)
except Exception as e:
logger.warning(f"Failed to save FAISS index: {e}")
def _parse_output(self, scores, ids, limit=None) -> List[OutputData]:
if limit is None:
limit = len(ids)
results = []
for i in range(min(len(ids), limit)):
if ids[i] == -1: # FAISS returns -1 for empty results
continue
index_id = int(ids[i])
vector_id = self.index_to_id.get(index_id)
if vector_id is None:
continue
payload = self.docstore.get(vector_id)
if payload is None:
continue
payload_copy = payload.copy()
score = float(scores[i])
entry = OutputData(
id=vector_id,
score=score,
payload=payload_copy,
)
results.append(entry)
return results
def create_col(self, name: str, distance: str = None):
distance_strategy = distance or self.distance_strategy
# Create index based on distance strategy
if distance_strategy.lower() == "inner_product" or distance_strategy.lower() == "cosine":
self.index = faiss.IndexFlatIP(self.embedding_model_dims)
else:
self.index = faiss.IndexFlatL2(self.embedding_model_dims)
self.collection_name = name
self._save()
return self
def insert(
self,
vectors: List[list],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
):
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
if ids is None:
ids = [str(uuid.uuid4()) for _ in range(len(vectors))]
if payloads is None:
payloads = [{} for _ in range(len(vectors))]
if len(vectors) != len(ids) or len(vectors) != len(payloads):
raise ValueError("Vectors, payloads, and IDs must have the same length")
vectors_np = np.array(vectors, dtype=np.float32)
if self.normalize_L2 and self.distance_strategy.lower() == "euclidean":
faiss.normalize_L2(vectors_np)
self.index.add(vectors_np)
starting_idx = len(self.index_to_id)
for i, (vector_id, payload) in enumerate(zip(ids, payloads)):
self.docstore[vector_id] = payload.copy()
self.index_to_id[starting_idx + i] = vector_id
self._save()
logger.info(f"Inserted {len(vectors)} vectors into collection {self.collection_name}")
def search(
self, query: str, vectors: List[list], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
query_vectors = np.array(vectors, dtype=np.float32)
if len(query_vectors.shape) == 1:
query_vectors = query_vectors.reshape(1, -1)
if self.normalize_L2 and self.distance_strategy.lower() == "euclidean":
faiss.normalize_L2(query_vectors)
fetch_k = limit * 2 if filters else limit
scores, indices = self.index.search(query_vectors, fetch_k)
results = self._parse_output(scores[0], indices[0], limit)
if filters:
filtered_results = []
for result in results:
if self._apply_filters(result.payload, filters):
filtered_results.append(result)
if len(filtered_results) >= limit:
break
results = filtered_results[:limit]
return results
def _apply_filters(self, payload: Dict, filters: Dict) -> bool:
if not filters or not payload:
return True
for key, value in filters.items():
if key not in payload:
return False
if isinstance(value, list):
if payload[key] not in value:
return False
elif payload[key] != value:
return False
return True
def delete(self, vector_id: str):
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
index_to_delete = None
for idx, vid in self.index_to_id.items():
if vid == vector_id:
index_to_delete = idx
break
if index_to_delete is not None:
self.docstore.pop(vector_id, None)
self.index_to_id.pop(index_to_delete, None)
self._save()
logger.info(f"Deleted vector {vector_id} from collection {self.collection_name}")
else:
logger.warning(f"Vector {vector_id} not found in collection {self.collection_name}")
def update(
self,
vector_id: str,
vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
if vector_id not in self.docstore:
raise ValueError(f"Vector {vector_id} not found")
current_payload = self.docstore[vector_id].copy()
if payload is not None:
self.docstore[vector_id] = payload.copy()
current_payload = self.docstore[vector_id].copy()
if vector is not None:
self.delete(vector_id)
self.insert([vector], [current_payload], [vector_id])
else:
self._save()
logger.info(f"Updated vector {vector_id} in collection {self.collection_name}")
def get(self, vector_id: str) -> OutputData:
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
if vector_id not in self.docstore:
return None
payload = self.docstore[vector_id].copy()
return OutputData(
id=vector_id,
score=None,
payload=payload,
)
def list_cols(self) -> List[str]:
if not self.path:
return [self.collection_name] if self.index else []
try:
collections = []
path = Path(self.path).parent
for file in path.glob("*.faiss"):
collections.append(file.stem)
return collections
except Exception as e:
logger.warning(f"Failed to list collections: {e}")
return [self.collection_name] if self.index else []
def delete_col(self):
if self.path:
try:
index_path = f"{self.path}/{self.collection_name}.faiss"
docstore_path = f"{self.path}/{self.collection_name}.pkl"
if os.path.exists(index_path):
os.remove(index_path)
if os.path.exists(docstore_path):
os.remove(docstore_path)
logger.info(f"Deleted collection {self.collection_name}")
except Exception as e:
logger.warning(f"Failed to delete collection: {e}")
self.index = None
self.docstore = {}
self.index_to_id = {}
def col_info(self) -> Dict:
if self.index is None:
return {"name": self.collection_name, "count": 0}
return {
"name": self.collection_name,
"count": self.index.ntotal,
"dimension": self.index.d,
"distance": self.distance_strategy,
}
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
if self.index is None:
return []
results = []
count = 0
for vector_id, payload in self.docstore.items():
if filters and not self._apply_filters(payload, filters):
continue
payload_copy = payload.copy()
results.append(
OutputData(
id=vector_id,
score=None,
payload=payload_copy,
)
)
count += 1
if count >= limit:
break
return [results]
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_col(self.collection_name) | --- +++ @@ -46,6 +46,17 @@ normalize_L2: bool = False,
embedding_model_dims: int = 1536,
):
+ """
+ Initialize the FAISS vector store.
+
+ Args:
+ collection_name (str): Name of the collection.
+ path (str, optional): Path for local FAISS database. Defaults to None.
+ distance_strategy (str, optional): Distance strategy to use. Options: 'euclidean', 'inner_product', 'cosine'.
+ Defaults to "euclidean".
+ normalize_L2 (bool, optional): Whether to normalize L2 vectors. Only applicable for euclidean distance.
+ Defaults to False.
+ """
self.collection_name = collection_name
self.path = path or f"/tmp/faiss/{collection_name}"
self.distance_strategy = distance_strategy
@@ -70,6 +81,13 @@ self.create_col(collection_name)
def _load(self, index_path: str, docstore_path: str):
+ """
+ Load FAISS index and docstore from disk.
+
+ Args:
+ index_path (str): Path to FAISS index file.
+ docstore_path (str): Path to docstore pickle file.
+ """
try:
self.index = faiss.read_index(index_path)
with open(docstore_path, "rb") as f:
@@ -82,6 +100,7 @@ self.index_to_id = {}
def _save(self):
+ """Save FAISS index and docstore to disk."""
if not self.path or not self.index:
return
@@ -97,6 +116,17 @@ logger.warning(f"Failed to save FAISS index: {e}")
def _parse_output(self, scores, ids, limit=None) -> List[OutputData]:
+ """
+ Parse the output data.
+
+ Args:
+ scores: Similarity scores from FAISS.
+ ids: Indices from FAISS.
+ limit: Maximum number of results to return.
+
+ Returns:
+ List[OutputData]: Parsed output data.
+ """
if limit is None:
limit = len(ids)
@@ -127,6 +157,17 @@ return results
def create_col(self, name: str, distance: str = None):
+ """
+ Create a new collection.
+
+ Args:
+ name (str): Name of the collection.
+ distance (str, optional): Distance metric to use. Overrides the distance_strategy
+ passed during initialization. Defaults to None.
+
+ Returns:
+ self: The FAISS instance.
+ """
distance_strategy = distance or self.distance_strategy
# Create index based on distance strategy
@@ -147,6 +188,14 @@ payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
):
+ """
+ Insert vectors into a collection.
+
+ Args:
+ vectors (List[list]): List of vectors to insert.
+ payloads (Optional[List[Dict]], optional): List of payloads corresponding to vectors. Defaults to None.
+ ids (Optional[List[str]], optional): List of IDs corresponding to vectors. Defaults to None.
+ """
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
@@ -178,6 +227,18 @@ def search(
self, query: str, vectors: List[list], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query (not used, kept for API compatibility).
+ vectors (List[list]): List of vectors to search.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Optional[Dict], optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ List[OutputData]: Search results.
+ """
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
@@ -206,6 +267,16 @@ return results
def _apply_filters(self, payload: Dict, filters: Dict) -> bool:
+ """
+ Apply filters to a payload.
+
+ Args:
+ payload (Dict): Payload to filter.
+ filters (Dict): Filters to apply.
+
+ Returns:
+ bool: True if payload passes filters, False otherwise.
+ """
if not filters or not payload:
return True
@@ -222,6 +293,12 @@ return True
def delete(self, vector_id: str):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
@@ -247,6 +324,14 @@ vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (Optional[List[float]], optional): Updated vector. Defaults to None.
+ payload (Optional[Dict], optional): Updated payload. Defaults to None.
+ """
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
@@ -268,6 +353,15 @@ logger.info(f"Updated vector {vector_id} in collection {self.collection_name}")
def get(self, vector_id: str) -> OutputData:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Retrieved vector.
+ """
if self.index is None:
raise ValueError("Collection not initialized. Call create_col first.")
@@ -283,6 +377,12 @@ )
def list_cols(self) -> List[str]:
+ """
+ List all collections.
+
+ Returns:
+ List[str]: List of collection names.
+ """
if not self.path:
return [self.collection_name] if self.index else []
@@ -297,6 +397,9 @@ return [self.collection_name] if self.index else []
def delete_col(self):
+ """
+ Delete a collection.
+ """
if self.path:
try:
index_path = f"{self.path}/{self.collection_name}.faiss"
@@ -316,6 +419,12 @@ self.index_to_id = {}
def col_info(self) -> Dict:
+ """
+ Get information about a collection.
+
+ Returns:
+ Dict: Collection information.
+ """
if self.index is None:
return {"name": self.collection_name, "count": 0}
@@ -327,6 +436,16 @@ }
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
+ """
+ List all vectors in a collection.
+
+ Args:
+ filters (Optional[Dict], optional): Filters to apply to the list. Defaults to None.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
if self.index is None:
return []
@@ -354,6 +473,7 @@ return [results]
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_col(self.collection_name)+ self.create_col(self.collection_name)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/faiss.py |
Add missing documentation to my Python functions | import json
import logging
import re
from typing import List, Optional
from pydantic import BaseModel
from mem0.memory.utils import extract_json
from mem0.vector_stores.base import VectorStoreBase
try:
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
BinaryQuantizationCompression,
HnswAlgorithmConfiguration,
ScalarQuantizationCompression,
SearchField,
SearchFieldDataType,
SearchIndex,
SimpleField,
VectorSearch,
VectorSearchProfile,
)
from azure.search.documents.models import VectorizedQuery
except ImportError:
raise ImportError(
"The 'azure-search-documents' library is required. Please install it using 'pip install azure-search-documents==11.5.2'."
)
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str]
score: Optional[float]
payload: Optional[dict]
class AzureAISearch(VectorStoreBase):
def __init__(
self,
service_name,
collection_name,
api_key,
embedding_model_dims,
compression_type: Optional[str] = None,
use_float16: bool = False,
hybrid_search: bool = False,
vector_filter_mode: Optional[str] = None,
):
self.service_name = service_name
self.api_key = api_key
self.index_name = collection_name
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
# If compression_type is None, treat it as "none".
self.compression_type = (compression_type or "none").lower()
self.use_float16 = use_float16
self.hybrid_search = hybrid_search
self.vector_filter_mode = vector_filter_mode
# If the API key is not provided or is a placeholder, use DefaultAzureCredential.
if self.api_key is None or self.api_key == "" or self.api_key == "your-api-key":
credential = DefaultAzureCredential()
self.api_key = None
else:
credential = AzureKeyCredential(self.api_key)
self.search_client = SearchClient(
endpoint=f"https://{service_name}.search.windows.net",
index_name=self.index_name,
credential=credential,
)
self.index_client = SearchIndexClient(
endpoint=f"https://{service_name}.search.windows.net",
credential=credential,
)
self.search_client._client._config.user_agent_policy.add_user_agent("mem0")
self.index_client._client._config.user_agent_policy.add_user_agent("mem0")
collections = self.list_cols()
if collection_name not in collections:
self.create_col()
def create_col(self):
# Determine vector type based on use_float16 setting.
if self.use_float16:
vector_type = "Collection(Edm.Half)"
else:
vector_type = "Collection(Edm.Single)"
# Configure compression settings based on the specified compression_type.
compression_configurations = []
compression_name = None
if self.compression_type == "scalar":
compression_name = "myCompression"
# For SQ, rescoring defaults to True and oversampling defaults to 4.
compression_configurations = [
ScalarQuantizationCompression(
compression_name=compression_name
# rescoring defaults to True and oversampling defaults to 4
)
]
elif self.compression_type == "binary":
compression_name = "myCompression"
# For BQ, rescoring defaults to True and oversampling defaults to 10.
compression_configurations = [
BinaryQuantizationCompression(
compression_name=compression_name
# rescoring defaults to True and oversampling defaults to 10
)
]
# If no compression is desired, compression_configurations remains empty.
fields = [
SimpleField(name="id", type=SearchFieldDataType.String, key=True),
SimpleField(name="user_id", type=SearchFieldDataType.String, filterable=True),
SimpleField(name="run_id", type=SearchFieldDataType.String, filterable=True),
SimpleField(name="agent_id", type=SearchFieldDataType.String, filterable=True),
SearchField(
name="vector",
type=vector_type,
searchable=True,
vector_search_dimensions=self.embedding_model_dims,
vector_search_profile_name="my-vector-config",
),
SearchField(name="payload", type=SearchFieldDataType.String, searchable=True),
]
vector_search = VectorSearch(
profiles=[
VectorSearchProfile(
name="my-vector-config",
algorithm_configuration_name="my-algorithms-config",
compression_name=compression_name if self.compression_type != "none" else None,
)
],
algorithms=[HnswAlgorithmConfiguration(name="my-algorithms-config")],
compressions=compression_configurations,
)
index = SearchIndex(name=self.index_name, fields=fields, vector_search=vector_search)
self.index_client.create_or_update_index(index)
def _generate_document(self, vector, payload, id):
document = {"id": id, "vector": vector, "payload": json.dumps(payload)}
# Extract additional fields if they exist.
for field in ["user_id", "run_id", "agent_id"]:
if field in payload:
document[field] = payload[field]
return document
# Note: Explicit "insert" calls may later be decoupled from memory management decisions.
def insert(self, vectors, payloads=None, ids=None):
logger.info(f"Inserting {len(vectors)} vectors into index {self.index_name}")
documents = [
self._generate_document(vector, payload, id) for id, vector, payload in zip(ids, vectors, payloads)
]
response = self.search_client.upload_documents(documents)
for doc in response:
if not hasattr(doc, "status_code") and doc.get("status_code") != 201:
raise Exception(f"Insert failed for document {doc.get('id')}: {doc}")
return response
def _sanitize_key(self, key: str) -> str:
return re.sub(r"[^\w]", "", key)
def _build_filter_expression(self, filters):
filter_conditions = []
for key, value in filters.items():
safe_key = self._sanitize_key(key)
if isinstance(value, str):
safe_value = value.replace("'", "''")
condition = f"{safe_key} eq '{safe_value}'"
else:
condition = f"{safe_key} eq {value}"
filter_conditions.append(condition)
filter_expression = " and ".join(filter_conditions)
return filter_expression
def search(self, query, vectors, limit=5, filters=None):
filter_expression = None
if filters:
filter_expression = self._build_filter_expression(filters)
vector_query = VectorizedQuery(vector=vectors, k_nearest_neighbors=limit, fields="vector")
if self.hybrid_search:
search_results = self.search_client.search(
search_text=query,
vector_queries=[vector_query],
filter=filter_expression,
top=limit,
vector_filter_mode=self.vector_filter_mode,
search_fields=["payload"],
)
else:
search_results = self.search_client.search(
vector_queries=[vector_query],
filter=filter_expression,
top=limit,
vector_filter_mode=self.vector_filter_mode,
)
results = []
for result in search_results:
payload = json.loads(extract_json(result["payload"]))
results.append(OutputData(id=result["id"], score=result["@search.score"], payload=payload))
return results
def delete(self, vector_id):
response = self.search_client.delete_documents(documents=[{"id": vector_id}])
for doc in response:
if not hasattr(doc, "status_code") and doc.get("status_code") != 200:
raise Exception(f"Delete failed for document {vector_id}: {doc}")
logger.info(f"Deleted document with ID '{vector_id}' from index '{self.index_name}'.")
return response
def update(self, vector_id, vector=None, payload=None):
document = {"id": vector_id}
if vector:
document["vector"] = vector
if payload:
json_payload = json.dumps(payload)
document["payload"] = json_payload
for field in ["user_id", "run_id", "agent_id"]:
document[field] = payload.get(field)
response = self.search_client.merge_or_upload_documents(documents=[document])
for doc in response:
if not hasattr(doc, "status_code") and doc.get("status_code") != 200:
raise Exception(f"Update failed for document {vector_id}: {doc}")
return response
def get(self, vector_id) -> OutputData:
try:
result = self.search_client.get_document(key=vector_id)
except ResourceNotFoundError:
return None
payload = json.loads(extract_json(result["payload"]))
return OutputData(id=result["id"], score=None, payload=payload)
def list_cols(self) -> List[str]:
try:
names = self.index_client.list_index_names()
except AttributeError:
names = [index.name for index in self.index_client.list_indexes()]
return names
def delete_col(self):
self.index_client.delete_index(self.index_name)
def col_info(self):
index = self.index_client.get_index(self.index_name)
return {"name": index.name, "fields": index.fields}
def list(self, filters=None, limit=100):
filter_expression = None
if filters:
filter_expression = self._build_filter_expression(filters)
search_results = self.search_client.search(search_text="*", filter=filter_expression, top=limit)
results = []
for result in search_results:
payload = json.loads(extract_json(result["payload"]))
results.append(OutputData(id=result["id"], score=result["@search.score"], payload=payload))
return [results]
def __del__(self):
self.search_client.close()
self.index_client.close()
def reset(self):
logger.warning(f"Resetting index {self.index_name}...")
try:
# Close the existing clients
self.search_client.close()
self.index_client.close()
# Delete the collection
self.delete_col()
# If the API key is not provided or is a placeholder, use DefaultAzureCredential.
if self.api_key is None or self.api_key == "" or self.api_key == "your-api-key":
credential = DefaultAzureCredential()
self.api_key = None
else:
credential = AzureKeyCredential(self.api_key)
# Reinitialize the clients
service_endpoint = f"https://{self.service_name}.search.windows.net"
self.search_client = SearchClient(
endpoint=service_endpoint,
index_name=self.index_name,
credential=credential,
)
self.index_client = SearchIndexClient(
endpoint=service_endpoint,
credential=credential,
)
# Add user agent
self.search_client._client._config.user_agent_policy.add_user_agent("mem0")
self.index_client._client._config.user_agent_policy.add_user_agent("mem0")
# Create the collection
self.create_col()
except Exception as e:
logger.error(f"Error resetting index {self.index_name}: {e}")
raise | --- +++ @@ -52,6 +52,21 @@ hybrid_search: bool = False,
vector_filter_mode: Optional[str] = None,
):
+ """
+ Initialize the Azure AI Search vector store.
+
+ Args:
+ service_name (str): Azure AI Search service name.
+ collection_name (str): Index name.
+ api_key (str): API key for the Azure AI Search service.
+ embedding_model_dims (int): Dimension of the embedding vector.
+ compression_type (Optional[str]): Specifies the type of quantization to use.
+ Allowed values are None (no quantization), "scalar", or "binary".
+ use_float16 (bool): Whether to store vectors in half precision (Edm.Half) or full precision (Edm.Single).
+ (Note: This flag is preserved from the initial implementation per feedback.)
+ hybrid_search (bool): Whether to use hybrid search. Default is False.
+ vector_filter_mode (Optional[str]): Mode for vector filtering. Default is "preFilter".
+ """
self.service_name = service_name
self.api_key = api_key
self.index_name = collection_name
@@ -88,6 +103,7 @@ self.create_col()
def create_col(self):
+ """Create a new index in Azure AI Search."""
# Determine vector type based on use_float16 setting.
if self.use_float16:
vector_type = "Collection(Edm.Half)"
@@ -155,6 +171,14 @@
# Note: Explicit "insert" calls may later be decoupled from memory management decisions.
def insert(self, vectors, payloads=None, ids=None):
+ """
+ Insert vectors into the index.
+
+ Args:
+ vectors (List[List[float]]): List of vectors to insert.
+ payloads (List[Dict], optional): List of payloads corresponding to vectors.
+ ids (List[str], optional): List of IDs corresponding to vectors.
+ """
logger.info(f"Inserting {len(vectors)} vectors into index {self.index_name}")
documents = [
self._generate_document(vector, payload, id) for id, vector, payload in zip(ids, vectors, payloads)
@@ -182,6 +206,18 @@ return filter_expression
def search(self, query, vectors, limit=5, filters=None):
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query.
+ vectors (List[float]): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Dict, optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ List[OutputData]: Search results.
+ """
filter_expression = None
if filters:
filter_expression = self._build_filter_expression(filters)
@@ -211,6 +247,12 @@ return results
def delete(self, vector_id):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to delete.
+ """
response = self.search_client.delete_documents(documents=[{"id": vector_id}])
for doc in response:
if not hasattr(doc, "status_code") and doc.get("status_code") != 200:
@@ -219,6 +261,14 @@ return response
def update(self, vector_id, vector=None, payload=None):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (str): ID of the vector to update.
+ vector (List[float], optional): Updated vector.
+ payload (Dict, optional): Updated payload.
+ """
document = {"id": vector_id}
if vector:
document["vector"] = vector
@@ -234,6 +284,15 @@ return response
def get(self, vector_id) -> OutputData:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+
+ Returns:
+ OutputData: Retrieved vector.
+ """
try:
result = self.search_client.get_document(key=vector_id)
except ResourceNotFoundError:
@@ -242,6 +301,12 @@ return OutputData(id=result["id"], score=None, payload=payload)
def list_cols(self) -> List[str]:
+ """
+ List all collections (indexes).
+
+ Returns:
+ List[str]: List of index names.
+ """
try:
names = self.index_client.list_index_names()
except AttributeError:
@@ -249,13 +314,30 @@ return names
def delete_col(self):
+ """Delete the index."""
self.index_client.delete_index(self.index_name)
def col_info(self):
+ """
+ Get information about the index.
+
+ Returns:
+ dict: Index information.
+ """
index = self.index_client.get_index(self.index_name)
return {"name": index.name, "fields": index.fields}
def list(self, filters=None, limit=100):
+ """
+ List all vectors in the index.
+
+ Args:
+ filters (dict, optional): Filters to apply to the list.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ List[OutputData]: List of vectors.
+ """
filter_expression = None
if filters:
filter_expression = self._build_filter_expression(filters)
@@ -268,10 +350,12 @@ return [results]
def __del__(self):
+ """Close the search client when the object is deleted."""
self.search_client.close()
self.index_client.close()
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.index_name}...")
try:
@@ -309,4 +393,4 @@ self.create_col()
except Exception as e:
logger.error(f"Error resetting index {self.index_name}: {e}")
- raise+ raise
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/azure_ai_search.py |
Add minimal docstrings for each function | import json
import os
from typing import Dict, List, Optional
try:
from groq import Groq
except ImportError:
raise ImportError("The 'groq' library is required. Please install it using 'pip install groq'.")
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class GroqLLM(LLMBase):
def __init__(self, config: Optional[BaseLlmConfig] = None):
super().__init__(config)
if not self.config.model:
self.config.model = "llama3-70b-8192"
api_key = self.config.api_key or os.getenv("GROQ_API_KEY")
self.client = Groq(api_key=api_key)
def _parse_response(self, response, tools):
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
):
params = {
"model": self.config.model,
"messages": messages,
"temperature": self.config.temperature,
"max_tokens": self.config.max_tokens,
"top_p": self.config.top_p,
}
if response_format:
params["response_format"] = response_format
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | --- +++ @@ -23,6 +23,16 @@ self.client = Groq(api_key=api_key)
def _parse_response(self, response, tools):
+ """
+ Process the response based on whether tools are used or not.
+
+ Args:
+ response: The raw response from API.
+ tools: The list of tools provided in the request.
+
+ Returns:
+ str or dict: The processed response.
+ """
if tools:
processed_response = {
"content": response.choices[0].message.content,
@@ -49,6 +59,18 @@ tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
):
+ """
+ Generate a response based on the given messages using Groq.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+ response_format (str or object, optional): Format of the response. Defaults to "text".
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+ Returns:
+ str: The generated response.
+ """
params = {
"model": self.config.model,
"messages": messages,
@@ -63,4 +85,4 @@ params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
- return self._parse_response(response, tools)+ return self._parse_response(response, tools)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/llms/groq.py |
Improve documentation using docstrings | import logging
import traceback
import uuid
from typing import Any, Dict, List, Optional, Tuple
import google.api_core.exceptions
from google.cloud import aiplatform, aiplatform_v1
from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import Namespace
from google.oauth2 import service_account
from pydantic import BaseModel
try:
from langchain_core.documents import Document
except ImportError: # pragma: no cover - fallback for older LangChain versions
from langchain.schema import Document # type: ignore[no-redef]
from mem0.configs.vector_stores.vertex_ai_vector_search import (
GoogleMatchingEngineConfig,
)
from mem0.vector_stores.base import VectorStoreBase
# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class OutputData(BaseModel):
id: Optional[str] # memory id
score: Optional[float] # distance
payload: Optional[Dict] # metadata
class GoogleMatchingEngine(VectorStoreBase):
def __init__(self, **kwargs):
logger.debug("Initializing Google Matching Engine with kwargs: %s", kwargs)
# If collection_name is passed, use it as deployment_index_id if deployment_index_id is not provided
if "collection_name" in kwargs and "deployment_index_id" not in kwargs:
kwargs["deployment_index_id"] = kwargs["collection_name"]
logger.debug("Using collection_name as deployment_index_id: %s", kwargs["deployment_index_id"])
elif "deployment_index_id" in kwargs and "collection_name" not in kwargs:
kwargs["collection_name"] = kwargs["deployment_index_id"]
logger.debug("Using deployment_index_id as collection_name: %s", kwargs["collection_name"])
try:
config = GoogleMatchingEngineConfig(**kwargs)
logger.debug("Config created: %s", config.model_dump())
logger.debug("Config collection_name: %s", getattr(config, "collection_name", None))
except Exception as e:
logger.error("Failed to validate config: %s", str(e))
raise
self.project_id = config.project_id
self.project_number = config.project_number
self.region = config.region
self.endpoint_id = config.endpoint_id
self.index_id = config.index_id # The actual index ID
self.deployment_index_id = config.deployment_index_id # The deployment-specific ID
self.collection_name = config.collection_name
self.vector_search_api_endpoint = config.vector_search_api_endpoint
logger.debug("Using project=%s, location=%s", self.project_id, self.region)
# Initialize Vertex AI with credentials if provided
init_args = {
"project": self.project_id,
"location": self.region,
}
# Support both credentials_path and service_account_json
if hasattr(config, "credentials_path") and config.credentials_path:
logger.debug("Using credentials from file: %s", config.credentials_path)
credentials = service_account.Credentials.from_service_account_file(config.credentials_path)
init_args["credentials"] = credentials
elif hasattr(config, "service_account_json") and config.service_account_json:
logger.debug("Using credentials from provided JSON dict")
credentials = service_account.Credentials.from_service_account_info(config.service_account_json)
init_args["credentials"] = credentials
try:
aiplatform.init(**init_args)
logger.debug("Vertex AI initialized successfully")
except Exception as e:
logger.error("Failed to initialize Vertex AI: %s", str(e))
raise
try:
# Format the index path properly using the configured index_id
index_path = f"projects/{self.project_number}/locations/{self.region}/indexes/{self.index_id}"
logger.debug("Initializing index with path: %s", index_path)
self.index = aiplatform.MatchingEngineIndex(index_name=index_path)
logger.debug("Index initialized successfully")
# Format the endpoint name properly
endpoint_name = self.endpoint_id
logger.debug("Initializing endpoint with name: %s", endpoint_name)
self.index_endpoint = aiplatform.MatchingEngineIndexEndpoint(index_endpoint_name=endpoint_name)
logger.debug("Endpoint initialized successfully")
except Exception as e:
logger.error("Failed to initialize Matching Engine components: %s", str(e))
raise ValueError(f"Invalid configuration: {str(e)}")
def _parse_output(self, data: Dict) -> List[OutputData]:
results = data.get("nearestNeighbors", {}).get("neighbors", [])
output_data = []
for result in results:
output_data.append(
OutputData(
id=result.get("datapoint").get("datapointId"),
score=result.get("distance"),
payload=result.get("datapoint").get("metadata"),
)
)
return output_data
def _create_restriction(self, key: str, value: Any) -> aiplatform_v1.types.index.IndexDatapoint.Restriction:
str_value = str(value) if value is not None else ""
return aiplatform_v1.types.index.IndexDatapoint.Restriction(namespace=key, allow_list=[str_value])
def _create_datapoint(
self, vector_id: str, vector: List[float], payload: Optional[Dict] = None
) -> aiplatform_v1.types.index.IndexDatapoint:
restrictions = []
if payload:
restrictions = [self._create_restriction(key, value) for key, value in payload.items()]
return aiplatform_v1.types.index.IndexDatapoint(
datapoint_id=vector_id, feature_vector=vector, restricts=restrictions
)
def insert(
self,
vectors: List[list],
payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
) -> None:
if not vectors:
raise ValueError("No vectors provided for insertion")
if payloads and len(payloads) != len(vectors):
raise ValueError(f"Number of payloads ({len(payloads)}) does not match number of vectors ({len(vectors)})")
if ids and len(ids) != len(vectors):
raise ValueError(f"Number of ids ({len(ids)}) does not match number of vectors ({len(vectors)})")
logger.debug("Starting insert of %d vectors", len(vectors))
try:
datapoints = [
self._create_datapoint(
vector_id=ids[i] if ids else str(uuid.uuid4()),
vector=vector,
payload=payloads[i] if payloads and i < len(payloads) else None,
)
for i, vector in enumerate(vectors)
]
logger.debug("Created %d datapoints", len(datapoints))
self.index.upsert_datapoints(datapoints=datapoints)
logger.debug("Successfully inserted datapoints")
except google.api_core.exceptions.GoogleAPIError as e:
logger.error("Failed to insert vectors: %s", str(e))
raise
except Exception as e:
logger.error("Unexpected error during insert: %s", str(e))
logger.error("Stack trace: %s", traceback.format_exc())
raise
def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
logger.debug("Starting search")
logger.debug("Limit: %d, Filters: %s", limit, filters)
try:
filter_namespaces = []
if filters:
logger.debug("Processing filters")
for key, value in filters.items():
logger.debug("Processing filter %s=%s (type=%s)", key, value, type(value))
if isinstance(value, (str, int, float)):
logger.debug("Adding simple filter for %s", key)
filter_namespaces.append(Namespace(key, [str(value)], []))
elif isinstance(value, dict):
logger.debug("Adding complex filter for %s", key)
includes = value.get("include", [])
excludes = value.get("exclude", [])
filter_namespaces.append(Namespace(key, includes, excludes))
logger.debug("Final filter_namespaces: %s", filter_namespaces)
response = self.index_endpoint.find_neighbors(
deployed_index_id=self.deployment_index_id,
queries=[vectors],
num_neighbors=limit,
filter=filter_namespaces if filter_namespaces else None,
return_full_datapoint=True,
)
if not response or len(response) == 0 or len(response[0]) == 0:
logger.debug("No results found")
return []
results = []
for neighbor in response[0]:
logger.debug("Processing neighbor - id: %s, distance: %s", neighbor.id, neighbor.distance)
payload = {}
if hasattr(neighbor, "restricts"):
logger.debug("Processing restricts")
for restrict in neighbor.restricts:
if hasattr(restrict, "name") and hasattr(restrict, "allow_tokens") and restrict.allow_tokens:
logger.debug("Adding %s: %s", restrict.name, restrict.allow_tokens[0])
payload[restrict.name] = restrict.allow_tokens[0]
output_data = OutputData(id=neighbor.id, score=neighbor.distance, payload=payload)
results.append(output_data)
logger.debug("Returning %d results", len(results))
return results
except Exception as e:
logger.error("Error occurred: %s", str(e))
logger.error("Error type: %s", type(e))
logger.error("Stack trace: %s", traceback.format_exc())
raise
def delete(self, vector_id: Optional[str] = None, ids: Optional[List[str]] = None) -> bool:
logger.debug("Starting delete, vector_id: %s, ids: %s", vector_id, ids)
try:
# Handle both single vector_id and list of ids
if vector_id:
datapoint_ids = [vector_id]
elif ids:
datapoint_ids = ids
else:
raise ValueError("Either vector_id or ids must be provided")
logger.debug("Deleting ids: %s", datapoint_ids)
try:
self.index.remove_datapoints(datapoint_ids=datapoint_ids)
logger.debug("Delete completed successfully")
return True
except google.api_core.exceptions.NotFound:
# If the datapoint is already deleted, consider it a success
logger.debug("Datapoint already deleted")
return True
except google.api_core.exceptions.PermissionDenied as e:
logger.error("Permission denied: %s", str(e))
return False
except google.api_core.exceptions.InvalidArgument as e:
logger.error("Invalid argument: %s", str(e))
return False
except Exception as e:
logger.error("Error occurred: %s", str(e))
logger.error("Error type: %s", type(e))
logger.error("Stack trace: %s", traceback.format_exc())
return False
def update(
self,
vector_id: str,
vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
) -> bool:
logger.debug("Starting update for vector_id: %s", vector_id)
if vector is None and payload is None:
raise ValueError("Either vector or payload must be provided for update")
# First check if the vector exists
try:
existing = self.get(vector_id)
if existing is None:
logger.error("Vector ID not found: %s", vector_id)
return False
datapoint = self._create_datapoint(
vector_id=vector_id, vector=vector if vector is not None else [], payload=payload
)
logger.debug("Upserting datapoint: %s", datapoint)
self.index.upsert_datapoints(datapoints=[datapoint])
logger.debug("Update completed successfully")
return True
except google.api_core.exceptions.GoogleAPIError as e:
logger.error("API error during update: %s", str(e))
return False
except Exception as e:
logger.error("Unexpected error during update: %s", str(e))
logger.error("Stack trace: %s", traceback.format_exc())
raise
def get(self, vector_id: str) -> Optional[OutputData]:
logger.debug("Starting get for vector_id: %s", vector_id)
try:
if not self.vector_search_api_endpoint:
raise ValueError("vector_search_api_endpoint is required for get operation")
vector_search_client = aiplatform_v1.MatchServiceClient(
client_options={"api_endpoint": self.vector_search_api_endpoint},
)
datapoint = aiplatform_v1.IndexDatapoint(datapoint_id=vector_id)
query = aiplatform_v1.FindNeighborsRequest.Query(datapoint=datapoint, neighbor_count=1)
request = aiplatform_v1.FindNeighborsRequest(
index_endpoint=f"projects/{self.project_number}/locations/{self.region}/indexEndpoints/{self.endpoint_id}",
deployed_index_id=self.deployment_index_id,
queries=[query],
return_full_datapoint=True,
)
try:
response = vector_search_client.find_neighbors(request)
logger.debug("Got response")
if response and response.nearest_neighbors:
nearest = response.nearest_neighbors[0]
if nearest.neighbors:
neighbor = nearest.neighbors[0]
payload = {}
if hasattr(neighbor.datapoint, "restricts"):
for restrict in neighbor.datapoint.restricts:
if restrict.allow_list:
payload[restrict.namespace] = restrict.allow_list[0]
return OutputData(id=neighbor.datapoint.datapoint_id, score=neighbor.distance, payload=payload)
logger.debug("No results found")
return None
except google.api_core.exceptions.NotFound:
logger.debug("Datapoint not found")
return None
except google.api_core.exceptions.PermissionDenied as e:
logger.error("Permission denied: %s", str(e))
return None
except Exception as e:
logger.error("Error occurred: %s", str(e))
logger.error("Error type: %s", type(e))
logger.error("Stack trace: %s", traceback.format_exc())
raise
def list_cols(self) -> List[str]:
return [self.deployment_index_id]
def delete_col(self):
logger.warning("Delete collection operation is not supported for Google Matching Engine")
pass
def col_info(self) -> Dict:
return {
"index_id": self.index_id,
"endpoint_id": self.endpoint_id,
"project_id": self.project_id,
"region": self.region,
}
def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[List[OutputData]]:
logger.debug("Starting list operation")
logger.debug("Filters: %s", filters)
logger.debug("Limit: %s", limit)
try:
# Use a zero vector for the search
dimension = 768 # This should be configurable based on the model
zero_vector = [0.0] * dimension
# Use a large limit if none specified
search_limit = limit if limit is not None else 10000
results = self.search(query=zero_vector, limit=search_limit, filters=filters)
logger.debug("Found %d results", len(results))
return [results] # Wrap in extra array to match interface
except Exception as e:
logger.error("Error in list operation: %s", str(e))
logger.error("Stack trace: %s", traceback.format_exc())
raise
def create_col(self, name=None, vector_size=None, distance=None):
# Google Matching Engine indexes are created through Google Cloud Console
# This method is included only to satisfy the abstract base class
pass
def add(self, text: str, metadata: Optional[Dict] = None, user_id: Optional[str] = None) -> str:
logger.debug("Starting add operation")
logger.debug("Text: %s", text)
logger.debug("Metadata: %s", metadata)
logger.debug("User ID: %s", user_id)
try:
# Generate a unique ID for this entry
vector_id = str(uuid.uuid4())
# Create the payload with all necessary fields
payload = {
"data": text, # Store the text in the data field
"user_id": user_id,
**(metadata or {}),
}
# Get the embedding
vector = self.embedder.embed_query(text)
# Insert using the insert method
self.insert(vectors=[vector], payloads=[payload], ids=[vector_id])
return vector_id
except Exception as e:
logger.error("Error occurred: %s", str(e))
raise
def add_texts(
self,
texts: List[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
) -> List[str]:
if not texts:
raise ValueError("No texts provided")
if metadatas and len(metadatas) != len(texts):
raise ValueError(
f"Number of metadata items ({len(metadatas)}) does not match number of texts ({len(texts)})"
)
if ids and len(ids) != len(texts):
raise ValueError(f"Number of ids ({len(ids)}) does not match number of texts ({len(texts)})")
logger.debug("Starting add_texts operation")
logger.debug("Number of texts: %d", len(texts))
logger.debug("Has metadatas: %s", metadatas is not None)
logger.debug("Has ids: %s", ids is not None)
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
try:
# Get embeddings
embeddings = self.embedder.embed_documents(texts)
# Add to store
self.insert(vectors=embeddings, payloads=metadatas if metadatas else [{}] * len(texts), ids=ids)
return ids
except Exception as e:
logger.error("Error in add_texts: %s", str(e))
logger.error("Stack trace: %s", traceback.format_exc())
raise
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Any,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> "GoogleMatchingEngine":
logger.debug("Creating instance from texts")
store = cls(**kwargs)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return store
def similarity_search_with_score(
self,
query: str,
k: int = 5,
filter: Optional[Dict] = None,
) -> List[Tuple[Document, float]]:
logger.debug("Starting similarity search with score")
logger.debug("Query: %s", query)
logger.debug("k: %d", k)
logger.debug("Filter: %s", filter)
embedding = self.embedder.embed_query(query)
results = self.search(query=embedding, limit=k, filters=filter)
docs_and_scores = [
(Document(page_content=result.payload.get("text", ""), metadata=result.payload), result.score)
for result in results
]
logger.debug("Found %d results", len(docs_and_scores))
return docs_and_scores
def similarity_search(
self,
query: str,
k: int = 5,
filter: Optional[Dict] = None,
) -> List[Document]:
logger.debug("Starting similarity search")
docs_and_scores = self.similarity_search_with_score(query, k, filter)
return [doc for doc, _ in docs_and_scores]
def reset(self):
logger.warning("Reset operation is not supported for Google Matching Engine")
pass | --- +++ @@ -32,6 +32,7 @@
class GoogleMatchingEngine(VectorStoreBase):
def __init__(self, **kwargs):
+ """Initialize Google Matching Engine client."""
logger.debug("Initializing Google Matching Engine with kwargs: %s", kwargs)
# If collection_name is passed, use it as deployment_index_id if deployment_index_id is not provided
@@ -101,6 +102,13 @@ raise ValueError(f"Invalid configuration: {str(e)}")
def _parse_output(self, data: Dict) -> List[OutputData]:
+ """
+ Parse the output data.
+ Args:
+ data (Dict): Output data.
+ Returns:
+ List[OutputData]: Parsed output data.
+ """
results = data.get("nearestNeighbors", {}).get("neighbors", [])
output_data = []
for result in results:
@@ -114,12 +122,31 @@ return output_data
def _create_restriction(self, key: str, value: Any) -> aiplatform_v1.types.index.IndexDatapoint.Restriction:
+ """Create a restriction object for the Matching Engine index.
+
+ Args:
+ key: The namespace/key for the restriction
+ value: The value to restrict on
+
+ Returns:
+ Restriction object for the index
+ """
str_value = str(value) if value is not None else ""
return aiplatform_v1.types.index.IndexDatapoint.Restriction(namespace=key, allow_list=[str_value])
def _create_datapoint(
self, vector_id: str, vector: List[float], payload: Optional[Dict] = None
) -> aiplatform_v1.types.index.IndexDatapoint:
+ """Create a datapoint object for the Matching Engine index.
+
+ Args:
+ vector_id: The ID for the datapoint
+ vector: The vector to store
+ payload: Optional metadata to store with the vector
+
+ Returns:
+ IndexDatapoint object
+ """
restrictions = []
if payload:
restrictions = [self._create_restriction(key, value) for key, value in payload.items()]
@@ -134,6 +161,17 @@ payloads: Optional[List[Dict]] = None,
ids: Optional[List[str]] = None,
) -> None:
+ """Insert vectors into the Matching Engine index.
+
+ Args:
+ vectors: List of vectors to insert
+ payloads: Optional list of metadata dictionaries
+ ids: Optional list of IDs for the vectors
+
+ Raises:
+ ValueError: If vectors is empty or lengths don't match
+ GoogleAPIError: If the API call fails
+ """
if not vectors:
raise ValueError("No vectors provided for insertion")
@@ -170,6 +208,16 @@ def search(
self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
) -> List[OutputData]:
+ """
+ Search for similar vectors.
+ Args:
+ query (str): Query.
+ vectors (List[float]): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (Optional[Dict], optional): Filters to apply to the search. Defaults to None.
+ Returns:
+ List[OutputData]: Search results (unwrapped)
+ """
logger.debug("Starting search")
logger.debug("Limit: %d, Filters: %s", limit, filters)
@@ -227,6 +275,14 @@ raise
def delete(self, vector_id: Optional[str] = None, ids: Optional[List[str]] = None) -> bool:
+ """
+ Delete vectors from the Matching Engine index.
+ Args:
+ vector_id (Optional[str]): Single ID to delete (for backward compatibility)
+ ids (Optional[List[str]]): List of IDs of vectors to delete
+ Returns:
+ bool: True if vectors were deleted successfully or already deleted, False if error
+ """
logger.debug("Starting delete, vector_id: %s, ids: %s", vector_id, ids)
try:
# Handle both single vector_id and list of ids
@@ -265,6 +321,20 @@ vector: Optional[List[float]] = None,
payload: Optional[Dict] = None,
) -> bool:
+ """Update a vector and its payload.
+
+ Args:
+ vector_id: ID of the vector to update
+ vector: Optional new vector values
+ payload: Optional new metadata payload
+
+ Returns:
+ bool: True if update was successful
+
+ Raises:
+ ValueError: If neither vector nor payload is provided
+ GoogleAPIError: If the API call fails
+ """
logger.debug("Starting update for vector_id: %s", vector_id)
if vector is None and payload is None:
@@ -295,6 +365,13 @@ raise
def get(self, vector_id: str) -> Optional[OutputData]:
+ """
+ Retrieve a vector by ID.
+ Args:
+ vector_id (str): ID of the vector to retrieve.
+ Returns:
+ Optional[OutputData]: Retrieved vector or None if not found.
+ """
logger.debug("Starting get for vector_id: %s", vector_id)
try:
@@ -348,13 +425,27 @@ raise
def list_cols(self) -> List[str]:
+ """
+ List all collections (indexes).
+ Returns:
+ List[str]: List of collection names.
+ """
return [self.deployment_index_id]
def delete_col(self):
+ """
+ Delete a collection (index).
+ Note: This operation is not supported through the API.
+ """
logger.warning("Delete collection operation is not supported for Google Matching Engine")
pass
def col_info(self) -> Dict:
+ """
+ Get information about a collection (index).
+ Returns:
+ Dict: Collection information.
+ """
return {
"index_id": self.index_id,
"endpoint_id": self.endpoint_id,
@@ -363,6 +454,16 @@ }
def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[List[OutputData]]:
+ """List vectors matching the given filters.
+
+ Args:
+ filters: Optional filters to apply
+ limit: Optional maximum number of results to return
+
+ Returns:
+ List[List[OutputData]]: List of matching vectors wrapped in an extra array
+ to match the interface
+ """
logger.debug("Starting list operation")
logger.debug("Filters: %s", filters)
logger.debug("Limit: %s", limit)
@@ -386,6 +487,16 @@ raise
def create_col(self, name=None, vector_size=None, distance=None):
+ """
+ Create a new collection. For Google Matching Engine, collections (indexes)
+ are created through the Google Cloud Console or API separately.
+ This method is a no-op since indexes are pre-created.
+
+ Args:
+ name: Ignored for Google Matching Engine
+ vector_size: Ignored for Google Matching Engine
+ distance: Ignored for Google Matching Engine
+ """
# Google Matching Engine indexes are created through Google Cloud Console
# This method is included only to satisfy the abstract base class
pass
@@ -425,6 +536,19 @@ metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
) -> List[str]:
+ """Add texts to the vector store.
+
+ Args:
+ texts: List of texts to add
+ metadatas: Optional list of metadata dicts
+ ids: Optional list of IDs to use
+
+ Returns:
+ List[str]: List of IDs of the added texts
+
+ Raises:
+ ValueError: If texts is empty or lengths don't match
+ """
if not texts:
raise ValueError("No texts provided")
@@ -466,6 +590,7 @@ ids: Optional[List[str]] = None,
**kwargs: Any,
) -> "GoogleMatchingEngine":
+ """Create an instance from texts."""
logger.debug("Creating instance from texts")
store = cls(**kwargs)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
@@ -477,6 +602,7 @@ k: int = 5,
filter: Optional[Dict] = None,
) -> List[Tuple[Document, float]]:
+ """Return documents most similar to query with scores."""
logger.debug("Starting similarity search with score")
logger.debug("Query: %s", query)
logger.debug("k: %d", k)
@@ -498,10 +624,14 @@ k: int = 5,
filter: Optional[Dict] = None,
) -> List[Document]:
+ """Return documents most similar to query."""
logger.debug("Starting similarity search")
docs_and_scores = self.similarity_search_with_score(query, k, filter)
return [doc for doc, _ in docs_and_scores]
def reset(self):
+ """
+ Reset the Google Matching Engine index.
+ """
logger.warning("Reset operation is not supported for Google Matching Engine")
- pass+ pass
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/vertex_ai_vector_search.py |
Add docstrings explaining edge cases | import os
import sys
from logging.config import fileConfig
from alembic import context
from dotenv import load_dotenv
from sqlalchemy import engine_from_config, pool
# Add the parent directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Load environment variables
load_dotenv()
# Import your models here - moved after path setup
from app.database import Base # noqa: E402
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
url = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
configuration = config.get_section(config.config_ini_section)
configuration["sqlalchemy.url"] = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
connectable = engine_from_config(
configuration,
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online() | --- +++ @@ -35,6 +35,17 @@
def run_migrations_offline() -> None:
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
url = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
context.configure(
url=url,
@@ -48,6 +59,12 @@
def run_migrations_online() -> None:
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
configuration = config.get_section(config.config_ini_section)
configuration["sqlalchemy.url"] = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
connectable = engine_from_config(
@@ -68,4 +85,4 @@ if context.is_offline_mode():
run_migrations_offline()
else:
- run_migrations_online()+ run_migrations_online()
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/openmemory/api/alembic/env.py |
Add docstrings to improve code quality | import logging
import os
import shutil
from qdrant_client import QdrantClient
from qdrant_client.models import (
Distance,
FieldCondition,
Filter,
MatchValue,
PointIdsList,
PointStruct,
Range,
VectorParams,
)
from mem0.vector_stores.base import VectorStoreBase
logger = logging.getLogger(__name__)
class Qdrant(VectorStoreBase):
def __init__(
self,
collection_name: str,
embedding_model_dims: int,
client: QdrantClient = None,
host: str = None,
port: int = None,
path: str = None,
url: str = None,
api_key: str = None,
on_disk: bool = False,
):
if client:
self.client = client
self.is_local = False
else:
params = {}
if api_key:
params["api_key"] = api_key
if url:
params["url"] = url
if host and port:
params["host"] = host
params["port"] = port
if not params:
params["path"] = path
self.is_local = True
if not on_disk:
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
else:
self.is_local = False
self.client = QdrantClient(**params)
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.on_disk = on_disk
self.create_col(embedding_model_dims, on_disk)
def create_col(self, vector_size: int, on_disk: bool, distance: Distance = Distance.COSINE):
# Skip creating collection if already exists
response = self.list_cols()
for collection in response.collections:
if collection.name == self.collection_name:
logger.debug(f"Collection {self.collection_name} already exists. Skipping creation.")
self._create_filter_indexes()
return
self.client.create_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(size=vector_size, distance=distance, on_disk=on_disk),
)
self._create_filter_indexes()
def _create_filter_indexes(self):
# Only create payload indexes for remote Qdrant servers
if self.is_local:
logger.debug("Skipping payload index creation for local Qdrant (not supported)")
return
common_fields = ["user_id", "agent_id", "run_id", "actor_id"]
for field in common_fields:
try:
self.client.create_payload_index(
collection_name=self.collection_name,
field_name=field,
field_schema="keyword"
)
logger.info(f"Created index for {field} in collection {self.collection_name}")
except Exception as e:
logger.debug(f"Index for {field} might already exist: {e}")
def insert(self, vectors: list, payloads: list = None, ids: list = None):
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
points = [
PointStruct(
id=idx if ids is None else ids[idx],
vector=vector,
payload=payloads[idx] if payloads else {},
)
for idx, vector in enumerate(vectors)
]
self.client.upsert(collection_name=self.collection_name, points=points)
def _create_filter(self, filters: dict) -> Filter:
if not filters:
return None
conditions = []
for key, value in filters.items():
if isinstance(value, dict) and "gte" in value and "lte" in value:
conditions.append(FieldCondition(key=key, range=Range(gte=value["gte"], lte=value["lte"])))
else:
conditions.append(FieldCondition(key=key, match=MatchValue(value=value)))
return Filter(must=conditions) if conditions else None
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> list:
query_filter = self._create_filter(filters) if filters else None
hits = self.client.query_points(
collection_name=self.collection_name,
query=vectors,
query_filter=query_filter,
limit=limit,
)
return hits.points
def delete(self, vector_id: int):
self.client.delete(
collection_name=self.collection_name,
points_selector=PointIdsList(
points=[vector_id],
),
)
def update(self, vector_id: int, vector: list = None, payload: dict = None):
point = PointStruct(id=vector_id, vector=vector, payload=payload)
self.client.upsert(collection_name=self.collection_name, points=[point])
def get(self, vector_id: int) -> dict:
result = self.client.retrieve(collection_name=self.collection_name, ids=[vector_id], with_payload=True)
return result[0] if result else None
def list_cols(self) -> list:
return self.client.get_collections()
def delete_col(self):
self.client.delete_collection(collection_name=self.collection_name)
def col_info(self) -> dict:
return self.client.get_collection(collection_name=self.collection_name)
def list(self, filters: dict = None, limit: int = 100) -> list:
query_filter = self._create_filter(filters) if filters else None
result = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=query_filter,
limit=limit,
with_payload=True,
with_vectors=False,
)
return result
def reset(self):
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
self.create_col(self.embedding_model_dims, self.on_disk) | --- +++ @@ -32,6 +32,20 @@ api_key: str = None,
on_disk: bool = False,
):
+ """
+ Initialize the Qdrant vector store.
+
+ Args:
+ collection_name (str): Name of the collection.
+ embedding_model_dims (int): Dimensions of the embedding model.
+ client (QdrantClient, optional): Existing Qdrant client instance. Defaults to None.
+ host (str, optional): Host address for Qdrant server. Defaults to None.
+ port (int, optional): Port for Qdrant server. Defaults to None.
+ path (str, optional): Path for local Qdrant database. Defaults to None.
+ url (str, optional): Full URL for Qdrant server. Defaults to None.
+ api_key (str, optional): API key for Qdrant server. Defaults to None.
+ on_disk (bool, optional): Enables persistent storage. Defaults to False.
+ """
if client:
self.client = client
self.is_local = False
@@ -62,6 +76,14 @@ self.create_col(embedding_model_dims, on_disk)
def create_col(self, vector_size: int, on_disk: bool, distance: Distance = Distance.COSINE):
+ """
+ Create a new collection.
+
+ Args:
+ vector_size (int): Size of the vectors to be stored.
+ on_disk (bool): Enables persistent storage.
+ distance (Distance, optional): Distance metric for vector similarity. Defaults to Distance.COSINE.
+ """
# Skip creating collection if already exists
response = self.list_cols()
for collection in response.collections:
@@ -77,6 +99,7 @@ self._create_filter_indexes()
def _create_filter_indexes(self):
+ """Create indexes for commonly used filter fields to enable filtering."""
# Only create payload indexes for remote Qdrant servers
if self.is_local:
logger.debug("Skipping payload index creation for local Qdrant (not supported)")
@@ -96,6 +119,14 @@ logger.debug(f"Index for {field} might already exist: {e}")
def insert(self, vectors: list, payloads: list = None, ids: list = None):
+ """
+ Insert vectors into a collection.
+
+ Args:
+ vectors (list): List of vectors to insert.
+ payloads (list, optional): List of payloads corresponding to vectors. Defaults to None.
+ ids (list, optional): List of IDs corresponding to vectors. Defaults to None.
+ """
logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
points = [
PointStruct(
@@ -108,6 +139,15 @@ self.client.upsert(collection_name=self.collection_name, points=points)
def _create_filter(self, filters: dict) -> Filter:
+ """
+ Create a Filter object from the provided filters.
+
+ Args:
+ filters (dict): Filters to apply.
+
+ Returns:
+ Filter: The created Filter object.
+ """
if not filters:
return None
@@ -120,6 +160,18 @@ return Filter(must=conditions) if conditions else None
def search(self, query: str, vectors: list, limit: int = 5, filters: dict = None) -> list:
+ """
+ Search for similar vectors.
+
+ Args:
+ query (str): Query.
+ vectors (list): Query vector.
+ limit (int, optional): Number of results to return. Defaults to 5.
+ filters (dict, optional): Filters to apply to the search. Defaults to None.
+
+ Returns:
+ list: Search results.
+ """
query_filter = self._create_filter(filters) if filters else None
hits = self.client.query_points(
collection_name=self.collection_name,
@@ -130,6 +182,12 @@ return hits.points
def delete(self, vector_id: int):
+ """
+ Delete a vector by ID.
+
+ Args:
+ vector_id (int): ID of the vector to delete.
+ """
self.client.delete(
collection_name=self.collection_name,
points_selector=PointIdsList(
@@ -138,23 +196,63 @@ )
def update(self, vector_id: int, vector: list = None, payload: dict = None):
+ """
+ Update a vector and its payload.
+
+ Args:
+ vector_id (int): ID of the vector to update.
+ vector (list, optional): Updated vector. Defaults to None.
+ payload (dict, optional): Updated payload. Defaults to None.
+ """
point = PointStruct(id=vector_id, vector=vector, payload=payload)
self.client.upsert(collection_name=self.collection_name, points=[point])
def get(self, vector_id: int) -> dict:
+ """
+ Retrieve a vector by ID.
+
+ Args:
+ vector_id (int): ID of the vector to retrieve.
+
+ Returns:
+ dict: Retrieved vector.
+ """
result = self.client.retrieve(collection_name=self.collection_name, ids=[vector_id], with_payload=True)
return result[0] if result else None
def list_cols(self) -> list:
+ """
+ List all collections.
+
+ Returns:
+ list: List of collection names.
+ """
return self.client.get_collections()
def delete_col(self):
+ """Delete a collection."""
self.client.delete_collection(collection_name=self.collection_name)
def col_info(self) -> dict:
+ """
+ Get information about a collection.
+
+ Returns:
+ dict: Collection information.
+ """
return self.client.get_collection(collection_name=self.collection_name)
def list(self, filters: dict = None, limit: int = 100) -> list:
+ """
+ List all vectors in a collection.
+
+ Args:
+ filters (dict, optional): Filters to apply to the list. Defaults to None.
+ limit (int, optional): Number of vectors to return. Defaults to 100.
+
+ Returns:
+ list: List of vectors.
+ """
query_filter = self._create_filter(filters) if filters else None
result = self.client.scroll(
collection_name=self.collection_name,
@@ -166,6 +264,7 @@ return result
def reset(self):
+ """Reset the index by deleting and recreating it."""
logger.warning(f"Resetting index {self.collection_name}...")
self.delete_col()
- self.create_col(self.embedding_model_dims, self.on_disk)+ self.create_col(self.embedding_model_dims, self.on_disk)
| https://raw.githubusercontent.com/mem0ai/mem0/HEAD/mem0/vector_stores/qdrant.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.