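"""Build and reload a Chroma vectorstore over the PDF meal plans.

Fetches the real PDF from Hugging Face when only a Git LFS pointer is
present on disk, chunks the extracted text, and embeds it with MiniLM.
"""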
import os
import fitz  # PyMuPDF
import requests
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document

CHROMA_DIR = os.path.abspath("chroma")
print("πŸ“‚ Loading vectorstore from:", CHROMA_DIR)
MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"

# Set this to your actual file on HF; the /resolve/ URL serves the real file
# contents (following any Git LFS pointer) rather than the pointer stub.
HF_FILE_URL = "https://huggingface.co/spaces/DurgaDeepak/eat2fit/resolve/main/meal_plans/Lafayette%2C%20Natasha%20-%20Fit%20By%20Tasha%20High%20Protein%20Recipes%20_%2052%20High%20Protein%20Clean%20Recipes%20%26%20Meal%20Plan%20(2021).pdf"

def ensure_pdf_downloaded(local_path: str, url: str):
    # Download when the file is missing or is only a tiny Git LFS pointer stub.
    # (Checking existence alone would skip the download, since the pointer
    # file itself exists on disk.)
    if os.path.exists(local_path) and os.path.getsize(local_path) >= 1000:
        return
    print(f"Downloading large PDF from: {url}")
    response = requests.get(url, timeout=120)
    if response.status_code == 200:
        with open(local_path, "wb") as f:
            f.write(response.content)
        print("PDF downloaded successfully.")
    else:
        raise RuntimeError(f"Failed to download PDF: {response.status_code}")

def load_and_chunk_pdfs(folder_path):
    documents = []

    for filename in os.listdir(folder_path):
        if filename.endswith(".pdf"):
            path = os.path.join(folder_path, filename)

            # Try downloading the file if it's missing or an LFS pointer
            if os.path.getsize(path) < 1000:  # LFS pointer files are tiny
                ensure_pdf_downloaded(path, HF_FILE_URL)

            with fitz.open(path) as doc:
                # Extract each page's text once and keep only non-empty pages.
                text = "\n".join(t for t in (page.get_text() for page in doc) if t)
            documents.append(Document(page_content=text, metadata={"source": filename}))

    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_documents(documents)
    return chunks

def create_vectorstore(chunks):
    embeddings = HuggingFaceEmbeddings(model_name=MODEL_NAME)
    db = Chroma.from_documents(chunks, embeddings, persist_directory=CHROMA_DIR)
    db.persist()  # Explicit flush to disk; recent Chroma releases also persist automatically.
    return db

def load_vectorstore():
    print("πŸ“‚ Loading from:", CHROMA_DIR)
    embeddings = HuggingFaceEmbeddings(model_name=MODEL_NAME)
    db = Chroma(persist_directory=CHROMA_DIR, embedding_function=embeddings)

    # Debug block: report document count and a sample snippet
    try:
        docs = db.get()
        n = len(docs["documents"])
        print(f"✅ Loaded vectorstore with {n} docs")
        if n:
            print(f"🧾 First doc snippet: {docs['documents'][0][:100]}...")
    except Exception as e:
        print(f"❌ Vectorstore load error: {e}")

    return db
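
# Usage sketch: build the index on the first run, then reload and query it.
# Assumptions (not in the original module): PDFs live in a local "meal_plans"
# folder, matching the HF URL path, and the query string is illustrative.
if __name__ == "__main__":
    if os.path.isdir(CHROMA_DIR):
        db = load_vectorstore()
    else:
        db = create_vectorstore(load_and_chunk_pdfs("meal_plans"))
    # Quick retrieval sanity check against the indexed recipes.
    for hit in db.similarity_search("high protein breakfast", k=2):
        print(hit.metadata["source"], "->", hit.page_content[:80])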