# app/ui_streamlit.py
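# Run locally (assuming you launch from the project root so the `app` package imports resolve):
#   streamlit run app/ui_streamlit.py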
import json
import os
from pathlib import Path
import streamlit as st

from app.main import get_env, ensure_index_exists
from app.search import search
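# Assumed contracts, inferred from how these are used below (not verified against
# app.main / app.search themselves):
#   get_env() -> dict with at least "INDEX_DIR" and "EXPORT_DIR" paths
#   ensure_index_exists(env) builds or validates the FAISS index under INDEX_DIR
#   search(query, env, top_k=..., filters=...) -> list of dicts with keys such as
#   "id", "title", "url", "source", "geo", "categories", and "score"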

# Streamlit config should be the first Streamlit call
st.set_page_config(page_title="Grants Discovery RAG", layout="wide")

# Environment + index
_env = get_env()
ensure_index_exists(_env)

# ---------- helpers ----------
def _dedup_records(rows):
    """Drop duplicate records, keyed by id, url, or title (first occurrence wins)."""
    seen, out = set(), []
    for r in rows or []:
        k = r.get("id") or r.get("url") or r.get("title")
        if not k or k in seen:
            continue
        seen.add(k)
        out.append(r)
    return out
# ---------- end helpers ----------
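# Illustrative example: _dedup_records([{"id": 1}, {"id": 1}, {"url": "x"}]) keeps the
# first {"id": 1} and the {"url": "x"} record; rows with no id/url/title are dropped.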

# ---------- optional diagnostics ----------
with st.expander("Diagnostics (optional)", expanded=False):
    idx = Path(_env["INDEX_DIR"])
    st.write("INDEX_DIR:", str(idx))
    st.write("faiss.index exists:", (idx / "faiss.index").exists())
    st.write("meta.json exists:", (idx / "meta.json").exists())
    if (idx / "meta.json").exists():
        try:
            meta = json.loads((idx / "meta.json").read_text())
            st.write("meta.json count:", len(meta))
            st.write("meta head:", [{"id": m.get("id"), "title": m.get("title")} for m in meta[:2]])
        except Exception as e:
            st.error(f"Failed to read meta.json: {e!r}")
    try:
        demo = search("transportation", _env, top_k=3, filters={})
        st.write("sample search('transportation') results:", len(demo))
        if demo:
            st.write(demo[:3])
    except Exception as e:
        st.error(f"search() raised: {e!r}")
# ---------- end diagnostics ----------
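# Note: the body of the expander above still executes on every rerun
# (expanded=False only collapses the display), so the sample search adds a small
# cost to each page load.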

st.title("Grants Discovery RAG (Capacity Building)")

preset = st.radio(
    "Quick topic:",
    ["General", "Elderly", "Prison Ministry", "Evangelism", "Vehicles/Transport", "FTA 5310"],
    horizontal=True
)

default_q = {
    "General": "capacity building",
    "Elderly": "capacity building for seniors and aging services",
    "Prison Ministry": "capacity building for reentry and prison ministry",
    "Evangelism": "capacity building for faith and community outreach",
    "Vehicles/Transport": "capacity building transportation vehicles vans buses mobility",
    "FTA 5310": "5310 Enhanced Mobility Seniors Individuals with Disabilities",
}.get(preset, "capacity building")

# --- controls ---
q = st.text_input("Search query", value=default_q)

# No defaults -> no filtering unless the user selects something
geo = st.multiselect("Geo filter (optional)", options=["US", "MD", "MA"], default=[])
categories = st.multiselect(
    "Category filter (optional)",
    options=["capacity_building", "elderly", "prison_ministry", "evangelism", "transportation", "vehicle"],
    default=[]
)

top_k = st.slider("Results", 5, 50, 15)

# Build filters only when selected
filters = {}
if geo:
    filters["geo"] = geo
if categories:
    filters["categories"] = categories  # <- use 'categories' key (not 'cats')

col1, col2 = st.columns([1, 1])

with col1:
    if st.button("Search"):
        try:
            results = search(q, _env, top_k=top_k, filters=filters)
            results = _dedup_records(results)
            st.session_state["results"] = results
        except Exception as e:
            st.error(str(e))

with col2:
    if st.button("Export Results to CSV"):
        results = st.session_state.get("results", [])
        if not results:
            st.warning("No results to export. Run a search first.")
        else:
            os.makedirs(_env["EXPORT_DIR"], exist_ok=True)
            out_path = os.path.join(_env["EXPORT_DIR"], "results.csv")
            import pandas as pd  # local import: pandas is only needed for the CSV export
            pd.DataFrame(results).to_csv(out_path, index=False)
            st.success(f"Exported to {out_path}")

st.markdown("---")

results = st.session_state.get("results", [])
if results:
    st.caption(f"Results: {len(results)}")
    for r in results:
        title = r.get("title", "(no title)")
        url = r.get("url", "")
        cats = r.get("categories") or r.get("cats") or []
        geo_tags = r.get("geo") or []

        st.markdown(f"### {title}")
        st.write(f"**Source:** {r.get('source','')} | **Geo:** {', '.join(geo_tags) if isinstance(geo_tags, list) else geo_tags} | **Categories:** {', '.join(cats) if isinstance(cats, list) else cats}")

        if url and not url.startswith("http"):
            st.caption("Note: This item may display an ID or number instead of a full link. Open on Grants.gov if needed.")
        st.write(f"[Open Link]({url})  \nScore: {r.get('score', 0):.3f}")
        st.markdown("---")
else:
    st.info("Enter a query and click Search.")