import streamlit as st
import pandas as pd
from streamlit_gsheets import GSheetsConnection


# Public Google Sheet holding the benchmark results.
SPREADSHEET_URL = "https://docs.google.com/spreadsheets/d/1Ui9gZoSKxSIW0B7fG8ryuN0nhkVhAckFxl2hWf6CaJQ/edit?usp=sharing"


def load_data():
    """Read the benchmark results from the Google Sheet into a DataFrame."""
    conn = st.connection("gsheets", type=GSheetsConnection)
    df = conn.read(spreadsheet=SPREADSHEET_URL)
    return df


def case_insensitive_search(data, query, column):
    """Filter rows whose `column` contains `query`, ignoring case; an empty query returns all rows."""
    if query:
        return data[data[column].str.lower().str.contains(query.lower(), na=False)]
    return data


def display_table(data, rows_per_page=10):
    """Render the results table, sizing its height to roughly `rows_per_page` rows."""
    container = st.container()
    with container:
        height = min(40 + rows_per_page * 38, 800)
        st.dataframe(data, height=height)


def main():
    st.title("MultiHop-RAG Benchmark 💡")
    data = load_data()

    # Sidebar filters: each text input narrows the results table.
    st.sidebar.header("Search Options")
    chat_model_query = st.sidebar.text_input("Chat Model")
    embedding_model_query = st.sidebar.text_input("Embedding Model")
    chunk_query = st.sidebar.text_input("Chunk")
    frame_query = st.sidebar.text_input("Framework")

    if chat_model_query:
        data = case_insensitive_search(data, chat_model_query, 'chat_model')
    if embedding_model_query:
        data = case_insensitive_search(data, embedding_model_query, 'embedding_model')
    if chunk_query:
        data = case_insensitive_search(data, chunk_query, 'chunk')
    if frame_query:
        data = case_insensitive_search(data, frame_query, 'framework')

    st.write("Displaying results across different frameworks, embedding models, chat models, and chunk settings.")
    st.info("Retrieval stage: MRR@10 and Hit@10; response stage: accuracy.")
    display_table(data)

    st.sidebar.header("Citation")
    st.sidebar.info(
        "Please cite this dataset as:\n"
        "Tang, Yixuan, and Yi Yang. MultiHop-RAG: Benchmarking Retrieval-Augmented Generation "
        "for Multi-Hop Queries. arXiv, 2024, arXiv:2401.15391."
    )

    st.markdown("---")
    st.caption(
        "For citation, please use: 'Tang, Yixuan, and Yi Yang. MultiHop-RAG: Benchmarking "
        "Retrieval-Augmented Generation for Multi-Hop Queries. arXiv, 2024, arXiv:2401.15391.'"
    )
    st.markdown("---")
    st.caption("To self-report results, please send an email to ytangch@connect.ust.hk")


if __name__ == "__main__":
    main()