import os
import re
import random
import json
import tempfile
from io import BytesIO

import pandas as pd
import numpy as np
import markdown
import gradio as gr
from gradio_pdf import PDF
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from google.oauth2 import service_account
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
from supabase import create_client
from pydantic import BaseModel, Field
from phi.agent import Agent
from phi.model.groq import Groq

# Environment variables for sensitive data
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
VALID_USERNAME = os.getenv("VALID_USERNAME")
VALID_PASSWORD = os.getenv("VALID_PASSWORD")

# Google Drive API credentials
SCOPES = ["https://www.googleapis.com/auth/drive"]
SERVICE_ACCOUNT_FILE = os.getenv("SERVICE_ACCOUNT_FILE")  # Store in HF Space storage
if not SERVICE_ACCOUNT_FILE:
    raise ValueError("SERVICE_ACCOUNT_FILE environment variable not set")

# Parse the JSON string into a Python dictionary
credentials_info = json.loads(SERVICE_ACCOUNT_FILE)

# Create credentials from the JSON content
creds = service_account.Credentials.from_service_account_info(
    credentials_info, scopes=SCOPES
)
# creds = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
drive_service = build("drive", "v3", credentials=creds)

# Pydantic model for column weights
class ColumnWeights(BaseModel):
    report_no: float = Field(0.0, description="Weight for report number")
    date_of_issue: float = Field(0.0, description="Weight for issue date")
    subject: float = Field(0.0, description="Weight for subject")
    short_summary: float = Field(0.0, description="Weight for short summary")
    report_category: float = Field(0.0, description="Weight for report category")
    subject_classification: float = Field(0.0, description="Weight for subject classification")
    cth_cti: float = Field(0.0, description="Weight for CTH/CTI")
    notification_number: float = Field(0.0, description="Weight for notification number")
    notification_type: float = Field(0.0, description="Weight for notification type")
    issue_year: float = Field(0.0, description="Weight for issue year")

    def normalize_weights(self):
        # Rescale all weights so they sum to exactly 10.
        total_weight = sum(getattr(self, field) for field in self.__fields__)
        if total_weight == 0:
            raise ValueError("Total weight cannot be zero.")
        factor = 10 / total_weight
        for field in self.__fields__:
            setattr(self, field, round(getattr(self, field) * factor, 2))

# Weight Assigner Agent
weight_assigner = Agent(
    name="Weight Assigner",
    role="Assigns dynamic weights to columns based on the query.",
    instructions=[
        "Analyze the query and assign weights to the relevant columns.",
        "Focus on the keywords in the query to determine which columns are most relevant.",
        "For queries related to Chapter, CTH, CTI, or tariff heading, give more weightage to the cth_cti column.",
        "For queries related to product name or item description, give more weightage to short_summary.",
        "Report category only tells whether the report is related to export or import.",
        "For queries related to risk types such as overvaluation, misclassification, or BCD, IGST, CVD duty evasion, give additional weightage to subject_classification.",
        "Ensure the total weight sums to approximately 10 (decimal values allowed). Minimum weight value for a column should be 0.5, rounded to one decimal place.",
    ],
    model=Groq(id="deepseek-r1-distill-llama-70b"),
    response_model=ColumnWeights,
    markdown=True,
    debug_mode=True,
    show_tool_calls=True,
    monitoring=True,
)
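# Illustration of the rescaling step: normalize_weights() scales whatever the
# agent returns so the weights sum to 10, e.g.
#   ColumnWeights(subject=2.0, short_summary=3.0).normalize_weights()
# leaves subject=4.0 and short_summary=6.0 (all other columns stay at 0.0).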
# Supabase data fetch
def fetch_data_from_supabase():
    supabase1 = create_client(SUPABASE_URL, SUPABASE_KEY)
    response = supabase1.table("risk_report_embeddings").select("*").execute()
    data = response.data
    if not data:
        raise ValueError("No data found in the Supabase table.")
    return pd.DataFrame(data)

# Relevant Reports Selector
class RelevantReports(BaseModel):
    report_numbers: list[str] = Field(..., description="List of report numbers extracted (report_no)")

relevant_report_selector = Agent(
    name="Relevant Report Selector",
    role="Selects the reports after dropping unrelated reports, based on the query and provided data.",
    instructions=[
        "Analyze the query and the given reports, giving more importance to products and CTH/CTI.",
        "Drop the reports that are not fairly related to the query.",
        "Extract report numbers, generally shown against the field 'report_no' in the given data, not against 'id'.",
    ],
    model=Groq(id="deepseek-r1-distill-llama-70b"),
    response_model=RelevantReports,
    markdown=True,
    debug_mode=True,
    show_tool_calls=True,
    monitoring=True,
)

# Chatbot Agent
chat_agent = Agent(
    name="NCTCGpt",
    role="You are a Knowledge Management System Chat Assistant answering questions only based on NCTC Risk Revenue Reports on Revenue Risks.",
    instructions=[
        "Your goal is to provide a professional and detailed answer to the given query based on the NCTC risk revenue reports mentioned in the context. If no reports are given in CONTEXT, mention that no risk reports were found and do not try to give an answer on your own.",
        "The report should be in proper markdown, nicely formatted with headings and subheadings.",
        "If more than one report matches the query, discuss the relevance of each report one by one.",
        "If there are no reports given in the context of the prompt, mention that there is no risk report found related to this query in the repository.",
        "Cite the report numbers with the date of issue if risk reports are found. Report numbers are against report_no, in the format (12/2021-22).",
        "CTI/CTH refers to WCO HSN codes of 8 digits which follow a system of reference. e.g., 84012000 - the first 2 digits indicate Chapter 84, 4 digits (8401) indicate the heading, 6 digits (840120) indicate the subheading, and all 8 digits fully identify an item.",
    ],
    model=Groq(id="deepseek-r1-distill-llama-70b"),
    stream=True,
    markdown=True,
    debug_mode=True,
    show_tool_calls=True,
    monitoring=True,
)
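# Module-level objects referenced further down (`model` for query embeddings,
# `df_filtered` for report metadata) are not shown in the truncated source;
# the two lines below are a minimal sketch with an assumed model name.
model = SentenceTransformer("all-MiniLM-L6-v2")  # assumption: the actual model may differ
df_filtered = fetch_data_from_supabase().drop(columns=["embedding", "id"], errors="ignore")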
# Main query processing pipeline
def process_query(query, model):
    gr.Info('Fetching relevant Risk reports from repository...')

    # Step 1: let the weight-assigner agent score each metadata column.
    weights_response = weight_assigner.run(query).content
    weights_response.normalize_weights()
    weights = {field: getattr(weights_response, field) for field in weights_response.__fields__}

    df = fetch_data_from_supabase()
    metadata_cols = [col for col in df.columns if col != 'embedding']
    query_embedding = model.encode(query.lower().strip())

    gr.Info('Searching the knowledge base...')
    # Step 2: weighted cosine similarity between the query and each column embedding.
    scores = []
    for i in range(len(df)):
        embeddings_dict = df['embedding'][i]
        final_score = 0
        for col, weight in weights.items():
            if col not in embeddings_dict:
                continue
            col_embedding = np.asarray(embeddings_dict[col])  # JSON lists -> ndarray so the zero-check works
            if np.all(col_embedding == 0):
                continue
            similarity = cosine_similarity([query_embedding], [col_embedding])[0][0]
            final_score += weight * similarity
        scores.append(final_score)

    df['relevance_score'] = scores
    df = df.sort_values(by='relevance_score', ascending=False)

    # Step 3: ask the selector agent to prune the top-15 candidates.
    top_n = 15
    prompt_df = df[[col for col in metadata_cols if col != 'id']].copy()
    top_n_rows = prompt_df.head(top_n)
    markdown_prompt = f"Query: {query}\n\nFind Top Relevant Reports from below reports to answer above query and return list of their report_no:\n\n"
    for row in top_n_rows.to_dict(orient="records"):
        markdown_prompt += "**Report**:\n"
        for key, value in row.items():
            markdown_prompt += f"- {key}: {value}\n"
        markdown_prompt += "\n"

    agent_response = relevant_report_selector.run(markdown_prompt)
    relevant_report_numbers = agent_response.content.report_numbers
    gr.Info('Agent analysing relevancy of fetched reports...')

    # Step 4: answer the query using only the selected reports as context.
    selected_rows = prompt_df[prompt_df['report_no'].isin(relevant_report_numbers)]
    top_n_data = selected_rows.to_dict(orient="records")
    markdown_prompt = f"Query: {query}\n\nUse the below context to answer the question.\nCONTEXT :\n\n"
    for row in top_n_data:
        markdown_prompt += "**Report**:\n"
        for key, value in row.items():
            markdown_prompt += f"- {key}: {value}\n"
        markdown_prompt += "\n"

    gr.Info('Almost over... Chatbot Agent pitches in...')
    chatbot_response = chat_agent.run(markdown_prompt)
    return chatbot_response.content, relevant_report_numbers

# Google Drive PDF download
def download_pdf_from_drive(file_id):
    if file_id == "N/A":
        return None
    try:
        request = drive_service.files().get_media(fileId=file_id)
        file_content = BytesIO()
        downloader = MediaIoBaseDownload(file_content, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
        temp_dir = tempfile.mkdtemp()
        temp_file_path = os.path.join(temp_dir, f"{file_id}.pdf")
        with open(temp_file_path, "wb") as temp_file:
            temp_file.write(file_content.getvalue())
        return temp_file_path
    except Exception as e:
        print(f"Error downloading file: {e}")
        return None
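# Example usage of the pipeline above (illustrative only; the query text and
# Drive file id are hypothetical):
#   answer_md, report_nos = process_query("IGST evasion on solar panel imports", model)
#   pdf_path = download_pdf_from_drive("<drive-file-id>")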
# Filtered DataFrame for report details
def filtered_df(report_no):
    filtered_data = df_filtered[df_filtered['report_no'] == report_no]
    if filtered_data.empty:
        # The return value is truncated in the source; a (details, pdf_path)
        # pair matching the UI sketch below is assumed here.
        return ("No report found for this report number.", None)
    row = filtered_data.iloc[0].to_dict()
    # Render each field of the selected report; only the "{value}" fragment
    # survives from the truncated source, so this layout is an assumption.
    details = ""
    for key, value in row.items():
        details += f"- **{key}**: {value}\n"
    # `file_id` is an assumed column name linking a report to its Drive PDF.
    pdf_path = download_pdf_from_drive(row.get("file_id", "N/A"))
    return details, pdf_path
# Login gate; the original body is truncated, so everything except the prompt
# string below is an assumption.
LOGIN_PROMPT = "Please log in to continue."

def check_login(username, password):
    # Used as the Gradio `auth` callable in the launch sketch below.
    return username == VALID_USERNAME and password == VALID_PASSWORD
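# Gradio UI wiring: a minimal sketch, since the original interface code is
# truncated. Component names and layout below are assumptions, not the source;
# only process_query, filtered_df, check_login, and LOGIN_PROMPT come from above.
def answer(query):
    response_md, report_nos = process_query(query, model)
    return response_md, gr.update(choices=report_nos)

def show_report(report_no):
    details, pdf_path = filtered_df(report_no)
    return details, pdf_path

with gr.Blocks(title="NCTCGpt") as demo:
    gr.Markdown("## NCTC Risk Report Knowledge Assistant")
    query_box = gr.Textbox(label="Ask about a revenue risk, product, or CTH/CTI")
    answer_md = gr.Markdown()
    report_picker = gr.Dropdown(label="Relevant report numbers", choices=[])
    report_details = gr.Markdown()
    report_pdf = PDF(label="Report PDF")

    query_box.submit(answer, inputs=query_box, outputs=[answer_md, report_picker])
    report_picker.change(show_report, inputs=report_picker, outputs=[report_details, report_pdf])

if __name__ == "__main__":
    # Gradio's built-in auth accepts a (username, password) -> bool callable
    # and shows `auth_message` on the login page.
    demo.launch(auth=check_login, auth_message=LOGIN_PROMPT)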