"""NCTC AI-assisted Knowledge Management System (Gradio app).

Pipeline: a weight-assigner agent scores metadata columns for the query,
embeddings stored in Supabase are ranked by weighted cosine similarity,
a selector agent prunes irrelevant reports, and a chat agent writes the
final answer.  Report PDFs are streamed from Google Drive.

All secrets (Supabase, Groq, Drive service account, login) come from
environment variables.
"""

import json
import os
import random
import re
import tempfile
from io import BytesIO

import gradio as gr
import markdown
import numpy as np
import pandas as pd
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from gradio_pdf import PDF
from phi.agent import Agent
from phi.model.groq import Groq
from pydantic import BaseModel, Field
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from supabase import create_client

# --- Environment configuration -------------------------------------------
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY", "")
VALID_USERNAME = os.getenv("VALID_USERNAME")
VALID_PASSWORD = os.getenv("VALID_PASSWORD")

# --- Google Drive API credentials ----------------------------------------
SCOPES = ["https://www.googleapis.com/auth/drive"]
# NOTE: despite the name, this variable holds the service-account JSON
# *content* (it is parsed with json.loads below), not a file path.
SERVICE_ACCOUNT_FILE = os.getenv("SERVICE_ACCOUNT_FILE")
if not SERVICE_ACCOUNT_FILE:
    # Fix: message now names the variable actually checked.
    raise ValueError("SERVICE_ACCOUNT_FILE environment variable not set")

credentials_info = json.loads(SERVICE_ACCOUNT_FILE)
creds = service_account.Credentials.from_service_account_info(
    credentials_info, scopes=SCOPES
)
drive_service = build("drive", "v3", credentials=creds)


class ColumnWeights(BaseModel):
    """Per-column relevance weights produced by the weight-assigner agent."""

    report_no: float = Field(0.0, description="Weight for report number")
    date_of_issue: float = Field(0.0, description="Weight for issue date")
    subject: float = Field(0.0, description="Weight for subject")
    short_summary: float = Field(0.0, description="Weight for short summary")
    report_category: float = Field(0.0, description="Weight for report category")
    subject_classification: float = Field(
        0.0, description="Weight for subject classification"
    )
    cth_cti: float = Field(0.0, description="Weight for CTH/CTI")
    notification_number: float = Field(
        0.0, description="Weight for notification number"
    )
    notification_type: float = Field(0.0, description="Weight for notification type")
    issue_year: float = Field(0.0, description="Weight for issue year")

    def normalize_weights(self):
        """Rescale all weights in place so their total is 10.

        Raises:
            ValueError: if every weight is zero (nothing to normalize).
        """
        total_weight = sum(getattr(self, field) for field in self.__fields__)
        if total_weight == 0:
            raise ValueError("Total weight cannot be zero.")
        factor = 10 / total_weight
        for field in self.__fields__:
            setattr(self, field, round(getattr(self, field) * factor, 2))


# Agent 1: decides which metadata columns matter for a given query.
weight_assigner = Agent(
    name="Weight Assigner",
    role="Assigns dynamic weights to columns based on the query.",
    instructions=[
        "Analyze the query and assign weights to the relevant columns.",
        "Focus on the keywords in the query to determine which columns are most relevant.",
        "For queries related to Chapter, CTH, CTI, tariff heading give more weightage to cth_cti column",
        "For queries related to product name, item description give more weightage to short_summary",
        "Report category tells the report is related to export or import only",
        "For any queries related to risk types such as overvaluation, misclassification, BCD, IGST, CVD duty evasion etc., give additional weightage to subject_classification",
        "Ensure the total weight sums to approximately 10 (decimal values allowed). "
        "Minimum weight value for a column should be 0.5 and round to single decimal place",
    ],
    model=Groq(id="deepseek-r1-distill-llama-70b"),
    response_model=ColumnWeights,
    markdown=True,
    debug_mode=True,
    show_tool_calls=True,
    monitoring=True,
)


def fetch_data_from_supabase():
    """Load the full risk_report_embeddings table as a DataFrame.

    Raises:
        ValueError: if the table is empty.
    """
    supabase1 = create_client(SUPABASE_URL, SUPABASE_KEY)
    response = supabase1.table("risk_report_embeddings").select("*").execute()
    data = response.data
    if not data:
        raise ValueError("No data found in the Supabase table.")
    return pd.DataFrame(data)


class RelevantReports(BaseModel):
    """Structured output of the relevance-filter agent."""

    report_numbers: list[str] = Field(
        ..., description="List of report numbers extracted (report_no)"
    )


# Agent 2: prunes the embedding-ranked candidates down to truly relevant ones.
relevant_report_selector = Agent(
    name="Relevant Report Selector",
    role="Selects the reports after dropping not related reports based on the query and provided data.",
    instructions=[
        "Analyze the query and given reports and give more importance to products and CTH/CTI.",
        "Drop the reports that are not fairly related to the query.",
        "Extract Report numbers, generally shown against the field 'report_no' in the given data, not against 'id'",
    ],
    model=Groq(id="deepseek-r1-distill-llama-70b"),
    response_model=RelevantReports,
    markdown=True,
    debug_mode=True,
    show_tool_calls=True,
    monitoring=True,
)

# Agent 3: writes the final grounded answer from the selected reports.
chat_agent = Agent(
    name="NCTCGpt",
    role="You are a Knowledge Management System Chat Assistant answering questions only based on NCTC Risk Revenue Reports on Revenue Risks.",
    instructions=[
        "Your goal is to provide a professional and detailed answer to the given query based on NCTC risk revenue reports mentioned in the context. "
        "If no reports is given in CONTEXT, mention no risk reports found and dont try to give answer on ur own",
        "The report should be in proper markdown and nicely formatted with headings and subheadings.",
        "If more than one report matches the queries, discuss the relevance of each report one by one.",
        "If there are no reports given in the context of the prompt, mention there is no Risk report found related to this query from repository ",
        "Cite the report numbers with the date of issue if risk reports are found .report number are against report_no of format (12/2021-22) ",
        "CTI/CTH refers to WCO HSN codes of 8 digits which follow a system of reference. e.g., 84012000 - first 2 digits indicate Chapter 84, 4 digits - 8401 indicate Heading, 6 digits - 840120 indicate subheading, 8 digits fully indicate an item.",
    ],
    model=Groq(id="deepseek-r1-distill-llama-70b"),
    stream=True,
    markdown=True,
    debug_mode=True,
    show_tool_calls=True,
    monitoring=True,
)


def process_query(query, model):
    """Run the full agentic RAG pipeline for one user query.

    Args:
        query: free-text user question.
        model: SentenceTransformer used to embed the query.

    Returns:
        (answer_markdown, relevant_report_numbers) tuple.
    """
    gr.Info('Fetching relevant Risk reports from repository...')
    weights_response = weight_assigner.run(query).content
    weights_response.normalize_weights()
    weights = {
        field: getattr(weights_response, field)
        for field in weights_response.__fields__
    }

    df = fetch_data_from_supabase()
    metadata_cols = [col for col in df.columns if col != 'embedding']
    query_embedding = model.encode(query.lower().strip())

    gr.Info('Searching the knowledge base ...')
    scores = []
    for embeddings_dict in df['embedding']:
        final_score = 0.0
        for col, weight in weights.items():
            if col not in embeddings_dict:
                continue
            # Fix: coerce to ndarray first — `list == 0` is a scalar False,
            # so the original never skipped all-zero vectors and fed them
            # to cosine_similarity (zero-norm -> nan).
            col_embedding = np.asarray(embeddings_dict[col])
            if not np.any(col_embedding):
                continue
            similarity = cosine_similarity([query_embedding], [col_embedding])[0][0]
            final_score += weight * similarity
        scores.append(final_score)

    df['relevance_score'] = scores
    df = df.sort_values(by='relevance_score', ascending=False)

    top_n = 15
    prompt_df = df[[col for col in metadata_cols if col != 'id']].copy()
    top_n_rows = prompt_df.head(top_n)

    markdown_prompt = (
        f"Query: {query}\n\nFind Top Relevant Reports from below reports "
        f"to answer above query and return list of their report_no:\n\n"
    )
    for row in top_n_rows.to_dict(orient="records"):
        markdown_prompt += "**Report**:\n"
        for key, value in row.items():
            markdown_prompt += f"- {key}: {value}\n"
        markdown_prompt += "\n"

    agent_response = relevant_report_selector.run(markdown_prompt)
    relevant_report_numbers = agent_response.content.report_numbers
    gr.Info('Agent analysing Relevancy of fetched reports..')

    selected_rows = prompt_df[prompt_df['report_no'].isin(relevant_report_numbers)]
    top_n_data = selected_rows.to_dict(orient="records")
    markdown_prompt = (
        f"Query: {query}\n\nUse the below context to answer the question ,"
        f"\nCONTEXT : \n\n"
    )
    for row in top_n_data:
        markdown_prompt += "**Report**:\n"
        for key, value in row.items():
            markdown_prompt += f"- {key}: {value}\n"
        markdown_prompt += "\n"

    gr.Info('Almost over..Chatbot Agent pitches in...')
    chatbot_response = chat_agent.run(markdown_prompt)
    return chatbot_response.content, relevant_report_numbers


def download_pdf_from_drive(file_id):
    """Download a Drive file to a temp path; return the path or None.

    Returns None for the sentinel "N/A" id or on any download error
    (best-effort by design — the UI simply shows no PDF).
    """
    if file_id == "N/A":
        return None
    try:
        request = drive_service.files().get_media(fileId=file_id)
        file_content = BytesIO()
        downloader = MediaIoBaseDownload(file_content, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
        temp_dir = tempfile.mkdtemp()
        temp_file_path = os.path.join(temp_dir, f"{file_id}.pdf")
        with open(temp_file_path, "wb") as temp_file:
            temp_file.write(file_content.getvalue())
        return temp_file_path
    except Exception as e:
        print(f"Error downloading file: {e}")
        return None


def filtered_df(report_no):
    """Build the detail-view HTML for one report.

    Returns:
        (html_content, file_id) — file_id is "N/A" when missing.
    """
    filtered_data = df_filtered[df_filtered['report_no'] == report_no]
    if filtered_data.empty:
        return "<p>No data found for the selected Report No.</p>", "N/A"
    row = filtered_data.iloc[0]
    # NOTE(review): the original markup/styling was stripped from this copy
    # of the file; this is a plain reconstruction — confirm against the
    # deployed version.
    html_content = "<div><h3>Report Details</h3><table>"
    fields_to_display = {
        "Report Number": "report_no",
        "Report Issue Date": "date_of_issue",
        "Subject": "subject",
        "Short Summary": "short_summary",
        "Total Assessible Value": "total_assessible_value",
        "Total Differential Duty Involved": "total_revenue_difference",
        "Total Transactions (Approx)": "total_transactions",
        "Total Entities Involved": "total_importers",
        "Report Category": "report_category",
        "Report Classification": "subject_classification",
        "CTH/CTI Involved": "cth_cti",
        "Notifications Involved": "notification_number",
        "Notification Type": "notification_type",
        "Notification Date": "notification_date",
    }
    for field_name, column in fields_to_display.items():
        value = row[column] if column in row.index and pd.notna(row[column]) else "N/A"
        html_content += (
            f"<tr><td><b>{field_name}</b></td><td>{value}</td></tr>"
        )
    html_content += "</table></div>"
    file_id = row.get("file_id", "N/A")
    return html_content, file_id


def update_report_and_pdf(report_no):
    """Resolve a dropdown selection into (detail HTML, Drive link, PDF path)."""
    html_content, file_id = filtered_df(report_no)
    google_drive_link = (
        f"[Open in Google Drive](https://drive.google.com/file/d/{file_id}/view)"
        if file_id != "N/A"
        else "File ID not available"
    )
    pdf_path = download_pdf_from_drive(file_id)
    return html_content, google_drive_link, pdf_path


def authenticate_user(username, password):
    """Return True when credentials match the configured login."""
    return username == VALID_USERNAME and password == VALID_PASSWORD


def login_action(username, password):
    """Toggle login/main sections based on credential check."""
    if authenticate_user(username, password):
        return (
            gr.update(visible=False),
            gr.update(visible=True),
            gr.update(value="", visible=False),
        )
    return (
        gr.update(visible=True),
        gr.update(visible=False),
        gr.update(value="❌ Invalid credentials. Try again.", visible=True),
    )


# --- Load data and embedding model at startup -----------------------------
df = fetch_data_from_supabase()
model = SentenceTransformer('nomic-ai/nomic-embed-text-v1', trust_remote_code=True)

df_filtered = df.drop(columns=['full ninety'], errors='ignore')
df_filtered['total_assessible_value'] = pd.to_numeric(
    df_filtered['total_assessible_value'], errors='coerce'
)
df_filtered['total_revenue_difference'] = pd.to_numeric(
    df_filtered['total_revenue_difference'], errors='coerce'
)
# Years arrive as floats/strings like "2021.0" — keep the integer part only,
# then drop placeholder "0" rows.
df_filtered['issue_year'] = (
    df_filtered['issue_year'].astype(str).str.split('.').str[0]
)
df_filtered = df_filtered[df_filtered["issue_year"] != '0']
df_filtered = df_filtered[df_filtered["subject_classification"] != "0"]
df_filtered = df_filtered[df_filtered["report_category"] != "0"]

# --- Gradio interface -----------------------------------------------------
with gr.Blocks(css=".gr-button {background-color: #1E3A8A; color: white;}") as demo:
    with gr.Column(visible=True) as login_section:
        # NOTE(review): original login-header markup was stripped from this
        # copy; plain reconstruction below.
        gr.HTML(
            """
            <div style="text-align:center;">
              <h1>NCTC AI Assisted Knowledge Management System</h1>
              <h3>National Customs Targeting Center – Mumbai</h3>
              <p>Please log in to continue.</p>
            </div>
            """
        )
        username_input = gr.Textbox(label="Username", placeholder="Enter username")
        password_input = gr.Textbox(
            label="Password", type="password", placeholder="Enter password"
        )
        login_button = gr.Button("🔐 Login")
        login_error = gr.Textbox(visible=False, interactive=False)

    with gr.Column(visible=False) as main_app_section:
        title = gr.Markdown("# NCTC Risk Report Analytics")
        subtitle = gr.Markdown("### Powered by AI Agents")
        description = gr.Markdown(
            "This application provides interactive visualizations and detailed "
            "insights into NCTC risk reports. Users can explore yearly trends, "
            "subject classifications, and revenue impacts through dynamic charts "
            "and a chatbot to interact with the knowledge repository."
        )

        with gr.TabItem("Risk Report Visualizations"):
            with gr.Row():
                plt1 = gr.BarPlot(
                    value=df_filtered.groupby("issue_year")["report_no"]
                    .count()
                    .reset_index(),
                    x="issue_year",
                    y="report_no",
                    title="Number of Revenue Risk Reports by Issue Year",
                    color="issue_year",
                )
                plt2 = gr.BarPlot(
                    value=df_filtered.groupby("issue_year")[
                        "total_revenue_difference"
                    ]
                    .sum()
                    .reset_index(),
                    x="issue_year",
                    y="total_revenue_difference",
                    title="Year-wise Total Differential Revenue Involved",
                )
            with gr.Row():
                plt3 = gr.BarPlot(
                    value=df_filtered.groupby("subject_classification")[
                        "report_no"
                    ]
                    .count()
                    .reset_index(),
                    x="subject_classification",
                    y="report_no",
                    title="Risk Type Classification",
                    x_label_angle=60,
                    x_axis_labels_visible=False,
                )
                plt4 = gr.BarPlot(
                    value=df_filtered.groupby("report_category")["report_no"]
                    .count()
                    .reset_index(),
                    x="report_category",
                    y="report_no",
                    title="Report Category",
                    color="report_category",
                )

        with gr.TabItem("View Report"):
            report_dropdown = gr.Dropdown(
                choices=df_filtered['report_no'].unique().tolist(),
                label="Select Report No",
            )
            with gr.Row():
                with gr.Column(scale=1):
                    report_output = gr.HTML()
                    file_id_link = gr.Markdown()
                with gr.Column(scale=1):
                    pdf_viewer = PDF(label="PDF Viewer")
            report_dropdown.change(
                fn=update_report_and_pdf,
                inputs=[report_dropdown],
                outputs=[report_output, file_id_link, pdf_viewer],
            )

        with gr.Tab("AI ChatBot on RRR"):
            with gr.Row():
                query_input = gr.Textbox(
                    label="Enter your query here",
                    placeholder="e.g., What revenue reports deal with mechanical parts?",
                )
                submit_button = gr.Button("Submit")
            image_urls = [
                "1.gif",  # Replace with actual URLs or store GIFs in HF Space
                "2.gif",
                "3.gif",
                "4.gif",
            ]
            gif_image = gr.Image(
                width=100,
                height=100,
                value=random.choice(image_urls),
                visible=False,
                label="Loading...",
            )
            with gr.Row():
                with gr.Column(scale=1):
                    chatbot_output = gr.HTML(
                        label="ChatBot Response", show_label=True, container=True
                    )
                with gr.Column(scale=1):
                    pdf_viewer_chatbot = PDF(label="PDF Viewer")
                    report_links = gr.Radio(
                        label="Relevant Reports", choices=[], interactive=True
                    )
            with gr.Row():
                think_tokens_output = gr.HTML(
                    label="Agent Reasoning", show_label=True, container=True
                )

            def handle_submit(query):
                """Run the pipeline, split <think> reasoning from the answer,
                and hide the loading GIF when done."""
                chatbot_response, relevant_report_numbers = process_query(
                    query, model
                )
                # deepseek-r1 wraps its chain-of-thought in <think> tags;
                # the original pattern lost the tags in this copy of the file.
                think_pattern = r"<think>(.*?)</think>"
                think_tokens_match = re.search(
                    think_pattern, chatbot_response, re.DOTALL
                )
                think_tokens = (
                    think_tokens_match.group(1).strip()
                    if think_tokens_match
                    else "No intermediate reasoning provided."
                )
                final_answer = re.sub(
                    think_pattern, "", chatbot_response, flags=re.DOTALL
                ).strip()
                think_tokens_html = markdown.markdown(think_tokens)
                final_answer_html = markdown.markdown(final_answer)

                choices = relevant_report_numbers if relevant_report_numbers else []
                value = (
                    relevant_report_numbers[0] if relevant_report_numbers else None
                )
                interactive = bool(relevant_report_numbers)
                if not relevant_report_numbers:
                    gr.Info("No relevant reports found.")
                # Fix: also return a hide-update for the GIF — the original
                # computed it but never wired it to an output, so the spinner
                # stayed visible forever.
                return (
                    final_answer_html,
                    gr.Radio(choices=choices, value=value, interactive=interactive),
                    think_tokens_html,
                    gr.update(visible=False),
                )

            def handle_report_selection(report_no):
                """Fetch the PDF for the selected report; None when unavailable."""
                if not report_no:
                    gr.Info("No report selected.")
                    return None
                filtered_data = df_filtered[df_filtered['report_no'] == report_no]
                if filtered_data.empty:
                    return None
                file_id = filtered_data.iloc[0]['file_id']
                pdf_path = download_pdf_from_drive(file_id)
                if pdf_path:
                    return pdf_path
                gr.Info(f"PDF for report {report_no} could not be loaded.")
                return None

            def show_gif():
                """Reveal the loading GIF while the pipeline runs."""
                return gr.Image(visible=True)

            submit_button.click(show_gif, None, gif_image).then(
                fn=handle_submit,
                inputs=[query_input],
                outputs=[
                    chatbot_output,
                    report_links,
                    think_tokens_output,
                    gif_image,
                ],
            )
            report_links.change(
                fn=handle_report_selection,
                inputs=[report_links],
                outputs=[pdf_viewer_chatbot],
            )

    login_button.click(
        fn=login_action,
        inputs=[username_input, password_input],
        outputs=[login_section, main_app_section, login_error],
    )

demo.launch(debug=True)