# -*- coding: utf-8 -*-
"""app.py

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1fuzwSkyjYRLfvGxQGclxwX0Y9OD2IK6s
"""

# app.py

# Step 1: Install necessary libraries
# This is handled by requirements.txt in Hugging Face Spaces,
# but you would run this line in a fresh environment:
# !pip install -q gradio transformers torch sentencepiece

# Step 2: Import libraries
import gradio as gr
import re
from transformers import pipeline

# --- Backend Logic ---

# Step 3: Load the Hugging Face model
print("Loading Hugging Face model (google/flan-t5-small)... This may take a moment.")
text_generator = pipeline(
    "text2text-generation",
    model="google/flan-t5-small"
)
print("Model loaded successfully!")


def parse_chat_file(file_content):
    """Parse a WhatsApp or Telegram plain-text export into "Sender: message" lines.

    Handles lines such as:
        12/31/23, 9:41 PM - Alice: Happy new year!
        [31.12.2023, 21:41:07] Bob: Same to you!
    Continuation lines of multi-line messages are appended to the previous
    entry, and common system notices (group creation, member changes,
    subject changes) are skipped.
    """
    lines = file_content.split('\n')
    chat_data = []
    # Optional left-to-right mark (\u200e, prepended by WhatsApp), optional
    # brackets around the timestamp, optional " - " separator (absent in
    # bracketed iOS-style exports), then "Sender: message".
    pattern = re.compile(
        r'^(?:\u200e)?\[?(\d{1,2}[/.]\d{1,2}[/.]\d{2,4}),?\s+'
        r'(\d{1,2}:\d{2}(?::\d{2})?(?:\s*[AP]M)?)\]?\s*(?:-\s*)?([^:]+):\s*(.*)',
        re.IGNORECASE
    )
    for line in lines:
        match = pattern.match(line)
        if match:
            sender, message = match.group(3), match.group(4)
            if ("created this group" not in message
                    and "added" not in message
                    and "changed the subject" not in message):
                chat_data.append(f"{sender}: {message}")
        elif chat_data and line.strip():
            # A non-matching, non-empty line continues the previous message.
            chat_data[-1] += "\n" + line
    return "\n".join(chat_data) if chat_data else "Could not parse chat file."


def process_chat_request(user_question, chat_history, state_data):
    """Handle one chat turn: build a prompt from the parsed chat content and
    answer the user's question with the local Hugging Face model."""
    context_size = state_data.get("context_size")
    chat_content = state_data.get("chat_content")
    temperature = state_data.get("temperature")

    if not all([context_size, chat_content, temperature is not None]):
        raise gr.Error("Chat content or configuration is missing. Please restart by uploading a file.")
    if not user_question:
        raise gr.Error("Please enter a question.")

    # Keep only the most recent `context_size` characters of the chat.
    # Note: flan-t5-small has a 512-token input limit, so anything beyond
    # that is silently truncated by the tokenizer in any case.
    context_to_use = chat_content[-int(context_size):]

    prompt = f"""
Based on the following chat history, provide a detailed answer to the user's question.

CONTEXT:
---
{context_to_use}
---

QUESTION: {user_question}

ANSWER:
"""

    try:
        # `do_sample=True` is required for `temperature` to have any effect;
        # without it, decoding is deterministic and the setting is ignored.
        result = text_generator(
            prompt,
            max_new_tokens=300,
            num_beams=3,
            do_sample=True,
            temperature=temperature
        )
        bot_response = result[0]['generated_text']
    except Exception as e:
        raise gr.Error(f"An error occurred with the model: {e}")

    chat_history.append((user_question, bot_response))
    return "", chat_history


# --- Gradio UI Definition ---

with gr.Blocks(
    theme=gr.themes.Soft(primary_hue="orange", secondary_hue="orange"),
    title="Local Chat Analyzer"
) as demo:
    app_state = gr.State({})

    with gr.Column(visible=True) as welcome_page:
        gr.Markdown(
            """
Powered by a Hugging Face Model. No API key needed!