# -*- coding: utf-8 -*-
"""app.py
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1fuzwSkyjYRLfvGxQGclxwX0Y9OD2IK6s
"""
# app.py
# Step 1: Install necessary libraries
# This is handled by requirements.txt in Hugging Face Spaces,
# but you would run this line in a fresh environment:
# !pip install -q gradio transformers torch sentencepiece
# Step 2: Import libraries
import gradio as gr
import re
from transformers import pipeline
# --- Backend Logic ---
# Step 3: Load the Hugging Face Model
print("Loading Hugging Face model (google/flan-t5-small)... This may take a moment.")
text_generator = pipeline(
    "text2text-generation",
    model="google/flan-t5-small"
)
print("Model loaded successfully!")
def parse_chat_file(file_content):
    """
    A robust parser for both WhatsApp and Telegram text exports.
    """
    lines = file_content.split('\n')
    chat_data = []
    # Matches "12/05/23, 9:41 PM - Sender: message" (Android) as well as
    # "[12/05/23, 9:41:07 PM] Sender: message" (iOS, where the "-" is absent,
    # hence the optional "-?"). Groups: date, time, sender, message.
    pattern = re.compile(
        r'^(?:\u200e)?\[?(\d{1,2}[/.]\d{1,2}[/.]\d{2,4}),?\s+'
        r'(\d{1,2}:\d{2}(?::\d{2})?(?:\s*[AP]M)?)\]?\s*-?\s*([^:]+):\s*(.*)',
        re.IGNORECASE
    )
    for line in lines:
        match = pattern.match(line)
        if match:
            sender, message = match.group(3), match.group(4)
            # Skip system notices such as group creation and membership changes.
            if ("created this group" not in message
                    and "added" not in message
                    and "changed the subject" not in message):
                chat_data.append(f"{sender}: {message}")
        elif chat_data and line.strip():
            # An undated line continues the previous message.
            chat_data[-1] += "\n" + line
    return "\n".join(chat_data) if chat_data else "Could not parse chat file."
def process_chat_request(user_question, chat_history, state_data):
    """
    The main function that handles the chat logic using the local Hugging Face model.
    """
    context_size = state_data.get("context_size")
    chat_content = state_data.get("chat_content")
    temperature = state_data.get("temperature")
    if not chat_content or not context_size or temperature is None:
        raise gr.Error("Chat content or configuration is missing. Please restart by uploading a file.")
    if not user_question:
        raise gr.Error("Please enter a question.")
    # Keep only the most recent slice of the chat so the prompt stays bounded.
    context_to_use = chat_content[-int(context_size):]
    prompt = f"""
Based on the following chat history, provide a detailed answer to the user's question.
CONTEXT:
---
{context_to_use}
---
QUESTION: {user_question}
ANSWER:
"""
    try:
        result = text_generator(
            prompt,
            max_length=300,
            num_beams=3,
            do_sample=True,   # required for `temperature` to have any effect
            temperature=temperature,
            truncation=True,  # cut over-long inputs instead of warning/erroring
        )
        bot_response = result[0]['generated_text']
    except Exception as e:
        raise gr.Error(f"An error occurred with the model: {e}")
    chat_history.append((user_question, bot_response))
    return "", chat_history
# --- Gradio UI Definition ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange", secondary_hue="orange"), title="Local Chat Analyzer") as demo:
    app_state = gr.State({})

    with gr.Column(visible=True) as welcome_page:
        gr.Markdown(
            """
            <div style='text-align: center; font-family: "Garamond", serif; padding-top: 30px;'>
                <h1 style='font-size: 3.5em;'>Local Chat Analyzer</h1>
                <p style='font-size: 1.5em; color: #555;'>Powered by a Hugging Face Model. No API key needed!</p>
            </div>
            """
        )
        gr.HTML(
            """
            <div style='text-align: center; padding: 20px;'>
                <img src='https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExd2Vjb3M2eGZzN2FkNWZpZzZ0bWl0c2JqZzZlMHVwZ2l4b2t0eXFpcyZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/YWjDA4k2n6d5Ew42zC/giphy.gif'
                     style='max-width: 350px; margin: auto; border-radius: 20px; box-shadow: 0 8px 16px rgba(0,0,0,0.1);' />
            </div>
            """
        )
        with gr.Row():
            with gr.Column():
                with gr.Accordion("How do I get my chat file?", open=False):
                    gr.Markdown("""
                    ### Exporting your WhatsApp Chat
                    1. **On your phone**, open the WhatsApp chat you want to analyze.
                    2. Tap the **three dots** (⋮) in the top-right corner.
                    3. Select **More** > **Export chat**.
                    4. Choose **Without media**. This will create a smaller `.txt` file.
                    5. Save the file to your phone or email it to yourself to access it on your computer.
                    6. For more details, visit the [official WhatsApp Help Center](https://faq.whatsapp.com/1180414079177245/).
                    """)
                gr.Markdown("### 1. Upload Your Chat File")
                chat_file_upload = gr.File(label="Upload WhatsApp/Telegram .txt Export")
            with gr.Column():
                gr.Markdown("### 2. Customize Parameters")
                context_slider = gr.Slider(500, 20000, value=5000, step=500, label="Context Window Size (Characters)")
                temp_slider = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
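                # Note: flan-t5-small's tokenizer caps inputs around 512 tokens
                # (very roughly 2,000 characters), so with truncation enabled,
                # context sizes above that mostly control what gets cut away
                # rather than what the model actually reads.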
        lets_chat_button = gr.Button("💬 Start Chatting 💬", variant="primary")
    with gr.Column(visible=False) as chat_page:
        gr.Markdown("<h1 style='text-align: center;'>Chat Analyzer</h1>")
        chatbot_ui = gr.Chatbot(height=600, bubble_full_width=False)
        with gr.Row():
            user_input_box = gr.Textbox(placeholder="Ask a question about your chat...", scale=5)
            submit_button = gr.Button("Send", variant="primary", scale=1)
    def go_to_chat(current_state, chat_file, context_size, temperature):
        if chat_file is None:
            raise gr.Error("A chat file must be uploaded.")
        # Depending on the Gradio version, gr.File yields a tempfile-like
        # object (with a .name path) or a plain path string; handle both.
        file_path = chat_file.name if hasattr(chat_file, "name") else chat_file
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
        parsed_content = parse_chat_file(content)
        if "Could not parse" in parsed_content:
            raise gr.Error("Failed to parse the chat file. Please check the format.")
        new_state = {
            "chat_content": parsed_content,
            "context_size": context_size,
            "temperature": temperature,
        }
        # Hide the welcome page and reveal the chat page.
        return (
            new_state,
            gr.Column(visible=False),
            gr.Column(visible=True)
        )
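    # Returning freshly constructed components carrying only the changed props
    # (here `visible`) is the Gradio 4 idiom for updating the layout at
    # runtime; on Gradio 3.x the equivalent would be gr.update(visible=...).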
    lets_chat_button.click(
        fn=go_to_chat,
        inputs=[app_state, chat_file_upload, context_slider, temp_slider],
        outputs=[app_state, welcome_page, chat_page]
    )
    submit_button.click(
        fn=process_chat_request,
        inputs=[user_input_box, chatbot_ui, app_state],
        outputs=[user_input_box, chatbot_ui]
    )
    # Pressing Enter in the textbox mirrors the Send button.
    user_input_box.submit(
        fn=process_chat_request,
        inputs=[user_input_box, chatbot_ui, app_state],
        outputs=[user_input_box, chatbot_ui]
    )
if __name__ == "__main__":
    demo.launch()
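# If you run this outside Spaces and want a shareable link, Gradio can tunnel
# the app for you (assuming outbound connections are allowed):
#
#     demo.launch(share=True)  # prints a temporary public *.gradio.live URL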