import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from datasets import load_dataset, Dataset, concatenate_datasets
from huggingface_hub import login
import os
# --- Hugging Face Dataset Setup ---
HF_TOKEN = os.environ.get("dataset_HF_TOKEN")  # Secret in your HF Space
login(token=HF_TOKEN)
dataset_name = "YOUR_USERNAME/guardian-ai-qna"  # Replace YOUR_USERNAME

try:
    # load_dataset returns a DatasetDict, so select the train split explicitly
    dataset = load_dataset(dataset_name, split="train")
except Exception:
    # If the dataset is empty or not yet created, start with an empty one
    dataset = Dataset.from_dict({"question": [], "answer": []})
# --- Load model & tokenizer ---
model_id = "google/gemma-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1  # CPU; change to 0 if a GPU is available
)
# --- System instruction ---
SYSTEM_PROMPT = """You are Guardian AI, a friendly cybersecurity educator.
Your goal is to explain cybersecurity concepts in simple, engaging language with examples.
Always keep answers clear, short, and focused on security awareness.
"""
# --- Save Q&A to dataset ---
def save_qna(question, answer):
    global dataset
    new_entry = Dataset.from_dict({"question": [question], "answer": [answer]})
    # Dataset has no .concat() method; use datasets.concatenate_datasets instead
    dataset = concatenate_datasets([dataset, new_entry])
    dataset.push_to_hub(dataset_name, private=False)  # push updates to the Hub
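# Note (design choice, not from the original listing): push_to_hub re-uploads the
# dataset on every question, which is slow and requires a token with write access.
# For anything beyond a demo you would likely batch the saves instead of pushing
# per message; the per-message push is kept here for simplicity.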
# --- Chat function ---
def chat(history, user_input):
    prompt = SYSTEM_PROMPT + "\nUser: " + user_input + "\nGuardian AI:"
    result = generator(
        prompt,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.9
    )[0]["generated_text"]
    # The pipeline returns the prompt plus the completion; keep only the reply
    response = result.split("Guardian AI:")[-1].strip()
    history.append((user_input, response))
    # Save to dataset
    save_qna(user_input, response)
    return history, history
# --- Gradio UI ---
with gr.Blocks() as demo:
    gr.Markdown("## 🛡️ Guardian AI – Cybersecurity Educator")

    chatbot = gr.Chatbot()
    state = gr.State([])

    with gr.Row():
        with gr.Column(scale=8):
            user_input = gr.Textbox(show_label=False, placeholder="Ask me about cybersecurity...")
        with gr.Column(scale=2):
            send_btn = gr.Button("Send")

    send_btn.click(chat, [state, user_input], [chatbot, state])
    user_input.submit(chat, [state, user_input], [chatbot, state])

demo.launch()
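# The Space also needs a requirements.txt next to app.py; a minimal sketch based on
# the imports above (exact package versions/pins are an assumption, not from the source):
#   gradio
#   transformers
#   datasets
#   huggingface_hub
#   torch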