import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
import torch
import threading
import time

# Load the model once at startup; fp16 on GPU, fp32 on CPU.
model_id = "lambdaindie/lambdai"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Custom CSS: JetBrains Mono everywhere, a dark theme, and a pulsing
# "thinking" box used to display the model's intermediate reasoning.
css = """
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono&display=swap');

* {
    font-family: 'JetBrains Mono', monospace !important;
}

body {
    background-color: #111;
    color: #e0e0e0;
}

.markdown-think {
    background-color: #1e1e1e;
    border-left: 4px solid #555;
    padding: 10px;
    margin-bottom: 8px;
    font-style: italic;
    white-space: pre-wrap;
    animation: pulse 1.5s infinite ease-in-out;
}

@keyframes pulse {
    0% { opacity: 0.6; }
    50% { opacity: 1.0; }
    100% { opacity: 0.6; }
}
"""

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the chat history in the role/content format expected by
    # tokenizer.apply_chat_template.
    messages = [{"role": "system", "content": system_message}] if system_message else []
    for user, assistant in history:
        if user:
            messages.append({"role": "user", "content": user})
        if assistant:
            messages.append({"role": "assistant", "content": assistant})

    # First pass: ask the model to reason step-by-step before answering.
    thinking_prompt = messages + [{
        "role": "user",
        "content": f"{message}\n\nThink step-by-step.",
    }]
    prompt = tokenizer.apply_chat_template(
        thinking_prompt, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    reasoning = ""
    # The source is truncated mid-statement here; the placeholder below is an
    # assumption, yielding the pulsing "thinking" box styled by .markdown-think.
    yield '<div class="markdown-think">Thinking...</div>'
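    # --- Hedged sketch of the missing continuation, assuming the standard
    # TextIteratorStreamer pattern: run model.generate on a background thread
    # and stream partial reasoning into the animated box as tokens arrive.
    # Everything from here on is an assumption, not the original author's code.
    generation_kwargs = dict(
        **inputs,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        streamer=streamer,
    )
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # Each item from the streamer is a decoded text chunk; accumulate it and
    # re-yield the whole reasoning trace so the UI updates in place.
    for token in streamer:
        reasoning += token
        yield f'<div class="markdown-think">{reasoning}</div>'
    thread.join()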