# Chat_tester / app.py
import os
import time
import gc
import threading
import logging
from itertools import islice
from datetime import datetime
import re # for parsing <think> blocks
import gradio as gr
import torch
from transformers import pipeline, TextIteratorStreamer
from transformers import AutoTokenizer
from ddgs import DDGS
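# DDGS is the DuckDuckGo search client used for the optional web-search context below.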
import spaces # Import spaces early to enable ZeroGPU support
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(),
logging.FileHandler('app.log')
]
)
logger = logging.getLogger(__name__)
# Get Hugging Face token - works in both local and HF Spaces environments
access_token = os.environ.get('HF_TOKEN') or os.environ.get('HUGGINGFACE_HUB_TOKEN') or None
logger.info(f"🔑 Hugging Face token status: {'Available' if access_token else 'Not available (using public models only)'}")
# Optional: Disable GPU visibility if you wish to force CPU usage
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
# ------------------------------
# Global Cancellation Event
# ------------------------------
cancel_event = threading.Event()
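# Set by the "Cancel Generation" button and checked inside the streaming loop to stop generation early.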
# ------------------------------
# Torch-Compatible Model Definitions with Adjusted Descriptions
# ------------------------------
MODELS = {
# Accessible models (no gating required)
"Qwen2.5-3B-Instruct": {"repo_id": "Qwen/Qwen2.5-3B-Instruct", "description": "Qwen2.5-3B-Instruct - accessible and reliable"},
"Qwen2.5-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-7B-Instruct", "description": "Qwen2.5-7B-Instruct - accessible and reliable"},
"microsoft-DialoGPT-medium": {"repo_id": "microsoft/DialoGPT-medium", "description": "Microsoft DialoGPT Medium - accessible conversational model"},
"microsoft-DialoGPT-large": {"repo_id": "microsoft/DialoGPT-large", "description": "Microsoft DialoGPT Large - accessible conversational model"},
# … your existing entries …
"gpt-oss-20b": {"repo_id": "openai/gpt-oss-20b", "description": "openai/gpt-oss-20b"},
"Qwen2.5-Taiwan-1.5B-Instruct": {"repo_id": "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct", "description": "Qwen2.5-Taiwan-1.5B-Instruct"},
"parser_model_ner_gemma_v0.1": {
"repo_id": "myfi/parser_model_ner_gemma_v0.1",
"description": "A lightweight named‑entity‑like (NER) parser fine‑tuned from Google’s **Gemma‑3‑270M** model. The base Gemma‑3‑270M is a 270β€―M‑parameter, hyper‑efficient LLM designed for on‑device inference, supporting >140 languages, a 128β€―k‑token context window, and instruction‑following capabilitiesβ€―[2][7]. This variant is further trained on standard NER corpora (e.g., CoNLL‑2003, OntoNotes) to extract PERSON, ORG, LOC, and MISC entities with high precision while keeping the memory footprint low (β‰ˆ240β€―MB VRAM in BF16 quantized form)β€―[1]. It is released under the Apache‑2.0 license and can be used for fast, cost‑effective entity extraction in low‑resource environments."
},
"Gemma-3-Taiwan-270M-it":{
"repo_id":"lianghsun/Gemma-3-Taiwan-270M-it",
"description": "google/gemma-3-270m-it fintuned on Taiwan Chinese dataset"
},
"gemma-2-2b-it":{
"repo_id":"google/gemma-2-2b-it",
"description":"Gemma 2 2B Instruction-Tuned model - accessible alternative to Gemma 3",
},
"SmolLM-135M-Taiwan-Instruct-v1.0": {
"repo_id": "benchang1110/SmolLM-135M-Taiwan-Instruct-v1.0",
"description": "135-million-parameter F32 safetensors instruction-finetuned variant of SmolLM-135M-Taiwan, trained on the 416 k-example ChatTaiwan dataset for Traditional Chinese conversational and instruction-following tasks"
},
"Llama-3.2-Taiwan-1B": {
"repo_id": "lianghsun/Llama-3.2-Taiwan-1B",
"description": "Llama-3.2-Taiwan base model with 1 B parameters"
},
"Qwen2.5-0.5B-Taiwan-Instruct": {
"repo_id": "ShengweiPeng/Qwen2.5-0.5B-Taiwan-Instruct",
"description": "Qwen2.5-Taiwan model with 0.5 B parameters, instruction-tuned"
},
"Qwen3-0.6B-Taiwan": {
"repo_id": "ShengweiPeng/Qwen3-0.6B-Taiwan",
"description": "Qwen3-Taiwan model with 0.6 B parameters"
},
"Qwen2.5-Taiwan-3B-Reason-GRPO": {
"repo_id": "benchang1110/Qwen2.5-Taiwan-3B-Reason-GRPO",
"description":"Qwen2.5-Taiwan model with 3 B parameters, Reason-GRPO fine-tuned"
},
"Llama-3.2-Taiwan-1B": {
"repo_id": "lianghsun/Llama-3.2-Taiwan-1B",
"description":"Llama-3.2-Taiwan base model with 1 B parameters"
},
    # Gemma 3n "effective" variants (official Google repos)
    "Gemma-3n-E2B": {
        "repo_id": "google/gemma-3n-E2B",
        "description": "Gemma 3n base model with effective 2B parameters (≈2 GB VRAM)"
    },
    "Gemma-3n-E4B": {
        "repo_id": "google/gemma-3n-E4B",
        "description": "Gemma 3n base model with effective 4B parameters (≈3 GB VRAM)"
    },
    # PowerInfer SmallThinker (instruction-tuned)
    "SmallThinker-4BA0.6B-Instruct": {
        "repo_id": "PowerInfer/SmallThinker-4BA0.6B-Instruct",
        "description": "SmallThinker 4B backbone with 0.6B activated parameters, instruction-tuned"
    },
    # TIIUAE Falcon-H1 (instruction-tuned)
    "Falcon-H1-1.5B-Instruct": {
        "repo_id": "tiiuae/Falcon-H1-1.5B-Instruct",
        "description": "Falcon-H1 model with 1.5B parameters, instruction-tuned"
},
"Qwen/Qwen3-14B-FP8": {"repo_id": "Qwen/Qwen3-14B-FP8", "description": "Qwen/Qwen3-14B-FP8"},
#"Qwen/Qwen3-32B-FP8": {"repo_id": "Qwen/Qwen3-32B-FP8", "description": "Qwen/Qwen3-32B-FP8"},
"DeepSeek-R1-0528-Qwen3-8B": {"repo_id": "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", "description": "DeepSeek-R1-0528-Qwen3-8B"},
"Nemotron-Research-Reasoning-Qwen-1.5B": {"repo_id": "nvidia/Nemotron-Research-Reasoning-Qwen-1.5B", "description": "Nemotron-Research-Reasoning-Qwen-1.5B"},
"Taiwan-ELM-1_1B-Instruct": {"repo_id": "liswei/Taiwan-ELM-1_1B-Instruct", "description": "Taiwan-ELM-1_1B-Instruct"},
"Taiwan-ELM-270M-Instruct": {"repo_id": "liswei/Taiwan-ELM-270M-Instruct", "description": "Taiwan-ELM-270M-Instruct"},
# "Granite-4.0-Tiny-Preview": {"repo_id": "ibm-granite/granite-4.0-tiny-preview", "description": "Granite-4.0-Tiny-Preview"},
"Qwen3-0.6B": {"repo_id":"Qwen/Qwen3-0.6B","description":"Dense causal language model with 0.6 B total parameters (0.44 B non-embedding), 28 transformer layers, 16 query heads & 8 KV heads, native 32 768-token context window, dual-mode generation, full multilingual & agentic capabilities."},
"Qwen3-1.7B": {"repo_id":"Qwen/Qwen3-1.7B","description":"Dense causal language model with 1.7 B total parameters (1.4 B non-embedding), 28 layers, 16 query heads & 8 KV heads, 32 768-token context, stronger reasoning vs. 0.6 B variant, dual-mode inference, instruction following across 100+ languages."},
"Qwen3-4B": {"repo_id":"Qwen/Qwen3-4B","description":"Dense causal language model with 4.0 B total parameters (3.6 B non-embedding), 36 layers, 32 query heads & 8 KV heads, native 32 768-token context (extendable to 131 072 via YaRN), balanced mid-range capacity & long-context reasoning."},
"Qwen3-8B": {"repo_id":"Qwen/Qwen3-8B","description":"Dense causal language model with 8.2 B total parameters (6.95 B non-embedding), 36 layers, 32 query heads & 8 KV heads, 32 768-token context (131 072 via YaRN), excels at multilingual instruction following & zero-shot tasks."},
"Qwen3-14B": {"repo_id":"Qwen/Qwen3-14B","description":"Dense causal language model with 14.8 B total parameters (13.2 B non-embedding), 40 layers, 40 query heads & 8 KV heads, 32 768-token context (131 072 via YaRN), enhanced human preference alignment & advanced agent integration."},
# "Qwen3-32B": {"repo_id":"Qwen/Qwen3-32B","description":"Dense causal language model with 32.8 B total parameters (31.2 B non-embedding), 64 layers, 64 query heads & 8 KV heads, 32 768-token context (131 072 via YaRN), flagship variant delivering state-of-the-art reasoning & instruction following."},
# "Qwen3-30B-A3B": {"repo_id":"Qwen/Qwen3-30B-A3B","description":"Mixture-of-Experts model with 30.5 B total parameters (29.9 B non-embedding, 3.3 B activated per token), 48 layers, 128 experts (8 activated per token), 32 query heads & 4 KV heads, 32 768-token context (131 072 via YaRN), MoE routing for scalable specialized reasoning."},
# "Qwen3-235B-A22B":{"repo_id":"Qwen/Qwen3-235B-A22B","description":"Mixture-of-Experts model with 235 B total parameters (234 B non-embedding, 22 B activated per token), 94 layers, 128 experts (8 activated per token), 64 query heads & 4 KV heads, 32 768-token context (131 072 via YaRN), ultra-scale reasoning & agentic workflows."},
"Gemma-3-4B-IT": {"repo_id": "unsloth/gemma-3-4b-it", "description": "Gemma-3-4B-IT"},
"SmolLM2_135M_Grpo_Gsm8k":{"repo_id":"prithivMLmods/SmolLM2_135M_Grpo_Gsm8k", "desscription":"SmolLM2_135M_Grpo_Gsm8k"},
"SmolLM2-135M-Instruct-TaiwanChat": {"repo_id": "Luigi/SmolLM2-135M-Instruct-TaiwanChat", "description": "SmolLM2‑135M Instruct fine-tuned on TaiwanChat"},
"SmolLM2-135M-Instruct": {"repo_id": "HuggingFaceTB/SmolLM2-135M-Instruct", "description": "Original SmolLM2‑135M Instruct"},
"SmolLM2-360M-Instruct-TaiwanChat": {"repo_id": "Luigi/SmolLM2-360M-Instruct-TaiwanChat", "description": "SmolLM2‑360M Instruct fine-tuned on TaiwanChat"},
"SmolLM2-360M-Instruct": {"repo_id": "HuggingFaceTB/SmolLM2-360M-Instruct", "description": "Original SmolLM2‑360M Instruct"},
"Llama-3.2-Taiwan-3B-Instruct": {"repo_id": "lianghsun/Llama-3.2-Taiwan-3B-Instruct", "description": "Llama-3.2-Taiwan-3B-Instruct"},
"MiniCPM3-4B": {"repo_id": "openbmb/MiniCPM3-4B", "description": "MiniCPM3-4B"},
"Qwen2.5-3B-Instruct": {"repo_id": "Qwen/Qwen2.5-3B-Instruct", "description": "Qwen2.5-3B-Instruct"},
"Qwen2.5-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-7B-Instruct", "description": "Qwen2.5-7B-Instruct"},
"Phi-4-mini-Reasoning": {"repo_id": "microsoft/Phi-4-mini-reasoning", "description": "Phi-4-mini-Reasoning"},
# "Phi-4-Reasoning": {"repo_id": "microsoft/Phi-4-reasoning", "description": "Phi-4-Reasoning"},
"Phi-4-mini-Instruct": {"repo_id": "microsoft/Phi-4-mini-instruct", "description": "Phi-4-mini-Instruct"},
"Meta-Llama-3.1-8B-Instruct": {"repo_id": "MaziyarPanahi/Meta-Llama-3.1-8B-Instruct", "description": "Meta-Llama-3.1-8B-Instruct"},
"DeepSeek-R1-Distill-Llama-8B": {"repo_id": "unsloth/DeepSeek-R1-Distill-Llama-8B", "description": "DeepSeek-R1-Distill-Llama-8B"},
"Mistral-7B-Instruct-v0.3": {"repo_id": "MaziyarPanahi/Mistral-7B-Instruct-v0.3", "description": "Mistral-7B-Instruct-v0.3"},
"Qwen2.5-Coder-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-Coder-7B-Instruct", "description": "Qwen2.5-Coder-7B-Instruct"},
"Qwen2.5-Omni-3B": {"repo_id": "Qwen/Qwen2.5-Omni-3B", "description": "Qwen2.5-Omni-3B"},
"MiMo-7B-RL": {"repo_id": "XiaomiMiMo/MiMo-7B-RL", "description": "MiMo-7B-RL"},
}
# Global cache for pipelines to avoid re-loading.
PIPELINES = {}
def check_model_accessibility(repo_id, token=None):
"""
Check if a model is accessible without actually loading it.
Returns True if accessible, False if gated, raises exception for other errors.
"""
try:
from huggingface_hub import HfApi
api = HfApi(token=token)
model_info = api.model_info(repo_id)
# Check if model is gated
if hasattr(model_info, 'gated') and model_info.gated:
logger.warning(f"⚠️ Model {repo_id} is gated and requires special access")
return False
        logger.info(f"✅ Model {repo_id} is accessible")
return True
except Exception as e:
error_msg = str(e)
if "gated" in error_msg.lower() or "401" in error_msg or "access" in error_msg.lower():
logger.warning(f"⚠️ Model {repo_id} appears to be gated or requires access")
return False
else:
logger.error(f"❌ Error checking model accessibility: {e}")
raise
def load_pipeline(model_name):
"""
Load and cache a transformers pipeline for text generation.
Tries bfloat16, falls back to float16 or float32 if unsupported.
"""
global PIPELINES
    logger.info(f"🤖 Loading model: {model_name}")
    if model_name in PIPELINES:
        logger.info(f"✅ Model {model_name} already cached, using existing pipeline")
        return PIPELINES[model_name]
    repo = MODELS[model_name]["repo_id"]
    logger.info(f"📦 Repository: {repo}")
# Check model accessibility first
try:
if not check_model_accessibility(repo, access_token):
raise Exception(f"Model {repo} is gated and requires special access. Please request access at https://huggingface.co/{repo} or choose a different model.")
except Exception as e:
if "gated" in str(e).lower() or "access" in str(e).lower():
raise
else:
logger.warning(f"⚠️ Could not check model accessibility, proceeding with load attempt: {e}")
# Load tokenizer
    logger.info(f"🔤 Loading tokenizer for {repo}...")
try:
tokenizer = AutoTokenizer.from_pretrained(repo,
token=access_token if access_token else None)
        logger.info(f"✅ Tokenizer loaded successfully")
except Exception as e:
error_msg = str(e)
if "gated repo" in error_msg or "401" in error_msg or "Access to model" in error_msg:
logger.error(f"❌ Model {repo} is gated and requires special access permissions")
            logger.error(f"💡 Please visit https://huggingface.co/{repo} to request access")
            logger.error(f"💡 Or try a different model from the list")
raise Exception(f"Model {repo} requires special access. Please request access at https://huggingface.co/{repo} or choose a different model.")
else:
logger.error(f"❌ Failed to load tokenizer: {e}")
raise
# Try different data types for optimal performance
dtypes_to_try = [
(torch.bfloat16, "bfloat16 (recommended)"),
(torch.float16, "float16 (good performance)"),
(torch.float32, "float32 (fallback)")
]
for dtype, dtype_desc in dtypes_to_try:
        logger.info(f"🔄 Attempting to load model with {dtype_desc}...")
try:
pipe = pipeline(
task="text-generation",
model=repo,
tokenizer=tokenizer,
trust_remote_code=True,
torch_dtype=dtype,
device_map="auto",
use_cache=False, # ← disable past-key-value caching
token=access_token if access_token else None)
PIPELINES[model_name] = pipe
            logger.info(f"✅ Model {model_name} loaded successfully with {dtype_desc}")
            logger.info(f"💾 Model cached for future use")
return pipe
except Exception as e:
error_msg = str(e)
if "gated repo" in error_msg or "401" in error_msg or "Access to model" in error_msg:
logger.error(f"❌ Model {repo} is gated and requires special access permissions")
                logger.error(f"💡 Please visit https://huggingface.co/{repo} to request access")
raise Exception(f"Model {repo} requires special access. Please request access at https://huggingface.co/{repo} or choose a different model.")
else:
logger.warning(f"⚠️ Failed to load with {dtype_desc}: {e}")
continue
# Final fallback without specific dtype
    logger.warning(f"🔄 Attempting final fallback load without specific dtype...")
try:
pipe = pipeline(
task="text-generation",
model=repo,
tokenizer=tokenizer,
trust_remote_code=True,
device_map="auto"
)
PIPELINES[model_name] = pipe
        logger.info(f"✅ Model {model_name} loaded with fallback configuration")
return pipe
except Exception as e:
logger.error(f"❌ Failed to load model {model_name}: {e}")
raise
def retrieve_context(query, max_results=6, max_chars=600):
"""
Retrieve search snippets from DuckDuckGo (runs in background).
Returns a list of result strings.
"""
    logger.info(f"🔍 Starting web search for query: '{query[:100]}{'...' if len(query) > 100 else ''}'")
    logger.info(f"📊 Search parameters: max_results={max_results}, max_chars={max_chars}")
try:
with DDGS() as ddgs:
logger.info("🌐 Connected to DuckDuckGo search API")
results = []
for i, r in enumerate(islice(ddgs.text(query, region="wt-wt", safesearch="off", timelimit="y"), max_results)):
title = r.get('title', 'No Title')
body = r.get('body', '')[:max_chars]
result = f"{i+1}. {title} - {body}"
results.append(result)
                logger.info(f"📄 Found result {i+1}: {title[:50]}{'...' if len(title) > 50 else ''}")
            logger.info(f"✅ Web search completed: {len(results)} results found")
return results
except Exception as e:
logger.error(f"❌ Web search failed: {e}")
return []
def format_conversation(history, system_prompt, tokenizer):
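    """
    Build a generation prompt from Gradio-style (user, bot) history tuples and a system prompt.

    Uses the tokenizer's chat template when one is defined; otherwise falls back to a plain
    "User:" / "Assistant:" transcript. Example history: [("Hi", "Hello!"), ("How are you?", None)].
    """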
# Convert Gradio tuple format to message format for tokenizer
messages = [{"role": "system", "content": system_prompt.strip()}]
for user_msg, bot_msg in history:
if user_msg: # Add user message
messages.append({"role": "user", "content": user_msg})
if bot_msg: # Add bot message
messages.append({"role": "assistant", "content": bot_msg})
if hasattr(tokenizer, "chat_template") and tokenizer.chat_template:
return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, enable_thinking=True)
else:
# Fallback for base LMs without chat template
prompt = system_prompt.strip() + "\n"
for msg in messages[1:]: # Skip system message
if msg['role'] == 'user':
prompt += "User: " + msg['content'].strip() + "\n"
elif msg['role'] == 'assistant':
prompt += "Assistant: " + msg['content'].strip() + "\n"
if not prompt.strip().endswith("Assistant:"):
prompt += "Assistant: "
return prompt
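# ZeroGPU: @spaces.GPU requests a GPU slice for up to `duration` seconds per call;
# outside a ZeroGPU Space the decorator should behave as a no-op.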
@spaces.GPU(duration=60)
def chat_response(user_msg, chat_history, system_prompt,
enable_search, max_results, max_chars,
model_name, max_tokens, temperature,
top_k, top_p, repeat_penalty, search_timeout):
"""
Generates streaming chat responses, optionally with background web search.
"""
logger.info("=" * 60)
logger.info("πŸš€ Starting new chat response generation")
logger.info(f"πŸ‘€ User message: '{user_msg[:100]}{'...' if len(user_msg) > 100 else ''}'")
logger.info(f"πŸ€– Selected model: {model_name}")
logger.info(f"πŸ” Web search enabled: {enable_search}")
logger.info(f"βš™οΈ Generation params: max_tokens={max_tokens}, temp={temperature}, top_k={top_k}, top_p={top_p}")
    # Validate inputs
    if not user_msg or not user_msg.strip():
        logger.error("❌ Empty user message received")
        # chat_response is a generator; yield the error so Gradio can display it, then stop
        yield list(chat_history or []), "Error: Empty message received"
        return
    if model_name not in MODELS:
        logger.error(f"❌ Invalid model name: {model_name}")
        yield list(chat_history or []), f"Error: Invalid model '{model_name}'"
        return
cancel_event.clear()
history = list(chat_history or [])
history.append((user_msg, None)) # Add user message, bot response will be added later
    logger.info(f"📝 Chat history length: {len(history)} messages")
# Launch web search if enabled
debug = ''
search_results = []
if enable_search:
logger.info("πŸ” Initiating background web search...")
debug = 'Search task started.'
thread_search = threading.Thread(
target=lambda: search_results.extend(
retrieve_context(user_msg, int(max_results), int(max_chars))
)
)
thread_search.daemon = True
thread_search.start()
logger.info("βœ… Web search thread started in background")
else:
logger.info("🚫 Web search disabled by user")
debug = 'Web search disabled.'
try:
cur_date = datetime.now().strftime('%Y-%m-%d')
# merge any fetched search results into the system prompt
if search_results:
enriched = system_prompt.strip() + \
f'''\n# The following contents are the search results related to the user's message:
{search_results}
In the search results I provide to you, each result is formatted as [webpage X begin]...[webpage X end], where X represents the numerical index of each article. Please cite the context at the end of the relevant sentence when appropriate. Use the citation format [citation:X] in the corresponding part of your answer. If a sentence is derived from multiple contexts, list all relevant citation numbers, such as [citation:3][citation:5]. Be sure not to cluster all citations at the end; instead, include them in the corresponding parts of the answer.
When responding, please keep the following points in mind:
- Today is {cur_date}.
- Not all content in the search results is closely related to the user's question. You need to evaluate and filter the search results based on the question.
- For listing-type questions (e.g., listing all flight information), try to limit the answer to 10 key points and inform the user that they can refer to the search sources for complete information. Prioritize providing the most complete and relevant items in the list. Avoid mentioning content not provided in the search results unless necessary.
- For creative tasks (e.g., writing an essay), ensure that references are cited within the body of the text, such as [citation:3][citation:5], rather than only at the end of the text. You need to interpret and summarize the user's requirements, choose an appropriate format, fully utilize the search results, extract key information, and generate an answer that is insightful, creative, and professional. Extend the length of your response as much as possible, addressing each point in detail and from multiple perspectives, ensuring the content is rich and thorough.
- If the response is lengthy, structure it well and summarize it in paragraphs. If a point-by-point format is needed, try to limit it to 5 points and merge related content.
- For objective Q&A, if the answer is very brief, you may add one or two related sentences to enrich the content.
- Choose an appropriate and visually appealing format for your response based on the user's requirements and the content of the answer, ensuring strong readability.
- Your answer should synthesize information from multiple relevant webpages and avoid repeatedly citing the same webpage.
- Unless the user requests otherwise, your response should be in the same language as the user's question.
# The user's message is:
'''
else:
enriched = system_prompt
# wait up to search_timeout for snippets, then replace debug with them
if enable_search:
logger.info(f"⏳ Waiting for search results (timeout: {search_timeout}s)...")
thread_search.join(timeout=float(search_timeout))
if search_results:
                logger.info(f"✅ Search completed: {len(search_results)} results found")
debug = "### Search results merged into prompt\n\n" + "\n".join(
f"- {r}" for r in search_results
)
else:
logger.warning("⚠️ No web search results found")
debug = "*No web search results found.*"
# merge fetched snippets into the system prompt
if search_results:
enriched = system_prompt.strip() + \
f'''\n# The following contents are the search results related to the user's message:
{search_results}
In the search results I provide to you, each result is formatted as [webpage X begin]...[webpage X end], where X represents the numerical index of each article. Please cite the context at the end of the relevant sentence when appropriate. Use the citation format [citation:X] in the corresponding part of your answer. If a sentence is derived from multiple contexts, list all relevant citation numbers, such as [citation:3][citation:5]. Be sure not to cluster all citations at the end; instead, include them in the corresponding parts of the answer.
When responding, please keep the following points in mind:
- Today is {cur_date}.
- Not all content in the search results is closely related to the user's question. You need to evaluate and filter the search results based on the question.
- For listing-type questions (e.g., listing all flight information), try to limit the answer to 10 key points and inform the user that they can refer to the search sources for complete information. Prioritize providing the most complete and relevant items in the list. Avoid mentioning content not provided in the search results unless necessary.
- For creative tasks (e.g., writing an essay), ensure that references are cited within the body of the text, such as [citation:3][citation:5], rather than only at the end of the text. You need to interpret and summarize the user's requirements, choose an appropriate format, fully utilize the search results, extract key information, and generate an answer that is insightful, creative, and professional. Extend the length of your response as much as possible, addressing each point in detail and from multiple perspectives, ensuring the content is rich and thorough.
- If the response is lengthy, structure it well and summarize it in paragraphs. If a point-by-point format is needed, try to limit it to 5 points and merge related content.
- For objective Q&A, if the answer is very brief, you may add one or two related sentences to enrich the content.
- Choose an appropriate and visually appealing format for your response based on the user's requirements and the content of the answer, ensuring strong readability.
- Your answer should synthesize information from multiple relevant webpages and avoid repeatedly citing the same webpage.
- Unless the user requests otherwise, your response should be in the same language as the user's question.
# The user's message is:
'''
else:
enriched = system_prompt
logger.info("πŸ€– Loading model pipeline...")
pipe = load_pipeline(model_name)
logger.info("πŸ“ Formatting conversation prompt...")
prompt = format_conversation(history, enriched, pipe.tokenizer)
prompt_debug = f"\n\n--- Prompt Preview ---\n```\n{prompt}\n```"
        logger.info(f"📊 Prompt length: {len(prompt)} characters")
logger.info("🎯 Setting up text streaming...")
streamer = TextIteratorStreamer(pipe.tokenizer,
skip_prompt=True,
skip_special_tokens=True)
logger.info("πŸš€ Starting text generation...")
gen_thread = threading.Thread(
target=pipe,
args=(prompt,),
kwargs={
'max_new_tokens': max_tokens,
'temperature': temperature,
'top_k': top_k,
'top_p': top_p,
'repetition_penalty': repeat_penalty,
'streamer': streamer,
'return_full_text': False,
}
)
gen_thread.start()
logger.info("βœ… Generation thread started")
# Buffers for thought vs answer
thought_buf = ''
answer_buf = ''
in_thought = False
token_count = 0
logger.info("πŸ“‘ Starting token streaming...")
# Stream tokens
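        # Some reasoning models wrap their chain of thought in <think>...</think>;
        # the loop below shows that block as a separate "💭" bubble before streaming the answer.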
for chunk in streamer:
if cancel_event.is_set():
logger.info("πŸ›‘ Generation cancelled by user")
break
text = chunk
token_count += 1
# Detect start of thinking
if not in_thought and '<think>' in text:
logger.info("πŸ’­ Detected thinking block start")
in_thought = True
# Insert thought placeholder
                history.append((None, "💭 Thinking..."))
# Capture after opening tag
after = text.split('<think>', 1)[1]
thought_buf += after
# If closing tag in same chunk
if '</think>' in thought_buf:
before, after2 = thought_buf.split('</think>', 1)
                    history[-1] = (None, f"💭 {before.strip()}")
                    in_thought = False
                    logger.info("💭 Thinking block completed, starting answer")
# Start answer buffer
answer_buf = after2
history.append((None, answer_buf))
else:
                    history[-1] = (None, f"💭 {thought_buf}")
yield history, debug
continue
# Continue thought streaming
if in_thought:
thought_buf += text
if '</think>' in thought_buf:
before, after2 = thought_buf.split('</think>', 1)
                    history[-1] = (None, f"💭 {before.strip()}")
                    in_thought = False
                    logger.info("💭 Thinking block completed, starting answer")
# Start answer buffer
answer_buf = after2
history.append((None, answer_buf))
else:
                    history[-1] = (None, f"💭 {thought_buf}")
yield history, debug
continue
# Stream answer
if not answer_buf:
logger.info("πŸ“ Starting answer generation")
history.append((None, ''))
answer_buf += text
history[-1] = (None, answer_buf)
yield history, debug
gen_thread.join()
        logger.info(f"✅ Generation completed: {token_count} tokens generated")
yield history, debug + prompt_debug
except Exception as e:
logger.error(f"❌ Error during generation: {e}")
history.append((None, f"Error: {e}"))
yield history, debug
finally:
logger.info("🧹 Cleaning up memory...")
gc.collect()
logger.info("=" * 60)
def cancel_generation():
logger.info("πŸ›‘ User requested generation cancellation")
cancel_event.set()
return 'Generation cancelled.'
def update_default_prompt(enable_search):
    return "You are a helpful assistant."
# ------------------------------
# Gradio UI
# ------------------------------
with gr.Blocks(title="LLM Inference") as demo:
gr.Markdown("## 🧠 LLM Inference with Web Search")
gr.Markdown("Interact with the model. Select parameters and chat below.")
gr.Markdown("πŸ’‘ **Tip**: If you get access errors, try models like 'Qwen2.5-3B-Instruct' or 'microsoft-DialoGPT-medium' which are publicly accessible.")
with gr.Row():
with gr.Column(scale=3):
model_dd = gr.Dropdown(label="Select Model", choices=list(MODELS.keys()), value=list(MODELS.keys())[0])
search_chk = gr.Checkbox(label="Enable Web Search", value=True)
sys_prompt = gr.Textbox(label="System Prompt", lines=3, value=update_default_prompt(search_chk.value))
gr.Markdown("### Generation Parameters")
max_tok = gr.Slider(64, 16384, value=2048, step=32, label="Max Tokens")
temp = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
k = gr.Slider(1, 100, value=40, step=1, label="Top-K")
p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-P")
rp = gr.Slider(1.0, 2.0, value=1.2, step=0.1, label="Repetition Penalty")
gr.Markdown("### Web Search Settings")
mr = gr.Number(value=6, precision=0, label="Max Results")
mc = gr.Number(value=600, precision=0, label="Max Chars/Result")
st = gr.Slider(minimum=0.0, maximum=30.0, step=0.5, value=5.0, label="Search Timeout (s)")
clr = gr.Button("Clear Chat")
cnl = gr.Button("Cancel Generation")
with gr.Column(scale=7):
chat = gr.Chatbot()
txt = gr.Textbox(placeholder="Type your message and press Enter...")
dbg = gr.Markdown()
search_chk.change(fn=update_default_prompt, inputs=search_chk, outputs=sys_prompt)
clr.click(fn=lambda: ([], "", ""), outputs=[chat, txt, dbg])
cnl.click(fn=cancel_generation, outputs=dbg)
txt.submit(fn=chat_response,
inputs=[txt, chat, sys_prompt, search_chk, mr, mc,
model_dd, max_tok, temp, k, p, rp, st],
outputs=[chat, dbg])
logger.info("πŸš€ Starting Gradio application...")
try:
demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
except Exception as e:
logger.error(f"❌ Failed to launch Gradio app: {e}")
raise