import os
import gradio as gr
from groq import Groq
from datetime import datetime
import time
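
# A Gradio app that answers queries with Groq's compound models
# (compound-beta / compound-beta-mini), which can call built-in web search for
# real-time information. RealTimeFactChecker wraps client setup, prompting, and
# tool reporting; create_interface() builds the UI around it.
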
class RealTimeFactChecker:
    def __init__(self):
        self.client = None
        self.model_options = ["compound-beta", "compound-beta-mini"]
    def initialize_client(self, api_key):
        """Initialize Groq client with API key"""
        try:
            self.client = Groq(api_key=api_key)
            return True, "✅ API Key validated successfully!"
        except Exception as e:
            return False, f"❌ Error initializing client: {str(e)}"
    def get_system_prompt(self):
        """Get the system prompt for consistent behavior"""
        return """You are a Real-time Fact Checker and News Agent. Your primary role is to provide accurate, up-to-date information by leveraging web search when needed.

CORE RESPONSIBILITIES:
1. **Fact Verification**: Always verify claims with current, reliable sources
2. **Real-time Information**: Use web search for any information that changes frequently (news, stocks, weather, current events)
3. **Source Transparency**: When using web search, mention the sources or indicate that you've searched for current information
4. **Accuracy First**: If information is uncertain or conflicting, acknowledge this clearly

RESPONSE GUIDELINES:
- **Structure**: Start with a clear, direct answer, then provide supporting details
- **Recency**: Always prioritize the most recent, reliable information
- **Clarity**: Use clear, professional language while remaining accessible
- **Completeness**: Provide comprehensive answers but stay focused on the query
- **Source Awareness**: When you've searched for information, briefly indicate this (e.g., "Based on current reports..." or "Recent data shows...")

WHEN TO SEARCH:
- Breaking news or current events
- Stock prices, market data, or financial information
- Weather conditions or forecasts
- Recent scientific discoveries or research
- Current political developments
- Real-time statistics or data
- Verification of recent claims or rumors

RESPONSE FORMAT:
- Lead with key facts
- Include relevant context
- Mention timeframe when relevant (e.g., "as of today", "this week")
- If multiple sources conflict, acknowledge this
- End with a clear summary for complex topics

Remember: Your goal is to be the most reliable, up-to-date source of information possible."""
    def query_compound_model(self, query, model, temperature=0.7):
        """Query the compound model and return response with tool execution info"""
        if not self.client:
            return "❌ Please set a valid API key first.", None, None
        try:
            start_time = time.time()
            system_prompt = self.get_system_prompt()
            chat_completion = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": system_prompt
                    },
                    {
                        "role": "user",
                        "content": query,
                    }
                ],
                model=model,
                temperature=temperature,
                max_tokens=1500
            )
            end_time = time.time()
            response_time = round(end_time - start_time, 2)
            response_content = chat_completion.choices[0].message.content
            executed_tools = getattr(chat_completion.choices[0].message, 'executed_tools', None)
            tool_info = self.format_tool_info(executed_tools)
            return response_content, tool_info, response_time
        except Exception as e:
            return f"❌ Error querying model: {str(e)}", None, None
    def format_tool_info(self, executed_tools):
        """Format executed tools information for display"""
        if not executed_tools:
            return "🔍 Tools Used: None (Used existing knowledge)"
        tool_info = "🔍 Tools Used:\n"
        for i, tool in enumerate(executed_tools, 1):
            try:
                if hasattr(tool, 'name'):
                    tool_name = tool.name
                elif hasattr(tool, 'tool_name'):
                    tool_name = tool.tool_name
                elif isinstance(tool, dict):
                    tool_name = tool.get('name', 'Unknown')
                else:
                    tool_name = str(tool)
                tool_info += f"{i}. {tool_name}\n"
                if hasattr(tool, 'parameters'):
                    params = tool.parameters
                    if isinstance(params, dict):
                        for key, value in params.items():
                            tool_info += f"   - {key}: {value}\n"
                elif hasattr(tool, 'input'):
                    tool_info += f"   - Input: {tool.input}\n"
            except Exception:
                tool_info += f"{i}. Tool {i} (Error parsing details)\n"
        return tool_info
def create_interface():
    fact_checker = RealTimeFactChecker()

    def validate_api_key(api_key):
        if not api_key or api_key.strip() == "":
            return "❌ Please enter a valid API key", False
        success, message = fact_checker.initialize_client(api_key.strip())
        return message, success

    def process_query(query, model, temperature, api_key):
        if not api_key or api_key.strip() == "":
            return "❌ Please set your API key first", "", ""
        if not query or query.strip() == "":
            return "❌ Please enter a query", "", ""
        if not fact_checker.client:
            success, message = fact_checker.initialize_client(api_key.strip())
            if not success:
                return message, "", ""
        response, tool_info, response_time = fact_checker.query_compound_model(
            query.strip(), model, temperature
        )
        if response_time is None:
            # The query failed; surface the error text without timing or tool info
            return response, "", ""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        formatted_response = f"**Query:** {query}\n\n**Response:**\n{response}\n\n---\n*Generated at {timestamp} in {response_time}s*"
        return formatted_response, tool_info or "", f"Response time: {response_time}s"
    with gr.Blocks(title="Real-time Fact Checker & News Agent", theme=gr.themes.Ocean()) as demo:
        gr.Markdown("# Real-time Fact Checker & News Agent")
        gr.Markdown("Powered by Groq's Compound Models with Built-in Web Search")

        with gr.Row():
            with gr.Column():
                api_key_input = gr.Textbox(
                    label="Groq API Key",
                    placeholder="Enter your Groq API key here...",
                    type="password",
                    info="Get your free API key from https://console.groq.com/"
                )
                api_status = gr.Textbox(
                    label="Status",
                    value="Please enter your API key",
                    interactive=False
                )
                validate_btn = gr.Button("Validate API Key")

                query_input = gr.Textbox(
                    label="Query",
                    placeholder="e.g., What are the latest AI developments today?",
                    lines=4
                )
                with gr.Row():
                    model_choice = gr.Dropdown(
                        choices=fact_checker.model_options,
                        value="compound-beta",
                        label="Model",
                        info="compound-beta: More capable | compound-beta-mini: Faster"
                    )
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                        info="Higher = more creative, Lower = more focused"
                    )
                with gr.Row():
                    submit_btn = gr.Button("Get Real-time Information")
                    clear_btn = gr.Button("Clear")

            with gr.Column():
                response_output = gr.Markdown(
                    label="Response",
                    value="*Your response will appear here...*"
                )
                tool_info_output = gr.Markdown(
                    label="Tool Execution Info",
                    value="*Tool execution details will appear here...*"
                )
                performance_output = gr.Textbox(
                    label="Performance",
                    value="",
                    interactive=False
                )

        validate_btn.click(
            fn=validate_api_key,
            inputs=[api_key_input],
            outputs=[api_status, gr.State()]
        )
        submit_btn.click(
            fn=process_query,
            inputs=[query_input, model_choice, temperature, api_key_input],
            outputs=[response_output, tool_info_output, performance_output]
        )
        clear_btn.click(
            fn=lambda: ("", "*Your response will appear here...*", "*Tool execution details will appear here...*", ""),
            outputs=[query_input, response_output, tool_info_output, performance_output]
        )
gr.Markdown("""
### Useful Links
- [Groq Console](https://console.groq.com/) - Get your free API key
- [Groq Documentation](https://console.groq.com/docs/quickstart) - Learn more about Groq models
- [Compound Models Info](https://console.groq.com/docs/models) - Details about compound models
### Tips
- The compound models automatically use web search when real-time information is needed
- Try different temperature settings: 0.1 for factual queries, 0.7-0.9 for creative questions
- compound-beta is more capable but slower, compound-beta-mini is faster but less capable
- Check the Tool Execution Info to see when web search was used
""")
    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch(
        share=True
    )
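
# Note: share=True asks Gradio to also create a temporary public link; for a
# local-only server, call demo.launch() with no arguments (assumption: the
# default Gradio launch behavior is sufficient for this app).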