Spaces:
Running
Running
File size: 7,110 Bytes
5d21581 9c9855b 3e4e649 9c9855b f777e0d 819c84b e72507a 5d21581 9c9855b e72507a 9c9855b e72507a 3e4e649 9c9855b 4e847b8 9c9855b 3e4e649 9c9855b 3e4e649 5d21581 a1f96e4 4e847b8 f777e0d 5d21581 3e4e649 474cc40 f777e0d 474cc40 819c84b 474cc40 b2bd09b e21b20b badf556 474cc40 f777e0d 06db2bd f777e0d 474cc40 b2bd09b 3e4e649 f777e0d 06db2bd b2bd09b e21b20b 9c9855b 3e4e649 06db2bd b2bd09b f777e0d e21b20b badf556 474cc40 b560897 0029087 aabc428 474cc40 f777e0d b2bd09b badf556 f777e0d badf556 3e4e649 f777e0d badf556 f777e0d badf556 c5cb30b b46518b f777e0d 3e4e649 88377db aabc428 3e4e649 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 |
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from html import escape
import edge_tts
import asyncio
import os
import uuid
# OpenRouter API key, read once at import time. If the env var is unset this
# is None and ChatOpenAI authentication will fail at request time.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
class CodeAssistantBot:
    """LLM-backed assistant that answers questions about a user's code.

    Wraps an OpenRouter-hosted Llama-3 chat model behind three prompt
    templates: ``analysis_prompt`` for Q&A, ``summary_prompt`` for rolling
    conversation summaries, and ``voice_prompt`` for spoken-style narration.
    """

    def __init__(self):
        # Chat model served through OpenRouter's OpenAI-compatible endpoint.
        self.model = ChatOpenAI(
            model="meta-llama/llama-3-70b-instruct",
            base_url="https://openrouter.ai/api/v1",
            api_key=OPENROUTER_API_KEY,
            temperature=0.6,
        )
        self.analysis_prompt = ChatPromptTemplate.from_messages([
            ("system",
             "You are a skilled coding assistant. Use the following context and user input to help. Refer to previous summary and recent interactions to make answers accurate. Keep your response short, relevant, and conversational."),
            ("user",
             "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\nSummary: {summary}\nRecent: {recent}\nQuestion: {question}"),
        ])
        self.summary_prompt = ChatPromptTemplate.from_messages([
            ("system", "Summarize key technical points from the conversation so far."),
            ("user", "Conversation: {conversation}"),
        ])
        self.voice_prompt = ChatPromptTemplate.from_messages([
            ("system",
             "You are a friendly narrator voice bot. Given a technical answer and its context, explain it aloud like you're helping someone understand the topic clearly and confidently. Keep your response conversational and short not too long, but not over short."),
            ("user",
             "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\nConversation so far: {summary}\nAnswer to explain: {answer}"),
        ])

    def analyze_code(self, code, input, output, error, question, summary="", history=None):
        """Answer *question* about the given code context.

        *history* is a list of (question, answer) pairs; only the last four
        exchanges are forwarded to the model as recent context.
        """
        exchanges = (history or [])[-4:]
        recent = "\n".join(f"User: {q}\nBot: {a}" for q, a in exchanges)
        pipeline = self.analysis_prompt | self.model | StrOutputParser()
        return pipeline.invoke({
            'code': code,
            'input': input,
            'output': output,
            'error': error,
            'summary': summary,
            'recent': recent,
            'question': question,
        })

    def narrate_response(self, code, input, output, error, answer, summary=""):
        """Produce a spoken-style explanation of *answer* for TTS playback."""
        pipeline = self.voice_prompt | self.model | StrOutputParser()
        return pipeline.invoke({
            'code': code,
            'input': input,
            'output': output,
            'error': error,
            'summary': summary,
            'answer': answer,
        })
async def text_to_speech(text, filename, voice="fr-FR-VivienneMultilingualNeural"):
    """Synthesize *text* to an audio file via Microsoft Edge TTS.

    Parameters
    ----------
    text : str
        The narration text to speak.
    filename : str
        Destination path for the generated audio (mp3 by default).
    voice : str, optional
        Edge TTS voice name; defaults to the multilingual French voice
        the app previously hard-coded, so existing callers are unaffected.
    """
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(filename)
def render_chatbot(code, input, output, error):
st.markdown("""
<style>
.chat-container {
max-height: 60vh;
overflow-y: auto;
padding-right: 0.5rem;
border: 1px solid #ddd;
border-radius: 8px;
margin-top: 1rem;
padding: 1rem;
background-color: #f9f9f9;
}
.chat-message {
margin-bottom: 1rem;
word-wrap: break-word;
}
.user-message {
font-weight: bold;
color: #1a73e8;
}
.bot-message pre {
background-color: #f0f0f0;
padding: 0.5rem;
border-radius: 5px;
overflow-x: auto;
}
</style>
""", unsafe_allow_html=True)
st.session_state.setdefault('conversation', [])
st.session_state.setdefault('chat_summary', "")
st.session_state.setdefault('chat_display_count', 5)
st.session_state.setdefault('narrated_audio', {})
c1, c2 = st.columns([4, 1], gap='small')
with c1:
question = st.text_input("Ask something about your code...", key="chat_input")
with c2:
send = st.button("π")
if send and question:
bot = CodeAssistantBot()
history = st.session_state.conversation[-4:]
summary = st.session_state.chat_summary
response = bot.analyze_code(code, input, output, error, question, summary, history)
st.session_state.conversation.append((question, response))
st.session_state.chat_display_count = 5
if len(st.session_state.conversation) >= 3:
try:
full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
summarizer = bot.summary_prompt | bot.model | StrOutputParser()
st.session_state.chat_summary = summarizer.invoke({'conversation': full_chat})
except:
pass
total = len(st.session_state.conversation)
start = max(0, total - st.session_state.chat_display_count)
visible = list(reversed(st.session_state.conversation[start:]))
for idx, (q, a) in enumerate(visible):
st.markdown(f'<div class="chat-message user-message">{escape(q)}</div>', unsafe_allow_html=True)
def format_response(txt):
parts = txt.split('```')
result = ''
for j, part in enumerate(parts):
if j % 2 == 1:
lines = part.splitlines()
if lines and lines[0].isalpha():
lines = lines[1:]
code_html = escape("\n".join(lines))
result += f'<pre><code>{code_html}</code></pre>'
else:
result += escape(part)
return result
formatted = format_response(a)
st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
audio_file = st.session_state.narrated_audio.get((q, a))
if not audio_file:
if st.button("π Narrate", key=f"narrate_{idx}"):
status_placeholder = st.empty()
status_placeholder.info("π§ Generating narration...")
bot = CodeAssistantBot()
narration = bot.narrate_response(code, input, output, error, a, st.session_state.chat_summary)
status_placeholder.info("ποΈ Converting to audio...")
audio_file = f"audio_{uuid.uuid4().hex}.mp3"
asyncio.run(text_to_speech(narration, audio_file))
st.session_state.narrated_audio[(q, a)] = audio_file
status_placeholder.success("π Narration ready!")
st.audio(audio_file, format="audio/mp3", autoplay=True)
else:
st.audio(audio_file, format="audio/mp3", autoplay=False)
if start > 0 and st.button("π½ Show more"):
st.session_state.chat_display_count += 5
st.rerun()
st.markdown("""
<script>
const c = window.parent.document.querySelector('.chat-container');
if (c) c.scrollTop = c.scrollHeight;
</script>
""", unsafe_allow_html=True) |