---
license: mit
language:
- fr
---
# Clone this dataset and go crazy
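The whole app is the single Streamlit script below: it loads `HuggingFaceTB/SmolLM-135M-Instruct` through a `transformers` text-generation pipeline, keeps the conversation in `st.session_state`, and renders it with Streamlit's chat components.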
```python
import streamlit as st
from transformers import pipeline


# Page setup
st.title("Ray - Mon IA")


# Load the model (cached so it is only downloaded and initialized once)
@st.cache_resource
def load_model():
    return pipeline("text-generation", model="HuggingFaceTB/SmolLM-135M-Instruct")

chat_ia = load_model()


# Initialize the conversation history
if "messages" not in st.session_state:
    st.session_state.messages = []


# Replay the previous messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Input box
if prompt := st.chat_input("Dis quelque chose à Ray..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate the reply (note: only the latest prompt is sent, not the full history)
    with st.chat_message("assistant"):
        full_prompt = f"Tu es Ray, un assistant poli en français.\nUser: {prompt}\nAssistant:"
        # max_new_tokens caps the reply itself; max_length would also count the prompt tokens
        res = chat_ia(full_prompt, max_new_tokens=150, do_sample=True)
        reponse = res[0]["generated_text"].split("Assistant:")[-1].strip()
        st.markdown(reponse)
    st.session_state.messages.append({"role": "assistant", "content": reponse})
```
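One limitation worth keeping in mind: `full_prompt` only ever contains the latest user message, so Ray forgets everything said earlier in the conversation. Below is a minimal sketch of one way to feed the whole history back in, assuming the model's chat template accepts a `system` role (the `build_prompt` helper is hypothetical, not part of the app above):

```python
# Hypothetical helper: build the prompt from the full chat history using the
# model's own chat template instead of the hand-rolled "User:/Assistant:" format.
def build_prompt(history, system="Tu es Ray, un assistant poli en français."):
    messages = [{"role": "system", "content": system}] + history
    # chat_ia.tokenizer is the tokenizer the pipeline already loaded
    return chat_ia.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

# Inside the assistant block, the hand-built prompt would become:
# full_prompt = build_prompt(st.session_state.messages)
```

Since a templated prompt no longer ends with the literal `Assistant:`, you would also pass `return_full_text=False` to the pipeline call so it returns only the newly generated text, instead of splitting on `Assistant:`.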
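To try it locally, save the script as `app.py` (the filename is just an assumption, any name works), install the dependencies with `pip install streamlit transformers torch`, and launch it with `streamlit run app.py`. The model weights (roughly 135M parameters, per the model name) are downloaded from the Hugging Face Hub on first run and cached afterwards.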