datacipen committed on
Commit
406109c
·
verified ·
1 Parent(s): cf30aa3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -18,6 +18,7 @@ _dash_renderer._set_react_version("18.2.0")
18
  import plotly.io as pio
19
  from langchain_community.llms import HuggingFaceEndpoint
20
  from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
 
21
  from langchain.schema.output_parser import StrOutputParser
22
  from pinecone import Pinecone
23
  from bs4 import BeautifulSoup
@@ -1461,12 +1462,13 @@ def run_chatbot(n_clicks, n_submit, user_input, chat_history, array_value):
1461
 
1462
  os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN']
1463
  #repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
1464
- repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
1465
  #repo_id = "microsoft/Phi-3.5-mini-instruct"
1466
  #mistral_url = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x22B-Instruct-v0.1"
1467
- llm = HuggingFaceEndpoint(
1468
- repo_id=repo_id, task="text2text-generation", max_new_tokens=8000, temperature=0.7, streaming=True
1469
- )
 
1470
  model_output = ""
1471
  chain = prompt | llm | StrOutputParser()
1472
  for s in chain.stream({"question":"D'après le contexte, " + user_input,"context":context_p}):
 
18
  import plotly.io as pio
19
  from langchain_community.llms import HuggingFaceEndpoint
20
  from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
21
+ from langchain_openai import ChatOpenAI
22
  from langchain.schema.output_parser import StrOutputParser
23
  from pinecone import Pinecone
24
  from bs4 import BeautifulSoup
 
1462
 
1463
  os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN']
1464
  #repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
1465
+ #repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
1466
  #repo_id = "microsoft/Phi-3.5-mini-instruct"
1467
  #mistral_url = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x22B-Instruct-v0.1"
1468
+ #llm = HuggingFaceEndpoint(
1469
+ # repo_id=repo_id, task="text2text-generation", max_new_tokens=8000, temperature=0.7, streaming=True
1470
+ #)
1471
+ llm = ChatOpenAI(model_name="mistralai/Mistral-Small-3.1-24B-Instruct-2503", base_url=os.environ['BASEURL_RENNES_API_KEY'], api_key=os.environ['ENDPOINT_RENNES_API_KEY'])
1472
  model_output = ""
1473
  chain = prompt | llm | StrOutputParser()
1474
  for s in chain.stream({"question":"D'après le contexte, " + user_input,"context":context_p}):