import gradio as gr
from transformers import pipeline
import shutil

# LangChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain_community.llms import HuggingFaceHub
# Cell 1: Image Classification Model
image_pipeline = pipeline(task="image-classification", model="microsoft/resnet-50")
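# Classify the uploaded image and return it unchanged alongside a
# {label: score} mapping that Gradio's Label component can render.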
def predict_image(input_img):
    predictions = image_pipeline(input_img)
    return input_img, {p["label"]: p["score"] for p in predictions}
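# Gradio interface for the classifier: accepts an upload or a webcam capture
# and shows the processed image together with the two top-scoring labels.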
image_gradio_app = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(label="Select hot dog candidate", sources=["upload", "webcam"], type="pil"),
    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
    title="Hot Dog? Or Not?",
)
# Cell 2: Chatbot Model
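# Load every PDF in the local 'pdfs' directory into LangChain documents.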
loader = PyPDFDirectoryLoader("pdfs")
data = loader.load()
# Split the documents into overlapping chunks so retrieved passages keep
# context across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
docs = text_splitter.split_documents(data)
# Define the embedding model used to vectorize the chunks.
embeddings = HuggingFaceEmbeddings(model_name="thenlper/gte-base")
# Create a Chroma vector database from the chunked documents.
persist_directory = "docs/chroma/"
# Remove old database files, if any, so stale embeddings are not reused.
shutil.rmtree(persist_directory, ignore_errors=True)
vectordb = Chroma.from_documents(
    documents=docs,
    embedding=embeddings,
    persist_directory=persist_directory,
)
# Define the retriever; MMR (maximal marginal relevance) trades off relevance
# against diversity among the retrieved chunks.
retriever = vectordb.as_retriever(search_type="mmr")
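# System prompt for the answer-generation step. {context} receives the
# retrieved chunks and {question} the current user question; the chat history
# is injected separately through the MessagesPlaceholder below.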
template = """Your name is AngryGreta and you are a recycling chatbot whose objective is to help people with waste management and improve the environmental situation. Use the following pieces of context to answer the question if the question is related to recycling. Answer in the same language as the question. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
CONTEXT: {context}
Question: {question}
Helpful Answer:"""
# Create the chat prompt template for the answer (combine-docs) chain.
system_prompt = SystemMessagePromptTemplate.from_template(template)
qa_prompt = ChatPromptTemplate.from_messages(
    [
        system_prompt,
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)
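# Mixtral-8x7B-Instruct served through the Hugging Face Inference API. A low
# temperature keeps answers focused; repetition_penalty discourages loops.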
llm = HuggingFaceHub(
    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
    task="text-generation",
    model_kwargs={
        "max_new_tokens": 512,
        "top_k": 30,
        "temperature": 0.1,
        "repetition_penalty": 1.03,
    },
)
# Buffer memory that stores the running conversation under the "chat_history"
# key expected by the prompt's MessagesPlaceholder.
memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer", return_messages=True)
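# Conversational RAG chain: condenses the question with the chat history,
# retrieves relevant chunks, and answers with qa_prompt. get_chat_history is
# the identity function because the memory already returns message objects.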
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=memory,
    retriever=retriever,
    verbose=True,
    combine_docs_chain_kwargs={"prompt": qa_prompt},
    get_chat_history=lambda h: h,
)
def chat_interface(question, chat_history):
    # Gradio passes each input component as its own positional argument,
    # so "question" and "chat_history" arrive directly rather than in a list.
    # The chain's ConversationBufferMemory supplies the chat history itself,
    # so only the question needs to be passed in.
    result = qa_chain.run({"question": question})
    return result
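# Gradio interface for the chatbot. The chain keeps its own history in
# memory, so the Chat History box is purely informational.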
chatbot_gradio_app = gr.Interface(
    fn=chat_interface,
    inputs=[
        gr.Textbox(lines=1, label="Question"),
        gr.Textbox(lines=5, label="Chat History"),
    ],
    outputs="text",
)
# Combine both interfaces into a single tabbed app
app = gr.TabbedInterface(
    [image_gradio_app, chatbot_gradio_app],
    tab_names=["image", "chatbot"],
)
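# Queue requests so long-running chain calls do not block the server.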
app.queue()
app.launch()