from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain, SequentialChain
from dotenv import load_dotenv
import os

# Load environment variables from .env file
load_dotenv()

# Hugging Face Hub API token
huggingfacehub_api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
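# The .env file is expected to contain a line like the following
# (placeholder token shown, not a real value):
# HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx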
# Configuration for the language model
config = {'max_new_tokens': 512, 'temperature': 0.6}
def GetLLMResponse(selected_topic_level, selected_topic, num_quizzes):
    # Ensure that the Hugging Face Hub API token is available
    if huggingfacehub_api_token is None:
        raise ValueError(
            "HUGGINGFACEHUB_API_TOKEN environment variable is not set. "
            "Set the API token and try again."
        )

    # Initialize the Hugging Face Hub LLM with the API token
    llm = HuggingFaceHub(
        repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
        model_kwargs=config,
        huggingfacehub_api_token=huggingfacehub_api_token
    )

    # Create an LLM chain for generating questions
    questions_template = (
        "Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. "
        "Generate exactly {num_quizzes} questions, no more, and do not provide answers. "
        "The questions must be plain text, not images or links."
    )
    questions_prompt = PromptTemplate(
        input_variables=["selected_topic_level", "selected_topic", "num_quizzes"],
        template=questions_template
    )
    questions_chain = LLMChain(llm=llm, prompt=questions_prompt, output_key="questions")

    # Create an LLM chain for generating answers
    answer_template = (
        "You are a teacher. Answer the following questions:\n{questions}\n\n"
        "Give a straightforward, concise explanation and answer for each one."
    )
    answer_prompt = PromptTemplate(input_variables=["questions"], template=answer_template)
    answer_chain = LLMChain(llm=llm, prompt=answer_prompt, output_key="answer")
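    # Note: SequentialChain feeds the 'questions' output of questions_chain
    # into the {questions} input variable of answer_prompt below.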
    # Chain the two prompts sequentially
    seq_chain = SequentialChain(
        chains=[questions_chain, answer_chain],
        input_variables=['selected_topic_level', 'selected_topic', 'num_quizzes'],
        output_variables=['questions', 'answer']
    )

    # Execute the chained prompts
    response = seq_chain({
        'selected_topic_level': selected_topic_level,
        'selected_topic': selected_topic,
        'num_quizzes': num_quizzes
    })

    # Print the response for debugging purposes
    print(response)

    # Return the response
    return response
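
# --- Example usage: a minimal sketch; the level, topic, and question count
# below are illustrative values, not part of the original app ---
if __name__ == "__main__":
    result = GetLLMResponse(
        selected_topic_level="High School",    # illustrative difficulty level
        selected_topic="Quadratic Equations",  # illustrative topic
        num_quizzes=3                          # illustrative question count
    )
    # SequentialChain returns a dict keyed by its output_variables
    print(result["questions"])
    print(result["answer"])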