import os

from dotenv import load_dotenv
import gradio as gr
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Load environment variables (expects HF_TOKEN in a local .env file or the environment)
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize the HuggingFace model endpoint
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN,
    temperature=0.7,
    max_new_tokens=700,
)

# Define a prompt template for generating a blog post
TEMPLATE = """
Write a detailed blog post on the following topic:

Topic: {topic}

Make sure the blog post is informative, engaging, well-structured, and roughly 500 words long.
"""

# Create a prompt template instance
blog_prompt_template = PromptTemplate(input_variables=["topic"], template=TEMPLATE)

# Compose the chain: prompt -> model -> plain-string output
blog_chain = blog_prompt_template | llm | StrOutputParser()


def generate_blog_post(topic: str) -> str:
    """Generate a blog post for the given topic, or ask the user to supply one."""
    if not topic or not topic.strip():
        return "Please enter a topic for the blog post."
    return blog_chain.invoke({"topic": topic})


# Define the Gradio interface
interface = gr.Interface(
    fn=generate_blog_post,
    inputs=[
        gr.Textbox(label="Blog Topic", placeholder="Enter the topic here"),
    ],
    outputs="text",
    title="AI Blog Generator",
    description=(
        "Welcome to the AI Blog Generator. This tool allows you to generate "
        "high-quality, engaging blog posts in just a few clicks. Simply provide "
        "a topic, and the AI will create a detailed blog post for you."
    ),
    theme="default",
)

if __name__ == "__main__":
    interface.launch()
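
# --- Optional smoke test (a minimal sketch, not part of the app above) ---
# Assuming HF_TOKEN is already set in the environment, the chain can be exercised
# without launching the Gradio UI by invoking it directly; the topic string below
# is only an illustrative example.
#
#     result = blog_chain.invoke({"topic": "The basics of vector databases"})
#     print(result[:200])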