import os

import gradio as gr
import textstat
from langchain_huggingface import HuggingFaceEndpoint

# Set up the Hugging Face API token and model endpoint
HF_TOKEN = os.getenv("HF_TOKEN")  # Make sure the token is set in your environment

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN,
    temperature=0.7,
    max_new_tokens=500,
)


def generate_keywords(content):
    # Ask the model for a comma-separated keyword list so it can be split reliably
    prompt = (
        "Generate a comma-separated list of the 10 most appropriate SEO keywords "
        f"for the following content:\n\n{content}"
    )
    response = llm.invoke(prompt)
    keywords = response.split(",")  # Assumes the model returns a comma-separated list
    return [keyword.strip() for keyword in keywords]


def check_and_improve_seo(content):
    # Basic SEO criterion: do the generated keywords already appear in the content?
    keywords = generate_keywords(content)
    keyword_found = any(keyword.lower() in content.lower() for keyword in keywords)

    # Check the readability score
    readability_score = textstat.flesch_reading_ease(content)

    # Prepare a prompt for the LLM to improve the content
    prompt = (
        "Optimize the following content for SEO. Ensure it includes appropriate keywords, "
        "is easy to read, and meets SEO best practices.\n\n"
        "Content:\n" + content
    )

    # Generate SEO-optimized content using the Hugging Face model
    optimized_content = llm.invoke(prompt)

    # Format the output as plain text
    output = (
        # f"**Generated Keywords:**\n\n"
        # f"Relevant SEO keywords: {', '.join(keywords)}\n\n"
        f"**Keywords Present:** {keyword_found}\n\n"
        f"**Readability Score (Flesch):** {readability_score}\n\n"
        f"**Optimized Content:**\n{optimized_content}"
    )
    return output


# Define the Gradio interface
interface = gr.Interface(
    fn=check_and_improve_seo,
    inputs=gr.Textbox(lines=10, placeholder="Enter your content here..."),
    outputs="text",  # Return plain text
    title="SEO Compatibility Checker and Optimizer",
    description=(
        "Check whether the given content is SEO compatible and get an improved "
        "version based on SEO best practices."
    ),
)

# Launch the app
if __name__ == "__main__":
    interface.launch()
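
# Optional quick smoke test (a sketch, not part of the app): call the pipeline
# directly instead of launching the Gradio UI. The sample text is hypothetical,
# and this assumes HF_TOKEN is set and the Mistral endpoint is reachable.
#
#   sample = (
#       "Our handmade ceramic mugs are crafted in small batches and shipped "
#       "worldwide with plastic-free packaging."
#   )
#   print(check_and_improve_seo(sample))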