import os
import logging
from typing import Optional, List, Union, Literal

from pydantic import BaseModel, Field
from gradio import Interface
from gradio.components import Textbox, Image
from gradio.data_classes import FileData, GradioModel, GradioRootModel
from transformers import pipeline
from diffusers import DiffusionPipeline
import torch
import gradio as gr

# Load gated image model securely; the token must have been granted access
# to the FLUX.1-dev repository on the Hugging Face Hub.
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if not hf_token:
    raise RuntimeError("Missing HUGGINGFACE_TOKEN env var for gated model access.")

image_model = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    token=hf_token,  # `use_auth_token` is deprecated in recent diffusers releases
)
if torch.cuda.is_available():
    # Offload submodules to CPU between forward passes to cut peak GPU memory;
    # requires the `accelerate` package and a CUDA device.
    image_model.enable_model_cpu_offload()


# Data models
class FileDataDict(BaseModel):
    path: str
    url: Optional[str] = None
    size: Optional[int] = None
    orig_name: Optional[str] = None
    mime_type: Optional[str] = None
    is_stream: Optional[bool] = False

    class Config:
        arbitrary_types_allowed = True


class MessageDict(BaseModel):
    content: Union[str, FileDataDict, tuple]
    role: Literal["user", "assistant", "system"]
    metadata: Optional[dict] = None
    options: Optional[List[dict]] = None

    class Config:
        arbitrary_types_allowed = True


class ChatMessage(GradioModel):
    role: Literal["user", "assistant", "system"]
    content: Union[str, FileData]
    metadata: dict = Field(default_factory=dict)
    options: Optional[List[dict]] = None

    class Config:
        arbitrary_types_allowed = True


class ChatbotDataMessages(GradioRootModel):
    root: List[ChatMessage]


# Reasoning Engine
class UniversalReasoning:
    def __init__(self, config):
        self.config = config
        self.context_history = []
        self.sentiment_analyzer = pipeline("sentiment-analysis")
        self.deepseek_model = pipeline(
            "text-classification",
            model="distilbert-base-uncased-finetuned-sst-2-english",
        )
        self.davinci_model = pipeline("text2text-generation", model="t5-small")
        self.additional_model = pipeline("text-generation", model="EleutherAI/gpt-neo-125M")
        self.image_model = image_model

    async def generate_response(self, question: str) -> str:
        self.context_history.append(question)
        sentiment_score = self.analyze_sentiment(question)
        deepseek_response = self.deepseek_model(question)
        davinci_response = self.davinci_model(question, max_length=50)
        additional_response = self.additional_model(question, max_length=100)
        responses = [
            f"Sentiment score: {sentiment_score}",
            f"DeepSeek Response: {deepseek_response}",
            f"T5 Response: {davinci_response}",
            f"GPT-Neo Response: {additional_response}",
        ]
        return "\n\n".join(responses)

    def generate_image(self, prompt: str):
        image = self.image_model(
            prompt,
            height=1024,
            width=1024,
            guidance_scale=3.5,
            num_inference_steps=50,
            generator=torch.Generator("cpu").manual_seed(0),
        ).images[0]
        image.save("flux-dev.png")
        return image

    def analyze_sentiment(self, text: str) -> list:
        sentiment_score = self.sentiment_analyzer(text)
        logging.info(f"Sentiment analysis result: {sentiment_score}")
        return sentiment_score


# Main Gradio App
class HuggingFaceChatbot:
    def __init__(self):
        self.universal_reasoning = UniversalReasoning(config={})

    def setup_interface(self):
        # Gradio accepts async functions directly as Interface callbacks.
        async def chatbot_logic(input_text: str) -> str:
            return await self.universal_reasoning.generate_response(input_text)

        def image_logic(prompt: str):
            return self.universal_reasoning.generate_image(prompt)

        text_interface = Interface(
            fn=chatbot_logic,
            inputs=Textbox(label="Ask anything"),
            outputs=Textbox(label="Reasoned Answer"),
            title="🧠 Codettes-BlackForest Chatbot",
        )
        image_interface = Interface(
            fn=image_logic,
            inputs=Textbox(label="Describe an image"),
            outputs=Image(label="Generated Image"),
            title="🎨 Image Generator (FLUX.1-dev)",
        )
        # gr.Blocks() does not accept a list of interfaces; TabbedInterface is
        # the supported way to combine multiple Interfaces into one app.
        return gr.TabbedInterface(
            [text_interface, image_interface],
            ["Chatbot", "Image Generator"],
        )

    def launch(self):
        app = self.setup_interface()
        app.launch()


# Launch the app
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
    HuggingFaceChatbot().launch()
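
# --- Usage sketch (not part of the app) ---
# A minimal run-through, assuming the dependencies are installed (gradio,
# transformers, diffusers, torch, accelerate) and the token has access to the
# gated black-forest-labs/FLUX.1-dev repo. The filename "app.py" is an
# assumption; substitute whatever this script is saved as:
#
#   export HUGGINGFACE_TOKEN=hf_xxx   # token elided; use your own
#   python app.py
#
# app.launch() then serves both tabs on Gradio's default local URL
# (http://127.0.0.1:7860); the first request to each pipeline triggers the
# model downloads, so expect a delay on first use.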