import os
import gradio as gr
from groq import Groq
from dotenv import load_dotenv
import json
from deep_translator import GoogleTranslator

load_dotenv()

api1 = os.getenv("GROQ_API_KEY")
api2 = os.getenv("Groq_key")
api3 = os.getenv("GRoq_key")
| # api2 = os.getenv("Groq_key") | |
| # api2 = os.getenv("Groq_key") | |
| # api2 = os.getenv("Groq_key") | |
| # api2 = os.getenv("Groq_key") | |
apis = [
    api1,
    api2,
    api3,
]
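# The keys above form a simple failover pool; make_call below rotates
# through them until a request succeeds.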
def make_call(data):
    print(data)  # Log the raw request payload for debugging
    # The interface passes a JSON-like string; normalise single quotes so
    # json.loads can parse it.
    newdata = data.replace("'", '"')
    items = json.loads(newdata)
    language = items['lang']
    query = items['text'].lower()

    prmptquery = (
        "Answer this query in a short message with wisdom, love and compassion, "
        "in context of the Bhagavad Gita, so that it feels like chatting to a person, "
        "and provide references to relevant shlokas from chapters of the Bhagavad Gita. "
        f"Keep the answer short, precise and simple. Query= {query}"
    )

    answer = None
    translated = None
    # Rotate through the key pool until one request (and its translation) succeeds.
    while translated is None:
        for api in apis:
            client = Groq(api_key=api)  # Configure the client with this key
            try:
                response = client.chat.completions.create(
                    messages=[
                        {
                            "role": "user",
                            "content": prmptquery,
                        }
                    ],
                    model="mixtral-8x7b-32768",
                )
                answer = response.choices[0].message.content
                # Translate the model's reply into the requested language.
                translated = GoogleTranslator(source='auto', target=language).translate(answer)
            except Exception as e:
                print(f"API call failed: {e}")
            if translated:
                break

    respo = {
        "message": translated,
        "action": "nothing",
        "function": "nothing",
    }
    print(translated)
    return json.dumps(respo)
gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
gradio_interface.launch(share=True)
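# Hypothetical usage sketch: the interface's fn expects a JSON-like string
# with a 'lang' target-language code and a 'text' query, for example:
#   make_call("{'lang': 'hi', 'text': 'What does the Gita say about duty?'}")
# which returns a JSON string of the form:
#   {"message": "<translated answer>", "action": "nothing", "function": "nothing"}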
# # Text to 3D
# import streamlit as st
# import torch
# from diffusers import ShapEPipeline
# from diffusers.utils import export_to_gif

# # Model loading (ideally done once at the start for efficiency)
# ckpt_id = "openai/shap-e"

# @st.cache_resource  # Caches the model for faster subsequent runs
# def load_model():
#     return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")

# pipe = load_model()

# # App title
# st.title("Shark 3D Image Generator")

# # User inputs
# prompt = st.text_input("Enter your prompt:", "a shark")
# guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)

# # Generate and display images
# if st.button("Generate"):
#     with st.spinner("Generating images..."):
#         images = pipe(
#             prompt,
#             guidance_scale=guidance_scale,
#             num_inference_steps=64,
#             frame_size=256,  # ShapEPipeline takes frame_size, not size
#         ).images
#     gif_path = export_to_gif(images, "shark_3d.gif")
#     st.image(images[0])  # Display the first image
#     st.success("GIF saved as shark_3d.gif")