import os
import gradio as gr
from dotenv import load_dotenv
import json
from deep_translator import GoogleTranslator
import google.generativeai as genai
import time
import random

load_dotenv()
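# .env is expected to contain a line such as (hypothetical value):
#   GEMINI_API_KEY=your_key_here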

# Configure the Gemini API with your API key (loaded from .env)
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError("GEMINI_API_KEY is not set; add it to your .env file.")
genai.configure(api_key=GEMINI_API_KEY)

def make_call(data):
    print(data)
    # The payload arrives with single quotes, so rewrite them to double quotes
    # before parsing. Note this breaks if the text itself contains an
    # apostrophe; sending strict JSON from the client would avoid that.
    newdata = data.replace("'", '"')
    items = json.loads(newdata)
    language = items['lang']  # target language code for deep_translator, e.g. 'hi'
    query = items['text'].lower()
    translated = None

    model = genai.GenerativeModel('gemini-2.5-flash-lite')

    retries = 0
    max_retries = 5  # You can adjust this number
    base_delay = 1  # Initial delay in seconds
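    # With these values the delays grow roughly 1s, 2s, 4s, 8s, 16s (plus jitter).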

    while retries < max_retries:
        try:
            prompt_query = (
                f"Answer the given query in a very short message with wisdom, love, and compassion, "
                f"as Krishna would have answered it in the context of the Bhagavad Gita, "
                f"so that it feels like talking to Krishna himself. "
                f"Provide references to the shlokas from the chapters of the Bhagavad Gita "
                f"that are relevant to the query. Keep the answer short, precise, and simple. "
                f"Query: {query}"
            )

            response = model.generate_content(prompt_query)
            answer = response.text
            translated = GoogleTranslator(source='auto', target=language).translate(answer)
            break  # Exit the loop if the call is successful
        except Exception as e:
            if "429 Quota exceeded" in str(e):
                delay = base_delay * (2 ** retries) + random.uniform(0, 1)  # Exponential backoff with jitter
                print(f"Quota exceeded. Retrying in {delay:.2f} seconds... (Attempt {retries + 1}/{max_retries})")
                time.sleep(delay)
                retries += 1
            else:
                print(f"API call failed: {e}")
                translated = f"An error occurred while fetching the answer: {e}"
                break  # Exit the loop for other errors
    else:
        # This block executes if the loop completes without a successful break (i.e., max_retries reached)
        translated = "Maximum retry attempts reached. Please try again later."

    respo = {
        "message": translated,
        "action": "nothing",
        "function": "nothing",
    }
    print(translated)
    return json.dumps(respo)

gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
gradio_interface.launch()
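# Example invocation (a sketch, matching the payload format make_call parses
# above; 'lang' should be a language code deep_translator accepts, e.g. 'hi'):
#
#   make_call("{'lang': 'hi', 'text': 'How do I find peace of mind?'}")
#
# which returns a JSON string of the form:
#   {"message": "<translated answer>", "action": "nothing", "function": "nothing"}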




# import os
# import gradio as gr
# from groq import Groq
# from dotenv import load_dotenv
# import json
# from deep_translator import GoogleTranslator
# import google.generativeai as genai
# load_dotenv()


# api1 = os.getenv("GEMINI_API_KEY")
# genai.configure(api_key=api1)


# # api2 = os.getenv("Groq_key")
# # api3 = os.getenv("GRoq_key")

# # apis = [
# #     api1
# # ]
# # from google import genai

# # client = genai.Client()

# # response = client.models.generate_content(
# #     model="gemini-2.5-flash",
# #     contents="Explain how AI works in a few words",
# # )

# # print(response.text)


# def make_call(data):
#     print(data)
#     newdata = data.replace("'", '"')
#     items = json.loads(newdata)
#     language = items['lang']
#     query = items['text']
#     query = query.lower()
#     answer = None
#     while True:
#         for api in apis:
#             client = genai.Client(
#                     api_key=api,
#                 )  # Configure the model with the API key
#             # query = st.text_input("Enter your query")
#             prmptquery= f"Answer this query in a short message with wisdom, love and compassion, in the context of the Bhagavad Gita, so that it feels like chatting with a person, and provide references of shlokas from chapters of the Bhagavad Gita which are relevant to the query. Keep the answer short, precise and simple. Query= {query}"
#             try:
#                 response = client.chat.completions.create(
#                 messages=[
#                     {
#                         "role": "user",
#                         "content": prmptquery,
#                     }
#                 ],
#                 model="mixtral-8x7b-32768",
#                 )
#                 answer = response.choices[0].message.content
#                 translated = GoogleTranslator(source='auto', target=language).translate(answer)
#             except Exception as e:
#                 print(f"API call failed for: {e}")
#             if answer:
#                 break
#         if answer:
#                 break
#     respo = {
#                 "message": translated,
#                 "action": "nothing",
#                 "function": "nothing",
#             }
#     print(translated)
#     return json.dumps(respo)



# gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
# gradio_interface.launch()

# # print(chat_completion)

# # # Text to 3D

# # import streamlit as st
# # import torch
# # from diffusers import ShapEPipeline
# # from diffusers.utils import export_to_gif

# # # Model loading (Ideally done once at the start for efficiency)
# # ckpt_id = "openai/shap-e"  
# # @st.cache_resource  # Caches the model for faster subsequent runs
# # def load_model():
# #     return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")  

# # pipe = load_model()

# # # App Title
# # st.title("Shark 3D Image Generator")

# # # User Inputs
# # prompt = st.text_input("Enter your prompt:", "a shark")
# # guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)

# # # Generate and Display Images
# # if st.button("Generate"):
# #     with st.spinner("Generating images..."):
# #         images = pipe(
# #             prompt,
# #             guidance_scale=guidance_scale,
# #             num_inference_steps=64,
# #             size=256,
# #         ).images
# #         gif_path = export_to_gif(images, "shark_3d.gif")

# #         st.image(images[0])  # Display the first image
# #         st.success("GIF saved as shark_3d.gif")