warhawkmonk committed on
Commit 900564e · verified · 1 Parent(s): a790317

Update app.py

Files changed (1)
  1. app.py +61 -39
app.py CHANGED
@@ -29,27 +29,38 @@ import streamlit.components.v1 as components
 from datetime import datetime
 from streamlit_js_eval import streamlit_js_eval
 from streamlit_pdf_viewer import pdf_viewer
-def consume_llm_api(prompt):
-    """
-    Sends a prompt to the LLM API and processes the streamed response.
-    """
-    url = "https://ff44-70-167-32-130.ngrok-free.app/api/llm-response"
-    headers = {"Content-Type": "application/json"}
-    payload = {"prompt": prompt}
-
-    try:
-        print("Sending prompt to the LLM API...")
-        with requests.post(url, json=payload, headers=headers, stream=True) as response:
-            response.raise_for_status()
-            print("Response from LLM API:\n")
-            for line in response:
-                yield(line.decode('utf-8'))
-            # print(type(response))
-            # yield(response)
-    except requests.RequestException as e:
-        print(f"Error consuming API: {e}")
-    except Exception as e:
-        print(f"Unexpected error: {e}")
+# def consume_llm_api(prompt):
+#     """
+#     Sends a prompt to the LLM API and processes the streamed response.
+#     """
+#     url = "https://ff44-70-167-32-130.ngrok-free.app/api/llm-response"
+#     headers = {"Content-Type": "application/json"}
+#     payload = {"prompt": prompt}
+
+#     try:
+#         print("Sending prompt to the LLM API...")
+#         with requests.post(url, json=payload, headers=headers, stream=True) as response:
+#             response.raise_for_status()
+#             print("Response from LLM API:\n")
+#             for line in response:
+#                 yield(line.decode('utf-8'))
+#             # print(type(response))
+#             # yield(response)
+#     except requests.RequestException as e:
+#         print(f"Error consuming API: {e}")
+#     except Exception as e:
+#         print(f"Unexpected error: {e}")
+
+def consume_llm_api(prompt):
+    llm_stream = llm_text_response()(prompt)
+
+    # Create a generator to stream the data
+
+    for chunk in llm_stream:
+
+        yield chunk
+
+
 def send_prompt():
     return "please respond according to the prompt asked below from the above context"
 
@@ -105,25 +116,36 @@ def model_single_out(prompt):
     image = pipe(prompt).images[0]
     return image
 
-def model_out_put(init_image,mask_image,prompt,negative_prompt):
-    API_URL = "https://ff44-70-167-32-130.ngrok-free.app/api/llm-response"
-    initial_image_base64 = numpy_to_list(np.array(init_image))
-    mask_image_base64 = numpy_to_list(np.array(mask_image))
-    payload = {
-        "prompt": prompt, # Replace with your desired prompt
-        "initial_img": initial_image_base64,
-        "masked_img": mask_image_base64,
-        "negative_prompt": negative_prompt # Replace with your negative prompt
-    }
-    response_ = requests.post(API_URL, json=payload)
-    response_data = response_.json()
-    output_image_base64 = response_data.get("img", "")
-
-    output_image=np.array(output_image_base64,dtype=np.uint8)
-
-    output_image = Image.fromarray(output_image)
-    # output_image.show()
-    return output_image
+# def model_out_put(init_image,mask_image,prompt,negative_prompt):
+#     API_URL = "https://ff44-70-167-32-130.ngrok-free.app/api/llm-response"
+#     initial_image_base64 = numpy_to_list(np.array(init_image))
+#     mask_image_base64 = numpy_to_list(np.array(mask_image))
+#     payload = {
+#         "prompt": prompt, # Replace with your desired prompt
+#         "initial_img": initial_image_base64,
+#         "masked_img": mask_image_base64,
+#         "negative_prompt": negative_prompt # Replace with your negative prompt
+#     }
+#     response_ = requests.post(API_URL, json=payload)
+#     response_data = response_.json()
+#     output_image_base64 = response_data.get("img", "")
+
+#     output_image=np.array(output_image_base64,dtype=np.uint8)
+
+#     output_image = Image.fromarray(output_image)
+#     # output_image.show()
+#     return output_image
+
+def model_out_put(init_image, mask_image, prompt, negative_prompt):
+    # Run the inpainting pipeline
+    pipeline_ = load_model()
+    image = pipeline_(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        image=init_image,
+        mask_image=mask_image
+    ).images[0]
+    return image
 
 @st.cache_resource
 def multimodel():
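Note: the reworked consume_llm_api no longer posts to the hard-coded ngrok endpoint; it wraps a locally produced stream in a plain Python generator. Below is a minimal sketch of how such a generator could be consumed from the Streamlit UI, assuming llm_text_response() (defined elsewhere in app.py) returns a callable that yields text chunks; the stub loader and the widget wiring are illustrative assumptions, not part of the commit.

import streamlit as st

def llm_text_response():
    # Hypothetical stand-in for the real loader defined elsewhere in app.py;
    # it is assumed to return a callable that streams text chunks for a prompt.
    def _stream(prompt):
        for word in f"echo: {prompt}".split():
            yield word + " "
    return _stream

def consume_llm_api(prompt):
    # Same shape as the committed function: wrap the local stream in a generator.
    llm_stream = llm_text_response()(prompt)
    for chunk in llm_stream:
        yield chunk

# Illustrative wiring (not from the commit); st.write_stream needs a recent Streamlit release.
prompt = st.text_input("Prompt")
if prompt:
    st.write_stream(consume_llm_api(prompt))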
 
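Note: load_model(), called by the new model_out_put, is defined outside this hunk. A plausible definition is sketched below, assuming it returns a diffusers inpainting pipeline and is cached with st.cache_resource like the other loaders in app.py; the checkpoint name and dtype handling are assumptions, not taken from the commit.

import streamlit as st
import torch
from diffusers import AutoPipelineForInpainting

@st.cache_resource
def load_model():
    # Load the inpainting pipeline once per session and reuse it across reruns.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = AutoPipelineForInpainting.from_pretrained(
        "runwayml/stable-diffusion-inpainting",  # example checkpoint, an assumption
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    )
    return pipe.to(device)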