warhawkmonk committed on
Commit c98029e · verified · 1 Parent(s): f44e411

Update app.py

Files changed (1)
  1. app.py +22 -22
app.py CHANGED
@@ -103,10 +103,10 @@ def image_to_base64(image_path):
         return base64.b64encode(img_file.read()).decode()
 
 
-@st.cache_resource
-def load_model():
-    pipeline_ = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16).to("cuda")
-    return pipeline_
+# @st.cache_resource
+# def load_model():
+#     pipeline_ = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16).to("cuda")
+#     return pipeline_
 
 @st.cache_resource
 def prompt_improvment(pre_prompt):
@@ -140,15 +140,15 @@ def numpy_to_list(array):
 
 
 
-@st.cache_resource
-def llm_text_response():
-    llm = Ollama(model="llama3:latest",num_ctx=1000)
-    return llm.stream
+# @st.cache_resource
+# def llm_text_response():
+#     llm = Ollama(model="llama3:latest",num_ctx=1000)
+#     return llm.stream
 
-def model_single_out(prompt):
-    pipe=load_model()
-    image = pipe(prompt).images[0]
-    return image
+# def model_single_out(prompt):
+#     pipe=load_model()
+#     image = pipe(prompt).images[0]
+#     return image
 
 def model_out_put(init_image,mask_image,prompt,negative_prompt):
     API_URL = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
@@ -170,16 +170,16 @@ def model_out_put(init_image,mask_image,prompt,negative_prompt):
     # output_image.show()
     return output_image
 
-def model_out_put(init_image, mask_image, prompt, negative_prompt):
-    # Run the inpainting pipeline
-    pipeline_ = load_model()
-    image = pipeline_(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        image=init_image,
-        mask_image=mask_image
-    ).images[0]
-    return image
+# def model_out_put(init_image, mask_image, prompt, negative_prompt):
+#     # Run the inpainting pipeline
+#     pipeline_ = load_model()
+#     image = pipeline_(
+#         prompt=prompt,
+#         negative_prompt=negative_prompt,
+#         image=init_image,
+#         mask_image=mask_image
+#     ).images[0]
+#     return image
 
 @st.cache_resource
 def multimodel():
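Note on the net effect: before this commit, app.py defined model_out_put twice, and since Python binds a name to its most recent def, the later pipeline-based version shadowed the earlier API-based one. Commenting out the later definition makes the ngrok API call the active implementation. A minimal sketch of that shadowing behavior (hypothetical stand-in bodies, not the app's real code):

def model_out_put():
    return "API-based implementation"    # defined first, gets shadowed

def model_out_put():                     # later definition rebinds the name
    return "local pipeline implementation"

print(model_out_put())  # prints: local pipeline implementation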
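For context, a hedged sketch of what the retained, API-based model_out_put presumably does: POST the images and prompts to the ngrok endpoint and decode the returned image. Only the API_URL assignment is visible in this diff; the payload field names, the base64-PNG wire format, and the "image" response field below are assumptions, not the app's confirmed schema:

import base64
import io

import requests
from PIL import Image

API_URL = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"

def pil_to_base64(img: Image.Image) -> str:
    # Serialize a PIL image to a base64 PNG string (assumed wire format).
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode()

def remote_inpaint(init_image, mask_image, prompt, negative_prompt):
    # Hypothetical request schema; the diff only shows API_URL.
    payload = {
        "init_image": pil_to_base64(init_image),
        "mask_image": pil_to_base64(mask_image),
        "prompt": prompt,
        "negative_prompt": negative_prompt,
    }
    resp = requests.post(API_URL, json=payload, timeout=300)
    resp.raise_for_status()
    data = resp.json()
    # Assumed response field "image" carrying a base64-encoded result.
    return Image.open(io.BytesIO(base64.b64decode(data["image"])))

Reverting to local inference would mean uncommenting load_model and the pipeline-based model_out_put, which per the diff requires a CUDA device (torch.float16, .to("cuda")).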