celinah committed
Commit 16c0efc · 1 Parent(s): ef502da
Files changed (2):
  1. app.py +31 -11
  2. requirements.txt +2 -1
app.py CHANGED
@@ -2,8 +2,11 @@ import os
 
 import gradio as gr
 import numpy as np
+from dotenv import load_dotenv
 from huggingface_hub import InferenceClient, login
 
+load_dotenv()
+
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 TOKEN = None
@@ -11,11 +14,29 @@ TOKEN = None
 def get_token(oauth_token: gr.OAuthToken | None):
     global TOKEN
     if oauth_token and oauth_token.token:
+        print("Received OAuth token, logging in...")
         TOKEN = oauth_token.token
     else:
-        TOKEN = os.getenv("HF_TOKEN")
+        print("No OAuth token provided, using environment variable HF_TOKEN.")
+        TOKEN = os.environ.get("HF_TOKEN")
 
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
+def generate(prompt: str, seed: int = 42, width: int = 1024, height: int = 1024, num_inference_steps: int = 25):
+    """
+    Generate an image from a prompt.
+    Args:
+        prompt (str):
+            The prompt to generate an image from.
+        seed (int, default=42):
+            Seed for the random number generator.
+        height (int, default=1024):
+            The height in pixels of the output image.
+        width (int, default=1024):
+            The width in pixels of the output image.
+        num_inference_steps (int, default=25):
+            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+            expense of slower inference.
+
+    """
     client = InferenceClient(provider="fal-ai", token=TOKEN)
     image = client.text_to_image(
         prompt=prompt,
@@ -41,11 +62,12 @@ css="""
 """
 
 with gr.Blocks(css=css) as demo:
+    demo.load(get_token, inputs=None, outputs=None)
     with gr.Sidebar():
         gr.Markdown("# Inference Provider")
         gr.Markdown("This Space showcases the black-forest-labs/FLUX.1-dev model, served by the nebius API. Sign in with your Hugging Face account to use this API.")
         button = gr.LoginButton("Sign in")
-        button.click(fn=get_token, inputs=button, outputs=[])
+        button.click(fn=get_token, inputs=[], outputs=[])
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# FLUX.1 [schnell] with fal-ai through HF Inference Providers ⚡
@@ -72,11 +94,9 @@ learn more about HF Inference Providers [here](https://huggingface.co/docs/infer
             minimum=0,
             maximum=MAX_SEED,
             step=1,
-            value=0,
+            value=42,
         )
-
-        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
+
         with gr.Row():
 
             width = gr.Slider(
@@ -103,12 +123,12 @@ learn more about HF Inference Providers [here](https://huggingface.co/docs/infer
             minimum=1,
             maximum=50,
             step=1,
-            value=4,
+            value=25,
         )
 
         gr.Examples(
             examples = examples,
-            fn = infer,
+            fn = generate,
             inputs = [prompt],
             outputs = [result, seed],
             cache_examples="lazy"
@@ -116,8 +136,8 @@ learn more about HF Inference Providers [here](https://huggingface.co/docs/infer
 
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, num_inference_steps],
+        fn = generate,
+        inputs = [prompt, seed, width, height, num_inference_steps],
         outputs = [result, seed]
     )
 
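For context, the commit's new code path reduces to the short sketch below: resolve a token the same way get_token() falls back, then call text_to_image through the fal-ai provider. This is a minimal standalone sketch, not the Space's exact code: the model ID is an assumption, since the visible hunks never show which model app.py passes to the client, and it presumes a recent huggingface_hub with inference-provider support.

import os

from huggingface_hub import InferenceClient

# Same fallback get_token() uses when no OAuth token is available.
token = os.environ.get("HF_TOKEN")

# Route the request through the fal-ai inference provider.
client = InferenceClient(provider="fal-ai", token=token)

# text_to_image returns a PIL.Image.Image.
image = client.text_to_image(
    prompt="An astronaut riding a horse on the moon",
    model="black-forest-labs/FLUX.1-schnell",  # assumed; not shown in the diff
    width=1024,
    height=1024,
    num_inference_steps=25,
    seed=42,
)
image.save("output.png")

Note also the pattern behind the added demo.load(get_token, inputs=None, outputs=None): because get_token's parameter is type-hinted gr.OAuthToken | None, Gradio injects the signed-in user's OAuth token automatically on page load, which is why the function needs no explicit inputs.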
 
requirements.txt CHANGED
@@ -1,2 +1,3 @@
 huggingface-hub
-numpy
+numpy
+python-dotenv
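The new python-dotenv dependency backs the load_dotenv() call added at the top of app.py: in local development the HF_TOKEN fallback can then come from a .env file instead of an exported shell variable. A minimal sketch, assuming a .env file next to app.py containing a line like HF_TOKEN=hf_xxx:

import os

from dotenv import load_dotenv

# Loads key=value pairs from ./.env into os.environ; existing environment
# variables are not overwritten, and the call is a no-op if no file exists.
load_dotenv()

# The token is now visible exactly where app.py's fallback path looks.
token = os.environ.get("HF_TOKEN")
print("token configured" if token else "no token configured")

On a deployed Space the .env file is normally absent and HF_TOKEN comes from the Space's secrets, so load_dotenv() changes nothing there.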