LPX55 committed on
Commit 24d850e · verified · 1 Parent(s): 7266c03

Update app.py

Files changed (1)
  1. app.py +10 -3
app.py CHANGED
```diff
@@ -6,13 +6,20 @@ from PIL import Image
 import torch
 import os, time
 
+# Load the model and processor
+# model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+#     "daniel3303/QwenStoryteller",
+#     torch_dtype=torch.float16,
+#     device_map="auto"
+# )
+# processor = AutoProcessor.from_pretrained("daniel3303/QwenStoryteller")
 # Load the model and processor
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    "daniel3303/QwenStoryteller",
+    "daniel3303/QwenStoryteller2",
     torch_dtype=torch.float16,
     device_map="auto"
 )
-processor = AutoProcessor.from_pretrained("daniel3303/QwenStoryteller")
+processor = AutoProcessor.from_pretrained("daniel3303/QwenStoryteller2")
 
 @spaces.GPU()
 @torch.no_grad()
@@ -107,7 +114,7 @@ with gr.Blocks(fill_height=True) as demo:
 * Grounded Storytelling: Uses specialized XML tags to link narrative elements directly to visual entities
 * Reduced Hallucinations: Achieves 12.3% fewer hallucinations compared to the non-fine-tuned base model
 
-Model trained by daniel3303, [repository here.](https://huggingface.co/daniel3303/QwenStoryteller)
+Model trained by daniel3303, [repository here.](https://huggingface.co/daniel3303/QwenStoryteller2)
 """
 )
```
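
For context, a minimal usage sketch (not part of this commit) of how the swapped-in daniel3303/QwenStoryteller2 checkpoint and its processor are typically driven through the standard transformers chat-template flow; the image path, prompt text, and max_new_tokens value below are placeholder assumptions, not values taken from app.py.

```python
# Minimal sketch; the image path, prompt, and generation length are placeholders.
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

# Same loading call as the updated app.py: fp16 weights, automatic device placement.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "daniel3303/QwenStoryteller2",
    torch_dtype=torch.float16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained("daniel3303/QwenStoryteller2")

# Build a single-image chat prompt; the image itself is passed to the processor below.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Tell a grounded story about this image."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

image = Image.open("example.jpg")  # hypothetical input image
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=512)

# Strip the prompt tokens so only the newly generated story is decoded.
story = processor.batch_decode(
    output_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)[0]
print(story)
```

Because the commit keeps device_map="auto", the fp16 weights may be sharded across whatever GPUs are visible, so the sketch moves inputs to model.device rather than hard-coding a CUDA index.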