Update model.py
model.py
CHANGED
@@ -96,7 +96,7 @@ text_tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
 embeddings = text_model.forward(texts, text_tokenizer, device)
 # 1. Load the autoencoder model which will be used to decode the latents into image space.
 vae = AutoencoderKL.from_pretrained(
-    'CompVis/stable-diffusion-v1-4', subfolder='vae', use_auth_token=
+    'CompVis/stable-diffusion-v1-4', subfolder='vae', use_auth_token=False)
 vae = vae.to(device)
 
 # 2. Load the tokenizer and text encoder to tokenize and encode the text.
@@ -105,7 +105,7 @@ text_encoder = text_model
 
 # 3. The UNet model for generating the latents.
 unet = UNet2DConditionModel.from_pretrained(
-    'CompVis/stable-diffusion-v1-4', subfolder='unet', use_auth_token=
+    'CompVis/stable-diffusion-v1-4', subfolder='unet', use_auth_token=False)
 unet = unet.to(device)
 
 # 4. Create a scheduler for inference
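
For reference, the change completes the previously truncated use_auth_token argument so both from_pretrained calls actually run. Below is a minimal, self-contained sketch of the loading sequence after this change, assuming the standard diffusers API; the LMSDiscreteScheduler used for step 4 is an illustrative choice only, since the diff does not show which scheduler model.py creates, and step 2 (the project's own text_model / tokenizer) is omitted.

import torch
from diffusers import AutoencoderKL, UNet2DConditionModel, LMSDiscreteScheduler

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# 1. Autoencoder that decodes latents into image space (as in the diff).
vae = AutoencoderKL.from_pretrained(
    'CompVis/stable-diffusion-v1-4', subfolder='vae', use_auth_token=False)
vae = vae.to(device)

# 3. UNet that denoises the latents (as in the diff).
unet = UNet2DConditionModel.from_pretrained(
    'CompVis/stable-diffusion-v1-4', subfolder='unet', use_auth_token=False)
unet = unet.to(device)

# 4. Scheduler for inference -- illustrative choice, not part of this commit.
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012,
                                 beta_schedule='scaled_linear',
                                 num_train_timesteps=1000)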