Update app.py
app.py
CHANGED
@@ -36,6 +36,7 @@ torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
 #torch.backends.cuda.preferred_blas_library="cublas"
 #torch.backends.cuda.preferred_linalg_library="cusolver"
+torch.set_float32_matmul_precision("highest")

 hftoken = os.getenv("HF_AUTH_TOKEN")

@@ -115,7 +116,6 @@ def infer_30(
     pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
-    torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
@@ -165,7 +165,6 @@ def infer_60(
     pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
-    torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
@@ -215,7 +214,6 @@ def infer_90(
     pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
-    torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
@@ -262,7 +260,6 @@ def infer_100(
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
-    torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
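In effect, the commit hoists torch.set_float32_matmul_precision("highest") out of the four infer_* functions and runs it once alongside the other global backend flags, so the precision mode is set at import time rather than on every call. Below is a minimal sketch of the resulting module-level setup and the per-call seeding that remains inside each function; MAX_SEED and infer_sketch are placeholder names for illustration, since their real definitions are not shown in this diff.

import os
import random

import torch

# Global backend/precision configuration, applied once when app.py is imported.
torch.backends.cudnn.benchmark = False
#torch.backends.cuda.preferred_blas_library="cublas"
#torch.backends.cuda.preferred_linalg_library="cusolver"
torch.set_float32_matmul_precision("highest")  # full IEEE float32 matmuls, no TF32

hftoken = os.getenv("HF_AUTH_TOKEN")

MAX_SEED = 2**32 - 1  # assumed value; the actual constant is defined elsewhere in app.py

def infer_sketch():
    # Each infer_* function now only does its per-call work (seeding, generation);
    # the precision setting above no longer needs to be repeated here.
    seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device='cuda').manual_seed(seed)
    print('-- generating image --')
    return generator, seed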