WeichenFan committed · commit a03b19d · parent f28a5b1 · update demo
app.py
CHANGED
@@ -5,7 +5,48 @@ import time
import gradio as gr
import torch
# from diffusers import CogVideoXPipeline
+
+import torch
from models.pipeline import VchitectXLPipeline
+import random
+import numpy as np
+import os
+
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import torch
+from transformers import (
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    T5TokenizerFast,
+)
+from models.modeling_t5 import T5EncoderModel
+from models.VchitectXL import VchitectXLTransformerModel
+from transformers import AutoTokenizer, PretrainedConfig, CLIPTextModel, CLIPTextModelWithProjection
+from diffusers.image_processor import VaeImageProcessor
+from diffusers.loaders import FromSingleFileMixin, SD3LoraLoaderMixin
+from diffusers.models.autoencoders import AutoencoderKL
+from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+from diffusers.utils import (
+    is_torch_xla_available,
+    logging,
+    replace_example_docstring,
+)
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+
+from patch_conv import convert_model
+from op_replace import replace_all_layernorms
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+    XLA_AVAILABLE = True
+else:
+    XLA_AVAILABLE = False
+
+import math
+
from diffusers.utils import export_to_video
from datetime import datetime, timedelta
# from openai import OpenAI

@@ -21,9 +62,302 @@ dtype = torch.float16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = VchitectXLPipeline("Vchitect/Vchitect-XL-2B",device)

+# pipe.acc_call = acc_call.__get__(pipe)
+import types
+# pipe.__call__ = types.MethodType(acc_call, pipe)
+pipe.__class__.__call__ = acc_call
+
os.makedirs("./output", exist_ok=True)
os.makedirs("./gradio_tmp", exist_ok=True)

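Note on the patch above: `__call__` is a special method, so `pipe(...)` resolves it on the pipeline's class rather than on the instance. The commented-out `types.MethodType` binding on `pipe` itself would be ignored by the call syntax, which is why `acc_call` is installed on `pipe.__class__`. A minimal sketch of the difference (the `Demo` class and `patched` function are illustrative only, not part of the demo):

import types

class Demo:
    def __call__(self):
        return "original"

def patched(self):
    return "patched"

obj = Demo()

# Instance-level binding: ignored by the obj(...) call syntax,
# because special-method lookup skips the instance dict.
obj.__call__ = types.MethodType(patched, obj)
print(obj())           # "original"
print(obj.__call__())  # "patched" (explicit attribute access does see it)

# Class-level binding: changes obj(...) for every instance, as done for the pipeline above.
Demo.__call__ = patched
print(obj())           # "patched"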
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
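For reference, a small sketch of how this helper dispatches (assuming the stock FlowMatchEulerDiscreteScheduler imported above with its default config; the guard at the top rejects passing both overrides at once):

scheduler = FlowMatchEulerDiscreteScheduler()

# Default path: the scheduler builds its own schedule and the step count is passed through.
ts, n = retrieve_timesteps(scheduler, num_inference_steps=28, device="cpu")  # n == 28

# Custom timesteps and custom sigmas are mutually exclusive.
try:
    retrieve_timesteps(scheduler, timesteps=[1, 2, 3], sigmas=[1.0, 0.5, 0.0])
except ValueError:
    pass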
+
+import torch.fft
+@torch.no_grad()
+def myfft(tensor):
+    if True:
+        if True:
+            tensor_fft = torch.fft.fft2(tensor)
+            # shift the spectrum so the zero frequency sits at the image center
+            tensor_fft_shifted = torch.fft.fftshift(tensor_fft)
+            # get the tensor dimensions
+            B, C, H, W = tensor.size()
+            # radius used to separate low from high frequencies
+            radius = min(H, W) // 5  # this value can be tuned
+
+            # build a circular mask centered at (H/2, W/2)
+            Y, X = torch.meshgrid(torch.arange(H), torch.arange(W))
+            center_x, center_y = W // 2, H // 2
+            mask = (X - center_x) ** 2 + (Y - center_y) ** 2 <= radius ** 2
+            # build the low- and high-frequency masks
+            low_freq_mask = mask.unsqueeze(0).unsqueeze(0).to(tensor.device)
+            high_freq_mask = ~low_freq_mask
+
+            # low-frequency component
+            low_freq_fft = tensor_fft_shifted * low_freq_mask
+            # low_freq_fft_shifted = torch.fft.ifftshift(low_freq_fft)
+            # low_freq = torch.fft.ifft2(low_freq_fft_shifted).real
+
+            # high-frequency component
+            high_freq_fft = tensor_fft_shifted * high_freq_mask
+            # high_freq_fft_shifted = torch.fft.ifftshift(high_freq_fft)
+            # high_freq = torch.fft.ifft2(high_freq_fft_shifted).real
+
+            return low_freq_fft, high_freq_fft
+
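Since low_freq_mask and high_freq_mask are complementary, the two spectra returned by myfft partition the shifted FFT of the input, so inverting their sum recovers the original tensor up to floating-point error. A quick self-check sketch on a random tensor:

x = torch.randn(1, 3, 64, 64)
lf, hf = myfft(x)
# lf + hf is the full (shifted) spectrum; undo the shift and the FFT to get x back.
recon = torch.fft.ifft2(torch.fft.ifftshift(lf + hf)).real
assert torch.allclose(recon, x, atol=1e-4)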
+
+@torch.no_grad()
+def acc_call(
+    self,
+    prompt: Union[str, List[str]] = None,
+    prompt_2: Optional[Union[str, List[str]]] = None,
+    prompt_3: Optional[Union[str, List[str]]] = None,
+    height: Optional[int] = None,
+    width: Optional[int] = None,
+    frames: Optional[int] = None,
+    num_inference_steps: int = 28,
+    timesteps: List[int] = None,
+    guidance_scale: float = 7.0,
+    negative_prompt: Optional[Union[str, List[str]]] = None,
+    negative_prompt_2: Optional[Union[str, List[str]]] = None,
+    negative_prompt_3: Optional[Union[str, List[str]]] = None,
+    num_images_per_prompt: Optional[int] = 1,
+    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+    latents: Optional[torch.FloatTensor] = None,
+    prompt_embeds: Optional[torch.FloatTensor] = None,
+    negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+    negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+    output_type: Optional[str] = "pil",
+    return_dict: bool = True,
+    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+    clip_skip: Optional[int] = None,
+    callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+    callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+):
+    if True:
+        # print('acc call.......')
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+        frames = frames or 24
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            prompt_3,
+            height,
+            width,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            negative_prompt_3=negative_prompt_3,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._clip_skip = clip_skip
+        self._joint_attention_kwargs = joint_attention_kwargs
+        self._interrupt = False
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self.execution_device
+
+
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+        ) = self.encode_prompt(
+            prompt=prompt,
+            prompt_2=prompt_2,
+            prompt_3=prompt_3,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            negative_prompt_3=negative_prompt_3,
+            do_classifier_free_guidance=self.do_classifier_free_guidance,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            device=device,
+            clip_skip=self.clip_skip,
+            num_images_per_prompt=num_images_per_prompt,
+        )
+
+        if self.do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+            pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
+
+        # 4. Prepare timesteps
+        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+        self._num_timesteps = len(timesteps)
+
+        # 5. Prepare latent variables
+        num_channels_latents = self.transformer.config.in_channels
+        latents = self.prepare_latents(
+            batch_size * num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            frames,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+
+        # 6. Denoising loop
+        # with self.progress_bar(total=num_inference_steps) as progress_bar:
+        from tqdm import tqdm
+        for i, t in tqdm(enumerate(timesteps)):
+            if self.interrupt:
+                continue
+
+            # print(i, t,'******',timesteps)
+            # expand the latents if we are doing classifier free guidance
+            latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+            timestep = t.expand(latents.shape[0])
+
+            noise_pred_text = self.transformer(
+                hidden_states=latent_model_input[1,:].unsqueeze(0),
+                timestep=timestep,
+                encoder_hidden_states=prompt_embeds[1,:].unsqueeze(0),
+                pooled_projections=pooled_prompt_embeds[1,:].unsqueeze(0),
+                joint_attention_kwargs=self.joint_attention_kwargs,
+                return_dict=False,
+                # idx=i,
+            )[0]
+
+            if i<30 or (i>30 and i%5==0):
+                noise_pred_uncond = self.transformer(
+                    hidden_states=latent_model_input[0,:].unsqueeze(0),
+                    timestep=timestep,
+                    encoder_hidden_states=prompt_embeds[0,:].unsqueeze(0),
+                    pooled_projections=pooled_prompt_embeds[0,:].unsqueeze(0),
+                    joint_attention_kwargs=self.joint_attention_kwargs,
+                    return_dict=False,
+                    # idx=i,
+                )[0]
+                # print(noise_pred_uncond.shape,noise_pred_text.shape)
+                # exit(0)
+                # torch.Size([80, 16, 54, 96]) torch.Size([80, 16, 54, 96])
+                if i>=28:
+                    lf_uc, hf_uc = myfft(noise_pred_uncond.float())
+                    lf_c, hf_c = myfft(noise_pred_text.float())
+                    delta_lf = lf_uc - lf_c
+                    delta_hf = hf_uc - hf_c
+            else:
+                lf_c, hf_c = myfft(noise_pred_text.float())
+                delta_lf = delta_lf * 1.1
+                delta_hf = delta_hf * 1.25
+                new_lf_uc = delta_lf + lf_c
+                new_hf_uc = delta_hf + hf_c
+
+                combine_uc = new_lf_uc + new_hf_uc
+                combined_fft = torch.fft.ifftshift(combine_uc)
+                noise_pred_uncond = torch.fft.ifft2(combined_fft).real
+
+
+
+            self._guidance_scale = 1 + guidance_scale * (
+                (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
+            )
+            # perform guidance
+            if self.do_classifier_free_guidance:
+                # noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+            # compute the previous noisy sample x_t -> x_t-1
+            latents_dtype = latents.dtype
+            latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+            if latents.dtype != latents_dtype:
+                if torch.backends.mps.is_available():
+                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                    latents = latents.to(latents_dtype)
+
+            if callback_on_step_end is not None:
+                callback_kwargs = {}
+                for k in callback_on_step_end_tensor_inputs:
+                    callback_kwargs[k] = locals()[k]
+                callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                latents = callback_outputs.pop("latents", latents)
+                prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+                negative_pooled_prompt_embeds = callback_outputs.pop(
+                    "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+                )
+
+            # call the callback, if provided
+            # if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+            #     progress_bar.update()
+
+            if XLA_AVAILABLE:
+                xm.mark_step()
+
+        # if output_type == "latent":
+        #     image = latents
+
+        # else:
+        latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+        videos = []
+        for v_idx in range(latents.shape[1]):
+            image = self.vae.decode(latents[:,v_idx], return_dict=False)[0]
+            image = self.image_processor.postprocess(image, output_type=output_type)
+            videos.append(image[0])
+
+        return videos
+
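Note on the skipped-step path above: when the unconditional transformer pass is not run, noise_pred_uncond is rebuilt from the most recently cached low/high-frequency deltas and the current conditional prediction. A minimal sketch of that reuse on dummy tensors (shapes and values are illustrative, not model outputs):

# A "full" step: both branches are available, so cache their per-band differences.
uncond_prev = torch.randn(1, 16, 32, 32)
cond_prev = torch.randn(1, 16, 32, 32)
lf_uc, hf_uc = myfft(uncond_prev)
lf_c, hf_c = myfft(cond_prev)
delta_lf, delta_hf = lf_uc - lf_c, hf_uc - hf_c

# A "skipped" step: amplify the cached deltas (1.1 / 1.25 as in the loop) and add them to the
# current conditional spectrum, then invert the FFT to approximate the unconditional prediction.
cond_now = torch.randn(1, 16, 32, 32)
lf_c, hf_c = myfft(cond_now)
approx_spec = (delta_lf * 1.1 + lf_c) + (delta_hf * 1.25 + hf_c)
approx_uncond = torch.fft.ifft2(torch.fft.ifftshift(approx_spec)).real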
@spaces.GPU(duration=120)
def infer(prompt: str, progress=gr.Progress(track_tqdm=True)):
    torch.cuda.empty_cache()
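With acc_call installed as __call__, the rest of infer (not shown in this hunk) can invoke the pipeline directly and export the returned list of frames; a sketch of that downstream usage, with illustrative parameter values rather than the demo's actual ones:

video_frames = pipe(
    prompt,
    negative_prompt="",
    num_inference_steps=50,
    guidance_scale=7.5,
    width=768,
    height=432,
    frames=40,
)
save_path = f"./output/{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
export_to_video(video_frames, save_path, fps=8)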