From fcdb36a041d90f9661e1b691d4e2ef153f716c48 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 16 Jan 2025 17:33:38 +0530
Subject: [PATCH] updates

---
 finetrainers/trainer.py               | 4 ++++
 finetrainers/utils/diffusion_utils.py | 3 ---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/finetrainers/trainer.py b/finetrainers/trainer.py
index 392e7f30..2dabc6c5 100644
--- a/finetrainers/trainer.py
+++ b/finetrainers/trainer.py
@@ -696,6 +696,10 @@ def train(self) -> None:
                     if "pooled_prompt_embeds" in text_conditions:
                         text_conditions["pooled_prompt_embeds"].fill_(0)
 
+                # TODO: Mochi only does: sigmas = torch.rand(latents.shape[0])
+                # It doesn't rely on `sigmas` configured in the scheduler. To handle that, should
+                # Mochi implement its own `prepare_sigmas()` similar to how
+                # `calculate_noisy_latents()` is implemented?
                 sigmas = prepare_sigmas(
                     scheduler=self.scheduler,
                     sigmas=scheduler_sigmas,
diff --git a/finetrainers/utils/diffusion_utils.py b/finetrainers/utils/diffusion_utils.py
index bb2e14b0..f0265956 100644
--- a/finetrainers/utils/diffusion_utils.py
+++ b/finetrainers/utils/diffusion_utils.py
@@ -93,9 +93,6 @@ def prepare_sigmas(
     device: torch.device = torch.device("cpu"),
     generator: Optional[torch.Generator] = None,
 ) -> torch.Tensor:
-    # TODO: Mochi only does: sigmas = torch.rand(latents.shape[0])
-    # It doesn't rely on `sigmas` configured in the scheduler. To handle that, should
-    # Mochi implement its own `prepare_sigmas()` similar to how `calculate_noisy_latents()` is implemented?
     if isinstance(scheduler, FlowMatchEulerDiscreteScheduler):
         weights = compute_density_for_timestep_sampling(
             weighting_scheme=flow_weighting_scheme,
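
Note on the TODO above (not part of the patch itself): a minimal sketch of what
a Mochi-specific `prepare_sigmas()` could look like if the question in the TODO
is answered with "yes". The function name `prepare_sigmas_mochi` and its
signature are assumptions for illustration; only the uniform per-sample
`torch.rand(...)` sampling comes from the TODO itself.

    from typing import Optional

    import torch

    def prepare_sigmas_mochi(
        batch_size: int,
        device: torch.device = torch.device("cpu"),
        generator: Optional[torch.Generator] = None,
    ) -> torch.Tensor:
        # Per the TODO: Mochi draws one sigma per sample uniformly from [0, 1)
        # and does not use the `sigmas` configured in the scheduler.
        return torch.rand(batch_size, device=device, generator=generator)

    # Hypothetical usage, mirroring the call site in `train()`:
    # sigmas = prepare_sigmas_mochi(latents.shape[0], device=latents.device)

Keeping such a variant behind the same interface as the scheduler-based
`prepare_sigmas()` (e.g. by dispatching on the model type, as
`calculate_noisy_latents()` does) would let the training loop stay
model-agnostic.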