Commit

Merge branch 'main' into layerwise-fp8-upcasting
a-r-r-o-w committed Jan 14, 2025
2 parents 15afe73 + f5f9cc0 commit ef5a274
Showing 12 changed files with 28 additions and 10 deletions.
2 changes: 1 addition & 1 deletion finetrainers/cogvideox/__init__.py
@@ -1,2 +1,2 @@
-from .cogvideox_lora import COGVIDEOX_T2V_LORA_CONFIG
 from .full_finetune import COGVIDEOX_T2V_FULL_FINETUNE_CONFIG
+from .lora import COGVIDEOX_T2V_LORA_CONFIG
2 changes: 1 addition & 1 deletion finetrainers/cogvideox/full_finetune.py
@@ -1,6 +1,6 @@
 from diffusers import CogVideoXPipeline
 
-from .cogvideox_lora import (
+from .lora import (
     calculate_noisy_latents,
     collate_fn_t2v,
     forward_pass,
File renamed without changes: finetrainers/cogvideox/{cogvideox_lora.py → lora.py}
2 changes: 1 addition & 1 deletion finetrainers/hunyuan_video/__init__.py
@@ -1,2 +1,2 @@
 from .full_finetune import HUNYUAN_VIDEO_T2V_FULL_FINETUNE_CONFIG
-from .hunyuan_video_lora import HUNYUAN_VIDEO_T2V_LORA_CONFIG
+from .lora import HUNYUAN_VIDEO_T2V_LORA_CONFIG
2 changes: 1 addition & 1 deletion finetrainers/hunyuan_video/full_finetune.py
@@ -1,6 +1,6 @@
 from diffusers import HunyuanVideoPipeline
 
-from .hunyuan_video_lora import (
+from .lora import (
     collate_fn_t2v,
     forward_pass,
     initialize_pipeline,
File renamed without changes: finetrainers/hunyuan_video/{hunyuan_video_lora.py → lora.py}
2 changes: 1 addition & 1 deletion finetrainers/ltx_video/__init__.py
@@ -1,2 +1,2 @@
 from .full_finetune import LTX_VIDEO_T2V_FULL_FINETUNE_CONFIG
-from .ltx_video_lora import LTX_VIDEO_T2V_LORA_CONFIG
+from .lora import LTX_VIDEO_T2V_LORA_CONFIG
2 changes: 1 addition & 1 deletion finetrainers/ltx_video/full_finetune.py
@@ -1,6 +1,6 @@
 from diffusers import LTXPipeline
 
-from .ltx_video_lora import (
+from .lora import (
     collate_fn_t2v,
     forward_pass,
     initialize_pipeline,
File renamed without changes: finetrainers/ltx_video/{ltx_video_lora.py → lora.py}
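
The renamed modules are still re-exported from each package's __init__.py, so code that imports the configs through the packages is unaffected by the renames. A minimal sketch, assuming finetrainers is importable from the working directory:

# The config objects keep their public import path despite the file renames,
# because the package __init__ files re-export them from the new .lora modules.
from finetrainers.cogvideox import COGVIDEOX_T2V_LORA_CONFIG
from finetrainers.hunyuan_video import HUNYUAN_VIDEO_T2V_LORA_CONFIG
from finetrainers.ltx_video import LTX_VIDEO_T2V_LORA_CONFIG

print(COGVIDEOX_T2V_LORA_CONFIG, HUNYUAN_VIDEO_T2V_LORA_CONFIG, LTX_VIDEO_T2V_LORA_CONFIG)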
tests/test_model_runs_minimally_lora.sh: file mode changed 100644 → 100755 (made executable); no content changes.
4 changes: 0 additions & 4 deletions training/mochi-1/text_to_video_lora.py
@@ -40,10 +40,6 @@
 from dataset_simple import LatentEmbedDataset
 
-import sys
-
-
-sys.path.append("..")
 
 from utils import print_memory, reset_memory  # isort:skip


22 changes: 22 additions & 0 deletions training/mochi-1/utils.py
@@ -0,0 +1,22 @@
+import gc
+import inspect
+from typing import Optional, Tuple, Union
+
+import torch
+from accelerate.logging import get_logger  # assumed import for get_logger
+
+logger = get_logger(__name__)
+
+
+# Run Python garbage collection, release cached CUDA memory, and reset CUDA memory statistics for `device`.
+def reset_memory(device: Union[str, torch.device]) -> None:
+    gc.collect()
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats(device)
+    torch.cuda.reset_accumulated_memory_stats(device)
+
+
+# Print current allocated, peak allocated, and peak reserved CUDA memory on `device`, converted from bytes with 1024**3.
+def print_memory(device: Union[str, torch.device]) -> None:
+    memory_allocated = torch.cuda.memory_allocated(device) / 1024**3
+    max_memory_allocated = torch.cuda.max_memory_allocated(device) / 1024**3
+    max_memory_reserved = torch.cuda.max_memory_reserved(device) / 1024**3
+    print(f"{memory_allocated=:.3f} GB")
+    print(f"{max_memory_allocated=:.3f} GB")
+    print(f"{max_memory_reserved=:.3f} GB")
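
For context, a minimal usage sketch, not taken from the commit, of how these helpers are typically wrapped around a CUDA workload; the matrix multiply below is a stand-in for the actual training or inference step:

import torch

from utils import print_memory, reset_memory

device = torch.device("cuda")

# Start from a clean slate so the peak statistics reflect only the work below.
reset_memory(device)

# Stand-in workload; in text_to_video_lora.py this would be the training/inference step.
x = torch.randn(1024, 1024, device=device)
y = x @ x

# Make sure queued kernels have finished before reading the memory counters.
torch.cuda.synchronize(device)
print_memory(device)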
