# extendvideo.py
import argparse
from datetime import datetime
import gc
import random
import os
import re
import time
import math
from typing import Tuple, Optional, List, Union, Any
from pathlib import Path # Added for glob_images in V2V
import torch
import accelerate
from accelerate import Accelerator
from safetensors.torch import load_file, save_file
from safetensors import safe_open
from PIL import Image
import cv2 # Added for V2V video loading/resizing
import numpy as np # Added for V2V video processing
import torchvision.transforms.functional as TF
from tqdm import tqdm
from networks import lora_wan
from utils.safetensors_utils import mem_eff_save_file, load_safetensors
from wan.configs import WAN_CONFIGS, SUPPORTED_SIZES
import wan
from wan.modules.model import WanModel, load_wan_model, detect_wan_sd_dtype
from wan.modules.vae import WanVAE
from wan.modules.t5 import T5EncoderModel
from wan.modules.clip import CLIPModel
from modules.scheduling_flow_match_discrete import FlowMatchDiscreteScheduler
from wan.utils.fm_solvers import FlowDPMSolverMultistepScheduler, get_sampling_sigmas, retrieve_timesteps
from wan.utils.fm_solvers_unipc import FlowUniPCMultistepScheduler
try:
    from lycoris.kohya import create_network_from_weights
except ImportError:
    # LyCORIS is optional; only needed when --lycoris is specified
    pass
from utils.model_utils import str_to_dtype
from utils.device_utils import clean_memory_on_device
# Original load_video/load_images are still needed for Fun-Control / image loading
from hv_generate_video import save_images_grid, save_videos_grid, synchronize_device, load_images as hv_load_images, load_video as hv_load_video
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def parse_args() -> argparse.Namespace:
"""parse command line arguments"""
parser = argparse.ArgumentParser(description="Wan 2.1 inference script")
# WAN arguments
parser.add_argument("--ckpt_dir", type=str, default=None, help="The path to the checkpoint directory (Wan 2.1 official).")
parser.add_argument("--task", type=str, default="t2v-14B", choices=list(WAN_CONFIGS.keys()), help="The task to run.")
parser.add_argument(
"--sample_solver", type=str, default="unipc", choices=["unipc", "dpm++", "vanilla"], help="The solver used to sample."
)
parser.add_argument("--dit", type=str, default=None, help="DiT checkpoint path")
parser.add_argument("--vae", type=str, default=None, help="VAE checkpoint path")
parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is bfloat16")
parser.add_argument("--vae_cache_cpu", action="store_true", help="cache features in VAE on CPU")
parser.add_argument("--t5", type=str, default=None, help="text encoder (T5) checkpoint path")
parser.add_argument("--clip", type=str, default=None, help="text encoder (CLIP) checkpoint path")
# LoRA
parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path")
parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier")
parser.add_argument("--include_patterns", type=str, nargs="*", default=None, help="LoRA module include patterns")
parser.add_argument("--exclude_patterns", type=str, nargs="*", default=None, help="LoRA module exclude patterns")
parser.add_argument(
"--save_merged_model",
type=str,
default=None,
help="Save merged model to path. If specified, no inference will be performed.",
)
# inference
parser.add_argument("--prompt", type=str, required=True, help="prompt for generation (describe the continuation for extension)")
parser.add_argument(
"--negative_prompt",
type=str,
default=None,
help="negative prompt for generation, use default negative prompt if not specified",
)
parser.add_argument("--video_size", type=int, nargs=2, default=[256, 256], help="video size, height and width")
parser.add_argument("--video_length", type=int, default=None, help="Total video length (input+generated) for diffusion processing. Default depends on task/mode.")
parser.add_argument("--fps", type=int, default=16, help="video fps, Default is 16")
parser.add_argument("--infer_steps", type=int, default=None, help="number of inference steps")
parser.add_argument("--save_path", type=str, required=True, help="path to save generated video")
parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.")
parser.add_argument(
"--cpu_noise", action="store_true", help="Use CPU to generate noise (compatible with ComfyUI). Default is False."
)
parser.add_argument(
"--guidance_scale",
type=float,
default=5.0,
help="Guidance scale for classifier free guidance. Default is 5.0.",
)
# Modes (mutually exclusive)
parser.add_argument("--video_path", type=str, default=None, help="path to video for video2video inference (standard Wan V2V)")
parser.add_argument("--image_path", type=str, default=None, help="path to image for image2video inference")
parser.add_argument("--extend_video", type=str, default=None, help="path to video for extending it using initial frames")
# Mode specific args
parser.add_argument("--strength", type=float, default=0.75, help="Strength for video2video inference (0.0-1.0)")
parser.add_argument("--end_image_path", type=str, default=None, help="path to end image for image2video or extension inference")
parser.add_argument("--num_input_frames", type=int, default=4, help="Number of frames from start of --extend_video to use as input (min 1)")
parser.add_argument("--extend_length", type=int, default=None, help="Number of frames to generate *after* the input frames for --extend_video. Default makes total length match task default (e.g., 81).")
# Fun-Control argument (distinct from V2V/I2V/Extend)
parser.add_argument(
"--control_strength",
type=float,
default=1.0,
help="Strength of control video influence for Fun-Control (1.0 = normal)",
)
parser.add_argument(
"--control_path",
type=str,
default=None,
help="path to control video for inference with controlnet (Fun-Control model only). video file or directory with images",
)
parser.add_argument("--trim_tail_frames", type=int, default=0, help="trim tail N frames from the video before saving")
parser.add_argument(
"--cfg_skip_mode",
type=str,
default="none",
choices=["early", "late", "middle", "early_late", "alternate", "none"],
help="CFG skip mode. each mode skips different parts of the CFG. "
" early: initial steps, late: later steps, middle: middle steps, early_late: both early and late, alternate: alternate, none: no skip (default)",
)
parser.add_argument(
"--cfg_apply_ratio",
type=float,
default=None,
help="The ratio of steps to apply CFG (0.0 to 1.0). Default is None (apply all steps).",
)
parser.add_argument(
"--slg_layers", type=str, default=None, help="Skip block (layer) indices for SLG (Skip Layer Guidance), comma separated"
)
parser.add_argument(
"--slg_scale",
type=float,
default=3.0,
help="scale for SLG classifier free guidance. Default is 3.0. Ignored if slg_mode is None or uncond",
)
parser.add_argument("--slg_start", type=float, default=0.0, help="start ratio for inference steps for SLG. Default is 0.0.")
parser.add_argument("--slg_end", type=float, default=0.3, help="end ratio for inference steps for SLG. Default is 0.3.")
parser.add_argument(
"--slg_mode",
type=str,
default=None,
choices=["original", "uncond"],
help="SLG mode. original: same as SD3, uncond: replace uncond pred with SLG pred",
)
# Flow Matching
parser.add_argument(
"--flow_shift",
type=float,
default=None,
help="Shift factor for flow matching schedulers. Default depends on task.",
)
parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model")
parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT, only for fp8")
parser.add_argument("--fp8_fast", action="store_true", help="Enable fast FP8 arithmetic (RTX 4XXX+), only for fp8_scaled")
parser.add_argument("--fp8_t5", action="store_true", help="use fp8 for Text Encoder model")
parser.add_argument(
"--device", type=str, default=None, help="device to use for inference. If None, use CUDA if available, otherwise use CPU"
)
parser.add_argument(
"--attn_mode",
type=str,
default="torch",
choices=["flash", "flash2", "flash3", "torch", "sageattn", "xformers", "sdpa"],
help="attention mode",
)
parser.add_argument("--blocks_to_swap", type=int, default=0, help="number of blocks to swap in the model")
parser.add_argument(
"--output_type", type=str, default="video", choices=["video", "images", "latent", "both"], help="output type"
)
parser.add_argument("--no_metadata", action="store_true", help="do not save metadata")
parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. no inference")
parser.add_argument("--lycoris", action="store_true", help="use lycoris for inference")
parser.add_argument("--compile", action="store_true", help="Enable torch.compile")
parser.add_argument(
"--compile_args",
nargs=4,
metavar=("BACKEND", "MODE", "DYNAMIC", "FULLGRAPH"),
default=["inductor", "max-autotune-no-cudagraphs", "False", "False"],
help="Torch.compile settings",
)
args = parser.parse_args()
assert (args.latent_path is None or len(args.latent_path) == 0) or (
args.output_type == "images" or args.output_type == "video"
), "latent_path is only supported for images or video output"
# --- Mode Exclusivity Checks ---
modes = [args.video_path, args.image_path, args.extend_video, args.control_path]
num_modes_set = sum(1 for mode in modes if mode is not None)
if num_modes_set > 1:
active_modes = []
if args.video_path: active_modes.append("--video_path (V2V)")
if args.image_path: active_modes.append("--image_path (I2V)")
if args.extend_video: active_modes.append("--extend_video (Extend)")
if args.control_path: active_modes.append("--control_path (Fun-Control)")
# Allow Fun-Control + another mode conceptually, but the script logic needs adjustment
if not (num_modes_set == 2 and args.control_path is not None):
raise ValueError(f"Only one operation mode can be specified. Found: {', '.join(active_modes)}")
# Special case: Fun-Control can technically be combined, but let's check task compatibility
if args.control_path is not None and not WAN_CONFIGS[args.task].is_fun_control:
raise ValueError("--control_path is provided, but the selected task does not support Fun-Control.")
# --- Specific Mode Validations ---
if args.extend_video is not None:
if args.num_input_frames < 1:
raise ValueError("--num_input_frames must be at least 1 for video extension.")
if "t2v" in args.task:
logger.warning("--extend_video provided, but task is t2v. Using I2V-like conditioning.")
# We'll set video_length later based on num_input_frames and extend_length
if args.image_path is not None:
logger.warning("--image_path is provided. This is standard single-frame I2V.")
if "t2v" in args.task:
logger.warning("--image_path provided, but task is t2v. Using I2V conditioning.")
if args.video_path is not None:
logger.info("Running in V2V mode.")
# V2V length is determined later if not specified
if args.control_path is not None and not WAN_CONFIGS[args.task].is_fun_control:
raise ValueError("--control_path is provided, but the selected task does not support Fun-Control.")
return args
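# Illustrative invocations (a sketch only; the checkpoint paths below are
# placeholders, not files shipped with this repo):
#
#   Extend an existing clip, conditioning on its first 8 frames:
#     python extendvideo.py --task i2v-14B --dit dit.safetensors --vae wan/Wan2.1_VAE.pth \
#       --t5 t5.pth --clip clip.pth --extend_video input.mp4 --num_input_frames 8 \
#       --prompt "the camera keeps panning right" --save_path out
#
#   Standard single-image I2V:
#     python extendvideo.py --task i2v-14B --dit dit.safetensors --vae wan/Wan2.1_VAE.pth \
#       --t5 t5.pth --clip clip.pth --image_path start.png \
#       --prompt "waves rolling onto the beach" --save_path out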
def get_task_defaults(task: str, size: Optional[Tuple[int, int]] = None, is_extend_mode: bool = False) -> Tuple[int, float, int, bool]:
"""Return default values for each task
Args:
task: task name (t2v, t2i, i2v etc.)
        size: size of the video (height, width), matching args.video_size
is_extend_mode: whether we are in video extension mode
Returns:
Tuple[int, float, int, bool]: (infer_steps, flow_shift, video_length, needs_clip)
"""
    height, width = size if size else (0, 0)
# I2V and Extend mode share similar defaults
is_i2v_like = "i2v" in task or is_extend_mode
if "t2i" in task:
return 50, 5.0, 1, False
elif is_i2v_like:
flow_shift = 3.0 if (width == 832 and height == 480) or (width == 480 and height == 832) else 5.0
return 40, flow_shift, 81, True # Default total length 81
else: # t2v or default
return 50, 5.0, 81, False # Default total length 81
def setup_args(args: argparse.Namespace) -> argparse.Namespace:
"""Validate and set default values for optional arguments
Args:
args: command line arguments
Returns:
argparse.Namespace: updated arguments
"""
is_extend_mode = args.extend_video is not None
# Get default values for the task
default_infer_steps, default_flow_shift, default_video_length, _ = get_task_defaults(args.task, tuple(args.video_size), is_extend_mode)
# Apply default values to unset arguments
if args.infer_steps is None:
args.infer_steps = default_infer_steps
if args.flow_shift is None:
args.flow_shift = default_flow_shift
# --- Video Length Handling ---
if is_extend_mode:
if args.extend_length is None:
# Calculate extend_length to reach the default total length
args.extend_length = max(1, default_video_length - args.num_input_frames)
logger.info(f"Defaulting --extend_length to {args.extend_length} to reach total length {default_video_length}")
# Set the total video_length for processing
args.video_length = args.num_input_frames + args.extend_length
if args.video_length <= args.num_input_frames:
raise ValueError(f"Total video length ({args.video_length}) must be greater than input frames ({args.num_input_frames}). Increase --extend_length.")
elif args.video_length is None and args.video_path is None: # T2V, I2V (not extend)
args.video_length = default_video_length
elif args.video_length is None and args.video_path is not None: # V2V auto-detect
pass # Delay setting default if V2V and length not specified
elif args.video_length is not None: # User specified length
pass
# Force video_length to 1 for t2i tasks
if "t2i" in task:
assert args.video_length == 1, f"video_length should be 1 for task {args.task}"
# parse slg_layers
if args.slg_layers is not None:
args.slg_layers = list(map(int, args.slg_layers.split(",")))
return args
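# Worked example of the length handling above (values follow directly from the
# defaults in get_task_defaults): with --extend_video set, --num_input_frames 4,
# and no --extend_length, the default total of 81 frames gives
# extend_length = 81 - 4 = 77 and video_length = 4 + 77 = 81.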
def check_inputs(args: argparse.Namespace) -> Tuple[int, int, Optional[int]]:
"""Validate video size and potentially length (if not V2V auto-detect)
Args:
args: command line arguments
Returns:
Tuple[int, int, Optional[int]]: (height, width, video_length)
"""
height = args.video_size[0]
width = args.video_size[1]
size = f"{width}*{height}"
is_extend_mode = args.extend_video is not None
is_v2v_mode = args.video_path is not None
# Check supported sizes unless it's V2V/Extend (input video dictates size) or FunControl
if not is_v2v_mode and not is_extend_mode and not WAN_CONFIGS[args.task].is_fun_control:
if size not in SUPPORTED_SIZES[args.task]:
logger.warning(f"Size {size} is not supported for task {args.task}. Supported sizes are {SUPPORTED_SIZES[args.task]}.")
video_length = args.video_length # Might be None if V2V auto-detect
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
return height, width, video_length
def calculate_dimensions(video_size: Tuple[int, int], video_length: int, config) -> Tuple[Tuple[int, int, int, int], int]:
"""calculate dimensions for the generation
Args:
video_size: video frame size (height, width)
video_length: number of frames in the video being processed
config: model configuration
Returns:
Tuple[Tuple[int, int, int, int], int]:
((channels, frames, height, width), seq_len)
"""
height, width = video_size
frames = video_length
# calculate latent space dimensions
lat_f = (frames - 1) // config.vae_stride[0] + 1
lat_h = height // config.vae_stride[1]
lat_w = width // config.vae_stride[2]
# calculate sequence length
seq_len = math.ceil((lat_h * lat_w) / (config.patch_size[1] * config.patch_size[2]) * lat_f)
return ((16, lat_f, lat_h, lat_w), seq_len)
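# Worked example (assuming the usual Wan 2.1 values vae_stride=(4, 8, 8) and
# patch_size=(1, 2, 2); check WAN_CONFIGS for the actual task config):
# an 832x480 video with 81 frames yields
#   lat_f = (81 - 1) // 4 + 1 = 21, lat_h = 480 // 8 = 60, lat_w = 832 // 8 = 104,
#   seq_len = ceil(60 * 104 / (2 * 2) * 21) = 32760.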
# Modified function (replace the original)
def load_vae(args: argparse.Namespace, config, device: torch.device, dtype: torch.dtype) -> WanVAE:
"""load VAE model with robust path handling
Args:
args: command line arguments
config: model configuration
device: device to use
dtype: data type for the model
Returns:
WanVAE: loaded VAE model
"""
vae_override_path = args.vae
vae_filename = config.vae_checkpoint # Get expected filename, e.g., "Wan2.1_VAE.pth"
# Assume models are in 'wan' dir relative to script if not otherwise specified
vae_base_dir = "wan"
final_vae_path = None
# 1. Check if args.vae is a valid *existing file path*
if vae_override_path and isinstance(vae_override_path, str) and \
(vae_override_path.endswith(".pth") or vae_override_path.endswith(".safetensors")) and \
os.path.isfile(vae_override_path):
final_vae_path = vae_override_path
logger.info(f"Using VAE override path from --vae: {final_vae_path}")
# 2. If override is invalid or not provided, construct default path
if final_vae_path is None:
constructed_path = os.path.join(vae_base_dir, vae_filename)
if os.path.isfile(constructed_path):
final_vae_path = constructed_path
logger.info(f"Constructed default VAE path: {final_vae_path}")
if vae_override_path:
logger.warning(f"Ignoring potentially invalid --vae argument: {vae_override_path}")
else:
# 3. Fallback using ckpt_dir if provided and default construction failed
if args.ckpt_dir:
fallback_path = os.path.join(args.ckpt_dir, vae_filename)
if os.path.isfile(fallback_path):
final_vae_path = fallback_path
logger.info(f"Using VAE path from --ckpt_dir fallback: {final_vae_path}")
else:
# If all attempts fail, raise error
raise FileNotFoundError(f"Cannot find VAE. Checked override '{vae_override_path}', constructed '{constructed_path}', and fallback '{fallback_path}'")
else:
raise FileNotFoundError(f"Cannot find VAE. Checked override '{vae_override_path}' and constructed '{constructed_path}'. No --ckpt_dir provided for fallback.")
# At this point, final_vae_path should be valid
logger.info(f"Loading VAE model from final path: {final_vae_path}")
cache_device = torch.device("cpu") if args.vae_cache_cpu else None
vae = WanVAE(vae_path=final_vae_path, device=device, dtype=dtype, cache_device=cache_device)
return vae
def load_text_encoder(args: argparse.Namespace, config, device: torch.device) -> T5EncoderModel:
"""load text encoder (T5) model
Args:
args: command line arguments
config: model configuration
device: device to use
Returns:
T5EncoderModel: loaded text encoder model
"""
checkpoint_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.t5_checkpoint)
tokenizer_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.t5_tokenizer)
text_encoder = T5EncoderModel(
text_len=config.text_len,
dtype=config.t5_dtype,
device=device,
checkpoint_path=checkpoint_path,
tokenizer_path=tokenizer_path,
weight_path=args.t5,
fp8=args.fp8_t5,
)
return text_encoder
def load_clip_model(args: argparse.Namespace, config, device: torch.device) -> CLIPModel:
"""load CLIP model (for I2V / Extend only)
Args:
args: command line arguments
config: model configuration
device: device to use
Returns:
CLIPModel: loaded CLIP model
"""
checkpoint_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.clip_checkpoint)
tokenizer_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.clip_tokenizer)
clip = CLIPModel(
dtype=config.clip_dtype,
device=device,
checkpoint_path=checkpoint_path,
tokenizer_path=tokenizer_path,
weight_path=args.clip,
)
return clip
def load_dit_model(
args: argparse.Namespace,
config,
device: torch.device,
dit_dtype: torch.dtype,
dit_weight_dtype: Optional[torch.dtype] = None,
is_i2v_like: bool = False, # Combined flag for I2V and Extend modes
) -> WanModel:
"""load DiT model
Args:
args: command line arguments
config: model configuration
device: device to use
dit_dtype: data type for the model
dit_weight_dtype: data type for the model weights. None for as-is
is_i2v_like: I2V or Extend mode (might affect some model config details)
Returns:
WanModel: loaded DiT model
"""
loading_device = "cpu"
if args.blocks_to_swap == 0 and args.lora_weight is None and not args.fp8_scaled:
loading_device = device
loading_weight_dtype = dit_weight_dtype
if args.fp8_scaled or args.lora_weight is not None:
loading_weight_dtype = dit_dtype # load as-is
# do not fp8 optimize because we will merge LoRA weights
# Pass the is_i2v_like flag if the underlying loading function uses it
model = load_wan_model(config, device, args.dit, args.attn_mode, False, loading_device, loading_weight_dtype, is_i2v_like)
return model
def merge_lora_weights(model: WanModel, args: argparse.Namespace, device: torch.device) -> None:
"""merge LoRA weights to the model
Args:
model: DiT model
args: command line arguments
device: device to use
"""
if args.lora_weight is None or len(args.lora_weight) == 0:
return
for i, lora_weight in enumerate(args.lora_weight):
if args.lora_multiplier is not None and len(args.lora_multiplier) > i:
lora_multiplier = args.lora_multiplier[i]
else:
lora_multiplier = 1.0
logger.info(f"Loading LoRA weights from {lora_weight} with multiplier {lora_multiplier}")
weights_sd = load_file(lora_weight)
# apply include/exclude patterns
original_key_count = len(weights_sd.keys())
if args.include_patterns is not None and len(args.include_patterns) > i:
include_pattern = args.include_patterns[i]
regex_include = re.compile(include_pattern)
weights_sd = {k: v for k, v in weights_sd.items() if regex_include.search(k)}
logger.info(f"Filtered keys with include pattern {include_pattern}: {original_key_count} -> {len(weights_sd.keys())}")
if args.exclude_patterns is not None and len(args.exclude_patterns) > i:
original_key_count_ex = len(weights_sd.keys())
exclude_pattern = args.exclude_patterns[i]
regex_exclude = re.compile(exclude_pattern)
weights_sd = {k: v for k, v in weights_sd.items() if not regex_exclude.search(k)}
logger.info(
f"Filtered keys with exclude pattern {exclude_pattern}: {original_key_count_ex} -> {len(weights_sd.keys())}"
)
if len(weights_sd) != original_key_count:
remaining_keys = list(set([k.split(".", 1)[0] for k in weights_sd.keys()]))
remaining_keys.sort()
logger.info(f"Remaining LoRA modules after filtering: {remaining_keys}")
if len(weights_sd) == 0:
logger.warning(f"No keys left after filtering.")
if args.lycoris:
lycoris_net, _ = create_network_from_weights(
multiplier=lora_multiplier,
file=None,
weights_sd=weights_sd,
unet=model,
text_encoder=None,
vae=None,
for_inference=True,
)
lycoris_net.merge_to(None, model, weights_sd, dtype=None, device=device)
else:
network = lora_wan.create_arch_network_from_weights(lora_multiplier, weights_sd, unet=model, for_inference=True)
network.merge_to(None, model, weights_sd, device=device, non_blocking=True)
synchronize_device(device)
logger.info("LoRA weights loaded")
# save model here before casting to dit_weight_dtype
if args.save_merged_model:
logger.info(f"Saving merged model to {args.save_merged_model}")
mem_eff_save_file(model.state_dict(), args.save_merged_model) # save_file needs a lot of memory
logger.info("Merged model saved")
def optimize_model(
model: WanModel, args: argparse.Namespace, device: torch.device, dit_dtype: torch.dtype, dit_weight_dtype: torch.dtype
) -> None:
"""optimize the model (FP8 conversion, device move etc.)
Args:
model: dit model
args: command line arguments
device: device to use
dit_dtype: dtype for the model
dit_weight_dtype: dtype for the model weights
"""
if args.fp8_scaled:
# load state dict as-is and optimize to fp8
state_dict = model.state_dict()
# if no blocks to swap, we can move the weights to GPU after optimization on GPU (omit redundant CPU->GPU copy)
move_to_device = args.blocks_to_swap == 0 # if blocks_to_swap > 0, we will keep the model on CPU
state_dict = model.fp8_optimization(state_dict, device, move_to_device, use_scaled_mm=args.fp8_fast)
info = model.load_state_dict(state_dict, strict=True, assign=True)
logger.info(f"Loaded FP8 optimized weights: {info}")
if args.blocks_to_swap == 0:
model.to(device) # make sure all parameters are on the right device (e.g. RoPE etc.)
else:
# simple cast to dit_dtype
target_dtype = None # load as-is (dit_weight_dtype == dtype of the weights in state_dict)
target_device = None
if dit_weight_dtype is not None: # in case of args.fp8 and not args.fp8_scaled
logger.info(f"Convert model to {dit_weight_dtype}")
target_dtype = dit_weight_dtype
if args.blocks_to_swap == 0:
logger.info(f"Move model to device: {device}")
target_device = device
model.to(target_device, target_dtype) # move and cast at the same time. this reduces redundant copy operations
if args.compile:
compile_backend, compile_mode, compile_dynamic, compile_fullgraph = args.compile_args
logger.info(
f"Torch Compiling[Backend: {compile_backend}; Mode: {compile_mode}; Dynamic: {compile_dynamic}; Fullgraph: {compile_fullgraph}]"
)
torch._dynamo.config.cache_size_limit = 32
for i in range(len(model.blocks)):
model.blocks[i] = torch.compile(
model.blocks[i],
backend=compile_backend,
mode=compile_mode,
                dynamic=compile_dynamic.lower() == "true",
                fullgraph=compile_fullgraph.lower() == "true",
)
if args.blocks_to_swap > 0:
logger.info(f"Enable swap {args.blocks_to_swap} blocks to CPU from device: {device}")
model.enable_block_swap(args.blocks_to_swap, device, supports_backward=False)
model.move_to_device_except_swap_blocks(device)
model.prepare_block_swap_before_forward()
else:
# make sure the model is on the right device
model.to(device)
model.eval().requires_grad_(False)
clean_memory_on_device(device)
def prepare_t2v_inputs(
args: argparse.Namespace, config, accelerator: Accelerator, device: torch.device, vae: Optional[WanVAE] = None
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]:
"""Prepare inputs for T2V (including Fun-Control variation)
Args:
args: command line arguments
config: model configuration
accelerator: Accelerator instance
device: device to use
vae: VAE model, required only for Fun-Control
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]:
(noise, context, context_null, (arg_c, arg_null))
"""
# Prepare inputs for T2V
# calculate dimensions and sequence length
height, width = args.video_size
# T2V/FunControl length should be set by setup_args
frames = args.video_length
if frames is None:
raise ValueError("video_length must be determined before calling prepare_t2v_inputs")
(_, lat_f, lat_h, lat_w), seq_len = calculate_dimensions(args.video_size, frames, config)
target_shape = (16, lat_f, lat_h, lat_w) # Latent channel dim is 16
# configure negative prompt
n_prompt = args.negative_prompt if args.negative_prompt else config.sample_neg_prompt
# set seed
seed = args.seed # Seed should be set in generate()
if not args.cpu_noise:
seed_g = torch.Generator(device=device)
seed_g.manual_seed(seed)
else:
# ComfyUI compatible noise
seed_g = torch.manual_seed(seed)
# load text encoder
text_encoder = load_text_encoder(args, config, device)
text_encoder.model.to(device)
# encode prompt
with torch.no_grad():
if args.fp8_t5:
with torch.amp.autocast(device_type=device.type, dtype=config.t5_dtype):
context = text_encoder([args.prompt], device)
context_null = text_encoder([n_prompt], device)
else:
context = text_encoder([args.prompt], device)
context_null = text_encoder([n_prompt], device)
# free text encoder and clean memory
del text_encoder
clean_memory_on_device(device)
# Fun-Control: encode control video to latent space
y = None
if config.is_fun_control and args.control_path:
if vae is None:
raise ValueError("VAE must be provided for Fun-Control input preparation.")
logger.info(f"Encoding control video for Fun-Control")
control_video = load_control_video(args.control_path, frames, height, width).to(device)
vae.to_device(device)
with accelerator.autocast(), torch.no_grad():
y = vae.encode([control_video])[0] # Encode video
y = y * args.control_strength # Apply strength
vae.to_device("cpu" if args.vae_cache_cpu else "cpu") # Move VAE back
clean_memory_on_device(device)
logger.info(f"Fun-Control conditioning 'y' shape: {y.shape}")
# generate noise
noise = torch.randn(target_shape, dtype=torch.float32, generator=seed_g, device=device if not args.cpu_noise else "cpu")
noise = noise.to(device)
# prepare model input arguments
arg_c = {"context": context, "seq_len": seq_len}
arg_null = {"context": context_null, "seq_len": seq_len}
if y is not None: # Add 'y' only if Fun-Control generated it
arg_c["y"] = [y]
arg_null["y"] = [y]
return noise, context, context_null, (arg_c, arg_null)
def load_video_frames(video_path: str, num_frames: int, target_reso: Tuple[int, int]) -> Tuple[List[np.ndarray], torch.Tensor]:
"""Load the first N frames from a video, resize, return numpy list and normalized tensor.
Args:
video_path (str): Path to the video file.
num_frames (int): Number of frames to load from the start.
target_reso (Tuple[int, int]): Target resolution (height, width).
Returns:
Tuple[List[np.ndarray], torch.Tensor]:
- List of numpy arrays (frames) in HWC, RGB, uint8 format.
- Tensor of shape [C, F, H, W], float32, range [0, 1].
"""
logger.info(f"Loading first {num_frames} frames from {video_path}, target reso {target_reso}")
target_h, target_w = target_reso
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise ValueError(f"Failed to open video file: {video_path}")
# Get total frame count and check if enough frames exist
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if total_frames < num_frames:
cap.release()
raise ValueError(f"Video has only {total_frames} frames, but {num_frames} were requested for input.")
# Read frames
frames_np = []
for i in range(num_frames):
ret, frame = cap.read()
if not ret:
logger.warning(f"Could only read {len(frames_np)} frames out of {num_frames} requested from {video_path}.")
break
# Convert BGR to RGB
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Resize
current_h, current_w = frame_rgb.shape[:2]
interpolation = cv2.INTER_AREA if target_h * target_w < current_h * current_w else cv2.INTER_LANCZOS4
frame_resized = cv2.resize(frame_rgb, (target_w, target_h), interpolation=interpolation)
frames_np.append(frame_resized)
cap.release()
if len(frames_np) != num_frames:
raise RuntimeError(f"Failed to load the required {num_frames} frames.")
# Convert list of numpy arrays to tensor [F, H, W, C] -> [C, F, H, W], range [0, 1]
frames_tensor = torch.from_numpy(np.stack(frames_np, axis=0)).permute(0, 3, 1, 2).float() / 255.0
frames_tensor = frames_tensor.permute(1, 0, 2, 3) # [C, F, H, W]
logger.info(f"Loaded {len(frames_np)} input frames. Tensor shape: {frames_tensor.shape}")
# Return both the original numpy frames (for saving later) and the normalized tensor
return frames_np, frames_tensor
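# Usage sketch (hypothetical file name): the numpy frames are kept so they can
# be stitched verbatim into the saved output, while the [0, 1] tensor feeds the
# VAE/CLIP conditioning path:
#   frames_np, frames_01 = load_video_frames("input.mp4", num_frames=8, target_reso=(480, 832))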
# Combined function for I2V and Extend modes
def prepare_i2v_or_extend_inputs(
args: argparse.Namespace, config, accelerator: Accelerator, device: torch.device, vae: WanVAE,
input_frames_tensor: Optional[torch.Tensor] = None # Required for Extend mode
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]:
"""Prepare inputs for I2V (single image) or Extend (multiple frames)."""
if vae is None:
raise ValueError("VAE must be provided for I2V/Extend input preparation.")
is_extend_mode = input_frames_tensor is not None
is_i2v_mode = args.image_path is not None
# --- Get Dimensions and Frame Counts ---
height, width = args.video_size
frames = args.video_length # Total frames for diffusion process
if frames is None:
raise ValueError("video_length must be set before calling prepare_i2v_or_extend_inputs")
num_input_frames = 0
if is_extend_mode:
num_input_frames = args.num_input_frames
if num_input_frames >= frames:
raise ValueError(f"Number of input frames ({num_input_frames}) must be less than total video length ({frames})")
elif is_i2v_mode:
num_input_frames = 1
# --- Load Input Image(s) / Frames ---
img_tensor_for_clip = None # Representative tensor for CLIP
img_tensor_for_vae = None # Tensor containing all input frames/image for VAE
if is_extend_mode:
# Input frames tensor already provided (normalized [0,1])
img_tensor_for_vae = input_frames_tensor.to(device)
# Use first frame for CLIP
img_tensor_for_clip = img_tensor_for_vae[:, 0:1, :, :] # [C, 1, H, W]
logger.info(f"Preparing inputs for Extend mode with {num_input_frames} input frames.")
elif is_i2v_mode:
# Load single image
img = Image.open(args.image_path).convert("RGB")
img_cv2 = np.array(img)
interpolation = cv2.INTER_AREA if height < img_cv2.shape[0] else cv2.INTER_CUBIC
img_resized_np = cv2.resize(img_cv2, (width, height), interpolation=interpolation)
# Normalized [0,1], shape [C, H, W]
img_tensor_single = TF.to_tensor(img_resized_np).to(device)
# Add frame dimension -> [C, 1, H, W]
img_tensor_for_vae = img_tensor_single.unsqueeze(1)
img_tensor_for_clip = img_tensor_for_vae
logger.info("Preparing inputs for standard I2V mode.")
else:
raise ValueError("Neither extend_video nor image_path provided for I2V/Extend preparation.")
# --- Optional End Frame ---
has_end_image = args.end_image_path is not None
end_img_tensor_vae = None # Normalized [-1, 1], shape [C, 1, H, W]
if has_end_image:
end_img = Image.open(args.end_image_path).convert("RGB")
end_img_cv2 = np.array(end_img)
interpolation_end = cv2.INTER_AREA if height < end_img_cv2.shape[0] else cv2.INTER_CUBIC
end_img_resized_np = cv2.resize(end_img_cv2, (width, height), interpolation=interpolation_end)
# Normalized [0,1], shape [C, H, W] -> [C, 1, H, W]
end_img_tensor_load = TF.to_tensor(end_img_resized_np).unsqueeze(1).to(device)
end_img_tensor_vae = (end_img_tensor_load * 2.0 - 1.0) # Scale to [-1, 1] for VAE
logger.info(f"Loaded end image: {args.end_image_path}")
# --- Calculate Latent Dimensions ---
lat_f = (frames - 1) // config.vae_stride[0] + 1 # Total latent frames
lat_h = height // config.vae_stride[1]
lat_w = width // config.vae_stride[2]
# Latent frames corresponding to the input pixel frames
lat_input_f = (num_input_frames - 1) // config.vae_stride[0] + 1
max_seq_len = math.ceil((lat_f + (1 if has_end_image else 0)) * lat_h * lat_w / (config.patch_size[1] * config.patch_size[2]))
logger.info(f"Target latent shape: ({lat_f}, {lat_h}, {lat_w}), Input latent frames: {lat_input_f}, Seq len: {max_seq_len}")
# --- Set Seed ---
seed = args.seed
    if not args.cpu_noise:
        seed_g = torch.Generator(device=device)
        seed_g.manual_seed(seed)
    else:
        # ComfyUI compatible noise
        seed_g = torch.manual_seed(seed)
# --- Generate Noise ---
# Noise for the *entire* processing duration (including input frame slots)
noise = torch.randn(
16, lat_f + (1 if has_end_image else 0), lat_h, lat_w,
dtype=torch.float32, generator=seed_g, device=device if not args.cpu_noise else "cpu"
).to(device)
# --- Text Encoding ---
n_prompt = args.negative_prompt if args.negative_prompt else config.sample_neg_prompt
text_encoder = load_text_encoder(args, config, device)
text_encoder.model.to(device)
with torch.no_grad():
if args.fp8_t5:
with torch.amp.autocast(device_type=device.type, dtype=config.t5_dtype):
context = text_encoder([args.prompt], device)
context_null = text_encoder([n_prompt], device)
else:
context = text_encoder([args.prompt], device)
context_null = text_encoder([n_prompt], device)
del text_encoder
clean_memory_on_device(device)
# --- CLIP Encoding ---
clip = load_clip_model(args, config, device)
clip.model.to(device)
with torch.amp.autocast(device_type=device.type, dtype=torch.float16), torch.no_grad():
# Input needs to be [-1, 1], shape [C, 1, H, W] (or maybe [C, F, H, W] if model supports?)
# Assuming visual encoder takes one frame: use the representative clip tensor
        clip_input = img_tensor_for_clip.sub(0.5).div(0.5)  # Scale [0,1] -> [-1,1] without mutating the VAE input view
clip_context = clip.visual([clip_input]) # Pass as list [tensor]
del clip
clean_memory_on_device(device)
# --- VAE Encoding for Conditioning Tensor 'y' ---
vae.to_device(device)
y_latent_part = torch.zeros(config.latent_channels, lat_f + (1 if has_end_image else 0), lat_h, lat_w, device=device, dtype=vae.dtype)
with accelerator.autocast(), torch.no_grad():
# Encode the input frames/image (scale [0,1] -> [-1,1])
input_frames_vae = (img_tensor_for_vae * 2.0 - 1.0).to(dtype=vae.dtype) # [-1, 1]
# Pad with zeros if needed to match VAE chunking? Assume encode handles variable length for now.
encoded_input_latents = vae.encode([input_frames_vae])[0] # [C', F_in', H', W']
actual_encoded_input_f = encoded_input_latents.shape[1]
if actual_encoded_input_f > lat_input_f:
logger.warning(f"VAE encoded {actual_encoded_input_f} frames, expected {lat_input_f}. Truncating.")
encoded_input_latents = encoded_input_latents[:, :lat_input_f, :, :]
elif actual_encoded_input_f < lat_input_f:
logger.warning(f"VAE encoded {actual_encoded_input_f} frames, expected {lat_input_f}. Padding needed for mask.")
# This case shouldn't happen if lat_input_f calculation is correct, but handle defensively
# Place encoded input latents into the full y tensor
y_latent_part[:, :actual_encoded_input_f, :, :] = encoded_input_latents
# Encode end image if present
if has_end_image and end_img_tensor_vae is not None:
encoded_end_latent = vae.encode([end_img_tensor_vae.to(dtype=vae.dtype)])[0] # [C', 1, H', W']
y_latent_part[:, -1:, :, :] = encoded_end_latent # Place at the end
# --- Create Mask ---
msk = torch.zeros(4, lat_f + (1 if has_end_image else 0), lat_h, lat_w, device=device, dtype=vae.dtype)
msk[:, :lat_input_f, :, :] = 1 # Mask the input frames
if has_end_image:
msk[:, -1:, :, :] = 1 # Mask the end frame
# --- Combine Mask and Latent Part for 'y' ---
y = torch.cat([msk, y_latent_part], dim=0) # Shape [4+C', F_total', H', W']
logger.info(f"Constructed conditioning 'y' tensor shape: {y.shape}")
# --- Fun-Control Integration (Optional, might need adjustment for Extend mode) ---
if config.is_fun_control and args.control_path:
logger.warning("Fun-Control with Extend mode is experimental. Control signal might conflict with input frames.")
control_video = load_control_video(args.control_path, frames + (1 if has_end_image else 0), height, width).to(device)
with accelerator.autocast(), torch.no_grad():
control_latent = vae.encode([control_video])[0] # Encode control video
control_latent = control_latent * args.control_strength # Apply strength
# How to combine? Replace y? Add? For now, let's assume control replaces the VAE part of y
y = torch.cat([msk, control_latent], dim=0) # Overwrite latent part with control
logger.info(f"Replaced latent part of 'y' with Fun-Control latent. New 'y' shape: {y.shape}")
vae.to_device("cpu" if args.vae_cache_cpu else "cpu") # Move VAE back
clean_memory_on_device(device)
# --- Prepare Model Input Dictionaries ---
    arg_c = {
        "context": context,
        "clip_fea": clip_context,
        "seq_len": max_seq_len,
        "y": [y],  # conditioning tensor (mask + latents)
    }
    arg_null = {
        "context": context_null,
        "clip_fea": clip_context,
        "seq_len": max_seq_len,
        "y": [y],
    }
return noise, context, context_null, y, (arg_c, arg_null)
# --- V2V Helper Functions ---
def load_video(video_path, start_frame=0, num_frames=None, bucket_reso=(256, 256)):
"""Load video frames and resize them to the target resolution for V2V.
Args:
video_path (str): Path to the video file
start_frame (int): First frame to load (0-indexed)
num_frames (int, optional): Number of frames to load. If None, load all frames from start_frame.
bucket_reso (tuple): Target resolution (height, width)
Returns:
list: List of numpy arrays containing video frames in RGB format, resized.
int: Actual number of frames loaded.
"""
logger.info(f"Loading video for V2V from {video_path}, target reso {bucket_reso}, frames {start_frame}-{start_frame+num_frames if num_frames else 'end'}")
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():