[WAN 14B] LoRA
训练配置:
dataset.toml:
# Resolution setting (square bucket edge length, in pixels).
# NOTE(review): 524 is an unusual value — common presets use 512; confirm this is not a typo.
resolutions = [524]

# Aspect-ratio bucketing settings: buckets span ratios from min_ar to max_ar.
enable_ar_bucket = true
min_ar = 0.5
max_ar = 2.0
num_ar_buckets = 7

# Frame buckets (1 is used for still images).
frame_buckets = [1]

[[directory]] # Images
# Path to the directory containing the images and their matching caption files.
path = '/mnt/d/huanvideo/training_data/images'
num_repeats = 5
# Per-directory override of the global resolution above.
resolutions = [720]
frame_buckets = [1] # Images use 1 frame.

[[directory]] # Videos
# Path to the directory containing the videos and their matching caption files.
path = '/mnt/d/huanvideo/training_data/videos'
num_repeats = 5
resolutions = [256] # Video resolution set to 256 (e.g. 240p).
# Frame-count buckets videos are assigned to.
# NOTE(review): Wan's video VAE typically expects frame counts of the form 4k+1
# (e.g. 33, 41, 49); several values here (28, 30, 38, ...) don't fit that — verify
# against the training tool's frame-bucket handling.
frame_buckets = [28, 30, 37, 38, 41, 42, 47, 48, 50, 52, 57]
config.toml:
# Output directory for training artifacts (checkpoints, saved LoRA weights).
output_dir = '/mnt/d/wan/training_output'
# Path to the dataset configuration file (the dataset.toml defined above).
dataset = 'dataset.toml'

# Training settings.
epochs = 50
micro_batch_size_per_gpu = 1
pipeline_stages = 1
# Effective batch size = micro_batch_size_per_gpu * gradient_accumulation_steps (* GPUs).
gradient_accumulation_steps = 4
gradient_clipping = 1.0
warmup_steps = 100

# Evaluation settings.
eval_every_n_epochs = 5
eval_before_first_step = true
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1

# Misc settings.
save_every_n_epochs = 5
# Time-based resume checkpoints, independent of the epoch-based saves above.
checkpoint_every_n_minutes = 30
activation_checkpointing = true
partition_method = 'parameters'
save_dtype = 'bfloat16'
caching_batch_size = 1
steps_per_print = 1
# Take a single clip from the middle of each video.
video_clip_mode = 'single_middle'

[model]
type = 'wan'
# Path to the Wan2.1-T2V-14B model checkpoint.
# NOTE(review): relative path — confirm it resolves from the training script's
# working directory.
ckpt_path = '../Wan2.1-T2V-14B'
dtype = 'bfloat16'
# fp8 can be used for the transformer when training a LoRA.
transformer_dtype = 'float8'
timestep_sample_method = 'logit_normal'

[adapter]
type = 'lora'
rank = 32
dtype = 'bfloat16'

[optimizer]
type = 'adamw_optimi'
lr = 5e-5
betas = [0.9, 0.99]
weight_decay = 0.02
eps = 1e-8