# Trained on 139 images, 2 repeats per image, 50 epochs; the model is saved
# and sampled at the end of every epoch. This run is at epoch 22 of 50.
# Training image subset definition (one entry in the subsets array).
[[subsets]]
# Each image is repeated this many times per epoch.
num_repeats = 2
# NOTE(review): with shuffle_caption enabled, keep_tokens presumably pins the
# first caption token in place while the rest are shuffled — confirm against
# the trainer's dataset documentation.
keep_tokens = 1
caption_extension = ".txt"
shuffle_caption = true
# Horizontal-flip augmentation disabled; this is not a regularization set.
flip_aug = false
is_reg = false
image_dir = "E:/Projects/vpl_lora/v7/dataset"
# Intentionally empty — no noise-offset overrides; trainer defaults apply.
[noise_args]
# Intentionally empty — no logging configured for this run.
[logging_args]
# Core trainer settings.
[general_args.args]
# Base Stable Diffusion checkpoint to fine-tune against.
pretrained_model_name_or_path = "E:/Projects/v1-5-pruned.safetensors"
mixed_precision = "fp16"
# Fixed seed so the run is reproducible.
seed = 23
clip_skip = 1
xformers = true
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
# Extended caption length (225 tokens instead of the default 75).
max_token_length = 225
prior_loss_weight = 1.0
# Cache VAE latents up front to speed up training.
cache_latents = true
max_train_epochs = 50
# Dataset-wide settings: base training resolution (pixels) and batch size.
[general_args.dataset_args]
resolution = 512
batch_size = 2
# LoRA network shape: rank (dim) and alpha scaling.
# NOTE(review): alpha (9.0) < dim (16) scales down effective learning of the
# LoRA weights — presumably intentional; confirm against the training recipe.
[network_args.args]
network_dim = 16
network_alpha = 9.0
# Optimizer and learning-rate schedule.
# Extra optimizer kwargs live in [optimizer_args.args.optimizer_args] below.
[optimizer_args.args]
optimizer_type = "AdamW8bit"
lr_scheduler = "cosine"
learning_rate = 0.0001
# Output settings: a checkpoint is written after every epoch.
[saving_args.args]
output_dir = "E:/Projects/vpl_lora/v7/build"
save_precision = "fp16"
save_model_as = "safetensors"
output_name = "hardvpl"
save_every_n_epochs = 1
# Aspect-ratio bucketing: group images into resolution buckets between
# 256 and 1024 px, stepping by 64 px, instead of cropping to a square.
[bucket_args.dataset_args]
enable_bucket = true
min_bucket_reso = 256
max_bucket_reso = 1024
bucket_reso_steps = 64
# Sample-image generation: prompts read from the given file, sampled with
# DDIM after every epoch.
[sample_args.args]
sample_prompts = "E:/Projects/vpl_lora/v7/prompt.txt"
sample_sampler = "ddim"
sample_every_n_epochs = 1
# Extra keyword arguments forwarded to the AdamW8bit optimizer.
# NOTE(review): these are strings rather than native TOML floats/arrays
# ("0.1", "0.9,0.99") — presumably the consuming script parses them itself,
# as this tool serializes optimizer kwargs as strings; confirm before
# converting to weight_decay = 0.1 / betas = [0.9, 0.99].
[optimizer_args.args.optimizer_args]
weight_decay = "0.1"
betas = "0.9,0.99"