CCCP poster style
下载文件
关于此版本
{
"__version": 6,
"training_method": "LORA",
"model_type": "STABLE_DIFFUSION_XL_10_BASE",
"debug_mode": false,
"debug_dir": "debug",
"workspace_dir": "G:/OneTrainer/outputs",
"cache_dir": "workspace-cache/run",
"tensorboard": true,
"tensorboard_expose": false,
"tensorboard_port": 6007,
"validation": false,
"validate_after": 1,
"validate_after_unit": "EPOCH",
"continue_last_backup": false,
"include_train_config": "SETTINGS",
"base_model_name": "G:/stable-diffusion-webui-reForge/models/Stable-diffusion/illustrious/noobaiXLNAIXL_vPred10Version.safetensors",
"weight_dtype": "FLOAT_16",
"output_dtype": "BFLOAT_16",
"output_model_format": "SAFETENSORS",
"output_model_destination": "G:/OneTrainer/outputs/CCCPposter-NOOB-VPRED1.0-V1-128-drop0.1.safetensors",
"gradient_checkpointing": "ON",
"enable_async_offloading": true,
"enable_activation_offloading": true,
"layer_offload_fraction": 0.0,
"force_circular_padding": false,
"concept_file_name": "training_concepts/concepts.json",
"concepts": [
{
"__version": 1,
"image": {
"__version": 0,
"enable_crop_jitter": true,
"enable_random_flip": true,
"enable_fixed_flip": false,
"enable_random_rotate": false,
"enable_fixed_rotate": false,
"random_rotate_max_angle": 0.0,
"enable_random_brightness": false,
"enable_fixed_brightness": false,
"random_brightness_max_strength": 0.0,
"enable_random_contrast": false,
"enable_fixed_contrast": false,
"random_contrast_max_strength": 0.0,
"enable_random_saturation": false,
"enable_fixed_saturation": false,
"random_saturation_max_strength": 0.0,
"enable_random_hue": false,
"enable_fixed_hue": false,
"random_hue_max_strength": 0.0,
"enable_resolution_override": false,
"resolution_override": "512",
"enable_random_circular_mask_shrink": false,
"enable_random_mask_rotate_crop": false
},
"text": {
"__version": 0,
"prompt_source": "sample",
"prompt_path": "",
"enable_tag_shuffling": true,
"tag_delimiter": ",",
"keep_tags_count": 1,
"tag_dropout_enable": false,
"tag_dropout_mode": "FULL",
"tag_dropout_probability": 0.0,
"tag_dropout_special_tags_mode": "NONE",
"tag_dropout_special_tags": "",
"tag_dropout_special_tags_regex": false,
"caps_randomize_enable": false,
"caps_randomize_mode": "capslock, title, first, random",
"caps_randomize_probability": 0.0,
"caps_randomize_lowercase": false
},
"name": "",
"path": "G:/AI/sovposter_V2/10_s-m",
"seed": -166583343,
"enabled": true,
"validation_concept": false,
"include_subdirectories": false,
"image_variations": 1,
"text_variations": 1,
"balancing": 10.0,
"balancing_strategy": "REPEATS",
"loss_weight": 1.0
},
{
"__version": 1,
"image": {
"__version": 0,
"enable_crop_jitter": true,
"enable_random_flip": true,
"enable_fixed_flip": false,
"enable_random_rotate": false,
"enable_fixed_rotate": false,
"random_rotate_max_angle": 0.0,
"enable_random_brightness": false,
"enable_fixed_brightness": false,
"random_brightness_max_strength": 0.0,
"enable_random_contrast": false,
"enable_fixed_contrast": false,
"random_contrast_max_strength": 0.0,
"enable_random_saturation": false,
"enable_fixed_saturation": false,
"random_saturation_max_strength": 0.0,
"enable_random_hue": false,
"enable_fixed_hue": false,
"random_hue_max_strength": 0.0,
"enable_resolution_override": false,
"resolution_override": "512",
"enable_random_circular_mask_shrink": false,
"enable_random_mask_rotate_crop": false
},
"text": {
"__version": 0,
"prompt_source": "sample",
"prompt_path": "",
"enable_tag_shuffling": true,
"tag_delimiter": ",",
"keep_tags_count": 1,
"tag_dropout_enable": false,
"tag_dropout_mode": "FULL",
"tag_dropout_probability": 0.0,
"tag_dropout_special_tags_mode": "NONE",
"tag_dropout_special_tags": "",
"tag_dropout_special_tags_regex": false,
"caps_randomize_enable": false,
"caps_randomize_mode": "capslock, title, first, random",
"caps_randomize_probability": 0.0,
"caps_randomize_lowercase": false
},
"name": "",
"path": "G:/AI/sovposter_V2/10_s-o",
"seed": 586590013,
"enabled": true,
"validation_concept": false,
"include_subdirectories": false,
"image_variations": 1,
"text_variations": 1,
"balancing": 10.0,
"balancing_strategy": "REPEATS",
"loss_weight": 1.0
}
],
"aspect_ratio_bucketing": true,
"latent_caching": true,
"clear_cache_before_training": true,
"learning_rate_scheduler": "COSINE",
"custom_learning_rate_scheduler": null,
"scheduler_params": [],
"learning_rate": 1.0,
"learning_rate_warmup_steps": 0.0,
"learning_rate_cycles": 1.0,
"learning_rate_min_factor": 0.0,
"epochs": 10,
"batch_size": 2,
"gradient_accumulation_steps": 1,
"ema": "OFF",
"ema_decay": 0.999,
"ema_update_step_interval": 5,
"dataloader_threads": 2,
"train_device": "cuda",
"temp_device": "cpu",
"train_dtype": "BFLOAT_16",
"fallback_train_dtype": "BFLOAT_16",
"enable_autocast_cache": true,
"only_cache": false,
"resolution": "1024",
"frames": "25",
"mse_strength": 1.0,
"mae_strength": 0.0,
"log_cosh_strength": 0.0,
"vb_loss_strength": 1.0,
"loss_weight_fn": "CONSTANT",
"loss_weight_strength": 5.0,
"dropout_probability": 0.1,
"loss_scaler": "NONE",
"learning_rate_scaler": "NONE",
"clip_grad_norm": 1.0,
"offset_noise_weight": 0.0,
"perturbation_noise_weight": 0.0,
"rescale_noise_scheduler_to_zero_terminal_snr": true,
"force_v_prediction": false,
"force_epsilon_prediction": false,
"min_noising_strength": 0.0,
"max_noising_strength": 1.0,
"timestep_distribution": "LOGIT_NORMAL",
"noising_weight": 0.2,
"noising_bias": 0.0,
"timestep_shift": 1.0,
"dynamic_timestep_shifting": false,
"unet": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": 0,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"prior": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": 0,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"text_encoder": {
"__version": 0,
"model_name": "",
"include": true,
"train": false,
"stop_training_after": 20,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"text_encoder_layer_skip": 0,
"text_encoder_2": {
"__version": 0,
"model_name": "",
"include": true,
"train": false,
"stop_training_after": 20,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"text_encoder_2_layer_skip": 0,
"text_encoder_3": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": 30,
"stop_training_after_unit": "EPOCH",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"text_encoder_3_layer_skip": 0,
"vae": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": null,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "FLOAT_32",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"effnet_encoder": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": null,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"decoder": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": null,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"decoder_text_encoder": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": null,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"decoder_vqgan": {
"__version": 0,
"model_name": "",
"include": true,
"train": true,
"stop_training_after": null,
"stop_training_after_unit": "NEVER",
"learning_rate": null,
"weight_dtype": "NONE",
"dropout_probability": 0.0,
"train_embedding": true,
"attention_mask": false,
"guidance_scale": 1.0
},
"masked_training": false,
"unmasked_probability": 0.1,
"unmasked_weight": 0.1,
"normalize_masked_area_loss": false,
"embedding_learning_rate": null,
"preserve_embedding_norm": false,
"embedding": {
"__version": 0,
"uuid": "491c91cb-4f84-4282-b0e2-cdc342cf8ea2",
"model_name": "",
"placeholder": "
模型描述
关于NOOB V-PRED 2.0
基于NOOB V-PRED V1.0使用OneTrainer进行训练。
筛选训练集并增加训练次数,以获得更优的色彩与效果表现。
已移除style-abstract触发词,因为NOOB对这类艺术风格的理解较差。
具体训练细节请参阅右侧版本相关信息。
About NOOB V-PRED 2.0
Train using OneTrainer based on NOOB V-PRED V1.0.
Filter the training set and increase the number of training repeats to achieve better colors and effects.
The trigger word s-a (style-abstract) has been removed because NOOB has a poor understanding of this type of art style.
Please refer to the relevant version on the right for specific training details.
关于NOOB V-PRED V1.0
基于NOOB V-PRED V1.0进行训练。
减少训练数据以获得更佳效果,目前在单色风格(monochrome)表现更优。
细化触发词以生成不同艺术风格:s-a 表示 style-abstract,s-o 表示 style-oil painting,s-m 表示 style-monochrome。
具体训练细节请参阅右侧版本相关信息。
About NOOB V-PRED V1.0
Trained based on NOOB V-PRED V1.0.
I reduced the training data for better effect. Now it performs better in monochrome.
I divided the trigger words to generate different types of art styles, where s-a means style-abstract, s-o means style-oil painting, and s-m means style-monochrome.
It is recommended to use NOOB V-PRED 0.5 as the generative model for better aesthetic performance.
Please refer to the version information on the right for specific training details.
关于NOOB E-PRED V1.0
基于NOOB E-PRED V1.0进行训练。
重新收集训练数据,并细化触发词以生成不同艺术风格:s-a 表示 style-abstract,s-o 表示 style-oil painting,s-m 表示 style-monochrome。
建议使用NOOB V-PRED 0.5作为生成模型,以获得更优的美学表现。
具体训练细节请参阅右侧版本相关信息。
About NOOB E-PRED V1.0
Trained based on NOOB E-PRED V1.0.
I recollected the training data and divided the trigger words to generate different types of art styles, where s-a means style-abstract, s-o means style-oil painting, and s-m means style-monochrome.
It is recommended to use NOOB V-PRED 0.5 as the generative model for better aesthetic performance.
Please refer to the version information on the right for specific training details.
关于PonyXL-experimental
尝试以PonyXL作为底模进行训练,结果勉强接近3.0版本,但图像细节缺乏足够质感,目前仍在探索更优版本。训练参数详见版本相关。
About the PonyXL experimental
Attempting to train with PonyXL as the base model resulted in a result that was barely close to 3.0, but lacked a lot of texture in image details. We are still exploring a better version. See about the version for training parameters.
关于3.0
在3.0版本中,使用了多分辨率噪声技术(金字塔噪声),未启用正则化,所有图像均参与训练。
光影效果明显增强,对规则物体的描绘也更为敏感。其缺点是似乎更偏好绘制建筑,因此在绘制人物时建议提高1girl或1male的权重。
训练参数如下:
multires_noise_iterations="6"
multires_noise_discount=0.3
Introduction
这是一个用于绘制苏联海报风格的LyCORIS。权重较低时偏向模型自身风格,权重较高时则更贴近海报风格。
目前仅在二次元模型上测试,主要测试模型为AOM3A和viewerMix,效果较好。若用于偏真实的模型,可能更能还原原版海报风格。
Training parameters
基于NAI Final作为底模进行训练,使用36张图片作为训练集,35张图片作为回归训练集,训练前进行了镜像增强;每张图片重复训练10次,共训练10个epoch,最终步数约为7000步,主要设置参数如下:
network_dim=32
network_alpha=32
keep_tokens=4
conv_dim=4
conv_alpha=4
lr="1e-4"
unet_lr="1e-4"
text_encoder_lr="1e-5"
batch_size = 2








