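# ai-toolkit training job: LoRA fine-tune of Tongyi-MAI/Z-Image-Turbo, with the
# adapter restricted to a targeted band of transformer layers (14-25), saved in bf16.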
job: "extension"
config:
  name: "targeted_layers_bf16"
  process:
    - type: "diffusion_trainer"
      training_folder: "/app/ai-toolkit/output"
      sqlite_db_path: "./aitk_db.db"
      device: "cuda"
      trigger_word: null
      performance_log_every: 10
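      # Network: a plain LoRA adapter, rank 64 / alpha 64 on linear layers and
      # rank 32 / alpha 32 on conv layers. The lokr_* keys are LoKr-specific and
      # should be inert while type is "lora" (an assumption from ai-toolkit's
      # config conventions, not verified against this version).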
      network:
        type: "lora"
        linear: 64
        linear_alpha: 64
        conv: 32
        conv_alpha: 32
        lokr_full_rank: true
        lokr_factor: -1
        network_kwargs:
          only_if_contains:
- - "layers.14.attention.to_q"
- - "layers.14.attention.to_k"
- - "layers.14.attention.to_v"
- - "layers.14.attention.to_out.0"
- - "layers.14.feed_forward.w2"
- - "layers.14.feed_forward.w3"
- - "layers.15.attention.to_q"
- - "layers.15.attention.to_k"
- - "layers.15.attention.to_v"
- - "layers.15.attention.to_out.0"
- - "layers.15.feed_forward.w2"
- - "layers.15.feed_forward.w3"
- - "layers.16.attention.to_q"
- - "layers.16.attention.to_k"
- - "layers.16.attention.to_v"
- - "layers.16.attention.to_out.0"
- - "layers.16.feed_forward.w2"
- - "layers.16.feed_forward.w3"
- - "layers.17.attention.to_q"
- - "layers.17.attention.to_k"
- - "layers.17.attention.to_v"
- - "layers.17.attention.to_out.0"
- - "layers.17.feed_forward.w2"
- - "layers.17.feed_forward.w3"
- - "layers.18.attention.to_q"
- - "layers.18.attention.to_k"
- - "layers.18.attention.to_v"
- - "layers.18.attention.to_out.0"
- - "layers.18.feed_forward.w2"
- - "layers.18.feed_forward.w3"
- - "layers.19.attention.to_q"
- - "layers.19.attention.to_k"
- - "layers.19.attention.to_v"
- - "layers.19.attention.to_out.0"
- - "layers.19.feed_forward.w2"
- - "layers.19.feed_forward.w3"
- - "layers.20.attention.to_q"
- - "layers.20.attention.to_k"
- - "layers.20.attention.to_v"
- - "layers.20.attention.to_out.0"
- - "layers.20.feed_forward.w2"
- - "layers.20.feed_forward.w3"
- - "layers.21.attention.to_q"
- - "layers.21.attention.to_k"
- - "layers.21.attention.to_v"
- - "layers.21.attention.to_out.0"
- - "layers.21.feed_forward.w2"
- - "layers.21.feed_forward.w3"
- - "layers.22.attention.to_q"
- - "layers.22.attention.to_k"
- - "layers.22.attention.to_v"
- - "layers.22.attention.to_out.0"
- - "layers.22.feed_forward.w2"
- - "layers.22.feed_forward.w3"
- - "layers.23.attention.to_q"
- - "layers.23.attention.to_k"
- - "layers.23.attention.to_v"
- - "layers.23.attention.to_out.0"
- - "layers.23.feed_forward.w2"
- - "layers.23.feed_forward.w3"
- - "layers.24.attention.to_q"
- - "layers.24.attention.to_k"
- - "layers.24.attention.to_v"
- - "layers.24.attention.to_out.0"
- - "layers.24.feed_forward.w2"
- - "layers.24.feed_forward.w3"
- - "layers.25.attention.to_q"
- - "layers.25.attention.to_k"
- - "layers.25.attention.to_v"
- - "layers.25.attention.to_out.0"
- - "layers.25.feed_forward.w2"
- - "layers.25.feed_forward.w3"
      save:
        dtype: "bf16"
        save_every: 250
        max_step_saves_to_keep: 12
        save_format: "diffusers"
        push_to_hub: false
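      # Dataset: a single image folder trained at 512px, captions read from
      # sidecar .txt files, with captions dropped ~5% of the time
      # (caption_dropout_rate) so the model also sees unconditioned examples;
      # VAE latents are cached to disk once rather than re-encoded every step.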
      datasets:
        - folder_path: "/app/ai-toolkit/datasets/new"
          mask_path: null
          mask_min_value: 0.1
          default_caption: ""
          caption_ext: "txt"
          caption_dropout_rate: 0.05
          cache_latents_to_disk: true
          is_reg: false
          network_weight: 1
          resolution:
            - 512
          controls: []
          shrink_video_to_frames: true
          num_frames: 1
          flip_x: false
          flip_y: false
          num_repeats: 1
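      # Training: Prodigy estimates its own step size, so lr stays at the
      # conventional 1 and d_coef: 2 scales the adapted rate; the flowmatch
      # scheduler with sigmoid timestep sampling suits a flow-matching model
      # like Z-Image. Text embeddings are cached since the text encoder is
      # not being trained.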
      train:
        batch_size: 1
        bypass_guidance_embedding: false
        steps: 5000
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false
        gradient_checkpointing: true
        noise_scheduler: "flowmatch"
        optimizer: "prodigy"
        timestep_type: "sigmoid"
        content_or_style: "balanced"
        optimizer_params:
          weight_decay: 0.01
          d_coef: 2
          safeguard_warmup: true
          use_bias_correction: true
          betas:
            - 0.9
            - 0.99
        unload_text_encoder: false
        cache_text_embeddings: true
        lr: 1
        ema_config:
          use_ema: false
          ema_decay: 0.99
        skip_first_sample: true
        force_first_sample: false
        disable_sampling: false
        dtype: "bf16"
        diff_output_preservation: false
        diff_output_preservation_multiplier: 1
        diff_output_preservation_class: "person"
        switch_boundary_every: 1
        loss_type: "mse"
      logging:
        log_every: 1
        use_ui_logger: true
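      # Model: Z-Image Turbo loaded unquantized in bf16 (quantize/quantize_te
      # are false, so the qfloat8 qtype values are unused). The assistant LoRA
      # is ostris's training adapter for the distilled turbo model; by analogy
      # with ai-toolkit's FLUX.1-schnell adapter it should be active during
      # training but excluded from the saved LoRA (an assumption, not verified
      # for Z-Image).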
      model:
        name_or_path: "Tongyi-MAI/Z-Image-Turbo"
        quantize: false
        qtype: "qfloat8"
        quantize_te: false
        qtype_te: "qfloat8"
        arch: "zimage:turbo"
        low_vram: false
        model_kwargs: {}
        layer_offloading: false
        layer_offloading_text_encoder_percent: 1
        layer_offloading_transformer_percent: 1
        assistant_lora_path: "ostris/zimage_turbo_training_adapter/zimage_turbo_training_adapter_v2.safetensors"
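      # Sampling: a preview render every 250 steps at 1024x1024 using 8 steps
      # and guidance 1.0, the usual inference settings for a distilled turbo
      # model; walk_seed is expected to increment the seed per generated sample
      # (assumption based on ai-toolkit's sampling options).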
      sample:
        sampler: "flowmatch"
        sample_every: 250
        width: 1024
        height: 1024
        samples:
          - prompt: "from a close range we see, a beautiful woman staring at the camera"
        seed: 42
        walk_seed: true
        guidance_scale: 1
        sample_steps: 8
        num_frames: 1
        fps: 1
meta:
  name: "targeted_layers_bf16"
  version: "1.0"
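# A config like this is normally launched from the ai-toolkit repo root with
# `python run.py /path/to/this_config.yaml`; the sqlite_db_path and
# use_ui_logger keys suggest this file was generated by the AI Toolkit UI,
# so it may be run from the UI's job queue instead.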