prodigy_fp32_targeted

---
job: "extension"
config:
  name: "targeted_layers"
  process:
    - type: "diffusion_trainer"
      training_folder: "/app/ai-toolkit/output"
      sqlite_db_path: "./aitk_db.db"
      device: "cuda"
      trigger_word: null
      performance_log_every: 10
      network:
        type: "lora"
        linear: 64
        linear_alpha: 64
        conv: 32
        conv_alpha: 32
        lokr_full_rank: true
        lokr_factor: -1
        network_kwargs:
          # limit LoRA injection to modules whose names contain these substrings
          # (layers 14-25, attention and feed-forward projections);
          # see the module-filter sketch after this config
          only_if_contains:
            - "layers.14.attention.to_q"
            - "layers.14.attention.to_k"
            - "layers.14.attention.to_v"
            - "layers.14.attention.to_out.0"
            - "layers.14.feed_forward.w2"
            - "layers.14.feed_forward.w3"
            - "layers.15.attention.to_q"
            - "layers.15.attention.to_k"
            - "layers.15.attention.to_v"
            - "layers.15.attention.to_out.0"
            - "layers.15.feed_forward.w2"
            - "layers.15.feed_forward.w3"
            - "layers.16.attention.to_q"
            - "layers.16.attention.to_k"
            - "layers.16.attention.to_v"
            - "layers.16.attention.to_out.0"
            - "layers.16.feed_forward.w2"
            - "layers.16.feed_forward.w3"
            - "layers.17.attention.to_q"
            - "layers.17.attention.to_k"
            - "layers.17.attention.to_v"
            - "layers.17.attention.to_out.0"
            - "layers.17.feed_forward.w2"
            - "layers.17.feed_forward.w3"
            - "layers.18.attention.to_q"
            - "layers.18.attention.to_k"
            - "layers.18.attention.to_v"
            - "layers.18.attention.to_out.0"
            - "layers.18.feed_forward.w2"
            - "layers.18.feed_forward.w3"
            - "layers.19.attention.to_q"
            - "layers.19.attention.to_k"
            - "layers.19.attention.to_v"
            - "layers.19.attention.to_out.0"
            - "layers.19.feed_forward.w2"
            - "layers.19.feed_forward.w3"
            - "layers.20.attention.to_q"
            - "layers.20.attention.to_k"
            - "layers.20.attention.to_v"
            - "layers.20.attention.to_out.0"
            - "layers.20.feed_forward.w2"
            - "layers.20.feed_forward.w3"
            - "layers.21.attention.to_q"
            - "layers.21.attention.to_k"
            - "layers.21.attention.to_v"
            - "layers.21.attention.to_out.0"
            - "layers.21.feed_forward.w2"
            - "layers.21.feed_forward.w3"
            - "layers.22.attention.to_q"
            - "layers.22.attention.to_k"
            - "layers.22.attention.to_v"
            - "layers.22.attention.to_out.0"
            - "layers.22.feed_forward.w2"
            - "layers.22.feed_forward.w3"
            - "layers.23.attention.to_q"
            - "layers.23.attention.to_k"
            - "layers.23.attention.to_v"
            - "layers.23.attention.to_out.0"
            - "layers.23.feed_forward.w2"
            - "layers.23.feed_forward.w3"
            - "layers.24.attention.to_q"
            - "layers.24.attention.to_k"
            - "layers.24.attention.to_v"
            - "layers.24.attention.to_out.0"
            - "layers.24.feed_forward.w2"
            - "layers.24.feed_forward.w3"
            - "layers.25.attention.to_q"
            - "layers.25.attention.to_k"
            - "layers.25.attention.to_v"
            - "layers.25.attention.to_out.0"
            - "layers.25.feed_forward.w2"
            - "layers.25.feed_forward.w3"
      save:
        dtype: "fp32"
        save_every: 250
        max_step_saves_to_keep: 12
        save_format: "diffusers"
        push_to_hub: false
      datasets:
        - folder_path: "/app/ai-toolkit/datasets/new"
          mask_path: null
          mask_min_value: 0.1
          default_caption: ""
          caption_ext: "txt"
          caption_dropout_rate: 0.05
          cache_latents_to_disk: true
          is_reg: false
          network_weight: 1
          resolution:
            - 512
          controls: []
          shrink_video_to_frames: true
          num_frames: 1
          flip_x: false
          flip_y: false
          num_repeats: 1
      train:
        batch_size: 1
        bypass_guidance_embedding: false
        steps: 5000
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false
        gradient_checkpointing: true
        noise_scheduler: "flowmatch"
        optimizer: "prodigy"
        timestep_type: "sigmoid"
        content_or_style: "balanced"
        # Prodigy adapts its own step size, so lr stays at 1 below and
        # d_coef scales the adapted step; see the optimizer sketch after this config
        optimizer_params:
          weight_decay: 0.01
          d_coef: 2
          safeguard_warmup: true
          use_bias_correction: true
          betas:
            - 0.9
            - 0.99
        unload_text_encoder: false
        cache_text_embeddings: true
        lr: 1
        ema_config:
          use_ema: false
          ema_decay: 0.99
        skip_first_sample: true
        force_first_sample: false
        disable_sampling: false
        dtype: "fp32"
        diff_output_preservation: false
        diff_output_preservation_multiplier: 1
        diff_output_preservation_class: "person"
        switch_boundary_every: 1
        loss_type: "mse"
      logging:
        log_every: 1
        use_ui_logger: true
      model:
        name_or_path: "Tongyi-MAI/Z-Image-Turbo"
        quantize: false
        qtype: "qfloat8"
        quantize_te: false
        qtype_te: "qfloat8"
        arch: "zimage:turbo"
        low_vram: false
        model_kwargs: {}
        layer_offloading: false
        layer_offloading_text_encoder_percent: 1
        layer_offloading_transformer_percent: 1
        assistant_lora_path: "ostris/zimage_turbo_training_adapter/zimage_turbo_training_adapter_v2.safetensors"
      sample:
        sampler: "flowmatch"
        sample_every: 250
        width: 1024
        height: 1024
        samples:
          - prompt: "from a close range we see, a beautiful woman staring at the camera"
            seed: 42
            walk_seed: true
            guidance_scale: 1
            sample_steps: 8
            num_frames: 1
            fps: 1
meta:
  name: "targeted_layers"
  version: "1.0"
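
Note on network_kwargs.only_if_contains: the list restricts the LoRA to modules whose qualified names contain one of the listed substrings, so only the attention and feed-forward projections of layers 14-25 receive adapters. Below is a minimal module-filter sketch in plain PyTorch that shows the idea; it is not the ai-toolkit implementation, and ToyTransformer, ToyAttention, ToyFeedForward, LoRALinear, and inject_lora are illustration-only names with made-up dimensions.

# Minimal sketch (NOT the ai-toolkit code) of what only_if_contains does:
# wrap every nn.Linear whose qualified name contains one of the target
# substrings with a LoRA adapter, and leave everything else untouched.
import torch.nn as nn

# Same targets as the config: layers 14-25, attention + feed-forward.
ONLY_IF_CONTAINS = [
    f"layers.{i}.attention.{proj}"
    for i in range(14, 26)
    for proj in ("to_q", "to_k", "to_v", "to_out.0")
] + [
    f"layers.{i}.feed_forward.{w}"
    for i in range(14, 26)
    for w in ("w2", "w3")
]


class LoRALinear(nn.Module):
    """A frozen nn.Linear plus a trainable low-rank residual (rank 64, alpha 64)."""

    def __init__(self, base: nn.Linear, rank: int = 64, alpha: int = 64):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)
        self.down = nn.Linear(base.in_features, rank, bias=False)
        self.up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.up.weight)  # adapter starts as a no-op
        self.scale = alpha / rank

    def forward(self, x):
        return self.base(x) + self.scale * self.up(self.down(x))


def inject_lora(model: nn.Module, patterns, rank: int = 64, alpha: int = 64):
    """Replace every nn.Linear whose name contains any of the patterns."""
    wrapped = []
    for name, module in list(model.named_modules()):
        if isinstance(module, nn.Linear) and any(p in name for p in patterns):
            parent_name, _, child = name.rpartition(".")
            parent = model.get_submodule(parent_name) if parent_name else model
            setattr(parent, child, LoRALinear(module, rank, alpha))
            wrapped.append(name)
    return wrapped


# Toy stand-ins chosen only so the module names line up with the config's patterns.
class ToyAttention(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.to_q = nn.Linear(dim, dim)
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)
        self.to_out = nn.Sequential(nn.Linear(dim, dim))  # yields ".to_out.0"


class ToyFeedForward(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.w1 = nn.Linear(dim, 4 * dim)
        self.w2 = nn.Linear(4 * dim, dim)
        self.w3 = nn.Linear(dim, 4 * dim)


class ToyBlock(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.attention = ToyAttention(dim)
        self.feed_forward = ToyFeedForward(dim)


class ToyTransformer(nn.Module):
    def __init__(self, dim=32, n_layers=30):
        super().__init__()
        self.layers = nn.ModuleList(ToyBlock(dim) for _ in range(n_layers))


if __name__ == "__main__":
    model = ToyTransformer()
    wrapped = inject_lora(model, ONLY_IF_CONTAINS)
    print(len(wrapped), "modules wrapped")  # 12 layers x 6 projections = 72

Running the sketch wraps 72 linears (12 layers x 6 projections), which matches the 72 patterns listed in the config.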
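
Note on the optimizer block: Prodigy estimates its own step size, which is why lr stays at 1 and d_coef (2 here) scales the estimated step rather than the learning rate. Assuming the trainer resolves optimizer: "prodigy" to the prodigyopt package (an assumption, not something this paste confirms), the optimizer_params map onto it roughly as follows; the nn.Linear is just a stand-in for the trainable LoRA parameters.

# Rough mapping of the config's optimizer_params onto prodigyopt.Prodigy.
# Assumption: the trainer backs optimizer: "prodigy" with the prodigyopt package.
import torch.nn as nn
from prodigyopt import Prodigy  # pip install prodigyopt

model = nn.Linear(8, 8)          # stand-in for the LoRA parameters
optimizer = Prodigy(
    model.parameters(),
    lr=1.0,                      # keep at 1; Prodigy adapts the step size itself
    weight_decay=0.01,
    d_coef=2.0,                  # scales the adapted step, as in the config
    safeguard_warmup=True,
    use_bias_correction=True,
    betas=(0.9, 0.99),
)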