# use_me_prodigy.ps1
# https://github.com/kohya-ss/sd-scripts

# Based on the PowerShell script in Raven's LoRA Training Rentry: https://rentry.org/59xed3
# Last edited 2023-08-05 (feffy)
# - Change default optimizer to Prodigy: run `pip install prodigyopt` in your venv first
# - Reduce network_dim to 32, because 128 is overkill
# - Add `--scale_weight_norms=1.0` to the default flags
# - (experimental) vpred support: enables v_parameterization and zero_terminal_snr
# - (fix) Round bucket resolutions to multiples of 64, using ceiling on the max to avoid biasing resolutions downward

# Don't be out of date! Make sure you are using newer versions when possible.
# Ask me for updated versions: ArgentFrequencies#9944
# Alternatively, go to Furry Diffusion and look for the LoRA training thread: https://discord.gg/furrydiffusion
# Directories Config
$image_dir = ".\.image-dir\" # Training images folder
$output = ".\.output\" # Output folder for your baked LoRAs
$reg_dir = ".\.reg-dir\" # Regularisation images folder
$model_dir = "C:\stable-diffusion-webui\models\Stable-diffusion" # Path to your models folder

$model = "v1-5-pruned-emaonly.safetensors" # Filename of the base model you wish to train on
$clip_skip = 1 # Set this to the clip skip the base model was trained with: 1 for FluffyRock/base SD, 2 for Fluffusion/NAI

$prompts = "" # Path to a text file containing your sample prompts. If you don't want preview images, leave this blank.
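# A minimal example of what a sample-prompt file can look like (illustrative only;
# sd-scripts also accepts per-line options such as --n <negative prompt>, --w/--h <size>,
# --s <steps>, --d <seed>):
#
#   masterpiece, 1girl, solo --n lowres, bad anatomy --w 512 --h 640 --s 28 --d 1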

# Training Config
$lora_name = "MyLora" # Name of the LoRA
$version = "v1.0" # Version number (completely optional, but recommended)

# Basic Settings:
$real_steps = 2000 # Total number of images processed. The actual step count will be lower when the effective batch size is > 1
$save_amount = 10 # How many LoRA checkpoints to save (e.g., 2000 steps / 10 saves == 1 save every 200 steps, 10 saves in total)
$base_res = 640 # The "base resolution" to train at
$max_aspect = 1.5 # The most extreme aspect ratio allowed when bucketing
$batch_size = 1 # Number of images to process per step. Speeds things up, but demands VRAM

# Advanced Settings:
$unet_lr = 1.0 # U-Net learning rate
$text_enc_lr = 1.0 # Text encoder learning rate
$grad_acc_step = 1 # Accumulates gradients over multiple steps to simulate a higher batch size
$net_dim = 32 # Network dimensions
$net_alpha = 1 # Network alpha. Leave at 1 when using Prodigy
$optimizer = "Prodigy" # Valid values: "AdamW", "AdamW8bit", "Lion", "SGDNesterov", "SGDNesterov8bit", "DAdaptation", "AdaFactor", "Prodigy"
$scheduler = "cosine" # Valid values: "linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup", "adafactor"
$noise_offset = 0.0 # Ugly hack to increase the dynamic range of outputs. Every 0.1 dampens learning quite a bit; compensate with more steps or higher learning rates. Prefer `--zero_terminal_snr` instead
$keep_tags = 1 # Keeps the first <n> tags at the front of the caption without shuffling them. 0 if no regularisation, 1 with regularisation; multi-concept sets may need > 1 (see the example below)
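# For example (illustrative caption): with $keep_tags = 1 and the caption
#   "mylora, canine, standing, outdoors, night"
# the first tag "mylora" stays in place every epoch while --shuffle_caption
# shuffles the rest.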
$enable_vpred = $false # Train with v_parameterization

# ==================================================================================
# BEYOND THIS POINT IS STUFF YOU SHOULD NOT TOUCH UNLESS YOU KNOW WHAT YOU'RE DOING!
# ==================================================================================
$eff_batch_size = $batch_size * $grad_acc_step

# Scale learning rates by the effective batch size if not using Prodigy
if ($optimizer -ine "prodigy") {
    $unet_lr = $unet_lr * $eff_batch_size
    $text_enc_lr = $text_enc_lr * $eff_batch_size
}

$real_steps = [int]($real_steps / $eff_batch_size) # Step count is divided by the effective batch size
$save_nth_step = [int]($real_steps / $save_amount) # Save a checkpoint every Nth step: step count divided by the number of saves
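# Worked example (hypothetical values): with $batch_size = 2 and $grad_acc_step = 2,
# $eff_batch_size = 4, so 2000 requested steps become [int](2000 / 4) = 500 optimizer
# steps, and $save_nth_step = [int](500 / 10) = 50, i.e. one checkpoint every 50 steps.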

# Additional lines to automatically generate named output folders:
$full_name = $lora_name + "_" + $version
$unique_output = $output + $full_name

# Bucketing res calculation [1]
if ($max_aspect -lt 1) {
    $max_aspect = 1 / $max_aspect # Flip the aspect ratio if it's less than 1
}
$max_bucket_res = [int]([Math]::Ceiling([Math]::Sqrt($base_res * $base_res * $max_aspect) / 64) * 64)
$min_bucket_res = [int]([Math]::Floor([Math]::Sqrt($base_res * $base_res / $max_aspect) / 64) * 64)
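# Worked example with the defaults above ($base_res = 640, $max_aspect = 1.5):
#   max: sqrt(640 * 640 * 1.5) ~= 783.8 -> Ceiling(783.8 / 64) * 64 = 832
#   min: sqrt(640 * 640 / 1.5) ~= 522.6 -> Floor(522.6 / 64) * 64 = 512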

# Build the vpred flags as an array so that each flag reaches the launcher as its own
# argument (a single space-separated string would be passed as one argument and rejected)
$vpred = @()
if ($enable_vpred) {
    $vpred = @("--v_parameterization", "--zero_terminal_snr")
}

.\venv\Scripts\activate

accelerate launch --num_cpu_threads_per_process 8 train_network.py `
    --logging_dir="logs" --log_prefix="$lora_name" `
    --network_module="networks.lora" `
    --max_data_loader_n_workers=1 --persistent_data_loader_workers `
    --caption_extension=".txt" --shuffle_caption --keep_tokens="$keep_tags" --max_token_length=225 `
    --prior_loss_weight=1 `
    --mixed_precision="fp16" --save_precision="fp16" `
    --xformers --cache_latents `
    --save_model_as=safetensors `
    --train_data_dir="$image_dir" --output_dir="$unique_output" --reg_data_dir="$reg_dir" --pretrained_model_name_or_path="$model_dir\$model" `
    --output_name="${full_name}_" `
    --learning_rate="$unet_lr" --text_encoder_lr="$text_enc_lr" `
    --max_train_steps="$real_steps" --save_every_n_steps="$save_nth_step" `
    --resolution="$base_res" `
    --enable_bucket --min_bucket_reso="$min_bucket_res" --max_bucket_reso="$max_bucket_res" `
    --train_batch_size="$batch_size" `
    --network_dim="$net_dim" --network_alpha="$net_alpha" `
    --optimizer_type="$optimizer" `
    --lr_scheduler="$scheduler" `
    --noise_offset="$noise_offset" `
    --seed=0 `
    --clip_skip="$clip_skip" `
    --sample_every_n_steps="$save_nth_step" `
    --sample_prompts="$prompts" `
    --sample_sampler="k_euler_a" `
    --gradient_accumulation_steps="$grad_acc_step" `
    --min_snr_gamma=5 `
    --scale_weight_norms=1.0 `
    $vpred

pause

# If you are using an outdated torch, run this in a fresh PowerShell window (do not copy the <# and #> markers):

<#
cd sd-scripts
.\venv\Scripts\activate
pip install torch==2.0.1+cu118 torchvision --extra-index-url https://download.pytorch.org/whl/cu118 xformers==0.0.20
#>

# If you are unsure of your torch version, run this in a fresh PowerShell window:

<#
cd sd-scripts
pip show torch
#>
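
# Likewise, to confirm your xformers version (optional check, same venv assumed):

<#
cd sd-scripts
pip show xformers
#>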

# Sources:
# [1] == https://math.stackexchange.com/questions/2133509/how-do-i-calculate-the-length-and-width-for-a-known-area-and-ratio