ai-toolkit lora config

---
job: extension
config:
  # this name will be used for the output folder and filenames
  name: "<CONCEPT_NAME>"
  process:
    - type: 'sd_trainer'
      training_folder: "output"
      performance_log_every: 10
      device: cuda:0
      network:
        type: "lora"
        linear: 16
        linear_alpha: 8
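        # note: linear is the LoRA rank and linear_alpha its scaling factor;
        # LoRA updates are commonly scaled by alpha/rank, so effectively 8/16 = 0.5 here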
      save:
        dtype: float16 # precision to save
        save_every: 250 # save every this many steps
        max_step_saves_to_keep: 5 # how many intermittent saves to keep
        push_to_hub: false # change this to true to push your trained model to Hugging Face
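        # assumption: if you flip push_to_hub to true, ai-toolkit's example configs
        # also use repo fields like the commented ones below; the exact key names
        # may differ in your version, so check the bundled config examples
        # hf_repo_id: "<YOUR_HF_USERNAME>/<REPO_NAME>"
        # hf_private: true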
      datasets:
        - folder_path: "G:\\<PATH_TO_YOUR_FOLDER>"
          caption_ext: "txt"
          caption_dropout_rate: 0.05  # will drop out the caption 5% of the time
          shuffle_tokens: false  # shuffle caption order, split by commas
          cache_latents_to_disk: true  # leave this true unless you know what you're doing
          resolution: [ 1024 ]  # chroma enjoys multiple resolutions; see the sketch below
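          # sketch (assumed syntax, matching ai-toolkit's example configs): to feed
          # multiple bucketed resolutions, list several sizes instead of one, e.g.
          # resolution: [ 512, 768, 1024 ]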
      train:
        batch_size: 1
        steps: 8000  # total number of steps to train; 500-4000 is a good range
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false  # probably won't work with chroma
        gradient_checkpointing: true  # keep this on unless you have a ton of VRAM
        noise_scheduler: "flowmatch" # for training only
        optimizer: "adamw8bit"
        lr: 4e-4
        content_or_style: "balanced"
        # uncomment this to skip the pre-training sample
        # skip_first_sample: true
        # uncomment to completely disable sampling
        # disable_sampling: true
        # uncomment to use the new bell-curved weighting; experimental but may produce better results
        # linear_timesteps: true
        ema_config:
          use_ema: true
          ema_decay: 0.99
        dtype: bf16
      model:
        name_or_path: "G:\\<PATH_TO_CHROMA_MODEL>\\chroma-unlocked-v33.safetensors"
        arch: "chroma"
        quantize: true  # run 8-bit mixed precision
      sample:
        sampler: "flowmatch" # must match train.noise_scheduler
        sample_every: 100 # sample every this many steps
        width: 512
        height: 512
        prompts:
          - "<CUSTOM_PROMPT>"
          - "Example Prompt: xyhr, A middle-aged h3m7n, in a room with a folding screen, a painting, and a chair." # two trigger words here: one for the main concept/LoRA, the other for the specific detail being trained into a word
          - "Dog on a motorcycle" # used to check whether the original model is getting overtrained
        neg: ""  # negative prompt, optional
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 25
# you can add any additional meta info here. [name] is replaced with the config name at the top
meta:
  name: "[name]"
  version: '1.0'
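
To run this config with ai-toolkit, the usual entry point (assuming the standard ostris/ai-toolkit layout; verify against your checkout) is run.py pointed at the saved YAML file, where <CONCEPT_NAME>.yaml is whatever you named this file:

    python run.py config/<CONCEPT_NAME>.yaml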