Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
---
# ai-toolkit (sd_trainer) LoRA training config for a Chroma checkpoint.
# Replace the <PLACEHOLDER> values before running.
job: extension
config:
  # this name will be the folder and filename name
  name: "<CONCEPT_NAME>"
  process:
    - type: "sd_trainer"
      training_folder: "output"
      performance_log_every: 10
      device: "cuda:0"
      network:
        type: "lora"
        linear: 16
        linear_alpha: 8
      save:
        dtype: float16  # precision to save
        save_every: 250  # save every this many steps
        max_step_saves_to_keep: 5  # how many intermittent saves to keep
        push_to_hub: false  # change this to true to push your trained model to Hugging Face
      datasets:
        # inside double quotes "\\" is an escaped backslash, so this is a normal Windows path
        - folder_path: "G:\\<PATH_TO_YOUR_FOLDER>"
          caption_ext: "txt"
          caption_dropout_rate: 0.05  # will drop out the caption 5% of the time
          shuffle_tokens: false  # shuffle caption order, split by commas
          cache_latents_to_disk: true  # leave this true unless you know what you're doing
          resolution: [1024]  # chroma enjoys multiple resolutions
      train:
        batch_size: 1
        steps: 8000  # total number of steps to train; 500 - 4000 is a good range
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false  # probably won't work with chroma
        gradient_checkpointing: true  # need this on unless you have a ton of vram
        noise_scheduler: "flowmatch"  # for training only
        optimizer: "adamw8bit"
        # written with a digit-dot mantissa so YAML 1.1 loaders (e.g. PyYAML) parse a
        # float (0.0004) instead of the string "4e-4"
        lr: 4.0e-4
        content_or_style: "balanced"
        # uncomment this to skip the pre-training sample
        # skip_first_sample: true
        # uncomment to completely disable sampling
        # disable_sampling: true
        # uncomment to use new bell-curved weighting. Experimental but may produce better results
        # linear_timesteps: true
        ema_config:
          use_ema: true
          ema_decay: 0.99
        dtype: bf16
      model:
        name_or_path: "G:\\<PATH_TO_CHROMA_MODEL>\\chroma-unlocked-v33.safetensors"
        arch: "chroma"
        quantize: true  # run 8bit mixed precision
      sample:
        sampler: "flowmatch"  # must match train.noise_scheduler
        sample_every: 100  # sample every this many steps
        width: 512
        height: 512
        prompts:
          - "<CUSTOM_PROMPT>"
          # the prompt below uses 2 trigger words: 1 for the main concept/lora and the
          # other for the specific detail being trained into a word
          - "Example Prompt: xyhr, A middle-aged h3m7n, in a room with a folding screen, a painting, and a chair."
          - "Dog on a motorcycle"  # used to determine if the original model gets overtrained
        neg: ""  # negative prompt, optional
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 25
# you can add any additional meta info here. [name] is replaced with config name at top
meta:
  name: "[name]"
  version: "1.0"
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement