[model_arguments]
pretrained_model_name_or_path = "C:/stable-diffusion-trainer/fluffusion_r1_e9_640x.ckpt"
vae = "C:/stable-diffusion-trainer/stablediffusion.vae.pt"
# v2 and v_parameterization apply to Stable Diffusion 2.x 768 (v-prediction) models.
v2 = false
v_parameterization = false
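# Added note (not in the original paste): for an SD 2.x 768-v base model you
# would set v2 = true and v_parameterization = true. Since this config leaves
# both false, the base checkpoint is presumably a 1.x-style, epsilon-prediction
# model.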
[additional_network_arguments]
network_train_unet_only = false # Both false: train the UNet
network_train_text_encoder_only = false # and the text encoder together.
# The text encoder LR is set to half the UNet LR.
unet_lr = 1e-4
text_encoder_lr = 5e-5
# Module: standard LoRA.
network_module = "networks.lora"
network_dim = 32
network_alpha = 1 # Alpha must be less than or equal to dim (see the note below).
# Setting no_metadata = true would skip embedding the training metadata in the LoRA file.
no_metadata = false
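# Worked example (added note, not in the original paste): kohya-style LoRA
# scales its output by network_alpha / network_dim, so here
#   scale = 1 / 32 = 0.03125
# and the learned update is applied at 1/32 strength. Setting
# network_alpha = 32 (equal to dim) would give a scale of 1.0.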
[optimizer_arguments]
learning_rate = 1e-4 # Should be equal to unet_lr!
lr_scheduler = "constant"
lr_warmup_steps = 0
optimizer_type = "AdamW8bit"
max_grad_norm = 1.0
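# Added note (behavioral assumption about sd-scripts): when unet_lr and
# text_encoder_lr are set, they take precedence for their respective modules
# and learning_rate acts as the default, which is why the comment above says
# to keep it equal to unet_lr. optimizer_type = "AdamW8bit" requires the
# bitsandbytes package to be installed.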
[dataset_arguments]
cache_latents = true
debug_dataset = false
vae_batch_size = 4
[training_arguments]
max_train_epochs = 20
clip_skip = 1
save_every_n_epochs = 1
#noise_offset = 0
output_dir = "C:/stable-diffusion-trainer/Loras"
output_name = "sks"
logging_dir = "C:/stable-diffusion-trainer/Logs"
log_prefix = "sks"
gradient_checkpointing = false
gradient_accumulation_steps = 1
save_precision = "fp16"
train_batch_size = 6
max_token_length = 225
mem_eff_attn = false
xformers = true
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
mixed_precision = "fp16"
lowram = true
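# Quick arithmetic (added note): the effective batch size is
#   train_batch_size * gradient_accumulation_steps = 6 * 1 = 6.
# max_token_length = 225 corresponds to 3 chunks of 75 CLIP tokens
# (sd-scripts accepts 75, 150, or 225).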
[sample_prompt_arguments]
sample_every_n_epochs = 999999 # Effectively disables sample image generation.
sample_sampler = "ddim"
[dreambooth_arguments]
prior_loss_weight = 1.0
[saving_arguments]
save_model_as = "safetensors"
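A minimal sanity-check sketch, not part of the config above: it loads this file
with Python's stdlib tomllib (3.11+) and verifies the relationships the
comments call out. The filename lora_config.toml is a placeholder for wherever
you save this paste.

# Sanity-check the LoRA training config before a run.
import math
import tomllib

with open("lora_config.toml", "rb") as f:
    cfg = tomllib.load(f)

net = cfg["additional_network_arguments"]
opt = cfg["optimizer_arguments"]

# kohya-style LoRA scales its output by alpha / dim.
scale = net["network_alpha"] / net["network_dim"]
print(f"LoRA scale (alpha/dim) = {scale}")  # 1 / 32 = 0.03125

# The comments say learning_rate == unet_lr and text_encoder_lr == unet_lr / 2.
assert math.isclose(opt["learning_rate"], net["unet_lr"]), "learning_rate != unet_lr"
assert math.isclose(net["text_encoder_lr"], net["unet_lr"] / 2), "text_encoder_lr != unet_lr / 2"
print("LR relationships check out")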