# Output path for training runs. Each training run makes a new directory in here.
output_dir = '/workspace/output/'
dataset = 'dataset.toml'
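# For reference, a minimal dataset.toml could look something like the sketch below. The values are
# placeholders and I'm going from memory on the key names, so check the dataset docs in the repo
# before copying it:
#
# resolutions = [512]
# enable_ar_bucket = true
# min_ar = 0.5
# max_ar = 2.0
# num_ar_buckets = 7
#
# [[directory]]
# path = '/workspace/data/my_dataset'
# num_repeats = 5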
# I usually set this to a really high value because I don't know how long I want to train.
epochs = 1000
micro_batch_size_per_gpu = 2
pipeline_stages = 1
gradient_accumulation_steps = 2
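# For what it's worth: with pipeline_stages = 1 on a single GPU, the effective batch size from the
# settings above is micro_batch_size_per_gpu * gradient_accumulation_steps = 2 * 2 = 4.
# Data-parallel GPUs multiply this further.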
gradient_clipping = 1.0
warmup_steps = 25
#blocks_to_swap = 30
# Block swapping is supported for Wan, HunyuanVideo, Flux, and Chroma. This value controls the number
# of blocks kept offloaded to RAM. Increasing it lowers VRAM use, but has a performance penalty. The
# exact performance penalty depends on the model and the type of training you are doing (e.g. images vs video).
# Block swapping only works for LoRA training, and requires pipeline_stages=1.
#blocks_to_swap = 20
# eval settings
eval_every_n_epochs = 1
eval_before_first_step = true
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1
#disable_block_swap_for_eval = true
# misc settings
# Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
save_every_n_epochs = 2
#checkpoint_every_n_epochs = 1
checkpoint_every_n_minutes = 60
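# As I understand it, the epoch saves above export the trained model/LoRA for use, while checkpoints
# save the full training state (optimizer state etc.) so an interrupted run can be resumed. Verify
# against the docs if resuming matters to you.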
activation_checkpointing = true
# Controls how Deepspeed divides layers across GPUs. Probably don't change this.
partition_method = 'parameters'
save_dtype = 'bfloat16'
caching_batch_size = 1
steps_per_print = 1
# How to extract video clips for training from a single input video file.
# The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
# number of frames for that bucket.
# single_beginning: one clip starting at the beginning of the video
# single_middle: one clip from the middle of the video (cutting off the start and end equally)
# multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
# default is single_beginning
video_clip_mode = 'single_beginning'
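# Worked example with made-up numbers: a 100-frame video assigned to a 33-frame bucket would give
# ceil(100 / 33) = 4 clips of 33 frames under 'multiple_overlapping', spaced so they cover all 100
# frames with some overlap (4 * 33 = 132 > 100); 'single_beginning' would just take frames 0-32.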
# This is how you configure the model; each model type has its own options. See docs/supported_models.md for
# details on the configuration and options for each model.
[model]
# type = 'chroma'
# diffusers_path = '/mnt/d/models/flux.dev'
# transformer_path = '/mnt/d/models/chroma-unlocked-v23.safetensors'
# dtype = 'bfloat16'
# # You can optionally load the transformer in fp8 when training LoRAs.
# transformer_dtype = 'float8'
# flux_shift = true
type = 'hidream'
diffusers_path = '/workspace/models/hidream'
llama3_path = '/workspace/models/llama31'
llama3_4bit = true
dtype = 'bfloat16'
transformer_dtype = 'float8'
max_llama3_sequence_length = 128
# Can use a resolution-dependent timestep shift, like Flux. Unsure if results are better.
#flux_shift = true
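# As I understand it, llama3_4bit loads the Llama 3 text encoder in 4-bit and transformer_dtype = 'float8'
# keeps the transformer weights in fp8; both mainly exist to cut VRAM use. With memory to spare, you could
# try llama3_4bit = false and/or dropping transformer_dtype (which should then fall back to dtype), but I
# haven't tested that.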
# For models that support full fine tuning, simply delete or comment out the [adapter] table to do a full fine tune (FFT).
[adapter]
type = 'lora'
rank = 32
# Dtype for the LoRA weights you are training.
dtype = 'bfloat16'
#init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
[optimizer]
# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
# Look at train.py for other options. You could also easily edit the file and add your own.
type = 'adamw_optimi'
lr = 1e-4
betas = [0.9, 0.99]
weight_decay = 0.01
eps = 1e-8
# Can use this optimizer for a bit less memory usage.
# [optimizer]
# type = 'AdamW8bitKahan'
# lr = 2e-5
# betas = [0.9, 0.99]
# weight_decay = 0.01
# stabilize = false
# Any optimizer not explicitly supported will be dynamically loaded from the pytorch-optimizer library.
# [optimizer]
# type = 'Prodigy'
# lr = 1
# betas = [0.9, 0.99]
# weight_decay = 0.01
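# If you go the Prodigy route: it estimates the step size on its own, which is why lr is left at 1
# here instead of a small value like the AdamW configs above (at least that's my understanding of it).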