from unsloth import FastVisionModel  # FastLanguageModel for LLMs

model, tokenizer = FastVisionModel.from_pretrained(
    "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit",
    load_in_4bit = True,                     # Use 4bit to reduce memory use. False for 16bit LoRA.
    use_gradient_checkpointing = "unsloth",  # True or "unsloth" for long context
)

model = FastVisionModel.get_peft_model(
    model,
    finetune_vision_layers     = False,  # False if not finetuning vision layers
    finetune_language_layers   = True,   # False if not finetuning language layers
    finetune_attention_modules = True,   # False if not finetuning attention layers
    finetune_mlp_modules       = True,   # False if not finetuning MLP layers

    r = 16,           # The larger, the higher the accuracy, but might overfit
    lora_alpha = 16,  # Recommended alpha == r at least
    lora_dropout = 0,
    bias = "none",
    random_state = 3407,
    use_rslora = False,   # We support rank stabilized LoRA
    loftq_config = None,  # And LoftQ
    # target_modules = "all-linear", # Optional now! Can specify a list if needed
)
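
# Note: `finetune_data` used by the trainer below is not defined in this paste.
# A minimal sketch of one way to build it, assuming a dataset loaded earlier
# (e.g. via datasets.load_dataset) with "image" and "caption" columns -- the
# column names and instruction prompt are placeholders, adjust to your data.
# Each sample becomes a chat-style conversation with interleaved image and text
# content, which is the format UnslothVisionDataCollator expects.
instruction = "Describe this image."  # assumed task prompt

def convert_to_conversation(sample):
    conversation = [
        {"role": "user",
         "content": [
             {"type": "text",  "text":  instruction},
             {"type": "image", "image": sample["image"]},
         ]},
        {"role": "assistant",
         "content": [
             {"type": "text", "text": sample["caption"]},
         ]},
    ]
    return {"messages": conversation}

# finetune_data = [convert_to_conversation(sample) for sample in dataset]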
from unsloth import is_bf16_supported
from unsloth.trainer import UnslothVisionDataCollator
from trl import SFTTrainer, SFTConfig

FastVisionModel.for_training(model)  # Put the model into training mode

trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    data_collator = UnslothVisionDataCollator(model, tokenizer),  # Must use!
    train_dataset = finetune_data,  # Conversation-format vision dataset (see sketch above)
    args = SFTConfig(
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 3,
        warmup_steps = 5,
        # max_steps = 30,
        num_train_epochs = 1,  # Set this instead of max_steps for full training runs
        learning_rate = 2e-4,
        fp16 = not is_bf16_supported(),
        bf16 = is_bf16_supported(),
        logging_steps = 1,
        optim = "adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs",
        report_to = "none",  # Set to "wandb" for Weights and Biases logging

        # You MUST put the below items for vision finetuning:
        remove_unused_columns = False,
        dataset_text_field = "",
        dataset_kwargs = {"skip_prepare_dataset": True},
        dataset_num_proc = 4,
        max_seq_length = 2048,
    ),
)

trainer_stats = trainer.train()

model.save_pretrained("lora_model")  # Local saving
tokenizer.save_pretrained("lora_model")
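
# After training, a quick sanity check: switch the model to inference mode and
# generate from one image. This is a minimal sketch; `sample_image` and the
# prompt are placeholders, and the generation settings are illustrative only.
FastVisionModel.for_inference(model)  # Enable inference mode

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]}
]
input_text = tokenizer.apply_chat_template(messages, add_generation_prompt = True)
inputs = tokenizer(
    sample_image,  # a PIL image, e.g. the "image" field of one dataset sample
    input_text,
    add_special_tokens = False,
    return_tensors = "pt",
).to("cuda")

outputs = model.generate(**inputs, max_new_tokens = 128, use_cache = True)
print(tokenizer.decode(outputs[0], skip_special_tokens = True))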