Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
"""Download the google/gemma-2b-it tokenizer and model and cache them locally.

Requires the ``HF_TOKEN`` environment variable to hold a Hugging Face access
token with permission to download the gated Gemma weights.
"""
import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "google/gemma-2b-it"

# SECURITY: the original script embedded a live Hugging Face token in plain
# text. Never hard-code credentials — read them from the environment. A clear
# error here beats a cryptic 401 from the Hub later.
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise RuntimeError("Set the HF_TOKEN environment variable to your Hugging Face token.")

# NOTE(review): 'gemma_2d_it' looks like a typo for 'gemma_2b_it', but it is
# kept as-is because downstream code may load from this exact path — confirm.
SAVE_DIR = "gemma_2d_it"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    token=HF_TOKEN,
    torch_dtype=torch.bfloat16,  # bfloat16 halves memory vs. float32 weights
)

# Persist both artifacts to the same directory so they can be reloaded
# offline with from_pretrained(SAVE_DIR).
tokenizer.save_pretrained(SAVE_DIR)
model.save_pretrained(SAVE_DIR)
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement