Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- powershell -ExecutionPolicy ByPass -NoExit -Command "& 'C:\Users\myself\miniconda3\shell\condabin\conda-hook.ps1' ; conda activate 'C:\Users\myself\miniconda3'"
- miniconda link: https://docs.conda.io/en/latest/miniconda.html
- cuda information link: https://github.com/bycloudai/SwapCudaVersionWindows
- 8bit modification link: https://www.reddit.com/r/LocalLLaMA/comments/11o6o3f/how_to_install_llama_8bit_and_4bit/
- conda create -n textgen python=3.10.9
- conda activate textgen
- conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia
- conda install -c conda-forge cudatoolkit=11.7
- conda install -c conda-forge ninja
- conda install -c conda-forge accelerate
- conda install -c conda-forge sentencepiece
- pip install git+https://github.com/huggingface/transformers.git
- pip install git+https://github.com/huggingface/peft.git
- cd F:\OoBaboogaMarch17\
- git clone https://github.com/oobabooga/text-generation-webui
- cd text-generation-webui
- pip install -r requirements.txt
- ******************************** Testing model to make sure things are working
- cd F:\OoBaboogaMarch17\text-generation-webui
- conda activate textgen
- python .\server.py --auto-devices --cai-chat
- ******************************** Testing model to make sure things are working, things are good!
- Now do 8bit modifications
- ******************************** Testing model to make sure things are working in 8bit
- cd F:\OoBaboogaMarch17\text-generation-webui
- conda activate textgen
- python .\server.py --auto-devices --load-in-8bit --cai-chat
- ******************************** Testing model to make sure things are working, things are good!
- cd F:\OoBaboogaMarch17\text-generation-webui
- conda activate textgen
- mkdir repositories
- cd repositories
- git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa
- cd GPTQ-for-LLaMa
- python setup_cuda.py install
- ******************************** Convert Weights of original LLaMA Model *Make sure to move tokenizer files too!!
- cd F:\OoBaboogaMarch17\text-generation-webui\repositories\GPTQ-for-LLaMa
- conda activate textgen
- python convert_llama_weights_to_hf.py --input_dir F:\OoBaboogaMarch17\text-generation-webui\models --model_size 13B --output_dir F:\OoBaboogaMarch17\text-generation-webui\models\llama-13b
- *example formatting*
- python convert_llama_weights_to_hf.py --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir ./llama-hf
- ******************************** Convert Weights of original LLaMA Model
- ******************************** Testing model to make sure things are working in 8bit
- cd F:\OoBaboogaMarch17\text-generation-webui
- conda activate textgen
- python .\server.py --auto-devices --load-in-8bit --cai-chat
- ******************************** Testing model to make sure things are working, things are good!
- cd F:\OoBaboogaMarch17\text-generation-webui
- conda activate textgen
- conda install datasets -c conda-forge
- ******************************** CREATE 4-BIT Addon Model
- **ATTENTION ATTENTION PAY ATTENTION TO THE DIRECTION OF THE SLASHES WHEN TELLING THIS CODE THE DIRECTORY — THEY ARE / NOT \
- cd F:\OoBaboogaMarch17\text-generation-webui\repositories\GPTQ-for-LLaMa
- conda activate textgen
- python llama.py F:/OoBaboogaMarch17/text-generation-webui/models/llama-13b c4 --wbits 4 --groupsize 128 --save llama-13b-4bit.pt
- ******************************** Convert Weights of original LLaMA Model
- ******************************** Testing model to make sure things are working in 4 bit
- cd F:\OoBaboogaMarch17\text-generation-webui
- conda activate textgen
- python server.py --wbits 4 --groupsize 128 --cai-chat
- ******************************** Testing model to make sure things are working, things are good!
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement