- wsl -l -v
- wsl --unregister Ubuntu-22.04
- wsl --set-default-version 2
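Note: `wsl --unregister` deletes that distro and every file inside it, so copy out anything you want to keep first; `--set-default-version 2` makes freshly installed distros use WSL 2. After reinstalling Ubuntu (next step), a quick check that it is running under WSL 2 — this assumes the distro is still named Ubuntu-22.04:
- wsl -l -v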
- CUDA information link: https://github.com/bycloudai/SwapCudaVersionWindows
- *download Ubuntu 22.04.2 LTS
- sudo apt update
- sudo apt upgrade
- curl -sL "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh" > "Miniconda3.sh"
- bash Miniconda3.sh
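If conda is not found after the installer finishes, close and reopen the WSL terminal (or run `source ~/.bashrc`), then confirm the install:
- conda --version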
- conda create -n textgen python=3.10.9
- conda activate textgen
- pip3 install torch torchvision torchaudio
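A quick sanity check, assuming the textgen env is still active, that the installed PyTorch build has CUDA support and can see the GPU from inside WSL:
- python -c "import torch; print(torch.__version__, torch.cuda.is_available())"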
- git clone https://github.com/oobabooga/text-generation-webui
- cd text-generation-webui
- pip install -r requirements.txt
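Optionally, confirm the requirements installed cleanly by asking the server for its option list; this should only parse arguments and print the available flags, not load a model:
- python server.py --help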
- conda activate textgen
- conda install -c conda-forge cudatoolkit-dev
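cudatoolkit-dev provides the nvcc compiler that the GPTQ CUDA kernel build below needs; check that it is now on the PATH inside the textgen env:
- nvcc --version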
- mkdir repositories
- cd repositories
- git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa
- cd GPTQ-for-LLaMa
- sudo apt install build-essential
- python setup_cuda.py install
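setup_cuda.py compiles the 4-bit CUDA kernel as a Python extension (named quant_cuda in this repo, if the setup script is unchanged). If the build and install succeeded, importing it should produce no output:
- python -c "import quant_cuda"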
- make shortcut (symlink the models folder)
- cd ~/text-generation-webui
- conda activate textgen
- ln -s /mnt/f/OoBaboogaMarch17/text-generation-webui/models models
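The symlink points the webui at a models folder on the Windows F: drive; the /mnt/f/OoBaboogaMarch17 path is specific to this setup, so adjust it to wherever your models actually live. Confirm the link was created:
- ls -l models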
- **check if 4-bit is working
- cd ~/text-generation-webui
- conda activate textgen
- python server.py --wbits 4 --groupsize 128 --cai-chat
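--wbits 4 --groupsize 128 loads a 4-bit GPTQ-quantized model through the kernel built above. Once the server reports it is running, the UI is normally served on Gradio's default port; read the exact URL from the server output (port 7860 below is an assumption), or probe it from another WSL shell:
- curl -s http://127.0.0.1:7860 > /dev/null && echo "web UI is up"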
- cd ~/text-generation-webui
- conda activate textgen
- python server.py --listen --load-in-8bit --cai-chat
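--listen binds the server on all interfaces instead of just localhost, so other machines on the LAN can reach it, and --load-in-8bit runs the model in 8-bit via bitsandbytes instead of the 4-bit GPTQ path. To see WSL's own IP address for connecting from elsewhere (newer WSL versions may also need Windows-side port forwarding):
- hostname -I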
- cd ~/text-generation-webui/repositories/GPTQ-for-LLaMa
- conda activate textgen
- python convert_llama_weights_to_hf.py --input_dir ~/text-generation-webui/models --model_size 13B --output_dir ~/text-generation-webui/models/
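convert_llama_weights_to_hf.py (a copy of the Hugging Face transformers converter) turns the original LLaMA checkpoint into the Hugging Face format the webui can load. It expects the raw weights in a subfolder named after the size, so for --model_size 13B the input layout should look roughly like this (directory and file names are an assumption based on the stock script):
- ls ~/text-generation-webui/models/13B    # consolidated.*.pth and params.json
- ls ~/text-generation-webui/models        # tokenizer.model should also be here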