Clone the LLAMA2- repository to your local machine:
git clone https://github.com/Hasnainbold/LLAMA2-.git
Create and activate a conda environment:
conda create -n cpullama python=3.8 -y
conda activate cpullama
Install the dependencies:
pip install -r requirements.txt
Run the app (after placing the model as described below):
python app.py
### Download the quantized model
Download llama-2-7b-chat.ggmlv3.q4_0.bin from the following link (also provided in the model folder) and keep it in the model directory:
https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
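
Once the .bin file is in place, app.py can load it for CPU-only inference. The snippet below is a minimal sketch of how a GGML model like this is commonly loaded with the ctransformers library; the library choice, the model/ path, and the generation settings are assumptions for illustration, not taken from this repository's code:

```python
# Minimal sketch, assuming the app uses ctransformers (check app.py for the actual loader).
from ctransformers import AutoModelForCausalLM

llm = AutoModelForCausalLM.from_pretrained(
    "model/llama-2-7b-chat.ggmlv3.q4_0.bin",  # assumed location inside the cloned repo
    model_type="llama",       # GGML Llama-family weights
    max_new_tokens=256,       # hypothetical generation settings
    temperature=0.7,
)

# The loaded model object is callable and returns the generated text.
print(llm("Explain quantization in one sentence."))
```

If app.py instead uses a wrapper such as LangChain's CTransformers class, the model path and model_type arguments are typically the same.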