Commit 007fe97
Parent(s): 4705650
update/removed quantization

Files changed:
- app.py (+1 -1)
- requirements.txt (+1 -1)
app.py
CHANGED

@@ -17,7 +17,7 @@ def load_model():
 
     model = AutoPeftModelForCausalLM.from_pretrained(
         lora_model_name,  # YOUR MODEL YOU USED FOR TRAINING
-        load_in_4bit = True,
+        load_in_4bit = False,  # False
     )
     tokenizer = AutoTokenizer.from_pretrained(lora_model_name)
     model.eval()
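For context, a minimal sketch of how load_model() in app.py reads after this commit. Only lines 17-23 appear in the diff, so the imports, the lora_model_name value, and the return statement are assumptions.

    # Sketch of app.py's load_model() after this commit; the imports,
    # lora_model_name, and the return value are assumptions (not shown in the diff).
    from peft import AutoPeftModelForCausalLM
    from transformers import AutoTokenizer

    lora_model_name = "your-username/your-lora-adapter"  # hypothetical placeholder

    def load_model():
        # Load the LoRA adapter together with its base model, without 4-bit
        # quantization, so bitsandbytes is no longer required.
        model = AutoPeftModelForCausalLM.from_pretrained(
            lora_model_name,  # YOUR MODEL YOU USED FOR TRAINING
            load_in_4bit=False,  # False
        )
        tokenizer = AutoTokenizer.from_pretrained(lora_model_name)
        model.eval()
        return model, tokenizer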
requirements.txt
CHANGED

@@ -3,4 +3,4 @@ gradio
 transformers
 peft
 torch
-bitsandbytes
+# bitsandbytes
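Note: bitsandbytes is only pulled in when the model is loaded with load_in_4bit=True, so with 4-bit loading turned off in app.py the dependency can be dropped; it is commented out rather than deleted, presumably to make re-enabling quantization easy later.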