Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -19,7 +19,7 @@ print("Using device:", device)
|
|
| 19 |
# -----------------------------
|
| 20 |
# 2️⃣ Load base model (skip compilation)
|
| 21 |
# -----------------------------
|
| 22 |
-
base_model_name = "unsloth/gemma-3-4b-it-unsloth-bnb-4bit" #"unsloth/llama-3.2-3b-instruct-unsloth-bnb-4bit"
|
| 23 |
base_model, tokenizer = FastLanguageModel.from_pretrained(
|
| 24 |
model_name=base_model_name,
|
| 25 |
max_seq_length=2048,
|
|
@@ -30,7 +30,7 @@ base_model, tokenizer = FastLanguageModel.from_pretrained(
|
|
| 30 |
# -----------------------------
|
| 31 |
# 3️⃣ Load LoRA
|
| 32 |
# -----------------------------
|
| 33 |
-
lora_repo = "Ephraimmm/PIDGIN_gemma-3" #"Ephraimmm/Pidgin_llamma_model"
|
| 34 |
lora_model = PeftModel.from_pretrained(base_model, lora_repo, adapter_name="adapter_model")
|
| 35 |
FastLanguageModel.for_inference(lora_model)
|
| 36 |
|
|
|
|
| 19 |
# -----------------------------
|
| 20 |
# 2️⃣ Load base model (skip compilation)
|
| 21 |
# -----------------------------
|
| 22 |
+
base_model_name = "unsloth/gemma-3-4b-it-unsloth-bnb-4bit" #"unsloth/llama-3.2-3b-instruct-unsloth-bnb-4bit"
|
| 23 |
base_model, tokenizer = FastLanguageModel.from_pretrained(
|
| 24 |
model_name=base_model_name,
|
| 25 |
max_seq_length=2048,
|
|
|
|
| 30 |
# -----------------------------
|
| 31 |
# 3️⃣ Load LoRA
|
| 32 |
# -----------------------------
|
| 33 |
+
lora_repo = "Ephraimmm/pigin-gemma-3-0.2" #"Ephraimmm/PIDGIN_gemma-3" #"Ephraimmm/Pidgin_llamma_model"
|
| 34 |
lora_model = PeftModel.from_pretrained(base_model, lora_repo, adapter_name="adapter_model")
|
| 35 |
FastLanguageModel.for_inference(lora_model)
|
| 36 |
|