Ephraimmm committed on
Commit
c857032
·
verified ·
1 Parent(s): 2eb12af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -19,7 +19,7 @@ print("Using device:", device)
19
  # -----------------------------
20
  # 2️⃣ Load base model (skip compilation)
21
  # -----------------------------
22
- base_model_name = "unsloth/gemma-3-4b-it-unsloth-bnb-4bit" #"unsloth/llama-3.2-3b-instruct-unsloth-bnb-4bit"
23
  base_model, tokenizer = FastLanguageModel.from_pretrained(
24
  model_name=base_model_name,
25
  max_seq_length=2048,
@@ -30,7 +30,7 @@ base_model, tokenizer = FastLanguageModel.from_pretrained(
30
  # -----------------------------
31
  # 3️⃣ Load LoRA
32
  # -----------------------------
33
- lora_repo = "Ephraimmm/PIDGIN_gemma-3" #"Ephraimmm/Pidgin_llamma_model"
34
  lora_model = PeftModel.from_pretrained(base_model, lora_repo, adapter_name="adapter_model")
35
  FastLanguageModel.for_inference(lora_model)
36
 
 
19
  # -----------------------------
20
  # 2️⃣ Load base model (skip compilation)
21
  # -----------------------------
22
+ base_model_name = "unsloth/gemma-3-4b-it-unsloth-bnb-4bit"#"unsloth/gemma-3-4b-it-unsloth-bnb-4bit" #"unsloth/llama-3.2-3b-instruct-unsloth-bnb-4bit"
23
  base_model, tokenizer = FastLanguageModel.from_pretrained(
24
  model_name=base_model_name,
25
  max_seq_length=2048,
 
30
  # -----------------------------
31
  # 3️⃣ Load LoRA
32
  # -----------------------------
33
+ lora_repo = "Ephraimmm/pigin-gemma-3-0.2" #"Ephraimmm/PIDGIN_gemma-3" #"Ephraimmm/Pidgin_llamma_model"
34
  lora_model = PeftModel.from_pretrained(base_model, lora_repo, adapter_name="adapter_model")
35
  FastLanguageModel.for_inference(lora_model)
36