Update app.py
app.py CHANGED
@@ -21,7 +21,7 @@ from transformers import LlamaForCausalLM, LlamaTokenizer
 #base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_and_model)
 #base_model = "meta-llama/Llama-2-13b"
 #base_model="codellama/CodeLlama-13b-Instruct-hf"
-base_model = "tiiuae/falcon-
+base_model = "tiiuae/falcon-7b"
 #base_model = "MAGAer13/mPLUG-Owl" #load_8bit = False (in load_tokenizer_and_model)
 #base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
 #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
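The only functional change in this commit is switching base_model to the Hugging Face checkpoint "tiiuae/falcon-7b". The commented-out alternatives mention that load_8bit is set inside a load_tokenizer_and_model helper, which is not part of this hunk. Below is a minimal sketch of what such a helper could look like; the function body, the use of the Auto* classes (rather than the Llama-specific classes imported at the top of app.py), and every keyword argument are assumptions for illustration, not the Space's actual implementation.

    # Hypothetical sketch of a load_tokenizer_and_model helper, assuming it
    # wraps the standard transformers loading calls with an 8-bit option.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    def load_tokenizer_and_model(base_model: str, load_8bit: bool = False):
        # Auto* classes are used because falcon-7b is not a Llama checkpoint;
        # the original file imports LlamaForCausalLM/LlamaTokenizer for other models.
        tokenizer = AutoTokenizer.from_pretrained(base_model)
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,     # requires bitsandbytes when True
            torch_dtype=torch.float16,  # fp16 keeps falcon-7b within a single GPU
            device_map="auto",          # let accelerate place the weights
            trust_remote_code=True,     # older falcon checkpoints ship custom modelling code
        )
        model.eval()
        return tokenizer, model

    # Usage mirroring the diff: falcon-7b with 8-bit loading disabled.
    tokenizer, model = load_tokenizer_and_model("tiiuae/falcon-7b", load_8bit=False)

With load_8bit=False, as the comments around the changed line suggest for several of the listed models, the weights load in fp16 and bitsandbytes is not needed; flipping the flag would require bitsandbytes to be installed in the Space.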