Commit 5b166b1 · fix bug
Parent: dffe42a

Files changed:
- README.md (+4, -0)
- app.py (+9, -4)
- requirements.txt (+1, -0)

README.md CHANGED
@@ -10,6 +10,10 @@ pinned: false
 # suggested_hardware: a10g-small
 models:
 - mistralai/Mixtral-8x7B-Instruct-v0.1
+- runwayml/stable-diffusion-v1-5
+- AgainstEntropy/kanji-lora-sd-v1-5
+- AgainstEntropy/kanji-lcm-lora-sd-v1-5
+
 ---
 
 Check out [original repo](https://github.com/AgainstEntropy/kanji) for more details!

app.py CHANGED
@@ -153,6 +153,7 @@ def generate(
     formatted_prompt = format_prompt(f"{system_prompt}, {message}", chat_history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
 
+    global outputs
     outputs = ""
     prompt_queue = SimpleQueue()
 
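
For context, a minimal sketch of the streaming call this hunk configures, assuming huggingface_hub's InferenceClient (the model id is taken from the README above; the prompt and token limit are hypothetical). With stream=True and details=True, each yielded item exposes the newly generated fragment as response.token.text, which the next hunk consumes:

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
stream = client.text_generation(
    "Describe a kanji for 'river'.",  # hypothetical prompt
    max_new_tokens=32,                # hypothetical limit
    stream=True,
    details=True,
    return_full_text=False,
)
for response in stream:
    # Each streamed item carries one generated token fragment.
    print(response.token.text, end="", flush=True)
```
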
@@ -165,9 +166,12 @@ def generate(
 
     def append_to_queue():
         for response in stream:
-            text = response.token.text
+            text = response.token.text
+            global outputs
+            outputs += text
             text = text.strip()
-            if text:
+            if text and text not in ['</s>']:
+                if text.endswith("."): text = text[:-1]
                 prompt_queue.put(text)
         prompt_queue.put(None)
 
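
The hunk above is one half of a producer/consumer handoff: append_to_queue runs on its own thread and feeds cleaned fragments into prompt_queue, with None as the end-of-stream sentinel. A self-contained sketch of that pattern, with illustrative names rather than the Space's exact code:

```python
from queue import SimpleQueue
from threading import Thread

prompt_queue: SimpleQueue = SimpleQueue()

def producer(fragments):
    # Push cleaned prompt fragments; drop blanks and EOS markers.
    for fragment in fragments:
        text = fragment.strip()
        if text and text not in ["</s>"]:
            prompt_queue.put(text)
    prompt_queue.put(None)  # sentinel: nothing more is coming

Thread(target=producer, args=(["water", " ", "river", "</s>"],)).start()

while True:
    item = prompt_queue.get()
    if item is None:  # sentinel reached, stop consuming
        break
    print(f"would generate an image for: {item}")
```
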
@@ -179,7 +183,6 @@ def generate(
         img_path = None
         if image is not None:
             img_path = stitcher.add(image, text)
-            print(img_path)
         return img_path
 
     while True:
@@ -199,6 +202,7 @@ def generate(
             break
 
     response_cache = outputs
+    print(outputs)
     return outputs
 
 
@@ -266,4 +270,5 @@ with gr.Blocks(css="style.css") as demo:
     gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch(show_api=False)
+    # demo.queue(max_size=20).launch(show_api=False)
+    demo.queue(max_size=20).launch(server_name="0.0.0.0", share=False)
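
The new launch line follows the usual pattern for serving Gradio from inside a container: server_name="0.0.0.0" binds all interfaces so the app is reachable from outside, and share=False keeps the public share tunnel disabled. A minimal sketch with a placeholder UI:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # stands in for the Space's real layout

if __name__ == "__main__":
    # Queue requests, then serve on all interfaces without a public share link.
    demo.queue(max_size=20).launch(server_name="0.0.0.0", share=False)
```
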
requirements.txt CHANGED
@@ -1,3 +1,4 @@
+accelerate
 torch
 diffusers
 transformers
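
accelerate is commonly required by diffusers for low-memory model loading and device placement, a plausible reason for this addition. A quick way to confirm the four pinned dependencies resolve in the build environment:

```python
import importlib

# Package names assumed exactly as pinned in requirements.txt above.
for pkg in ("accelerate", "torch", "diffusers", "transformers"):
    module = importlib.import_module(pkg)
    print(f"{pkg} {getattr(module, '__version__', 'unknown')}: OK")
```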