Spaces: Running on Zero

Update app.py

app.py CHANGED

@@ -255,6 +255,16 @@ optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB",
 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
 
+import requests
+
+def load_image_from_url(url):
+    try:
+        response = requests.get(url, timeout=10)
+        response.raise_for_status()
+        return Image.open(BytesIO(response.content)).convert("RGB")
+    except Exception as e:
+        print(f"Error loading image from URL: {e}")
+        return None
 # --- Main Inference Function (with hardcoded negative prompt) ---
 @spaces.GPU(duration=60)
 def infer(
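
The added helper downloads an image over HTTP and normalizes it to an RGB PIL image, returning None on any failure. It assumes `Image` (Pillow) and `BytesIO` are already in scope in app.py: the change adds `import requests` but no `from io import BytesIO`, so that import must already exist earlier in the file (or would need to be added). A minimal standalone sketch of the same helper with all imports spelled out; the URL in the usage example is purely illustrative:

# Standalone sketch of the URL-loading helper; imports included for clarity.
from io import BytesIO

import requests
from PIL import Image


def load_image_from_url(url):
    """Fetch an image over HTTP and return it as an RGB PIL image, or None on failure."""
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # surface HTTP errors (404, 500, ...) as exceptions
        return Image.open(BytesIO(response.content)).convert("RGB")
    except Exception as e:
        print(f"Error loading image from URL: {e}")
        return None


if __name__ == "__main__":
    # Illustrative URL only; any publicly reachable image URL works.
    img = load_image_from_url("https://example.com/sample.jpg")
    print(img.size if img else "download failed")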

@@ -267,12 +277,14 @@ def infer(
     height=None,
     width=None,
     num_images_per_prompt=1,
+    image_url=None,
     progress=gr.Progress(track_tqdm=True),
 ):
     """
     Generates an image using the local Qwen-Image diffusers pipeline.
     """
     # Hardcode the negative prompt as requested
+
     negative_prompt = "Vibrant colors, overexposed, static, blurry details, subtitles, style, artwork, painting, image, still, overall grayish, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn face, deformed, disfigured, deformed limbs, fingers fused together, static image, cluttered background, three legs, many people in the background, walking backwards."
     rewrite_prompt=False
     if randomize_seed:

@@ -287,6 +299,11 @@ def infer(
     prompt = prompt.replace(expected_key, "")
     # Load input images into PIL Images
     pil_images = []
+    if not images and image_url:
+        img = load_image_from_url(image_url)
+        if img:
+            pil_images = [img]
+            print(f"Loaded image from URL: {image_url}")
     if images:
         for item in images:
             try:
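
Inside infer, the URL is only consulted as a fallback: when no images were uploaded and image_url is non-empty, the downloaded image (if any) becomes the sole input; otherwise uploaded images take priority and the URL is ignored. A small sketch isolating that resolution order (resolve_input_images and the stub loader are illustrative names, not functions in app.py):

# Minimal sketch of the input-resolution order implied by the diff:
# uploaded images win; the URL is only consulted when none were uploaded.
def resolve_input_images(images, image_url, load_from_url):
    if not images and image_url:
        img = load_from_url(image_url)
        return [img] if img else []
    return list(images or [])


# No uploads, so the URL path is taken (loader stubbed out here).
print(resolve_input_images([], "https://example.com/cat.png", lambda u: f"<image from {u}>"))
# Uploads present, so the URL is ignored.
print(resolve_input_images(["<uploaded image>"], "https://example.com/cat.png", lambda u: None))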

@@ -361,7 +378,7 @@ with gr.Blocks(css=css) as demo:
             show_label=False,
             type="pil",
             interactive=True)
-
+        image_url = gr.Textbox(label="", placeholder="")
         prompt = gr.Text(
             label="Prompt 🪄",
             show_label=True,
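
The URL field is added as a plain gr.Textbox with an empty label and placeholder. Purely as an illustration of the same component (not the committed code), a variant with a descriptive label and placeholder could look like:

import gradio as gr

# Illustrative variant only; the committed line is:
#     image_url = gr.Textbox(label="", placeholder="")
image_url = gr.Textbox(
    label="Image URL (optional)",
    placeholder="https://example.com/image.png (used only if no image is uploaded)",
)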

@@ -438,6 +455,7 @@ with gr.Blocks(css=css) as demo:
             num_inference_steps,
             height,
             width,
+            image_url,
         ],
         outputs=[result,upscaled, seed],
 
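
With image_url appended to the event's inputs list, Gradio passes the textbox value into infer positionally, so its place in the list has to line up with the image_url parameter's position in the handler signature (the parts of the signature and inputs list not shown in this diff determine whether that holds). A self-contained sketch of the wiring pattern, with stand-in component names and a dummy handler rather than the real app.py objects:

import gradio as gr


def fake_infer(prompt, image_url=None):
    # Stand-in for infer(): just echoes what it would receive.
    return f"prompt={prompt!r}, image_url={image_url!r}"


with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    image_url = gr.Textbox(label="Image URL")  # new field, appended to inputs below
    out = gr.Textbox(label="Result")
    run = gr.Button("Run")
    # Inputs are passed positionally, so image_url's position in this list
    # must match the image_url parameter's position in the handler signature.
    run.click(fn=fake_infer, inputs=[prompt, image_url], outputs=[out])

if __name__ == "__main__":
    demo.launch()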