Abhiroopvanaone committed
Commit e3b05fc · verified · 1 Parent(s): 09e65dd

Update app.py

Files changed (1)
  1. app.py +131 -179
app.py CHANGED
@@ -1,17 +1,18 @@
-import spaces  # MUST be imported FIRST before torch
 import gradio as gr
 import torch
 from transformers import pipeline
 from PIL import Image
-import traceback
 import time

 # Global model storage
 models = {}

 @spaces.GPU
-def load_glm_model(model_choice):
-    """Load GLM model with Zero GPU allocation."""
     model_map = {
         "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
         "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
@@ -20,81 +21,39 @@ def load_glm_model(model_choice):

     model_name = model_map[model_choice]

-    if model_name in models:
-        return True, f"✅ {model_choice} already loaded"
-
     try:
-        print(f"🔄 Loading {model_name} on Zero GPU...")
-
-        pipe = pipeline(
-            "image-text-to-text",
-            model=model_name,
-            device_map="auto",
-            torch_dtype=torch.float16,
-            trust_remote_code=True
-        )
-
-        models[model_name] = pipe
-        print(f"✅ Successfully loaded {model_name}")
-        return True, f"✅ {model_choice} loaded successfully"

-    except Exception as e:
-        error_msg = f"❌ Failed to load {model_choice}: {str(e)[:200]}"
-        print(error_msg)
-        return False, error_msg
-
-@spaces.GPU(duration=120)
-def generate_cadquery_code_gpu(image, model_choice, prompt_style):
-    """Generate CADQuery code using Zero GPU allocation."""
-
-    if image is None:
-        return "❌ Please upload an image first."
-
-    success, message = load_glm_model(model_choice)
-    if not success:
-        return f"❌ {message}"
-
-    prompts = {
-        "Simple": "Generate CADQuery Python code for this 3D model:",
-        "Detailed": """Analyze this 3D CAD model and generate Python CADQuery code.
-
-Requirements:
-- Import cadquery as cq
-- Store result in 'result' variable
-- Use proper CADQuery syntax
-
-Code:""",
-        "Chain-of-Thought": """Analyze this 3D CAD model step by step:
-
-Step 1: Identify the basic geometry (box, cylinder, etc.)
-Step 2: Note any features (holes, fillets, etc.)
-Step 3: Generate clean CADQuery Python code
-
-```python
-import cadquery as cq
-
-# Generated code:"""
-    }
-
-    prompt = prompts[prompt_style]
-
-    try:
-        start_time = time.time()

-        model_map = {
-            "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
-            "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
-            "GLM-4.5V": "zai-org/GLM-4.5V"
         }

-        model_name = model_map[model_choice]
-        pipe = models[model_name]

         messages = [
             {
-                "role": "user",
                 "content": [
-                    {"type": "image", "image": image},
                     {"type": "text", "text": prompt}
                 ]
             }
@@ -102,74 +61,66 @@ import cadquery as cq

         result = pipe(messages, max_new_tokens=512, temperature=0.7, do_sample=True)

-        if isinstance(result, list) and len(result) > 0:
             generated_text = result[0].get("generated_text", str(result))
         else:
             generated_text = str(result)

         generation_time = time.time() - start_time
-        clean_code = extract_cadquery_code(generated_text)

         output = f"""## 🎯 Generated CADQuery Code

 ```python
 {clean_code}
 ```

-## 📊 Generation Info
-- **Model**: {model_choice}
-- **Time**: {generation_time:.2f} seconds
-- **Prompt**: {prompt_style}
-- **Compute**: Zero GPU (A100)

 ## 🔧 Usage
 ```bash
 pip install cadquery
-python your_script.py
 ```
 """

         return output

     except Exception as e:
-        return f"❌ **Generation Failed**: {str(e)[:500]}"

-def extract_cadquery_code(generated_text: str) -> str:
-    """Extract clean CADQuery code from generated text."""
-    text = generated_text.strip()

     if "```python" in text:
         start = text.find("```python") + 9
         end = text.find("```", start)
-        if end > start:
-            code = text[start:end].strip()
-        else:
-            code = text[start:].strip()
     elif "import cadquery" in text.lower():
         lines = text.split('\n')
         code_lines = []
         started = False
-
         for line in lines:
             if "import cadquery" in line.lower():
                 started = True
             if started:
                 code_lines.append(line)
-
         code = '\n'.join(code_lines)
     else:
         code = text

-    lines = code.split('\n')
-    cleaned_lines = []
-
-    for line in lines:
-        line = line.strip()
-        if line and not line.startswith('```'):
-            cleaned_lines.append(line)
-
-    final_code = '\n'.join(cleaned_lines)

     if "import cadquery" not in final_code:
         final_code = "import cadquery as cq\n\n" + final_code

@@ -184,92 +135,93 @@ def extract_cadquery_code(generated_text: str) -> str:
     return final_code

 @spaces.GPU
-def test_model_loading(model_choice):
-    """Test loading a specific model."""
-    success, message = load_glm_model(model_choice)
-    return f"## Test Result\n\n{message}"
-
-def create_interface():
-    """Create the Gradio interface."""

-    with gr.Blocks(title="GLM-4.5V CAD Generator", theme=gr.themes.Soft()) as demo:
-        gr.Markdown("""
-        # 🔧 GLM-4.5V CAD Generator (Zero GPU)
-
-        Upload a 3D CAD model image and generate CADQuery Python code!
-
-        **Models:** GLM-4.5V-AWQ (fastest) • GLM-4.5V-FP8 (balanced) • GLM-4.5V (best quality)
-        """)
-
-        with gr.Tab("🚀 Generate"):
-            with gr.Row():
-                with gr.Column(scale=1):
-                    image_input = gr.Image(type="pil", label="Upload CAD Image", height=400)
-
-                    model_choice = gr.Dropdown(
-                        choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
-                        value="GLM-4.5V-AWQ",
-                        label="Model"
-                    )
-
-                    prompt_style = gr.Dropdown(
-                        choices=["Simple", "Detailed", "Chain-of-Thought"],
-                        value="Chain-of-Thought",
-                        label="Prompt Style"
-                    )
-
-                    generate_btn = gr.Button("🚀 Generate", variant="primary")
-
-                with gr.Column(scale=2):
-                    output_text = gr.Markdown("Upload image and click Generate!")

-            generate_btn.click(
-                fn=generate_cadquery_code_gpu, # Direct call - no wrapper
-                inputs=[image_input, model_choice, prompt_style],
-                outputs=output_text
-            )

-        with gr.Tab("🧪 Test"):
-            test_model_choice = gr.Dropdown(
-                choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
-                value="GLM-4.5V-AWQ",
-                label="Model to Test"
-            )
-            test_btn = gr.Button("Test Model")
-            test_output = gr.Markdown()
-
-            test_btn.click(
-                fn=test_model_loading,
-                inputs=test_model_choice,
-                outputs=test_output
-            )

-        with gr.Tab("ℹ️ Info"):
-            gr.Markdown("""
-            ## Zero GPU Benefits
-            - **A100 GPU** allocated on-demand
-            - **Pay per use** - no idle costs
-            - **Automatic scaling**
-
-            ## Usage Tips
-            - Clear CAD images work best
-            - GLM-4.5V-AWQ is fastest for testing
-            - Chain-of-Thought prompts give best results
-
-            ## Generated Code
-            Install CADQuery: `pip install cadquery`
-
-            Run your generated script to create 3D models!
-            """)

-    return demo

 if __name__ == "__main__":
-    print("🚀 GLM-4.5V CAD Generator with Zero GPU")
-
-    demo = create_interface()
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        show_error=True
-    )

+import spaces
 import gradio as gr
 import torch
 from transformers import pipeline
 from PIL import Image
 import time

 # Global model storage
 models = {}

 @spaces.GPU
+def generate_cadquery_with_zero_gpu(image_data, model_choice, prompt_style):
+    """Single function that handles everything on Zero GPU."""
+
+    # Model mapping
     model_map = {
         "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
         "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",

     model_name = model_map[model_choice]

     try:
+        # Load model if not already loaded
+        if model_name not in models:
+            print(f"🔄 Loading {model_name}...")
+            pipe = pipeline(
+                "image-text-to-text",
+                model=model_name,
+                device_map="auto",
+                torch_dtype=torch.float16,
+                trust_remote_code=True
+            )
+            models[model_name] = pipe
+            print(f"✅ Loaded {model_name}")

+        pipe = models[model_name]

+        # Create prompt
+        prompts = {
+            "Simple": "Generate CADQuery Python code for this 3D model:",
+            "Detailed": "Analyze this 3D CAD model and generate Python CADQuery code. Requirements: Import cadquery as cq, store result in 'result' variable, use proper syntax.",
+            "Chain-of-Thought": "Analyze this 3D CAD model step by step: 1) Identify geometry 2) Note features 3) Generate CADQuery code. ```python\nimport cadquery as cq\n# Generated code:"
         }

+        prompt = prompts[prompt_style]
+
+        # Generate
+        start_time = time.time()

         messages = [
             {
+                "role": "user",
                 "content": [
+                    {"type": "image", "image": image_data},
                     {"type": "text", "text": prompt}
                 ]
             }

         result = pipe(messages, max_new_tokens=512, temperature=0.7, do_sample=True)

+        if isinstance(result, list):
             generated_text = result[0].get("generated_text", str(result))
         else:
             generated_text = str(result)

         generation_time = time.time() - start_time

+        # Extract code
+        clean_code = extract_code(generated_text)
+
+        # Format output
         output = f"""## 🎯 Generated CADQuery Code

 ```python
 {clean_code}
 ```

+## 📊 Info
+- **Model**: {model_choice}
+- **Time**: {generation_time:.2f}s
+- **Style**: {prompt_style}

 ## 🔧 Usage
 ```bash
 pip install cadquery
+python script.py
 ```
 """

         return output

     except Exception as e:
+        return f"❌ **Error**: {str(e)[:300]}"

+def extract_code(text):
+    """Extract CADQuery code from generated text."""
+    text = text.strip()

     if "```python" in text:
         start = text.find("```python") + 9
         end = text.find("```", start)
+        code = text[start:end].strip() if end > start else text[start:].strip()
     elif "import cadquery" in text.lower():
         lines = text.split('\n')
         code_lines = []
         started = False
         for line in lines:
             if "import cadquery" in line.lower():
                 started = True
             if started:
                 code_lines.append(line)
         code = '\n'.join(code_lines)
     else:
         code = text

+    # Clean up
+    lines = [line.strip() for line in code.split('\n') if line.strip() and not line.strip().startswith('```')]
+    final_code = '\n'.join(lines)

+    # Ensure proper structure
     if "import cadquery" not in final_code:
         final_code = "import cadquery as cq\n\n" + final_code

     return final_code

 @spaces.GPU
+def test_model(model_choice):
+    """Test model loading."""
+    model_map = {
+        "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
+        "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
+        "GLM-4.5V": "zai-org/GLM-4.5V"
+    }

+    try:
+        model_name = model_map[model_choice]
+        if model_name not in models:
+            pipe = pipeline("image-text-to-text", model=model_name, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True)
+            models[model_name] = pipe
+        return f"✅ **{model_choice}** loaded successfully!"
+    except Exception as e:
+        return f"❌ **{model_choice}** failed: {str(e)[:200]}"
+
+# Simple wrapper functions that don't use Gradio context
+def generate_wrapper(image, model, style):
+    if image is None:
+        return "❌ Please upload an image first."
+    return generate_cadquery_with_zero_gpu(image, model, style)
+
+def test_wrapper(model):
+    return test_model(model)
+
+# Create interface
+with gr.Blocks(title="GLM CAD Generator", theme=gr.themes.Soft()) as demo:
+
+    gr.Markdown("""
+    # 🔧 GLM-4.5V CAD Generator
+
+    Generate CADQuery code from 3D model images using Zero GPU!
+
+    **Models:** AWQ (fastest) • FP8 (balanced) • Full (best quality)
+    """)
+
+    with gr.Tab("🚀 Generate"):
+        with gr.Row():
+            with gr.Column():
+                image_input = gr.Image(type="pil", label="CAD Image")
+                model_select = gr.Dropdown(
+                    ["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
+                    value="GLM-4.5V-AWQ",
+                    label="Model"
+                )
+                style_select = gr.Dropdown(
+                    ["Simple", "Detailed", "Chain-of-Thought"],
+                    value="Chain-of-Thought",
+                    label="Style"
+                )
+                gen_btn = gr.Button("🚀 Generate", variant="primary")

+            with gr.Column():
+                output_area = gr.Markdown("Upload image and generate!")

+        gen_btn.click(generate_wrapper, [image_input, model_select, style_select], output_area)
+
+    with gr.Tab("🧪 Test"):
+        test_select = gr.Dropdown(
+            ["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
+            value="GLM-4.5V-AWQ",
+            label="Test Model"
+        )
+        test_btn = gr.Button("Test")
+        test_out = gr.Markdown()

+        test_btn.click(test_wrapper, test_select, test_out)

+    with gr.Tab("ℹ️ Help"):
+        gr.Markdown("""
+        ## How to Use
+        1. Upload clear CAD model image
+        2. Select GLM model variant
+        3. Choose prompt style
+        4. Click Generate
+
+        ## Zero GPU
+        - A100 allocated automatically
+        - Pay only when generating
+        - No idle costs
+
+        ## Tips
+        - AWQ model is fastest
+        - Chain-of-Thought works best
+        - Clear images get better results
+        """)

 if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=7860)
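For context on what the app is supposed to emit: its prompts ask the model for a script that starts with `import cadquery as cq` and stores the final shape in a `result` variable, and the Usage block tells users to `pip install cadquery` and run the script. Below is a minimal, hypothetical sketch of such a script — the geometry, dimensions, and file names are illustrative, not actual model output — with an export step added so the part can be opened in a CAD viewer.

```python
# Hypothetical sketch of the kind of script this Space is meant to generate.
# Assumes CADQuery is installed (`pip install cadquery`); the shape and sizes are made up.
import cadquery as cq

# Build a simple plate with a centered through-hole and store it in `result`,
# following the convention the app's prompts ask the model to use.
result = (
    cq.Workplane("XY")
    .box(40, 40, 10)   # 40 x 40 x 10 base plate
    .faces(">Z")       # select the top face
    .workplane()
    .hole(8)           # 8 mm through-hole in the center
)

# Export to STEP so the generated part can be inspected in any CAD viewer.
cq.exporters.export(result, "model.step")
```

Saved as, say, `generated_part.py`, this would be run with `python generated_part.py` (the `python script.py` step in the app's Usage block) and would write `model.step` alongside it.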