aniket47 committed on
Commit
29ed9e9
·
1 Parent(s): 86e7db6

Switch to minimal FastAPI for testing deployment

Browse files
Files changed (6) hide show
  1. app.py +12 -345
  2. app_full.py +365 -0
  3. app_simple.py +32 -0
  4. requirements.txt +1 -18
  5. requirements_full.txt +19 -0
  6. requirements_simple.txt +2 -0
app.py CHANGED
@@ -1,365 +1,32 @@
1
  """
2
- FastAPI Backend for Text-to-3D Model Converter
3
- Deployed on Hugging Face Spaces with direct model loading
4
  """
5
 
6
- import os
7
  import logging
8
- import time
9
- import uuid
10
- import asyncio
11
- from typing import Optional
12
- from contextlib import asynccontextmanager
13
-
14
- import uvicorn
15
- from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks
16
- from fastapi.middleware.cors import CORSMiddleware
17
- from fastapi.responses import JSONResponse
18
- from pydantic import BaseModel
19
-
20
- from models.depth_processor import DepthProcessor
21
- from models.image_generator import ImageGenerator
22
- from utils.job_manager import JobManager
23
- from utils.cloudinary_client import CloudinaryClient
24
 
25
  # Configure logging
26
  logging.basicConfig(level=logging.INFO)
27
  logger = logging.getLogger(__name__)
28
 
29
- # Global variables for models
30
- depth_processor = None
31
- image_generator = None
32
- job_manager = None
33
- cloudinary_client = None
34
-
35
- @asynccontextmanager
36
- async def lifespan(app: FastAPI):
37
- """Initialize models on startup"""
38
- global depth_processor, image_generator, job_manager, cloudinary_client
39
-
40
- logger.info("🚀 Starting Text-to-3D Backend...")
41
-
42
- # Initialize utilities
43
- job_manager = JobManager()
44
- cloudinary_client = CloudinaryClient()
45
-
46
- # Initialize models
47
- logger.info("📦 Loading AI models...")
48
- try:
49
- # Initialize depth processor
50
- depth_processor = DepthProcessor()
51
- await asyncio.to_thread(depth_processor.load_model)
52
- logger.info("✅ Depth estimation model loaded")
53
-
54
- # Initialize image generator
55
- image_generator = ImageGenerator()
56
- await asyncio.to_thread(image_generator.load_model)
57
- logger.info("✅ Image generation model loaded")
58
-
59
- logger.info("🎉 All models loaded successfully!")
60
-
61
- except Exception as e:
62
- logger.error(f"❌ Failed to load models: {str(e)}")
63
- raise e
64
-
65
- yield
66
-
67
- # Cleanup on shutdown
68
- logger.info("🔄 Shutting down...")
69
-
70
  # Initialize FastAPI app
71
- app = FastAPI(
72
- title="Text-to-3D Backend",
73
- description="Convert text prompts and images to 3D models",
74
- version="1.0.0",
75
- lifespan=lifespan
76
- )
77
-
78
- # Configure CORS
79
- app.add_middleware(
80
- CORSMiddleware,
81
- allow_origins=[
82
- "http://localhost:3000", # Local development
83
- "https://*.render.com", # Render deployment
84
- "*" # Allow all for now, restrict in production
85
- ],
86
- allow_credentials=True,
87
- allow_methods=["*"],
88
- allow_headers=["*"],
89
- )
90
-
91
- # Request/Response models
92
- class GenerateRequest(BaseModel):
93
- prompt: str
94
- user_id: Optional[str] = None
95
-
96
- class GenerateResponse(BaseModel):
97
- success: bool
98
- job_id: str
99
- image_url: Optional[str] = None
100
- model_url: Optional[str] = None
101
- depth_map_url: Optional[str] = None
102
- error: Optional[str] = None
103
-
104
- class ProgressResponse(BaseModel):
105
- stage: str
106
- progress: int
107
- message: str
108
- timestamp: Optional[float] = None
109
 
110
  @app.get("/")
111
  async def root():
112
- """Health check endpoint"""
 
113
  return {
114
- "status": "Text-to-3D Backend is running! 🚀",
115
- "version": "1.0.0",
116
- "models_loaded": {
117
- "depth_processor": depth_processor is not None,
118
- "image_generator": image_generator is not None
119
- },
120
- "gpu_available": depth_processor.device.type == "cuda" if depth_processor else False
121
  }
122
 
123
  @app.get("/health")
124
- async def health_check():
125
- """Detailed health check"""
126
- return {
127
- "status": "healthy",
128
- "models": {
129
- "depth_estimation": "loaded" if depth_processor else "not_loaded",
130
- "image_generation": "loaded" if image_generator else "not_loaded"
131
- },
132
- "device": str(depth_processor.device) if depth_processor else "unknown",
133
- "active_jobs": job_manager.get_active_job_count() if job_manager else 0
134
- }
135
-
136
- @app.post("/generate", response_model=GenerateResponse)
137
- async def generate_from_text(
138
- request: GenerateRequest,
139
- background_tasks: BackgroundTasks
140
- ):
141
- """Generate 3D model from text prompt"""
142
- try:
143
- if not request.prompt.strip():
144
- raise HTTPException(status_code=400, detail="Prompt cannot be empty")
145
-
146
- # Create job ID
147
- job_id = str(uuid.uuid4())
148
- job_manager.register_job(job_id)
149
-
150
- logger.info(f"🎨 Starting text-to-3D generation: '{request.prompt}' (Job: {job_id})")
151
-
152
- # Start background processing
153
- background_tasks.add_task(
154
- process_text_to_3d,
155
- job_id,
156
- request.prompt,
157
- request.user_id
158
- )
159
-
160
- return GenerateResponse(
161
- success=True,
162
- job_id=job_id,
163
- message="Generation started"
164
- )
165
-
166
- except Exception as e:
167
- logger.error(f"❌ Error in generate endpoint: {str(e)}")
168
- raise HTTPException(status_code=500, detail=str(e))
169
-
170
- @app.post("/upload")
171
- async def upload_image(
172
- file: UploadFile = File(...),
173
- background_tasks: BackgroundTasks = None,
174
- user_id: Optional[str] = None
175
- ):
176
- """Convert uploaded image to 3D model"""
177
- try:
178
- # Validate file type
179
- if not file.content_type.startswith('image/'):
180
- raise HTTPException(status_code=400, detail="File must be an image")
181
-
182
- # Create job ID
183
- job_id = str(uuid.uuid4())
184
- job_manager.register_job(job_id)
185
-
186
- logger.info(f"📤 Processing uploaded image: {file.filename} (Job: {job_id})")
187
-
188
- # Read file content
189
- file_content = await file.read()
190
-
191
- # Start background processing
192
- background_tasks.add_task(
193
- process_upload_to_3d,
194
- job_id,
195
- file_content,
196
- file.filename,
197
- user_id
198
- )
199
-
200
- return {
201
- "success": True,
202
- "job_id": job_id,
203
- "message": "Upload processing started"
204
- }
205
-
206
- except Exception as e:
207
- logger.error(f"❌ Error in upload endpoint: {str(e)}")
208
- raise HTTPException(status_code=500, detail=str(e))
209
-
210
- @app.get("/progress/{job_id}", response_model=ProgressResponse)
211
- async def get_progress(job_id: str):
212
- """Get job progress"""
213
- try:
214
- progress = job_manager.get_job_progress(job_id)
215
- if not progress:
216
- raise HTTPException(status_code=404, detail="Job not found")
217
-
218
- return ProgressResponse(**progress)
219
-
220
- except Exception as e:
221
- logger.error(f"❌ Error getting progress: {str(e)}")
222
- raise HTTPException(status_code=500, detail=str(e))
223
-
224
- @app.post("/cancel")
225
- async def cancel_job(job_id: str):
226
- """Cancel a running job"""
227
- try:
228
- success = job_manager.cancel_job(job_id)
229
- if success:
230
- return {"success": True, "message": f"Job {job_id} cancelled"}
231
- else:
232
- raise HTTPException(status_code=404, detail="Job not found")
233
-
234
- except Exception as e:
235
- logger.error(f"❌ Error cancelling job: {str(e)}")
236
- raise HTTPException(status_code=500, detail=str(e))
237
-
238
- async def process_text_to_3d(job_id: str, prompt: str, user_id: Optional[str]):
239
- """Background task to process text to 3D"""
240
- try:
241
- # Update progress
242
- job_manager.update_job_progress(job_id, "generating_image", 10, "Generating image from text...")
243
-
244
- # Generate image from text
245
- image_result = await asyncio.to_thread(
246
- image_generator.generate_image,
247
- prompt
248
- )
249
-
250
- if job_manager.is_job_cancelled(job_id):
251
- return
252
-
253
- job_manager.update_job_progress(job_id, "uploading_image", 40, "Uploading generated image...")
254
-
255
- # Upload image to Cloudinary
256
- image_url = await asyncio.to_thread(
257
- cloudinary_client.upload_image_from_bytes,
258
- image_result['image_bytes'],
259
- f"generated_{job_id}"
260
- )
261
-
262
- if job_manager.is_job_cancelled(job_id):
263
- return
264
-
265
- job_manager.update_job_progress(job_id, "creating_depth", 60, "Creating depth map...")
266
-
267
- # Generate depth map and 3D model
268
- depth_result = await asyncio.to_thread(
269
- depth_processor.process_image_to_3d,
270
- image_result['image_pil'],
271
- job_id
272
- )
273
-
274
- if job_manager.is_job_cancelled(job_id):
275
- return
276
-
277
- job_manager.update_job_progress(job_id, "uploading_results", 90, "Uploading 3D model...")
278
-
279
- # Upload results
280
- model_url = await asyncio.to_thread(
281
- cloudinary_client.upload_file,
282
- depth_result['obj_path'],
283
- f"model_{job_id}.obj"
284
- )
285
-
286
- depth_map_url = await asyncio.to_thread(
287
- cloudinary_client.upload_image_from_path,
288
- depth_result['depth_map_path'],
289
- f"depth_{job_id}"
290
- )
291
-
292
- # Complete job
293
- job_manager.complete_job(job_id, {
294
- "image_url": image_url,
295
- "model_url": model_url,
296
- "depth_map_url": depth_map_url
297
- })
298
-
299
- logger.info(f"✅ Text-to-3D generation completed: {job_id}")
300
-
301
- except Exception as e:
302
- logger.error(f"❌ Error in text-to-3D processing: {str(e)}")
303
- job_manager.fail_job(job_id, str(e))
304
-
305
- async def process_upload_to_3d(job_id: str, file_content: bytes, filename: str, user_id: Optional[str]):
306
- """Background task to process uploaded image to 3D"""
307
- try:
308
- job_manager.update_job_progress(job_id, "uploading", 20, "Uploading image to cloud...")
309
-
310
- # Upload original image
311
- image_url = await asyncio.to_thread(
312
- cloudinary_client.upload_image_from_bytes,
313
- file_content,
314
- f"upload_{job_id}_{filename}"
315
- )
316
-
317
- if job_manager.is_job_cancelled(job_id):
318
- return
319
-
320
- job_manager.update_job_progress(job_id, "processing", 50, "Processing image to 3D...")
321
-
322
- # Convert to PIL Image
323
- from PIL import Image
324
- image_pil = Image.open(io.BytesIO(file_content))
325
-
326
- # Generate depth map and 3D model
327
- depth_result = await asyncio.to_thread(
328
- depth_processor.process_image_to_3d,
329
- image_pil,
330
- job_id
331
- )
332
-
333
- if job_manager.is_job_cancelled(job_id):
334
- return
335
-
336
- job_manager.update_job_progress(job_id, "uploading_results", 90, "Uploading 3D model...")
337
-
338
- # Upload results
339
- model_url = await asyncio.to_thread(
340
- cloudinary_client.upload_file,
341
- depth_result['obj_path'],
342
- f"model_{job_id}.obj"
343
- )
344
-
345
- depth_map_url = await asyncio.to_thread(
346
- cloudinary_client.upload_image_from_path,
347
- depth_result['depth_map_path'],
348
- f"depth_{job_id}"
349
- )
350
-
351
- # Complete job
352
- job_manager.complete_job(job_id, {
353
- "image_url": image_url,
354
- "model_url": model_url,
355
- "depth_map_url": depth_map_url
356
- })
357
-
358
- logger.info(f"✅ Upload-to-3D processing completed: {job_id}")
359
-
360
- except Exception as e:
361
- logger.error(f"❌ Error in upload-to-3D processing: {str(e)}")
362
- job_manager.fail_job(job_id, str(e))
363
 
364
  if __name__ == "__main__":
 
365
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
  """
2
+ Minimal FastAPI test for debugging HF Spaces deployment
 
3
  """
4
 
5
+ from fastapi import FastAPI
6
  import logging
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  # Configure logging
9
  logging.basicConfig(level=logging.INFO)
10
  logger = logging.getLogger(__name__)
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  # Initialize FastAPI app
13
+ app = FastAPI(title="Text-to-3D Backend Test", version="1.0.0")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  @app.get("/")
16
  async def root():
17
+ """Simple health check"""
18
+ logger.info("Health check requested")
19
  return {
20
+ "status": "FastAPI is running! 🚀",
21
+ "message": "Basic setup working",
22
+ "test": True
 
 
 
 
23
  }
24
 
25
  @app.get("/health")
26
+ async def health():
27
+ """Health endpoint"""
28
+ return {"status": "healthy", "service": "text-to-3d-backend"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  if __name__ == "__main__":
31
+ import uvicorn
32
  uvicorn.run(app, host="0.0.0.0", port=7860)
app_full.py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FastAPI Backend for Text-to-3D Model Converter
3
+ Deployed on Hugging Face Spaces with direct model loading
4
+ """
5
+
6
+ import os
7
+ import logging
8
+ import time
9
+ import uuid
10
+ import asyncio
11
+ from typing import Optional
12
+ from contextlib import asynccontextmanager
13
+
14
+ import uvicorn
15
+ from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks
16
+ from fastapi.middleware.cors import CORSMiddleware
17
+ from fastapi.responses import JSONResponse
18
+ from pydantic import BaseModel
19
+
20
+ from models.depth_processor import DepthProcessor
21
+ from models.image_generator import ImageGenerator
22
+ from utils.job_manager import JobManager
23
+ from utils.cloudinary_client import CloudinaryClient
24
+
25
+ # Configure logging
26
+ logging.basicConfig(level=logging.INFO)
27
+ logger = logging.getLogger(__name__)
28
+
29
+ # Global variables for models
30
+ depth_processor = None
31
+ image_generator = None
32
+ job_manager = None
33
+ cloudinary_client = None
34
+
35
+ @asynccontextmanager
36
+ async def lifespan(app: FastAPI):
37
+ """Initialize models on startup"""
38
+ global depth_processor, image_generator, job_manager, cloudinary_client
39
+
40
+ logger.info("🚀 Starting Text-to-3D Backend...")
41
+
42
+ # Initialize utilities
43
+ job_manager = JobManager()
44
+ cloudinary_client = CloudinaryClient()
45
+
46
+ # Initialize models
47
+ logger.info("📦 Loading AI models...")
48
+ try:
49
+ # Initialize depth processor
50
+ depth_processor = DepthProcessor()
51
+ await asyncio.to_thread(depth_processor.load_model)
52
+ logger.info("✅ Depth estimation model loaded")
53
+
54
+ # Initialize image generator
55
+ image_generator = ImageGenerator()
56
+ await asyncio.to_thread(image_generator.load_model)
57
+ logger.info("✅ Image generation model loaded")
58
+
59
+ logger.info("🎉 All models loaded successfully!")
60
+
61
+ except Exception as e:
62
+ logger.error(f"❌ Failed to load models: {str(e)}")
63
+ raise e
64
+
65
+ yield
66
+
67
+ # Cleanup on shutdown
68
+ logger.info("🔄 Shutting down...")
69
+
70
+ # Initialize FastAPI app
71
+ app = FastAPI(
72
+ title="Text-to-3D Backend",
73
+ description="Convert text prompts and images to 3D models",
74
+ version="1.0.0",
75
+ lifespan=lifespan
76
+ )
77
+
78
+ # Configure CORS
79
+ app.add_middleware(
80
+ CORSMiddleware,
81
+ allow_origins=[
82
+ "http://localhost:3000", # Local development
83
+ "https://*.render.com", # Render deployment
84
+ "*" # Allow all for now, restrict in production
85
+ ],
86
+ allow_credentials=True,
87
+ allow_methods=["*"],
88
+ allow_headers=["*"],
89
+ )
90
+
91
+ # Request/Response models
92
+ class GenerateRequest(BaseModel):
93
+ prompt: str
94
+ user_id: Optional[str] = None
95
+
96
+ class GenerateResponse(BaseModel):
97
+ success: bool
98
+ job_id: str
99
+ image_url: Optional[str] = None
100
+ model_url: Optional[str] = None
101
+ depth_map_url: Optional[str] = None
102
+ error: Optional[str] = None
103
+
104
+ class ProgressResponse(BaseModel):
105
+ stage: str
106
+ progress: int
107
+ message: str
108
+ timestamp: Optional[float] = None
109
+
110
+ @app.get("/")
111
+ async def root():
112
+ """Health check endpoint"""
113
+ return {
114
+ "status": "Text-to-3D Backend is running! 🚀",
115
+ "version": "1.0.0",
116
+ "models_loaded": {
117
+ "depth_processor": depth_processor is not None,
118
+ "image_generator": image_generator is not None
119
+ },
120
+ "gpu_available": depth_processor.device.type == "cuda" if depth_processor else False
121
+ }
122
+
123
+ @app.get("/health")
124
+ async def health_check():
125
+ """Detailed health check"""
126
+ return {
127
+ "status": "healthy",
128
+ "models": {
129
+ "depth_estimation": "loaded" if depth_processor else "not_loaded",
130
+ "image_generation": "loaded" if image_generator else "not_loaded"
131
+ },
132
+ "device": str(depth_processor.device) if depth_processor else "unknown",
133
+ "active_jobs": job_manager.get_active_job_count() if job_manager else 0
134
+ }
135
+
136
+ @app.post("/generate", response_model=GenerateResponse)
137
+ async def generate_from_text(
138
+ request: GenerateRequest,
139
+ background_tasks: BackgroundTasks
140
+ ):
141
+ """Generate 3D model from text prompt"""
142
+ try:
143
+ if not request.prompt.strip():
144
+ raise HTTPException(status_code=400, detail="Prompt cannot be empty")
145
+
146
+ # Create job ID
147
+ job_id = str(uuid.uuid4())
148
+ job_manager.register_job(job_id)
149
+
150
+ logger.info(f"🎨 Starting text-to-3D generation: '{request.prompt}' (Job: {job_id})")
151
+
152
+ # Start background processing
153
+ background_tasks.add_task(
154
+ process_text_to_3d,
155
+ job_id,
156
+ request.prompt,
157
+ request.user_id
158
+ )
159
+
160
+ return GenerateResponse(
161
+ success=True,
162
+ job_id=job_id,
163
+ message="Generation started"
164
+ )
165
+
166
+ except Exception as e:
167
+ logger.error(f"❌ Error in generate endpoint: {str(e)}")
168
+ raise HTTPException(status_code=500, detail=str(e))
169
+
170
+ @app.post("/upload")
171
+ async def upload_image(
172
+ file: UploadFile = File(...),
173
+ background_tasks: BackgroundTasks = None,
174
+ user_id: Optional[str] = None
175
+ ):
176
+ """Convert uploaded image to 3D model"""
177
+ try:
178
+ # Validate file type
179
+ if not file.content_type.startswith('image/'):
180
+ raise HTTPException(status_code=400, detail="File must be an image")
181
+
182
+ # Create job ID
183
+ job_id = str(uuid.uuid4())
184
+ job_manager.register_job(job_id)
185
+
186
+ logger.info(f"📤 Processing uploaded image: {file.filename} (Job: {job_id})")
187
+
188
+ # Read file content
189
+ file_content = await file.read()
190
+
191
+ # Start background processing
192
+ background_tasks.add_task(
193
+ process_upload_to_3d,
194
+ job_id,
195
+ file_content,
196
+ file.filename,
197
+ user_id
198
+ )
199
+
200
+ return {
201
+ "success": True,
202
+ "job_id": job_id,
203
+ "message": "Upload processing started"
204
+ }
205
+
206
+ except Exception as e:
207
+ logger.error(f"❌ Error in upload endpoint: {str(e)}")
208
+ raise HTTPException(status_code=500, detail=str(e))
209
+
210
+ @app.get("/progress/{job_id}", response_model=ProgressResponse)
211
+ async def get_progress(job_id: str):
212
+ """Get job progress"""
213
+ try:
214
+ progress = job_manager.get_job_progress(job_id)
215
+ if not progress:
216
+ raise HTTPException(status_code=404, detail="Job not found")
217
+
218
+ return ProgressResponse(**progress)
219
+
220
+ except Exception as e:
221
+ logger.error(f"❌ Error getting progress: {str(e)}")
222
+ raise HTTPException(status_code=500, detail=str(e))
223
+
224
+ @app.post("/cancel")
225
+ async def cancel_job(job_id: str):
226
+ """Cancel a running job"""
227
+ try:
228
+ success = job_manager.cancel_job(job_id)
229
+ if success:
230
+ return {"success": True, "message": f"Job {job_id} cancelled"}
231
+ else:
232
+ raise HTTPException(status_code=404, detail="Job not found")
233
+
234
+ except Exception as e:
235
+ logger.error(f"❌ Error cancelling job: {str(e)}")
236
+ raise HTTPException(status_code=500, detail=str(e))
237
+
238
+ async def process_text_to_3d(job_id: str, prompt: str, user_id: Optional[str]):
239
+ """Background task to process text to 3D"""
240
+ try:
241
+ # Update progress
242
+ job_manager.update_job_progress(job_id, "generating_image", 10, "Generating image from text...")
243
+
244
+ # Generate image from text
245
+ image_result = await asyncio.to_thread(
246
+ image_generator.generate_image,
247
+ prompt
248
+ )
249
+
250
+ if job_manager.is_job_cancelled(job_id):
251
+ return
252
+
253
+ job_manager.update_job_progress(job_id, "uploading_image", 40, "Uploading generated image...")
254
+
255
+ # Upload image to Cloudinary
256
+ image_url = await asyncio.to_thread(
257
+ cloudinary_client.upload_image_from_bytes,
258
+ image_result['image_bytes'],
259
+ f"generated_{job_id}"
260
+ )
261
+
262
+ if job_manager.is_job_cancelled(job_id):
263
+ return
264
+
265
+ job_manager.update_job_progress(job_id, "creating_depth", 60, "Creating depth map...")
266
+
267
+ # Generate depth map and 3D model
268
+ depth_result = await asyncio.to_thread(
269
+ depth_processor.process_image_to_3d,
270
+ image_result['image_pil'],
271
+ job_id
272
+ )
273
+
274
+ if job_manager.is_job_cancelled(job_id):
275
+ return
276
+
277
+ job_manager.update_job_progress(job_id, "uploading_results", 90, "Uploading 3D model...")
278
+
279
+ # Upload results
280
+ model_url = await asyncio.to_thread(
281
+ cloudinary_client.upload_file,
282
+ depth_result['obj_path'],
283
+ f"model_{job_id}.obj"
284
+ )
285
+
286
+ depth_map_url = await asyncio.to_thread(
287
+ cloudinary_client.upload_image_from_path,
288
+ depth_result['depth_map_path'],
289
+ f"depth_{job_id}"
290
+ )
291
+
292
+ # Complete job
293
+ job_manager.complete_job(job_id, {
294
+ "image_url": image_url,
295
+ "model_url": model_url,
296
+ "depth_map_url": depth_map_url
297
+ })
298
+
299
+ logger.info(f"✅ Text-to-3D generation completed: {job_id}")
300
+
301
+ except Exception as e:
302
+ logger.error(f"❌ Error in text-to-3D processing: {str(e)}")
303
+ job_manager.fail_job(job_id, str(e))
304
+
305
+ async def process_upload_to_3d(job_id: str, file_content: bytes, filename: str, user_id: Optional[str]):
306
+ """Background task to process uploaded image to 3D"""
307
+ try:
308
+ job_manager.update_job_progress(job_id, "uploading", 20, "Uploading image to cloud...")
309
+
310
+ # Upload original image
311
+ image_url = await asyncio.to_thread(
312
+ cloudinary_client.upload_image_from_bytes,
313
+ file_content,
314
+ f"upload_{job_id}_{filename}"
315
+ )
316
+
317
+ if job_manager.is_job_cancelled(job_id):
318
+ return
319
+
320
+ job_manager.update_job_progress(job_id, "processing", 50, "Processing image to 3D...")
321
+
322
+ # Convert to PIL Image
323
+ from PIL import Image
324
+ image_pil = Image.open(io.BytesIO(file_content))
325
+
326
+ # Generate depth map and 3D model
327
+ depth_result = await asyncio.to_thread(
328
+ depth_processor.process_image_to_3d,
329
+ image_pil,
330
+ job_id
331
+ )
332
+
333
+ if job_manager.is_job_cancelled(job_id):
334
+ return
335
+
336
+ job_manager.update_job_progress(job_id, "uploading_results", 90, "Uploading 3D model...")
337
+
338
+ # Upload results
339
+ model_url = await asyncio.to_thread(
340
+ cloudinary_client.upload_file,
341
+ depth_result['obj_path'],
342
+ f"model_{job_id}.obj"
343
+ )
344
+
345
+ depth_map_url = await asyncio.to_thread(
346
+ cloudinary_client.upload_image_from_path,
347
+ depth_result['depth_map_path'],
348
+ f"depth_{job_id}"
349
+ )
350
+
351
+ # Complete job
352
+ job_manager.complete_job(job_id, {
353
+ "image_url": image_url,
354
+ "model_url": model_url,
355
+ "depth_map_url": depth_map_url
356
+ })
357
+
358
+ logger.info(f"✅ Upload-to-3D processing completed: {job_id}")
359
+
360
+ except Exception as e:
361
+ logger.error(f"❌ Error in upload-to-3D processing: {str(e)}")
362
+ job_manager.fail_job(job_id, str(e))
363
+
364
+ if __name__ == "__main__":
365
+ uvicorn.run(app, host="0.0.0.0", port=7860)
app_simple.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Minimal FastAPI test for debugging HF Spaces deployment
3
+ """
4
+
5
+ from fastapi import FastAPI
6
+ import logging
7
+
8
+ # Configure logging
9
+ logging.basicConfig(level=logging.INFO)
10
+ logger = logging.getLogger(__name__)
11
+
12
+ # Initialize FastAPI app
13
+ app = FastAPI(title="Text-to-3D Backend Test", version="1.0.0")
14
+
15
+ @app.get("/")
16
+ async def root():
17
+ """Simple health check"""
18
+ logger.info("Health check requested")
19
+ return {
20
+ "status": "FastAPI is running! 🚀",
21
+ "message": "Basic setup working",
22
+ "test": True
23
+ }
24
+
25
+ @app.get("/health")
26
+ async def health():
27
+ """Health endpoint"""
28
+ return {"status": "healthy", "service": "text-to-3d-backend"}
29
+
30
+ if __name__ == "__main__":
31
+ import uvicorn
32
+ uvicorn.run(app, host="0.0.0.0", port=7860)
requirements.txt CHANGED
@@ -1,19 +1,2 @@
1
  fastapi==0.104.1
2
- uvicorn[standard]==0.24.0
3
- python-multipart==0.0.6
4
- pydantic==2.5.0
5
- torch==2.1.1
6
- torchvision==0.16.1
7
- torchaudio==2.1.1
8
- transformers==4.39.3
9
- diffusers==0.27.0
10
- accelerate==0.27.0
11
- Pillow==10.3.0
12
- numpy==1.24.3
13
- open3d==0.18.0
14
- matplotlib==3.7.2
15
- cloudinary==1.37.0
16
- python-dotenv==1.0.0
17
- safetensors==0.4.2
18
- huggingface_hub==0.20.2
19
- requests==2.31.0
 
1
  fastapi==0.104.1
2
+ uvicorn[standard]==0.24.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements_full.txt ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.104.1
2
+ uvicorn[standard]==0.24.0
3
+ python-multipart==0.0.6
4
+ pydantic==2.5.0
5
+ torch==2.1.1
6
+ torchvision==0.16.1
7
+ torchaudio==2.1.1
8
+ transformers==4.39.3
9
+ diffusers==0.27.0
10
+ accelerate==0.27.0
11
+ Pillow==10.3.0
12
+ numpy==1.24.3
13
+ open3d==0.18.0
14
+ matplotlib==3.7.2
15
+ cloudinary==1.37.0
16
+ python-dotenv==1.0.0
17
+ safetensors==0.4.2
18
+ huggingface_hub==0.20.2
19
+ requests==2.31.0
requirements_simple.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ fastapi==0.104.1
2
+ uvicorn[standard]==0.24.0