Akashmj22122002 committed on
Commit
d669276
·
verified ·
1 Parent(s): 44af812

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +35 -108
app.py CHANGED
@@ -1,22 +1,14 @@
1
- # # deep_search_mine.py (Renamed to app.py)
 
2
  # # --------------------------------------------------------------
3
 
4
  # import asyncio
5
  # import os
6
  # import time
7
  # import re
8
- # import textwrap
9
  # from typing import List
10
  # from xml.sax.saxutils import escape
11
 
12
- # # Graph & Plotting
13
- # import networkx as nx
14
- # import matplotlib.pyplot as plt
15
- # import matplotlib
16
-
17
- # # Set non-GUI backend to prevent crashes
18
- # matplotlib.use('Agg')
19
-
20
  # from dotenv import load_dotenv
21
  # from pydantic import BaseModel
22
  # import gradio as gr
@@ -34,7 +26,7 @@
34
 
35
  # # PDF Generation
36
  # from reportlab.lib.pagesizes import letter
37
- # from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image as PlatypusImage
38
  # from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
39
  # from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT
40
 
@@ -106,7 +98,7 @@
106
  # return await robust_run(chat_agent, prompt)
107
 
108
  # # --------------------------------------------------------------
109
- # # πŸ“‚ FILE GENERATORS (FIXED ALIGNMENT)
110
  # # --------------------------------------------------------------
111
 
112
  # def clean_text_for_pdf(text):
@@ -121,7 +113,6 @@
121
  # doc = SimpleDocTemplate(filename, pagesize=letter,
122
  # rightMargin=72, leftMargin=72,
123
  # topMargin=72, bottomMargin=72)
124
-
125
  # styles = getSampleStyleSheet()
126
  # styles.add(ParagraphStyle(name='ProTitle', parent=styles['Title'], fontName='Helvetica-Bold', fontSize=24, spaceAfter=24, alignment=TA_LEFT, textColor="black"))
127
  # styles.add(ParagraphStyle(name='ProH1', parent=styles['Heading1'], fontName='Helvetica-Bold', fontSize=16, spaceBefore=18, spaceAfter=12, alignment=TA_LEFT, textColor="black"))
@@ -132,7 +123,6 @@
132
  # for p in text.split('\n'):
133
  # p = p.strip()
134
  # if not p: continue
135
-
136
  # if p.startswith("# "):
137
  # story.append(Paragraph(clean_text_for_pdf(p.replace("# ", "")), styles["ProTitle"]))
138
  # elif p.startswith("## "):
@@ -145,11 +135,9 @@
145
  # else:
146
  # p_content = clean_text_for_pdf(p)
147
  # story.append(Paragraph(p_content, styles["ProBody"]))
148
-
149
  # doc.build(story)
150
  # return filename
151
  # except Exception as e:
152
- # print(f"PDF Error: {e}")
153
  # return None
154
 
155
  # def create_docx(text: str, filename="research_report.docx"):
@@ -164,7 +152,6 @@
164
  # for line in text.split('\n'):
165
  # line = line.strip()
166
  # if not line: continue
167
-
168
  # if line.startswith("# "):
169
  # h = doc.add_heading(line.replace("# ", "").replace("**", ""), 0)
170
  # h.alignment = WD_ALIGN_PARAGRAPH.LEFT
@@ -176,11 +163,9 @@
176
  # doc.add_paragraph(line[2:], style='List Bullet')
177
  # else:
178
  # doc.add_paragraph(line)
179
-
180
  # doc.save(filename)
181
  # return filename
182
  # except Exception as e:
183
- # print(f"Docx Error: {e}")
184
  # return None
185
 
186
  # def create_pptx(text: str, filename="presentation.pptx"):
@@ -188,7 +173,6 @@
188
  # prs = Presentation()
189
  # TITLE_SLIDE_LAYOUT = 0
190
  # CONTENT_SLIDE_LAYOUT = 1
191
-
192
  # lines = [l.strip() for l in text.split('\n') if l.strip()]
193
  # if not lines: return None
194
 
@@ -212,11 +196,9 @@
212
  # current_slide.shapes.title.text = clean_text
213
  # else:
214
  # current_slide.shapes.title.text = "Continued..."
215
-
216
  # body_text_frame = current_slide.placeholders[1].text_frame
217
  # body_text_frame.clear()
218
  # current_line_count = 0
219
-
220
  # if is_overflow and not is_heading:
221
  # p = body_text_frame.add_paragraph()
222
  # p.text = clean_text
@@ -240,11 +222,9 @@
240
  # p.font.size = PptxPt(18)
241
  # p.level = 0
242
  # current_line_count += 1
243
-
244
  # prs.save(filename)
245
  # return filename
246
  # except Exception as e:
247
- # print(f"PPTX Error: {e}")
248
  # return None
249
 
250
  # # --------------------------------------------------------------
@@ -260,11 +240,7 @@
260
 
261
  # planner_agent = Agent(name="Planner", instructions=f"{persona_instruction} Plan 3 search queries in English.", model=MODEL)
262
  # search_agent = Agent(name="Search", instructions=f"{persona_instruction} concise summary in English.", tools=[WebSearchTool()], model=MODEL)
263
-
264
- # optimist_agent = Agent(name="Optimist", instructions="Visionary optimist. Highlight benefits. Be brief.", model=MODEL)
265
- # skeptic_agent = Agent(name="Skeptic", instructions="Critical skeptic. Highlight risks. Be brief.", model=MODEL)
266
-
267
- # writer_agent = Agent(name="Writer", instructions=f"Professional translator. Write output strictly in {language}.", model=MODEL)
268
 
269
  # yield f"πŸ•΅οΈβ€β™‚οΈ **Persona:** {persona_name}\n"
270
  # yield f"🌍 **Language:** {language}\n"
@@ -281,21 +257,14 @@
281
  # results.append(res)
282
 
283
  # context = "\n".join(results)
284
-
285
- # yield "βš”οΈ **Debate Mode:** Simulating viewpoints...\n\n"
286
- # optimist_take = await robust_run(optimist_agent, f"Analyze: {context}")
287
- # yield f"**🟒 Optimist:**\n{optimist_take[:300]}...\n\n"
288
- # skeptic_take = await robust_run(skeptic_agent, f"Analyze: {context}")
289
- # yield f"**πŸ”΄ Skeptic:**\n{skeptic_take[:300]}...\n\n"
290
-
291
  # yield f"πŸ“ **Drafting:** Synthesizing report in {language}...\n\n"
292
- # full_report = f"# Research Report: {query}\n\n"
293
- # writer_context = f"{context}\n\nOPPOSING VIEWS:\nPositive: {optimist_take}\nNegative: {skeptic_take}"
294
 
295
- # sections = ["Executive Summary", "Key Findings", "Debate Analysis", "Conclusion"]
 
296
  # for sec in sections:
297
  # yield f"✍️ **Writing:** {sec}...\n"
298
- # prompt = f"Write the '{sec}' section for '{query}'.\nContext: {writer_context}\nIMPORTANT: Write strictly in {language}."
299
  # content = await robust_run(writer_agent, prompt)
300
  # full_report += f"## {sec}\n\n{content}\n\n"
301
  # yield f"## {sec}\n\n{content}\n\n"
@@ -309,10 +278,11 @@
309
  # pdf_path = create_pdf(full_report)
310
 
311
  # yield "βœ… **Done:** Process Completed Successfully.\n"
 
312
  # yield full_report, pdf_path, docx_path, pptx_path, full_report
313
 
314
  # # --------------------------------------------------------------
315
- # # ✨ STANDARD UI (Clean & Compatible)
316
  # # --------------------------------------------------------------
317
 
318
  # with gr.Blocks(title="Deep Research AI") as ui:
@@ -354,7 +324,7 @@
354
  # with gr.TabItem("πŸ“„ Live Report"):
355
  # report_output = gr.Markdown(label="System Output")
356
  # with gr.TabItem("πŸ’¬ Chat with Report"):
357
- # # CHANGED: Removed type="messages" to support all Gradio versions
358
  # chatbot = gr.Chatbot(height=450, show_label=False)
359
  # with gr.Row():
360
  # msg = gr.Textbox(placeholder="Ask a question about the report...", show_label=False, scale=4)
@@ -377,19 +347,21 @@
377
  # outputs=[report_output, pdf_out, docx_out, pptx_out, report_state]
378
  # )
379
 
380
- # # CHANGED: Updated Chat Logic for compatibility
 
381
  # async def user_chat(message, history, report_context):
382
- # # Handle history list if None
383
  # history = history or []
384
-
385
- # # Append user message in [user, bot] format (Classic Gradio Style)
386
- # history.append([message, None])
387
  # yield "", history
388
 
389
- # # Get response
390
- # bot_response = await chat_with_report_logic(message, history, report_context)
 
 
 
391
 
392
- # # Update the last entry with bot response
393
  # history[-1][1] = bot_response
394
  # yield "", history
395
 
@@ -397,12 +369,12 @@
397
  # send_btn.click(user_chat, [msg, chatbot, report_state], [msg, chatbot])
398
 
399
  # if __name__ == "__main__":
400
- # ui.launch()
401
 
402
 
403
- # working code lets see
404
 
405
- # app.py (Universal Compatibility Version)
406
  # --------------------------------------------------------------
407
 
408
  import asyncio
@@ -485,23 +457,7 @@ def parse_planner_output_to_plan(text: str) -> WebSearchPlan:
485
  return WebSearchPlan(searches=items)
486
 
487
  # --------------------------------------------------------------
488
- # πŸ’¬ CHAT LOGIC
489
- # --------------------------------------------------------------
490
-
491
- async def chat_with_report_logic(user_message, history, report_context):
492
- if not report_context:
493
- return "⚠️ No report available. Please generate one first."
494
-
495
- chat_agent = Agent(
496
- name="ReportChat",
497
- instructions="You are a helpful assistant. Answer based ONLY on the provided Report Context. Be concise.",
498
- model=MODEL
499
- )
500
- prompt = f"CONTEXT:\n{report_context}\n\nUSER QUESTION:\n{user_message}\n\nANSWER:"
501
- return await robust_run(chat_agent, prompt)
502
-
503
- # --------------------------------------------------------------
504
- # πŸ“‚ FILE GENERATORS
505
  # --------------------------------------------------------------
506
 
507
  def clean_text_for_pdf(text):
@@ -682,16 +638,15 @@ async def stream_workflow(query: str, persona_name: str, language: str):
682
 
683
  yield "βœ… **Done:** Process Completed Successfully.\n"
684
 
685
- yield full_report, pdf_path, docx_path, pptx_path, full_report
 
686
 
687
  # --------------------------------------------------------------
688
- # ✨ STANDARD UI
689
  # --------------------------------------------------------------
690
 
691
  with gr.Blocks(title="Deep Research AI") as ui:
692
 
693
- report_state = gr.State(value="")
694
-
695
  gr.Markdown("# πŸš€ Deep Research Engine")
696
  gr.Markdown("Autonomous agent for professional research, analysis, and document generation.")
697
 
@@ -723,53 +678,25 @@ with gr.Blocks(title="Deep Research AI") as ui:
723
  pptx_out = gr.DownloadButton(label="Download PowerPoint")
724
 
725
  with gr.Column(scale=2):
726
- with gr.Tabs():
727
- with gr.TabItem("πŸ“„ Live Report"):
728
- report_output = gr.Markdown(label="System Output")
729
- with gr.TabItem("πŸ’¬ Chat with Report"):
730
- # CHANGED: REMOVED 'type' argument entirely. This is essential for older Gradio servers.
731
- chatbot = gr.Chatbot(height=450, show_label=False)
732
- with gr.Row():
733
- msg = gr.Textbox(placeholder="Ask a question about the report...", show_label=False, scale=4)
734
- send_btn = gr.Button("Send", scale=1)
735
 
736
  async def run_stream_wrapper(q, p, l):
737
  async for chunk in stream_workflow(q, p, l):
738
  if isinstance(chunk, tuple):
 
739
  yield gr.update(value=chunk[0]), \
740
  gr.update(value=chunk[1]), \
741
  gr.update(value=chunk[2]), \
742
- gr.update(value=chunk[3]), \
743
- chunk[4]
744
  else:
745
- yield chunk, None, None, None, None
 
746
 
747
  run_btn.click(
748
  fn=run_stream_wrapper,
749
  inputs=[query_input, persona_dropdown, language_dropdown],
750
- outputs=[report_output, pdf_out, docx_out, pptx_out, report_state]
751
  )
752
 
753
- # CHANGED: Classic Chat Logic (List of Lists)
754
- # This format: [[user_msg, bot_msg], [user_msg, bot_msg]] works on ALL versions.
755
- async def user_chat(message, history, report_context):
756
- # 1. Yield user message immediately
757
- history = history or []
758
- history.append([message, None])
759
- yield "", history
760
-
761
- # 2. Generate response
762
- if not report_context:
763
- bot_response = "⚠️ Please generate a report first."
764
- else:
765
- bot_response = await chat_with_report_logic(message, history, report_context)
766
-
767
- # 3. Update history with bot response
768
- history[-1][1] = bot_response
769
- yield "", history
770
-
771
- msg.submit(user_chat, [msg, chatbot, report_state], [msg, chatbot])
772
- send_btn.click(user_chat, [msg, chatbot, report_state], [msg, chatbot])
773
-
774
  if __name__ == "__main__":
775
  ui.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)
 
1
+ # # working code lets see
2
+ # # app.py (Universal Compatibility Version)
3
  # # --------------------------------------------------------------
4
 
5
  # import asyncio
6
  # import os
7
  # import time
8
  # import re
 
9
  # from typing import List
10
  # from xml.sax.saxutils import escape
11
 
 
 
 
 
 
 
 
 
12
  # from dotenv import load_dotenv
13
  # from pydantic import BaseModel
14
  # import gradio as gr
 
26
 
27
  # # PDF Generation
28
  # from reportlab.lib.pagesizes import letter
29
+ # from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
30
  # from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
31
  # from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT
32
 
 
98
  # return await robust_run(chat_agent, prompt)
99
 
100
  # # --------------------------------------------------------------
101
+ # # πŸ“‚ FILE GENERATORS
102
  # # --------------------------------------------------------------
103
 
104
  # def clean_text_for_pdf(text):
 
113
  # doc = SimpleDocTemplate(filename, pagesize=letter,
114
  # rightMargin=72, leftMargin=72,
115
  # topMargin=72, bottomMargin=72)
 
116
  # styles = getSampleStyleSheet()
117
  # styles.add(ParagraphStyle(name='ProTitle', parent=styles['Title'], fontName='Helvetica-Bold', fontSize=24, spaceAfter=24, alignment=TA_LEFT, textColor="black"))
118
  # styles.add(ParagraphStyle(name='ProH1', parent=styles['Heading1'], fontName='Helvetica-Bold', fontSize=16, spaceBefore=18, spaceAfter=12, alignment=TA_LEFT, textColor="black"))
 
123
  # for p in text.split('\n'):
124
  # p = p.strip()
125
  # if not p: continue
 
126
  # if p.startswith("# "):
127
  # story.append(Paragraph(clean_text_for_pdf(p.replace("# ", "")), styles["ProTitle"]))
128
  # elif p.startswith("## "):
 
135
  # else:
136
  # p_content = clean_text_for_pdf(p)
137
  # story.append(Paragraph(p_content, styles["ProBody"]))
 
138
  # doc.build(story)
139
  # return filename
140
  # except Exception as e:
 
141
  # return None
142
 
143
  # def create_docx(text: str, filename="research_report.docx"):
 
152
  # for line in text.split('\n'):
153
  # line = line.strip()
154
  # if not line: continue
 
155
  # if line.startswith("# "):
156
  # h = doc.add_heading(line.replace("# ", "").replace("**", ""), 0)
157
  # h.alignment = WD_ALIGN_PARAGRAPH.LEFT
 
163
  # doc.add_paragraph(line[2:], style='List Bullet')
164
  # else:
165
  # doc.add_paragraph(line)
 
166
  # doc.save(filename)
167
  # return filename
168
  # except Exception as e:
 
169
  # return None
170
 
171
  # def create_pptx(text: str, filename="presentation.pptx"):
 
173
  # prs = Presentation()
174
  # TITLE_SLIDE_LAYOUT = 0
175
  # CONTENT_SLIDE_LAYOUT = 1
 
176
  # lines = [l.strip() for l in text.split('\n') if l.strip()]
177
  # if not lines: return None
178
 
 
196
  # current_slide.shapes.title.text = clean_text
197
  # else:
198
  # current_slide.shapes.title.text = "Continued..."
 
199
  # body_text_frame = current_slide.placeholders[1].text_frame
200
  # body_text_frame.clear()
201
  # current_line_count = 0
 
202
  # if is_overflow and not is_heading:
203
  # p = body_text_frame.add_paragraph()
204
  # p.text = clean_text
 
222
  # p.font.size = PptxPt(18)
223
  # p.level = 0
224
  # current_line_count += 1
 
225
  # prs.save(filename)
226
  # return filename
227
  # except Exception as e:
 
228
  # return None
229
 
230
  # # --------------------------------------------------------------
 
240
 
241
  # planner_agent = Agent(name="Planner", instructions=f"{persona_instruction} Plan 3 search queries in English.", model=MODEL)
242
  # search_agent = Agent(name="Search", instructions=f"{persona_instruction} concise summary in English.", tools=[WebSearchTool()], model=MODEL)
243
+ # writer_agent = Agent(name="Writer", instructions=f"You are a professional translator and writer. Write output strictly in {language}.", model=MODEL)
 
 
 
 
244
 
245
  # yield f"πŸ•΅οΈβ€β™‚οΈ **Persona:** {persona_name}\n"
246
  # yield f"🌍 **Language:** {language}\n"
 
257
  # results.append(res)
258
 
259
  # context = "\n".join(results)
260
+
 
 
 
 
 
 
261
  # yield f"πŸ“ **Drafting:** Synthesizing report in {language}...\n\n"
 
 
262
 
263
+ # full_report = f"# Research Report: {query}\n\n"
264
+ # sections = ["Executive Summary", "Key Findings", "Conclusion"]
265
  # for sec in sections:
266
  # yield f"✍️ **Writing:** {sec}...\n"
267
+ # prompt = f"Write the '{sec}' section for '{query}'.\nContext: {context}\nIMPORTANT: Write strictly in {language}."
268
  # content = await robust_run(writer_agent, prompt)
269
  # full_report += f"## {sec}\n\n{content}\n\n"
270
  # yield f"## {sec}\n\n{content}\n\n"
 
278
  # pdf_path = create_pdf(full_report)
279
 
280
  # yield "βœ… **Done:** Process Completed Successfully.\n"
281
+
282
  # yield full_report, pdf_path, docx_path, pptx_path, full_report
283
 
284
  # # --------------------------------------------------------------
285
+ # # ✨ STANDARD UI
286
  # # --------------------------------------------------------------
287
 
288
  # with gr.Blocks(title="Deep Research AI") as ui:
 
324
  # with gr.TabItem("πŸ“„ Live Report"):
325
  # report_output = gr.Markdown(label="System Output")
326
  # with gr.TabItem("πŸ’¬ Chat with Report"):
327
+ # # CHANGED: REMOVED 'type' argument entirely. This is essential for older Gradio servers.
328
  # chatbot = gr.Chatbot(height=450, show_label=False)
329
  # with gr.Row():
330
  # msg = gr.Textbox(placeholder="Ask a question about the report...", show_label=False, scale=4)
 
347
  # outputs=[report_output, pdf_out, docx_out, pptx_out, report_state]
348
  # )
349
 
350
+ # # CHANGED: Classic Chat Logic (List of Lists)
351
+ # # This format: [[user_msg, bot_msg], [user_msg, bot_msg]] works on ALL versions.
352
  # async def user_chat(message, history, report_context):
353
+ # # 1. Yield user message immediately
354
  # history = history or []
355
+ # history.append([message, None])
 
 
356
  # yield "", history
357
 
358
+ # # 2. Generate response
359
+ # if not report_context:
360
+ # bot_response = "⚠️ Please generate a report first."
361
+ # else:
362
+ # bot_response = await chat_with_report_logic(message, history, report_context)
363
 
364
+ # # 3. Update history with bot response
365
  # history[-1][1] = bot_response
366
  # yield "", history
367
 
 
369
  # send_btn.click(user_chat, [msg, chatbot, report_state], [msg, chatbot])
370
 
371
  # if __name__ == "__main__":
372
+ # ui.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)
373
 
374
 
375
+ # chatbot removed code
376
 
377
+ # app.py
378
  # --------------------------------------------------------------
379
 
380
  import asyncio
 
457
  return WebSearchPlan(searches=items)
458
 
459
  # --------------------------------------------------------------
460
+ # πŸ“‚ FILE GENERATORS (FIXED ALIGNMENT)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
461
  # --------------------------------------------------------------
462
 
463
  def clean_text_for_pdf(text):
 
638
 
639
  yield "βœ… **Done:** Process Completed Successfully.\n"
640
 
641
+ # 4 ITEMS YIELDED (Report, PDF, Word, PPTX) - No chat state needed
642
+ yield full_report, pdf_path, docx_path, pptx_path
643
 
644
  # --------------------------------------------------------------
645
+ # ✨ STANDARD UI (No Chat, No Errors)
646
  # --------------------------------------------------------------
647
 
648
  with gr.Blocks(title="Deep Research AI") as ui:
649
 
 
 
650
  gr.Markdown("# πŸš€ Deep Research Engine")
651
  gr.Markdown("Autonomous agent for professional research, analysis, and document generation.")
652
 
 
678
  pptx_out = gr.DownloadButton(label="Download PowerPoint")
679
 
680
  with gr.Column(scale=2):
681
+ report_output = gr.Markdown(label="System Output")
 
 
 
 
 
 
 
 
682
 
683
  async def run_stream_wrapper(q, p, l):
684
  async for chunk in stream_workflow(q, p, l):
685
  if isinstance(chunk, tuple):
686
+ # Final output: Yield all files
687
  yield gr.update(value=chunk[0]), \
688
  gr.update(value=chunk[1]), \
689
  gr.update(value=chunk[2]), \
690
+ gr.update(value=chunk[3])
 
691
  else:
692
+ # Streaming output: Yield Text, None for files
693
+ yield chunk, None, None, None
694
 
695
  run_btn.click(
696
  fn=run_stream_wrapper,
697
  inputs=[query_input, persona_dropdown, language_dropdown],
698
+ outputs=[report_output, pdf_out, docx_out, pptx_out]
699
  )
700
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
701
  if __name__ == "__main__":
702
  ui.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)