# # app.py (Universal Compatibility Version, with "Chat with Report"): previous working version, kept commented out for reference
# # --------------------------------------------------------------
# import asyncio
# import os
# import time
# import re
# from typing import List
# from xml.sax.saxutils import escape
# from dotenv import load_dotenv
# from pydantic import BaseModel
# import gradio as gr
# # Agents
# from agents import Agent, WebSearchTool, Runner
# from agents.model_settings import ModelSettings
# # Document Generators
# from docx import Document
# from docx.shared import Pt
# from docx.enum.text import WD_ALIGN_PARAGRAPH
# from pptx import Presentation
# from pptx.util import Inches, Pt as PptxPt
# # PDF Generation
# from reportlab.lib.pagesizes import letter
# from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
# from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
# from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT
# # -----------------------
# # Load Environment
# # -----------------------
# load_dotenv(override=True)
# if os.getenv("OPENAI_API_KEY") is None:
# os.environ["OPENAI_API_KEY"] = os.getenv("OPENROUTER_API_KEY")
# os.environ["OPENAI_BASE_URL"] = "https://openrouter.ai/api/v1"
# MODEL = "openai/gpt-oss-20b:free"
# # --------------------------------------------------------------
# # Data Models
# # --------------------------------------------------------------
# class WebSearchItem(BaseModel):
# reason: str
# query: str
# class WebSearchPlan(BaseModel):
# searches: List[WebSearchItem]
# PERSONAS = {
# "General Researcher": "You are a helpful assistant. Write in a neutral, informative tone.",
# "Market Analyst": "You are a Wall Street analyst. Focus on market trends, numbers, growth percentages. Use professional business language.",
# "Academic Scientist": "You are a strict researcher. Focus on proven facts, citations, technical details. Use formal academic language.",
# "Tech Journalist": "You are a tech blogger. Write in an engaging, punchy style. Use analogies and focus on the 'why this matters' aspect."
# }
# # --------------------------------------------------------------
# # Utilities
# # --------------------------------------------------------------
# async def robust_run(agent: Agent, prompt: str, max_attempts=3):
# last = ""
# for n in range(max_attempts):
# try:
# r = await Runner.run(agent, prompt)
# out = (r.final_output or "").strip()
# if out: return out
# last = out
# except Exception:
# time.sleep(1)
# return last
# def parse_planner_output_to_plan(text: str) -> WebSearchPlan:
# lines = [l.replace("- ", "").strip() for l in text.splitlines() if l.strip() and not l[0].isdigit()]
# if not lines: lines = ["Latest AI trends", "Future of AI", "AI in 2025"]
# items = [WebSearchItem(reason="Planned search", query=q) for q in lines[:3]]
# return WebSearchPlan(searches=items)
# # --------------------------------------------------------------
# # 💬 CHAT LOGIC
# # --------------------------------------------------------------
# async def chat_with_report_logic(user_message, history, report_context):
# if not report_context:
# return "⚠️ No report available. Please generate one first."
# chat_agent = Agent(
# name="ReportChat",
# instructions="You are a helpful assistant. Answer based ONLY on the provided Report Context. Be concise.",
# model=MODEL
# )
# prompt = f"CONTEXT:\n{report_context}\n\nUSER QUESTION:\n{user_message}\n\nANSWER:"
# return await robust_run(chat_agent, prompt)
# # --------------------------------------------------------------
# # 📂 FILE GENERATORS
# # --------------------------------------------------------------
# def clean_text_for_pdf(text):
# if not text: return ""
# safe_text = escape(text)
# safe_text = re.sub(r'\*\*(.*?)\*\*', r'\1', safe_text)
# safe_text = safe_text.replace('\n', '<br/>')
# return safe_text
# def create_pdf(text: str, filename="research_report.pdf"):
# try:
# doc = SimpleDocTemplate(filename, pagesize=letter,
# rightMargin=72, leftMargin=72,
# topMargin=72, bottomMargin=72)
# styles = getSampleStyleSheet()
# styles.add(ParagraphStyle(name='ProTitle', parent=styles['Title'], fontName='Helvetica-Bold', fontSize=24, spaceAfter=24, alignment=TA_LEFT, textColor="black"))
# styles.add(ParagraphStyle(name='ProH1', parent=styles['Heading1'], fontName='Helvetica-Bold', fontSize=16, spaceBefore=18, spaceAfter=12, alignment=TA_LEFT, textColor="black"))
# styles.add(ParagraphStyle(name='ProH2', parent=styles['Heading2'], fontName='Helvetica-Bold', fontSize=13, spaceBefore=12, spaceAfter=6, alignment=TA_LEFT, textColor="black"))
# styles.add(ParagraphStyle(name='ProBody', parent=styles['Normal'], fontName='Helvetica', fontSize=11, leading=15, spaceAfter=10, alignment=TA_JUSTIFY, textColor="black"))
# story = []
# for p in text.split('\n'):
# p = p.strip()
# if not p: continue
# if p.startswith("# "):
# story.append(Paragraph(clean_text_for_pdf(p.replace("# ", "")), styles["ProTitle"]))
# elif p.startswith("## "):
# story.append(Paragraph(clean_text_for_pdf(p.replace("## ", "")), styles["ProH1"]))
# elif p.startswith("### "):
# story.append(Paragraph(clean_text_for_pdf(p.replace("### ", "")), styles["ProH2"]))
# else:
# if p.startswith("- "):
# p_content = f"• {clean_text_for_pdf(p[2:])}"
# else:
# p_content = clean_text_for_pdf(p)
# story.append(Paragraph(p_content, styles["ProBody"]))
# doc.build(story)
# return filename
# except Exception as e:
# return None
# def create_docx(text: str, filename="research_report.docx"):
# try:
# doc = Document()
# style = doc.styles['Normal']
# font = style.font
# font.name = 'Arial'
# font.size = Pt(11)
# style.paragraph_format.space_after = Pt(12)
# for line in text.split('\n'):
# line = line.strip()
# if not line: continue
# if line.startswith("# "):
# h = doc.add_heading(line.replace("# ", "").replace("**", ""), 0)
# h.alignment = WD_ALIGN_PARAGRAPH.LEFT
# elif line.startswith("## "):
# doc.add_heading(line.replace("## ", "").replace("**", ""), 1)
# elif line.startswith("### "):
# doc.add_heading(line.replace("### ", "").replace("**", ""), 2)
# elif line.startswith("- "):
# doc.add_paragraph(line[2:], style='List Bullet')
# else:
# doc.add_paragraph(line)
# doc.save(filename)
# return filename
# except Exception as e:
# return None
# def create_pptx(text: str, filename="presentation.pptx"):
# try:
# prs = Presentation()
# TITLE_SLIDE_LAYOUT = 0
# CONTENT_SLIDE_LAYOUT = 1
# lines = [l.strip() for l in text.split('\n') if l.strip()]
# if not lines: return None
# slide = prs.slides.add_slide(prs.slide_layouts[TITLE_SLIDE_LAYOUT])
# slide.shapes.title.text = lines[0].replace("# ", "").replace("**", "")
# slide.placeholders[1].text = "Generated by Deep Research AI"
# current_slide = None
# body_text_frame = None
# MAX_LINES_PER_SLIDE = 7
# current_line_count = 0
# for line in lines[1:]:
# clean_text = line.replace("**", "").replace("## ", "").replace("### ", "").replace("- ", "")
# is_heading = line.startswith("## ")
# is_overflow = current_line_count >= MAX_LINES_PER_SLIDE
# if is_heading or is_overflow:
# current_slide = prs.slides.add_slide(prs.slide_layouts[CONTENT_SLIDE_LAYOUT])
# if is_heading:
# current_slide.shapes.title.text = clean_text
# else:
# current_slide.shapes.title.text = "Continued..."
# body_text_frame = current_slide.placeholders[1].text_frame
# body_text_frame.clear()
# current_line_count = 0
# if is_overflow and not is_heading:
# p = body_text_frame.add_paragraph()
# p.text = clean_text
# p.level = 0
# current_line_count += 1
# continue
# if body_text_frame:
# p = body_text_frame.add_paragraph()
# p.text = clean_text
# if line.startswith("### "):
# p.font.bold = True
# p.font.size = PptxPt(20)
# p.space_before = PptxPt(10)
# p.level = 0
# elif line.startswith("- "):
# p.font.size = PptxPt(18)
# p.space_before = PptxPt(6)
# p.level = 0
# else:
# p.font.size = PptxPt(18)
# p.level = 0
# current_line_count += 1
# prs.save(filename)
# return filename
# except Exception as e:
# return None
# # --------------------------------------------------------------
# # Main Workflow
# # --------------------------------------------------------------
# async def stream_workflow(query: str, persona_name: str, language: str):
# if not query.strip():
# yield "⚠️ Please enter a topic."
# return
# persona_instruction = PERSONAS.get(persona_name, PERSONAS["General Researcher"])
# planner_agent = Agent(name="Planner", instructions=f"{persona_instruction} Plan 3 search queries in English.", model=MODEL)
# search_agent = Agent(name="Search", instructions=f"{persona_instruction} concise summary in English.", tools=[WebSearchTool()], model=MODEL)
# writer_agent = Agent(name="Writer", instructions=f"You are a professional translator and writer. Write output strictly in {language}.", model=MODEL)
# yield f"🕵️♂️ **Persona:** {persona_name}\n"
# yield f"🌍 **Language:** {language}\n"
# yield f"🔍 **Status:** Analyzing '{query}'...\n\n"
# raw_plan = await robust_run(planner_agent, f"Plan searches for: {query}")
# plan = parse_planner_output_to_plan(raw_plan)
# yield f"📋 **Plan:** {len(plan.searches)} steps defined.\n"
# results = []
# for item in plan.searches:
# yield f"🌐 **Searching:** {item.query}...\n"
# res = await robust_run(search_agent, f"Search: {item.query}")
# results.append(res)
# context = "\n".join(results)
# yield f"📝 **Drafting:** Synthesizing report in {language}...\n\n"
# full_report = f"# Research Report: {query}\n\n"
# sections = ["Executive Summary", "Key Findings", "Conclusion"]
# for sec in sections:
# yield f"✍️ **Writing:** {sec}...\n"
# prompt = f"Write the '{sec}' section for '{query}'.\nContext: {context}\nIMPORTANT: Write strictly in {language}."
# content = await robust_run(writer_agent, prompt)
# full_report += f"## {sec}\n\n{content}\n\n"
# yield f"## {sec}\n\n{content}\n\n"
# yield "💾 **Finalizing:** Generating documents...\n"
# docx_path = create_docx(full_report)
# pptx_path = create_pptx(full_report)
# pdf_path = None
# if language == "English":
# pdf_path = create_pdf(full_report)
# yield "✅ **Done:** Process Completed Successfully.\n"
# yield full_report, pdf_path, docx_path, pptx_path, full_report
# # --------------------------------------------------------------
# # ✨ STANDARD UI
# # --------------------------------------------------------------
# with gr.Blocks(title="Deep Research AI") as ui:
# report_state = gr.State(value="")
# gr.Markdown("# 🚀 Deep Research Engine")
# gr.Markdown("Autonomous agent for professional research, analysis, and document generation.")
# with gr.Row():
# with gr.Column(scale=1):
# with gr.Group():
# query_input = gr.Textbox(label="Research Topic", placeholder="Enter your topic here...", lines=3)
# with gr.Row():
# persona_dropdown = gr.Dropdown(
# choices=list(PERSONAS.keys()),
# value="General Researcher",
# label="Persona"
# )
# language_dropdown = gr.Dropdown(
# choices=["English", "Tamil", "Hindi", "French"],
# value="English",
# label="Language"
# )
# run_btn = gr.Button("Generate Report", variant="primary")
# with gr.Group():
# gr.Markdown("### 📂 Export Results")
# with gr.Row():
# pdf_out = gr.DownloadButton(label="Download PDF (English Only)")
# docx_out = gr.DownloadButton(label="Download Word")
# pptx_out = gr.DownloadButton(label="Download PowerPoint")
# with gr.Column(scale=2):
# with gr.Tabs():
# with gr.TabItem("📄 Live Report"):
# report_output = gr.Markdown(label="System Output")
# with gr.TabItem("💬 Chat with Report"):
# # CHANGED: REMOVED 'type' argument entirely. This is essential for older Gradio servers.
# chatbot = gr.Chatbot(height=450, show_label=False)
# with gr.Row():
# msg = gr.Textbox(placeholder="Ask a question about the report...", show_label=False, scale=4)
# send_btn = gr.Button("Send", scale=1)
# async def run_stream_wrapper(q, p, l):
# async for chunk in stream_workflow(q, p, l):
# if isinstance(chunk, tuple):
# yield gr.update(value=chunk[0]), \
# gr.update(value=chunk[1]), \
# gr.update(value=chunk[2]), \
# gr.update(value=chunk[3]), \
# chunk[4]
# else:
# yield chunk, None, None, None, None
# run_btn.click(
# fn=run_stream_wrapper,
# inputs=[query_input, persona_dropdown, language_dropdown],
# outputs=[report_output, pdf_out, docx_out, pptx_out, report_state]
# )
# # CHANGED: Classic Chat Logic (List of Lists)
# # This format: [[user_msg, bot_msg], [user_msg, bot_msg]] works on ALL versions.
# async def user_chat(message, history, report_context):
# # 1. Yield user message immediately
# history = history or []
# history.append([message, None])
# yield "", history
# # 2. Generate response
# if not report_context:
# bot_response = "⚠️ Please generate a report first."
# else:
# bot_response = await chat_with_report_logic(message, history, report_context)
# # 3. Update history with bot response
# history[-1][1] = bot_response
# yield "", history
# msg.submit(user_chat, [msg, chatbot, report_state], [msg, chatbot])
# send_btn.click(user_chat, [msg, chatbot, report_state], [msg, chatbot])
# if __name__ == "__main__":
# ui.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)
# Current version (chatbot removed)
# app.py
# --------------------------------------------------------------
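# Usage: put OPENAI_API_KEY (or OPENROUTER_API_KEY) in a .env file, run `python app.py`,
# then open http://localhost:7860 in a browser.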
import asyncio
import os
import time
import re
from typing import List
from xml.sax.saxutils import escape
from dotenv import load_dotenv
from pydantic import BaseModel
import gradio as gr
# Agents
from agents import Agent, WebSearchTool, Runner
from agents.model_settings import ModelSettings
# Document Generators
from docx import Document
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH
from pptx import Presentation
from pptx.util import Inches, Pt as PptxPt
# PDF Generation
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT
# -----------------------
# Load Environment
# -----------------------
load_dotenv(override=True)
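# If no OpenAI key is set, fall back to OpenRouter's OpenAI-compatible endpoint
# (assumes OPENROUTER_API_KEY is defined in the .env file).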
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = os.getenv("OPENROUTER_API_KEY")
os.environ["OPENAI_BASE_URL"] = "https://openrouter.ai/api/v1"
MODEL = "openai/gpt-oss-20b:free"
# --------------------------------------------------------------
# Data Models
# --------------------------------------------------------------
class WebSearchItem(BaseModel):
reason: str
query: str
class WebSearchPlan(BaseModel):
searches: List[WebSearchItem]
PERSONAS = {
"General Researcher": "You are a helpful assistant. Write in a neutral, informative tone.",
"Market Analyst": "You are a Wall Street analyst. Focus on market trends, numbers, growth percentages. Use professional business language.",
"Academic Scientist": "You are a strict researcher. Focus on proven facts, citations, technical details. Use formal academic language.",
"Tech Journalist": "You are a tech blogger. Write in an engaging, punchy style. Use analogies and focus on the 'why this matters' aspect."
}
# --------------------------------------------------------------
# Utilities
# --------------------------------------------------------------
async def robust_run(agent: Agent, prompt: str, max_attempts=3):
    """Run an agent with up to max_attempts retries; return the last (possibly empty) output if all attempts fail."""
    last = ""
    for _ in range(max_attempts):
        try:
            r = await Runner.run(agent, prompt)
            out = (r.final_output or "").strip()
            if out:
                return out
            last = out
        except Exception:
            await asyncio.sleep(1)  # back off without blocking the event loop
    return last
def parse_planner_output_to_plan(text: str) -> WebSearchPlan:
    """Turn the planner's free-text list into a WebSearchPlan, stripping "- " and "1." style prefixes."""
    lines = [re.sub(r'^(\d+[.)]\s*|-\s*)', '', l.strip()) for l in text.splitlines() if l.strip()]
    lines = [l for l in lines if l]
    if not lines:
        lines = ["Latest AI trends", "Future of AI", "AI in 2025"]
    items = [WebSearchItem(reason="Planned search", query=q) for q in lines[:3]]
    return WebSearchPlan(searches=items)
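# Example (hypothetical planner output): "1. LLM market size 2025\n2. Top AI chip vendors"
# -> queries ["LLM market size 2025", "Top AI chip vendors"]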
# --------------------------------------------------------------
# 📂 FILE GENERATORS (FIXED ALIGNMENT)
# --------------------------------------------------------------
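# ReportLab's Paragraph parses a small XML-like markup, so text is XML-escaped first,
# "**bold**" markers are stripped, and newlines become <br/> tags.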
def clean_text_for_pdf(text):
if not text: return ""
safe_text = escape(text)
safe_text = re.sub(r'\*\*(.*?)\*\*', r'\1', safe_text)
    safe_text = safe_text.replace('\n', '<br/>')  # ReportLab Paragraph line break
return safe_text
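# e.g. clean_text_for_pdf("**Bold** & more\nnext") -> "Bold &amp; more<br/>next"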
def create_pdf(text: str, filename="research_report.pdf"):
try:
doc = SimpleDocTemplate(filename, pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=72)
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name='ProTitle', parent=styles['Title'], fontName='Helvetica-Bold', fontSize=24, spaceAfter=24, alignment=TA_LEFT, textColor="black"))
styles.add(ParagraphStyle(name='ProH1', parent=styles['Heading1'], fontName='Helvetica-Bold', fontSize=16, spaceBefore=18, spaceAfter=12, alignment=TA_LEFT, textColor="black"))
styles.add(ParagraphStyle(name='ProH2', parent=styles['Heading2'], fontName='Helvetica-Bold', fontSize=13, spaceBefore=12, spaceAfter=6, alignment=TA_LEFT, textColor="black"))
styles.add(ParagraphStyle(name='ProBody', parent=styles['Normal'], fontName='Helvetica', fontSize=11, leading=15, spaceAfter=10, alignment=TA_JUSTIFY, textColor="black"))
story = []
for p in text.split('\n'):
p = p.strip()
if not p: continue
if p.startswith("# "):
story.append(Paragraph(clean_text_for_pdf(p.replace("# ", "")), styles["ProTitle"]))
elif p.startswith("## "):
story.append(Paragraph(clean_text_for_pdf(p.replace("## ", "")), styles["ProH1"]))
elif p.startswith("### "):
story.append(Paragraph(clean_text_for_pdf(p.replace("### ", "")), styles["ProH2"]))
else:
if p.startswith("- "):
p_content = f"• {clean_text_for_pdf(p[2:])}"
else:
p_content = clean_text_for_pdf(p)
story.append(Paragraph(p_content, styles["ProBody"]))
doc.build(story)
return filename
except Exception as e:
return None
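# Word export: "#", "##", "###" headings map to Word heading levels 0-2, "- " lines become
# bullets, and everything else is a body paragraph in 11pt Arial.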
def create_docx(text: str, filename="research_report.docx"):
try:
doc = Document()
style = doc.styles['Normal']
font = style.font
font.name = 'Arial'
font.size = Pt(11)
style.paragraph_format.space_after = Pt(12)
for line in text.split('\n'):
line = line.strip()
if not line: continue
if line.startswith("# "):
h = doc.add_heading(line.replace("# ", "").replace("**", ""), 0)
h.alignment = WD_ALIGN_PARAGRAPH.LEFT
elif line.startswith("## "):
doc.add_heading(line.replace("## ", "").replace("**", ""), 1)
elif line.startswith("### "):
doc.add_heading(line.replace("### ", "").replace("**", ""), 2)
elif line.startswith("- "):
doc.add_paragraph(line[2:], style='List Bullet')
else:
doc.add_paragraph(line)
doc.save(filename)
return filename
except Exception as e:
return None
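# Slide builder: the first line becomes the title slide; each "## " heading starts a new
# content slide, and sections longer than MAX_LINES_PER_SLIDE bullets spill onto
# "Continued..." slides.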
def create_pptx(text: str, filename="presentation.pptx"):
try:
prs = Presentation()
TITLE_SLIDE_LAYOUT = 0
CONTENT_SLIDE_LAYOUT = 1
lines = [l.strip() for l in text.split('\n') if l.strip()]
if not lines: return None
slide = prs.slides.add_slide(prs.slide_layouts[TITLE_SLIDE_LAYOUT])
slide.shapes.title.text = lines[0].replace("# ", "").replace("**", "")
slide.placeholders[1].text = "Generated by Deep Research AI"
current_slide = None
body_text_frame = None
MAX_LINES_PER_SLIDE = 7
current_line_count = 0
for line in lines[1:]:
clean_text = line.replace("**", "").replace("## ", "").replace("### ", "").replace("- ", "")
is_heading = line.startswith("## ")
is_overflow = current_line_count >= MAX_LINES_PER_SLIDE
if is_heading or is_overflow:
current_slide = prs.slides.add_slide(prs.slide_layouts[CONTENT_SLIDE_LAYOUT])
if is_heading:
current_slide.shapes.title.text = clean_text
else:
current_slide.shapes.title.text = "Continued..."
body_text_frame = current_slide.placeholders[1].text_frame
body_text_frame.clear()
current_line_count = 0
if is_overflow and not is_heading:
p = body_text_frame.add_paragraph()
p.text = clean_text
p.level = 0
current_line_count += 1
continue
if body_text_frame:
p = body_text_frame.add_paragraph()
p.text = clean_text
if line.startswith("### "):
p.font.bold = True
p.font.size = PptxPt(20)
p.space_before = PptxPt(10)
p.level = 0
elif line.startswith("- "):
p.font.size = PptxPt(18)
p.space_before = PptxPt(6)
p.level = 0
else:
p.font.size = PptxPt(18)
p.level = 0
current_line_count += 1
prs.save(filename)
return filename
except Exception as e:
return None
# --------------------------------------------------------------
# Main Workflow
# --------------------------------------------------------------
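# Pipeline: plan 3 searches -> run each search -> draft the report section by section
# -> export DOCX and PPTX, plus PDF when the output language is English.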
async def stream_workflow(query: str, persona_name: str, language: str):
if not query.strip():
yield "⚠️ Please enter a topic."
return
persona_instruction = PERSONAS.get(persona_name, PERSONAS["General Researcher"])
planner_agent = Agent(name="Planner", instructions=f"{persona_instruction} Plan 3 search queries in English.", model=MODEL)
    search_agent = Agent(name="Search", instructions=f"{persona_instruction} Search the web and write a concise summary in English.", tools=[WebSearchTool()], model=MODEL)
writer_agent = Agent(name="Writer", instructions=f"You are a professional translator and writer. Write output strictly in {language}.", model=MODEL)
yield f"🕵️♂️ **Persona:** {persona_name}\n"
yield f"🌍 **Language:** {language}\n"
yield f"🔍 **Status:** Analyzing '{query}'...\n\n"
raw_plan = await robust_run(planner_agent, f"Plan searches for: {query}")
plan = parse_planner_output_to_plan(raw_plan)
yield f"📋 **Plan:** {len(plan.searches)} steps defined.\n"
results = []
for item in plan.searches:
yield f"🌐 **Searching:** {item.query}...\n"
res = await robust_run(search_agent, f"Search: {item.query}")
results.append(res)
context = "\n".join(results)
yield f"📝 **Drafting:** Synthesizing report in {language}...\n\n"
full_report = f"# Research Report: {query}\n\n"
sections = ["Executive Summary", "Key Findings", "Conclusion"]
for sec in sections:
yield f"✍️ **Writing:** {sec}...\n"
prompt = f"Write the '{sec}' section for '{query}'.\nContext: {context}\nIMPORTANT: Write strictly in {language}."
content = await robust_run(writer_agent, prompt)
full_report += f"## {sec}\n\n{content}\n\n"
yield f"## {sec}\n\n{content}\n\n"
yield "💾 **Finalizing:** Generating documents...\n"
docx_path = create_docx(full_report)
pptx_path = create_pptx(full_report)
pdf_path = None
if language == "English":
pdf_path = create_pdf(full_report)
yield "✅ **Done:** Process Completed Successfully.\n"
    # Final yield: the full report plus the PDF, Word, and PPTX file paths (no chat state needed)
yield full_report, pdf_path, docx_path, pptx_path
# --------------------------------------------------------------
# ✨ STANDARD UI (No Chat, No Errors)
# --------------------------------------------------------------
with gr.Blocks(title="Deep Research AI") as ui:
gr.Markdown("# 🚀 Deep Research Engine")
gr.Markdown("Autonomous agent for professional research, analysis, and document generation.")
with gr.Row():
with gr.Column(scale=1):
with gr.Group():
query_input = gr.Textbox(label="Research Topic", placeholder="Enter your topic here...", lines=3)
with gr.Row():
persona_dropdown = gr.Dropdown(
choices=list(PERSONAS.keys()),
value="General Researcher",
label="Persona"
)
language_dropdown = gr.Dropdown(
choices=["English", "Tamil", "Hindi", "French"],
value="English",
label="Language"
)
run_btn = gr.Button("Generate Report", variant="primary")
with gr.Group():
gr.Markdown("### 📂 Export Results")
with gr.Row():
pdf_out = gr.DownloadButton(label="Download PDF (English Only)")
docx_out = gr.DownloadButton(label="Download Word")
pptx_out = gr.DownloadButton(label="Download PowerPoint")
with gr.Column(scale=2):
report_output = gr.Markdown(label="System Output")
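    # stream_workflow yields plain status strings while running and a final 4-tuple
    # (report_markdown, pdf_path, docx_path, pptx_path); this wrapper maps both shapes
    # onto the four UI outputs below.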
async def run_stream_wrapper(q, p, l):
async for chunk in stream_workflow(q, p, l):
if isinstance(chunk, tuple):
# Final output: Yield all files
yield gr.update(value=chunk[0]), \
gr.update(value=chunk[1]), \
gr.update(value=chunk[2]), \
gr.update(value=chunk[3])
else:
# Streaming output: Yield Text, None for files
yield chunk, None, None, None
run_btn.click(
fn=run_stream_wrapper,
inputs=[query_input, persona_dropdown, language_dropdown],
outputs=[report_output, pdf_out, docx_out, pptx_out]
)
if __name__ == "__main__":
ui.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)