Jpete20001 committed
Commit d9cba7a · verified · 1 Parent(s): b4b85eb

Create app.py

Files changed (1): app.py (+283 −0)
app.py ADDED
@@ -0,0 +1,283 @@
# app.py for Gradio App on Hugging Face Spaces
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import random
import time
import json
from typing import List, Dict, Tuple, Optional

# --- Configuration ---
MODEL_NAME = "Qwen/Qwen3-VL-4B-Instruct-FP8"
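# Note (assumption): this is a vision-language, FP8-quantized checkpoint. Depending on the
# installed transformers version it may require its dedicated VL model/processor classes and
# FP8-capable hardware; the AutoModelForCausalLM/float16 path below may not load it as-is.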
# For Hugging Face Spaces, loading the model once here is common.
# Performance on CPU will be slower.
print("Loading model...")
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16,  # Use float16 for FP8 model if supported, or bfloat16
        device_map="auto",          # Automatically map to available devices (CPU or GPU)
        trust_remote_code=True,
    ).eval()  # Set to evaluation mode
    print("Model loaded successfully.")
except Exception as e:
    print(f"Failed to load model: {e}")
    model = None
    tokenizer = None
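# If model loading fails, the app still launches; get_ai_response() below returns an error
# message instead of crashing the UI.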

# --- Simulated Cost Database ---
COSTS = {
    "ask_question": 10.0,
    "physical_exam": 25.0,
    "order_cbc": 50.0,
    "order_xray": 150.0,
    "administer_med": 30.0,
    "end_case": 0.0,
    "start_case": 0.0,  # Cost for starting is typically 0
}

# --- State Management Class ---
class MedicalSimulatorState:
    def __init__(self):
        self.patient_profile: Optional[Dict] = None
        self.chat_history: List[Tuple[Optional[str], Optional[str]]] = []  # Gradio chat format: [(user_msg, bot_msg), ...]
        self.vitals: Dict[str, float] = {"HR": 72.0, "BP_Sys": 120.0, "BP_Dia": 80.0, "Temp": 98.6, "O2_Sat": 98.0}
        self.total_cost: float = 0.0
        self.is_case_active: bool = False
        self.underlying_diagnosis: str = ""
        self.ordered_tests: Dict[str, str] = {}  # e.g., {"cbc": "pending", "xray": "result..."}
        # Add more state variables as needed

# --- Core AI Interaction Function ---
def get_ai_response(user_input: str, history: List[Tuple[Optional[str], Optional[str]]], patient_profile: Dict, underlying_diagnosis: str) -> str:
    if not model or not tokenizer:
        return "Error: AI model is not loaded."

    # Flatten the Gradio-style (user, bot) history pairs into a readable transcript
    history_lines = []
    for user_msg, bot_msg in history:
        if user_msg:
            history_lines.append(f"User: {user_msg}")
        if bot_msg:
            history_lines.append(f"Patient/System: {bot_msg}")
    history_str = "\n".join(history_lines)

    context = (
        f"Patient Profile: Name: {patient_profile['name']}, Age: {patient_profile['age']}, "
        f"Gender: {patient_profile['gender']}, Chief Complaint: {patient_profile['chief_complaint']}, "
        f"History: {patient_profile['history']}, Medications: {patient_profile['medications']}, "
        f"Allergies: {patient_profile['allergies']}, Social History: {patient_profile['social_history']}, "
        f"Financial Status: {patient_profile['financial_status']}, Code Status: {patient_profile['code_status']}\n"
        f"Current Chat History:\n{history_str}\n"
        f"User Action/Question: {user_input}\n"
    )
    system_prompt = (
        "You are an AI patient in a medical simulation. Role-play as the patient described in the profile. "
        "Be consistent with their history, demographics, and potential complaints. Respond to the user's input "
        "(which could be a question, exam instruction, or order). Your responses should simulate realistic patient "
        "dialogue and reactions, including potential anxiety or concerns. Do not reveal the secret diagnosis "
        f"'{underlying_diagnosis}' directly, but your responses should be consistent with having that condition. "
        "Respond as if you are the patient speaking."
    )

    try:
        # Use the tokenizer's chat template so the prompt matches the model's expected format
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": context},
        ]
        input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
        if input_ids.shape[1] > 32768:  # Check against the model's maximum context length
            return "Error: Input prompt is too long for the model."
        input_ids = input_ids.to(model.device)

        # Generate response
        generate_ids = model.generate(
            input_ids,
            max_new_tokens=512,  # Limit generated tokens
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )

        # Decode only the newly generated tokens
        response_text = tokenizer.decode(generate_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
        return response_text.strip()

    except Exception as e:
        print(f"Error during AI generation: {e}")
        return f"An error occurred while processing the AI response: {e}"

# --- Tool Functions (Modify State) ---
def start_case(case_type: str, state: MedicalSimulatorState) -> Tuple[MedicalSimulatorState, List[Tuple[Optional[str], Optional[str]]], str, str]:
    # --- Generate Patient Profile (Simplified Example) ---
    names = ["John Smith", "Emily Johnson", "Michael Brown", "Sarah Davis"]
    chief_complaints = {
        "General": ["Chest pain", "Shortness of breath", "Abdominal pain"],
        "Pediatric": ["Fever", "Cough", "Ear ache"],
        "Psychiatry": ["Feeling anxious", "Difficulty sleeping", "Low mood"],
        "Dual Diagnosis": ["Chest pain and feels anxious", "Abdominal pain after drinking"],
    }
    complaint_options = chief_complaints.get(case_type, chief_complaints["General"])
    name = random.choice(names)
    age = random.randint(18, 80) if case_type != "Pediatric" else random.randint(0, 17)
    gender = random.choice(["Male", "Female"])
    chief_complaint = random.choice(complaint_options)
    # Define the underlying diagnosis based on the complaint or case type
    diag_map = {
        "Chest pain": "Acute Myocardial Infarction",
        "Shortness of breath": "Pneumonia",
        "Abdominal pain": "Appendicitis",
        "Fever": "Viral Infection",
        "Cough": "Bronchitis",
        "Ear ache": "Otitis Media",
        "Feeling anxious": "Generalized Anxiety Disorder",
        "Difficulty sleeping": "Insomnia",
        "Low mood": "Major Depressive Disorder",
        "Chest pain and feels anxious": "Acute MI with Anxiety",
        "Abdominal pain after drinking": "Alcoholic Gastritis with Substance Use Disorder",
    }
    underlying_diagnosis = diag_map.get(chief_complaint, "Unknown Condition")

    patient = {
        "name": name,
        "age": age,
        "gender": gender,
        "chief_complaint": chief_complaint,
        "history": "Patient history relevant to complaint.",
        "medications": "Current medications.",
        "allergies": "Known allergies (e.g., Penicillin).",
        "social_history": "Social history details.",
        "financial_status": "Insurance status.",
        "code_status": "Full Code",
        "language": "English",
    }

    # Reset state. System and patient messages use the bot slot of the (user, bot)
    # chat tuple so the Chatbot component renders them as replies.
    state.patient_profile = patient
    state.chat_history = [
        (None, "System: New Case Started."),
        (None, f"Hi, I'm {patient['name']}. I've been having {patient['chief_complaint'].lower()}."),
    ]
    state.vitals = {"HR": 72.0, "BP_Sys": 120.0, "BP_Dia": 80.0, "Temp": 98.6, "O2_Sat": 98.0}
    state.total_cost = 0.0
    state.is_case_active = True
    state.underlying_diagnosis = underlying_diagnosis
    state.ordered_tests = {}

    # Render the chart as a Markdown bullet list so each field appears on its own line
    profile_str = "\n".join([f"- **{k.replace('_', ' ').title()}**: {v}" for k, v in patient.items()])
    return state, state.chat_history, f"${state.total_cost:.2f}", profile_str


def handle_chat(user_input: str, history: List[Tuple[Optional[str], Optional[str]]], state: MedicalSimulatorState) -> Tuple[List[Tuple[Optional[str], Optional[str]]], str]:
    if not state.is_case_active or not user_input.strip():
        return history, f"${state.total_cost:.2f}"

    # Add user message to history (bot reply still pending)
    history.append((user_input, None))

    # Get AI response; pass history without the user's new message yet
    ai_response = get_ai_response(user_input, history[:-1], state.patient_profile, state.underlying_diagnosis)

    # Add AI response to history by updating the last entry
    history[-1] = (user_input, ai_response)

    return history, f"${state.total_cost:.2f}"  # Cost doesn't change here, just return current


def use_tool(tool_name: str, state: MedicalSimulatorState) -> Tuple[MedicalSimulatorState, List[Tuple[Optional[str], Optional[str]]], str]:
    if not state.is_case_active:
        return state, state.chat_history, f"${state.total_cost:.2f}"

    cost = COSTS.get(tool_name, 0.0)
    state.total_cost += cost

    # As in start_case, system/lab/imaging/patient messages use the bot slot of the chat tuple.
    if tool_name == "ask_question":
        ai_response = get_ai_response("The user asks a general question to gather more history.", state.chat_history, state.patient_profile, state.underlying_diagnosis)
        state.chat_history.append((None, f"System: [Action: {tool_name}, Cost: ${cost:.2f}]"))
        state.chat_history.append((None, ai_response))

    elif tool_name == "order_cbc":
        state.chat_history.append((None, f"System: [Action: {tool_name}, Cost: ${cost:.2f}]"))
        state.chat_history.append((None, "Lab: CBC Ordered. Result pending..."))
        # Simulate the result appearing after a short processing delay
        time.sleep(0.5)
        state.chat_history.append((None, "Lab: CBC Result: WBC slightly elevated, otherwise unremarkable."))

    elif tool_name == "administer_med":
        med_name = "Medication X"  # Simplified; could take user input
        state.chat_history.append((None, f"System: [Action: {tool_name} - {med_name}, Cost: ${cost:.2f}]"))
        # A real app would check the patient's allergies here
        state.chat_history.append((None, f"Okay, I took the {med_name}."))

    elif tool_name == "physical_exam":
        state.chat_history.append((None, f"System: [Action: {tool_name}, Cost: ${cost:.2f}]"))
        state.chat_history.append((None, "System: Physical Exam Performed. Findings noted."))

    elif tool_name == "order_xray":
        state.chat_history.append((None, f"System: [Action: {tool_name}, Cost: ${cost:.2f}]"))
        state.chat_history.append((None, "Imaging: X-Ray Ordered. Result pending..."))
        # Placeholder for an image result (could be a URL or base64 string)
        time.sleep(0.5)
        state.chat_history.append((None, "Imaging: Chest X-Ray Result: Normal lung fields, no acute findings. (Placeholder Image)"))

    # Add other tools as needed...

    return state, state.chat_history, f"${state.total_cost:.2f}"


def end_case(state: MedicalSimulatorState) -> Tuple[MedicalSimulatorState, List[Tuple[Optional[str], Optional[str]]], str, str]:
    profile_str = "\n".join([f"- **{k.replace('_', ' ').title()}**: {v}" for k, v in (state.patient_profile or {}).items()])

    if not state.is_case_active:
        # Return current state unchanged if no case is active
        return state, state.chat_history, f"${state.total_cost:.2f}", profile_str

    state.chat_history.append((None, "System: Case Ended by User."))
    state.is_case_active = False

    # In a full implementation, trigger the evaluation logic here
    # evaluation = run_evaluation(state)  # Placeholder
    # state.chat_history.append((None, f"System: Evaluation: {evaluation}"))  # Add evaluation to chat or a separate component

    return state, state.chat_history, f"${state.total_cost:.2f}", profile_str


# --- Gradio Interface ---
with gr.Blocks(title="Advanced Medical Simulator") as demo:
    # Session state holding the simulator state across interactions.
    # gr.State deep-copies this default value for each user session.
    state = gr.State(MedicalSimulatorState())

    gr.Markdown("# Advanced Medical Simulator")

    with gr.Row():
        with gr.Column(scale=2):
            # Chat interface
            chatbot = gr.Chatbot(label="Patient Interaction", height=400, bubble_full_width=False)
            with gr.Row():
                user_input = gr.Textbox(label="Your Action / Question", placeholder="Type your action or question here...", scale=4)
                submit_btn = gr.Button("Submit", scale=1)

        with gr.Column(scale=1):
            # Patient chart / info
            patient_chart = gr.Markdown(label="Patient Chart", value="Click 'Start New Case' to begin.")
            cost_display = gr.Textbox(label="Total Cost", value="$0.00", interactive=False)

    with gr.Row():
        # Tool panel
        with gr.Column():
            gr.Markdown("### Tools")
            with gr.Row():
                ask_btn = gr.Button("Ask Question ($10)")
                exam_btn = gr.Button("Physical Exam ($25)")
            with gr.Row():
                cbc_btn = gr.Button("Order CBC ($50)")
                xray_btn = gr.Button("Order X-Ray ($150)")
            with gr.Row():
                med_btn = gr.Button("Administer Med ($30)")
                end_btn = gr.Button("End Case", variant="stop")  # Red button for ending

    with gr.Row():
        # Case controls (the case type comes from the dropdown)
        start_case_btn = gr.Button("Start New Case")
        case_type_dropdown = gr.Dropdown(["General", "Psychiatry", "Pediatric", "Dual Diagnosis"], label="Case Type", value="General")

    # Event handling
    start_case_btn.click(
        fn=start_case,
        inputs=[case_type_dropdown, state],
        outputs=[state, chatbot, cost_display, patient_chart],
    )

    submit_btn.click(
        fn=handle_chat,
        inputs=[user_input, chatbot, state],
        outputs=[chatbot, cost_display],
    ).then(
        fn=lambda: "",  # Clear the input textbox after submission
        inputs=[],
        outputs=[user_input],
    )
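    # Note: the same handlers could also be wired to user_input.submit(...) so that pressing
    # Enter in the textbox sends the message as well.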

    ask_btn.click(fn=lambda s: use_tool("ask_question", s), inputs=[state], outputs=[state, chatbot, cost_display])
    exam_btn.click(fn=lambda s: use_tool("physical_exam", s), inputs=[state], outputs=[state, chatbot, cost_display])
    cbc_btn.click(fn=lambda s: use_tool("order_cbc", s), inputs=[state], outputs=[state, chatbot, cost_display])
    xray_btn.click(fn=lambda s: use_tool("order_xray", s), inputs=[state], outputs=[state, chatbot, cost_display])
    med_btn.click(fn=lambda s: use_tool("administer_med", s), inputs=[state], outputs=[state, chatbot, cost_display])
    end_btn.click(fn=end_case, inputs=[state], outputs=[state, chatbot, cost_display, patient_chart])

# Launch the app
# For Hugging Face Spaces, Gradio handles the launch.
demo.launch()
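
Note: a Space running this app also needs its Python dependencies declared, typically in a requirements.txt alongside app.py. Based on the imports above and the device_map="auto" call (which relies on accelerate), a minimal, unpinned sketch would be:

gradio
torch
transformers
accelerate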