massgen 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of massgen might be problematic. Click here for more details.

Files changed (76)
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,571 @@
1
+ import os
2
+ import threading
3
+ import time
4
+ import json
5
+ import copy
6
+
7
+ from dotenv import load_dotenv
8
+
9
+ load_dotenv()
10
+
11
+ from openai import OpenAI
12
+
13
+ # Import utility functions
14
+ from massgen.v1.utils import function_to_json, execute_function_calls
15
+ from massgen.v1.types import AgentResponse
16
+
17
+
18
def parse_completion(response, add_citations=True):
    """Parse a (non-streaming) completion response from the OpenAI Responses API.

    Mainly these types of output appear in ``response.output``:
    - reasoning (no summary provided): ResponseReasoningItem(id='rs_...', summary=[], type='reasoning', status=None)
    - web_search_call (actions: search, open_page, find_in_page): ResponseFunctionWebSearch(id='ws_...', status='completed', type='web_search_call', action={'type': 'search', 'query': '...'})
    - message: response output (including text and citations, optional)
    - code_interpreter_call (code provided): code output
    - function_call: function call, arguments, and name provided

    Args:
        response: Response object returned by ``client.responses.create``.
        add_citations: When True, inline numbered citation links are inserted
            into the text and citation metadata is collected.

    Returns:
        AgentResponse with the extracted text, code blocks, citations, and
        function calls.
    """
    text = ""
    code = []
    citations = []
    function_calls = []
    reasoning_items = []

    # Process the response output
    for r in response.output:
        if (
            r.type == "message"
        ):  # Final response, including text and citations (optional)
            for c in r.content:
                # Not every content part carries text (e.g. refusal parts),
                # so fall back to an empty string instead of raising.
                text += getattr(c, "text", "") or ""
                if add_citations and hasattr(c, "annotations") and c.annotations:
                    for annotation in c.annotations:
                        citations.append(
                            {
                                "url": annotation.url,
                                "title": annotation.title,
                                "start_index": annotation.start_index,
                                "end_index": annotation.end_index,
                            }
                        )
        elif r.type == "code_interpreter_call":
            code.append(r.code)
        elif r.type == "web_search_call":
            # detailed web search actions: search, open_page, find_in_page, etc
            pass
        elif r.type == "reasoning":
            # Collected for completeness; currently not returned to callers.
            reasoning_items.append(
                {"type": "reasoning", "id": r.id, "summary": r.summary}
            )
        elif r.type == "function_call":
            # tool output - include call_id for Responses API
            function_calls.append(
                {
                    "type": "function_call",
                    "name": r.name,
                    "arguments": r.arguments,
                    "call_id": getattr(r, "call_id", None),
                    "id": getattr(r, "id", None),
                }
            )

    # Add citations to text if available
    if add_citations and citations:
        try:
            new_text = text
            # Sort citations by end_index in descending order to avoid shifting
            # issues when inserting links into the text.
            sorted_citations = sorted(
                citations, key=lambda c: c["end_index"], reverse=True
            )

            for idx, citation in enumerate(sorted_citations):
                end_index = citation["end_index"]
                # Descending insertion order, so label numbers count down.
                citation_link = f"[{len(citations) - idx}]({citation['url']})"
                new_text = new_text[:end_index] + citation_link + new_text[end_index:]
            text = new_text
        except Exception as e:
            # Best-effort: a malformed annotation must not lose the raw text.
            print(f"[OAI] Error adding citations to text: {e}")

    return AgentResponse(
        text=text, code=code, citations=citations, function_calls=function_calls
    )
92
+
93
+
94
def _format_tools(tools):
    """Normalize the caller-supplied tool list into Responses API format.

    Accepts dicts (passed through), callables (converted via function_to_json),
    and the built-in tool names "live_search" / "code_execution".

    Raises:
        ValueError: For any unrecognized tool entry.
    """
    formatted_tools = []
    if tools:
        for tool in tools:
            if isinstance(tool, dict):
                formatted_tools.append(tool)
            elif callable(tool):
                formatted_tools.append(function_to_json(tool))
            elif tool == "live_search":  # built-in tools
                formatted_tools.append({"type": "web_search_preview"})
            elif tool == "code_execution":  # built-in tools
                formatted_tools.append(
                    {"type": "code_interpreter", "container": {"type": "auto"}}
                )
            else:
                raise ValueError(f"Invalid tool type: {type(tool)}")
    return formatted_tools


def _split_messages(messages):
    """Split chat messages into (instructions, input_items) for the Responses API.

    The system message becomes the ``instructions`` string; everything else is
    forwarded as input. Function-call items have their "id" stripped to avoid
    the API's requirement for matching reasoning items.
    """
    instructions = ""
    input_items = []
    for message in messages:
        if message.get("role", "") == "system":
            instructions = message["content"]
        else:
            # Strip the function call's id without mutating the caller's dict
            # (deleting in place would corrupt multi-turn message history).
            if (
                message.get("type", "") == "function_call"
                and message.get("id", None) is not None
            ):
                message = {k: v for k, v in message.items() if k != "id"}
            input_items.append(message)
    return instructions, input_items


def _stream_response(completion, stream_callback):
    """Consume a Responses API event stream, forwarding progress to the callback.

    Accumulates output text, executed code, citations, and function calls from
    the typed streaming events and returns them as an AgentResponse.
    """
    text = ""
    code = []
    citations = []
    function_calls = []

    # Code streaming tracking: only the first 3 lines are streamed for display;
    # the remainder is tagged [CODE_LOG_ONLY] so the UI can log without showing.
    code_lines_shown = 0
    current_code_chunk = ""
    truncation_message_sent = False

    # Function call arguments streaming tracking
    current_function_call = None
    current_function_arguments = ""

    def _emit(payload):
        # A misbehaving callback must never abort stream consumption.
        try:
            stream_callback(payload)
        except Exception as e:
            print(f"Stream callback error: {e}")

    for chunk in completion:
        # Handle different event types from responses API streaming
        if not hasattr(chunk, "type"):
            continue
        ctype = chunk.type
        if ctype == "response.output_text.delta":
            # Incremental final-answer text
            if hasattr(chunk, "delta") and chunk.delta:
                text += chunk.delta
                _emit(chunk.delta)
        elif ctype == "response.function_call_output.delta":
            _emit(
                f"\nšŸ”§ {chunk.delta if hasattr(chunk, 'delta') else 'Function call'}\n"
            )
        elif ctype == "response.function_call_output.done":
            _emit("\nšŸ”§ Function call completed\n")
        elif ctype == "response.code_interpreter_call.in_progress":
            # New code block: reset the per-block truncation state.
            code_lines_shown = 0
            current_code_chunk = ""
            truncation_message_sent = False
            _emit("\nšŸ’» Starting code execution...\n")
        elif ctype == "response.code_interpreter_call_code.delta":
            # Code being written/streamed
            if hasattr(chunk, "delta") and chunk.delta:
                current_code_chunk += chunk.delta
                new_lines = chunk.delta.count("\n")
                if code_lines_shown < 3:
                    # Within the first 3 lines: show and log normally.
                    _emit(chunk.delta)
                    code_lines_shown += new_lines
                    if code_lines_shown >= 3 and not truncation_message_sent:
                        # Truncation notice is display-only (not logged).
                        _emit(
                            "\n[CODE_DISPLAY_ONLY]\nšŸ’» ... (full code in log file)\n"
                        )
                        truncation_message_sent = True
                else:
                    # Beyond 3 lines: prefix so the workflow logs but hides it.
                    _emit(f"[CODE_LOG_ONLY]{chunk.delta}")
        elif ctype == "response.code_interpreter_call_code.done":
            # Code writing completed - capture the full code block.
            if current_code_chunk:
                code.append(current_code_chunk)
            _emit("\nšŸ’» Code writing completed\n")
        elif ctype == "response.code_interpreter_call_execution.in_progress":
            _emit("\nšŸ’» Executing code...\n")
        elif ctype == "response.code_interpreter_call_execution.done":
            _emit("\nšŸ’» Code execution completed\n")
        elif ctype == "response.output_item.added":
            item = getattr(chunk, "item", None)
            if item:
                item_type = getattr(item, "type", None)
                if item_type == "web_search_call":
                    _emit("\nšŸ” Starting web search...\n")
                elif item_type == "reasoning":
                    _emit("\n🧠 Reasoning in progress...\n")
                elif item_type == "code_interpreter_call":
                    _emit("\nšŸ’» Code interpreter starting...\n")
                elif item_type == "function_call":
                    # Function call started - create the initial record; its
                    # arguments may still be streamed in afterwards.
                    function_call_data = {
                        "type": "function_call",
                        "name": getattr(item, "name", None),
                        "arguments": getattr(item, "arguments", None),
                        "call_id": getattr(item, "call_id", None),
                        "id": getattr(item, "id", None),
                    }
                    function_calls.append(function_call_data)
                    current_function_call = function_call_data
                    current_function_arguments = ""
                    function_name = function_call_data.get("name", "unknown")
                    _emit(f"\nšŸ”§ Calling function '{function_name}'...\n")
        elif ctype == "response.output_item.done":
            item = getattr(chunk, "item", None)
            if item:
                item_type = getattr(item, "type", None)
                if item_type == "web_search_call":
                    if hasattr(item, "action") and hasattr(item.action, "query"):
                        search_query = item.action.query
                        if search_query:
                            _emit(f"\nšŸ” Completed search for: {search_query}\n")
                elif item_type == "reasoning":
                    _emit("\n🧠 Reasoning completed\n")
                elif item_type == "code_interpreter_call":
                    # CRITICAL: capture code execution outputs (stdout/stderr)
                    # — the actual result of running the code.
                    if hasattr(item, "outputs") and item.outputs:
                        for output in item.outputs:
                            logs_content = None
                            # Dict-like access is the common case; fall back
                            # to attribute access.
                            if (
                                hasattr(output, "get")
                                and output.get("type") == "logs"
                            ):
                                logs_content = output.get("logs")
                            elif (
                                hasattr(output, "type")
                                and output.type == "logs"
                            ):
                                logs_content = getattr(output, "logs", None)
                            if logs_content:
                                execution_result = (
                                    f"\n[Code Execution Output]\n{logs_content}\n"
                                )
                                text += execution_result
                                _emit(execution_result)
                    _emit("\nšŸ’» Code interpreter completed\n")
                elif item_type == "function_call":
                    # Function call completed - reconcile the final arguments.
                    if hasattr(item, "arguments"):
                        for fc in function_calls:
                            if fc.get("id") == getattr(item, "id", None):
                                fc["arguments"] = item.arguments
                                break
                    # Also apply accumulated streamed arguments if available.
                    if current_function_call and current_function_arguments:
                        current_function_call["arguments"] = (
                            current_function_arguments
                        )
                    current_function_call = None
                    current_function_arguments = ""
                    function_name = getattr(item, "name", "unknown")
                    _emit(f"\nšŸ”§ Function '{function_name}' completed\n")
        elif ctype == "response.web_search_call.in_progress":
            _emit("\nšŸ” Search in progress...\n")
        elif ctype == "response.web_search_call.searching":
            _emit("\nšŸ” Searching...\n")
        elif ctype == "response.web_search_call.completed":
            _emit("\nšŸ” Search completed\n")
        elif ctype == "response.output_text.annotation.added":
            # Citation added - capture its metadata.
            if hasattr(chunk, "annotation"):
                citations.append(
                    {
                        "url": getattr(chunk.annotation, "url", None),
                        "title": getattr(chunk.annotation, "title", None),
                        "start_index": getattr(chunk.annotation, "start_index", None),
                        "end_index": getattr(chunk.annotation, "end_index", None),
                    }
                )
                _emit("\nšŸ“š Citation added\n")
        elif ctype == "response.function_call_arguments.delta":
            # Stream function call arguments as they are generated.
            if hasattr(chunk, "delta") and chunk.delta:
                current_function_arguments += chunk.delta
                _emit(chunk.delta)
        elif ctype == "response.function_call_arguments.done":
            # Arguments complete - update the matching function call record.
            if hasattr(chunk, "arguments") and hasattr(chunk, "item_id"):
                for fc in function_calls:
                    if fc.get("id") == chunk.item_id:
                        fc["arguments"] = chunk.arguments
                        break
            if current_function_call and current_function_arguments:
                current_function_call["arguments"] = current_function_arguments
            current_function_call = None
            current_function_arguments = ""
            _emit("\nšŸ”§ Function arguments complete\n")
        elif ctype == "response.completed":
            _emit("\nāœ… Response complete\n")

    return AgentResponse(
        text=text, code=code, citations=citations, function_calls=function_calls
    )


def process_message(
    messages,
    model="gpt-4.1-mini",
    tools=None,
    max_retries=10,
    max_tokens=None,
    temperature=None,
    top_p=None,
    api_key=None,
    stream=False,
    stream_callback=None,
):
    """
    Generate content using the OpenAI Responses API with optional streaming.

    Args:
        messages: List of messages in OpenAI format
        model: The OpenAI model to use; o-series names may carry a
            "-low"/"-medium"/"-high" suffix selecting reasoning effort
        tools: List of tools to use (dicts, callables, or built-in tool names)
        max_retries: Maximum number of retry attempts
        max_tokens: Maximum number of tokens in response
        temperature: Temperature for generation (ignored for o-series models)
        top_p: Top-p value for generation (ignored for o-series models)
        api_key: OpenAI API key (if None, will get from environment)
        stream: Whether to stream the response (default: False)
        stream_callback: Optional callback function for streaming chunks

    Returns:
        AgentResponse with text, code, citations, and function_calls.

    Raises:
        ValueError: If no API key is provided or found in the environment.
    """
    # Resolve the API key from the argument or the environment.
    api_key_val = os.getenv("OPENAI_API_KEY") if api_key is None else api_key
    if not api_key_val:
        raise ValueError("OPENAI_API_KEY not found in environment variables")

    # Create OpenAI client without individual timeouts.
    client = OpenAI(api_key=api_key_val)

    # All models use the same Responses API; prepare tools and inputs once.
    formatted_tools = _format_tools(tools)
    instructions, input_text = _split_messages(messages)

    # Make the API request with retry logic.
    completion = None
    retry = 0
    while retry < max_retries:
        try:
            # Local copy of the model name: reasoning suffixes are stripped
            # below without affecting subsequent retries.
            model_name = model

            params = {
                "model": model_name,
                "tools": formatted_tools if formatted_tools else None,
                "instructions": instructions if instructions else None,
                "input": input_text,
                "max_output_tokens": max_tokens if max_tokens else None,
                "stream": True if stream and stream_callback else False,
            }

            # CRITICAL: include code interpreter outputs; without this the
            # code execution results (stdout/stderr) are not returned.
            if formatted_tools and any(
                tool.get("type") == "code_interpreter" for tool in formatted_tools
            ):
                params["include"] = ["code_interpreter_call.outputs"]

            # o-series models (o1, o3, o4, ...) reject temperature/top_p but
            # accept a reasoning-effort setting, optionally encoded as a
            # "-low"/"-medium"/"-high" suffix on the model name.
            if temperature is not None and not model_name.startswith("o"):
                params["temperature"] = temperature
            if top_p is not None and not model_name.startswith("o"):
                params["top_p"] = top_p
            if model_name.startswith("o"):
                if model_name.endswith("-low"):
                    params["reasoning"] = {"effort": "low"}
                    model_name = model_name.replace("-low", "")
                elif model_name.endswith("-medium"):
                    params["reasoning"] = {"effort": "medium"}
                    model_name = model_name.replace("-medium", "")
                elif model_name.endswith("-high"):
                    params["reasoning"] = {"effort": "high"}
                    model_name = model_name.replace("-high", "")
                else:
                    params["reasoning"] = {"effort": "low"}
                params["model"] = model_name

            # Inference
            completion = client.responses.create(**params)
            break
        except Exception as e:
            print(f"Error on attempt {retry + 1}: {e}")
            retry += 1
            time.sleep(1.5)

    if completion is None:
        # All retries failed: return an empty response rather than raising so
        # the orchestrator can continue with other agents.
        print(
            f"Failed to get completion after {max_retries} retries, returning empty response"
        )
        return AgentResponse(text="", code=[], citations=[], function_calls=[])

    if stream and stream_callback:
        # Accumulate the typed streaming events into an AgentResponse.
        return _stream_response(completion, stream_callback)
    # Parse the non-streaming response with the shared parser.
    return parse_completion(completion, add_citations=True)
567
+
568
+
569
# Intentionally empty entry point: this module is meant to be imported by the
# v1 orchestrator, not executed directly.
if __name__ == "__main__":
    pass