massgen-0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of massgen might be problematic.

Files changed (76)
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/v1/backends/gemini.py
@@ -0,0 +1,584 @@
+ import os
+ import threading
+ import time
+ import json
+
+ try:
+     from google import genai
+     from google.genai import types
+
+     GEMINI_AVAILABLE = True
+ except ImportError:
+     GEMINI_AVAILABLE = False
+
+     # Create dummy classes to prevent import errors
+     class genai:
+         @staticmethod
+         def configure(**kwargs):
+             raise ImportError(
+                 "Google genai package not installed. Install with: pip install google-genai"
+             )
+
+     class types:
+         pass
+
+
+ from dotenv import load_dotenv
+ import copy
+
+ load_dotenv()
+
+ # Import utility functions and tools
+ from massgen.v1.utils import (
+     function_to_json,
+     execute_function_calls,
+     generate_random_id,
+ )
+ from massgen.v1.types import AgentResponse
+
+
+ def add_citations_to_response(response):
+     text = response.text
+
+     # Check if grounding_metadata exists
+     if not hasattr(response, "candidates") or not response.candidates:
+         return text
+
+     candidate = response.candidates[0]
+     if not hasattr(candidate, "grounding_metadata") or not candidate.grounding_metadata:
+         return text
+
+     grounding_metadata = candidate.grounding_metadata
+
+     # Check if grounding_supports and grounding_chunks exist and are not None
+     supports = getattr(grounding_metadata, "grounding_supports", None)
+     chunks = getattr(grounding_metadata, "grounding_chunks", None)
+
+     if not supports or not chunks:
+         return text
+
+     # Sort supports by end_index in descending order to avoid shifting issues when inserting.
+     sorted_supports = sorted(supports, key=lambda s: s.segment.end_index, reverse=True)
+
+     for support in sorted_supports:
+         end_index = support.segment.end_index
+         if support.grounding_chunk_indices:
+             # Create a citation string like [1](link1), [2](link2)
+             citation_links = []
+             for i in support.grounding_chunk_indices:
+                 if i < len(chunks):
+                     uri = chunks[i].web.uri
+                     citation_links.append(f"[{i + 1}]({uri})")
+
+             citation_string = ", ".join(citation_links)
+             text = text[:end_index] + citation_string + text[end_index:]
+
+     return text
+
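As a sanity check on the splice logic above, here is a hypothetical, standalone illustration (not part of the package) that builds a minimal stand-in for a grounded Gemini response out of `SimpleNamespace` objects:

```python
# Standalone sketch: a hand-built stand-in for a grounded response.
# Everything here is hypothetical test scaffolding, not real SDK objects.
from types import SimpleNamespace

chunk = SimpleNamespace(web=SimpleNamespace(uri="https://example.com"))
support = SimpleNamespace(
    segment=SimpleNamespace(end_index=12),  # insert right after "Paris is big"
    grounding_chunk_indices=[0],
)
response = SimpleNamespace(
    text="Paris is big.",
    candidates=[
        SimpleNamespace(
            grounding_metadata=SimpleNamespace(
                grounding_supports=[support],
                grounding_chunks=[chunk],
            )
        )
    ],
)

print(add_citations_to_response(response))
# -> Paris is big[1](https://example.com).
```

Sorting supports by descending `end_index` means each insertion happens to the right of the next one, so earlier character offsets stay valid.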
+
+ def parse_completion(completion, add_citations=True):
+     """Parse the completion response from the Gemini API using the official SDK."""
+     text = ""
+     code = []
+     citations = []
+     function_calls = []
+     reasoning_items = []
+
+     # Handle response from the official SDK.
+     # Always parse candidates.content.parts for complete information,
+     # even if completion.text is available, as it may be incomplete.
+     if hasattr(completion, "candidates") and completion.candidates:
+         candidate = completion.candidates[0]
+         if hasattr(candidate, "content") and hasattr(candidate.content, "parts"):
+             for part in candidate.content.parts:
+                 # Handle text parts
+                 if hasattr(part, "text") and part.text:
+                     text += part.text
+                 # Handle executable code parts
+                 elif hasattr(part, "executable_code") and part.executable_code:
+                     if hasattr(part.executable_code, "code") and part.executable_code.code:
+                         code.append(part.executable_code.code)
+                 # Handle code execution results
+                 elif hasattr(part, "code_execution_result") and part.code_execution_result:
+                     if (
+                         hasattr(part.code_execution_result, "output")
+                         and part.code_execution_result.output
+                     ):
+                         # Add execution result as text output
+                         text += f"\n[Code Output]\n{part.code_execution_result.output}\n"
+                 # Handle function calls
+                 elif hasattr(part, "function_call") and part.function_call:
+                     # Extract function name and arguments
+                     func_name = getattr(part.function_call, "name", "unknown")
+                     func_args = {}
+                     call_id = getattr(part.function_call, "id", generate_random_id())
+                     if hasattr(part.function_call, "args") and part.function_call.args:
+                         # Convert protobuf structs to a plain dict
+                         if hasattr(part.function_call.args, "_pb"):
+                             try:
+                                 func_args = dict(part.function_call.args)
+                             except Exception:
+                                 func_args = {}
+                         else:
+                             func_args = part.function_call.args
+
+                     function_calls.append(
+                         {
+                             "type": "function_call",
+                             "call_id": call_id,
+                             "name": func_name,
+                             "arguments": func_args,
+                         }
+                     )
+                 elif hasattr(part, "function_response"):
+                     # Function responses are typically handled in multi-turn scenarios
+                     pass
+
+     # Handle grounding metadata (citations from search)
+     if hasattr(completion, "candidates") and completion.candidates:
+         candidate = completion.candidates[0]
+         if hasattr(candidate, "grounding_metadata") and candidate.grounding_metadata:
+             grounding = candidate.grounding_metadata
+             if hasattr(grounding, "grounding_chunks") and grounding.grounding_chunks:
+                 for chunk in grounding.grounding_chunks:
+                     if hasattr(chunk, "web") and chunk.web:
+                         web_chunk = chunk.web
+                         citation = {
+                             "url": getattr(web_chunk, "uri", ""),
+                             "title": getattr(web_chunk, "title", ""),
+                             "start_index": -1,  # Not available in grounding metadata
+                             "end_index": -1,  # Not available in grounding metadata
+                         }
+                         citations.append(citation)
+
+             # Handle search entry point (if available)
+             if hasattr(grounding, "search_entry_point") and grounding.search_entry_point:
+                 entry_point = grounding.search_entry_point
+                 if hasattr(entry_point, "rendered_content") and entry_point.rendered_content:
+                     # Search summary is available here but not currently added to citations
+                     pass
+
+     # Add inline citations to the text if requested. Note: this replaces the
+     # text assembled above with response.text plus citation links.
+     if add_citations:
+         try:
+             text = add_citations_to_response(completion)
+         except Exception as e:
+             print(f"[GEMINI] Error adding citations to text: {e}")
+
+     return AgentResponse(
+         text=text, code=code, citations=citations, function_calls=function_calls
+     )
+
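Likewise, a minimal hypothetical probe of `parse_completion` (with `add_citations=False` so no real `response.text` is needed):

```python
# Hypothetical stand-in for a non-streaming SDK completion, for illustration.
from types import SimpleNamespace

completion = SimpleNamespace(
    candidates=[
        SimpleNamespace(
            content=SimpleNamespace(parts=[SimpleNamespace(text="2 + 2 = 4")])
        )
    ]
)
result = parse_completion(completion, add_citations=False)
print(result.text)  # "2 + 2 = 4", assuming AgentResponse exposes fields as attributes
```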
+
+ def process_message(
+     messages,
+     model="gemini-2.5-flash",
+     tools=None,
+     max_retries=10,
+     max_tokens=None,
+     temperature=None,
+     top_p=None,
+     api_key=None,
+     stream=False,
+     stream_callback=None,
+ ):
+     """
+     Generate content using the Gemini API with the official google.genai SDK.
+
+     Args:
+         messages: List of messages in OpenAI format
+         model: The Gemini model to use
+         tools: List of tools to use
+         max_retries: Maximum number of retry attempts
+         max_tokens: Maximum number of tokens in the response
+         temperature: Temperature for generation
+         top_p: Top-p value for generation
+         api_key: Gemini API key (if None, read from the environment)
+         stream: Whether to stream the response (default: False)
+         stream_callback: Function called with each chunk when streaming (default: None)
+
+     Returns:
+         AgentResponse: with text, code, citations, and function_calls fields
+     """
+     # Get the API key
+     if api_key is None:
+         api_key_val = os.getenv("GEMINI_API_KEY")
+     else:
+         api_key_val = api_key
+
+     if not api_key_val:
+         raise ValueError("GEMINI_API_KEY not found in environment variables")
+
+     # Set the API key for the client
+     client = genai.Client(api_key=api_key_val)
+
246
+ # Convert messages from OpenAI format to Gemini format
247
+ gemini_messages = []
248
+ system_instruction = None
249
+ function_calls = {}
250
+
251
+ for message in messages:
252
+ role = message.get("role", None)
253
+ content = message.get("content", None)
254
+
255
+ if role == "system":
256
+ system_instruction = content
257
+ elif role == "user":
258
+ gemini_messages.append(
259
+ types.Content(role="user", parts=[types.Part(text=content)])
260
+ )
261
+ elif role == "assistant":
262
+ gemini_messages.append(
263
+ types.Content(role="model", parts=[types.Part(text=content)])
264
+ )
265
+ elif message.get("type", None) == "function_call":
266
+ function_calls[message["call_id"]] = message
267
+ elif message.get("type", None) == "function_call_output":
268
+ func_name = function_calls[message["call_id"]]["name"]
269
+ func_resp = message["output"]
270
+ function_response_part = types.Part.from_function_response(
271
+ name=func_name, response={"result": func_resp}
272
+ )
273
+ # Append the function response
274
+ gemini_messages.append(
275
+ types.Content(role="user", parts=[function_response_part])
276
+ )
277
+
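For reference, a hypothetical input showing the message shapes this loop accepts; the `function_call` / `function_call_output` items follow the OpenAI-style format used elsewhere in massgen:

```python
# Hypothetical input, for illustration only.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Tokyo?"},
    # A prior tool-call turn, keyed by call_id...
    {"type": "function_call", "call_id": "call_1",
     "name": "get_weather", "arguments": {"city": "Tokyo"}},
    # ...and its output, converted to types.Part.from_function_response
    {"type": "function_call_output", "call_id": "call_1", "output": "22 °C, clear"},
]
```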
+     # Set up generation config
+     generation_config = {}
+     if temperature is not None:
+         generation_config["temperature"] = temperature
+     if top_p is not None:
+         generation_config["top_p"] = top_p
+     if max_tokens is not None:
+         generation_config["max_output_tokens"] = max_tokens
+
+     # Set up tools - separate native tools from custom functions,
+     # since the Gemini API can't combine native tools with custom functions
+     gemini_tools = []
+     has_native_tools = False
+     custom_functions = []
+
+     if tools:
+         for tool in tools:
+             if tool == "live_search":
+                 gemini_tools.append(types.Tool(google_search=types.GoogleSearch()))
+                 has_native_tools = True
+             elif tool == "code_execution":
+                 gemini_tools.append(
+                     types.Tool(code_execution=types.ToolCodeExecution())
+                 )
+                 has_native_tools = True
+             else:
+                 # Collect custom function declarations.
+                 # Old format: {"type": "function", "function": {...}}
+                 if isinstance(tool, dict) and "function" in tool:
+                     function_declaration = tool["function"]
+                 else:  # New OpenAI format: {"type": "function", "name": ..., "description": ...}
+                     function_declaration = copy.deepcopy(tool)
+                     if "type" in function_declaration:
+                         del function_declaration["type"]
+                 custom_functions.append(function_declaration)
+
+     if custom_functions and has_native_tools:
+         print(
+             "[WARNING] Gemini API doesn't support combining native tools with custom functions. Prioritizing built-in tools."
+         )
+     elif custom_functions and not has_native_tools:
+         # Add custom functions to the tools
+         gemini_tools.append(types.Tool(function_declarations=custom_functions))
+
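A hypothetical `tools` argument illustrating both branches: native tools are named by string, custom functions are OpenAI-style declarations. Mixing the two triggers the warning above and the custom functions are dropped.

```python
# Hypothetical tools list, for illustration only.
tools = [
    "code_execution",  # native tool -> types.Tool(code_execution=...)
    {                  # custom function (new OpenAI format; "type" is stripped)
        "type": "function",
        "name": "get_weather",
        "description": "Look up current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
]
```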
+     # Set up safety settings
+     safety_settings = [
+         types.SafetySetting(
+             category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
+             threshold=types.HarmBlockThreshold.BLOCK_NONE,
+         ),
+         types.SafetySetting(
+             category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+             threshold=types.HarmBlockThreshold.BLOCK_NONE,
+         ),
+         types.SafetySetting(
+             category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+             threshold=types.HarmBlockThreshold.BLOCK_NONE,
+         ),
+         types.SafetySetting(
+             category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+             threshold=types.HarmBlockThreshold.BLOCK_NONE,
+         ),
+     ]
+
+     # Prepare the request parameters
+     request_params = {
+         "model": model,
+         "contents": gemini_messages,
+         "config": types.GenerateContentConfig(
+             safety_settings=safety_settings, **generation_config
+         ),
+     }
+
+     if system_instruction:
+         request_params["config"].system_instruction = types.Content(
+             parts=[types.Part(text=system_instruction)]
+         )
+
+     if gemini_tools:
+         request_params["config"].tools = gemini_tools
+
+     # Make API request with retry logic
+     completion = None
+     retry = 0
+     while retry < max_retries:
+         try:
+             if stream and stream_callback:
+                 # Handle streaming response
+                 text = ""
+                 code = []
+                 citations = []
+                 function_calls = []  # Function calls captured while streaming
+
+                 # Code streaming tracking
+                 code_lines_shown = 0
+                 current_code_chunk = ""
+                 truncation_message_sent = False  # Track if truncation message was sent
+
+                 stream_response = client.models.generate_content_stream(
+                     **request_params
+                 )
+
+                 for chunk in stream_response:
+                     # Handle text chunks - be very careful to avoid duplication
+                     chunk_text_processed = False
+
+                     # First, try to get text from the most direct source
+                     if hasattr(chunk, "text") and chunk.text:
+                         chunk_text = chunk.text
+                         text += chunk_text
+                         try:
+                             stream_callback(chunk_text)
+                             chunk_text_processed = True
+                         except Exception as e:
+                             print(f"Stream callback error: {e}")
+
+                     # Only process candidates if text wasn't already taken from chunk.text
+                     elif hasattr(chunk, "candidates") and chunk.candidates:
+                         candidate = chunk.candidates[0]
+                         if hasattr(candidate, "content") and hasattr(
+                             candidate.content, "parts"
+                         ):
+                             for part in candidate.content.parts:
+                                 if (
+                                     hasattr(part, "text")
+                                     and part.text
+                                     and not chunk_text_processed
+                                 ):
+                                     chunk_text = part.text
+                                     text += chunk_text
+                                     try:
+                                         stream_callback(chunk_text)
+                                         # Mark as processed to avoid double-handling
+                                         chunk_text_processed = True
+                                     except Exception as e:
+                                         print(f"Stream callback error: {e}")
+                                 elif (
+                                     hasattr(part, "executable_code")
+                                     and part.executable_code
+                                     and hasattr(part.executable_code, "code")
+                                     and part.executable_code.code
+                                 ):
+                                     # Handle code execution streaming
+                                     code_text = part.executable_code.code
+                                     code.append(code_text)
+
+                                     # Apply similar code streaming logic as in oai.py:
+                                     # show the first 3 lines, then log the rest
+                                     code_lines = code_text.split("\n")
+
+                                     if code_lines_shown == 0:
+                                         try:
+                                             stream_callback("\n💻 Starting code execution...\n")
+                                         except Exception as e:
+                                             print(f"Stream callback error: {e}")
+
+                                     for line in code_lines:
+                                         if code_lines_shown < 3:
+                                             try:
+                                                 stream_callback(line + "\n")
+                                                 code_lines_shown += 1
+                                             except Exception as e:
+                                                 print(f"Stream callback error: {e}")
+                                         elif (
+                                             code_lines_shown == 3
+                                             and not truncation_message_sent
+                                         ):
+                                             try:
+                                                 stream_callback(
+                                                     "\n[CODE_DISPLAY_ONLY]\n💻 ... (full code in log file)\n"
+                                                 )
+                                                 # Ensure this message is only sent once
+                                                 truncation_message_sent = True
+                                                 code_lines_shown += 1
+                                             except Exception as e:
+                                                 print(f"Stream callback error: {e}")
+                                         else:
+                                             try:
+                                                 stream_callback(f"[CODE_LOG_ONLY]{line}\n")
+                                             except Exception as e:
+                                                 print(f"Stream callback error: {e}")
+
+                                 elif hasattr(part, "function_call") and part.function_call:
+                                     # Handle function calls - extract the actual call data
+                                     func_name = getattr(part.function_call, "name", "unknown")
+                                     func_args = {}
+                                     if (
+                                         hasattr(part.function_call, "args")
+                                         and part.function_call.args
+                                     ):
+                                         # Convert protobuf structs to a plain dict
+                                         if hasattr(part.function_call.args, "_pb"):
+                                             try:
+                                                 func_args = dict(part.function_call.args)
+                                             except Exception:
+                                                 func_args = {}
+                                         else:
+                                             func_args = part.function_call.args
+
+                                     function_calls.append(
+                                         {
+                                             "type": "function_call",
+                                             "call_id": getattr(
+                                                 part.function_call, "id", generate_random_id()
+                                             ),
+                                             "name": func_name,
+                                             "arguments": func_args,
+                                         }
+                                     )
+
+                                     try:
+                                         stream_callback(f"\n🔧 Calling {func_name}\n")
+                                     except Exception as e:
+                                         print(f"Stream callback error: {e}")
+
+                                 elif hasattr(part, "function_response"):
+                                     try:
+                                         stream_callback("\n🔧 Function response received\n")
+                                     except Exception as e:
+                                         print(f"Stream callback error: {e}")
+
+                                 elif (
+                                     hasattr(part, "code_execution_result")
+                                     and part.code_execution_result
+                                 ):
+                                     if (
+                                         hasattr(part.code_execution_result, "output")
+                                         and part.code_execution_result.output
+                                     ):
+                                         # Add execution result as text output
+                                         result_text = f"\n[Code Output]\n{part.code_execution_result.output}\n"
+                                         text += result_text
+                                         try:
+                                             stream_callback(result_text)
+                                         except Exception as e:
+                                             print(f"Stream callback error: {e}")
+
+                         # Handle grounding metadata (citations from search) at the candidate level
+                         if (
+                             hasattr(candidate, "grounding_metadata")
+                             and candidate.grounding_metadata
+                         ):
+                             grounding = candidate.grounding_metadata
+                             if (
+                                 hasattr(grounding, "grounding_chunks")
+                                 and grounding.grounding_chunks
+                             ):
+                                 for chunk_item in grounding.grounding_chunks:
+                                     if hasattr(chunk_item, "web") and chunk_item.web:
+                                         web_chunk = chunk_item.web
+                                         citation = {
+                                             "url": getattr(web_chunk, "uri", ""),
+                                             "title": getattr(web_chunk, "title", ""),
+                                             "start_index": -1,  # Not available in grounding metadata
+                                             "end_index": -1,  # Not available in grounding metadata
+                                         }
+                                         # Avoid duplicate citations
+                                         if citation not in citations:
+                                             citations.append(citation)
+
+                 # Signal completion
+                 try:
+                     stream_callback("\n✅ Generation finished\n")
+                 except Exception as e:
+                     print(f"Stream callback error: {e}")
+
+                 return AgentResponse(
+                     text=text,
+                     code=code,
+                     citations=citations,
+                     function_calls=function_calls,  # Return the captured function calls
+                 )
+             else:
+                 # Handle non-streaming response
+                 completion = client.models.generate_content(**request_params)
+                 break
+         except Exception as e:
+             print(f"Error on attempt {retry + 1}: {e}")
+             retry += 1
+             time.sleep(1.5)
+
+     if completion is None:
+         # If we failed all retries, return empty response instead of raising exception
+         print(
+             f"Failed to get completion after {max_retries} retries, returning empty response"
+         )
+         return AgentResponse(text="", code=[], citations=[], function_calls=[])
+
+     # Parse the completion and return text, code, and citations
+     result = parse_completion(completion, add_citations=True)
+     return result
+
+
+ # Example usage (you can remove this if not needed)
+ if __name__ == "__main__":
+     pass
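For orientation, a minimal sketch of how this backend might be driven end to end. It assumes GEMINI_API_KEY is set in the environment; the prompt and print callback are illustrative, and attribute access on the result assumes AgentResponse exposes its fields as attributes:

```python
# Minimal usage sketch (hypothetical prompt and callback).
from massgen.v1.backends.gemini import process_message

response = process_message(
    messages=[
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "What changed in Python 3.12?"},
    ],
    model="gemini-2.5-flash",
    tools=["live_search"],
    stream=True,
    stream_callback=lambda chunk: print(chunk, end="", flush=True),
)
print(response.citations)  # grounding citations, if the search tool fired
```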