massgen 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of massgen might be problematic. Click here for more details.

Files changed (76) hide show
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,410 @@
1
+ import os
2
+ import threading
3
+ import time
4
+ import json
5
+ import inspect
6
+ import copy
7
+
8
+ from dotenv import load_dotenv
9
+ from xai_sdk import Client
10
+ from xai_sdk.chat import assistant, system, user, tool_result, tool as xai_tool_func
11
+ from xai_sdk.search import SearchParameters
12
+
13
+ # Import utility functions and tools
14
+ from massgen.v1.utils import function_to_json, execute_function_calls
15
+ from massgen.v1.types import AgentResponse
16
+
17
+ load_dotenv()
18
+
19
+
20
def parse_completion(response, add_citations=True):
    """Parse a non-streaming completion response from the Grok (xAI) API.

    Args:
        response: Response object returned by the xAI SDK (``chat.sample()``).
        add_citations: When True and the response carries citations, append a
            numbered "References" section to the returned text.

    Returns:
        AgentResponse with fields: text, code, citations, function_calls.
        ``code`` is always empty for this backend; it is kept so the return
        shape matches the other backends.
    """
    text = response.content
    code = []
    citations = []
    function_calls = []

    # Collect web-search citations; the SDK returns them as plain URL strings.
    if getattr(response, "citations", None):
        citations = [
            {"url": url, "title": "", "start_index": -1, "end_index": -1}
            for url in response.citations
        ]

    if citations and add_citations:
        refs = "\n".join(
            f"[{idx}]({citation['url']})" for idx, citation in enumerate(citations)
        )
        text = text + "\n\nReferences:\n" + refs

    # Tool calls may be OpenAI-style (tool_call.function.name/.arguments) or
    # flat (tool_call.name/.arguments) depending on the SDK version.
    if getattr(response, "tool_calls", None):
        for tool_call in response.tool_calls:
            if hasattr(tool_call, "function"):
                function_calls.append(
                    {
                        "type": "function_call",
                        "call_id": tool_call.id,
                        "name": tool_call.function.name,
                        "arguments": tool_call.function.arguments,
                    }
                )
            elif hasattr(tool_call, "name") and hasattr(tool_call, "arguments"):
                function_calls.append(
                    {
                        "type": "function_call",
                        "call_id": tool_call.id,
                        "name": tool_call.name,
                        "arguments": tool_call.arguments,
                    }
                )

    return AgentResponse(
        text=text, code=code, citations=citations, function_calls=function_calls
    )
67
+
68
+
69
def process_message(
    messages,
    model="grok-3-mini",
    tools=None,
    max_retries=10,
    max_tokens=None,
    temperature=None,
    top_p=None,
    api_key=None,
    stream=False,
    stream_callback=None,
):
    """
    Generate content using the Grok (xAI) API with optional streaming and custom tools.

    Args:
        messages: List of message dicts with 'role' and 'content' keys. Items with
            type == "function_call_output" are forwarded as tool results; items
            with type == "function_call" are skipped (the SDK tracks them itself).
        model: Model name to use (default: "grok-3-mini")
        tools: List of tool definitions for function calling, each tool should be a dict with OpenAI-compatible format:
            [
                {
                    "type": "function",
                    "function": {
                        "name": "function_name",
                        "description": "Function description",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "param1": {"type": "string", "description": "Parameter description"},
                                "param2": {"type": "number", "description": "Another parameter"}
                            },
                            "required": ["param1"]
                        }
                    }
                }
            ]
            For backward compatibility, the string "live_search" may be included
            in this list to enable live web search.
        max_retries: Maximum number of retry attempts (default: 10)
        max_tokens: Maximum tokens in response (default: None, uses API default)
        temperature: Sampling temperature (default: None)
        top_p: Top-p sampling parameter (default: None)
        api_key: XAI API key (default: None, uses environment variable)
        stream: Enable streaming response (default: False)
        stream_callback: Callback function for streaming (default: None)

    Returns:
        AgentResponse with fields: 'text', 'code', 'citations', 'function_calls'

    Raises:
        ValueError: If no API key is supplied and XAI_API_KEY is not set.

    Note:
        - Function calls are returned in 'function_calls' as a list of dicts with
          'name' and 'arguments' as returned by the model.
    """
    if api_key is None:
        api_key_val = os.getenv("XAI_API_KEY")
    else:
        api_key_val = api_key

    if not api_key_val:
        raise ValueError("XAI_API_KEY not found in environment variables")

    client = Client(api_key=api_key_val)

    # Backward compatibility: tools=["live_search"] enables web search.
    enable_search = False
    custom_tools = []
    if tools:
        for tool in tools:
            if tool == "live_search":
                enable_search = True
            elif isinstance(tool, str):
                # Skip unexpected strings to avoid API errors.
                continue
            else:
                # This should be a proper tool object.
                custom_tools.append(tool)

    search_parameters = None
    if enable_search:
        search_parameters = SearchParameters(
            mode="auto",
            return_citations=True,
        )

    # Convert OpenAI-format tools to xAI SDK tool objects for the API call.
    api_tools = None
    if custom_tools:
        api_tools = []
        for custom_tool in custom_tools:
            if isinstance(custom_tool, dict) and custom_tool.get("type") == "function":
                if "function" in custom_tool:
                    # OpenAI nested format: {"type": "function", "function": {...}}
                    func_def = custom_tool["function"]
                else:
                    # Flat format from function_to_json: {"type": "function", "name": ...}
                    func_def = custom_tool

                api_tools.append(
                    xai_tool_func(
                        name=func_def["name"],
                        description=func_def["description"],
                        parameters=func_def["parameters"],
                    )
                )
            else:
                # Already in xAI SDK format; use as-is.
                api_tools.append(custom_tool)

    def _tool_call_dict(tool_call):
        """Normalize an SDK tool call (nested or flat shape) to our function_call dict."""
        if hasattr(tool_call, "function"):
            return {
                "type": "function_call",
                "call_id": tool_call.id,
                "name": tool_call.function.name,
                "arguments": tool_call.function.arguments,
            }
        if hasattr(tool_call, "name") and hasattr(tool_call, "arguments"):
            return {
                "type": "function_call",
                "call_id": tool_call.id,
                "name": tool_call.name,
                "arguments": tool_call.arguments,
            }
        return None

    def make_grok_request(stream=False):
        """Build a chat session from `messages` and issue a single API call."""
        chat_params = {
            "model": model,
            "search_parameters": search_parameters,
        }
        # Only forward sampling knobs the caller actually set.
        if temperature is not None:
            chat_params["temperature"] = temperature
        if top_p is not None:
            chat_params["top_p"] = top_p
        if max_tokens is not None:
            chat_params["max_tokens"] = max_tokens
        if api_tools is not None:
            chat_params["tools"] = api_tools

        chat = client.chat.create(**chat_params)

        for message in messages:
            role = message.get("role", None)
            content = message.get("content", None)

            if role == "system":
                chat.append(system(content))
            elif role == "user":
                chat.append(user(content))
            elif role == "assistant":
                chat.append(assistant(content))
            elif message.get("type", None) == "function_call":
                # The SDK reconstructs assistant tool calls itself; nothing to append.
                pass
            elif message.get("type", None) == "function_call_output":
                content = message.get("output", None)
                chat.append(tool_result(content))

        if stream:
            return chat.stream()
        return chat.sample()

    # Retry transient API failures with a short fixed backoff.
    completion = None
    retry = 0
    while retry < max_retries:
        try:
            is_streaming = stream and stream_callback is not None
            completion = make_grok_request(stream=is_streaming)
            break
        except Exception as e:
            print(f"Error on attempt {retry + 1}: {e}")
            retry += 1
            if retry < max_retries:
                # No point sleeping after the final failed attempt.
                time.sleep(1.5)

    if completion is None:
        print(
            f"Failed to get completion after {max_retries} retries, returning empty response"
        )
        return AgentResponse(text="", code=[], citations=[], function_calls=[])

    if stream and stream_callback is not None:
        text = ""
        code = []
        citations = []
        function_calls = []
        thinking_count = 0
        has_shown_search_indicator = False

        try:
            has_delta_content = False
            # xAI SDK streaming yields (accumulated_response, delta_chunk) pairs.
            for response, chunk in completion:
                delta_content = None

                # Primary: xAI stores delta text directly on choices[0].content
                # (not choices[0].delta.content as in the OpenAI SDK).
                if (
                    hasattr(chunk, "choices")
                    and chunk.choices
                    and len(chunk.choices) > 0
                ):
                    choice = chunk.choices[0]
                    if hasattr(choice, "content") and choice.content:
                        delta_content = choice.content
                # Fallbacks: content / text attributes on the chunk itself.
                elif hasattr(chunk, "content") and chunk.content:
                    delta_content = chunk.content
                elif hasattr(chunk, "text") and chunk.text:
                    delta_content = chunk.text

                if delta_content:
                    has_delta_content = True
                    if delta_content.strip() == "Thinking...":
                        # Progress chunk: surface it to the user but keep it
                        # out of the final accumulated text.
                        thinking_count += 1
                        if (
                            thinking_count == 3
                            and not has_shown_search_indicator
                            and search_parameters
                        ):
                            try:
                                stream_callback("\n🧠 Thinking...\n")
                            except Exception as e:
                                print(f"Stream callback error: {e}")
                            has_shown_search_indicator = True

                        try:
                            stream_callback(delta_content)
                        except Exception as e:
                            print(f"Stream callback error: {e}")
                    else:
                        # Actual content: accumulate and stream.
                        text += delta_content
                        try:
                            stream_callback(delta_content)
                        except Exception as e:
                            print(f"Stream callback error: {e}")

                # Function calls may appear on the accumulated response directly,
                # or nested under choices[].message.tool_calls.
                if hasattr(response, "tool_calls") and response.tool_calls:
                    for tool_call in response.tool_calls:
                        _func_call = _tool_call_dict(tool_call)
                        if _func_call and _func_call not in function_calls:
                            function_calls.append(_func_call)
                elif hasattr(response, "choices") and response.choices:
                    for choice in response.choices:
                        if (
                            hasattr(choice, "message")
                            and hasattr(choice.message, "tool_calls")
                            and choice.message.tool_calls
                        ):
                            for tool_call in choice.message.tool_calls:
                                _func_call = _tool_call_dict(tool_call)
                                if _func_call and _func_call not in function_calls:
                                    function_calls.append(_func_call)

                # The final accumulated response carries citations when search ran.
                if hasattr(response, "citations") and response.citations:
                    citations = [
                        {
                            "url": citation,
                            "title": "",
                            "start_index": -1,
                            "end_index": -1,
                        }
                        for citation in response.citations
                    ]

                    # Notify about found sources if search was enabled.
                    if citations and enable_search and stream_callback is not None:
                        try:
                            stream_callback(
                                f"\n\n🔍 Found {len(citations)} web sources\n"
                            )
                        except Exception as e:
                            print(f"Stream callback error: {e}")

            # If no per-chunk deltas arrived, emit the complete result at the end.
            if not has_delta_content:
                if text:
                    stream_callback(text)
                if function_calls:
                    for function_call in function_calls:
                        stream_callback(
                            f"🔧 Calling function: {function_call['name']}\n"
                        )
                        stream_callback(
                            f"🔧 Arguments: {json.dumps(function_call['arguments'], indent=4)}\n\n"
                        )

        except Exception:
            # Streaming failed part-way; fall back to one non-streaming request.
            completion = make_grok_request(stream=False)
            return parse_completion(completion, add_citations=True)

        result = AgentResponse(
            text=text, code=code, citations=citations, function_calls=function_calls
        )
    else:
        result = parse_completion(completion, add_citations=True)

    return result
407
+
408
+
409
if __name__ == "__main__":
    # No standalone CLI entry point; this module is used via the massgen v1
    # backend layer (see massgen.v1.cli / massgen.v1.main).
    pass