massgen 0.0.3__py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Potentially problematic release. This version of massgen might be problematic.
- massgen/__init__.py +94 -0
- massgen/agent_config.py +507 -0
- massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
- massgen/backend/Function calling openai responses.md +1161 -0
- massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
- massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
- massgen/backend/__init__.py +25 -0
- massgen/backend/base.py +180 -0
- massgen/backend/chat_completions.py +228 -0
- massgen/backend/claude.py +661 -0
- massgen/backend/gemini.py +652 -0
- massgen/backend/grok.py +187 -0
- massgen/backend/response.py +397 -0
- massgen/chat_agent.py +440 -0
- massgen/cli.py +686 -0
- massgen/configs/README.md +293 -0
- massgen/configs/creative_team.yaml +53 -0
- massgen/configs/gemini_4o_claude.yaml +31 -0
- massgen/configs/news_analysis.yaml +51 -0
- massgen/configs/research_team.yaml +51 -0
- massgen/configs/single_agent.yaml +18 -0
- massgen/configs/single_flash2.5.yaml +44 -0
- massgen/configs/technical_analysis.yaml +51 -0
- massgen/configs/three_agents_default.yaml +31 -0
- massgen/configs/travel_planning.yaml +51 -0
- massgen/configs/two_agents.yaml +39 -0
- massgen/frontend/__init__.py +20 -0
- massgen/frontend/coordination_ui.py +945 -0
- massgen/frontend/displays/__init__.py +24 -0
- massgen/frontend/displays/base_display.py +83 -0
- massgen/frontend/displays/rich_terminal_display.py +3497 -0
- massgen/frontend/displays/simple_display.py +93 -0
- massgen/frontend/displays/terminal_display.py +381 -0
- massgen/frontend/logging/__init__.py +9 -0
- massgen/frontend/logging/realtime_logger.py +197 -0
- massgen/message_templates.py +431 -0
- massgen/orchestrator.py +1222 -0
- massgen/tests/__init__.py +10 -0
- massgen/tests/multi_turn_conversation_design.md +214 -0
- massgen/tests/multiturn_llm_input_analysis.md +189 -0
- massgen/tests/test_case_studies.md +113 -0
- massgen/tests/test_claude_backend.py +310 -0
- massgen/tests/test_grok_backend.py +160 -0
- massgen/tests/test_message_context_building.py +293 -0
- massgen/tests/test_rich_terminal_display.py +378 -0
- massgen/tests/test_v3_3agents.py +117 -0
- massgen/tests/test_v3_simple.py +216 -0
- massgen/tests/test_v3_three_agents.py +272 -0
- massgen/tests/test_v3_two_agents.py +176 -0
- massgen/utils.py +79 -0
- massgen/v1/README.md +330 -0
- massgen/v1/__init__.py +91 -0
- massgen/v1/agent.py +605 -0
- massgen/v1/agents.py +330 -0
- massgen/v1/backends/gemini.py +584 -0
- massgen/v1/backends/grok.py +410 -0
- massgen/v1/backends/oai.py +571 -0
- massgen/v1/cli.py +351 -0
- massgen/v1/config.py +169 -0
- massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
- massgen/v1/examples/fast_config.yaml +44 -0
- massgen/v1/examples/production.yaml +70 -0
- massgen/v1/examples/single_agent.yaml +39 -0
- massgen/v1/logging.py +974 -0
- massgen/v1/main.py +368 -0
- massgen/v1/orchestrator.py +1138 -0
- massgen/v1/streaming_display.py +1190 -0
- massgen/v1/tools.py +160 -0
- massgen/v1/types.py +245 -0
- massgen/v1/utils.py +199 -0
- massgen-0.0.3.dist-info/METADATA +568 -0
- massgen-0.0.3.dist-info/RECORD +76 -0
- massgen-0.0.3.dist-info/WHEEL +5 -0
- massgen-0.0.3.dist-info/entry_points.txt +2 -0
- massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
- massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/backend/chat_completions.py

@@ -0,0 +1,228 @@
+from __future__ import annotations
+
+"""
+Base class for backends using OpenAI Chat Completions API format.
+Handles common message processing, tool conversion, and streaming patterns.
+"""
+
+import os
+from typing import Dict, List, Any, AsyncGenerator, Optional
+from .base import LLMBackend, StreamChunk
+
+
+class ChatCompletionsBackend(LLMBackend):
+    """Base class for backends using Chat Completions API with shared streaming logic."""
+
+    def __init__(self, api_key: Optional[str] = None, **kwargs):
+        super().__init__(api_key, **kwargs)
+
+    def convert_tools_to_chat_completions_format(
+        self, tools: List[Dict[str, Any]]
+    ) -> List[Dict[str, Any]]:
+        """Convert tools from Response API format to Chat Completions format if needed.
+
+        Response API format: {"type": "function", "name": ..., "description": ..., "parameters": ...}
+        Chat Completions format: {"type": "function", "function": {"name": ..., "description": ..., "parameters": ...}}
+        """
+        if not tools:
+            return tools
+
+        converted_tools = []
+        for tool in tools:
+            if tool.get("type") == "function":
+                if "function" in tool:
+                    # Already in Chat Completions format
+                    converted_tools.append(tool)
+                elif "name" in tool and "description" in tool:
+                    # Response API format - convert to Chat Completions format
+                    converted_tools.append(
+                        {
+                            "type": "function",
+                            "function": {
+                                "name": tool["name"],
+                                "description": tool["description"],
+                                "parameters": tool.get("parameters", {}),
+                            },
+                        }
+                    )
+                else:
+                    # Unknown format - keep as-is
+                    converted_tools.append(tool)
+            else:
+                # Non-function tool - keep as-is
+                converted_tools.append(tool)
+
+        return converted_tools
+
+    async def handle_chat_completions_stream(
+        self, stream, enable_web_search: bool = False
+    ) -> AsyncGenerator[StreamChunk, None]:
+        """Handle standard Chat Completions API streaming format."""
+        content = ""
+        current_tool_calls = {}
+        search_sources_used = 0
+        citations = []
+
+        async for chunk in stream:
+            try:
+                if hasattr(chunk, "choices") and chunk.choices:
+                    choice = chunk.choices[0]
+
+                    # Handle content delta
+                    if hasattr(choice, "delta") and choice.delta:
+                        if hasattr(choice.delta, "content") and choice.delta.content:
+                            content_chunk = choice.delta.content
+                            content += content_chunk
+                            yield StreamChunk(type="content", content=content_chunk)
+
+                        # Handle tool calls streaming
+                        if (
+                            hasattr(choice.delta, "tool_calls")
+                            and choice.delta.tool_calls
+                        ):
+                            for tool_call_delta in choice.delta.tool_calls:
+                                index = getattr(tool_call_delta, "index", 0)
+
+                                if index not in current_tool_calls:
+                                    current_tool_calls[index] = {
+                                        "id": "",
+                                        "name": "",
+                                        "arguments": "",
+                                    }
+
+                                if (
+                                    hasattr(tool_call_delta, "id")
+                                    and tool_call_delta.id
+                                ):
+                                    current_tool_calls[index]["id"] = tool_call_delta.id
+
+                                if (
+                                    hasattr(tool_call_delta, "function")
+                                    and tool_call_delta.function
+                                ):
+                                    if (
+                                        hasattr(tool_call_delta.function, "name")
+                                        and tool_call_delta.function.name
+                                    ):
+                                        current_tool_calls[index][
+                                            "name"
+                                        ] = tool_call_delta.function.name
+
+                                    if (
+                                        hasattr(tool_call_delta.function, "arguments")
+                                        and tool_call_delta.function.arguments
+                                    ):
+                                        current_tool_calls[index][
+                                            "arguments"
+                                        ] += tool_call_delta.function.arguments
+
+                    # Handle finish reason
+                    if hasattr(choice, "finish_reason") and choice.finish_reason:
+                        if choice.finish_reason == "tool_calls" and current_tool_calls:
+                            # Convert accumulated tool calls to final format
+                            final_tool_calls = []
+                            for index in sorted(current_tool_calls.keys()):
+                                tool_call = current_tool_calls[index]
+
+                                # Parse arguments as JSON
+                                arguments = tool_call["arguments"]
+                                if isinstance(arguments, str):
+                                    try:
+                                        import json
+
+                                        arguments = (
+                                            json.loads(arguments)
+                                            if arguments.strip()
+                                            else {}
+                                        )
+                                    except json.JSONDecodeError:
+                                        arguments = {}
+
+                                final_tool_calls.append(
+                                    {
+                                        "id": tool_call["id"] or f"call_{index}",
+                                        "type": "function",
+                                        "function": {
+                                            "name": tool_call["name"],
+                                            "arguments": arguments,
+                                        },
+                                    }
+                                )
+
+                            yield StreamChunk(
+                                type="tool_calls", tool_calls=final_tool_calls
+                            )
+
+                            # Build and yield complete message
+                            complete_message = {
+                                "role": "assistant",
+                                "content": content.strip(),
+                            }
+                            if final_tool_calls:
+                                complete_message["tool_calls"] = final_tool_calls
+                            yield StreamChunk(
+                                type="complete_message",
+                                complete_message=complete_message,
+                            )
+                        elif choice.finish_reason in ["stop", "length"]:
+                            if search_sources_used > 0:
+                                yield StreamChunk(
+                                    type="content",
+                                    content=f"\n✅ [Live Search Complete] Used {search_sources_used} sources\n",
+                                )
+
+                            # Check for citations before building complete message
+                            if (
+                                hasattr(chunk, "citations")
+                                and chunk.citations
+                                and len(chunk.citations) > 0
+                            ):
+                                if enable_web_search:
+                                    citation_text = "\n📚 **Citations:**\n"
+                                    for i, citation in enumerate(chunk.citations, 1):
+                                        citation_text += f"{i}. {citation}\n"
+                                    yield StreamChunk(
+                                        type="content", content=citation_text
+                                    )
+
+                            # Build and yield complete message (no tool calls)
+                            complete_message = {
+                                "role": "assistant",
+                                "content": content.strip(),
+                            }
+                            yield StreamChunk(
+                                type="complete_message",
+                                complete_message=complete_message,
+                            )
+
+                            yield StreamChunk(type="done")
+                            return
+
+                # Check for usage information (search sources) and citations
+                if hasattr(chunk, "usage") and chunk.usage:
+                    if (
+                        hasattr(chunk.usage, "num_sources_used")
+                        and chunk.usage.num_sources_used
+                    ):
+                        search_sources_used = chunk.usage.num_sources_used
+                        if enable_web_search and search_sources_used > 0:
+                            yield StreamChunk(
+                                type="content",
+                                content=f"\n🔍 [Live Search] Using {search_sources_used} sources for real-time data\n",
+                            )
+
+            except Exception as chunk_error:
+                yield StreamChunk(
+                    type="error", error=f"Chunk processing error: {chunk_error}"
+                )
+                continue
+
+        yield StreamChunk(type="done")
+
+    def extract_tool_name(self, tool_call: Dict[str, Any]) -> str:
+        """Extract tool name from Chat Completions format."""
+        return tool_call.get("function", {}).get("name", "unknown")
+
+    def extract_tool_arguments(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
+        """Extract tool arguments from Chat Completions format."""
+        return tool_call.get("function", {}).get("arguments", {})
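For illustration, a minimal sketch (not part of the package) of the transformation convert_tools_to_chat_completions_format performs; the get_weather tool is a hypothetical example, and the two shapes follow the method's docstring above:

# Hypothetical tool in Response API format (flat name/description/parameters).
response_api_tool = {
    "type": "function",
    "name": "get_weather",
    "description": "Look up the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

# Passing [response_api_tool] through the converter should yield the nested
# Chat Completions shape:
# {
#     "type": "function",
#     "function": {
#         "name": "get_weather",
#         "description": "Look up the current weather for a city.",
#         "parameters": {...the same parameters dict...},
#     },
# }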
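Likewise, a hedged sketch of how a caller might consume handle_chat_completions_stream; the backend and stream objects here are hypothetical stand-ins, while the chunk types (content, tool_calls, error, done) and the extract_tool_name/extract_tool_arguments helpers are those defined in the diff above:

async def consume(backend, stream):
    # Iterate the StreamChunk objects emitted by the shared handler.
    async for chunk in backend.handle_chat_completions_stream(stream):
        if chunk.type == "content":
            print(chunk.content, end="", flush=True)
        elif chunk.type == "tool_calls":
            # The helpers understand the Chat Completions tool-call
            # shape accumulated during streaming.
            for call in chunk.tool_calls:
                name = backend.extract_tool_name(call)
                args = backend.extract_tool_arguments(call)
                print(f"\n[tool call] {name} {args}")
        elif chunk.type == "error":
            print(f"\n[stream error] {chunk.error}")
        elif chunk.type == "done":
            break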