massgen 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of massgen might be problematic.
- massgen/__init__.py +1 -1
- massgen/backend/base_with_custom_tool_and_mcp.py +453 -23
- massgen/backend/capabilities.py +39 -0
- massgen/backend/chat_completions.py +111 -197
- massgen/backend/claude.py +210 -181
- massgen/backend/gemini.py +1015 -1559
- massgen/backend/grok.py +3 -2
- massgen/backend/response.py +160 -220
- massgen/cli.py +73 -6
- massgen/config_builder.py +20 -54
- massgen/config_validator.py +931 -0
- massgen/configs/README.md +51 -8
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +1 -1
- massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +1 -1
- massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +1 -1
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_langgraph_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_openai_assistant_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/agentscope_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/langgraph_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/interop/openai_assistant_lesson_planner_example.yaml +50 -0
- massgen/configs/tools/custom_tools/interop/smolagent_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/two_models_with_tools_example.yaml +44 -0
- massgen/formatter/_gemini_formatter.py +61 -15
- massgen/tests/test_ag2_lesson_planner.py +223 -0
- massgen/tests/test_config_validator.py +1156 -0
- massgen/tests/test_langgraph_lesson_planner.py +223 -0
- massgen/tool/__init__.py +2 -9
- massgen/tool/_decorators.py +52 -0
- massgen/tool/_extraframework_agents/ag2_lesson_planner_tool.py +251 -0
- massgen/tool/_extraframework_agents/agentscope_lesson_planner_tool.py +303 -0
- massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py +275 -0
- massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py +247 -0
- massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py +180 -0
- massgen/tool/_manager.py +102 -16
- massgen/tool/_registered_tool.py +3 -0
- massgen/tool/_result.py +3 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/METADATA +104 -76
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/RECORD +50 -39
- massgen/backend/gemini_mcp_manager.py +0 -545
- massgen/backend/gemini_trackers.py +0 -344
- massgen/configs/tools/custom_tools/multimodal_tools/playwright_with_img_understanding.yaml +0 -98
- massgen/configs/tools/custom_tools/multimodal_tools/understand_video_example.yaml +0 -54
- massgen/tools/__init__.py +0 -8
- massgen/tools/_planning_mcp_server.py +0 -520
- massgen/tools/planning_dataclasses.py +0 -434
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/WHEEL +0 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/top_level.txt +0 -0
massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py ADDED
@@ -0,0 +1,247 @@
# -*- coding: utf-8 -*-
"""
OpenAI Assistant Lesson Planner Tool (Multi-Agent Streaming Version)
This tool demonstrates interoperability by wrapping OpenAI's Chat Completions API with streaming support
and multi-agent collaboration pattern similar to AG2.
"""

import os
from typing import Any, AsyncGenerator, Dict, List

from openai import AsyncOpenAI

from massgen.tool import context_params
from massgen.tool._result import ExecutionResult, TextContent

# Define role-specific system prompts (similar to AG2 agents)
CURRICULUM_AGENT_PROMPT = """You are a curriculum standards expert for fourth grade education.
When given a topic, you provide relevant grade-level standards and learning objectives.
Format every response as:
STANDARDS:
- [Standard 1]
- [Standard 2]
OBJECTIVES:
- By the end of this lesson, students will be able to [objective 1]
- By the end of this lesson, students will be able to [objective 2]"""

LESSON_PLANNER_AGENT_PROMPT = """You are a lesson planning specialist.
Given standards and objectives, you create detailed lesson plans including:
- Opening/Hook (5-10 minutes)
- Main Activity (20-30 minutes)
- Practice Activity (15-20 minutes)
- Assessment/Closure (5-10 minutes)
Format as a structured lesson plan with clear timing and materials needed."""

LESSON_REVIEWER_AGENT_PROMPT = """You are a lesson plan reviewer who ensures:
1. Age-appropriate content and activities
2. Alignment with provided standards
3. Realistic timing
4. Clear instructions
5. Differentiation opportunities
Provide specific feedback in these areas and suggest improvements if needed."""

LESSON_FORMATTER_AGENT_PROMPT = """You are a lesson plan formatter. Format the complete plan as follows:
<title>Lesson plan title</title>
<standards>Standards covered</standards>
<learning_objectives>Key learning objectives</learning_objectives>
<materials>Materials required</materials>
<activities>Detailed lesson plan activities with timing</activities>
<assessment>Assessment details</assessment>"""


async def run_agent_step(
    client: AsyncOpenAI,
    role_prompt: str,
    user_message: str,
    temperature: float = 0.7,
) -> str:
    """
    Run a single agent step with streaming and collect the full response.

    Args:
        client: AsyncOpenAI client
        role_prompt: System prompt for this agent role
        user_message: User message to process
        temperature: Temperature for generation

    Returns:
        Complete response from the agent
    """
    messages = [
        {"role": "system", "content": role_prompt},
        {"role": "user", "content": user_message},
    ]

    stream = await client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        stream=True,
        temperature=temperature,
    )

    full_response = ""
    async for chunk in stream:
        if chunk.choices and len(chunk.choices) > 0:
            delta = chunk.choices[0].delta
            if delta.content:
                full_response += delta.content

    return full_response


@context_params("prompt")
async def openai_assistant_lesson_planner(
    prompt: List[Dict[str, Any]],
) -> AsyncGenerator[ExecutionResult, None]:
    """
    MassGen custom tool wrapper for OpenAI lesson planner with multi-agent collaboration.

    This version uses multiple specialized agents (similar to AG2) to collaboratively create
    a lesson plan through sequential steps:
    1. Curriculum Agent: Identifies standards and objectives
    2. Lesson Planner Agent: Creates the detailed lesson plan
    3. Lesson Reviewer Agent: Reviews and provides feedback
    4. Formatter Agent: Formats the final plan

    Args:
        prompt: processed message list from orchestrator (auto-injected via execution_context)

    Yields:
        ExecutionResult containing text chunks as they arrive, or error messages
    """
    # Get API key from environment
    api_key = os.getenv("OPENAI_API_KEY")

    if not api_key:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
            ],
        )
        return

    if not prompt:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="Error: No messages provided for lesson planning."),
            ],
        )
        return

    try:
        # Initialize OpenAI client
        client = AsyncOpenAI(api_key=api_key)

        # Extract the user's request
        user_request = str(prompt)

        # Yield an initial message
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="OpenAI Lesson Planner (Multi-Agent Collaboration):\n\n"),
            ],
        )

        # Step 1: Curriculum Agent - Determine standards and objectives
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="[Curriculum Agent] Identifying standards and objectives...\n"),
            ],
            is_log=True,
        )

        standards_and_objectives = await run_agent_step(
            client,
            CURRICULUM_AGENT_PROMPT,
            f"Please provide fourth grade standards and objectives for: {user_request}",
        )

        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"{standards_and_objectives}\n\n"),
            ],
            is_log=True,
        )

        # Step 2: Lesson Planner Agent - Create detailed lesson plan
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="[Lesson Planner Agent] Creating detailed lesson plan...\n"),
            ],
            is_log=True,
        )

        lesson_plan = await run_agent_step(
            client,
            LESSON_PLANNER_AGENT_PROMPT,
            f"Based on these standards and objectives:\n{standards_and_objectives}\n\nCreate a detailed lesson plan for: {user_request}",
        )

        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"{lesson_plan}\n\n"),
            ],
            is_log=True,
        )

        # Step 3: Lesson Reviewer Agent - Review and provide feedback
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="[Lesson Reviewer Agent] Reviewing lesson plan...\n"),
            ],
            is_log=True,
        )

        review_feedback = await run_agent_step(
            client,
            LESSON_REVIEWER_AGENT_PROMPT,
            f"Review this lesson plan:\n{lesson_plan}\n\nProvide feedback and suggest improvements.",
        )

        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"{review_feedback}\n\n"),
            ],
            is_log=True,
        )

        # Step 4: Formatter Agent - Format the final plan with streaming
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="[Formatter Agent] Formatting final lesson plan...\n\n"),
            ],
            is_log=True,
        )

        messages = [
            {"role": "system", "content": LESSON_FORMATTER_AGENT_PROMPT},
            {
                "role": "user",
                "content": f"Format this complete lesson plan:\n\nStandards and Objectives:\n{standards_and_objectives}\n\nLesson Plan:\n{lesson_plan}\n\nReview Feedback:\n{review_feedback}",
            },
        ]

        stream = await client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            stream=True,
            temperature=0.7,
        )

        # Stream the final formatted output
        async for chunk in stream:
            if chunk.choices and len(chunk.choices) > 0:
                delta = chunk.choices[0].delta
                if delta.content:
                    yield ExecutionResult(
                        output_blocks=[
                            TextContent(data=delta.content),
                        ],
                    )

    except Exception as e:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"\nError during lesson planning: {str(e)}"),
            ],
        )
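For reference, a minimal standalone driver for the tool above might look like the following sketch. This is an assumption about direct use: in normal operation MassGen's orchestrator invokes the tool through ToolManager and injects `prompt` from the execution context, and the example topic is made up. The sketch assumes the `@context_params` decorator leaves the function directly callable and that `TextContent` exposes its `data` field.

# Hypothetical standalone driver for openai_assistant_lesson_planner (sketch only).
# Assumes OPENAI_API_KEY is set; normally ToolManager drives this tool.
import asyncio

from massgen.tool._extraframework_agents.openai_assistant_lesson_planner_tool import (
    openai_assistant_lesson_planner,
)


async def main() -> None:
    prompt = [{"role": "user", "content": "A fourth grade lesson on the water cycle"}]
    async for result in openai_assistant_lesson_planner(prompt=prompt):
        if result.is_log:
            continue  # skip progress messages from the intermediate agents
        for block in result.output_blocks:
            print(block.data, end="", flush=True)


asyncio.run(main())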
massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py ADDED
@@ -0,0 +1,180 @@
# -*- coding: utf-8 -*-
"""
SmolAgent Lesson Planner Tool
This tool demonstrates interoperability by wrapping HuggingFace's SmolAgent framework as a MassGen custom tool.
"""

import os
from typing import Any, AsyncGenerator, Dict, List

from smolagents import CodeAgent, LiteLLMModel, tool

from massgen.tool import context_params
from massgen.tool._result import ExecutionResult, TextContent


def run_smolagent_lesson_planner_agent(
    messages: List[Dict[str, Any]],
    api_key: str,
) -> str:
    """
    Core SmolAgent lesson planner agent - pure SmolAgent implementation.

    This function contains the pure SmolAgent logic for creating lesson plans
    using custom tools and CodeAgent.

    Args:
        messages: Complete message history from orchestrator
        api_key: OpenAI API key for the agents

    Returns:
        The formatted lesson plan as a string

    Raises:
        Exception: Any errors during agent execution
    """
    if not messages:
        raise ValueError("No messages provided for lesson planning.")

    # Define custom tools for the lesson planning workflow
    @tool
    def get_curriculum_standards(topic: str) -> str:
        """
        Determine fourth grade curriculum standards and learning objectives for a given topic.

        Args:
            topic: The lesson topic to get standards for

        Returns:
            A formatted string with standards and objectives
        """
        # This tool would interact with the LLM to generate standards
        return f"Generate fourth grade curriculum standards and learning objectives for: {topic}"

    @tool
    def create_lesson_plan(topic: str, standards: str) -> str:
        """
        Create a detailed lesson plan based on topic and standards.

        Args:
            topic: The lesson topic
            standards: The curriculum standards and objectives

        Returns:
            A detailed lesson plan with activities and timing
        """
        return f"Create a detailed lesson plan for '{topic}' based on these standards: {standards}"

    @tool
    def review_lesson_plan(lesson_plan: str) -> str:
        """
        Review a lesson plan for age-appropriateness, timing, and engagement.

        Args:
            lesson_plan: The lesson plan to review

        Returns:
            An improved version of the lesson plan
        """
        return f"Review and improve this lesson plan: {lesson_plan}"

    @tool
    def format_lesson_plan(lesson_plan: str) -> str:
        """
        Format a lesson plan to a standardized structure.

        Args:
            lesson_plan: The lesson plan to format

        Returns:
            A formatted lesson plan with XML-like tags
        """
        return (
            f"Format this lesson plan with the following structure:\n"
            f"<title>Lesson plan title</title>\n"
            f"<standards>Standards covered</standards>\n"
            f"<learning_objectives>Key learning objectives</learning_objectives>\n"
            f"<materials>Materials required</materials>\n"
            f"<activities>Detailed lesson plan activities with timing</activities>\n"
            f"<assessment>Assessment details</assessment>\n\n"
            f"Lesson plan to format: {lesson_plan}"
        )

    # Initialize the model
    model = LiteLLMModel(
        model_id="openai/gpt-4o",
        api_key=api_key,
    )

    # Create the agent with custom tools
    agent = CodeAgent(
        tools=[get_curriculum_standards, create_lesson_plan, review_lesson_plan, format_lesson_plan],
        model=model,
        max_steps=10,
    )

    # Build the task from messages
    task = f"Create a comprehensive fourth grade lesson plan for: {messages}\n\n"
    task += "Please follow these steps:\n"
    task += "1. Use get_curriculum_standards to identify relevant standards\n"
    task += "2. Use create_lesson_plan to create a detailed plan\n"
    task += "3. Use review_lesson_plan to review and improve the plan\n"
    task += "4. Use format_lesson_plan to format the final output\n\n"
    task += "The final plan should include:\n"
    task += "- Opening/Hook (5-10 minutes)\n"
    task += "- Main Activity (20-30 minutes)\n"
    task += "- Practice Activity (15-20 minutes)\n"
    task += "- Assessment/Closure (5-10 minutes)"

    # Run the agent
    result = agent.run(task)

    return result


@context_params("prompt")
async def smolagent_lesson_planner(
    prompt: List[Dict[str, Any]],
) -> AsyncGenerator[ExecutionResult, None]:
    """
    MassGen custom tool wrapper for SmolAgent lesson planner.

    This is the interface exposed to MassGen's backend. It handles environment setup,
    error handling, and wraps the core agent logic in ExecutionResult.

    Args:
        prompt: processed message list from orchestrator (auto-injected via execution_context)

    Returns:
        ExecutionResult containing the formatted lesson plan or error message
    """
    # Get API key from environment
    api_key = os.getenv("OPENAI_API_KEY")

    if not api_key:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
            ],
        )
        return

    try:
        # Call the core agent function with processed messages
        lesson_plan = run_smolagent_lesson_planner_agent(
            messages=prompt,
            api_key=api_key,
        )

        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"SmolAgent Lesson Planner Result:\n\n{lesson_plan}"),
            ],
        )

    except Exception as e:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"Error creating lesson plan: {str(e)}"),
            ],
        )
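Because the core planner above is a plain synchronous function, it can also be exercised outside of MassGen. A minimal sketch follows; the topic message is invented, and it assumes `smolagents` is installed and an OpenAI key is available.

# Hypothetical direct call to the core SmolAgent planner, bypassing the MassGen wrapper.
import os

from massgen.tool._extraframework_agents.smolagent_lesson_planner_tool import (
    run_smolagent_lesson_planner_agent,
)

plan = run_smolagent_lesson_planner_agent(
    messages=[{"role": "user", "content": "Fractions on a number line"}],
    api_key=os.environ["OPENAI_API_KEY"],
)
print(plan)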
massgen/tool/_manager.py CHANGED
@@ -220,17 +220,11 @@ class ToolManager:
         if description:
             tool_schema["function"]["description"] = description
 
-        #
-
-        if arg in tool_schema["function"]["parameters"]["properties"]:
-            tool_schema["function"]["parameters"]["properties"].pop(arg)
-
-        if "required" in tool_schema["function"]["parameters"]:
-            if arg in tool_schema["function"]["parameters"]["required"]:
-                tool_schema["function"]["parameters"]["required"].remove(arg)
+        # Extract context param names from decorator
+        context_param_names = getattr(base_func, "__context_params__", set())
 
-
-
+        # Remove preset args and context params from schema
+        self._remove_params_from_schema(tool_schema, set(preset_args or {}) | context_param_names)
 
         tool_entry = RegisteredToolEntry(
             tool_name=tool_name,
@@ -239,6 +233,7 @@ class ToolManager:
             base_function=base_func,
             schema_def=tool_schema,
             preset_params=preset_args or {},
+            context_param_names=context_param_names,
             extension_model=None,
             post_processor=post_processor,
         )
@@ -290,11 +285,13 @@ class ToolManager:
     async def execute_tool(
         self,
         tool_request: dict,
+        execution_context: Optional[Dict[str, Any]] = None,
    ) -> AsyncGenerator[ExecutionResult, None]:
        """Execute a tool and return results as async generator.
 
        Args:
            tool_request: Tool execution request with name and input
+            execution_context: Optional execution context (messages, agent_id, etc.)
 
        Yields:
            ExecutionResult objects (accumulated)
@@ -313,13 +310,28 @@ class ToolManager:
 
         tool_entry = self.registered_tools[tool_name]
 
-        #
-
-
-
+        # Extract context values for marked params only
+        context_values = {}
+        if execution_context and tool_entry.context_param_names:
+            context_values = {k: v for k, v in execution_context.items() if k in tool_entry.context_param_names}
+
+        # Validate all parameters match function signature
+        self._validate_params_match_signature(
+            tool_entry.base_function,
+            tool_entry.preset_params,
+            tool_entry.context_param_names,
+            tool_request,
+            tool_name,
+        )
+
+        # Merge all parameters (validation ensures all are valid):
+        # 1. Static preset params (from registration)
+        # 2. Dynamic context values (from execution_context, marked by decorator)
+        # 3. LLM input (from tool request)
         exec_kwargs = {
-            **
-            **
+            **tool_entry.preset_params,
+            **context_values,
+            **(tool_request.get("input", {}) or {}),
         }
 
         # Prepare post-processor if exists
@@ -372,6 +384,73 @@ class ToolManager:
                 f"Tool must return ExecutionResult or Generator, got {type(result)}",
             )
 
+    @staticmethod
+    def _validate_params_match_signature(
+        func: Callable,
+        preset_params: dict,
+        context_param_names: set,
+        tool_request: dict,
+        tool_name: str,
+    ) -> None:
+        """Validate that all provided parameters match function signature.
+
+        Args:
+            func: The function to validate against
+            preset_params: Static preset parameters
+            context_param_names: Context parameter names from decorator
+            tool_request: Tool request with LLM input
+            tool_name: Tool name for error messages
+
+        Raises:
+            ValueError: If any provided parameter doesn't match function signature
+        """
+        sig = inspect.signature(func)
+        valid_params = set(sig.parameters.keys())
+
+        # Check preset args
+        invalid_preset = set(preset_params.keys()) - valid_params
+        if invalid_preset:
+            raise ValueError(
+                f"Tool '{tool_name}': preset_args contains invalid parameters: {invalid_preset}. " f"Valid parameters: {valid_params}",
+            )
+
+        # Check context params
+        invalid_context = context_param_names - valid_params
+        if invalid_context:
+            raise ValueError(
+                f"Tool '{tool_name}': @context_params decorator specifies invalid parameters: {invalid_context}. " f"Valid parameters: {valid_params}",
+            )
+
+        # Check LLM input
+        llm_input = tool_request.get("input", {}) or {}
+        invalid_llm = set(llm_input.keys()) - valid_params
+        if invalid_llm:
+            raise ValueError(
+                f"Tool '{tool_name}': LLM provided invalid parameters: {invalid_llm}. " f"Valid parameters: {valid_params}",
+            )
+
+    @staticmethod
+    def _remove_params_from_schema(tool_schema: dict, param_names: set) -> None:
+        """Remove parameters from tool schema (for preset args and context params).
+
+        Args:
+            tool_schema: The tool schema to modify
+            param_names: Set of parameter names to remove
+        """
+        for arg in param_names:
+            # Remove from properties
+            if arg in tool_schema["function"]["parameters"]["properties"]:
+                tool_schema["function"]["parameters"]["properties"].pop(arg)
+
+            # Remove from required list
+            if "required" in tool_schema["function"]["parameters"]:
+                if arg in tool_schema["function"]["parameters"]["required"]:
+                    tool_schema["function"]["parameters"]["required"].remove(arg)
+
+                # Clean up empty required list
+                if not tool_schema["function"]["parameters"]["required"]:
+                    tool_schema["function"]["parameters"].pop("required", None)
+
     def fetch_category_hints(self) -> str:
         """Get usage hints from active categories.
 
@@ -570,12 +649,19 @@ class ToolManager:
 
         func_desc = "\n\n".join(desc_parts)
 
+        # Get context param names to exclude from schema
+        context_param_names = getattr(func, "__context_params__", set())
+
         # Build parameter fields
         param_fields = {}
         for param_name, param_info in inspect.signature(func).parameters.items():
             if param_name in ["self", "cls"]:
                 continue
 
+            # Skip context params (they'll be injected at runtime)
+            if param_name in context_param_names:
+                continue
+
             if param_info.kind == inspect.Parameter.VAR_KEYWORD:
                 if not include_varkwargs:
                     continue
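ToolManager discovers injectable parameters through a `__context_params__` attribute set by the `@context_params` decorator; the new `massgen/tool/_decorators.py` (+52 lines) is not reproduced in this excerpt. A minimal decorator consistent with the `getattr` lookups above might look like the sketch below; the shipped implementation may differ, and `example_tool` is made up.

# Sketch of a context_params decorator matching the getattr(..., "__context_params__", set())
# lookups in ToolManager above; illustrative only, not the actual _decorators.py.
from typing import Any, Callable


def context_params(*names: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    def mark(func: Callable[..., Any]) -> Callable[..., Any]:
        # Record which parameters are filled from the execution context at runtime
        # (and therefore removed from the LLM-facing tool schema).
        func.__context_params__ = set(names)
        return func

    return mark


@context_params("prompt")
async def example_tool(prompt, query: str):
    ...


print(getattr(example_tool, "__context_params__", set()))  # {'prompt'}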
massgen/tool/_registered_tool.py CHANGED
@@ -32,6 +32,9 @@ class RegisteredToolEntry:
     preset_params: dict[str, Any] = field(default_factory=dict)
     """Pre-configured parameters hidden from schema."""
 
+    context_param_names: set[str] = field(default_factory=set)
+    """Parameter names to inject from execution context at runtime."""
+
     extension_model: Optional[Type[BaseModel]] = None
     """Optional model for extending the base schema."""
 
massgen/tool/_result.py CHANGED
@@ -59,6 +59,9 @@ class ExecutionResult:
     is_final: bool = True
     """Indicates if this is the final result in a stream."""
 
+    is_log: bool = False
+    """Indicates if this result is for logging purposes only."""
+
     was_interrupted: bool = False
     """Indicates if the execution was interrupted."""
 