massgen 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of massgen might be problematic. Click here for more details.

Files changed (57) hide show
  1. massgen/__init__.py +1 -1
  2. massgen/backend/base_with_custom_tool_and_mcp.py +453 -23
  3. massgen/backend/capabilities.py +39 -0
  4. massgen/backend/chat_completions.py +111 -197
  5. massgen/backend/claude.py +210 -181
  6. massgen/backend/gemini.py +1015 -1559
  7. massgen/backend/grok.py +3 -2
  8. massgen/backend/response.py +160 -220
  9. massgen/cli.py +73 -6
  10. massgen/config_builder.py +20 -54
  11. massgen/config_validator.py +931 -0
  12. massgen/configs/README.md +51 -8
  13. massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +1 -0
  14. massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +1 -1
  15. massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +1 -0
  16. massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +1 -1
  17. massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +1 -1
  18. massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +1 -0
  19. massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +1 -0
  20. massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +1 -0
  21. massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +1 -0
  22. massgen/configs/tools/custom_tools/interop/ag2_and_langgraph_lesson_planner.yaml +65 -0
  23. massgen/configs/tools/custom_tools/interop/ag2_and_openai_assistant_lesson_planner.yaml +65 -0
  24. massgen/configs/tools/custom_tools/interop/ag2_lesson_planner_example.yaml +48 -0
  25. massgen/configs/tools/custom_tools/interop/agentscope_lesson_planner_example.yaml +48 -0
  26. massgen/configs/tools/custom_tools/interop/langgraph_lesson_planner_example.yaml +49 -0
  27. massgen/configs/tools/custom_tools/interop/openai_assistant_lesson_planner_example.yaml +50 -0
  28. massgen/configs/tools/custom_tools/interop/smolagent_lesson_planner_example.yaml +49 -0
  29. massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +1 -0
  30. massgen/configs/tools/custom_tools/two_models_with_tools_example.yaml +44 -0
  31. massgen/formatter/_gemini_formatter.py +61 -15
  32. massgen/tests/test_ag2_lesson_planner.py +223 -0
  33. massgen/tests/test_config_validator.py +1156 -0
  34. massgen/tests/test_langgraph_lesson_planner.py +223 -0
  35. massgen/tool/__init__.py +2 -9
  36. massgen/tool/_decorators.py +52 -0
  37. massgen/tool/_extraframework_agents/ag2_lesson_planner_tool.py +251 -0
  38. massgen/tool/_extraframework_agents/agentscope_lesson_planner_tool.py +303 -0
  39. massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py +275 -0
  40. massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py +247 -0
  41. massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py +180 -0
  42. massgen/tool/_manager.py +102 -16
  43. massgen/tool/_registered_tool.py +3 -0
  44. massgen/tool/_result.py +3 -0
  45. {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/METADATA +104 -76
  46. {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/RECORD +50 -39
  47. massgen/backend/gemini_mcp_manager.py +0 -545
  48. massgen/backend/gemini_trackers.py +0 -344
  49. massgen/configs/tools/custom_tools/multimodal_tools/playwright_with_img_understanding.yaml +0 -98
  50. massgen/configs/tools/custom_tools/multimodal_tools/understand_video_example.yaml +0 -54
  51. massgen/tools/__init__.py +0 -8
  52. massgen/tools/_planning_mcp_server.py +0 -520
  53. massgen/tools/planning_dataclasses.py +0 -434
  54. {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/WHEEL +0 -0
  55. {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/entry_points.txt +0 -0
  56. {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/licenses/LICENSE +0 -0
  57. {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,303 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ AgentScope Lesson Planner Tool
4
+ This tool demonstrates interoperability by wrapping AgentScope's multi-agent framework as a MassGen custom tool.
5
+ Compatible with AgentScope 1.0.6+
6
+ """
7
+
8
import os
from typing import Any, AsyncGenerator, Dict, List, Optional

import agentscope
from agentscope.agent import AgentBase
from agentscope.formatter import OpenAIChatFormatter
from agentscope.memory import InMemoryMemory
from agentscope.message import Msg
from agentscope.model import OpenAIChatModel

from massgen.tool import context_params
from massgen.tool._result import ExecutionResult, TextContent
20
+
21
+
22
+ class SimpleDialogAgent(AgentBase):
23
+ """
24
+ A simple dialog agent for AgentScope 1.0.6+
25
+ This is a lightweight replacement for the deprecated DialogAgent.
26
+ """
27
+
28
+ def __init__(
29
+ self,
30
+ name: str,
31
+ sys_prompt: str,
32
+ model: OpenAIChatModel,
33
+ ):
34
+ """
35
+ Initialize the dialog agent.
36
+
37
+ Args:
38
+ name: Agent name
39
+ sys_prompt: System prompt for the agent
40
+ model: OpenAI chat model instance
41
+ """
42
+ super().__init__()
43
+ self.name = name
44
+ self.sys_prompt = sys_prompt
45
+ self.model = model
46
+ self.formatter = OpenAIChatFormatter()
47
+ self.memory = InMemoryMemory()
48
+
49
+ # Store system prompt to add later (memory operations are async)
50
+ self.pending_sys_msg = None
51
+ if sys_prompt:
52
+ self.pending_sys_msg = Msg(name="system", content=sys_prompt, role="system")
53
+
54
+ async def reply(self, x: Msg = None) -> Msg:
55
+ """
56
+ Generate a reply to the input message.
57
+
58
+ Args:
59
+ x: Input message
60
+
61
+ Returns:
62
+ Response message
63
+ """
64
+ # Add system prompt on first call
65
+ if self.pending_sys_msg is not None:
66
+ await self.memory.add(self.pending_sys_msg)
67
+ self.pending_sys_msg = None
68
+
69
+ # Add user message to memory
70
+ if x is not None:
71
+ await self.memory.add(x)
72
+
73
+ # Get conversation history
74
+ history = await self.memory.get_memory()
75
+
76
+ # Format messages for the model
77
+ formatted_msgs = []
78
+ for msg in history:
79
+ formatted_msgs.append(
80
+ {
81
+ "role": msg.role,
82
+ "content": msg.content if isinstance(msg.content, str) else str(msg.content),
83
+ },
84
+ )
85
+
86
+ # Generate response using the model
87
+ response = await self.model(formatted_msgs)
88
+
89
+ # Extract content from ChatResponse
90
+ # response.content is a list like [{'type': 'text', 'text': '...'}]
91
+ content = ""
92
+ if hasattr(response, "content") and isinstance(response.content, list):
93
+ for item in response.content:
94
+ if isinstance(item, dict) and item.get("type") == "text":
95
+ content += item.get("text", "")
96
+ elif hasattr(response, "content"):
97
+ content = str(response.content)
98
+ else:
99
+ content = str(response)
100
+
101
+ # Create response message
102
+ response_msg = Msg(
103
+ name=self.name,
104
+ content=content,
105
+ role="assistant",
106
+ )
107
+
108
+ # Add response to memory
109
+ await self.memory.add(response_msg)
110
+
111
+ return response_msg
112
+
113
+
114
async def run_agentscope_lesson_planner_agent(
    messages: List[Dict[str, Any]],
    api_key: str,
) -> str:
    """
    Core AgentScope lesson planner agent - pure AgentScope implementation.

    Runs four specialized dialog agents as a sequential pipeline
    (standards -> plan -> review -> format) and returns the final
    formatted lesson plan.

    Args:
        messages: Complete message history from orchestrator
        api_key: OpenAI API key for the agents

    Returns:
        The formatted lesson plan as a string

    Raises:
        Exception: Any errors during agent execution
    """
    if not messages:
        raise ValueError("No messages provided for lesson planning.")

    # Pull the first user-authored entry out of the history; messages is
    # typically a list of dicts with 'role' and 'content'.
    user_prompt = next(
        (m.get("content", "") for m in messages if isinstance(m, dict) and m.get("role") == "user"),
        "",
    )
    if not user_prompt:
        # Fallback: use the entire messages as string
        user_prompt = str(messages)

    # Initialize AgentScope (simplified for 1.0.6)
    agentscope.init(
        project="massgen_lesson_planner",
        name="agentscope_lesson_planner_run",
        logging_level="WARNING",
    )

    # One model instance shared by every agent in the pipeline.
    model = OpenAIChatModel(
        model_name="gpt-4o",
        api_key=api_key,
        stream=False,
        generate_kwargs={
            "temperature": 0.7,
        },
    )

    # Specialized agents, one per pipeline stage.
    curriculum_agent = SimpleDialogAgent(
        name="Curriculum_Standards_Expert",
        sys_prompt="""You are a curriculum standards expert for fourth grade education.
When given a topic, you provide relevant grade-level standards and learning objectives.
Format every response as:
STANDARDS:
- [Standard 1]
- [Standard 2]
OBJECTIVES:
- By the end of this lesson, students will be able to [objective 1]
- By the end of this lesson, students will be able to [objective 2]""",
        model=model,
    )

    lesson_planner_agent = SimpleDialogAgent(
        name="Lesson_Planning_Specialist",
        sys_prompt="""You are a lesson planning specialist.
Given standards and objectives, you create detailed lesson plans including:
- Opening/Hook (5-10 minutes)
- Main Activity (20-30 minutes)
- Practice Activity (15-20 minutes)
- Assessment/Closure (5-10 minutes)
Format as a structured lesson plan with clear timing and materials needed.""",
        model=model,
    )

    reviewer_agent = SimpleDialogAgent(
        name="Lesson_Plan_Reviewer",
        sys_prompt="""You are a lesson plan reviewer who ensures:
1. Age-appropriate content and activities
2. Alignment with provided standards
3. Realistic timing
4. Clear instructions
5. Differentiation opportunities
Provide an improved version of the lesson plan incorporating your feedback.""",
        model=model,
    )

    formatter_agent = SimpleDialogAgent(
        name="Lesson_Plan_Formatter",
        sys_prompt="""You are a lesson plan formatter. Format the complete plan as follows:
<title>Lesson plan title</title>
<standards>Standards covered</standards>
<learning_objectives>Key learning objectives</learning_objectives>
<materials>Materials required</materials>
<activities>Detailed lesson plan activities with timing</activities>
<assessment>Assessment details</assessment>""",
        model=model,
    )

    # Step 1: curriculum standards for the requested topic.
    reply = await curriculum_agent.reply(
        Msg(
            name="User",
            content=f"Please provide fourth grade standards and objectives for: {user_prompt}",
            role="user",
        ),
    )

    # Steps 2-4: each remaining agent consumes the previous agent's output.
    for agent, template in (
        (lesson_planner_agent, "Based on these standards and objectives, create a detailed lesson plan:\n\n{}"),
        (reviewer_agent, "Please review and improve this lesson plan:\n\n{}"),
        (formatter_agent, "Format this reviewed lesson plan:\n\n{}"),
    ):
        reply = await agent.reply(Msg(name="User", content=template.format(reply.content), role="user"))

    # Normalize the final payload to a plain string before returning.
    return reply.content if isinstance(reply.content, str) else str(reply.content)
253
+
254
+
255
@context_params("prompt")
async def agentscope_lesson_planner(
    prompt: List[Dict[str, Any]],
) -> AsyncGenerator[ExecutionResult, None]:
    """
    MassGen custom tool wrapper for AgentScope lesson planner.

    This is the interface exposed to MassGen's backend: it validates the
    environment, delegates to the core agent function, and yields the
    outcome (or a diagnostic message) wrapped in an ExecutionResult.

    Args:
        prompt: processed message list from orchestrator (auto-injected via execution_context)

    Returns:
        ExecutionResult containing the formatted lesson plan or error message
    """
    # The underlying agents authenticate with OpenAI; bail out early when
    # the key is missing rather than failing deep inside the pipeline.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
            ],
        )
        return

    try:
        lesson_plan = await run_agentscope_lesson_planner_agent(
            messages=prompt,
            api_key=api_key,
        )
    except Exception as e:
        import traceback

        # Surface the full traceback so tool failures are debuggable
        # directly from the returned result.
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"Error creating lesson plan: {str(e)}\n\nDetails:\n{traceback.format_exc()}"),
            ],
        )
    else:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"AgentScope Lesson Planner Result:\n\n{lesson_plan}"),
            ],
        )
@@ -0,0 +1,275 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ LangGraph Lesson Planner Tool
4
+ This tool demonstrates interoperability by wrapping LangGraph's state graph functionality as a MassGen custom tool.
5
+ """
6
+
7
+ import operator
8
+ import os
9
+ from typing import Annotated, Any, AsyncGenerator, Dict, List, Sequence, TypedDict
10
+
11
+ from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
12
+ from langchain_openai import ChatOpenAI
13
+ from langgraph.graph import END, StateGraph
14
+
15
+ from massgen.tool import context_params
16
+ from massgen.tool._result import ExecutionResult, TextContent
17
+
18
+
19
class LessonPlannerState(TypedDict):
    """State for the lesson planner workflow."""

    # Running LangChain message log. The ``operator.add`` reducer means
    # values returned for this key by graph nodes are concatenated onto the
    # existing list rather than replacing it.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Topic/request extracted from the orchestrator message history.
    user_prompt: str
    # Optional extra context for the curriculum step (may be empty).
    context: str
    # Output of the curriculum node: standards and objectives text.
    standards: str
    # Output of the planner node: draft lesson plan.
    lesson_plan: str
    # Output of the reviewer node: improved lesson plan.
    reviewed_plan: str
    # Output of the formatter node: final formatted plan.
    final_plan: str
29
+
30
+
31
async def run_langgraph_lesson_planner_agent(
    messages: List[Dict[str, Any]],
    api_key: str,
) -> str:
    """
    Core LangGraph lesson planner agent - pure LangGraph implementation.

    Builds a four-node sequential state graph (curriculum -> planner ->
    reviewer -> formatter) and streams it to completion.

    Args:
        messages: Complete message history from orchestrator
        api_key: OpenAI API key for the agents

    Returns:
        The formatted lesson plan as a string

    Raises:
        ValueError: If ``messages`` is empty.
        Exception: Any errors during agent execution
    """
    if not messages:
        raise ValueError("No messages provided for lesson planning.")

    # Extract the user's topic/request from messages
    # Messages is typically a list of dicts with 'role' and 'content'
    user_prompt = ""
    for msg in messages:
        if isinstance(msg, dict) and msg.get("role") == "user":
            user_prompt = msg.get("content", "")
            break

    if not user_prompt:
        # Fallback: use the entire messages as string
        user_prompt = str(messages)

    # Initialize the language model
    llm = ChatOpenAI(
        model="gpt-4o",
        api_key=api_key,
        temperature=0.7,
    )

    # BUG FIX: every node now returns a *partial* state update instead of the
    # whole state. Keys that are not returned keep their previous values, and
    # because ``messages`` is declared with an ``operator.add`` reducer,
    # returning only the new message appends it. The previous implementation
    # returned ``state["messages"] + [response]``, which the reducer then
    # concatenated onto the existing log, duplicating every earlier message at
    # each step (it also needlessly reset downstream keys to "").

    async def curriculum_node(state: LessonPlannerState) -> Dict[str, Any]:
        """Determine curriculum standards and learning objectives."""
        system_msg = SystemMessage(
            content="""You are a curriculum standards expert for fourth grade education.
When given a topic, you provide relevant grade-level standards and learning objectives.
Format every response as:
STANDARDS:
- [Standard 1]
- [Standard 2]
OBJECTIVES:
- By the end of this lesson, students will be able to [objective 1]
- By the end of this lesson, students will be able to [objective 2]""",
        )

        # Build context message if provided
        context_info = f"\n\nAdditional Context: {state['context']}" if state.get("context") else ""
        human_msg = HumanMessage(content=f"Please provide fourth grade standards and objectives for: {state['user_prompt']}{context_info}")

        response = await llm.ainvoke([system_msg, human_msg])
        return {"messages": [response], "standards": response.content}

    async def lesson_planner_node(state: LessonPlannerState) -> Dict[str, Any]:
        """Create a detailed lesson plan based on standards."""
        system_msg = SystemMessage(
            content="""You are a lesson planning specialist.
Given standards and objectives, you create detailed lesson plans including:
- Opening/Hook (5-10 minutes)
- Main Activity (20-30 minutes)
- Practice Activity (15-20 minutes)
- Assessment/Closure (5-10 minutes)
Format as a structured lesson plan with clear timing and materials needed.""",
        )

        human_msg = HumanMessage(content=f"Based on these standards and objectives, create a detailed lesson plan:\n\n{state['standards']}")

        response = await llm.ainvoke([system_msg, human_msg])
        return {"messages": [response], "lesson_plan": response.content}

    async def lesson_reviewer_node(state: LessonPlannerState) -> Dict[str, Any]:
        """Review and provide feedback on the lesson plan."""
        system_msg = SystemMessage(
            content="""You are a lesson plan reviewer who ensures:
1. Age-appropriate content and activities
2. Alignment with provided standards
3. Realistic timing
4. Clear instructions
5. Differentiation opportunities
Provide specific feedback in these areas and suggest improvements if needed.
Then provide an improved version of the lesson plan incorporating your feedback.""",
        )

        human_msg = HumanMessage(content=f"Please review this lesson plan:\n\n{state['lesson_plan']}")

        response = await llm.ainvoke([system_msg, human_msg])
        return {"messages": [response], "reviewed_plan": response.content}

    async def formatter_node(state: LessonPlannerState) -> Dict[str, Any]:
        """Format the final lesson plan to a standard format."""
        system_msg = SystemMessage(
            content="""You are a lesson plan formatter. Format the complete plan as follows:
<title>Lesson plan title</title>
<standards>Standards covered</standards>
<learning_objectives>Key learning objectives</learning_objectives>
<materials>Materials required</materials>
<activities>Lesson plan activities</activities>
<assessment>Assessment details</assessment>""",
        )

        human_msg = HumanMessage(content=f"Format this reviewed lesson plan:\n\n{state['reviewed_plan']}")

        response = await llm.ainvoke([system_msg, human_msg])
        return {"messages": [response], "final_plan": response.content}

    # Build the state graph
    workflow = StateGraph(LessonPlannerState)

    # Add nodes
    workflow.add_node("curriculum", curriculum_node)
    workflow.add_node("planner", lesson_planner_node)
    workflow.add_node("reviewer", lesson_reviewer_node)
    workflow.add_node("formatter", formatter_node)

    # Define the linear flow
    workflow.set_entry_point("curriculum")
    workflow.add_edge("curriculum", "planner")
    workflow.add_edge("planner", "reviewer")
    workflow.add_edge("reviewer", "formatter")
    workflow.add_edge("formatter", END)

    # Compile the graph
    app = workflow.compile()

    # Initial state: all derived fields start empty.
    initial_state = {
        "messages": [],
        "user_prompt": user_prompt,
        "context": "",
        "standards": "",
        "lesson_plan": "",
        "reviewed_plan": "",
        "final_plan": "",
    }

    # Stream the workflow and capture the formatter node's output.
    final_state = None
    async for chunk in app.astream(initial_state):
        for node_name, state_update in chunk.items():
            if node_name == "formatter" and state_update.get("final_plan"):
                final_state = state_update

    lesson_plan = final_state.get("final_plan", "No lesson plan generated") if final_state else "No lesson plan generated"

    return lesson_plan
228
+
229
+
230
@context_params("prompt")
async def langgraph_lesson_planner(
    prompt: List[Dict[str, Any]],
) -> AsyncGenerator[ExecutionResult, None]:
    """
    MassGen custom tool wrapper for LangGraph lesson planner.

    This is the interface exposed to MassGen's backend. It handles environment setup,
    error handling, and wraps the core agent logic in ExecutionResult.

    Args:
        prompt: processed message list from orchestrator (auto-injected via execution_context)

    Returns:
        ExecutionResult containing the formatted lesson plan or error message
    """
    # Get API key from environment
    api_key = os.getenv("OPENAI_API_KEY")

    if not api_key:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
            ],
        )
        return

    try:
        # Call the core agent function with processed messages
        lesson_plan = await run_langgraph_lesson_planner_agent(
            messages=prompt,
            api_key=api_key,
        )

        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"LangGraph Lesson Planner Result:\n\n{lesson_plan}"),
            ],
        )

    except Exception as e:
        # Consistency fix: include the full traceback in the error payload,
        # matching the sibling AgentScope wrapper, so tool failures are
        # debuggable directly from the returned result.
        import traceback

        error_details = traceback.format_exc()
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"Error creating lesson plan: {str(e)}\n\nDetails:\n{error_details}"),
            ],
        )