massgen-0.1.4-py3-none-any.whl → massgen-0.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of massgen has been flagged as possibly problematic.
Files changed (84)
  1. massgen/__init__.py +1 -1
  2. massgen/backend/base_with_custom_tool_and_mcp.py +453 -23
  3. massgen/backend/capabilities.py +39 -0
  4. massgen/backend/chat_completions.py +111 -197
  5. massgen/backend/claude.py +210 -181
  6. massgen/backend/gemini.py +1015 -1559
  7. massgen/backend/grok.py +3 -2
  8. massgen/backend/response.py +160 -220
  9. massgen/chat_agent.py +340 -20
  10. massgen/cli.py +399 -25
  11. massgen/config_builder.py +20 -54
  12. massgen/config_validator.py +931 -0
  13. massgen/configs/README.md +95 -10
  14. massgen/configs/memory/gpt5mini_gemini_baseline_research_to_implementation.yaml +94 -0
  15. massgen/configs/memory/gpt5mini_gemini_context_window_management.yaml +187 -0
  16. massgen/configs/memory/gpt5mini_gemini_research_to_implementation.yaml +127 -0
  17. massgen/configs/memory/gpt5mini_high_reasoning_gemini.yaml +107 -0
  18. massgen/configs/memory/single_agent_compression_test.yaml +64 -0
  19. massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +1 -0
  20. massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +1 -1
  21. massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +1 -0
  22. massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +1 -1
  23. massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +1 -1
  24. massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +1 -0
  25. massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +1 -0
  26. massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +1 -0
  27. massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +1 -0
  28. massgen/configs/tools/custom_tools/interop/ag2_and_langgraph_lesson_planner.yaml +65 -0
  29. massgen/configs/tools/custom_tools/interop/ag2_and_openai_assistant_lesson_planner.yaml +65 -0
  30. massgen/configs/tools/custom_tools/interop/ag2_lesson_planner_example.yaml +48 -0
  31. massgen/configs/tools/custom_tools/interop/agentscope_lesson_planner_example.yaml +48 -0
  32. massgen/configs/tools/custom_tools/interop/langgraph_lesson_planner_example.yaml +49 -0
  33. massgen/configs/tools/custom_tools/interop/openai_assistant_lesson_planner_example.yaml +50 -0
  34. massgen/configs/tools/custom_tools/interop/smolagent_lesson_planner_example.yaml +49 -0
  35. massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +1 -0
  36. massgen/configs/tools/custom_tools/two_models_with_tools_example.yaml +44 -0
  37. massgen/formatter/_gemini_formatter.py +61 -15
  38. massgen/memory/README.md +277 -0
  39. massgen/memory/__init__.py +26 -0
  40. massgen/memory/_base.py +193 -0
  41. massgen/memory/_compression.py +237 -0
  42. massgen/memory/_context_monitor.py +211 -0
  43. massgen/memory/_conversation.py +255 -0
  44. massgen/memory/_fact_extraction_prompts.py +333 -0
  45. massgen/memory/_mem0_adapters.py +257 -0
  46. massgen/memory/_persistent.py +687 -0
  47. massgen/memory/docker-compose.qdrant.yml +36 -0
  48. massgen/memory/docs/DESIGN.md +388 -0
  49. massgen/memory/docs/QUICKSTART.md +409 -0
  50. massgen/memory/docs/SUMMARY.md +319 -0
  51. massgen/memory/docs/agent_use_memory.md +408 -0
  52. massgen/memory/docs/orchestrator_use_memory.md +586 -0
  53. massgen/memory/examples.py +237 -0
  54. massgen/orchestrator.py +207 -7
  55. massgen/tests/memory/test_agent_compression.py +174 -0
  56. massgen/tests/memory/test_context_window_management.py +286 -0
  57. massgen/tests/memory/test_force_compression.py +154 -0
  58. massgen/tests/memory/test_simple_compression.py +147 -0
  59. massgen/tests/test_ag2_lesson_planner.py +223 -0
  60. massgen/tests/test_agent_memory.py +534 -0
  61. massgen/tests/test_config_validator.py +1156 -0
  62. massgen/tests/test_conversation_memory.py +382 -0
  63. massgen/tests/test_langgraph_lesson_planner.py +223 -0
  64. massgen/tests/test_orchestrator_memory.py +620 -0
  65. massgen/tests/test_persistent_memory.py +435 -0
  66. massgen/token_manager/token_manager.py +6 -0
  67. massgen/tool/__init__.py +2 -9
  68. massgen/tool/_decorators.py +52 -0
  69. massgen/tool/_extraframework_agents/ag2_lesson_planner_tool.py +251 -0
  70. massgen/tool/_extraframework_agents/agentscope_lesson_planner_tool.py +303 -0
  71. massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py +275 -0
  72. massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py +247 -0
  73. massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py +180 -0
  74. massgen/tool/_manager.py +102 -16
  75. massgen/tool/_registered_tool.py +3 -0
  76. massgen/tool/_result.py +3 -0
  77. {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/METADATA +138 -77
  78. {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/RECORD +82 -37
  79. massgen/backend/gemini_mcp_manager.py +0 -545
  80. massgen/backend/gemini_trackers.py +0 -344
  81. {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/WHEEL +0 -0
  82. {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/entry_points.txt +0 -0
  83. {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/licenses/LICENSE +0 -0
  84. {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/top_level.txt +0 -0
massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+"""
+LangGraph Lesson Planner Tool
+This tool demonstrates interoperability by wrapping LangGraph's state graph functionality as a MassGen custom tool.
+"""
+
+import operator
+import os
+from typing import Annotated, Any, AsyncGenerator, Dict, List, Sequence, TypedDict
+
+from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
+from langchain_openai import ChatOpenAI
+from langgraph.graph import END, StateGraph
+
+from massgen.tool import context_params
+from massgen.tool._result import ExecutionResult, TextContent
+
+
+class LessonPlannerState(TypedDict):
+    """State for the lesson planner workflow."""
+
+    messages: Annotated[Sequence[BaseMessage], operator.add]
+    user_prompt: str
+    context: str
+    standards: str
+    lesson_plan: str
+    reviewed_plan: str
+    final_plan: str
+
+
+async def run_langgraph_lesson_planner_agent(
+    messages: List[Dict[str, Any]],
+    api_key: str,
+) -> str:
+    """
+    Core LangGraph lesson planner agent - pure LangGraph implementation.
+
+    This function contains the pure LangGraph logic for creating lesson plans
+    using a state graph architecture with multiple specialized nodes.
+
+    Args:
+        messages: Complete message history from orchestrator
+        api_key: OpenAI API key for the agents
+
+    Returns:
+        The formatted lesson plan as a string
+
+    Raises:
+        Exception: Any errors during agent execution
+    """
+    if not messages:
+        raise ValueError("No messages provided for lesson planning.")
+
+    # Extract the user's topic/request from messages
+    user_prompt = ""
+    for msg in messages:
+        if isinstance(msg, dict) and msg.get("role") == "user":
+            user_prompt = msg.get("content", "")
+            break
+
+    if not user_prompt:
+        # Fallback: use the entire messages as string
+        user_prompt = str(messages)
+    # Initialize the language model
+    llm = ChatOpenAI(
+        model="gpt-4o",
+        api_key=api_key,
+        temperature=0.7,
+    )
+
+    # Define the curriculum standards node
+    async def curriculum_node(state: LessonPlannerState) -> LessonPlannerState:
+        """Determine curriculum standards and learning objectives."""
+        system_msg = SystemMessage(
+            content="""You are a curriculum standards expert for fourth grade education.
+When given a topic, you provide relevant grade-level standards and learning objectives.
+Format every response as:
+STANDARDS:
+- [Standard 1]
+- [Standard 2]
+OBJECTIVES:
+- By the end of this lesson, students will be able to [objective 1]
+- By the end of this lesson, students will be able to [objective 2]""",
+        )
+
+        # Build context message if provided
+        context_info = f"\n\nAdditional Context: {state['context']}" if state.get("context") else ""
+        human_msg = HumanMessage(content=f"Please provide fourth grade standards and objectives for: {state['user_prompt']}{context_info}")
+
+        messages_to_send = [system_msg, human_msg]
+        response = await llm.ainvoke(messages_to_send)
+
+        return {
+            "messages": [response],
+            "standards": response.content,
+            "user_prompt": state["user_prompt"],
+            "context": state["context"],
+            "lesson_plan": "",
+            "reviewed_plan": "",
+            "final_plan": "",
+        }
+
+    # Define the lesson planner node
+    async def lesson_planner_node(state: LessonPlannerState) -> LessonPlannerState:
+        """Create a detailed lesson plan based on standards."""
+        system_msg = SystemMessage(
+            content="""You are a lesson planning specialist.
+Given standards and objectives, you create detailed lesson plans including:
+- Opening/Hook (5-10 minutes)
+- Main Activity (20-30 minutes)
+- Practice Activity (15-20 minutes)
+- Assessment/Closure (5-10 minutes)
+Format as a structured lesson plan with clear timing and materials needed.""",
+        )
+
+        human_msg = HumanMessage(content=f"Based on these standards and objectives, create a detailed lesson plan:\n\n{state['standards']}")
+
+        messages_to_send = [system_msg, human_msg]
+        response = await llm.ainvoke(messages_to_send)
+
+        return {
+            "messages": state["messages"] + [response],
+            "lesson_plan": response.content,
+            "user_prompt": state["user_prompt"],
+            "context": state["context"],
+            "standards": state["standards"],
+            "reviewed_plan": "",
+            "final_plan": "",
+        }
+
+    # Define the lesson reviewer node
+    async def lesson_reviewer_node(state: LessonPlannerState) -> LessonPlannerState:
+        """Review and provide feedback on the lesson plan."""
+        system_msg = SystemMessage(
+            content="""You are a lesson plan reviewer who ensures:
+1. Age-appropriate content and activities
+2. Alignment with provided standards
+3. Realistic timing
+4. Clear instructions
+5. Differentiation opportunities
+Provide specific feedback in these areas and suggest improvements if needed.
+Then provide an improved version of the lesson plan incorporating your feedback.""",
+        )
+
+        human_msg = HumanMessage(content=f"Please review this lesson plan:\n\n{state['lesson_plan']}")
+
+        messages_to_send = [system_msg, human_msg]
+        response = await llm.ainvoke(messages_to_send)
+
+        return {
+            "messages": state["messages"] + [response],
+            "reviewed_plan": response.content,
+            "user_prompt": state["user_prompt"],
+            "context": state["context"],
+            "standards": state["standards"],
+            "lesson_plan": state["lesson_plan"],
+            "final_plan": "",
+        }
+
+    # Define the formatter node
+    async def formatter_node(state: LessonPlannerState) -> LessonPlannerState:
+        """Format the final lesson plan to a standard format."""
+        system_msg = SystemMessage(
+            content="""You are a lesson plan formatter. Format the complete plan as follows:
+<title>Lesson plan title</title>
+<standards>Standards covered</standards>
+<learning_objectives>Key learning objectives</learning_objectives>
+<materials>Materials required</materials>
+<activities>Lesson plan activities</activities>
+<assessment>Assessment details</assessment>""",
+        )
+
+        human_msg = HumanMessage(content=f"Format this reviewed lesson plan:\n\n{state['reviewed_plan']}")
+
+        messages_to_send = [system_msg, human_msg]
+        response = await llm.ainvoke(messages_to_send)
+
+        return {
+            "messages": state["messages"] + [response],
+            "final_plan": response.content,
+            "user_prompt": state["user_prompt"],
+            "context": state["context"],
+            "standards": state["standards"],
+            "lesson_plan": state["lesson_plan"],
+            "reviewed_plan": state["reviewed_plan"],
+        }
+
+    # Build the state graph
+    workflow = StateGraph(LessonPlannerState)
+
+    # Add nodes
+    workflow.add_node("curriculum", curriculum_node)
+    workflow.add_node("planner", lesson_planner_node)
+    workflow.add_node("reviewer", lesson_reviewer_node)
+    workflow.add_node("formatter", formatter_node)
+
+    # Define the flow
+    workflow.set_entry_point("curriculum")
+    workflow.add_edge("curriculum", "planner")
+    workflow.add_edge("planner", "reviewer")
+    workflow.add_edge("reviewer", "formatter")
+    workflow.add_edge("formatter", END)
+
+    # Compile the graph
+    app = workflow.compile()
+
+    # Execute the workflow
+    initial_state = {
+        "messages": [],
+        "user_prompt": user_prompt,
+        "context": "",
+        "standards": "",
+        "lesson_plan": "",
+        "reviewed_plan": "",
+        "final_plan": "",
+    }
+
+    # Extract the final lesson plan
+    final_state = None
+    async for chunk in app.astream(initial_state):
+        for node_name, state_update in chunk.items():
+            if node_name == "formatter" and state_update.get("final_plan"):
+                final_state = state_update
+
+    lesson_plan = final_state.get("final_plan", "No lesson plan generated") if final_state else "No lesson plan generated"
+
+    return lesson_plan
+
+
+@context_params("prompt")
+async def langgraph_lesson_planner(
+    prompt: List[Dict[str, Any]],
+) -> AsyncGenerator[ExecutionResult, None]:
+    """
+    MassGen custom tool wrapper for LangGraph lesson planner.
+
+    This is the interface exposed to MassGen's backend. It handles environment setup,
+    error handling, and wraps the core agent logic in ExecutionResult.
+
+    Args:
+        prompt: processed message list from orchestrator (auto-injected via execution_context)
+
+    Returns:
+        ExecutionResult containing the formatted lesson plan or error message
+    """
+    # Get API key from environment
+    api_key = os.getenv("OPENAI_API_KEY")
+
+    if not api_key:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
+            ],
+        )
+        return
+
+    try:
+        # Call the core agent function with processed messages
+        lesson_plan = await run_langgraph_lesson_planner_agent(
+            messages=prompt,
+            api_key=api_key,
+        )
+
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"LangGraph Lesson Planner Result:\n\n{lesson_plan}"),
+            ],
+        )
+
+    except Exception as e:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"Error creating lesson plan: {str(e)}"),
+            ],
+        )
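To see how this interop tool is driven, a minimal sketch of invoking the generator directly (outside MassGen's tool manager) follows. It assumes the @context_params decorator still accepts an explicit prompt keyword argument and that the langchain/langgraph dependencies plus OPENAI_API_KEY are available; it is illustrative only and not part of the packaged code.

import asyncio

from massgen.tool._extraframework_agents.langgraph_lesson_planner_tool import (
    langgraph_lesson_planner,
)


async def main() -> None:
    # In normal operation the orchestrator injects this message list via
    # @context_params("prompt"); here it is passed explicitly for illustration.
    messages = [{"role": "user", "content": "A fourth grade lesson on the water cycle"}]
    async for result in langgraph_lesson_planner(prompt=messages):
        for block in result.output_blocks:
            print(block.data)


asyncio.run(main())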
massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+"""
+OpenAI Assistant Lesson Planner Tool (Multi-Agent Streaming Version)
+This tool demonstrates interoperability by wrapping OpenAI's Chat Completions API with streaming support
+and multi-agent collaboration pattern similar to AG2.
+"""
+
+import os
+from typing import Any, AsyncGenerator, Dict, List
+
+from openai import AsyncOpenAI
+
+from massgen.tool import context_params
+from massgen.tool._result import ExecutionResult, TextContent
+
+# Define role-specific system prompts (similar to AG2 agents)
+CURRICULUM_AGENT_PROMPT = """You are a curriculum standards expert for fourth grade education.
+When given a topic, you provide relevant grade-level standards and learning objectives.
+Format every response as:
+STANDARDS:
+- [Standard 1]
+- [Standard 2]
+OBJECTIVES:
+- By the end of this lesson, students will be able to [objective 1]
+- By the end of this lesson, students will be able to [objective 2]"""
+
+LESSON_PLANNER_AGENT_PROMPT = """You are a lesson planning specialist.
+Given standards and objectives, you create detailed lesson plans including:
+- Opening/Hook (5-10 minutes)
+- Main Activity (20-30 minutes)
+- Practice Activity (15-20 minutes)
+- Assessment/Closure (5-10 minutes)
+Format as a structured lesson plan with clear timing and materials needed."""
+
+LESSON_REVIEWER_AGENT_PROMPT = """You are a lesson plan reviewer who ensures:
+1. Age-appropriate content and activities
+2. Alignment with provided standards
+3. Realistic timing
+4. Clear instructions
+5. Differentiation opportunities
+Provide specific feedback in these areas and suggest improvements if needed."""
+
+LESSON_FORMATTER_AGENT_PROMPT = """You are a lesson plan formatter. Format the complete plan as follows:
+<title>Lesson plan title</title>
+<standards>Standards covered</standards>
+<learning_objectives>Key learning objectives</learning_objectives>
+<materials>Materials required</materials>
+<activities>Detailed lesson plan activities with timing</activities>
+<assessment>Assessment details</assessment>"""
+
+
+async def run_agent_step(
+    client: AsyncOpenAI,
+    role_prompt: str,
+    user_message: str,
+    temperature: float = 0.7,
+) -> str:
+    """
+    Run a single agent step with streaming and collect the full response.
+
+    Args:
+        client: AsyncOpenAI client
+        role_prompt: System prompt for this agent role
+        user_message: User message to process
+        temperature: Temperature for generation
+
+    Returns:
+        Complete response from the agent
+    """
+    messages = [
+        {"role": "system", "content": role_prompt},
+        {"role": "user", "content": user_message},
+    ]
+
+    stream = await client.chat.completions.create(
+        model="gpt-4o",
+        messages=messages,
+        stream=True,
+        temperature=temperature,
+    )
+
+    full_response = ""
+    async for chunk in stream:
+        if chunk.choices and len(chunk.choices) > 0:
+            delta = chunk.choices[0].delta
+            if delta.content:
+                full_response += delta.content
+
+    return full_response
+
+
+@context_params("prompt")
+async def openai_assistant_lesson_planner(
+    prompt: List[Dict[str, Any]],
+) -> AsyncGenerator[ExecutionResult, None]:
+    """
+    MassGen custom tool wrapper for OpenAI lesson planner with multi-agent collaboration.
+
+    This version uses multiple specialized agents (similar to AG2) to collaboratively create
+    a lesson plan through sequential steps:
+    1. Curriculum Agent: Identifies standards and objectives
+    2. Lesson Planner Agent: Creates the detailed lesson plan
+    3. Lesson Reviewer Agent: Reviews and provides feedback
+    4. Formatter Agent: Formats the final plan
+
+    Args:
+        prompt: processed message list from orchestrator (auto-injected via execution_context)
+
+    Yields:
+        ExecutionResult containing text chunks as they arrive, or error messages
+    """
+    # Get API key from environment
+    api_key = os.getenv("OPENAI_API_KEY")
+
+    if not api_key:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
+            ],
+        )
+        return
+
+    if not prompt:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="Error: No messages provided for lesson planning."),
+            ],
+        )
+        return
+
+    try:
+        # Initialize OpenAI client
+        client = AsyncOpenAI(api_key=api_key)
+
+        # Extract the user's request
+        user_request = str(prompt)
+
+        # Yield an initial message
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="OpenAI Lesson Planner (Multi-Agent Collaboration):\n\n"),
+            ],
+        )
+
+        # Step 1: Curriculum Agent - Determine standards and objectives
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="[Curriculum Agent] Identifying standards and objectives...\n"),
+            ],
+            is_log=True,
+        )
+
+        standards_and_objectives = await run_agent_step(
+            client,
+            CURRICULUM_AGENT_PROMPT,
+            f"Please provide fourth grade standards and objectives for: {user_request}",
+        )
+
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"{standards_and_objectives}\n\n"),
+            ],
+            is_log=True,
+        )
+
+        # Step 2: Lesson Planner Agent - Create detailed lesson plan
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="[Lesson Planner Agent] Creating detailed lesson plan...\n"),
+            ],
+            is_log=True,
+        )
+
+        lesson_plan = await run_agent_step(
+            client,
+            LESSON_PLANNER_AGENT_PROMPT,
+            f"Based on these standards and objectives:\n{standards_and_objectives}\n\nCreate a detailed lesson plan for: {user_request}",
+        )
+
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"{lesson_plan}\n\n"),
+            ],
+            is_log=True,
+        )
+
+        # Step 3: Lesson Reviewer Agent - Review and provide feedback
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="[Lesson Reviewer Agent] Reviewing lesson plan...\n"),
+            ],
+            is_log=True,
+        )
+
+        review_feedback = await run_agent_step(
+            client,
+            LESSON_REVIEWER_AGENT_PROMPT,
+            f"Review this lesson plan:\n{lesson_plan}\n\nProvide feedback and suggest improvements.",
+        )
+
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"{review_feedback}\n\n"),
+            ],
+            is_log=True,
+        )
+
+        # Step 4: Formatter Agent - Format the final plan with streaming
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="[Formatter Agent] Formatting final lesson plan...\n\n"),
+            ],
+            is_log=True,
+        )
+
+        messages = [
+            {"role": "system", "content": LESSON_FORMATTER_AGENT_PROMPT},
+            {
+                "role": "user",
+                "content": f"Format this complete lesson plan:\n\nStandards and Objectives:\n{standards_and_objectives}\n\nLesson Plan:\n{lesson_plan}\n\nReview Feedback:\n{review_feedback}",
+            },
+        ]
+
+        stream = await client.chat.completions.create(
+            model="gpt-4o",
+            messages=messages,
+            stream=True,
+            temperature=0.7,
+        )
+
+        # Stream the final formatted output
+        async for chunk in stream:
+            if chunk.choices and len(chunk.choices) > 0:
+                delta = chunk.choices[0].delta
+                if delta.content:
+                    yield ExecutionResult(
+                        output_blocks=[
+                            TextContent(data=delta.content),
+                        ],
+                    )

+    except Exception as e:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"\nError during lesson planning: {str(e)}"),
+            ],
+        )
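Because this variant streams intermediate progress with is_log=True and the final formatted plan as plain chunks, a consumer can separate the two. The sketch below makes the same assumptions as the previous example (direct keyword invocation past the decorator, OPENAI_API_KEY set) and is not part of the shipped package.

import asyncio

from massgen.tool._extraframework_agents.openai_assistant_lesson_planner_tool import (
    openai_assistant_lesson_planner,
)


async def main() -> None:
    messages = [{"role": "user", "content": "Fractions on a number line"}]
    final_chunks = []
    async for result in openai_assistant_lesson_planner(prompt=messages):
        text = "".join(block.data for block in result.output_blocks)
        if getattr(result, "is_log", False):
            print(text, end="")        # intermediate agent progress
        else:
            final_chunks.append(text)  # streamed formatter output
    print("".join(final_chunks))


asyncio.run(main())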
massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+"""
+SmolAgent Lesson Planner Tool
+This tool demonstrates interoperability by wrapping HuggingFace's SmolAgent framework as a MassGen custom tool.
+"""
+
+import os
+from typing import Any, AsyncGenerator, Dict, List
+
+from smolagents import CodeAgent, LiteLLMModel, tool
+
+from massgen.tool import context_params
+from massgen.tool._result import ExecutionResult, TextContent
+
+
+def run_smolagent_lesson_planner_agent(
+    messages: List[Dict[str, Any]],
+    api_key: str,
+) -> str:
+    """
+    Core SmolAgent lesson planner agent - pure SmolAgent implementation.
+
+    This function contains the pure SmolAgent logic for creating lesson plans
+    using custom tools and CodeAgent.
+
+    Args:
+        messages: Complete message history from orchestrator
+        api_key: OpenAI API key for the agents
+
+    Returns:
+        The formatted lesson plan as a string
+
+    Raises:
+        Exception: Any errors during agent execution
+    """
+    if not messages:
+        raise ValueError("No messages provided for lesson planning.")
+
+    # Define custom tools for the lesson planning workflow
+    @tool
+    def get_curriculum_standards(topic: str) -> str:
+        """
+        Determine fourth grade curriculum standards and learning objectives for a given topic.
+
+        Args:
+            topic: The lesson topic to get standards for
+
+        Returns:
+            A formatted string with standards and objectives
+        """
+        # This tool would interact with the LLM to generate standards
+        return f"Generate fourth grade curriculum standards and learning objectives for: {topic}"
+
+    @tool
+    def create_lesson_plan(topic: str, standards: str) -> str:
+        """
+        Create a detailed lesson plan based on topic and standards.
+
+        Args:
+            topic: The lesson topic
+            standards: The curriculum standards and objectives
+
+        Returns:
+            A detailed lesson plan with activities and timing
+        """
+        return f"Create a detailed lesson plan for '{topic}' based on these standards: {standards}"
+
+    @tool
+    def review_lesson_plan(lesson_plan: str) -> str:
+        """
+        Review a lesson plan for age-appropriateness, timing, and engagement.
+
+        Args:
+            lesson_plan: The lesson plan to review
+
+        Returns:
+            An improved version of the lesson plan
+        """
+        return f"Review and improve this lesson plan: {lesson_plan}"
+
+    @tool
+    def format_lesson_plan(lesson_plan: str) -> str:
+        """
+        Format a lesson plan to a standardized structure.
+
+        Args:
+            lesson_plan: The lesson plan to format
+
+        Returns:
+            A formatted lesson plan with XML-like tags
+        """
+        return (
+            f"Format this lesson plan with the following structure:\n"
+            f"<title>Lesson plan title</title>\n"
+            f"<standards>Standards covered</standards>\n"
+            f"<learning_objectives>Key learning objectives</learning_objectives>\n"
+            f"<materials>Materials required</materials>\n"
+            f"<activities>Detailed lesson plan activities with timing</activities>\n"
+            f"<assessment>Assessment details</assessment>\n\n"
+            f"Lesson plan to format: {lesson_plan}"
+        )
+
+    # Initialize the model
+    model = LiteLLMModel(
+        model_id="openai/gpt-4o",
+        api_key=api_key,
+    )
+
+    # Create the agent with custom tools
+    agent = CodeAgent(
+        tools=[get_curriculum_standards, create_lesson_plan, review_lesson_plan, format_lesson_plan],
+        model=model,
+        max_steps=10,
+    )
+
+    # Build the task from messages
+    task = f"Create a comprehensive fourth grade lesson plan for: {messages}\n\n"
+    task += "Please follow these steps:\n"
+    task += "1. Use get_curriculum_standards to identify relevant standards\n"
+    task += "2. Use create_lesson_plan to create a detailed plan\n"
+    task += "3. Use review_lesson_plan to review and improve the plan\n"
+    task += "4. Use format_lesson_plan to format the final output\n\n"
+    task += "The final plan should include:\n"
+    task += "- Opening/Hook (5-10 minutes)\n"
+    task += "- Main Activity (20-30 minutes)\n"
+    task += "- Practice Activity (15-20 minutes)\n"
+    task += "- Assessment/Closure (5-10 minutes)"
+
+    # Run the agent
+    result = agent.run(task)
+
+    return result
+
+
+@context_params("prompt")
+async def smolagent_lesson_planner(
+    prompt: List[Dict[str, Any]],
+) -> AsyncGenerator[ExecutionResult, None]:
+    """
+    MassGen custom tool wrapper for SmolAgent lesson planner.
+
+    This is the interface exposed to MassGen's backend. It handles environment setup,
+    error handling, and wraps the core agent logic in ExecutionResult.
+
+    Args:
+        prompt: processed message list from orchestrator (auto-injected via execution_context)
+
+    Returns:
+        ExecutionResult containing the formatted lesson plan or error message
+    """
+    # Get API key from environment
+    api_key = os.getenv("OPENAI_API_KEY")
+
+    if not api_key:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data="Error: OPENAI_API_KEY not found. Please set the environment variable."),
+            ],
+        )
+        return
+
+    try:
+        # Call the core agent function with processed messages
+        lesson_plan = run_smolagent_lesson_planner_agent(
+            messages=prompt,
+            api_key=api_key,
+        )
+
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"SmolAgent Lesson Planner Result:\n\n{lesson_plan}"),
+            ],
+        )
+
+    except Exception as e:
+        yield ExecutionResult(
+            output_blocks=[
+                TextContent(data=f"Error creating lesson plan: {str(e)}"),
+            ],
+        )
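Note that run_smolagent_lesson_planner_agent is synchronous, so the async wrapper above blocks the event loop while CodeAgent runs. If that matters in a larger deployment, one option is to offload the call to a worker thread; the helper below is a sketch of that pattern, not part of the shipped tool.

import asyncio
import os
from typing import Any, Dict, List

from massgen.tool._extraframework_agents.smolagent_lesson_planner_tool import (
    run_smolagent_lesson_planner_agent,
)


async def plan_without_blocking(messages: List[Dict[str, Any]]) -> str:
    # asyncio.to_thread runs the blocking CodeAgent loop in a worker thread,
    # keeping other agents in the orchestrator responsive in the meantime.
    return await asyncio.to_thread(
        run_smolagent_lesson_planner_agent,
        messages=messages,
        api_key=os.environ["OPENAI_API_KEY"],
    )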