edda-framework 0.9.1__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,467 @@
+ """
+ DurableAgent: durable agents powered by llm.Context.
+
+ Integrates Mirascope V2's llm.Context with durable execution:
+ - Dependency injection via llm.Context
+ - Automatic management of conversation history
+ - Each turn runs as a durable activity
+
+ Example:
+     Using DurableAgent with context::
+
+         from dataclasses import dataclass
+         from mirascope import llm
+         from edda import workflow, WorkflowContext
+         from edda.integrations.mirascope import DurableAgent, DurableDeps
+
+         @dataclass
+         class ResearchDeps:
+             documents: list[str]
+             search_index: dict[str, str]
+
+         class ResearchAgent(DurableAgent[ResearchDeps]):
+             model = "anthropic/claude-sonnet-4-20250514"
+
+             @staticmethod
+             @llm.tool()
+             def search(ctx: llm.Context[ResearchDeps], query: str) -> str:
+                 '''Search through documents.'''
+                 return ctx.deps.search_index.get(query, "No results")
+
+             def get_tools(self) -> list:
+                 return [self.search]
+
+             def build_prompt(self, ctx: llm.Context[ResearchDeps], message: str) -> list:
+                 docs = "\\n".join(ctx.deps.documents)
+                 return [
+                     llm.messages.system(f"You are a research assistant.\\nDocs:\\n{docs}"),
+                     llm.messages.user(message),
+                 ]
+
+         @workflow
+         async def research_workflow(ctx: WorkflowContext, topic: str) -> str:
+             deps = ResearchDeps(
+                 documents=["Doc 1...", "Doc 2..."],
+                 search_index={"key1": "value1"},
+             )
+             agent = ResearchAgent(ctx)
+             response = await agent.chat(deps, f"Research: {topic}")
+             return response["content"]
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Any, Generic, TypeVar
+
+ from edda.activity import activity
+ from edda.context import WorkflowContext
+
+ from .types import DurableResponse
+
+ if TYPE_CHECKING:
+     pass
+
+ # Type variable for dependency data
+ T = TypeVar("T")
+
+
+ def _import_mirascope() -> Any:
+     """Import mirascope with a helpful error message."""
+     try:
+         from mirascope import llm
+
+         return llm
+     except ImportError as e:
+         msg = (
+             "Mirascope is not installed. Install with:\n"
+             "  pip install 'mirascope[anthropic]'\n"
+             "or\n"
+             "  pip install 'edda-framework[mirascope]'"
+         )
+         raise ImportError(msg) from e
+
+
+ @dataclass
+ class DurableDeps(Generic[T]):
+     """
+     Serializable dependency container for DurableAgent.
+
+     Bridges llm.Context and Edda's durable activity system.
+     Manages both user-defined dependencies and conversation history.
+
+     Attributes:
+         data: User-defined dependency data (will be injected into llm.Context)
+         history: Conversation history (automatically managed)
+
+     Example:
+         >>> @dataclass
+         ... class MyDeps:
+         ...     api_key: str
+         ...     cache: dict[str, str]
+         ...
+         >>> deps = DurableDeps(data=MyDeps(api_key="xxx", cache={}))
+         >>> agent = MyAgent(ctx)
+         >>> await agent.chat(deps, "Hello")  # history auto-updated
+     """
+
+     data: T
+     history: list[dict[str, str]] = field(default_factory=list)
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert to JSON-serializable dictionary for activity caching."""
+         import dataclasses
+
+         # Handle dataclass or dict data
+         if dataclasses.is_dataclass(self.data) and not isinstance(self.data, type):
+             data_dict: dict[str, Any] = dataclasses.asdict(self.data)
+         elif hasattr(self.data, "model_dump"):
+             # Pydantic model
+             data_dict = self.data.model_dump()
+         elif isinstance(self.data, dict):
+             data_dict = self.data
+         else:
+             data_dict = {"value": self.data}
+
+         return {
+             "data": data_dict,
+             "history": self.history,
+         }
+
+     def add_user_message(self, content: str) -> None:
+         """Add a user message to history."""
+         self.history.append({"role": "user", "content": content})
+
+     def add_assistant_message(self, content: str) -> None:
+         """Add an assistant message to history."""
+         self.history.append({"role": "assistant", "content": content})
+
+     def add_system_message(self, content: str) -> None:
+         """Add a system message to history."""
+         self.history.append({"role": "system", "content": content})
+
+     def clear_history(self) -> None:
+         """Clear conversation history."""
+         self.history = []
+
+
+ @activity
+ async def _chat_activity(
+     ctx: WorkflowContext,  # noqa: ARG001
+     *,
+     model: str,
+     messages: list[dict[str, str]],
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     deps_dict: dict[str, Any],  # noqa: ARG001 - for logging/debugging
+     turn: int,  # noqa: ARG001 - used in activity ID
+     **call_params: Any,
+ ) -> dict[str, Any]:
+     """Internal: Execute LLM call as durable activity."""
+     llm = _import_mirascope()
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     def convert_messages(msgs: list[dict[str, str]]) -> list[Any]:
+         result: list[Any] = []
+         for msg in msgs:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 result.append(llm.messages.system(content))
+             elif role == "assistant":
+                 # Mirascope V2: assistant messages require model_id and provider_id
+                 result.append(llm.messages.assistant(content, model_id=model, provider_id=provider))
+             else:
+                 result.append(llm.messages.user(content))
+         return result
+
+     @llm.call(model, tools=tools, response_model=response_model, **call_params)  # type: ignore[misc]
+     async def _call() -> list[Any]:
+         return convert_messages(messages)
+
+     response = await _call()
+
+     # Handle structured output (response_model)
+     if response_model is not None and hasattr(response, "model_dump"):
+         return {
+             "content": "",
+             "model": model,
+             "provider": provider,
+             "structured_output": response.model_dump(),
+         }
+
+     return DurableResponse.from_mirascope(response, provider).to_dict()
+
+
+ class DurableAgent(Generic[T]):
+     """
+     Base class for durable agents with llm.Context support.
+
+     Integrates Mirascope V2's llm.Context with Edda's durable execution:
+     - Each chat turn is a separate durable activity (cached & replayable)
+     - llm.Context provides dependency injection to prompts and tools
+     - Conversation history is automatically managed via DurableDeps
+
+     Subclass and override:
+     - `model`: The LLM model string (e.g., "anthropic/claude-sonnet-4-20250514")
+     - `build_prompt()`: Construct the prompt with access to ctx.deps
+     - `get_tools()`: Return a list of @llm.tool() decorated functions
+
+     Attributes:
+         model: The model string in "provider/model" format
+         response_model: Optional Pydantic model for structured output
+
+     Example:
+         >>> class MyAgent(DurableAgent[MyDeps]):
+         ...     model = "anthropic/claude-sonnet-4-20250514"
+         ...
+         ...     def build_prompt(self, ctx, message):
+         ...         return [
+         ...             llm.messages.system(f"Context: {ctx.deps.some_data}"),
+         ...             llm.messages.user(message),
+         ...         ]
+         ...
+         >>> @workflow
+         ... async def my_workflow(ctx: WorkflowContext, query: str) -> str:
+         ...     deps = MyDeps(some_data="value")
+         ...     agent = MyAgent(ctx)
+         ...     response = await agent.chat(deps, query)
+         ...     return response["content"]
+     """
+
+     model: str = "anthropic/claude-sonnet-4-20250514"
+     response_model: type | None = None
+
+     def __init__(self, workflow_ctx: WorkflowContext) -> None:
+         """
+         Initialize the agent with a workflow context.
+
+         Args:
+             workflow_ctx: The Edda WorkflowContext for durable execution
+         """
+         self._workflow_ctx = workflow_ctx
+         self._turn_count = 0
+
+     def get_tools(self) -> list[Any] | None:
+         """
+         Override to provide tools for the agent.
+
+         Tools should be decorated with @llm.tool() and can access
+         ctx: llm.Context[T] as their first parameter.
+
+         Returns:
+             List of tool functions, or None if no tools
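+
+         Example:
+             >>> def get_tools(self) -> list:
+             ...     # e.g. the @llm.tool()-decorated `search` from the
+             ...     # module-level example; adapt to your own tools
+             ...     return [self.search]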
+         """
+         return None
+
+     def build_prompt(self, ctx: Any, message: str) -> list[Any]:
+         """
+         Override to build the prompt for each turn.
+
+         Access dependencies via ctx.deps. The returned messages
+         will be sent to the LLM.
+
+         Args:
+             ctx: llm.Context[T] with access to deps
+             message: The user message for this turn
+
+         Returns:
+             List of llm.messages (system, user, assistant)
+
+         Example:
+             >>> def build_prompt(self, ctx, message):
+             ...     llm = _import_mirascope()
+             ...     return [
+             ...         llm.messages.system(f"Data: {ctx.deps.my_data}"),
+             ...         llm.messages.user(message),
+             ...     ]
+         """
+         llm = _import_mirascope()
+         messages: list[Any] = []
+
+         # Extract provider from model string for assistant messages
+         provider = self.model.split("/")[0] if "/" in self.model else "unknown"
+
+         # Include history from DurableDeps if available
+         history = getattr(ctx.deps, "history", [])
+         for msg in history:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 messages.append(llm.messages.system(content))
+             elif role == "assistant":
+                 # Mirascope V2: assistant messages require model_id and provider_id
+                 messages.append(
+                     llm.messages.assistant(content, model_id=self.model, provider_id=provider)
+                 )
+             else:
+                 messages.append(llm.messages.user(content))
+
+         # Add the new user message
+         messages.append(llm.messages.user(message))
+         return messages
+
+     async def chat(
+         self,
+         deps: T | DurableDeps[T],
+         message: str,
+         **call_params: Any,
+     ) -> dict[str, Any]:
+         """
+         Send a message and get a response.
+
+         Each call is a separate durable activity - results are cached
+         and replayed on workflow recovery.
+
+         Args:
+             deps: Dependency data (raw or wrapped in DurableDeps)
+             message: User message to send
+             **call_params: Additional LLM call parameters
+
+         Returns:
+             DurableResponse as a dict with keys:
+             - content: Response text
+             - model: Model used
+             - provider: Provider name
+             - tool_calls: List of tool calls (if any)
+             - usage: Token usage stats
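+
+         Example:
+             >>> # Sketch, reusing `agent` and `deps` as constructed in the
+             >>> # class-level example above
+             >>> response = await agent.chat(deps, "Summarize the documents")
+             >>> response["content"]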
+         """
+         self._turn_count += 1
+         llm = _import_mirascope()
+
+         # Wrap in DurableDeps if needed
+         durable_deps = deps if isinstance(deps, DurableDeps) else DurableDeps(data=deps)
+
+         # Add user message to history
+         durable_deps.add_user_message(message)
+
+         # Build llm.Context with the actual data
+         llm_ctx = llm.Context(deps=durable_deps.data)
+
+         # Build prompt using the context
+         prompt_messages = self.build_prompt(llm_ctx, message)
+
+         # Convert to serializable format for activity
+         serializable_messages = self._messages_to_dict(prompt_messages)
+
+         # Execute as durable activity
+         # The @activity decorator transforms the function signature, but mypy doesn't understand it
+         response: dict[str, Any] = await _chat_activity(  # type: ignore[misc, call-arg]
+             self._workflow_ctx,  # type: ignore[arg-type]
+             model=self.model,
+             messages=serializable_messages,
+             tools=self.get_tools(),
+             response_model=self.response_model,
+             deps_dict=durable_deps.to_dict(),
+             turn=self._turn_count,
+             **call_params,
+         )
+
+         # Add assistant response to history
+         assistant_content = response.get("content", "")
+         if assistant_content:
+             durable_deps.add_assistant_message(assistant_content)
+
+         return response
+
+     def _messages_to_dict(self, messages: list[Any]) -> list[dict[str, str]]:
+         """Convert llm.messages to serializable dicts."""
+         result = []
+         for msg in messages:
+             if isinstance(msg, dict):
+                 result.append(msg)
+             elif hasattr(msg, "role") and hasattr(msg, "content"):
+                 content = self._extract_text_content(msg.content)
+                 result.append({"role": msg.role, "content": content})
+             elif hasattr(msg, "content"):
+                 content = self._extract_text_content(msg.content)
+                 result.append({"role": "user", "content": content})
+             else:
+                 result.append({"role": "user", "content": str(msg)})
+         return result
+
+     def _extract_text_content(self, content: Any) -> str:
+         """
+         Extract text from Mirascope V2 content format.
+
+         Mirascope V2 content can be:
+         - A plain string
+         - A list of Text/ContentBlock objects with a .text attribute
+         - None
+
+         Args:
+             content: The content to extract text from.
+
+         Returns:
+             Extracted text as a string.
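+
+         Example:
+             >>> agent._extract_text_content(None)
+             ''
+             >>> agent._extract_text_content(["a", "b"])
+             'ab'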
+         """
+         if content is None:
+             return ""
+         if isinstance(content, str):
+             return content
+         # Handle Mirascope V2's list of Text/ContentBlock objects
+         if isinstance(content, list):
+             text_parts = []
+             for item in content:
+                 if hasattr(item, "text"):
+                     text_parts.append(item.text)
+                 elif isinstance(item, str):
+                     text_parts.append(item)
+                 else:
+                     text_parts.append(str(item))
+             return "".join(text_parts)
+         return str(content)
+
+     async def chat_with_tool_loop(
+         self,
+         deps: T | DurableDeps[T],
+         message: str,
+         tool_executor: Any | None = None,
+         max_iterations: int = 10,
+         **call_params: Any,
+     ) -> dict[str, Any]:
+         """
+         Chat with an automatic tool-execution loop.
+
+         Continues calling tools until the model stops requesting them
+         or max_iterations is reached.
+
+         Args:
+             deps: Dependency data
+             message: Initial user message
+             tool_executor: Optional callable(tool_name, tool_args) -> result.
+                 If None, tools are not executed.
+             max_iterations: Maximum tool call iterations
+             **call_params: Additional LLM call parameters
+
+         Returns:
+             Final response after the tool loop completes
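+
+         Example:
+             >>> # Sketch: `TOOL_REGISTRY` is an illustrative dict mapping
+             >>> # tool names to callables; any executor callable works
+             >>> async def execute_tool(name: str, args: dict) -> str:
+             ...     return str(TOOL_REGISTRY[name](**args))
+             ...
+             >>> response = await agent.chat_with_tool_loop(
+             ...     deps, "Look this up", tool_executor=execute_tool
+             ... )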
+         """
+         # Wrap once so conversation history survives across loop iterations
+         durable_deps = deps if isinstance(deps, DurableDeps) else DurableDeps(data=deps)
+         response = await self.chat(durable_deps, message, **call_params)
+
+         iteration = 0
+         while response.get("tool_calls") and iteration < max_iterations:
+             if tool_executor is None:
+                 # No executor provided, return with tool_calls
+                 break
+
+             # Execute tools
+             tool_outputs = []
+             for tc in response["tool_calls"]:
+                 tool_name = tc.get("name")
+                 tool_args = tc.get("args", {})
+                 try:
+                     result = await tool_executor(tool_name, tool_args)
+                     tool_outputs.append({"tool": tool_name, "output": str(result)})
+                 except Exception as e:
+                     tool_outputs.append({"tool": tool_name, "error": str(e)})
+
+             # Format tool results and continue the conversation
+             tool_results_str = "\n".join(
+                 f"Tool {to['tool']}: {to.get('output', to.get('error', 'Unknown'))}"
+                 for to in tool_outputs
+             )
+             response = await self.chat(durable_deps, f"Tool results:\n{tool_results_str}", **call_params)
+             iteration += 1
+
+         return response
@@ -0,0 +1,166 @@
+ """
+ Simple durable LLM call functions for the Edda + Mirascope V2 integration.
+
+ This module provides a straightforward way to make durable LLM calls
+ without needing to define a separate function with @durable_call.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any
+
+ from edda.activity import activity
+ from edda.context import WorkflowContext
+
+ from .types import DurableResponse
+
+
+ def _import_mirascope() -> Any:
+     """
+     Lazily import Mirascope components.
+
+     Raises:
+         ImportError: If mirascope is not installed.
+     """
+     try:
+         from mirascope import llm
+
+         return llm
+     except ImportError as e:
+         raise ImportError(
+             "Mirascope not installed. Install with: pip install 'mirascope[anthropic]' "
+             "or pip install 'edda-framework[mirascope]'"
+         ) from e
+
+
+ @activity
+ async def call(
+     ctx: WorkflowContext,  # noqa: ARG001 - Required by @activity decorator
+     *,
+     model: str,
+     prompt: str,
+     system: str | None = None,
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     **call_params: Any,
+ ) -> dict[str, Any]:
+     """
+     Make a durable LLM call.
+
+     This is a simple, ad-hoc way to make LLM calls within workflows.
+     For more complex use cases, consider using the @durable_call decorator.
+
+     Args:
+         ctx: Workflow context (automatically provided by Edda).
+         model: Model identifier in "provider/model" format
+             (e.g., "anthropic/claude-sonnet-4-20250514", "openai/gpt-4").
+         prompt: The user prompt/message.
+         system: Optional system prompt.
+         tools: Optional list of tool functions for function calling.
+         response_model: Optional Pydantic model for structured output.
+         **call_params: Additional parameters passed to the LLM provider.
+
+     Returns:
+         Dictionary representation of DurableResponse.
+
+     Example:
+         >>> @workflow
+         ... async def my_workflow(ctx: WorkflowContext, question: str) -> str:
+         ...     response = await call(
+         ...         ctx,
+         ...         model="anthropic/claude-sonnet-4-20250514",
+         ...         prompt=question,
+         ...         system="You are a helpful assistant.",
+         ...     )
+         ...     return response["content"]
+     """
+     llm = _import_mirascope()
+
+     # Extract provider from the model string (e.g., "anthropic/claude-..." -> "anthropic")
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     # Build the call function dynamically using the V2 API
+     @llm.call(model, tools=tools, response_model=response_model, **call_params)  # type: ignore[misc]
+     async def _call() -> list[Any]:
+         # V2: use llm.messages.system/user and return the list directly
+         messages = []
+         if system:
+             messages.append(llm.messages.system(system))
+         messages.append(llm.messages.user(prompt))
+         return messages
+
+     # Execute the call
+     response = await _call()
+
+     # Convert to serializable format
+     return DurableResponse.from_mirascope(response, provider).to_dict()
+
+
+ @activity
+ async def call_with_messages(
+     ctx: WorkflowContext,  # noqa: ARG001 - Required by @activity decorator
+     *,
+     model: str,
+     messages: list[dict[str, str]],
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     **call_params: Any,
+ ) -> dict[str, Any]:
+     """
+     Make a durable LLM call with a full message history.
+
+     This is useful for multi-turn conversations where you need to pass
+     the full conversation history.
+
+     Args:
+         ctx: Workflow context (automatically provided by Edda).
+         model: Model identifier in "provider/model" format
+             (e.g., "anthropic/claude-sonnet-4-20250514", "openai/gpt-4").
+         messages: List of message dicts with "role" and "content" keys.
+         tools: Optional list of tool functions for function calling.
+         response_model: Optional Pydantic model for structured output.
+         **call_params: Additional parameters passed to the LLM provider.
+
+     Returns:
+         Dictionary representation of DurableResponse.
+
+     Example:
+         >>> @workflow
+         ... async def chat_workflow(ctx: WorkflowContext, history: list[dict]) -> str:
+         ...     response = await call_with_messages(
+         ...         ctx,
+         ...         model="anthropic/claude-sonnet-4-20250514",
+         ...         messages=history,
+         ...     )
+         ...     return response["content"]
+     """
+     llm = _import_mirascope()
+
+     # Extract provider from the model string
+     # (e.g., "anthropic/claude-sonnet-4-20250514" -> "anthropic")
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     # Convert message dicts to Mirascope V2 message objects
+     def convert_messages(msgs: list[dict[str, str]]) -> list[Any]:
+         result = []
+         for msg in msgs:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 result.append(llm.messages.system(content))
+             elif role == "assistant":
+                 # Mirascope V2: assistant messages require model_id and provider_id
+                 result.append(llm.messages.assistant(content, model_id=model, provider_id=provider))
+             else:
+                 result.append(llm.messages.user(content))
+         return result
+
+     @llm.call(model, tools=tools, response_model=response_model, **call_params)  # type: ignore[misc]
+     async def _call() -> list[Any]:
+         return convert_messages(messages)
+
+     # Execute the call
+     response = await _call()
+
+     # Convert to serializable format
+     return DurableResponse.from_mirascope(response, provider).to_dict()
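
As a usage sketch (assuming `call` and `call_with_messages` are exported from `edda.integrations.mirascope` alongside `DurableAgent`; the prompts are illustrative), the two entry points compose naturally within one workflow:

    from edda import workflow, WorkflowContext
    from edda.integrations.mirascope import call, call_with_messages

    @workflow
    async def qa_workflow(ctx: WorkflowContext, question: str) -> str:
        # First turn: single-shot durable call
        first = await call(
            ctx,
            model="anthropic/claude-sonnet-4-20250514",
            prompt=question,
            system="You are a helpful assistant.",
        )
        # Follow-up turn: replay the accumulated history via call_with_messages
        history = [
            {"role": "user", "content": question},
            {"role": "assistant", "content": first["content"]},
            {"role": "user", "content": "Summarize that in one sentence."},
        ]
        second = await call_with_messages(
            ctx,
            model="anthropic/claude-sonnet-4-20250514",
            messages=history,
        )
        return second["content"]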