edda-framework 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
edda/integrations/mirascope/__init__.py (new file)
@@ -0,0 +1,78 @@
+ """
+ Edda + Mirascope V2 integration for durable LLM calls.
+
+ This module provides utilities to make LLM calls durable through
+ Edda's activity system, enabling automatic caching, retry, and
+ crash recovery for LLM operations.
+
+ Example:
+     Using the decorator::
+
+         from edda import workflow, WorkflowContext
+         from edda.integrations.mirascope import durable_call
+
+         @durable_call("anthropic/claude-sonnet-4-20250514")
+         async def summarize(text: str) -> str:
+             return f"Summarize: {text}"
+
+         @workflow
+         async def my_workflow(ctx: WorkflowContext, text: str) -> str:
+             response = await summarize(ctx, text)
+             return response["content"]
+
+     Using the call function::
+
+         from edda import workflow, WorkflowContext
+         from edda.integrations.mirascope import call
+
+         @workflow
+         async def my_workflow(ctx: WorkflowContext, question: str) -> str:
+             response = await call(
+                 ctx,
+                 model="anthropic/claude-sonnet-4-20250514",
+                 prompt=question,
+             )
+             return response["content"]
+
+     Using DurableAgent for context-aware conversations::
+
+         from dataclasses import dataclass
+         from mirascope import llm
+         from edda import workflow, WorkflowContext
+         from edda.integrations.mirascope import DurableAgent, DurableDeps
+
+         @dataclass
+         class MyDeps:
+             documents: list[str]
+
+         class MyAgent(DurableAgent[MyDeps]):
+             model = "anthropic/claude-sonnet-4-20250514"
+
+             def build_prompt(self, ctx, message):
+                 docs = "\\n".join(ctx.deps.documents)
+                 return [
+                     llm.messages.system(f"Documents:\\n{docs}"),
+                     llm.messages.user(message),
+                 ]
+
+         @workflow
+         async def my_workflow(ctx: WorkflowContext, query: str) -> str:
+             deps = MyDeps(documents=["Doc 1", "Doc 2"])
+             agent = MyAgent(ctx)
+             response = await agent.chat(deps, query)
+             return response["content"]
+ """
+
+ from edda.integrations.mirascope.agent import DurableAgent, DurableDeps
+ from edda.integrations.mirascope.call import call, call_with_messages
+ from edda.integrations.mirascope.decorator import durable_call
+ from edda.integrations.mirascope.types import DurableResponse
+
+ __all__ = [
+     "durable_call",
+     "call",
+     "call_with_messages",
+     "DurableAgent",
+     "DurableDeps",
+     "DurableResponse",
+ ]
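
All three entry points return a plain dict (the serialized DurableResponse) rather than a live Mirascope object, so results can be cached and replayed by Edda. A minimal sketch of consuming that dict, assuming only the keys documented in types.py below (the helper name and the usage key names are illustrative):

    def render_response(response: dict) -> str:
        # "content" is the response text; "tool_calls" and "usage" may be None
        text = response["content"]
        calls = response.get("tool_calls") or []
        usage = response.get("usage") or {}
        # usage key names vary by provider; "output_tokens" here is illustrative
        return f"{text} [tools: {len(calls)}, output tokens: {usage.get('output_tokens', 'n/a')}]"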

edda/integrations/mirascope/agent.py (new file)
@@ -0,0 +1,467 @@
+ """
+ DurableAgent: a durable agent built on llm.Context.
+
+ Integrates Mirascope V2's llm.Context with durable execution:
+ - Dependency injection via llm.Context
+ - Automatic conversation-history management
+ - Each turn runs as a durable activity
+
+ Example:
+     Using DurableAgent with context::
+
+         from dataclasses import dataclass
+         from mirascope import llm
+         from edda import workflow, WorkflowContext
+         from edda.integrations.mirascope import DurableAgent, DurableDeps
+
+         @dataclass
+         class ResearchDeps:
+             documents: list[str]
+             search_index: dict[str, str]
+
+         class ResearchAgent(DurableAgent[ResearchDeps]):
+             model = "anthropic/claude-sonnet-4-20250514"
+
+             @staticmethod
+             @llm.tool()
+             def search(ctx: llm.Context[ResearchDeps], query: str) -> str:
+                 '''Search through documents.'''
+                 return ctx.deps.search_index.get(query, "No results")
+
+             def get_tools(self) -> list:
+                 return [self.search]
+
+             def build_prompt(self, ctx: llm.Context[ResearchDeps], message: str) -> list:
+                 docs = "\\n".join(ctx.deps.documents)
+                 return [
+                     llm.messages.system(f"You are a research assistant.\\nDocs:\\n{docs}"),
+                     llm.messages.user(message),
+                 ]
+
+         @workflow
+         async def research_workflow(ctx: WorkflowContext, topic: str) -> str:
+             deps = ResearchDeps(
+                 documents=["Doc 1...", "Doc 2..."],
+                 search_index={"key1": "value1"},
+             )
+             agent = ResearchAgent(ctx)
+             response = await agent.chat(deps, f"Research: {topic}")
+             return response["content"]
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Any, Generic, TypeVar
+
+ from edda.activity import activity
+ from edda.context import WorkflowContext
+
+ from .types import DurableResponse
+
+ if TYPE_CHECKING:
+     pass
+
+ # Type variable for dependency data
+ T = TypeVar("T")
+
+
+ def _import_mirascope() -> Any:
+     """Import mirascope with helpful error message."""
+     try:
+         from mirascope import llm
+
+         return llm
+     except ImportError as e:
+         msg = (
+             "Mirascope is not installed. Install with:\n"
+             " pip install 'mirascope[anthropic]'\n"
+             "or\n"
+             " pip install 'edda-framework[mirascope]'"
+         )
+         raise ImportError(msg) from e
+
+
+ @dataclass
+ class DurableDeps(Generic[T]):
+     """
+     Serializable dependency container for DurableAgent.
+
+     Bridges llm.Context and Edda's durable activity system.
+     Manages both user-defined dependencies and conversation history.
+
+     Attributes:
+         data: User-defined dependency data (will be injected into llm.Context)
+         history: Conversation history (automatically managed)
+
+     Example:
+         >>> @dataclass
+         ... class MyDeps:
+         ...     api_key: str
+         ...     cache: dict[str, str]
+         ...
+         >>> deps = DurableDeps(data=MyDeps(api_key="xxx", cache={}))
+         >>> agent = MyAgent(ctx)
+         >>> await agent.chat(deps, "Hello")  # history auto-updated
+     """
+
+     data: T
+     history: list[dict[str, str]] = field(default_factory=list)
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert to JSON-serializable dictionary for activity caching."""
+         import dataclasses
+
+         # Handle dataclass or dict data
+         if dataclasses.is_dataclass(self.data) and not isinstance(self.data, type):
+             data_dict: dict[str, Any] = dataclasses.asdict(self.data)
+         elif hasattr(self.data, "model_dump"):
+             # Pydantic model
+             data_dict = self.data.model_dump()
+         elif isinstance(self.data, dict):
+             data_dict = self.data
+         else:
+             data_dict = {"value": self.data}
+
+         return {
+             "data": data_dict,
+             "history": self.history,
+         }
+
+     def add_user_message(self, content: str) -> None:
+         """Add a user message to history."""
+         self.history.append({"role": "user", "content": content})
+
+     def add_assistant_message(self, content: str) -> None:
+         """Add an assistant message to history."""
+         self.history.append({"role": "assistant", "content": content})
+
+     def add_system_message(self, content: str) -> None:
+         """Add a system message to history."""
+         self.history.append({"role": "system", "content": content})
+
+     def clear_history(self) -> None:
+         """Clear conversation history."""
+         self.history = []
+
+
+ @activity
+ async def _chat_activity(
+     ctx: WorkflowContext,  # noqa: ARG001
+     *,
+     model: str,
+     messages: list[dict[str, str]],
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     deps_dict: dict[str, Any],  # noqa: ARG001 - for logging/debugging
+     turn: int,  # noqa: ARG001 - used in activity ID
+     **call_params: Any,
+ ) -> dict[str, Any]:
+     """Internal: Execute LLM call as durable activity."""
+     llm = _import_mirascope()
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     def convert_messages(msgs: list[dict[str, str]]) -> list[Any]:
+         result: list[Any] = []
+         for msg in msgs:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 result.append(llm.messages.system(content))
+             elif role == "assistant":
+                 # Mirascope V2: assistant messages require model_id and provider_id
+                 result.append(llm.messages.assistant(content, model_id=model, provider_id=provider))
+             else:
+                 result.append(llm.messages.user(content))
+         return result
+
+     @llm.call(model, tools=tools, response_model=response_model, **call_params)  # type: ignore[misc]
+     async def _call() -> list[Any]:
+         return convert_messages(messages)
+
+     response = await _call()
+
+     # Handle structured output (response_model)
+     if response_model is not None and hasattr(response, "model_dump"):
+         return {
+             "content": "",
+             "model": model,
+             "provider": provider,
+             "structured_output": response.model_dump(),
+         }
+
+     return DurableResponse.from_mirascope(response, provider).to_dict()
+
+
+ class DurableAgent(Generic[T]):
+     """
+     Base class for durable agents with llm.Context support.
+
+     Integrates Mirascope V2's llm.Context with Edda's durable execution:
+     - Each chat turn is a separate durable activity (cached & replayable)
+     - llm.Context provides dependency injection to prompts and tools
+     - Conversation history is automatically managed via DurableDeps
+
+     Subclass and override:
+     - `model`: The LLM model string (e.g., "anthropic/claude-sonnet-4-20250514")
+     - `build_prompt()`: Construct the prompt with access to ctx.deps
+     - `get_tools()`: Return list of @llm.tool() decorated functions
+
+     Attributes:
+         model: The model string in "provider/model" format
+         response_model: Optional Pydantic model for structured output
+
+     Example:
+         >>> class MyAgent(DurableAgent[MyDeps]):
+         ...     model = "anthropic/claude-sonnet-4-20250514"
+         ...
+         ...     def build_prompt(self, ctx, message):
+         ...         return [
+         ...             llm.messages.system(f"Context: {ctx.deps.some_data}"),
+         ...             llm.messages.user(message),
+         ...         ]
+         ...
+         >>> @workflow
+         ... async def my_workflow(ctx: WorkflowContext, query: str) -> str:
+         ...     deps = MyDeps(some_data="value")
+         ...     agent = MyAgent(ctx)
+         ...     response = await agent.chat(deps, query)
+         ...     return response["content"]
+     """
+
+     model: str = "anthropic/claude-sonnet-4-20250514"
+     response_model: type | None = None
+
+     def __init__(self, workflow_ctx: WorkflowContext) -> None:
+         """
+         Initialize the agent with a workflow context.
+
+         Args:
+             workflow_ctx: The Edda WorkflowContext for durable execution
+         """
+         self._workflow_ctx = workflow_ctx
+         self._turn_count = 0
+
+     def get_tools(self) -> list[Any] | None:
+         """
+         Override to provide tools for the agent.
+
+         Tools should be decorated with @llm.tool() and can access
+         ctx: llm.Context[T] as their first parameter.
+
+         Returns:
+             List of tool functions, or None if no tools
+         """
+         return None
+
+     def build_prompt(self, ctx: Any, message: str) -> list[Any]:
+         """
+         Override to build the prompt for each turn.
+
+         Access dependencies via ctx.deps. The returned messages
+         will be sent to the LLM.
+
+         Args:
+             ctx: llm.Context[T] with access to deps
+             message: The user message for this turn
+
+         Returns:
+             List of llm.messages (system, user, assistant)
+
+         Example:
+             >>> def build_prompt(self, ctx, message):
+             ...     llm = _import_mirascope()
+             ...     return [
+             ...         llm.messages.system(f"Data: {ctx.deps.my_data}"),
+             ...         llm.messages.user(message),
+             ...     ]
+         """
+         llm = _import_mirascope()
+         messages: list[Any] = []
+
+         # Extract provider from model string for assistant messages
+         provider = self.model.split("/")[0] if "/" in self.model else "unknown"
+
+         # Include history from DurableDeps if available
+         history = getattr(ctx.deps, "history", [])
+         for msg in history:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 messages.append(llm.messages.system(content))
+             elif role == "assistant":
+                 # Mirascope V2: assistant messages require model_id and provider_id
+                 messages.append(
+                     llm.messages.assistant(content, model_id=self.model, provider_id=provider)
+                 )
+             else:
+                 messages.append(llm.messages.user(content))
+
+         # Add the new user message
+         messages.append(llm.messages.user(message))
+         return messages
+
+     async def chat(
+         self,
+         deps: T | DurableDeps[T],
+         message: str,
+         **call_params: Any,
+     ) -> dict[str, Any]:
+         """
+         Send a message and get a response.
+
+         Each call is a separate durable activity - results are cached
+         and replayed on workflow recovery.
+
+         Args:
+             deps: Dependency data (raw or wrapped in DurableDeps)
+             message: User message to send
+             **call_params: Additional LLM call parameters
+
+         Returns:
+             DurableResponse as dict with keys:
+             - content: Response text
+             - model: Model used
+             - provider: Provider name
+             - tool_calls: List of tool calls (if any)
+             - usage: Token usage stats
+         """
+         self._turn_count += 1
+         llm = _import_mirascope()
+
+         # Wrap in DurableDeps if needed
+         durable_deps = deps if isinstance(deps, DurableDeps) else DurableDeps(data=deps)
+
+         # Add user message to history
+         durable_deps.add_user_message(message)
+
+         # Build llm.Context with the actual data
+         llm_ctx = llm.Context(deps=durable_deps.data)
+
+         # Build prompt using the context
+         prompt_messages = self.build_prompt(llm_ctx, message)
+
+         # Convert to serializable format for activity
+         serializable_messages = self._messages_to_dict(prompt_messages)
+
+         # Execute as durable activity
+         # The @activity decorator transforms the function signature, but mypy doesn't understand it
+         response: dict[str, Any] = await _chat_activity(  # type: ignore[misc, call-arg]
+             self._workflow_ctx,  # type: ignore[arg-type]
+             model=self.model,
+             messages=serializable_messages,
+             tools=self.get_tools(),
+             response_model=self.response_model,
+             deps_dict=durable_deps.to_dict(),
+             turn=self._turn_count,
+             **call_params,
+         )
+
+         # Add assistant response to history
+         assistant_content = response.get("content", "")
+         if assistant_content:
+             durable_deps.add_assistant_message(assistant_content)
+
+         return response
+
+     def _messages_to_dict(self, messages: list[Any]) -> list[dict[str, str]]:
+         """Convert llm.messages to serializable dicts."""
+         result = []
+         for msg in messages:
+             if isinstance(msg, dict):
+                 result.append(msg)
+             elif hasattr(msg, "role") and hasattr(msg, "content"):
+                 content = self._extract_text_content(msg.content)
+                 result.append({"role": msg.role, "content": content})
+             elif hasattr(msg, "content"):
+                 content = self._extract_text_content(msg.content)
+                 result.append({"role": "user", "content": content})
+             else:
+                 result.append({"role": "user", "content": str(msg)})
+         return result
+
+     def _extract_text_content(self, content: Any) -> str:
+         """
+         Extract text from Mirascope V2 content format.
+
+         Mirascope V2 content can be:
+         - A plain string
+         - A list of Text/ContentBlock objects with .text attribute
+         - None
+
+         Args:
+             content: The content to extract text from.
+
+         Returns:
+             Extracted text as a string.
+         """
+         if content is None:
+             return ""
+         if isinstance(content, str):
+             return content
+         # Handle Mirascope V2's list of Text/ContentBlock objects
+         if isinstance(content, list):
+             text_parts = []
+             for item in content:
+                 if hasattr(item, "text"):
+                     text_parts.append(item.text)
+                 elif isinstance(item, str):
+                     text_parts.append(item)
+                 else:
+                     text_parts.append(str(item))
+             return "".join(text_parts)
+         return str(content)
+
+     async def chat_with_tool_loop(
+         self,
+         deps: T | DurableDeps[T],
+         message: str,
+         tool_executor: Any | None = None,
+         max_iterations: int = 10,
+         **call_params: Any,
+     ) -> dict[str, Any]:
+         """
+         Chat with automatic tool execution loop.
+
+         Continues calling tools until the model stops requesting them
+         or max_iterations is reached.
+
+         Args:
+             deps: Dependency data
+             message: Initial user message
+             tool_executor: Optional callable(tool_name, tool_args) -> result.
+                 If None, tools are not executed.
+             max_iterations: Maximum tool call iterations
+             **call_params: Additional LLM call parameters
+
+         Returns:
+             Final response after tool loop completes
+         """
+         response = await self.chat(deps, message, **call_params)
+
+         iteration = 0
+         while response.get("tool_calls") and iteration < max_iterations:
+             if tool_executor is None:
+                 # No executor provided, return with tool_calls
+                 break
+
+             # Execute tools
+             tool_outputs = []
+             for tc in response["tool_calls"]:
+                 tool_name = tc.get("name")
+                 tool_args = tc.get("args", {})
+                 try:
+                     result = await tool_executor(tool_name, tool_args)
+                     tool_outputs.append({"tool": tool_name, "output": str(result)})
+                 except Exception as e:
+                     tool_outputs.append({"tool": tool_name, "error": str(e)})
+
+             # Format tool results and continue conversation
+             tool_results_str = "\n".join(
+                 f"Tool {to['tool']}: {to.get('output', to.get('error', 'Unknown'))}"
+                 for to in tool_outputs
+             )
+             response = await self.chat(deps, f"Tool results:\n{tool_results_str}", **call_params)
+             iteration += 1
+
+         return response
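
For chat_with_tool_loop, tool_executor is expected to be an async callable taking (tool_name, tool_args) and returning a result; as the code above shows, exceptions are captured as error text rather than raised. A minimal sketch of a dispatch-table executor, where the get_weather handler is hypothetical and not part of the package:

    async def my_tool_executor(tool_name: str, tool_args: dict) -> str:
        # Hypothetical handler for illustration only
        async def get_weather(city: str) -> str:
            return f"Sunny in {city}"

        handlers = {"get_weather": get_weather}
        if tool_name not in handlers:
            raise ValueError(f"Unknown tool: {tool_name}")
        return await handlers[tool_name](**tool_args)

    # Inside a workflow:
    #     response = await agent.chat_with_tool_loop(
    #         deps, query, tool_executor=my_tool_executor
    #     )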

edda/integrations/mirascope/call.py (new file)
@@ -0,0 +1,166 @@
+ """
+ Simple durable LLM call function for Edda + Mirascope V2 integration.
+
+ This module provides a straightforward way to make durable LLM calls
+ without needing to define a separate function with @durable_call.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any
+
+ from edda.activity import activity
+ from edda.context import WorkflowContext
+
+ from .types import DurableResponse
+
+
+ def _import_mirascope() -> Any:
+     """
+     Lazy import Mirascope components.
+
+     Raises:
+         ImportError: If mirascope is not installed.
+     """
+     try:
+         from mirascope import llm
+
+         return llm
+     except ImportError as e:
+         raise ImportError(
+             "Mirascope not installed. Install with: pip install 'mirascope[anthropic]' "
+             "or pip install 'edda-framework[mirascope]'"
+         ) from e
+
+
+ @activity
+ async def call(
+     ctx: WorkflowContext,  # noqa: ARG001 - Required by @activity decorator
+     *,
+     model: str,
+     prompt: str,
+     system: str | None = None,
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     **call_params: Any,
+ ) -> dict[str, Any]:
+     """
+     Make a durable LLM call.
+
+     This is a simple, ad-hoc way to make LLM calls within workflows.
+     For more complex use cases, consider using the @durable_call decorator.
+
+     Args:
+         ctx: Workflow context (automatically provided by Edda).
+         model: Model identifier in "provider/model" format
+             (e.g., "anthropic/claude-sonnet-4-20250514", "openai/gpt-4").
+         prompt: The user prompt/message.
+         system: Optional system prompt.
+         tools: Optional list of tool functions for function calling.
+         response_model: Optional Pydantic model for structured output.
+         **call_params: Additional parameters passed to the LLM provider.
+
+     Returns:
+         Dictionary representation of DurableResponse.
+
+     Example:
+         >>> @workflow
+         ... async def my_workflow(ctx: WorkflowContext, question: str) -> str:
+         ...     response = await call(
+         ...         ctx,
+         ...         model="anthropic/claude-sonnet-4-20250514",
+         ...         prompt=question,
+         ...         system="You are a helpful assistant.",
+         ...     )
+         ...     return response["content"]
+     """
+     llm = _import_mirascope()
+
+     # Extract provider from model string (e.g., "anthropic/claude-..." -> "anthropic")
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     # Build the call function dynamically using V2 API
+     @llm.call(model, tools=tools, response_model=response_model, **call_params)  # type: ignore[misc]
+     async def _call() -> list[Any]:
+         # V2: Use llm.messages.system/user and return list directly
+         messages = []
+         if system:
+             messages.append(llm.messages.system(system))
+         messages.append(llm.messages.user(prompt))
+         return messages
+
+     # Execute the call
+     response = await _call()
+
+     # Convert to serializable format
+     return DurableResponse.from_mirascope(response, provider).to_dict()
+
+
+ @activity
+ async def call_with_messages(
+     ctx: WorkflowContext,  # noqa: ARG001 - Required by @activity decorator
+     *,
+     model: str,
+     messages: list[dict[str, str]],
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     **call_params: Any,
+ ) -> dict[str, Any]:
+     """
+     Make a durable LLM call with a full message history.
+
+     This is useful for multi-turn conversations where you need to pass
+     the full conversation history.
+
+     Args:
+         ctx: Workflow context (automatically provided by Edda).
+         model: Model identifier in "provider/model" format
+             (e.g., "anthropic/claude-sonnet-4-20250514", "openai/gpt-4").
+         messages: List of message dicts with "role" and "content" keys.
+         tools: Optional list of tool functions for function calling.
+         response_model: Optional Pydantic model for structured output.
+         **call_params: Additional parameters passed to the LLM provider.
+
+     Returns:
+         Dictionary representation of DurableResponse.
+
+     Example:
+         >>> @workflow
+         ... async def chat_workflow(ctx: WorkflowContext, history: list[dict]) -> str:
+         ...     response = await call_with_messages(
+         ...         ctx,
+         ...         model="anthropic/claude-sonnet-4-20250514",
+         ...         messages=history,
+         ...     )
+         ...     return response["content"]
+     """
+     llm = _import_mirascope()
+
+     # Extract provider from model string; the full string doubles as model_id
+     # e.g., "anthropic/claude-sonnet-4-20250514" -> provider="anthropic"
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     # Convert message dicts to Mirascope V2 message objects
+     def convert_messages(msgs: list[dict[str, str]]) -> list[Any]:
+         result = []
+         for msg in msgs:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             if role == "system":
+                 result.append(llm.messages.system(content))
+             elif role == "assistant":
+                 # Mirascope V2: assistant messages require model_id and provider_id
+                 result.append(llm.messages.assistant(content, model_id=model, provider_id=provider))
+             else:
+                 result.append(llm.messages.user(content))
+         return result
+
+     @llm.call(model, tools=tools, response_model=response_model, **call_params)  # type: ignore[misc]
+     async def _call() -> list[Any]:
+         return convert_messages(messages)
+
+     # Execute the call
+     response = await _call()
+
+     # Convert to serializable format
+     return DurableResponse.from_mirascope(response, provider).to_dict()
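
Because call_with_messages takes the full history on every turn, multi-turn conversations are built by appending the cached assistant reply before the next durable call. A minimal sketch, assuming only the API shown above (the workflow name is illustrative):

    from edda import workflow, WorkflowContext
    from edda.integrations.mirascope import call_with_messages

    @workflow
    async def two_turn_workflow(ctx: WorkflowContext, question: str, follow_up: str) -> str:
        model = "anthropic/claude-sonnet-4-20250514"
        history = [{"role": "user", "content": question}]
        first = await call_with_messages(ctx, model=model, messages=history)
        # On replay, the durable call returns the cached dict, so the
        # reconstructed history stays deterministic.
        history.append({"role": "assistant", "content": first["content"]})
        history.append({"role": "user", "content": follow_up})
        second = await call_with_messages(ctx, model=model, messages=history)
        return second["content"]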

edda/integrations/mirascope/decorator.py (new file)
@@ -0,0 +1,163 @@
+ """
+ Durable LLM call decorator for Edda + Mirascope V2 integration.
+
+ This module provides the @durable_call decorator that combines
+ Mirascope's @llm.call with Edda's @activity for durable LLM calls.
+ """
+
+ from __future__ import annotations
+
+ import functools
+ import inspect
+ from collections.abc import Callable
+ from typing import Any, TypeVar
+
+ from edda.activity import activity
+ from edda.context import WorkflowContext
+
+ from .types import DurableResponse
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+
+ def _import_mirascope() -> Any:
+     """
+     Lazy import Mirascope components.
+
+     Raises:
+         ImportError: If mirascope is not installed.
+     """
+     try:
+         from mirascope import llm
+
+         return llm
+     except ImportError as e:
+         raise ImportError(
+             "Mirascope not installed. Install with: pip install 'mirascope[anthropic]' "
+             "or pip install 'edda-framework[mirascope]'"
+         ) from e
+
+
+ def durable_call(
+     model: str,
+     *,
+     tools: list[Any] | None = None,
+     response_model: type | None = None,
+     json_mode: bool = False,
+     **call_params: Any,
+ ) -> Callable[[F], F]:
+     """
+     Decorator that makes an LLM call durable through Edda's activity system.
+
+     This decorator combines Mirascope V2's @llm.call with Edda's @activity,
+     providing automatic caching, retry, and crash recovery for LLM calls.
+
+     Args:
+         model: Model identifier in "provider/model" format
+             (e.g., "anthropic/claude-sonnet-4-20250514", "openai/gpt-4").
+         tools: Optional list of tool functions for function calling.
+         response_model: Optional Pydantic model for structured output.
+         json_mode: Whether to enable JSON mode.
+         **call_params: Additional parameters passed to the LLM provider.
+
+     Returns:
+         A decorator that transforms the function into a durable LLM call.
+
+     Example:
+         Basic usage::
+
+             @durable_call("anthropic/claude-sonnet-4-20250514")
+             async def summarize(text: str) -> str:
+                 return f"Summarize this text: {text}"
+
+             @workflow
+             async def my_workflow(ctx: WorkflowContext, text: str) -> str:
+                 response = await summarize(ctx, text)
+                 return response["content"]
+
+         With tools::
+
+             def get_weather(city: str) -> str:
+                 '''Get the weather for a city.'''
+                 return f"Sunny in {city}"
+
+             @durable_call(
+                 "anthropic/claude-sonnet-4-20250514",
+                 tools=[get_weather],
+             )
+             async def weather_assistant(query: str) -> str:
+                 return query
+
+         With structured output::
+
+             class BookInfo(BaseModel):
+                 title: str
+                 author: str
+                 year: int
+
+             @durable_call(
+                 "anthropic/claude-sonnet-4-20250514",
+                 response_model=BookInfo,
+             )
+             async def extract_book_info(text: str) -> str:
+                 return f"Extract book information from: {text}"
+
+     Note:
+         - The decorated function must return a string (the prompt).
+         - When called, the first argument must be the WorkflowContext.
+         - The response is returned as a dictionary (DurableResponse.to_dict()).
+     """
+     llm = _import_mirascope()
+
+     # Extract provider from model string (e.g., "anthropic/claude-..." -> "anthropic")
+     provider = model.split("/")[0] if "/" in model else "unknown"
+
+     def decorator(func: F) -> F:
+         # Apply Mirascope V2's @llm.call decorator with unified model string
+         mirascope_decorated = llm.call(
+             model,
+             tools=tools,
+             response_model=response_model,
+             json_mode=json_mode,
+             **call_params,
+         )(func)
+
+         # Determine if the original function is async
+         is_async = inspect.iscoroutinefunction(func)
+
+         @activity
+         @functools.wraps(func)
+         async def async_wrapper(
+             ctx: WorkflowContext,  # noqa: ARG001 - Required by @activity decorator
+             *args: Any,
+             **kwargs: Any,
+         ) -> dict[str, Any]:
+             # Call the Mirascope-decorated function
+             if is_async or inspect.iscoroutinefunction(mirascope_decorated):
+                 response = await mirascope_decorated(*args, **kwargs)
+             else:
+                 response = mirascope_decorated(*args, **kwargs)
+
+             # Handle structured output (response_model)
+             # For structured output, the response is the Pydantic model itself
+             if response_model is not None and hasattr(response, "model_dump"):
+                 return {
+                     "content": "",
+                     "model": model,
+                     "provider": provider,
+                     "structured_output": response.model_dump(),
+                 }
+
+             # Convert to serializable format
+             return DurableResponse.from_mirascope(response, provider).to_dict()
+
+         # Store metadata for introspection
+         async_wrapper._mirascope_func = mirascope_decorated  # type: ignore[union-attr]
+         async_wrapper._provider = provider  # type: ignore[union-attr]
+         async_wrapper._model = model  # type: ignore[union-attr]
+         async_wrapper._tools = tools  # type: ignore[union-attr]
+         async_wrapper._response_model = response_model  # type: ignore[union-attr]
+
+         return async_wrapper  # type: ignore[return-value]
+
+     return decorator
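
With response_model set, the wrapper above returns the model's fields under the "structured_output" key and leaves "content" empty. A minimal sketch of rehydrating the typed model on the workflow side, assuming Pydantic v2's model_validate (the workflow name is illustrative):

    from pydantic import BaseModel

    from edda import workflow, WorkflowContext
    from edda.integrations.mirascope import durable_call

    class BookInfo(BaseModel):
        title: str
        author: str
        year: int

    @durable_call("anthropic/claude-sonnet-4-20250514", response_model=BookInfo)
    async def extract_book_info(text: str) -> str:
        return f"Extract book information from: {text}"

    @workflow
    async def book_workflow(ctx: WorkflowContext, text: str) -> str:
        response = await extract_book_info(ctx, text)
        # Rebuild the typed model from the cached, JSON-safe payload
        book = BookInfo.model_validate(response["structured_output"])
        return f"{book.title} by {book.author} ({book.year})"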

edda/integrations/mirascope/types.py (new file)
@@ -0,0 +1,268 @@
+ """
+ Type definitions for Edda + Mirascope integration.
+
+ This module provides serializable response types that bridge
+ Mirascope's response objects with Edda's activity system.
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Any
+
+
+ @dataclass
+ class DurableResponse:
+     """
+     Serializable representation of a Mirascope LLM response.
+
+     This class captures the essential parts of an LLM response
+     in a JSON-serializable format for Edda's activity caching.
+
+     Attributes:
+         content: The text content of the response.
+         model: The model identifier used for the call.
+         provider: The provider name (e.g., "anthropic", "openai").
+         usage: Token usage statistics (input, output, total).
+         tool_calls: List of tool calls requested by the model.
+         stop_reason: The reason the model stopped generating.
+         raw: Raw response data for debugging/advanced use.
+     """
+
+     content: str
+     model: str
+     provider: str
+     usage: dict[str, int] | None = None
+     tool_calls: list[dict[str, Any]] | None = None
+     stop_reason: str | None = None
+     raw: dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "content": self.content,
+             "model": self.model,
+             "provider": self.provider,
+             "usage": self.usage,
+             "tool_calls": self.tool_calls,
+             "stop_reason": self.stop_reason,
+             "raw": self.raw,
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict[str, Any]) -> DurableResponse:
+         """Create from dictionary (for replay)."""
+         return cls(
+             content=data.get("content", ""),
+             model=data.get("model", ""),
+             provider=data.get("provider", ""),
+             usage=data.get("usage"),
+             tool_calls=data.get("tool_calls"),
+             stop_reason=data.get("stop_reason"),
+             raw=data.get("raw", {}),
+         )
+
+     @classmethod
+     def _extract_content(cls, response: Any) -> str:
+         """
+         Extract text content from a Mirascope response.
+
+         Handles Mirascope V2's response format where content can be:
+         - A plain string
+         - A list of Text/ContentBlock objects with .text attribute
+         - None
+
+         Args:
+             response: The Mirascope CallResponse object.
+
+         Returns:
+             The extracted text content as a string.
+         """
+         if not hasattr(response, "content"):
+             return str(response)
+
+         content = response.content
+         if content is None:
+             return ""
+         if isinstance(content, str):
+             return content
+
+         # Handle Mirascope V2's list of Text/ContentBlock objects
+         # e.g., [Text(type='text', text='Hello!')]
+         if isinstance(content, list):
+             text_parts = []
+             for item in content:
+                 if hasattr(item, "text"):
+                     text_parts.append(item.text)
+                 elif isinstance(item, str):
+                     text_parts.append(item)
+                 else:
+                     text_parts.append(str(item))
+             return "".join(text_parts)
+
+         return str(content)
+
+     @classmethod
+     def _extract_model(cls, response: Any) -> str:
+         """
+         Extract model string from a Mirascope response.
+
+         Handles Mirascope V2 where response.model is a Model object,
+         not a string. Use model_id for the string version.
+
+         Args:
+             response: The Mirascope CallResponse object.
+
+         Returns:
+             The model identifier as a string.
+         """
+         # Mirascope V2: use model_id (string) instead of model (Model object)
+         if hasattr(response, "model_id"):
+             return str(response.model_id)
+
+         # Fallback: try model attribute
+         model = getattr(response, "model", "")
+         if isinstance(model, str):
+             return model
+
+         # If model is an object, try to get a string representation
+         return str(model) if model else ""
+
+     @classmethod
+     def _extract_usage(cls, response: Any) -> dict[str, Any] | None:
+         """
+         Extract usage statistics from a Mirascope response.
+
+         Handles Mirascope V2 where usage may be in response.raw.usage
+         instead of response.usage.
+
+         Args:
+             response: The Mirascope CallResponse object.
+
+         Returns:
+             Usage statistics as a dict, or None if not available.
+         """
+         usage = None
+
+         # Try direct usage attribute first
+         if hasattr(response, "usage") and response.usage is not None:
+             if hasattr(response.usage, "model_dump"):
+                 usage = response.usage.model_dump()
+             elif isinstance(response.usage, dict):
+                 usage = response.usage
+
+         # Mirascope V2: try response.raw.usage
+         if usage is None and hasattr(response, "raw") and response.raw is not None:
+             raw = response.raw
+             if hasattr(raw, "usage") and raw.usage is not None:
+                 if hasattr(raw.usage, "model_dump"):
+                     usage = raw.usage.model_dump()
+                 elif isinstance(raw.usage, dict):
+                     usage = raw.usage
+
+         return usage
+
+     @classmethod
+     def _extract_stop_reason(cls, response: Any) -> str | None:
+         """
+         Extract stop reason from a Mirascope response.
+
+         Handles various attribute names across different providers
+         and Mirascope versions.
+
+         Args:
+             response: The Mirascope CallResponse object.
+
+         Returns:
+             The stop reason as a string, or None if not available.
+         """
+         # Try common attribute names
+         stop_reason = getattr(response, "stop_reason", None)
+         if stop_reason is None:
+             stop_reason = getattr(response, "finish_reason", None)
+
+         # Mirascope V2: try response.raw.stop_reason
+         if stop_reason is None and hasattr(response, "raw") and response.raw is not None:
+             stop_reason = getattr(response.raw, "stop_reason", None)
+             if stop_reason is None:
+                 stop_reason = getattr(response.raw, "finish_reason", None)
+
+         return stop_reason
+
+     @classmethod
+     def _parse_tool_args(cls, args: Any) -> dict[str, Any]:
+         """
+         Parse tool arguments from various formats.
+
+         Mirascope V2 returns args as a JSON string (e.g., '{"city": "Tokyo"}'),
+         while we need a dict for execution.
+
+         Args:
+             args: Tool arguments (string, dict, or None).
+
+         Returns:
+             Parsed arguments as a dict.
+         """
+         import json
+
+         if args is None:
+             return {}
+         if isinstance(args, dict):
+             return args
+         if isinstance(args, str):
+             try:
+                 parsed = json.loads(args)
+                 return parsed if isinstance(parsed, dict) else {}
+             except json.JSONDecodeError:
+                 return {}
+         return {}
+
+     @classmethod
+     def from_mirascope(cls, response: Any, provider: str) -> DurableResponse:
+         """
+         Convert a Mirascope response to DurableResponse.
+
+         Args:
+             response: The Mirascope CallResponse object.
+             provider: The provider name (e.g., "anthropic").
+
+         Returns:
+             A DurableResponse instance with serializable data.
+         """
+         # Extract tool calls if available
+         tool_calls = None
+         if hasattr(response, "tool_calls") and response.tool_calls:
+             tool_calls = []
+             for tc in response.tool_calls:
+                 if hasattr(tc, "model_dump"):
+                     tc_dict = tc.model_dump()
+                     # Ensure args is a dict, not a JSON string
+                     tc_dict["args"] = cls._parse_tool_args(tc_dict.get("args"))
+                     tool_calls.append(tc_dict)
+                 elif isinstance(tc, dict):
+                     tc["args"] = cls._parse_tool_args(tc.get("args"))
+                     tool_calls.append(tc)
+                 else:
+                     # Fallback: extract common attributes
+                     raw_args = getattr(tc, "args", None) or getattr(tc, "arguments", {})
+                     tool_calls.append(
+                         {
+                             "name": getattr(tc, "name", None) or getattr(tc, "tool_name", None),
+                             "args": cls._parse_tool_args(raw_args),
+                             "id": getattr(tc, "id", None) or getattr(tc, "tool_call_id", None),
+                         }
+                     )
+
+         return cls(
+             content=cls._extract_content(response),
+             model=cls._extract_model(response),
+             provider=provider,
+             usage=cls._extract_usage(response),
+             tool_calls=tool_calls,
+             stop_reason=cls._extract_stop_reason(response),
+         )
+
+     @property
+     def has_tool_calls(self) -> bool:
+         """Check if the response contains tool calls."""
+         return bool(self.tool_calls)
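
Since DurableResponse round-trips through plain dicts, it can be exercised without any LLM call. A minimal sketch of the to_dict/from_dict contract, with illustrative values:

    from edda.integrations.mirascope.types import DurableResponse

    cached = {
        "content": "Hello!",
        "model": "anthropic/claude-sonnet-4-20250514",
        "provider": "anthropic",
        "usage": {"input_tokens": 10, "output_tokens": 3},
        "tool_calls": [{"name": "get_weather", "args": {"city": "Tokyo"}, "id": "tc_1"}],
        "stop_reason": "tool_use",
    }
    response = DurableResponse.from_dict(cached)
    assert response.has_tool_calls
    # Dataclass equality makes the round trip easy to verify
    assert DurableResponse.from_dict(response.to_dict()) == response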

edda_framework-0.11.0.dist-info/METADATA (modified)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: edda-framework
- Version: 0.10.0
+ Version: 0.11.0
  Summary: Lightweight Durable Execution Framework
  Project-URL: Homepage, https://github.com/i2y/edda
  Project-URL: Documentation, https://github.com/i2y/edda#readme
@@ -44,6 +44,9 @@ Requires-Dist: testcontainers[postgres]>=4.0.0; extra == 'dev'
  Requires-Dist: tsuno>=0.1.3; extra == 'dev'
  Provides-Extra: mcp
  Requires-Dist: mcp>=1.22.0; extra == 'mcp'
+ Provides-Extra: mirascope
+ Requires-Dist: mirascope[anthropic,google,openai]>=2.0.0a0; extra == 'mirascope'
+ Requires-Dist: pydantic-settings>=2.0.0; extra == 'mirascope'
  Provides-Extra: mysql
  Requires-Dist: aiomysql>=0.2.0; extra == 'mysql'
  Provides-Extra: opentelemetry
@@ -93,6 +96,7 @@ For detailed documentation, visit [https://i2y.github.io/edda/](https://i2y.gith
  - 📬 **Channel-based Messaging**: Actor-model style communication with competing (job queue) and broadcast (fan-out) modes
  - ⚡ **Instant Notifications**: PostgreSQL LISTEN/NOTIFY for near-instant event delivery (optional)
  - 🤖 **MCP Integration**: Expose durable workflows as AI tools via Model Context Protocol
+ - 🧠 **Mirascope Integration**: Durable LLM calls
  - 🌍 **ASGI/WSGI Support**: Deploy with your preferred server (uvicorn, gunicorn, uWSGI)

  ## Use Cases

edda_framework-0.11.0.dist-info/RECORD (modified)
@@ -16,6 +16,11 @@ edda/integrations/__init__.py,sha256=F_CaTvlDEbldfOpPKq_U9ve1E573tS6XzqXnOtyHcXI
  edda/integrations/mcp/__init__.py,sha256=YK-8m0DIdP-RSqewlIX7xnWU7TD3NioCiW2_aZSgnn8,1232
  edda/integrations/mcp/decorators.py,sha256=31SmbDwmHEGvUNa3aaatW91hBkpnS5iN9uy47dID3J4,10037
  edda/integrations/mcp/server.py,sha256=Q5r4AbMn-9gBcy2CZocbgW7O0fn7Qb4e9CBJa1FEmzU,14507
+ edda/integrations/mirascope/__init__.py,sha256=TKKIs1W2ef88qT1oNoNm0-DQZObOc7tiuw3ul38nc6U,2509
+ edda/integrations/mirascope/agent.py,sha256=9y2HmyEDs5zREJgRuXI9EINjj09rWy991Khs7eDXfyY,16235
+ edda/integrations/mirascope/call.py,sha256=2pSDrja8Zix6d3TM4VejLmp1DHxbUnSAdSBSg7CFC7k,5754
+ edda/integrations/mirascope/decorator.py,sha256=TIK9qoR5ydaz-r33HAMuLrY4rsKJ5tsPlJJp5T_06B8,5488
+ edda/integrations/mirascope/types.py,sha256=vgEAu8EFTLSd92XSAxtZpMoe5gv93fe4Rm0DaXaDlV8,9088
  edda/integrations/opentelemetry/__init__.py,sha256=x1_PyyygGDW-rxQTwoIrGzyjKErXHOOKdquFAMlCOAo,906
  edda/integrations/opentelemetry/hooks.py,sha256=rCb6K_gJJMxjQ-UoJnbIOWsafapipzu7w-YPROZKxDA,21330
  edda/outbox/__init__.py,sha256=azXG1rtheJEjOyoWmMsBeR2jp8Bz02R3wDEd5tQnaWA,424
@@ -38,8 +43,8 @@ edda/viewer_ui/theme.py,sha256=mrXoXLRzgSnvE2a58LuMcPJkhlvHEDMWVa8Smqtk4l0,8118
  edda/visualizer/__init__.py,sha256=DOpDstNhR0VcXAs_eMKxaL30p_0u4PKZ4o2ndnYhiRo,343
  edda/visualizer/ast_analyzer.py,sha256=plmx7C9X_X35xLY80jxOL3ljg3afXxBePRZubqUIkxY,13663
  edda/visualizer/mermaid_generator.py,sha256=XWa2egoOTNDfJEjPcwoxwQmblUqXf7YInWFjFRI1QGo,12457
- edda_framework-0.10.0.dist-info/METADATA,sha256=HGJf790N4Rh8EDGIC4e7lFhH8xV5kCQP_xNPTLK8kpE,36366
- edda_framework-0.10.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- edda_framework-0.10.0.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
- edda_framework-0.10.0.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
- edda_framework-0.10.0.dist-info/RECORD,,
+ edda_framework-0.11.0.dist-info/METADATA,sha256=AkgFtGUJNfhOoHXzti_R7fLli1q45Bg5xO6TfpjvsO8,36587
+ edda_framework-0.11.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ edda_framework-0.11.0.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
+ edda_framework-0.11.0.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
+ edda_framework-0.11.0.dist-info/RECORD,,