aury_agent-0.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. aury/__init__.py +2 -0
  2. aury/agents/__init__.py +55 -0
  3. aury/agents/a2a/__init__.py +168 -0
  4. aury/agents/backends/__init__.py +196 -0
  5. aury/agents/backends/artifact/__init__.py +9 -0
  6. aury/agents/backends/artifact/memory.py +130 -0
  7. aury/agents/backends/artifact/types.py +133 -0
  8. aury/agents/backends/code/__init__.py +65 -0
  9. aury/agents/backends/file/__init__.py +11 -0
  10. aury/agents/backends/file/local.py +66 -0
  11. aury/agents/backends/file/types.py +40 -0
  12. aury/agents/backends/invocation/__init__.py +8 -0
  13. aury/agents/backends/invocation/memory.py +81 -0
  14. aury/agents/backends/invocation/types.py +110 -0
  15. aury/agents/backends/memory/__init__.py +8 -0
  16. aury/agents/backends/memory/memory.py +179 -0
  17. aury/agents/backends/memory/types.py +136 -0
  18. aury/agents/backends/message/__init__.py +9 -0
  19. aury/agents/backends/message/memory.py +122 -0
  20. aury/agents/backends/message/types.py +124 -0
  21. aury/agents/backends/sandbox.py +275 -0
  22. aury/agents/backends/session/__init__.py +8 -0
  23. aury/agents/backends/session/memory.py +93 -0
  24. aury/agents/backends/session/types.py +124 -0
  25. aury/agents/backends/shell/__init__.py +11 -0
  26. aury/agents/backends/shell/local.py +110 -0
  27. aury/agents/backends/shell/types.py +55 -0
  28. aury/agents/backends/shell.py +209 -0
  29. aury/agents/backends/snapshot/__init__.py +19 -0
  30. aury/agents/backends/snapshot/git.py +95 -0
  31. aury/agents/backends/snapshot/hybrid.py +125 -0
  32. aury/agents/backends/snapshot/memory.py +86 -0
  33. aury/agents/backends/snapshot/types.py +59 -0
  34. aury/agents/backends/state/__init__.py +29 -0
  35. aury/agents/backends/state/composite.py +49 -0
  36. aury/agents/backends/state/file.py +57 -0
  37. aury/agents/backends/state/memory.py +52 -0
  38. aury/agents/backends/state/sqlite.py +262 -0
  39. aury/agents/backends/state/types.py +178 -0
  40. aury/agents/backends/subagent/__init__.py +165 -0
  41. aury/agents/cli/__init__.py +41 -0
  42. aury/agents/cli/chat.py +239 -0
  43. aury/agents/cli/config.py +236 -0
  44. aury/agents/cli/extensions.py +460 -0
  45. aury/agents/cli/main.py +189 -0
  46. aury/agents/cli/session.py +337 -0
  47. aury/agents/cli/workflow.py +276 -0
  48. aury/agents/context_providers/__init__.py +66 -0
  49. aury/agents/context_providers/artifact.py +299 -0
  50. aury/agents/context_providers/base.py +177 -0
  51. aury/agents/context_providers/memory.py +70 -0
  52. aury/agents/context_providers/message.py +130 -0
  53. aury/agents/context_providers/skill.py +50 -0
  54. aury/agents/context_providers/subagent.py +46 -0
  55. aury/agents/context_providers/tool.py +68 -0
  56. aury/agents/core/__init__.py +83 -0
  57. aury/agents/core/base.py +573 -0
  58. aury/agents/core/context.py +797 -0
  59. aury/agents/core/context_builder.py +303 -0
  60. aury/agents/core/event_bus/__init__.py +15 -0
  61. aury/agents/core/event_bus/bus.py +203 -0
  62. aury/agents/core/factory.py +169 -0
  63. aury/agents/core/isolator.py +97 -0
  64. aury/agents/core/logging.py +95 -0
  65. aury/agents/core/parallel.py +194 -0
  66. aury/agents/core/runner.py +139 -0
  67. aury/agents/core/services/__init__.py +5 -0
  68. aury/agents/core/services/file_session.py +144 -0
  69. aury/agents/core/services/message.py +53 -0
  70. aury/agents/core/services/session.py +53 -0
  71. aury/agents/core/signals.py +109 -0
  72. aury/agents/core/state.py +363 -0
  73. aury/agents/core/types/__init__.py +107 -0
  74. aury/agents/core/types/action.py +176 -0
  75. aury/agents/core/types/artifact.py +135 -0
  76. aury/agents/core/types/block.py +736 -0
  77. aury/agents/core/types/message.py +350 -0
  78. aury/agents/core/types/recall.py +144 -0
  79. aury/agents/core/types/session.py +257 -0
  80. aury/agents/core/types/subagent.py +154 -0
  81. aury/agents/core/types/tool.py +205 -0
  82. aury/agents/eval/__init__.py +331 -0
  83. aury/agents/hitl/__init__.py +57 -0
  84. aury/agents/hitl/ask_user.py +242 -0
  85. aury/agents/hitl/compaction.py +230 -0
  86. aury/agents/hitl/exceptions.py +87 -0
  87. aury/agents/hitl/permission.py +617 -0
  88. aury/agents/hitl/revert.py +216 -0
  89. aury/agents/llm/__init__.py +31 -0
  90. aury/agents/llm/adapter.py +367 -0
  91. aury/agents/llm/openai.py +294 -0
  92. aury/agents/llm/provider.py +476 -0
  93. aury/agents/mcp/__init__.py +153 -0
  94. aury/agents/memory/__init__.py +46 -0
  95. aury/agents/memory/compaction.py +394 -0
  96. aury/agents/memory/manager.py +465 -0
  97. aury/agents/memory/processor.py +177 -0
  98. aury/agents/memory/store.py +187 -0
  99. aury/agents/memory/types.py +137 -0
  100. aury/agents/messages/__init__.py +40 -0
  101. aury/agents/messages/config.py +47 -0
  102. aury/agents/messages/raw_store.py +224 -0
  103. aury/agents/messages/store.py +118 -0
  104. aury/agents/messages/types.py +88 -0
  105. aury/agents/middleware/__init__.py +31 -0
  106. aury/agents/middleware/base.py +341 -0
  107. aury/agents/middleware/chain.py +342 -0
  108. aury/agents/middleware/message.py +129 -0
  109. aury/agents/middleware/message_container.py +126 -0
  110. aury/agents/middleware/raw_message.py +153 -0
  111. aury/agents/middleware/truncation.py +139 -0
  112. aury/agents/middleware/types.py +81 -0
  113. aury/agents/plugin.py +162 -0
  114. aury/agents/react/__init__.py +4 -0
  115. aury/agents/react/agent.py +1923 -0
  116. aury/agents/sandbox/__init__.py +23 -0
  117. aury/agents/sandbox/local.py +239 -0
  118. aury/agents/sandbox/remote.py +200 -0
  119. aury/agents/sandbox/types.py +115 -0
  120. aury/agents/skill/__init__.py +16 -0
  121. aury/agents/skill/loader.py +180 -0
  122. aury/agents/skill/types.py +83 -0
  123. aury/agents/tool/__init__.py +39 -0
  124. aury/agents/tool/builtin/__init__.py +23 -0
  125. aury/agents/tool/builtin/ask_user.py +155 -0
  126. aury/agents/tool/builtin/bash.py +107 -0
  127. aury/agents/tool/builtin/delegate.py +726 -0
  128. aury/agents/tool/builtin/edit.py +121 -0
  129. aury/agents/tool/builtin/plan.py +277 -0
  130. aury/agents/tool/builtin/read.py +91 -0
  131. aury/agents/tool/builtin/thinking.py +111 -0
  132. aury/agents/tool/builtin/yield_result.py +130 -0
  133. aury/agents/tool/decorator.py +252 -0
  134. aury/agents/tool/set.py +204 -0
  135. aury/agents/usage/__init__.py +12 -0
  136. aury/agents/usage/tracker.py +236 -0
  137. aury/agents/workflow/__init__.py +85 -0
  138. aury/agents/workflow/adapter.py +268 -0
  139. aury/agents/workflow/dag.py +116 -0
  140. aury/agents/workflow/dsl.py +575 -0
  141. aury/agents/workflow/executor.py +659 -0
  142. aury/agents/workflow/expression.py +136 -0
  143. aury/agents/workflow/parser.py +182 -0
  144. aury/agents/workflow/state.py +145 -0
  145. aury/agents/workflow/types.py +86 -0
  146. aury_agent-0.0.4.dist-info/METADATA +90 -0
  147. aury_agent-0.0.4.dist-info/RECORD +149 -0
  148. aury_agent-0.0.4.dist-info/WHEEL +4 -0
  149. aury_agent-0.0.4.dist-info/entry_points.txt +2 -0
aury/agents/llm/provider.py
@@ -0,0 +1,476 @@
+ """LLM Provider protocol and events."""
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Any, AsyncIterator, Literal, Protocol, runtime_checkable
+
+
+ @dataclass
+ class Capabilities:
+     """Model capabilities.
+
+     Example:
+         caps = Capabilities(supports_tools=True, supports_thinking=True)
+     """
+     # Core
+     supports_tools: bool = True
+     supports_streaming: bool = True
+     supports_thinking: bool = False
+
+     # Multimodal
+     supports_vision: bool = False
+     supports_audio: bool = False
+     supports_video: bool = False
+     supports_files: bool = False
+
+     # Context
+     max_context_tokens: int = 128000
+     max_output_tokens: int = 4096
+
+     # Advanced
+     supports_json_mode: bool = False
+     supports_prefill: bool = False
+     supports_caching: bool = False
+
+
+ @dataclass
+ class ToolCall:
+     """Tool call from LLM."""
+     id: str
+     name: str
+     arguments: str  # JSON string
+
+
+ @dataclass
+ class Usage:
+     """Token usage statistics."""
+     input_tokens: int = 0
+     output_tokens: int = 0
+     cache_read_tokens: int = 0
+     cache_write_tokens: int = 0
+     reasoning_tokens: int = 0  # Thinking/reasoning tokens (for models that support extended thinking)
+
+     @property
+     def total_tokens(self) -> int:
+         return self.input_tokens + self.output_tokens + self.reasoning_tokens
+
+     def __add__(self, other: Usage) -> Usage:
+         return Usage(
+             input_tokens=self.input_tokens + other.input_tokens,
+             output_tokens=self.output_tokens + other.output_tokens,
+             cache_read_tokens=self.cache_read_tokens + other.cache_read_tokens,
+             cache_write_tokens=self.cache_write_tokens + other.cache_write_tokens,
+             reasoning_tokens=self.reasoning_tokens + other.reasoning_tokens,
+         )
+
+
+ @dataclass
+ class LLMEvent:
+     """LLM streaming event.
+
+     Unified format for all LLM providers.
+     """
+     type: Literal[
+         "content",            # Text content delta
+         "thinking",           # Thinking content delta (some models)
+         "tool_call_start",    # Tool call started (name known, arguments pending)
+         "tool_call_delta",    # Tool arguments delta (streaming)
+         "tool_call_progress", # Tool arguments progress (bytes received)
+         "tool_call",          # Tool call complete (arguments complete)
+         "usage",              # Token usage
+         "completed",          # Generation complete
+         "error",              # Error
+     ]
+
+     # content/thinking delta
+     delta: str | None = None
+
+     # tool_call
+     tool_call: ToolCall | None = None
+     tool_call_delta: dict | None = None  # {"call_id": str, "arguments_delta": str}
+     tool_call_progress: dict | None = None  # {"call_id": str, "bytes_received": int, "last_delta_size": int}
+
+     # usage/completed
+     usage: Usage | None = None
+     finish_reason: str | None = None
+
+     # error
+     error: str | None = None
+
+
+ @dataclass
+ class ToolDefinition:
+     """Tool definition for LLM."""
+     name: str
+     description: str
+     input_schema: dict[str, Any]
+
+     def to_anthropic(self) -> dict[str, Any]:
+         """Convert to Anthropic format."""
+         return {
+             "name": self.name,
+             "description": self.description,
+             "input_schema": self.input_schema,
+         }
+
+     def to_openai(self) -> dict[str, Any]:
+         """Convert to OpenAI format."""
+         return {
+             "type": "function",
+             "function": {
+                 "name": self.name,
+                 "description": self.description,
+                 "parameters": self.input_schema,
+             },
+         }
+
+
+ @dataclass
+ class LLMMessage:
+     """Message for LLM API.
+
+     Roles:
+     - system: System prompt
+     - user: User message (can include images)
+     - assistant: Assistant response (can include tool_calls)
+     - tool: Tool result (requires tool_call_id)
+     """
+     role: Literal["system", "user", "assistant", "tool"]
+     content: str | list[dict[str, Any]]
+     tool_call_id: str | None = None  # Required for tool role
+
+     def to_dict(self) -> dict[str, Any]:
+         d = {"role": self.role, "content": self.content}
+         if self.tool_call_id:
+             d["tool_call_id"] = self.tool_call_id
+         return d
+
+     @classmethod
+     def system(cls, content: str) -> "LLMMessage":
+         """Create system message."""
+         return cls(role="system", content=content)
+
+     @classmethod
+     def user(cls, content: str | list[dict[str, Any]]) -> "LLMMessage":
+         """Create user message."""
+         return cls(role="user", content=content)
+
+     @classmethod
+     def assistant(cls, content: str | list[dict[str, Any]]) -> "LLMMessage":
+         """Create assistant message."""
+         return cls(role="assistant", content=content)
+
+     @classmethod
+     def tool(cls, content: str, tool_call_id: str) -> "LLMMessage":
+         """Create tool result message."""
+         return cls(role="tool", content=content, tool_call_id=tool_call_id)
+
+
+ @runtime_checkable
+ class LLMProvider(Protocol):
+     """LLM Provider protocol.
+
+     Implement this protocol to support different LLM backends.
+     """
+
+     @property
+     def provider(self) -> str:
+         """Provider name (openai, anthropic, etc.)."""
+         ...
+
+     @property
+     def model(self) -> str:
+         """Model name."""
+         ...
+
+     async def complete(
+         self,
+         messages: list[LLMMessage],
+         tools: list[ToolDefinition] | None = None,
+         **kwargs: Any,
+     ) -> AsyncIterator[LLMEvent]:
+         """Generate completion with streaming.
+
+         Args:
+             messages: Conversation messages
+             tools: Available tools (optional)
+             **kwargs: Additional parameters (temperature, max_tokens, etc.)
+
+         Yields:
+             LLMEvent: Streaming events
+         """
+         ...
+
+
+ @dataclass
+ class MockResponse:
+     """Mock response configuration.
+
+     Attributes:
+         text: Text response content
+         thinking: Thinking content (for models that support it)
+         tool_calls: List of tool calls to make
+         finish_reason: Completion reason
+         delay: Simulated delay in seconds
+         stream: Whether to stream character by character
+     """
+     text: str = ""
+     thinking: str = ""
+     tool_calls: list[dict[str, Any]] = field(default_factory=list)
+     finish_reason: str = "end_turn"
+     delay: float = 0.0
+     stream: bool = True
+
+
+ class MockLLMProvider:
+     """Mock LLM provider for testing and examples.
+
+     Supports both simple string responses and structured MockResponse objects.
+
+     Examples:
+         # Simple usage
+         llm = MockLLMProvider(responses=["Hello!", "How can I help?"])
+
+         # With MockResponse for tool calls
+         llm = MockLLMProvider(responses=[
+             MockResponse(
+                 thinking="I need to use the calculator",
+                 tool_calls=[{"name": "calc", "arguments": {"expr": "1+1"}}]
+             ),
+             MockResponse(text="The result is 2.")
+         ])
+
+         # Smart mode - auto-generate responses
+         llm = MockLLMProvider(smart_mode=True)
+     """
+
+     def __init__(
+         self,
+         provider: str = "mock",
+         model: str = "mock-model",
+         responses: list[str | MockResponse] | None = None,
+         smart_mode: bool = False,
+         default_delay: float = 0.0,
+     ):
+         self._provider = provider
+         self._model = model
+         self._responses = responses or []
+         self._smart_mode = smart_mode
+         self._default_delay = default_delay
+         self._call_count = 0
+         self._response_index = 0
+
+     @property
+     def provider(self) -> str:
+         return self._provider
+
+     @property
+     def model(self) -> str:
+         return self._model
+
+     @property
+     def call_count(self) -> int:
+         return self._call_count
+
+     def reset(self) -> None:
+         """Reset call count and response index."""
+         self._call_count = 0
+         self._response_index = 0
+
+     def add_response(self, response: str | MockResponse) -> None:
+         """Add a response to the queue."""
+         self._responses.append(response)
+
+     async def complete(
+         self,
+         messages: list[LLMMessage],
+         tools: list[ToolDefinition] | None = None,
+         enable_thinking: bool = False,
+         **kwargs: Any,
+     ) -> AsyncIterator[LLMEvent]:
+         """Return mock response.
+
+         Args:
+             messages: Conversation messages
+             tools: Available tools
+             enable_thinking: Whether to output thinking content
+             **kwargs: Additional parameters (ignored)
+         """
+         import asyncio
+         import json
+
+         self._call_count += 1
+
+         # Get response
+         response = self._get_response(messages, tools)
+
+         # Delay if configured
+         if isinstance(response, MockResponse) and response.delay > 0:
+             await asyncio.sleep(response.delay)
+         elif self._default_delay > 0:
+             await asyncio.sleep(self._default_delay)
+
+         # Normalize to MockResponse
+         if isinstance(response, str):
+             response = MockResponse(text=response)
+
+         # Stream thinking (only if enabled and response has thinking)
+         if enable_thinking and response.thinking:
+             if response.stream:
+                 for char in response.thinking:
+                     yield LLMEvent(type="thinking", delta=char)
+             else:
+                 yield LLMEvent(type="thinking", delta=response.thinking)
+
+         # Stream text
+         if response.text:
+             if response.stream:
+                 for char in response.text:
+                     yield LLMEvent(type="content", delta=char)
+             else:
+                 yield LLMEvent(type="content", delta=response.text)
+
+         # Tool calls
+         for i, tc in enumerate(response.tool_calls):
+             tool_call = ToolCall(
+                 id=tc.get("id", f"call_{self._call_count}_{i}"),
+                 name=tc["name"],
+                 arguments=json.dumps(tc.get("arguments", {})),
+             )
+             yield LLMEvent(type="tool_call", tool_call=tool_call)
+
+         # Usage
+         yield LLMEvent(
+             type="usage",
+             usage=Usage(
+                 input_tokens=self._estimate_tokens(messages),
+                 output_tokens=len(response.text) // 4 + len(response.thinking) // 4 + 10,
+             ),
+         )
+
+         # Complete
+         finish_reason = response.finish_reason
+         if response.tool_calls:
+             finish_reason = "tool_use"
+         yield LLMEvent(type="completed", finish_reason=finish_reason)
+
+     def _get_response(self, messages: list[LLMMessage], tools: list[ToolDefinition] | None) -> str | MockResponse:
+         """Get next response."""
+         # Use queued responses first
+         if self._response_index < len(self._responses):
+             response = self._responses[self._response_index]
+             self._response_index += 1
+             return response
+
+         # Smart mode: generate response based on input
+         if self._smart_mode:
+             return self._generate_smart_response(messages, tools)
+
+         # Default response
+         return "Hello! How can I help you?"
+
+     def _generate_smart_response(self, messages: list[LLMMessage], tools: list[ToolDefinition] | None) -> MockResponse:
+         """Generate response based on input."""
+         # Check for tool results
+         for msg in messages:
+             if isinstance(msg.content, list):
+                 for part in msg.content:
+                     if isinstance(part, dict) and part.get("type") == "tool_result":
+                         return MockResponse(text="I've processed the tool result. Is there anything else?")
+
+         # Get last user message
+         last_user_msg = ""
+         for msg in reversed(messages):
+             if msg.role == "user":
+                 if isinstance(msg.content, str):
+                     last_user_msg = msg.content
+                 break
+
+         # Simple keyword responses
+         lower_msg = last_user_msg.lower()
+         if "hello" in lower_msg or "你好" in lower_msg:
+             return MockResponse(text="你好!我是 AI 助手,有什么可以帮助你的?")
+         if "谢谢" in lower_msg or "thank" in lower_msg:
+             return MockResponse(text="不客气!还有其他问题吗?")
+         if "再见" in lower_msg or "bye" in lower_msg:
+             return MockResponse(text="再见!祝你有愉快的一天!")
+
+         return MockResponse(text=f"[Mock] 收到: {last_user_msg[:50]}...")
+
+     def _estimate_tokens(self, messages: list[LLMMessage]) -> int:
+         """Estimate token count."""
+         total = 0
+         for msg in messages:
+             if isinstance(msg.content, str):
+                 total += len(msg.content) // 4
+             elif isinstance(msg.content, list):
+                 total += sum(len(str(p)) // 4 for p in msg.content)
+         return max(total, 10)
+
+
+ class ToolCallMockProvider(MockLLMProvider):
+     """Mock provider that returns tool calls in sequence.
+
+     Example:
+         llm = ToolCallMockProvider(
+             tool_calls=[
+                 {"name": "search", "arguments": {"query": "test"}},
+                 {"name": "read", "arguments": {"file": "test.txt"}},
+             ],
+             final_response="Based on the results, here's my answer..."
+         )
+     """
+
+     def __init__(
+         self,
+         tool_calls: list[dict[str, Any]],
+         final_response: str = "Done!",
+         **kwargs: Any,
+     ):
+         super().__init__(**kwargs)
+         self._tool_call_queue = tool_calls
+         self._final_response = final_response
+         self._tool_results_received = 0
+
+     async def complete(
+         self,
+         messages: list[LLMMessage],
+         tools: list[ToolDefinition] | None = None,
+         **kwargs: Any,
+     ) -> AsyncIterator[LLMEvent]:
+         """Return tool calls or final response."""
+         import json
+
+         self._call_count += 1
+
+         # Check if we've received tool results
+         has_tool_result = any(
+             isinstance(m.content, list) and
+             any(p.get("type") == "tool_result" for p in m.content if isinstance(p, dict))
+             for m in messages
+         )
+
+         if has_tool_result:
+             self._tool_results_received += 1
+
+         # Return tool calls if we haven't exhausted them
+         if self._tool_results_received < len(self._tool_call_queue):
+             tc = self._tool_call_queue[self._tool_results_received]
+             yield LLMEvent(
+                 type="tool_call",
+                 tool_call=ToolCall(
+                     id=f"call_{self._tool_results_received}",
+                     name=tc["name"],
+                     arguments=json.dumps(tc.get("arguments", {})),
+                 ),
+             )
+         else:
+             # Return final response
+             for char in self._final_response:
+                 yield LLMEvent(type="content", delta=char)
+
+         yield LLMEvent(
+             type="usage",
+             usage=Usage(input_tokens=100, output_tokens=50),
+         )
+         yield LLMEvent(type="completed", finish_reason="end_turn")
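
For orientation, here is a minimal consumption sketch (not part of the package diff) showing how the streaming API above might be driven; it assumes MockLLMProvider, MockResponse, and LLMMessage are importable from aury.agents.llm.provider exactly as defined in this hunk.

import asyncio

from aury.agents.llm.provider import LLMMessage, MockLLMProvider, MockResponse


async def main() -> None:
    # First queued response issues a tool call; a real caller would then
    # append the tool result and call complete() again for the second response.
    llm = MockLLMProvider(responses=[
        MockResponse(
            thinking="Need the calculator",
            tool_calls=[{"name": "calc", "arguments": {"expr": "1+1"}}],
        ),
        MockResponse(text="The result is 2."),
    ])

    chunks: list[str] = []
    async for event in llm.complete(messages=[LLMMessage.user("What is 1+1?")]):
        if event.type == "content" and event.delta:
            chunks.append(event.delta)  # streamed text deltas
        elif event.type == "tool_call" and event.tool_call:
            print("tool call:", event.tool_call.name, event.tool_call.arguments)
        elif event.type == "completed":
            print("finished:", event.finish_reason)
    print("".join(chunks))


asyncio.run(main())
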
aury/agents/mcp/__init__.py
@@ -0,0 +1,153 @@
+ """MCP (Model Context Protocol) module.
+
+ TODO: Implement MCP protocol support.
+
+ This module will provide:
+ - MCPToolset: Connect to external MCP servers and use their tools
+ - MCPServer: Expose Aury tools as MCP server
+ - MCPClient: Low-level MCP client for discovery
+
+ Reference: Model Context Protocol
+ https://modelcontextprotocol.io/
+ """
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Any, TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from ..tool import BaseTool
+
+
+ # =============================================================================
+ # Connection Parameters
+ # =============================================================================
+
+ @dataclass
+ class StdioServerParams:
+     """Parameters for connecting to MCP server via stdio.
+
+     TODO: Implement stdio transport.
+     """
+     command: str
+     args: list[str] = field(default_factory=list)
+     env: dict[str, str] = field(default_factory=dict)
+
+
+ @dataclass
+ class HttpServerParams:
+     """Parameters for connecting to MCP server via HTTP.
+
+     TODO: Implement HTTP transport.
+     """
+     url: str
+     headers: dict[str, str] = field(default_factory=dict)
+
+
+ # =============================================================================
+ # TODO: MCP Toolset
+ # =============================================================================
+
+ class MCPToolset:
+     """Connect to external MCP servers and use their tools.
+
+     TODO: Implement MCP toolset.
+
+     Usage:
+         mcp_tools = MCPToolset(
+             connection_params=StdioServerParams(
+                 command="npx",
+                 args=["-y", "@modelcontextprotocol/server-filesystem", "/path"],
+             ),
+             tool_filter=["read_file", "list_directory"],
+         )
+         agent = ReactAgent.create(llm=llm, tools=[mcp_tools])
+     """
+
+     def __init__(
+         self,
+         connection_params: StdioServerParams | HttpServerParams,
+         tool_filter: list[str] | None = None,
+     ):
+         self.connection_params = connection_params
+         self.tool_filter = tool_filter
+         raise NotImplementedError("TODO: MCP toolset not yet implemented")
+
+     async def connect(self) -> None:
+         """Connect to MCP server."""
+         raise NotImplementedError("TODO: MCP connect not yet implemented")
+
+     async def get_tools(self) -> list[Any]:
+         """Get tools from MCP server."""
+         raise NotImplementedError("TODO: MCP get_tools not yet implemented")
+
+
+ # =============================================================================
+ # TODO: MCP Server
+ # =============================================================================
+
+ class MCPServer:
+     """Expose Aury tools as MCP server.
+
+     TODO: Implement MCP server.
+
+     Usage:
+         server = MCPServer(
+             name="my_service",
+             tools=[my_tool1, my_tool2],
+             transport="http",
+             port=8000,
+         )
+         await server.start()
+     """
+
+     def __init__(
+         self,
+         name: str,
+         tools: list["BaseTool"],
+         transport: str = "stdio",
+         host: str = "0.0.0.0",
+         port: int = 8000,
+     ):
+         self.name = name
+         self.tools = tools
+         self.transport = transport
+         self.host = host
+         self.port = port
+         raise NotImplementedError("TODO: MCP server not yet implemented")
+
+     async def start(self) -> None:
+         """Start MCP server."""
+         raise NotImplementedError("TODO: MCP server not yet implemented")
+
+     async def stop(self) -> None:
+         """Stop MCP server."""
+         raise NotImplementedError("TODO: MCP server not yet implemented")
+
+
+ # =============================================================================
+ # TODO: MCP Client
+ # =============================================================================
+
+ class MCPClient:
+     """Low-level MCP client for discovery.
+
+     TODO: Implement MCP client.
+     """
+
+     def __init__(self, url: str):
+         self.url = url
+         raise NotImplementedError("TODO: MCP client not yet implemented")
+
+     async def discover(self) -> Any:
+         """Discover MCP server capabilities."""
+         raise NotImplementedError("TODO: MCP discover not yet implemented")
+
+
+ __all__ = [
+     "StdioServerParams",
+     "HttpServerParams",
+     "MCPToolset",
+     "MCPServer",
+     "MCPClient",
+ ]
aury/agents/memory/__init__.py
@@ -0,0 +1,46 @@
+ """Memory system for long-term knowledge storage."""
+ from .types import (
+     MemorySummary,
+     MemoryRecall,
+     MemoryContext,
+ )
+ from .store import (
+     MemoryEntry,
+     ScoredEntry,
+     MemoryStore,
+     InMemoryStore,
+ )
+ from .manager import (
+     WriteTrigger,
+     RetrievalSource,
+     MemoryManager,
+ )
+ from .processor import (
+     WriteDecision,
+     WriteResult,
+     WriteFilter,
+     MemoryProcessor,
+     DeduplicationFilter,
+ )
+
+ __all__ = [
+     # Types
+     "MemorySummary",
+     "MemoryRecall",
+     "MemoryContext",
+     # Store
+     "MemoryEntry",
+     "ScoredEntry",
+     "MemoryStore",
+     "InMemoryStore",
+     # Manager
+     "WriteTrigger",
+     "RetrievalSource",
+     "MemoryManager",
+     # Processor
+     "WriteDecision",
+     "WriteResult",
+     "WriteFilter",
+     "MemoryProcessor",
+     "DeduplicationFilter",
+ ]