aury_agent-0.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. aury/__init__.py +2 -0
  2. aury/agents/__init__.py +55 -0
  3. aury/agents/a2a/__init__.py +168 -0
  4. aury/agents/backends/__init__.py +196 -0
  5. aury/agents/backends/artifact/__init__.py +9 -0
  6. aury/agents/backends/artifact/memory.py +130 -0
  7. aury/agents/backends/artifact/types.py +133 -0
  8. aury/agents/backends/code/__init__.py +65 -0
  9. aury/agents/backends/file/__init__.py +11 -0
  10. aury/agents/backends/file/local.py +66 -0
  11. aury/agents/backends/file/types.py +40 -0
  12. aury/agents/backends/invocation/__init__.py +8 -0
  13. aury/agents/backends/invocation/memory.py +81 -0
  14. aury/agents/backends/invocation/types.py +110 -0
  15. aury/agents/backends/memory/__init__.py +8 -0
  16. aury/agents/backends/memory/memory.py +179 -0
  17. aury/agents/backends/memory/types.py +136 -0
  18. aury/agents/backends/message/__init__.py +9 -0
  19. aury/agents/backends/message/memory.py +122 -0
  20. aury/agents/backends/message/types.py +124 -0
  21. aury/agents/backends/sandbox.py +275 -0
  22. aury/agents/backends/session/__init__.py +8 -0
  23. aury/agents/backends/session/memory.py +93 -0
  24. aury/agents/backends/session/types.py +124 -0
  25. aury/agents/backends/shell/__init__.py +11 -0
  26. aury/agents/backends/shell/local.py +110 -0
  27. aury/agents/backends/shell/types.py +55 -0
  28. aury/agents/backends/shell.py +209 -0
  29. aury/agents/backends/snapshot/__init__.py +19 -0
  30. aury/agents/backends/snapshot/git.py +95 -0
  31. aury/agents/backends/snapshot/hybrid.py +125 -0
  32. aury/agents/backends/snapshot/memory.py +86 -0
  33. aury/agents/backends/snapshot/types.py +59 -0
  34. aury/agents/backends/state/__init__.py +29 -0
  35. aury/agents/backends/state/composite.py +49 -0
  36. aury/agents/backends/state/file.py +57 -0
  37. aury/agents/backends/state/memory.py +52 -0
  38. aury/agents/backends/state/sqlite.py +262 -0
  39. aury/agents/backends/state/types.py +178 -0
  40. aury/agents/backends/subagent/__init__.py +165 -0
  41. aury/agents/cli/__init__.py +41 -0
  42. aury/agents/cli/chat.py +239 -0
  43. aury/agents/cli/config.py +236 -0
  44. aury/agents/cli/extensions.py +460 -0
  45. aury/agents/cli/main.py +189 -0
  46. aury/agents/cli/session.py +337 -0
  47. aury/agents/cli/workflow.py +276 -0
  48. aury/agents/context_providers/__init__.py +66 -0
  49. aury/agents/context_providers/artifact.py +299 -0
  50. aury/agents/context_providers/base.py +177 -0
  51. aury/agents/context_providers/memory.py +70 -0
  52. aury/agents/context_providers/message.py +130 -0
  53. aury/agents/context_providers/skill.py +50 -0
  54. aury/agents/context_providers/subagent.py +46 -0
  55. aury/agents/context_providers/tool.py +68 -0
  56. aury/agents/core/__init__.py +83 -0
  57. aury/agents/core/base.py +573 -0
  58. aury/agents/core/context.py +797 -0
  59. aury/agents/core/context_builder.py +303 -0
  60. aury/agents/core/event_bus/__init__.py +15 -0
  61. aury/agents/core/event_bus/bus.py +203 -0
  62. aury/agents/core/factory.py +169 -0
  63. aury/agents/core/isolator.py +97 -0
  64. aury/agents/core/logging.py +95 -0
  65. aury/agents/core/parallel.py +194 -0
  66. aury/agents/core/runner.py +139 -0
  67. aury/agents/core/services/__init__.py +5 -0
  68. aury/agents/core/services/file_session.py +144 -0
  69. aury/agents/core/services/message.py +53 -0
  70. aury/agents/core/services/session.py +53 -0
  71. aury/agents/core/signals.py +109 -0
  72. aury/agents/core/state.py +363 -0
  73. aury/agents/core/types/__init__.py +107 -0
  74. aury/agents/core/types/action.py +176 -0
  75. aury/agents/core/types/artifact.py +135 -0
  76. aury/agents/core/types/block.py +736 -0
  77. aury/agents/core/types/message.py +350 -0
  78. aury/agents/core/types/recall.py +144 -0
  79. aury/agents/core/types/session.py +257 -0
  80. aury/agents/core/types/subagent.py +154 -0
  81. aury/agents/core/types/tool.py +205 -0
  82. aury/agents/eval/__init__.py +331 -0
  83. aury/agents/hitl/__init__.py +57 -0
  84. aury/agents/hitl/ask_user.py +242 -0
  85. aury/agents/hitl/compaction.py +230 -0
  86. aury/agents/hitl/exceptions.py +87 -0
  87. aury/agents/hitl/permission.py +617 -0
  88. aury/agents/hitl/revert.py +216 -0
  89. aury/agents/llm/__init__.py +31 -0
  90. aury/agents/llm/adapter.py +367 -0
  91. aury/agents/llm/openai.py +294 -0
  92. aury/agents/llm/provider.py +476 -0
  93. aury/agents/mcp/__init__.py +153 -0
  94. aury/agents/memory/__init__.py +46 -0
  95. aury/agents/memory/compaction.py +394 -0
  96. aury/agents/memory/manager.py +465 -0
  97. aury/agents/memory/processor.py +177 -0
  98. aury/agents/memory/store.py +187 -0
  99. aury/agents/memory/types.py +137 -0
  100. aury/agents/messages/__init__.py +40 -0
  101. aury/agents/messages/config.py +47 -0
  102. aury/agents/messages/raw_store.py +224 -0
  103. aury/agents/messages/store.py +118 -0
  104. aury/agents/messages/types.py +88 -0
  105. aury/agents/middleware/__init__.py +31 -0
  106. aury/agents/middleware/base.py +341 -0
  107. aury/agents/middleware/chain.py +342 -0
  108. aury/agents/middleware/message.py +129 -0
  109. aury/agents/middleware/message_container.py +126 -0
  110. aury/agents/middleware/raw_message.py +153 -0
  111. aury/agents/middleware/truncation.py +139 -0
  112. aury/agents/middleware/types.py +81 -0
  113. aury/agents/plugin.py +162 -0
  114. aury/agents/react/__init__.py +4 -0
  115. aury/agents/react/agent.py +1923 -0
  116. aury/agents/sandbox/__init__.py +23 -0
  117. aury/agents/sandbox/local.py +239 -0
  118. aury/agents/sandbox/remote.py +200 -0
  119. aury/agents/sandbox/types.py +115 -0
  120. aury/agents/skill/__init__.py +16 -0
  121. aury/agents/skill/loader.py +180 -0
  122. aury/agents/skill/types.py +83 -0
  123. aury/agents/tool/__init__.py +39 -0
  124. aury/agents/tool/builtin/__init__.py +23 -0
  125. aury/agents/tool/builtin/ask_user.py +155 -0
  126. aury/agents/tool/builtin/bash.py +107 -0
  127. aury/agents/tool/builtin/delegate.py +726 -0
  128. aury/agents/tool/builtin/edit.py +121 -0
  129. aury/agents/tool/builtin/plan.py +277 -0
  130. aury/agents/tool/builtin/read.py +91 -0
  131. aury/agents/tool/builtin/thinking.py +111 -0
  132. aury/agents/tool/builtin/yield_result.py +130 -0
  133. aury/agents/tool/decorator.py +252 -0
  134. aury/agents/tool/set.py +204 -0
  135. aury/agents/usage/__init__.py +12 -0
  136. aury/agents/usage/tracker.py +236 -0
  137. aury/agents/workflow/__init__.py +85 -0
  138. aury/agents/workflow/adapter.py +268 -0
  139. aury/agents/workflow/dag.py +116 -0
  140. aury/agents/workflow/dsl.py +575 -0
  141. aury/agents/workflow/executor.py +659 -0
  142. aury/agents/workflow/expression.py +136 -0
  143. aury/agents/workflow/parser.py +182 -0
  144. aury/agents/workflow/state.py +145 -0
  145. aury/agents/workflow/types.py +86 -0
  146. aury_agent-0.0.4.dist-info/METADATA +90 -0
  147. aury_agent-0.0.4.dist-info/RECORD +149 -0
  148. aury_agent-0.0.4.dist-info/WHEEL +4 -0
  149. aury_agent-0.0.4.dist-info/entry_points.txt +2 -0
aury/agents/llm/openai.py
@@ -0,0 +1,294 @@
+"""OpenAI LLM Provider - Direct OpenAI SDK integration without aury-ai-model dependency."""
+from __future__ import annotations
+
+import json
+from typing import Any, AsyncIterator
+
+from openai import AsyncOpenAI
+
+from .provider import (
+    LLMEvent,
+    LLMMessage,
+    LLMProvider,
+    ToolCall,
+    ToolDefinition,
+    Usage,
+)
+
+
+class OpenAIProvider:
+    """OpenAI LLM Provider using official SDK.
+
+    Supports:
+    - OpenAI models (gpt-4, gpt-3.5-turbo, etc.)
+    - Compatible services (OpenRouter, OneAPI, etc.)
+    - Streaming tool calls with new events
+
+    Example:
+        provider = OpenAIProvider(
+            api_key="sk-...",
+            model="gpt-4-turbo",
+        )
+
+        # For OpenRouter:
+        provider = OpenAIProvider(
+            api_key="sk-or-...",
+            base_url="https://openrouter.ai/api/v1",
+            model="anthropic/claude-sonnet-4",
+        )
+    """
+
+    def __init__(
+        self,
+        api_key: str,
+        model: str = "gpt-4-turbo",
+        base_url: str | None = None,
+        organization: str | None = None,
+        **kwargs: Any,
+    ):
+        """Initialize OpenAI provider.
+
+        Args:
+            api_key: OpenAI API key
+            model: Model name
+            base_url: Optional base URL (for OpenRouter, etc.)
+            organization: Optional organization ID
+            **kwargs: Additional client kwargs
+        """
+        self._client = AsyncOpenAI(
+            api_key=api_key,
+            base_url=base_url,
+            organization=organization,
+            **kwargs,
+        )
+        self._model = model
+        self._base_url = base_url
+
+    @property
+    def provider(self) -> str:
+        """Provider name."""
+        return "openai"
+
+    @property
+    def model(self) -> str:
+        """Model name."""
+        return self._model
+
+    def _convert_messages(
+        self,
+        messages: list[LLMMessage],
+    ) -> list[dict[str, Any]]:
+        """Convert LLMMessage to OpenAI format."""
+        result = []
+        for msg in messages:
+            item: dict[str, Any] = {"role": msg.role}
+
+            # Tool message
+            if msg.role == "tool":
+                item["content"] = str(msg.content)
+                if msg.tool_call_id:
+                    item["tool_call_id"] = msg.tool_call_id
+                result.append(item)
+                continue
+
+            # Other messages
+            item["content"] = msg.content
+            result.append(item)
+
+        return result
+
+    def _convert_tools(
+        self,
+        tools: list[ToolDefinition],
+    ) -> list[dict[str, Any]]:
+        """Convert ToolDefinition to OpenAI format."""
+        return [tool.to_openai() for tool in tools]
+
+    async def complete(
+        self,
+        messages: list[LLMMessage],
+        tools: list[ToolDefinition] | None = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[LLMEvent]:
+        """Stream completion from OpenAI.
+
+        Args:
+            messages: Message history
+            tools: Available tools
+            **kwargs: Additional parameters (temperature, max_tokens, etc.)
+
+        Yields:
+            LLMEvent objects
+        """
+        # Build request payload
+        payload: dict[str, Any] = {
+            "model": self._model,
+            "messages": self._convert_messages(messages),
+            "stream": True,
+            "stream_options": {"include_usage": True},
+        }
+
+        # Add tools if provided
+        if tools:
+            payload["tools"] = self._convert_tools(tools)
+
+        # Add optional parameters
+        if "temperature" in kwargs:
+            payload["temperature"] = kwargs["temperature"]
+        if "max_tokens" in kwargs:
+            payload["max_tokens"] = kwargs["max_tokens"]
+        if "top_p" in kwargs:
+            payload["top_p"] = kwargs["top_p"]
+        if "stop" in kwargs:
+            payload["stop"] = kwargs["stop"]
+        if "seed" in kwargs:
+            payload["seed"] = kwargs["seed"]
+        if "response_format" in kwargs:
+            payload["response_format"] = kwargs["response_format"]
+
+        # Extended thinking support (OpenAI o-series)
+        if kwargs.get("reasoning_effort"):
+            payload["reasoning_effort"] = kwargs["reasoning_effort"]
+
+        try:
+            # Create streaming completion
+            try:
+                stream = await self._client.chat.completions.create(**payload)
+            except Exception:
+                # Fallback: remove stream_options if not supported
+                payload.pop("stream_options", None)
+                stream = await self._client.chat.completions.create(**payload)
+
+            # Track partial tool calls
+            partial_tools: dict[str, dict] = {}
+            notified_tools: set[str] = set()
+            last_progress: dict[str, int] = {}
+            last_tid: str | None = None
+            usage_emitted = False
+
+            # Process stream
+            async for chunk in stream:
+                # Usage (final chunk)
+                u = getattr(chunk, "usage", None)
+                if u is not None and not usage_emitted:
+                    rt = 0
+                    try:
+                        details = getattr(u, "completion_tokens_details", None)
+                        if details:
+                            rt = getattr(details, "reasoning_tokens", 0) or 0
+                    except Exception:
+                        pass
+
+                    yield LLMEvent(
+                        type="usage",
+                        usage=Usage(
+                            input_tokens=getattr(u, "prompt_tokens", 0) or 0,
+                            output_tokens=getattr(u, "completion_tokens", 0) or 0,
+                            reasoning_tokens=rt,
+                            cache_read_tokens=0,
+                            cache_write_tokens=0,
+                        ),
+                    )
+                    usage_emitted = True
+
+                # Check for choices
+                if not getattr(chunk, "choices", None):
+                    continue
+
+                ch = getattr(chunk.choices[0], "delta", None)
+                if ch is None:
+                    continue
+
+                # Extended thinking (DeepSeek R1, OpenAI o-series)
+                reasoning_delta = getattr(ch, "reasoning_content", None)
+                if reasoning_delta:
+                    yield LLMEvent(type="thinking", delta=reasoning_delta)
+
+                # Content
+                if getattr(ch, "content", None):
+                    yield LLMEvent(type="content", delta=ch.content)
+
+                # Tool calls (streaming)
+                if getattr(ch, "tool_calls", None):
+                    for tc in ch.tool_calls:
+                        tid = getattr(tc, "id", None) or last_tid or "_last"
+                        if getattr(tc, "id", None):
+                            last_tid = tid
+
+                        # ⭐ 1. tool_call_start (first notification)
+                        if tid not in notified_tools:
+                            fn = getattr(tc, "function", None)
+                            tool_name = getattr(fn, "name", None) if fn else None
+
+                            if tool_name:
+                                yield LLMEvent(
+                                    type="tool_call_start",
+                                    tool_call=ToolCall(
+                                        id=tid,
+                                        name=tool_name,
+                                        arguments="",
+                                    ),
+                                )
+                                notified_tools.add(tid)
+                                last_progress[tid] = 0
+
+                        # Accumulate tool call data
+                        entry = partial_tools.setdefault(
+                            tid,
+                            {"id": tid, "name": "", "arguments": ""},
+                        )
+
+                        fn = getattr(tc, "function", None)
+                        if fn is not None:
+                            # Accumulate name
+                            if getattr(fn, "name", None):
+                                entry["name"] += fn.name
+
+                            # Accumulate arguments
+                            args_delta = getattr(fn, "arguments", None)
+                            if args_delta is not None:
+                                # ⭐ 2. tool_call_delta (argument increments)
+                                if args_delta:  # Only emit non-empty deltas
+                                    yield LLMEvent(
+                                        type="tool_call_delta",
+                                        tool_call_delta={
+                                            "call_id": tid,
+                                            "arguments_delta": args_delta,
+                                        },
+                                    )
+
+                                entry["arguments"] += args_delta
+
+                                # ⭐ 3. tool_call_progress (every 1KB)
+                                current_size = len(entry["arguments"])
+                                prev_size = last_progress.get(tid, 0)
+
+                                if current_size - prev_size >= 1024:
+                                    yield LLMEvent(
+                                        type="tool_call_progress",
+                                        tool_call_progress={
+                                            "call_id": tid,
+                                            "bytes_received": current_size,
+                                            "last_delta_size": current_size - prev_size,
+                                        },
+                                    )
+                                    last_progress[tid] = current_size
+
+            # ⭐ 4. tool_call (complete tool calls)
+            for _, v in partial_tools.items():
+                # Normalize tool call
+                tool_call = ToolCall(
+                    id=v["id"],
+                    name=v["name"],
+                    arguments=v["arguments"],
+                )
+                yield LLMEvent(type="tool_call", tool_call=tool_call)
+
+            # Completion
+            yield LLMEvent(type="completed", finish_reason="end_turn")
+
+        except Exception as e:
+            yield LLMEvent(type="error", error=str(e))
+
+
+__all__ = ["OpenAIProvider"]
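
For readers skimming this new module, a minimal sketch of how the streaming interface above might be consumed follows. It is illustrative only, not part of the package: it assumes LLMMessage accepts role/content keyword arguments and that LLMEvent exposes the type, delta, tool_call, usage, and error fields used in the provider code; the API key, model, and prompt are placeholders.

# Illustrative sketch; LLMMessage construction is assumed, not verified against provider.py.
import asyncio

from aury.agents.llm.openai import OpenAIProvider
from aury.agents.llm.provider import LLMMessage


async def main() -> None:
    provider = OpenAIProvider(api_key="sk-...", model="gpt-4-turbo")
    messages = [LLMMessage(role="user", content="Name three prime numbers.")]

    # complete() yields typed LLMEvent objects as the response streams in.
    async for event in provider.complete(messages, temperature=0.2):
        if event.type == "content":
            print(event.delta, end="", flush=True)  # incremental assistant text
        elif event.type == "tool_call":
            print(f"\n[tool] {event.tool_call.name}: {event.tool_call.arguments}")
        elif event.type == "usage":
            print(f"\n[usage] in={event.usage.input_tokens} out={event.usage.output_tokens}")
        elif event.type == "error":
            print(f"\n[error] {event.error}")


if __name__ == "__main__":
    asyncio.run(main())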