scope-oaf 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. scope_oaf-0.1.0/.github/workflows/publish.yml +53 -0
  2. scope_oaf-0.1.0/.gitignore +32 -0
  3. scope_oaf-0.1.0/DESIGN.md +594 -0
  4. scope_oaf-0.1.0/LICENSE +21 -0
  5. scope_oaf-0.1.0/PKG-INFO +121 -0
  6. scope_oaf-0.1.0/README.md +89 -0
  7. scope_oaf-0.1.0/chat.py +69 -0
  8. scope_oaf-0.1.0/oaf/__init__.py +26 -0
  9. scope_oaf-0.1.0/oaf/agent.py +253 -0
  10. scope_oaf-0.1.0/oaf/context/ConversationalInMemory.py +120 -0
  11. scope_oaf-0.1.0/oaf/context/__init__.py +4 -0
  12. scope_oaf-0.1.0/oaf/context/base.py +54 -0
  13. scope_oaf-0.1.0/oaf/hooks.py +164 -0
  14. scope_oaf-0.1.0/oaf/llmclient/README.md +356 -0
  15. scope_oaf-0.1.0/oaf/llmclient/__init__.py +78 -0
  16. scope_oaf-0.1.0/oaf/llmclient/client.py +338 -0
  17. scope_oaf-0.1.0/oaf/llmclient/errors.py +107 -0
  18. scope_oaf-0.1.0/oaf/llmclient/models.py +317 -0
  19. scope_oaf-0.1.0/oaf/llmclient/parser.py +92 -0
  20. scope_oaf-0.1.0/oaf/llmclient/providers/__init__.py +5 -0
  21. scope_oaf-0.1.0/oaf/llmclient/providers/anthropic_provider.py +304 -0
  22. scope_oaf-0.1.0/oaf/llmclient/providers/base.py +59 -0
  23. scope_oaf-0.1.0/oaf/llmclient/providers/openai_provider.py +301 -0
  24. scope_oaf-0.1.0/oaf/llmclient/sync_client.py +176 -0
  25. scope_oaf-0.1.0/oaf/llmclient/test_llmclient.py +265 -0
  26. scope_oaf-0.1.0/oaf/llmclient/test_llmclient_extras.py +274 -0
  27. scope_oaf-0.1.0/oaf/llmclient/types.py +148 -0
  28. scope_oaf-0.1.0/oaf/prompts/__init__.py +9 -0
  29. scope_oaf-0.1.0/oaf/prompts/base.py +61 -0
  30. scope_oaf-0.1.0/oaf/prompts/default.py +54 -0
  31. scope_oaf-0.1.0/oaf/prompts/raw.py +49 -0
  32. scope_oaf-0.1.0/oaf/tools/__init__.py +6 -0
  33. scope_oaf-0.1.0/oaf/tools/registry.py +113 -0
  34. scope_oaf-0.1.0/oaf/tools/tool.py +247 -0
  35. scope_oaf-0.1.0/pyproject.toml +53 -0
  36. scope_oaf-0.1.0/test.py +6 -0
  37. scope_oaf-0.1.0/test_tools.py +48 -0
  38. scope_oaf-0.1.0/web.py +157 -0
  39. scope_oaf-0.1.0/web_ui.html +657 -0
@@ -0,0 +1,53 @@
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ release:
5
+ types: [published]
6
+
7
+ permissions:
8
+ contents: read
9
+
10
+ jobs:
11
+ release-build:
12
+ runs-on: ubuntu-latest
13
+
14
+ steps:
15
+ - uses: actions/checkout@v4
16
+
17
+ - uses: actions/setup-python@v5
18
+ with:
19
+ python-version: "3.11"
20
+
21
+ - name: Build release distributions
22
+ run: |
23
+ python -m pip install build
24
+ python -m build
25
+
26
+ - name: Upload distributions
27
+ uses: actions/upload-artifact@v4
28
+ with:
29
+ name: release-dists
30
+ path: dist/
31
+
32
+ pypi-publish:
33
+ runs-on: ubuntu-latest
34
+ needs:
35
+ - release-build
36
+ permissions:
37
+ id-token: write
38
+
39
+ environment:
40
+ name: pypi
41
+ url: https://pypi.org/p/scope-oaf
42
+
43
+ steps:
44
+ - name: Retrieve release distributions
45
+ uses: actions/download-artifact@v4
46
+ with:
47
+ name: release-dists
48
+ path: dist/
49
+
50
+ - name: Publish release distributions to PyPI
51
+ uses: pypa/gh-action-pypi-publish@release/v1
52
+ with:
53
+ packages-dir: dist/
@@ -0,0 +1,32 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.pyo
5
+ *.egg-info/
6
+ *.egg
7
+ dist/
8
+ build/
9
+ *.whl
10
+
11
+ # Virtual environments
12
+ .venv/
13
+ venv/
14
+ env/
15
+
16
+ # IDE
17
+ .vscode/
18
+ .idea/
19
+ *.swp
20
+ *.swo
21
+
22
+ # OS
23
+ .DS_Store
24
+ Thumbs.db
25
+
26
+ # Environment
27
+ .env
28
+
29
+ # Testing
30
+ .pytest_cache/
31
+ htmlcov/
32
+ .coverage
@@ -0,0 +1,594 @@
1
+ # OpenAgentFramework (OAF) — Design Document
2
+
3
+ ## Vision
4
+
5
+ A minimal, fast, transparent agent framework built on top of `llmclient`. Unlike LangChain's deep abstraction layers, OAF is **flat, debuggable, and composable** — you can always see exactly what's happening.
6
+
7
+ ```
8
+ ┌──────────────────────────────────────────────────┐
9
+ │ Your Application │
10
+ ├──────────────────────────────────────────────────┤
11
+ │ OpenAgentFramework (OAF) │
12
+ │ ┌──────────┐ ┌──────────┐ ┌───────────────┐ │
13
+ │ │ Agents │ │ Tools │ │ Context │ │
14
+ │ │ │ │ │ │ Engineering │ │
15
+ │ └──────────┘ └──────────┘ └───────────────┘ │
16
+ ├──────────────────────────────────────────────────┤
17
+ │ llmclient │
18
+ │ (OpenAI + Anthropic SDKs) │
19
+ └──────────────────────────────────────────────────┘
20
+ ```
21
+
22
+ ---
23
+
24
+ ## Core Principles
25
+
26
+ 1. **No magic** — Every prompt, tool call, and decision is inspectable via `request_payload` and event hooks
27
+ 2. **Parallel by default** — Multiple tool calls execute concurrently with `asyncio.gather`
28
+ 3. **Context is king** — Smart context window management, not naive concatenation
29
+ 4. **Tools are just functions** — Decorate any async/sync function → it's a tool
30
+ 5. **Composable agents** — Agents can call other agents as tools (agent-as-a-tool)
31
+ 6. **Provider agnostic** — Works with any model via `llmclient`
32
+
33
+ ---
34
+
35
+ ## Architecture
36
+
37
+ ### Directory Structure
38
+
39
+ ```
40
+ oaf/
41
+ ├── __init__.py
42
+ ├── agent.py # Agent class — the core loop
43
+ ├── tool.py # @tool decorator, ToolDef, ToolResult
44
+ ├── context.py # ContextWindow — smart prompt assembly
45
+ ├── memory.py # Conversation + long-term memory
46
+ ├── hooks.py # Event system (before_call, after_call, on_tool, on_error)
47
+ ├── router.py # Multi-agent routing / handoff
48
+ ├── types.py # AgentResponse, Turn, ToolCall, etc.
49
+ ├── errors.py # Agent-level errors
50
+ └── tools/ # Built-in tool library
51
+ ├── __init__.py
52
+ ├── web.py # web_search, fetch_url, scrape
53
+ ├── filesystem.py # read_file, write_file, list_dir
54
+ ├── code.py # python_exec, shell_exec
55
+ ├── math.py # calculator, unit_convert
56
+ └── time.py # current_time, sleep, timer
57
+ ```
58
+
59
+ ---
60
+
61
+ ## 1. Tool System
62
+
63
+ ### 1.1 Tool Definition — `@tool` Decorator
64
+
65
+ Tools are just functions. The decorator extracts the JSON schema from type hints and docstring automatically.
66
+
67
+ ```python
68
+ from oaf import tool
69
+
70
+ @tool
71
+ async def get_weather(city: str, units: str = "celsius") -> str:
72
+ """Get the current weather for a city.
73
+
74
+ Args:
75
+ city: The city name (e.g. "Tokyo", "New York")
76
+ units: Temperature units — "celsius" or "fahrenheit"
77
+ """
78
+ # your implementation
79
+ return f"72°F in {city}"
80
+
81
+ # What the decorator produces internally:
82
+ # ToolDef(
83
+ # name="get_weather",
84
+ # description="Get the current weather for a city.",
85
+ # parameters={
86
+ # "type": "object",
87
+ # "properties": {
88
+ # "city": {"type": "string", "description": "The city name (e.g. \"Tokyo\", \"New York\")"},
89
+ # "units": {"type": "string", "description": "Temperature units", "default": "celsius"}
90
+ # },
91
+ # "required": ["city"]
92
+ # },
93
+ # fn=get_weather
94
+ # )
95
+ ```
96
+
97
+ ### 1.2 Tool Types
98
+
99
+ ```python
100
+ @dataclass
101
+ class ToolDef:
102
+ name: str
103
+ description: str
104
+ parameters: dict[str, Any] # JSON Schema
105
+ fn: Callable # the actual function
106
+ requires_confirmation: bool = False # ask user before executing
107
+ timeout: float = 30.0 # per-call timeout
108
+
109
+ @dataclass
110
+ class ToolCall:
111
+ id: str
112
+ name: str
113
+ arguments: dict[str, Any]
114
+
115
+ @dataclass
116
+ class ToolResult:
117
+ tool_call_id: str
118
+ name: str
119
+ content: str # stringified result
120
+ is_error: bool = False
121
+ duration_ms: float = 0
122
+ ```
123
+
124
+ ### 1.3 Parallel Tool Execution
125
+
126
+ When the model returns multiple tool calls in a single response, they execute in parallel:
127
+
128
+ ```python
129
+ # Inside the agent loop:
130
+ async def _execute_tools(self, tool_calls: list[ToolCall]) -> list[ToolResult]:
131
+ """Execute all tool calls concurrently."""
132
+ tasks = [self._execute_one(tc) for tc in tool_calls]
133
+ return await asyncio.gather(*tasks)
134
+
135
+ async def _execute_one(self, tc: ToolCall) -> ToolResult:
136
+ tool = self._tools[tc.name]
137
+ try:
138
+ result = await asyncio.wait_for(
139
+ tool.fn(**tc.arguments),
140
+ timeout=tool.timeout,
141
+ )
142
+ return ToolResult(
143
+ tool_call_id=tc.id,
144
+ name=tc.name,
145
+ content=str(result),
146
+ )
147
+ except Exception as e:
148
+ return ToolResult(
149
+ tool_call_id=tc.id,
150
+ name=tc.name,
151
+ content=f"Error: {e}",
152
+ is_error=True,
153
+ )
154
+ ```
155
+
156
+ ### 1.4 Sync Function Support
157
+
158
+ Sync tools run in the thread pool automatically:
159
+
160
+ ```python
161
+ @tool
162
+ def read_file(path: str) -> str:
163
+ """Read a file from disk."""
164
+ return open(path).read()
165
+
166
+ # Internally wraps with asyncio.to_thread:
167
+ # async def wrapper(**kwargs):
168
+ # return await asyncio.to_thread(original_fn, **kwargs)
169
+ ```
170
+
171
+ ---
172
+
173
+ ## 2. Agent — The Core Loop
174
+
175
+ ### 2.1 Agent Class
176
+
177
+ ```python
178
+ class Agent:
179
+ def __init__(
180
+ self,
181
+ name: str = "agent",
182
+ model: str = "gpt-4.1-nano",
183
+ system: str = "",
184
+ tools: list[ToolDef] | None = None,  # avoid mutable default argument
185
+ context: ContextWindow | None = None,
186
+ hooks: Hooks | None = None,
187
+ max_turns: int = 20, # safety limit
188
+ max_parallel_tools: int = 10, # concurrency cap
189
+ client: LLMClient | None = None,
190
+ ): ...
191
+
192
+ async def run(self, message: str, **kwargs) -> AgentResponse:
193
+ """Run the agent loop to completion."""
194
+ ...
195
+
196
+ async def run_stream(self, message: str, **kwargs) -> AsyncIterator[AgentEvent]:
197
+ """Stream the agent loop — yields events as they happen."""
198
+ ...
199
+ ```
200
+
201
+ ### 2.2 Agent Loop (the core)
202
+
203
+ ```
204
+ User message
205
+      │
206
+      ▼
207
+ ┌─────────────────────────┐
208
+ │ Assemble context │◄──── ContextWindow manages what fits
209
+ │ (system + history + │
210
+ │ tool defs + user msg) │
211
+ └────────────┬────────────┘
212
+              │
213
+              ▼
214
+ ┌─────────────────────────┐
215
+ │ LLM call via │
216
+ │ llmclient.chat() │──── request_payload available for inspection
217
+ └────────────┬────────────┘
218
+              │
219
+ ┌───┴───┐
220
+ │ │
221
+ text only tool_calls
222
+ │ │
223
+ ▼ ▼
224
+ Return ┌──────────────────┐
225
+ │ Execute tools │
226
+ │ (parallel) │
227
+ └────────┬─────────┘
228
+           │
229
+           ▼
230
+ ┌──────────────────┐
231
+ │ Append results │
232
+ │ to conversation │
233
+ └────────┬─────────┘
234
+           │
235
+           ▼
236
+ Loop back to "Assemble context"
237
+ (until text response or max_turns)
238
+ ```
239
+
240
+ ### 2.3 Agent Response
241
+
242
+ ```python
243
+ @dataclass
244
+ class Turn:
245
+ """One LLM call + its tool executions."""
246
+ request_payload: dict[str, Any] # what was sent to the API
247
+ response: ChatResponse # raw llmclient response
248
+ tool_calls: list[ToolCall] # tools the model requested
249
+ tool_results: list[ToolResult] # results from execution
250
+ duration_ms: float
251
+
252
+ @dataclass
253
+ class AgentResponse:
254
+ text: str # final text answer
255
+ turns: list[Turn] # full history of all turns
256
+ total_tokens: int # sum of all turns
257
+ total_duration_ms: float
258
+ model: str
259
+ ```
260
+
261
+ ---
262
+
263
+ ## 3. Context Engineering
264
+
265
+ ### 3.1 ContextWindow
266
+
267
+ The most important part. Naive frameworks just concatenate everything and hit token limits. OAF actively manages what goes into the prompt.
268
+
269
+ ```python
270
+ class ContextWindow:
271
+ def __init__(
272
+ self,
273
+ max_tokens: int = 128_000, # model context limit
274
+ reserve_output: int = 4_096, # tokens reserved for response
275
+ strategy: str = "sliding", # "sliding" | "summary" | "priority"
276
+ ): ...
277
+
278
+ def assemble(
279
+ self,
280
+ system: str,
281
+ tools: list[ToolDef],
282
+ history: list[Message],
283
+ user_message: str,
284
+ ) -> list[Message]:
285
+ """Build the final message list that fits within the token budget."""
286
+ ...
287
+ ```
288
+
289
+ ### 3.2 Strategies
290
+
291
+ | Strategy | How it works | Best for |
292
+ |----------|-------------|----------|
293
+ | `sliding` | Keeps system + tools + last N turns that fit | Most use cases |
294
+ | `summary` | When history overflows, summarize older turns into a condensed message | Long conversations |
295
+ | `priority` | Each message has a priority score; lowest priority gets dropped first | Complex multi-step tasks |
296
+
297
+ ### 3.3 Token Counting
298
+
299
+ ```python
300
+ class TokenCounter:
301
+ """Fast approximate token counting (no tiktoken dependency required)."""
302
+
303
+ @staticmethod
304
+ def estimate(text: str) -> int:
305
+ """~4 chars per token heuristic, good enough for budget decisions."""
306
+ return len(text) // 4
307
+
308
+ @staticmethod
309
+ def exact(text: str, model: str) -> int:
310
+ """Exact count via tiktoken (optional dependency)."""
311
+ import tiktoken
312
+ enc = tiktoken.encoding_for_model(model)
313
+ return len(enc.encode(text))
314
+ ```
315
+
316
+ ### 3.4 Assembled Prompt Inspection
317
+
318
+ Every agent response includes the full request payload so you can see exactly what was sent:
319
+
320
+ ```python
321
+ result = await agent.run("What's the weather in Tokyo and NYC?")
322
+
323
+ # See exactly what the model received on each turn:
324
+ for i, turn in enumerate(result.turns):
325
+ print(f"Turn {i}: {len(turn.request_payload['messages'])} messages")
326
+ print(f" Tools called: {[tc.name for tc in turn.tool_calls]}")
327
+ print(f" Payload: {turn.request_payload}")
328
+ ```
329
+
330
+ ---
331
+
332
+ ## 4. Event Hooks
333
+
334
+ ### 4.1 Hook System
335
+
336
+ Hooks let you observe/modify behavior without subclassing:
337
+
338
+ ```python
339
+ @dataclass
340
+ class Hooks:
341
+ before_llm_call: Callable | None = None # (messages, tools) → messages
342
+ after_llm_call: Callable | None = None # (response) → None
343
+ before_tool_call: Callable | None = None # (tool_call) → tool_call | None (None = skip)
344
+ after_tool_call: Callable | None = None # (tool_call, result) → result
345
+ on_error: Callable | None = None # (error) → None
346
+ on_turn_complete: Callable | None = None # (turn) → None
347
+ ```
348
+
349
+ ### 4.2 Example: Logging Hook
350
+
351
+ ```python
352
+ async def log_everything(response):
353
+ print(f"[LLM] {response.model} → {response.usage.total_tokens} tokens")
354
+ print(f"[LLM] {response.text[:100]}...")
355
+
356
+ agent = Agent(
357
+ model="gpt-4.1-nano",
358
+ tools=[get_weather],
359
+ hooks=Hooks(after_llm_call=log_everything),
360
+ )
361
+ ```
362
+
363
+ ### 4.3 Example: Confirmation Hook
364
+
365
+ ```python
366
+ async def confirm_dangerous(tool_call):
367
+ if tool_call.name in ("shell_exec", "write_file"):
368
+ answer = input(f"Allow {tool_call.name}({tool_call.arguments})? [y/n] ")
369
+ if answer != "y":
370
+ return None # skip this tool call
371
+ return tool_call
372
+
373
+ agent = Agent(
374
+ tools=[shell_exec, write_file, read_file],
375
+ hooks=Hooks(before_tool_call=confirm_dangerous),
376
+ )
377
+ ```
378
+
379
+ ---
380
+
381
+ ## 5. Streaming Events
382
+
383
+ For real-time UIs, `run_stream` yields typed events:
384
+
385
+ ```python
386
+ class AgentEvent:
387
+ type: str # "text_delta" | "tool_start" | "tool_end" | "turn_end" | "error" | "done"
388
+
389
+ async for event in agent.run_stream("Analyze this data"):
390
+ match event.type:
391
+ case "text_delta":
392
+ print(event.text, end="", flush=True)
393
+ case "tool_start":
394
+ print(f"\n🔧 Calling {event.tool_call.name}...")
395
+ case "tool_end":
396
+ print(f" ✓ {event.result.content[:100]}")
397
+ case "done":
398
+ print(f"\n[{event.total_tokens} tokens, {event.turns} turns]")
399
+ ```
400
+
401
+ ---
402
+
403
+ ## 6. Multi-Agent / Router
404
+
405
+ ### 6.1 Agent-as-a-Tool
406
+
407
+ Any agent can be exposed as a tool for another agent:
408
+
409
+ ```python
410
+ researcher = Agent(
411
+ name="researcher",
412
+ model="gpt-4.1-mini",
413
+ system="You research topics thoroughly using web search.",
414
+ tools=[web_search, fetch_url],
415
+ )
416
+
417
+ writer = Agent(
418
+ name="writer",
419
+ model="gpt-4.1-nano",
420
+ system="You write polished content based on research.",
421
+ tools=[researcher.as_tool()], # researcher becomes a callable tool
422
+ )
423
+
424
+ result = await writer.run("Write a blog post about quantum computing")
425
+ ```
426
+
427
+ ### 6.2 Router Agent
428
+
429
+ A router dispatches to specialized agents:
430
+
431
+ ```python
432
+ router = Router(
433
+ agents=[researcher, writer, coder, analyst],
434
+ model="gpt-4.1-nano", # cheap model for routing decisions
435
+ )
436
+
437
+ # Router picks the right agent based on the query
438
+ result = await router.run("Debug this Python error: ...")
439
+ # → Routes to coder agent
440
+ ```
441
+
442
+ ---
443
+
444
+ ## 7. Built-in Tools
445
+
446
+ ### Standard Library
447
+
448
+ | Tool | Module | Description |
449
+ |------|--------|-------------|
450
+ | `web_search` | `tools.web` | Search the web (via SerpAPI/Tavily) |
451
+ | `fetch_url` | `tools.web` | Fetch and extract text from a URL |
452
+ | `read_file` | `tools.filesystem` | Read a file |
453
+ | `write_file` | `tools.filesystem` | Write/create a file |
454
+ | `list_dir` | `tools.filesystem` | List directory contents |
455
+ | `python_exec` | `tools.code` | Execute Python code in a sandbox |
456
+ | `shell_exec` | `tools.code` | Run a shell command |
457
+ | `calculator` | `tools.math` | Evaluate math expressions safely |
458
+ | `current_time` | `tools.time` | Get current date/time |
459
+
460
+ All built-in tools are opt-in — you import only what you need:
461
+
462
+ ```python
463
+ from oaf.tools.web import web_search, fetch_url
464
+ from oaf.tools.filesystem import read_file, write_file
465
+ ```
466
+
467
+ ---
468
+
469
+ ## 8. Memory
470
+
471
+ ### 8.1 Conversation Memory
472
+
473
+ Tracks the full history of the current session:
474
+
475
+ ```python
476
+ class ConversationMemory:
477
+ messages: list[Message]
478
+
479
+ def add(self, message: Message) -> None: ...
480
+ def get_recent(self, n: int) -> list[Message]: ...
481
+ def summarize(self, model: str) -> str: ... # compress old history
482
+ def clear(self) -> None: ...
483
+ def save(self, path: str) -> None: ...
484
+ def load(self, path: str) -> None: ...
485
+ ```
486
+
487
+ ### 8.2 Persistent Memory (Key-Value)
488
+
489
+ For facts that persist across conversations:
490
+
491
+ ```python
492
+ class PersistentMemory:
493
+ """SQLite-backed key-value store."""
494
+
495
+ def store(self, key: str, value: str, metadata: dict | None = None) -> None: ...
496
+ def recall(self, query: str, top_k: int = 5) -> list[MemoryEntry]: ...
497
+ def forget(self, key: str) -> None: ...
498
+ ```
499
+
500
+ ---
501
+
502
+ ## 9. Usage Examples
503
+
504
+ ### 9.1 Simple Agent
505
+
506
+ ```python
507
+ from oaf import Agent, tool
508
+
509
+ @tool
510
+ async def get_weather(city: str) -> str:
511
+ """Get weather for a city."""
512
+ return f"Sunny, 72°F in {city}"
513
+
514
+ agent = Agent(
515
+ model="gpt-4.1-nano",
516
+ system="You are a helpful assistant.",
517
+ tools=[get_weather],
518
+ )
519
+
520
+ result = await agent.run("What's the weather in Tokyo?")
521
+ print(result.text)
522
+ # "The weather in Tokyo is sunny and 72°F!"
523
+ print(result.turns[0].tool_calls[0].name)
524
+ # "get_weather"
525
+ ```
526
+
527
+ ### 9.2 Parallel Tool Calls
528
+
529
+ ```python
530
+ result = await agent.run("What's the weather in Tokyo, NYC, and London?")
531
+ # Model returns 3 tool_calls → all 3 execute in parallel
532
+ print(len(result.turns[0].tool_calls)) # 3
533
+ print(result.turns[0].tool_results) # all 3 results, fetched concurrently
534
+ ```
535
+
536
+ ### 9.3 Full Inspection
537
+
538
+ ```python
539
+ result = await agent.run("Analyze the weather patterns")
540
+
541
+ for turn in result.turns:
542
+ print(f"=== Turn ===")
543
+ print(f"Prompt: {turn.request_payload}")
544
+ print(f"Response: {turn.response.text}")
545
+ for tc, tr in zip(turn.tool_calls, turn.tool_results):
546
+ print(f" Tool: {tc.name}({tc.arguments}) → {tr.content}")
547
+ print(f"Total: {result.total_tokens} tokens, {len(result.turns)} turns")
548
+ ```
549
+
550
+ ---
551
+
552
+ ## 10. Implementation Priority
553
+
554
+ ### Phase 1 — Core (build first)
555
+ 1. `tool.py` — `@tool` decorator, schema extraction from type hints
556
+ 2. `types.py` — `ToolDef`, `ToolCall`, `ToolResult`, `Turn`, `AgentResponse`, `AgentEvent`
557
+ 3. `agent.py` — Agent loop with parallel tool execution
558
+ 4. `context.py` — `ContextWindow` with sliding strategy
559
+
560
+ ### Phase 2 — Usability
561
+ 5. `hooks.py` — Event hooks
562
+ 6. `memory.py` — Conversation memory
563
+ 7. Streaming (`run_stream` + `AgentEvent`)
564
+
565
+ ### Phase 3 — Power Features
566
+ 8. `router.py` — Multi-agent routing, agent-as-a-tool
567
+ 9. `tools/` — Built-in tool library
568
+ 10. `memory.py` — Persistent memory with embeddings search
569
+
570
+ ---
571
+
572
+ ## 11. Dependencies
573
+
574
+ | Package | Purpose | Required? |
575
+ |---------|---------|-----------|
576
+ | `llmclient` | LLM calls (already built) | Yes |
577
+ | `docstring-parser` | Extract param docs from docstrings | Yes |
578
+ | `tiktoken` | Exact token counting | Optional |
579
+ | `aiosqlite` | Persistent memory storage | Optional |
580
+ | `httpx` | Built-in web tools | Optional |
581
+
582
+ ---
583
+
584
+ ## 12. Anti-Patterns to Avoid (lessons from LangChain)
585
+
586
+ | LangChain problem | OAF solution |
587
+ |-------------------|-------------|
588
+ | 15 abstraction layers deep | **Flat** — Agent → LLMClient, that's it |
589
+ | Can't see the prompt | **`request_payload`** on every response and turn |
590
+ | "Chain" concept is confusing | **Agent loop** — one clear concept |
591
+ | Massive dependency tree | **Minimal deps** — just llmclient + docstring-parser |
592
+ | Hard to debug tool errors | **`ToolResult.is_error`** + hooks + full turn history |
593
+ | Sequential tool execution | **Parallel by default** with `asyncio.gather` |
594
+ | Opaque memory management | **Explicit `ContextWindow`** with inspectable strategy |
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 devin
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.