ai-bot-framework 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. ai_bot_framework-0.1.0/.gitignore +12 -0
  2. ai_bot_framework-0.1.0/PKG-INFO +12 -0
  3. ai_bot_framework-0.1.0/pyproject.toml +102 -0
  4. ai_bot_framework-0.1.0/src/ai_framework/__init__.py +29 -0
  5. ai_bot_framework-0.1.0/src/ai_framework/application.py +79 -0
  6. ai_bot_framework-0.1.0/src/ai_framework/entities/__init__.py +12 -0
  7. ai_bot_framework-0.1.0/src/ai_framework/entities/ai_response.py +13 -0
  8. ai_bot_framework-0.1.0/src/ai_framework/entities/message.py +14 -0
  9. ai_bot_framework-0.1.0/src/ai_framework/entities/token_usage.py +6 -0
  10. ai_bot_framework-0.1.0/src/ai_framework/entities/tool.py +13 -0
  11. ai_bot_framework-0.1.0/src/ai_framework/integrations/__init__.py +5 -0
  12. ai_bot_framework-0.1.0/src/ai_framework/integrations/bot_framework.py +57 -0
  13. ai_bot_framework-0.1.0/src/ai_framework/memory/__init__.py +9 -0
  14. ai_bot_framework-0.1.0/src/ai_framework/memory/in_memory_store.py +17 -0
  15. ai_bot_framework-0.1.0/src/ai_framework/memory/postgres_memory_store.py +81 -0
  16. ai_bot_framework-0.1.0/src/ai_framework/memory/redis_memory_store.py +23 -0
  17. ai_bot_framework-0.1.0/src/ai_framework/protocols/__init__.py +11 -0
  18. ai_bot_framework-0.1.0/src/ai_framework/protocols/i_ai_provider.py +16 -0
  19. ai_bot_framework-0.1.0/src/ai_framework/protocols/i_memory_store.py +13 -0
  20. ai_bot_framework-0.1.0/src/ai_framework/protocols/i_tool_registry.py +14 -0
  21. ai_bot_framework-0.1.0/src/ai_framework/protocols/tool_definition.py +15 -0
  22. ai_bot_framework-0.1.0/src/ai_framework/providers/__init__.py +4 -0
  23. ai_bot_framework-0.1.0/src/ai_framework/providers/anthropic_provider.py +104 -0
  24. ai_bot_framework-0.1.0/src/ai_framework/providers/claude_sdk_provider.py +130 -0
  25. ai_bot_framework-0.1.0/src/ai_framework/py.typed +0 -0
  26. ai_bot_framework-0.1.0/src/ai_framework/tools/__init__.py +4 -0
  27. ai_bot_framework-0.1.0/src/ai_framework/tools/decorators.py +59 -0
  28. ai_bot_framework-0.1.0/src/ai_framework/tools/tool_registry.py +25 -0
@@ -0,0 +1,12 @@
1
+ __pycache__/
2
+ *.pyc
3
+ .venv/
4
+ dist/
5
+ *.egg-info/
6
+ .mypy_cache/
7
+ .ruff_cache/
8
+ .pytest_cache/
9
+ .agent-branch
10
+ .todoist.yaml
11
+ .env
12
+ .claude/devops.yaml
@@ -0,0 +1,12 @@
1
+ Metadata-Version: 2.4
2
+ Name: ai-bot-framework
3
+ Version: 0.1.0
4
+ Summary: AI agent framework with tool system and memory management
5
+ Author-email: Vladimir Sumarokov <sumarokov.vp@gmail.com>
6
+ Requires-Python: >=3.13
7
+ Requires-Dist: anthropic>=0.52.0
8
+ Requires-Dist: psycopg>=3.3.2
9
+ Requires-Dist: pydantic>=2.11.0
10
+ Requires-Dist: redis>=7.1.1
11
+ Provides-Extra: claude-sdk
12
+ Requires-Dist: claude-code-sdk>=0.0.25; extra == 'claude-sdk'
@@ -0,0 +1,102 @@
1
+ [project]
2
+ name = "ai-bot-framework"
3
+ version = "0.1.0"
4
+ description = "AI agent framework with tool system and memory management"
5
+ authors = [
6
+ { name = "Vladimir Sumarokov", email = "sumarokov.vp@gmail.com" }
7
+ ]
8
+ requires-python = ">=3.13"
9
+ dependencies = [
10
+ "pydantic>=2.11.0",
11
+ "anthropic>=0.52.0",
12
+ "psycopg>=3.3.2",
13
+ "redis>=7.1.1",
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ claude-sdk = [
18
+ "claude-code-sdk>=0.0.25",
19
+ ]
20
+
21
+ [dependency-groups]
22
+ dev = [
23
+ "import-linter>=2.9",
24
+ "mypy>=1.18.2",
25
+ "pyright>=1.1.406",
26
+ "pytest>=9.0.0",
27
+ "ruff>=0.13.3",
28
+ ]
29
+
30
+ [build-system]
31
+ requires = ["hatchling"]
32
+ build-backend = "hatchling.build"
33
+
34
+ [tool.hatch.build.targets.wheel]
35
+ packages = ["src/ai_framework"]
36
+
37
+ [tool.hatch.build.targets.sdist]
38
+ include = ["src/ai_framework/", "pyproject.toml"]
39
+
40
+ [tool.pyright]
41
+ venv = ".venv"
42
+ venvPath = "."
43
+ reportUnusedVariable = false
44
+ reportUnusedImport = false
45
+ reportMissingImports = false
46
+
47
+ [tool.ruff]
48
+ lint.select = [
49
+ "F", # flake8
50
+ "E", # pycodestyle errors
51
+ "W", # pycodestyle warnings
52
+ "B", # flake8-bugbear
53
+ "N", # PEP 8 naming conventions
54
+ "C", # McCabe complexity
55
+ "T", # Print found todos
56
+ "UP", # upgrade syntax
57
+ "ANN", # type annotations
58
+ "S", # security issues
59
+ "DTZ", # datetime timezone issues
60
+ ]
61
+
62
+ lint.ignore = [
63
+ "ANN204", # Missing type annotation for special methods
64
+ "E501", # line too long
65
+ "C901", # function too complex
66
+ "ANN401", # Any type
67
+ "W293", # blank line contains whitespace
68
+ "W291", # trailing whitespace
69
+ ]
70
+
71
+ [tool.ruff.lint.per-file-ignores]
72
+ "test*" = [
73
+ "S101", # disable security issues for tests
74
+ "ANN201", # Missing return type
75
+ ]
76
+
77
+ [tool.mypy]
78
+ exclude = [".venv/"]
79
+ ignore_missing_imports = true
80
+ strict = false
81
+ disallow_subclassing_any = true
82
+ warn_unused_ignores = false
83
+ warn_redundant_casts = true
84
+ check_untyped_defs = true
85
+ disallow_untyped_decorators = true
86
+ show_error_codes = true
87
+
88
+ [tool.importlinter]
89
+ root_packages = ["ai_framework"]
90
+
91
+ [[tool.importlinter.contracts]]
92
+ name = "ai_framework layers"
93
+ type = "layers"
94
+ layers = [
95
+ "ai_framework.integrations",
96
+ "ai_framework.application",
97
+ "ai_framework.providers",
98
+ "ai_framework.tools",
99
+ "ai_framework.memory",
100
+ "ai_framework.protocols",
101
+ "ai_framework.entities",
102
+ ]
@@ -0,0 +1,29 @@
1
+ from ai_framework.application import AIApplication
2
+ from ai_framework.entities import (
3
+ AIResponse,
4
+ Message,
5
+ TokenUsage,
6
+ ToolCall,
7
+ ToolResult,
8
+ )
9
+ from ai_framework.integrations.bot_framework import AIStep
10
+ from ai_framework.protocols import (
11
+ IAIProvider,
12
+ IMemoryStore,
13
+ IToolRegistry,
14
+ ToolDefinition,
15
+ )
16
+
17
+ __all__ = [
18
+ "AIApplication",
19
+ "AIResponse",
20
+ "AIStep",
21
+ "IAIProvider",
22
+ "IMemoryStore",
23
+ "IToolRegistry",
24
+ "Message",
25
+ "TokenUsage",
26
+ "ToolCall",
27
+ "ToolDefinition",
28
+ "ToolResult",
29
+ ]
@@ -0,0 +1,79 @@
1
+ from __future__ import annotations
2
+
3
+ from ai_framework.entities.ai_response import AIResponse
4
+ from ai_framework.entities.message import Message
5
+ from ai_framework.entities.tool import ToolResult
6
+ from ai_framework.protocols.i_ai_provider import IAIProvider
7
+ from ai_framework.protocols.i_memory_store import IMemoryStore
8
+ from ai_framework.protocols.i_tool_registry import IToolRegistry
9
+
10
+
11
class AIApplication:
    """Orchestrates one conversation turn: persists messages, calls the AI
    provider, and drives the tool-execution loop until the model answers
    without requesting tools.
    """

    def __init__(
        self,
        provider: IAIProvider,
        memory: IMemoryStore,
        tool_registry: IToolRegistry | None = None,
        system_prompt: str | None = None,
        max_tool_rounds: int = 10,
    ) -> None:
        # max_tool_rounds bounds provider round-trips per user message,
        # guarding against a model that keeps requesting tools forever.
        self._provider = provider
        self._memory = memory
        self._tool_registry = tool_registry
        self._system_prompt = system_prompt
        self._max_tool_rounds = max_tool_rounds

    def process_message(self, thread_id: str, user_message: str) -> AIResponse:
        """Process a user message and return the model's final response.

        The user message is stored first; each round, the full stored
        history is sent to the provider. Rounds that request tools have
        both the assistant turn and the tool results persisted before
        the next provider call.

        Raises:
            RuntimeError: if the model still requests tools after
                ``max_tool_rounds`` rounds.
            ValueError: if tool calls arrive but no registry is configured.
        """
        user_msg = Message(role="user", content=user_message)
        self._memory.add_message(thread_id, user_msg)

        tools = self._tool_registry.get_definitions() if self._tool_registry else None

        for _ in range(self._max_tool_rounds):
            messages = self._memory.get_messages(thread_id)
            response = self._provider.send_message(
                messages=messages,
                system=self._system_prompt,
                tools=tools,
            )

            # No tool calls means the model produced its final answer.
            if not response.tool_calls:
                break

            assistant_msg = Message(
                role="assistant",
                content=response.content or "",
                tool_calls=response.tool_calls,
            )
            self._memory.add_message(thread_id, assistant_msg)

            # Tool outputs travel back to the model as a user-role message,
            # matching the provider-side conversation format.
            tool_results = self._execute_tool_calls(response)
            tool_msg = Message(
                role="user",
                content="",
                tool_results=tool_results,
            )
            self._memory.add_message(thread_id, tool_msg)
        else:
            # for/else: the loop never hit `break`, i.e. every round
            # requested more tools — abort rather than loop forever.
            raise RuntimeError(
                f"Tool loop exceeded {self._max_tool_rounds} rounds"
            )

        # Persist the final assistant answer before returning it.
        assistant_msg = Message(
            role="assistant",
            content=response.content or "",
        )
        self._memory.add_message(thread_id, assistant_msg)
        return response

    def _execute_tool_calls(self, response: AIResponse) -> list[ToolResult]:
        """Run every tool call in *response* through the registry, in order.

        Raises:
            ValueError: if tool calls arrived but no registry is configured.
        """
        if not self._tool_registry:
            raise ValueError("Tool registry is required to execute tool calls")

        results: list[ToolResult] = []
        for tool_call in response.tool_calls:
            result = self._tool_registry.execute(
                tool_call.name, tool_call.arguments, tool_call.id
            )
            results.append(result)
        return results
@@ -0,0 +1,12 @@
1
+ from ai_framework.entities.ai_response import AIResponse
2
+ from ai_framework.entities.message import Message
3
+ from ai_framework.entities.token_usage import TokenUsage
4
+ from ai_framework.entities.tool import ToolCall, ToolResult
5
+
6
+ __all__ = [
7
+ "AIResponse",
8
+ "Message",
9
+ "TokenUsage",
10
+ "ToolCall",
11
+ "ToolResult",
12
+ ]
@@ -0,0 +1,13 @@
1
+ from __future__ import annotations
2
+
3
+ from pydantic import BaseModel
4
+
5
+ from ai_framework.entities.token_usage import TokenUsage
6
+ from ai_framework.entities.tool import ToolCall
7
+
8
+
9
class AIResponse(BaseModel):
    """Provider-agnostic result of a single model invocation."""

    # Text answer; None when the model produced only tool calls.
    content: str | None = None
    # Tool invocations requested by the model; empty when it answered
    # directly. NOTE: pydantic deep-copies mutable defaults, so this
    # empty list is not shared between instances.
    tool_calls: list[ToolCall] = []
    # Provider-reported stop reason (e.g. "end_turn"); None if unknown.
    stop_reason: str | None = None
    # Token accounting for this call; None if the provider reported none.
    usage: TokenUsage | None = None
@@ -0,0 +1,14 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Literal
4
+
5
+ from pydantic import BaseModel
6
+
7
+ from ai_framework.entities.tool import ToolCall, ToolResult
8
+
9
+
10
class Message(BaseModel):
    """One conversation entry, stored in memory and sent to providers."""

    role: Literal["user", "assistant", "system"]
    content: str
    # Set on assistant messages that requested tool invocations.
    tool_calls: list[ToolCall] | None = None
    # Set on user-role messages carrying tool outputs back to the model.
    tool_results: list[ToolResult] | None = None
@@ -0,0 +1,6 @@
1
+ from pydantic import BaseModel
2
+
3
+
4
class TokenUsage(BaseModel):
    """Token counts reported by a provider for a single call."""

    input_tokens: int
    output_tokens: int
@@ -0,0 +1,13 @@
1
+ from pydantic import BaseModel
2
+
3
+
4
class ToolCall(BaseModel):
    """A model-issued request to invoke a named tool."""

    # Provider-assigned id, used to pair this call with its ToolResult.
    id: str
    name: str
    arguments: dict[str, object]
8
+
9
+
10
class ToolResult(BaseModel):
    """Outcome of executing one ToolCall, sent back to the model."""

    # Matches ToolCall.id so the provider can correlate call and result.
    tool_call_id: str
    content: str
    # True when the tool failed; surfaced to the model as an error result.
    is_error: bool = False
@@ -0,0 +1,5 @@
1
+ from ai_framework.integrations.bot_framework import AIStep
2
+
3
+ __all__ = [
4
+ "AIStep",
5
+ ]
@@ -0,0 +1,57 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Callable
4
+ from typing import Any, Protocol
5
+
6
+ from ai_framework.application import AIApplication
7
+
8
+ type GetUserMessage = Callable[[Any], str | None]
9
+
10
+
11
class IMessageSender(Protocol):
    """Structural interface for the host bot's outbound message channel."""

    # Deliver *text* to *chat_id*; extra parameters mirror the host bot's
    # sender signature (keyboard/flow_name semantics are host-defined —
    # assumed from the caller side, not visible here).
    def send(
        self,
        chat_id: int,
        text: str,
        parse_mode: str = "HTML",
        keyboard: Any = None,
        flow_name: str | None = None,
    ) -> Any: ...
20
+
21
+
22
class IUser(Protocol):
    """Minimal user shape required by AIStep: just a numeric id."""

    @property
    def id(self) -> int: ...
25
+
26
+
27
class AIStep:
    """Bot-pipeline step that routes an incoming user message through the
    AI application and sends the reply back via the message sender.
    """

    name: str = "ai"

    def __init__(
        self,
        ai_application: AIApplication,
        message_sender: IMessageSender,
        get_user_message: GetUserMessage | None = None,
    ) -> None:
        self._ai_application = ai_application
        self._message_sender = message_sender
        # Fall back to reading `state.source_message.text` when no custom
        # extractor is supplied.
        self._get_user_message = get_user_message or _default_get_user_message

    def execute(self, user: IUser, state: Any) -> bool:
        """Handle one pipeline step.

        Returns True (skip) when the state carries no text; otherwise
        processes the message, sends any reply, and returns False.
        """
        incoming = self._get_user_message(state)
        if not incoming:
            return True

        reply = self._ai_application.process_message(str(user.id), incoming)
        if reply.content:
            self._message_sender.send(user.id, reply.content)
        return False
50
+
51
+
52
+ def _default_get_user_message(state: Any) -> str | None:
53
+ if hasattr(state, "source_message"):
54
+ msg = state.source_message
55
+ if hasattr(msg, "text"):
56
+ return msg.text # type: ignore[no-any-return]
57
+ return None
@@ -0,0 +1,9 @@
1
+ from ai_framework.memory.in_memory_store import InMemoryStore
2
+ from ai_framework.memory.postgres_memory_store import PostgresMemoryStore
3
+ from ai_framework.memory.redis_memory_store import RedisMemoryStore
4
+
5
+ __all__ = [
6
+ "InMemoryStore",
7
+ "PostgresMemoryStore",
8
+ "RedisMemoryStore",
9
+ ]
@@ -0,0 +1,17 @@
1
+ from ai_framework.entities.message import Message
2
+
3
+
4
class InMemoryStore:
    """Volatile, process-local message store; contents vanish on restart."""

    def __init__(self) -> None:
        # thread_id -> ordered message history for that thread
        self._store: dict[str, list[Message]] = {}

    def get_messages(self, thread_id: str) -> list[Message]:
        """Return a defensive copy of the thread's history (empty if unknown)."""
        return list(self._store.get(thread_id, []))

    def add_message(self, thread_id: str, message: Message) -> None:
        """Append *message*, creating the thread on first use."""
        self._store.setdefault(thread_id, []).append(message)

    def clear(self, thread_id: str) -> None:
        """Drop the whole thread; a no-op for unknown ids."""
        self._store.pop(thread_id, None)
@@ -0,0 +1,81 @@
1
+ import json
2
+ from typing import Any
3
+
4
+ import psycopg
5
+
6
+ from ai_framework.entities.message import Message
7
+ from ai_framework.entities.tool import ToolCall, ToolResult
8
+
9
+
10
class PostgresMemoryStore:
    """IMemoryStore backed by a Postgres ``ai_messages`` table.

    Expects a table with columns (id, thread_id, role, content,
    tool_calls jsonb, tool_results jsonb) — assumed from the SQL below;
    schema creation/migration is not handled here. TODO confirm schema.
    """

    def __init__(self, database_url: str) -> None:
        self._database_url = database_url
        # Connection is opened lazily via open(); None means "not connected".
        self._conn: psycopg.Connection[Any] | None = None

    def open(self) -> None:
        """Open the database connection; must be called before any query."""
        self._conn = psycopg.connect(self._database_url)

    def close(self) -> None:
        """Close and forget the connection; safe to call when not open."""
        if self._conn:
            self._conn.close()
            self._conn = None

    @property
    def _connection(self) -> psycopg.Connection[Any]:
        # Guard so query methods fail loudly instead of on a None attribute.
        if not self._conn:
            raise RuntimeError("Connection is not open. Call open() first.")
        return self._conn

    def get_messages(self, thread_id: str) -> list[Message]:
        """Load the thread's messages in insertion (id) order."""
        cursor = self._connection.execute(
            "SELECT role, content, tool_calls, tool_results "
            "FROM ai_messages WHERE thread_id = %s ORDER BY id",
            (thread_id,),
        )
        messages: list[Message] = []
        rows: list[Any] = cursor.fetchall()
        for role, content, tool_calls_json, tool_results_json in rows:
            # jsonb columns come back as already-decoded Python lists;
            # SQL NULL arrives as None, which both branches map to None.
            tool_calls = (
                [ToolCall(**tc) for tc in tool_calls_json]
                if tool_calls_json
                else None
            )
            tool_results = (
                [ToolResult(**tr) for tr in tool_results_json]
                if tool_results_json
                else None
            )
            messages.append(
                Message(
                    role=role,
                    content=str(content),
                    tool_calls=tool_calls,
                    tool_results=tool_results,
                )
            )
        return messages

    def add_message(self, thread_id: str, message: Message) -> None:
        """Insert one message row and commit immediately."""
        # Serialize the optional pydantic sub-models to JSON for the
        # jsonb columns; absent lists are stored as NULL.
        tool_calls_json = (
            json.dumps([tc.model_dump() for tc in message.tool_calls])
            if message.tool_calls
            else None
        )
        tool_results_json = (
            json.dumps([tr.model_dump() for tr in message.tool_results])
            if message.tool_results
            else None
        )
        self._connection.execute(
            "INSERT INTO ai_messages (thread_id, role, content, tool_calls, tool_results) "
            "VALUES (%s, %s, %s, %s::jsonb, %s::jsonb)",
            (thread_id, message.role, message.content, tool_calls_json, tool_results_json),
        )
        self._connection.commit()

    def clear(self, thread_id: str) -> None:
        """Delete every message for the thread and commit."""
        self._connection.execute(
            "DELETE FROM ai_messages WHERE thread_id = %s",
            (thread_id,),
        )
        self._connection.commit()
@@ -0,0 +1,23 @@
1
+ from typing import Any
2
+
3
+ import redis
4
+
5
+ from ai_framework.entities.message import Message
6
+
7
+
8
class RedisMemoryStore:
    """IMemoryStore backed by one Redis list per thread."""

    def __init__(self, redis_url: str) -> None:
        # Default client returns bytes; model_validate_json accepts them.
        self._client = redis.Redis.from_url(redis_url)

    def _key(self, thread_id: str) -> str:
        # Namespace every thread list under "ai:messages:".
        return f"ai:messages:{thread_id}"

    def get_messages(self, thread_id: str) -> list[Message]:
        """Return the full thread history in insertion order."""
        raw_messages: Any = self._client.lrange(self._key(thread_id), 0, -1)
        return [Message.model_validate_json(raw) for raw in raw_messages]

    def add_message(self, thread_id: str, message: Message) -> None:
        """Append the message as a JSON blob at the tail of the list."""
        self._client.rpush(self._key(thread_id), message.model_dump_json())

    def clear(self, thread_id: str) -> None:
        """Delete the whole thread list."""
        self._client.delete(self._key(thread_id))
@@ -0,0 +1,11 @@
1
+ from ai_framework.protocols.i_ai_provider import IAIProvider
2
+ from ai_framework.protocols.i_memory_store import IMemoryStore
3
+ from ai_framework.protocols.i_tool_registry import IToolRegistry
4
+ from ai_framework.protocols.tool_definition import ToolDefinition
5
+
6
+ __all__ = [
7
+ "IAIProvider",
8
+ "IMemoryStore",
9
+ "IToolRegistry",
10
+ "ToolDefinition",
11
+ ]
@@ -0,0 +1,16 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Protocol
4
+
5
+ from ai_framework.entities.ai_response import AIResponse
6
+ from ai_framework.entities.message import Message
7
+ from ai_framework.protocols.tool_definition import ToolDefinition
8
+
9
+
10
class IAIProvider(Protocol):
    """Structural interface every chat provider must satisfy."""

    # Send the conversation (plus optional system prompt and tool
    # definitions) to the model and return a normalized AIResponse.
    def send_message(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[ToolDefinition] | None = None,
    ) -> AIResponse: ...
@@ -0,0 +1,13 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Protocol
4
+
5
+ from ai_framework.entities.message import Message
6
+
7
+
8
class IMemoryStore(Protocol):
    """Structural interface for conversation history storage."""

    # Return the thread's messages in insertion order (empty if unknown).
    def get_messages(self, thread_id: str) -> list[Message]: ...

    # Append one message to the thread's history.
    def add_message(self, thread_id: str, message: Message) -> None: ...

    # Remove the whole thread history.
    def clear(self, thread_id: str) -> None: ...
@@ -0,0 +1,14 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Protocol
4
+
5
+ from ai_framework.entities.tool import ToolResult
6
+ from ai_framework.protocols.tool_definition import ToolDefinition
7
+
8
+
9
class IToolRegistry(Protocol):
    """Structural interface for tool registration and execution."""

    # Register (or replace) a tool definition.
    def register(self, tool: ToolDefinition) -> None: ...

    # All registered definitions, for advertising to the provider.
    def get_definitions(self) -> list[ToolDefinition]: ...

    # Execute the named tool with model-supplied arguments.
    def execute(self, name: str, arguments: dict[str, object], tool_call_id: str) -> ToolResult: ...
@@ -0,0 +1,15 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Callable
4
+ from typing import Any
5
+
6
+ from pydantic import BaseModel, ConfigDict
7
+
8
+
9
class ToolDefinition(BaseModel):
    """Declarative description of a callable tool exposed to the model."""

    # arbitrary_types_allowed lets `handler` hold any callable object.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    name: str
    description: str
    # JSON Schema describing the tool's parameters.
    input_schema: dict[str, Any]
    # Python callable implementing the tool; None for declaration-only defs.
    handler: Callable[..., Any] | None = None
@@ -0,0 +1,4 @@
1
from ai_framework.providers.anthropic_provider import AnthropicProvider

__all__ = ["AnthropicProvider"]

# claude-code-sdk is an optional extra (pyproject [project.optional-dependencies]
# "claude-sdk"); importing this package must not fail when it is absent.
# Previously the unconditional import broke `import ai_framework.providers`
# for installs without the extra.
try:
    from ai_framework.providers.claude_sdk_provider import ClaudeSdkProvider
except ImportError:
    pass
else:
    __all__.append("ClaudeSdkProvider")
@@ -0,0 +1,104 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ import anthropic
6
+
7
+ from ai_framework.entities.ai_response import AIResponse
8
+ from ai_framework.entities.message import Message
9
+ from ai_framework.entities.token_usage import TokenUsage
10
+ from ai_framework.entities.tool import ToolCall
11
+ from ai_framework.protocols.tool_definition import ToolDefinition
12
+
13
+
14
class AnthropicProvider:
    """IAIProvider implementation backed by the Anthropic Messages API."""

    def __init__(
        self, api_key: str, model: str = "claude-sonnet-4-20250514"
    ) -> None:
        self._client = anthropic.Anthropic(api_key=api_key)
        self._model = model

    def send_message(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[ToolDefinition] | None = None,
    ) -> AIResponse:
        """Send the conversation to the API and normalize its reply."""
        request: dict[str, Any] = {
            "model": self._model,
            "max_tokens": 8192,
            "messages": [self._convert_message(m) for m in messages],
        }
        if system:
            request["system"] = system
        if tools:
            request["tools"] = [self._convert_tool(t) for t in tools]

        raw = self._client.messages.create(**request)
        return self._convert_response(raw)

    def _convert_message(self, message: Message) -> dict[str, Any]:
        """Translate a framework Message into Anthropic's wire format."""
        # Tool outputs travel back as user-role `tool_result` blocks.
        if message.tool_results:
            result_blocks: list[dict[str, Any]] = []
            for tr in message.tool_results:
                block: dict[str, Any] = {
                    "type": "tool_result",
                    "tool_use_id": tr.tool_call_id,
                    "content": tr.content,
                }
                if tr.is_error:
                    block["is_error"] = True
                result_blocks.append(block)
            return {"role": "user", "content": result_blocks}

        # Assistant turns that requested tools need explicit content blocks.
        if message.role == "assistant" and message.tool_calls:
            parts: list[dict[str, Any]] = []
            if message.content:
                parts.append({"type": "text", "text": message.content})
            parts.extend(
                {
                    "type": "tool_use",
                    "id": tc.id,
                    "name": tc.name,
                    "input": tc.arguments,
                }
                for tc in message.tool_calls
            )
            return {"role": "assistant", "content": parts}

        # Plain text message: role and content pass through unchanged.
        return {"role": message.role, "content": message.content}

    def _convert_tool(self, tool: ToolDefinition) -> dict[str, Any]:
        """Project a ToolDefinition onto the API's tool schema fields."""
        return {
            "name": tool.name,
            "description": tool.description,
            "input_schema": tool.input_schema,
        }

    def _convert_response(self, response: anthropic.types.Message) -> AIResponse:
        """Fold the API's content blocks into a provider-neutral AIResponse."""
        texts: list[str] = []
        calls: list[ToolCall] = []
        for block in response.content:
            if block.type == "text":
                texts.append(block.text)
            elif block.type == "tool_use":
                calls.append(
                    ToolCall(
                        id=block.id,
                        name=block.name,
                        arguments=dict(block.input),  # pyright: ignore[reportUnknownArgumentType]
                    )
                )

        usage = TokenUsage(
            input_tokens=response.usage.input_tokens,
            output_tokens=response.usage.output_tokens,
        )
        return AIResponse(
            content="\n".join(texts) if texts else None,
            tool_calls=calls,
            stop_reason=response.stop_reason,
            usage=usage,
        )
@@ -0,0 +1,130 @@
1
+ import asyncio
2
+ from typing import Any, Literal
3
+
4
+ from claude_code_sdk import (
5
+ AssistantMessage,
6
+ ClaudeCodeOptions,
7
+ ResultMessage,
8
+ TextBlock,
9
+ ToolUseBlock,
10
+ query,
11
+ )
12
+
13
+ from ai_framework.entities.ai_response import AIResponse
14
+ from ai_framework.entities.message import Message
15
+ from ai_framework.entities.token_usage import TokenUsage
16
+ from ai_framework.entities.tool import ToolCall
17
+ from ai_framework.protocols.tool_definition import ToolDefinition
18
+
19
+ type PermissionMode = Literal["default", "acceptEdits", "plan", "bypassPermissions"]
20
+
21
+
22
class ClaudeSdkProvider:
    """IAIProvider implementation on top of the claude-code-sdk query() API.

    The SDK keeps conversation context per session id, so this provider
    resumes the previous session when one exists instead of replaying the
    full stored history.
    """

    def __init__(
        self,
        model: str = "claude-sonnet-4-20250514",
        cwd: str | None = None,
        permission_mode: PermissionMode = "default",
    ) -> None:
        self._model = model
        self._cwd = cwd
        self._permission_mode: PermissionMode = permission_mode
        # Session id of the most recent query; used to resume context.
        self._last_session_id: str | None = None

    @property
    def last_session_id(self) -> str | None:
        """Session id from the last completed query, or None before any call."""
        return self._last_session_id

    def send_message(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[ToolDefinition] | None = None,
    ) -> AIResponse:
        """Synchronous wrapper over the async SDK call.

        NOTE(review): asyncio.run() raises if an event loop is already
        running — this provider assumes it is called from sync code.
        """
        return asyncio.run(self._send_message_async(messages, system, tools))

    async def _send_message_async(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[ToolDefinition] | None = None,
    ) -> AIResponse:
        """Run one SDK query and fold its message stream into an AIResponse."""
        prompt = self._build_prompt(messages)

        options = ClaudeCodeOptions(
            model=self._model,
            permission_mode=self._permission_mode,
            resume=self._last_session_id,
        )

        if self._cwd:
            options.cwd = self._cwd

        if system:
            options.system_prompt = system

        if tools:
            # The SDK takes tool *names* to allow, not full definitions.
            options.allowed_tools = [t.name for t in tools]

        text_parts: list[str] = []
        tool_calls: list[ToolCall] = []
        usage: TokenUsage | None = None

        # The SDK streams messages; collect assistant text/tool blocks and
        # pick up the session id and usage from the final result message.
        async for msg in query(prompt=prompt, options=options):
            if isinstance(msg, AssistantMessage):
                for block in msg.content:
                    if isinstance(block, TextBlock):
                        text_parts.append(block.text)
                    elif isinstance(block, ToolUseBlock):
                        tool_calls.append(
                            ToolCall(
                                id=block.id,
                                name=block.name,
                                arguments=self._normalize_arguments(block.input),
                            )
                        )
            elif isinstance(msg, ResultMessage):
                self._last_session_id = msg.session_id
                usage = self._extract_usage(msg)

        return AIResponse(
            content="\n".join(text_parts) if text_parts else None,
            tool_calls=tool_calls,
            # NOTE(review): the SDK result is not inspected for a stop
            # reason; "end_turn" is assumed even when tool calls occurred.
            stop_reason="end_turn",
            usage=usage,
        )

    def _build_prompt(self, messages: list[Message]) -> str:
        # claude-code-sdk query() accepts a single prompt string, not a messages list.
        # When resuming a session, the SDK retains prior conversation context,
        # so only the latest user message is needed.
        # On a fresh session, we concatenate the full history to avoid context loss.
        if self._last_session_id:
            for msg in reversed(messages):
                if msg.role == "user":
                    return msg.content
            return ""

        parts: list[str] = []
        for msg in messages:
            # Any non-user role (including "system") is labeled Assistant.
            prefix = "User" if msg.role == "user" else "Assistant"
            parts.append(f"[{prefix}]: {msg.content}")
        return "\n\n".join(parts)

    def _normalize_arguments(
        self, raw_input: dict[str, Any]
    ) -> dict[str, object]:
        # Defensive copy; also widens the value type to plain `object`.
        return dict(raw_input)

    def _extract_usage(self, result: ResultMessage) -> TokenUsage:
        """Sum prompt-side token counters (incl. cache writes/reads) into one."""
        raw_usage = result.usage or {}
        input_tokens = (
            raw_usage.get("input_tokens", 0)
            + raw_usage.get("cache_creation_input_tokens", 0)
            + raw_usage.get("cache_read_input_tokens", 0)
        )
        output_tokens = raw_usage.get("output_tokens", 0)
        return TokenUsage(
            input_tokens=input_tokens,
            output_tokens=output_tokens,
        )
File without changes
@@ -0,0 +1,4 @@
1
+ from ai_framework.tools.decorators import tool
2
+ from ai_framework.tools.tool_registry import ToolRegistry
3
+
4
+ __all__ = ["ToolRegistry", "tool"]
@@ -0,0 +1,59 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ from collections.abc import Callable
5
+ from typing import Any, get_type_hints
6
+
7
+ from pydantic import TypeAdapter
8
+
9
+ from ai_framework.protocols.tool_definition import ToolDefinition
10
+
11
+ _PYTHON_TYPE_TO_JSON: dict[type, str] = {
12
+ str: "string",
13
+ int: "integer",
14
+ float: "number",
15
+ bool: "boolean",
16
+ }
17
+
18
+
19
+ def _build_input_schema(func: Callable[..., Any]) -> dict[str, Any]:
20
+ hints = get_type_hints(func)
21
+ sig = inspect.signature(func)
22
+ properties: dict[str, Any] = {}
23
+ required: list[str] = []
24
+
25
+ for param_name, param in sig.parameters.items():
26
+ param_type = hints.get(param_name, str)
27
+
28
+ if param_type in _PYTHON_TYPE_TO_JSON:
29
+ prop: dict[str, Any] = {"type": _PYTHON_TYPE_TO_JSON[param_type]}
30
+ else:
31
+ adapter = TypeAdapter(param_type)
32
+ prop = adapter.json_schema()
33
+
34
+ properties[param_name] = prop
35
+
36
+ if param.default is inspect.Parameter.empty:
37
+ required.append(param_name)
38
+
39
+ schema: dict[str, Any] = {
40
+ "type": "object",
41
+ "properties": properties,
42
+ }
43
+ if required:
44
+ schema["required"] = required
45
+
46
+ return schema
47
+
48
+
49
def tool(name: str, description: str) -> Callable[[Callable[..., Any]], ToolDefinition]:
    """Decorator factory turning a typed function into a ToolDefinition.

    Note: the decorated name is rebound to the ToolDefinition, not the
    function; the original callable stays reachable via ``handler``.
    """

    def decorator(func: Callable[..., Any]) -> ToolDefinition:
        return ToolDefinition(
            name=name,
            description=description,
            input_schema=_build_input_schema(func),
            handler=func,
        )

    return decorator
@@ -0,0 +1,25 @@
1
+ from __future__ import annotations
2
+
3
+ from ai_framework.entities.tool import ToolResult
4
+ from ai_framework.protocols.tool_definition import ToolDefinition
5
+
6
+
7
class ToolRegistry:
    """Mutable name -> ToolDefinition mapping that can execute tool calls."""

    def __init__(self) -> None:
        self._tools: dict[str, ToolDefinition] = {}

    def register(self, tool: ToolDefinition) -> None:
        """Add *tool*, replacing any previous definition with the same name."""
        self._tools[tool.name] = tool

    def get_definitions(self) -> list[ToolDefinition]:
        """Return a snapshot list of the registered definitions."""
        return list(self._tools.values())

    def execute(self, name: str, arguments: dict[str, object], tool_call_id: str) -> ToolResult:
        """Run the named tool and wrap its outcome in a ToolResult.

        Registry misconfiguration (unknown name, missing handler) raises;
        a failure inside the handler itself is returned to the model as an
        ``is_error`` result instead of crashing the conversation loop
        (previously any handler exception propagated and killed the turn).

        Raises:
            KeyError: if no tool named *name* is registered.
            ValueError: if the tool was registered without a handler.
        """
        tool = self._tools[name]
        if tool.handler is None:
            raise ValueError(f"Tool '{name}' has no handler")
        try:
            result = tool.handler(**arguments)
        except Exception as exc:  # surfaced to the model via is_error
            return ToolResult(
                tool_call_id=tool_call_id,
                content=f"{type(exc).__name__}: {exc}",
                is_error=True,
            )
        return ToolResult(
            tool_call_id=tool_call_id,
            content=str(result),
        )