agentpool-2.2.3-py3-none-any.whl → agentpool-2.5.0-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- acp/__init__.py +0 -4
- acp/acp_requests.py +20 -77
- acp/agent/connection.py +8 -0
- acp/agent/implementations/debug_server/debug_server.py +6 -2
- acp/agent/protocol.py +6 -0
- acp/client/connection.py +38 -29
- acp/client/implementations/default_client.py +3 -2
- acp/client/implementations/headless_client.py +2 -2
- acp/connection.py +2 -2
- acp/notifications.py +18 -49
- acp/schema/__init__.py +2 -0
- acp/schema/agent_responses.py +21 -0
- acp/schema/client_requests.py +3 -3
- acp/schema/session_state.py +63 -29
- acp/task/supervisor.py +2 -2
- acp/utils.py +2 -2
- agentpool/__init__.py +2 -0
- agentpool/agents/acp_agent/acp_agent.py +278 -263
- agentpool/agents/acp_agent/acp_converters.py +150 -17
- agentpool/agents/acp_agent/client_handler.py +35 -24
- agentpool/agents/acp_agent/session_state.py +14 -6
- agentpool/agents/agent.py +471 -643
- agentpool/agents/agui_agent/agui_agent.py +104 -107
- agentpool/agents/agui_agent/helpers.py +3 -4
- agentpool/agents/base_agent.py +485 -32
- agentpool/agents/claude_code_agent/FORKING.md +191 -0
- agentpool/agents/claude_code_agent/__init__.py +13 -1
- agentpool/agents/claude_code_agent/claude_code_agent.py +654 -334
- agentpool/agents/claude_code_agent/converters.py +4 -141
- agentpool/agents/claude_code_agent/models.py +77 -0
- agentpool/agents/claude_code_agent/static_info.py +100 -0
- agentpool/agents/claude_code_agent/usage.py +242 -0
- agentpool/agents/events/__init__.py +22 -0
- agentpool/agents/events/builtin_handlers.py +65 -0
- agentpool/agents/events/event_emitter.py +3 -0
- agentpool/agents/events/events.py +84 -3
- agentpool/agents/events/infer_info.py +145 -0
- agentpool/agents/events/processors.py +254 -0
- agentpool/agents/interactions.py +41 -6
- agentpool/agents/modes.py +13 -0
- agentpool/agents/slashed_agent.py +5 -4
- agentpool/agents/tool_wrapping.py +18 -6
- agentpool/common_types.py +35 -21
- agentpool/config_resources/acp_assistant.yml +2 -2
- agentpool/config_resources/agents.yml +3 -0
- agentpool/config_resources/agents_template.yml +1 -0
- agentpool/config_resources/claude_code_agent.yml +9 -8
- agentpool/config_resources/external_acp_agents.yml +2 -1
- agentpool/delegation/base_team.py +4 -30
- agentpool/delegation/pool.py +104 -265
- agentpool/delegation/team.py +57 -57
- agentpool/delegation/teamrun.py +50 -55
- agentpool/functional/run.py +10 -4
- agentpool/mcp_server/client.py +73 -38
- agentpool/mcp_server/conversions.py +54 -13
- agentpool/mcp_server/manager.py +9 -23
- agentpool/mcp_server/registries/official_registry_client.py +10 -1
- agentpool/mcp_server/tool_bridge.py +114 -79
- agentpool/messaging/connection_manager.py +11 -10
- agentpool/messaging/event_manager.py +5 -5
- agentpool/messaging/message_container.py +6 -30
- agentpool/messaging/message_history.py +87 -8
- agentpool/messaging/messagenode.py +52 -14
- agentpool/messaging/messages.py +2 -26
- agentpool/messaging/processing.py +10 -22
- agentpool/models/__init__.py +1 -1
- agentpool/models/acp_agents/base.py +6 -2
- agentpool/models/acp_agents/mcp_capable.py +124 -15
- agentpool/models/acp_agents/non_mcp.py +0 -23
- agentpool/models/agents.py +66 -66
- agentpool/models/agui_agents.py +1 -1
- agentpool/models/claude_code_agents.py +111 -17
- agentpool/models/file_parsing.py +0 -1
- agentpool/models/manifest.py +70 -50
- agentpool/prompts/conversion_manager.py +1 -1
- agentpool/prompts/prompts.py +5 -2
- agentpool/resource_providers/__init__.py +2 -0
- agentpool/resource_providers/aggregating.py +4 -2
- agentpool/resource_providers/base.py +13 -3
- agentpool/resource_providers/codemode/code_executor.py +72 -5
- agentpool/resource_providers/codemode/helpers.py +2 -2
- agentpool/resource_providers/codemode/provider.py +64 -12
- agentpool/resource_providers/codemode/remote_mcp_execution.py +2 -2
- agentpool/resource_providers/codemode/remote_provider.py +9 -12
- agentpool/resource_providers/filtering.py +3 -1
- agentpool/resource_providers/mcp_provider.py +66 -12
- agentpool/resource_providers/plan_provider.py +111 -18
- agentpool/resource_providers/pool.py +5 -3
- agentpool/resource_providers/resource_info.py +111 -0
- agentpool/resource_providers/static.py +2 -2
- agentpool/sessions/__init__.py +2 -0
- agentpool/sessions/manager.py +2 -3
- agentpool/sessions/models.py +9 -6
- agentpool/sessions/protocol.py +28 -0
- agentpool/sessions/session.py +11 -55
- agentpool/storage/manager.py +361 -54
- agentpool/talk/registry.py +4 -4
- agentpool/talk/talk.py +9 -10
- agentpool/testing.py +1 -1
- agentpool/tool_impls/__init__.py +6 -0
- agentpool/tool_impls/agent_cli/__init__.py +42 -0
- agentpool/tool_impls/agent_cli/tool.py +95 -0
- agentpool/tool_impls/bash/__init__.py +64 -0
- agentpool/tool_impls/bash/helpers.py +35 -0
- agentpool/tool_impls/bash/tool.py +171 -0
- agentpool/tool_impls/delete_path/__init__.py +70 -0
- agentpool/tool_impls/delete_path/tool.py +142 -0
- agentpool/tool_impls/download_file/__init__.py +80 -0
- agentpool/tool_impls/download_file/tool.py +183 -0
- agentpool/tool_impls/execute_code/__init__.py +55 -0
- agentpool/tool_impls/execute_code/tool.py +163 -0
- agentpool/tool_impls/grep/__init__.py +80 -0
- agentpool/tool_impls/grep/tool.py +200 -0
- agentpool/tool_impls/list_directory/__init__.py +73 -0
- agentpool/tool_impls/list_directory/tool.py +197 -0
- agentpool/tool_impls/question/__init__.py +42 -0
- agentpool/tool_impls/question/tool.py +127 -0
- agentpool/tool_impls/read/__init__.py +104 -0
- agentpool/tool_impls/read/tool.py +305 -0
- agentpool/tools/__init__.py +2 -1
- agentpool/tools/base.py +114 -34
- agentpool/tools/manager.py +57 -1
- agentpool/ui/base.py +2 -2
- agentpool/ui/mock_provider.py +2 -2
- agentpool/ui/stdlib_provider.py +2 -2
- agentpool/utils/streams.py +21 -96
- agentpool/vfs_registry.py +7 -2
- {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/METADATA +16 -22
- {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/RECORD +242 -195
- {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/WHEEL +1 -1
- agentpool_cli/__main__.py +20 -0
- agentpool_cli/create.py +1 -1
- agentpool_cli/serve_acp.py +59 -1
- agentpool_cli/serve_opencode.py +1 -1
- agentpool_cli/ui.py +557 -0
- agentpool_commands/__init__.py +12 -5
- agentpool_commands/agents.py +1 -1
- agentpool_commands/pool.py +260 -0
- agentpool_commands/session.py +1 -1
- agentpool_commands/text_sharing/__init__.py +119 -0
- agentpool_commands/text_sharing/base.py +123 -0
- agentpool_commands/text_sharing/github_gist.py +80 -0
- agentpool_commands/text_sharing/opencode.py +462 -0
- agentpool_commands/text_sharing/paste_rs.py +59 -0
- agentpool_commands/text_sharing/pastebin.py +116 -0
- agentpool_commands/text_sharing/shittycodingagent.py +112 -0
- agentpool_commands/utils.py +31 -32
- agentpool_config/__init__.py +30 -2
- agentpool_config/agentpool_tools.py +498 -0
- agentpool_config/converters.py +1 -1
- agentpool_config/event_handlers.py +42 -0
- agentpool_config/events.py +1 -1
- agentpool_config/forward_targets.py +1 -4
- agentpool_config/jinja.py +3 -3
- agentpool_config/mcp_server.py +1 -5
- agentpool_config/nodes.py +1 -1
- agentpool_config/observability.py +44 -0
- agentpool_config/session.py +0 -3
- agentpool_config/storage.py +38 -39
- agentpool_config/task.py +3 -3
- agentpool_config/tools.py +11 -28
- agentpool_config/toolsets.py +22 -90
- agentpool_server/a2a_server/agent_worker.py +307 -0
- agentpool_server/a2a_server/server.py +23 -18
- agentpool_server/acp_server/acp_agent.py +125 -56
- agentpool_server/acp_server/commands/acp_commands.py +46 -216
- agentpool_server/acp_server/commands/docs_commands/fetch_repo.py +8 -7
- agentpool_server/acp_server/event_converter.py +651 -0
- agentpool_server/acp_server/input_provider.py +53 -10
- agentpool_server/acp_server/server.py +1 -11
- agentpool_server/acp_server/session.py +90 -410
- agentpool_server/acp_server/session_manager.py +8 -34
- agentpool_server/agui_server/server.py +3 -1
- agentpool_server/mcp_server/server.py +5 -2
- agentpool_server/opencode_server/ENDPOINTS.md +53 -14
- agentpool_server/opencode_server/OPENCODE_UI_TOOLS_COMPLETE.md +202 -0
- agentpool_server/opencode_server/__init__.py +0 -8
- agentpool_server/opencode_server/converters.py +132 -26
- agentpool_server/opencode_server/input_provider.py +160 -8
- agentpool_server/opencode_server/models/__init__.py +42 -20
- agentpool_server/opencode_server/models/app.py +12 -0
- agentpool_server/opencode_server/models/events.py +203 -29
- agentpool_server/opencode_server/models/mcp.py +19 -0
- agentpool_server/opencode_server/models/message.py +18 -1
- agentpool_server/opencode_server/models/parts.py +134 -1
- agentpool_server/opencode_server/models/question.py +56 -0
- agentpool_server/opencode_server/models/session.py +13 -1
- agentpool_server/opencode_server/routes/__init__.py +4 -0
- agentpool_server/opencode_server/routes/agent_routes.py +33 -2
- agentpool_server/opencode_server/routes/app_routes.py +66 -3
- agentpool_server/opencode_server/routes/config_routes.py +66 -5
- agentpool_server/opencode_server/routes/file_routes.py +184 -5
- agentpool_server/opencode_server/routes/global_routes.py +1 -1
- agentpool_server/opencode_server/routes/lsp_routes.py +1 -1
- agentpool_server/opencode_server/routes/message_routes.py +122 -66
- agentpool_server/opencode_server/routes/permission_routes.py +63 -0
- agentpool_server/opencode_server/routes/pty_routes.py +23 -22
- agentpool_server/opencode_server/routes/question_routes.py +128 -0
- agentpool_server/opencode_server/routes/session_routes.py +139 -68
- agentpool_server/opencode_server/routes/tui_routes.py +1 -1
- agentpool_server/opencode_server/server.py +47 -2
- agentpool_server/opencode_server/state.py +30 -0
- agentpool_storage/__init__.py +0 -4
- agentpool_storage/base.py +81 -2
- agentpool_storage/claude_provider/ARCHITECTURE.md +433 -0
- agentpool_storage/claude_provider/__init__.py +42 -0
- agentpool_storage/{claude_provider.py → claude_provider/provider.py} +190 -8
- agentpool_storage/file_provider.py +149 -15
- agentpool_storage/memory_provider.py +132 -12
- agentpool_storage/opencode_provider/ARCHITECTURE.md +386 -0
- agentpool_storage/opencode_provider/__init__.py +16 -0
- agentpool_storage/opencode_provider/helpers.py +414 -0
- agentpool_storage/opencode_provider/provider.py +895 -0
- agentpool_storage/session_store.py +20 -6
- agentpool_storage/sql_provider/sql_provider.py +135 -2
- agentpool_storage/sql_provider/utils.py +2 -12
- agentpool_storage/zed_provider/__init__.py +16 -0
- agentpool_storage/zed_provider/helpers.py +281 -0
- agentpool_storage/zed_provider/models.py +130 -0
- agentpool_storage/zed_provider/provider.py +442 -0
- agentpool_storage/zed_provider.py +803 -0
- agentpool_toolsets/__init__.py +0 -2
- agentpool_toolsets/builtin/__init__.py +2 -4
- agentpool_toolsets/builtin/code.py +4 -4
- agentpool_toolsets/builtin/debug.py +115 -40
- agentpool_toolsets/builtin/execution_environment.py +54 -165
- agentpool_toolsets/builtin/skills.py +0 -77
- agentpool_toolsets/builtin/subagent_tools.py +64 -51
- agentpool_toolsets/builtin/workers.py +4 -2
- agentpool_toolsets/composio_toolset.py +2 -2
- agentpool_toolsets/entry_points.py +3 -1
- agentpool_toolsets/fsspec_toolset/grep.py +25 -5
- agentpool_toolsets/fsspec_toolset/helpers.py +3 -2
- agentpool_toolsets/fsspec_toolset/toolset.py +350 -66
- agentpool_toolsets/mcp_discovery/data/mcp_servers.parquet +0 -0
- agentpool_toolsets/mcp_discovery/toolset.py +74 -17
- agentpool_toolsets/mcp_run_toolset.py +8 -11
- agentpool_toolsets/notifications.py +33 -33
- agentpool_toolsets/openapi.py +3 -1
- agentpool_toolsets/search_toolset.py +3 -1
- agentpool_config/resources.py +0 -33
- agentpool_server/acp_server/acp_tools.py +0 -43
- agentpool_server/acp_server/commands/spawn.py +0 -210
- agentpool_storage/opencode_provider.py +0 -730
- agentpool_storage/text_log_provider.py +0 -276
- agentpool_toolsets/builtin/chain.py +0 -288
- agentpool_toolsets/builtin/user_interaction.py +0 -52
- agentpool_toolsets/semantic_memory_toolset.py +0 -536
- {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/entry_points.txt +0 -0
- {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/licenses/LICENSE +0 -0
--- a/agentpool_toolsets/semantic_memory_toolset.py
+++ /dev/null
@@ -1,536 +0,0 @@
-"""Semantic memory toolset using TypeAgent's KnowPro for knowledge processing."""
-
-from __future__ import annotations
-
-from dataclasses import dataclass
-from datetime import UTC, datetime
-import os
-import re
-import time
-from typing import TYPE_CHECKING, Any, Literal, Self
-
-from agentpool.resource_providers import ResourceProvider
-
-
-if TYPE_CHECKING:
-    from types import TracebackType
-
-    from typeagent.knowpro import answers, query, searchlang
-    from typeagent.knowpro.answer_response_schema import AnswerResponse
-    from typeagent.knowpro.conversation_base import ConversationBase
-    from typeagent.knowpro.search_query_schema import SearchQuery
-    from typeagent.podcasts import podcast
-    from typeagent.storage.memory.semrefindex import TermToSemanticRefIndex
-    import typechat
-
-    from agentpool.agents import Agent
-    from agentpool.agents.acp_agent import ACPAgent
-    from agentpool.common_types import ModelType
-    from agentpool.tools.base import Tool
-
-
-class AgentTypeChatModel:
-    """TypeChat language model backed by agentpool Agent.
-
-    Implements the typechat.TypeChatLanguageModel protocol using an Agent
-    for LLM completions instead of direct API calls.
-    """
-
-    def __init__(self, agent: Agent[Any, str] | ACPAgent) -> None:
-        """Initialize with an Agent for completions.
-
-        Args:
-            agent: The agentpool Agent to use for LLM calls
-        """
-        self.agent = agent
-
-    async def complete(
-        self, prompt: str | list[typechat.PromptSection]
-    ) -> typechat.Success[str] | typechat.Failure:
-        """Request completion from the Agent.
-
-        Args:
-            prompt: Either a string prompt or list of PromptSection dicts
-
-        Returns:
-            Success with response text or Failure with error message
-        """
-        import typechat
-
-        try:
-            # Convert prompt sections to a single string if needed
-            if isinstance(prompt, list):
-                # Combine sections into a conversation-style prompt
-                parts: list[str] = []
-                for section in prompt:
-                    role = section["role"]
-                    content = section["content"]
-                    match role:
-                        case "system":
-                            parts.append(f"[System]: {content}")
-                        case "user":
-                            parts.append(f"[User]: {content}")
-                        case "assistant":
-                            parts.append(f"[Assistant]: {content}")
-                prompt_text = "\n\n".join(parts)
-            else:
-                prompt_text = prompt
-
-            # Run the agent and get response
-            result = await self.agent.run(prompt_text)
-            return typechat.Success(result.data)
-
-        except Exception as e:  # noqa: BLE001
-            return typechat.Failure(f"Agent completion failed: {e!r}")
-
-
-@dataclass
-class ProcessingContext:
-    """Context for TypeAgent knowledge processing."""
-
-    lang_search_options: searchlang.LanguageSearchOptions
-    answer_context_options: answers.AnswerContextOptions
-    query_context: query.QueryEvalContext[podcast.PodcastMessage, TermToSemanticRefIndex]
-    query_translator: typechat.TypeChatJsonTranslator[SearchQuery]
-    answer_translator: typechat.TypeChatJsonTranslator[AnswerResponse]
-
-
-@dataclass
-class QueryResponse:
-    """Response from a knowledge query."""
-
-    success: bool
-    answer: str
-    time_ms: int
-
-
-@dataclass
-class IngestResponse:
-    """Response from ingesting content into the knowledge base."""
-
-    success: bool
-    messages_added: int
-    semantic_refs_added: int
-    error: str | None = None
-
-
-class SemanticMemoryTools(ResourceProvider):
-    """Provider for semantic memory / knowledge processing tools.
-
-    Uses TypeAgent's KnowPro for:
-    - Semantic indexing of conversations/transcripts
-    - Natural language search queries
-    - Structured answer generation
-    """
-
-    def __init__(
-        self,
-        model: ModelType = None,
-        dbname: str | None = None,
-        name: str = "semantic_memory",
-    ) -> None:
-        """Initialize semantic memory tools.
-
-        Args:
-            model: Model to use for LLM sampling (query translation, answers)
-            dbname: SQLite database path, or None for in-memory storage
-            name: Provider name
-        """
-        super().__init__(name=name)
-        self.model = model
-        self.dbname = dbname
-        self._agent: Agent[Any, str] | None = None
-        self._context: ProcessingContext | None = None
-        self._tools: list[Tool] | None = None
-
-    async def __aenter__(self) -> Self:
-        """Initialize the agent and TypeAgent context."""
-        from agentpool import Agent
-
-        # Create minimal agent for LLM sampling
-        self._agent = Agent(model=self.model, name=f"{self.name}-sampler")
-        await self._agent.__aenter__()
-        self._context = await self._make_context()  # Build TypeAgent processing context
-        return self
-
-    async def __aexit__(
-        self,
-        exc_type: type[BaseException] | None,
-        exc_val: BaseException | None,
-        exc_tb: TracebackType | None,
-    ) -> None:
-        """Cleanup agent resources."""
-        if self._agent:
-            await self._agent.__aexit__(exc_type, exc_val, exc_tb)
-            self._agent = None
-        self._context = None
-
-    async def _make_context(self) -> ProcessingContext:
-        """Create TypeAgent processing context with our Agent-backed model."""
-        from typeagent.aitools import utils
-        from typeagent.knowpro import answers, searchlang
-        from typeagent.knowpro.answer_response_schema import AnswerResponse
-        from typeagent.knowpro.convsettings import ConversationSettings
-        from typeagent.knowpro.search_query_schema import SearchQuery
-        from typeagent.podcasts import podcast
-        from typeagent.storage.utils import create_storage_provider
-
-        if self._agent is None:
-            msg = "Agent not initialized"
-            raise RuntimeError(msg)
-
-        settings = ConversationSettings()
-        # Set up storage provider (SQLite or memory)
-        settings.storage_provider = await create_storage_provider(
-            settings.message_text_index_settings,
-            settings.related_term_index_settings,
-            self.dbname,
-            podcast.PodcastMessage,
-        )
-        lang_search_options = searchlang.LanguageSearchOptions(
-            compile_options=searchlang.LanguageQueryCompileOptions(),
-            max_message_matches=25,
-        )
-        answer_context_options = answers.AnswerContextOptions(entities_top_k=50, topics_top_k=50)
-
-        query_context = await self._load_conversation_index(settings)  # Load / create conv index
-        # Create Agent-backed TypeChat model
-        model = AgentTypeChatModel(self._agent)
-        # Create translators for structured extraction
-        query_translator = utils.create_translator(model, SearchQuery)
-        answer_translator = utils.create_translator(model, AnswerResponse)
-        return ProcessingContext(
-            lang_search_options=lang_search_options,
-            answer_context_options=answer_context_options,
-            query_context=query_context,
-            query_translator=query_translator,
-            answer_translator=answer_translator,
-        )
-
-    async def _load_conversation_index(
-        self,
-        settings: Any,
-    ) -> query.QueryEvalContext[podcast.PodcastMessage, Any]:
-        """Load conversation index from database or create empty one."""
-        from typeagent.knowpro import query
-        from typeagent.podcasts import podcast
-
-        if self.dbname is None:
-            # Try loading from default test data, or create empty
-            try:
-                conversation = await podcast.Podcast.read_from_file(
-                    "testdata/Episode_53_AdrianTchaikovsky_index",
-                    settings,
-                )
-            except FileNotFoundError:
-                conversation = await podcast.Podcast.create(settings)
-        else:
-            conversation = await podcast.Podcast.create(settings)
-
-        self._conversation = conversation
-        return query.QueryEvalContext(conversation)
-
-    @property
-    def conversation(self) -> ConversationBase[Any] | None:
-        """Get the current conversation/knowledge base."""
-        return getattr(self, "_conversation", None)
-
-    async def get_tools(self) -> list[Tool]:
-        """Get available semantic memory tools."""
-        if self._tools is not None:
-            return self._tools
-
-        self._tools = [
-            self.create_tool(self.query_knowledge, read_only=True, idempotent=True),
-            self.create_tool(self.ingest_transcript),
-            self.create_tool(self.ingest_text),
-        ]
-        return self._tools
-
-    async def query_knowledge(self, question: str) -> QueryResponse:
-        """Query the knowledge base with a natural language question.
-
-        Returns an answer synthesized from indexed conversations and documents.
-
-        Args:
-            question: Natural language question to answer
-
-        Returns:
-            QueryResponse with success status, answer text, and timing
-        """
-        from typeagent.knowpro import answers, searchlang
-        import typechat
-
-        if self._context is None:
-            return QueryResponse(success=False, answer="Semantic memory not initialized", time_ms=0)
-        t0 = time.time()
-        question = question.strip()
-        if not question:
-            dt = int((time.time() - t0) * 1000)
-            return QueryResponse(success=False, answer="No question provided", time_ms=dt)
-
-        # Stage 1-3: LLM -> proto-query, compile, execute
-        result = await searchlang.search_conversation_with_language(
-            self._context.query_context.conversation,
-            self._context.query_translator,
-            question,
-            self._context.lang_search_options,
-        )
-
-        if isinstance(result, typechat.Failure):
-            dt = int((time.time() - t0) * 1000)
-            return QueryResponse(success=False, answer=result.message, time_ms=dt)
-
-        # Stage 3a-4: ordinals -> messages/semrefs, LLM -> answer
-        _, combined_answer = await answers.generate_answers(
-            self._context.answer_translator,
-            result.value,
-            self._context.query_context.conversation,
-            question,
-            options=self._context.answer_context_options,
-        )
-
-        dt = int((time.time() - t0) * 1000)
-
-        match combined_answer.type:
-            case "NoAnswer":
-                answer = combined_answer.whyNoAnswer or "No answer found"
-                return QueryResponse(success=False, answer=answer, time_ms=dt)
-            case "Answered":
-                answer = combined_answer.answer or ""
-                return QueryResponse(success=True, answer=answer, time_ms=dt)
-            case _:
-                return QueryResponse(success=False, answer="Unexpected response type", time_ms=dt)
-
-    async def ingest_transcript(
-        self,
-        file_path: str,
-        name: str | None = None,
-        fmt: Literal["auto", "txt", "vtt"] = "auto",
-    ) -> IngestResponse:
-        """Ingest a transcript file into the knowledge base.
-
-        Supports plain text (.txt) and WebVTT (.vtt) formats.
-        The content will be indexed for semantic search.
-
-        Args:
-            file_path: Path to the transcript file
-            name: Optional name for the transcript (defaults to filename)
-            fmt: File format - "auto" detects from extension, or specify "txt"/"vtt"
-
-        Returns:
-            IngestResponse with counts of added messages and semantic refs
-        """
-        if self._context is None or self.conversation is None:
-            return IngestResponse(
-                success=False,
-                messages_added=0,
-                semantic_refs_added=0,
-                error="Semantic memory not initialized",
-            )
-
-        # Detect format
-        if fmt == "auto":
-            ext = os.path.splitext(file_path)[1].lower()  # noqa: PTH122
-            fmt = "vtt" if ext == ".vtt" else "txt"
-
-        try:
-            if fmt == "vtt":
-                result = await self._ingest_vtt_file(file_path, name)
-            else:
-                result = await self._ingest_text_file(file_path, name)
-        except Exception as e:  # noqa: BLE001
-            return IngestResponse(
-                success=False,
-                messages_added=0,
-                semantic_refs_added=0,
-                error=str(e),
-            )
-        else:
-            return result
-
-    async def _ingest_vtt_file(self, file_path: str, name: str | None) -> IngestResponse:
-        """Ingest a WebVTT file."""
-        from datetime import timedelta
-
-        from typeagent.knowpro.universal_message import (
-            UNIX_EPOCH,
-            ConversationMessage,
-            ConversationMessageMeta,
-            format_timestamp_utc,
-        )
-        import webvtt  # type: ignore[import-untyped]
-
-        assert self.conversation
-        vtt = webvtt.read(file_path)
-        messages: list[ConversationMessage] = []
-
-        for caption in vtt:
-            if not caption.text.strip():
-                continue
-
-            # Parse voice tags for speaker detection
-            from typeagent.transcripts.transcript_ingest import (
-                parse_voice_tags,
-                webvtt_timestamp_to_seconds,
-            )
-
-            raw_text = getattr(caption, "raw_text", caption.text)
-            voice_segments = parse_voice_tags(raw_text)
-
-            for speaker, text in voice_segments:
-                if not text.strip():
-                    continue
-
-                offset_seconds = webvtt_timestamp_to_seconds(caption.start)
-                ts = format_timestamp_utc(UNIX_EPOCH + timedelta(seconds=offset_seconds))
-                metadata = ConversationMessageMeta(speaker=speaker, recipients=[])
-                message = ConversationMessage(text_chunks=[text], metadata=metadata, timestamp=ts)
-                messages.append(message)
-        if not messages:
-            return IngestResponse(
-                success=False,
-                messages_added=0,
-                semantic_refs_added=0,
-                error="No messages found in VTT file",
-            )
-
-        result = await self.conversation.add_messages_with_indexing(messages)
-        return IngestResponse(
-            success=True,
-            messages_added=result.messages_added,
-            semantic_refs_added=result.semrefs_added,
-        )
-
-    async def _ingest_text_file(self, file_path: str, name: str | None) -> IngestResponse:
-        """Ingest a plain text transcript file."""
-        from typeagent.knowpro.universal_message import (
-            UNIX_EPOCH,
-            ConversationMessage,
-            ConversationMessageMeta,
-            format_timestamp_utc,
-        )
-
-        with open(file_path, encoding="utf-8") as f:  # noqa: PTH123
-            lines = f.readlines()
-
-        # Parse transcript lines with speaker detection
-        speaker_pattern = re.compile(r"^\s*(?P<speaker>[A-Z][A-Z\s]*?):\s*(?P<text>.*)$")
-
-        messages: list[ConversationMessage] = []
-        current_speaker: str | None = None
-        current_chunks: list[str] = []
-        assert self.conversation
-        for line in lines:
-            stripped = line.strip()
-            if not stripped:
-                continue
-
-            match = speaker_pattern.match(stripped)
-            if match:
-                # Save previous message if exists
-                if current_chunks:
-                    meta = ConversationMessageMeta(speaker=current_speaker, recipients=[])
-                    ts = format_timestamp_utc(UNIX_EPOCH)
-                    chunks = [" ".join(current_chunks)]
-                    message = ConversationMessage(text_chunks=chunks, metadata=meta, timestamp=ts)
-                    messages.append(message)
-                current_speaker = match.group("speaker").strip()
-                current_chunks = [match.group("text").strip()]
-            elif current_chunks:
-                current_chunks.append(stripped)
-            else:
-                # No speaker detected, use None
-                current_chunks = [stripped]
-
-        # Don't forget last message
-        if current_chunks:
-            metadata = ConversationMessageMeta(speaker=current_speaker, recipients=[])
-            chunks = [" ".join(current_chunks)]
-            ts = format_timestamp_utc(UNIX_EPOCH)
-            message = ConversationMessage(text_chunks=chunks, metadata=metadata, timestamp=ts)
-            messages.append(message)
-
-        if not messages:
-            err = "No messages found in text file"
-            return IngestResponse(success=False, messages_added=0, semantic_refs_added=0, error=err)
-        result = await self.conversation.add_messages_with_indexing(messages)
-        return IngestResponse(
-            success=True,
-            messages_added=result.messages_added,
-            semantic_refs_added=result.semrefs_added,
-        )
-
-    async def ingest_text(
-        self,
-        content: str,
-        speaker: str | None = None,
-        timestamp: str | None = None,
-    ) -> IngestResponse:
-        """Ingest raw text content into the knowledge base.
-
-        Useful for adding content from memory, APIs, or other sources.
-        Optionally specify a speaker name for attribution.
-
-        Args:
-            content: The text content to ingest
-            speaker: Optional speaker/source attribution
-            timestamp: Optional ISO timestamp (defaults to now)
-
-        Returns:
-            IngestResponse with counts of added messages and semantic refs
-        """
-        from typeagent.knowpro.universal_message import (
-            ConversationMessage,
-            ConversationMessageMeta,
-            format_timestamp_utc,
-        )
-
-        if self._context is None or self.conversation is None:
-            return IngestResponse(
-                success=False,
-                messages_added=0,
-                semantic_refs_added=0,
-                error="Semantic memory not initialized",
-            )
-
-        content = content.strip()
-        if not content:
-            return IngestResponse(
-                success=False,
-                messages_added=0,
-                semantic_refs_added=0,
-                error="No content provided",
-            )
-
-        # Use provided timestamp or current time
-        if timestamp is None:
-            timestamp = format_timestamp_utc(datetime.now(UTC))
-
-        meta = ConversationMessageMeta(speaker=speaker, recipients=[])
-        message = ConversationMessage(text_chunks=[content], metadata=meta, timestamp=timestamp)
-        try:
-            result = await self.conversation.add_messages_with_indexing([message])
-            return IngestResponse(
-                success=True,
-                messages_added=result.messages_added,
-                semantic_refs_added=result.semrefs_added,
-            )
-        except Exception as e:  # noqa: BLE001
-            return IngestResponse(
-                success=False,
-                messages_added=0,
-                semantic_refs_added=0,
-                error=str(e),
-            )
-
-
-if __name__ == "__main__":
-    import anyio
-
-    async def main() -> None:
-        async with SemanticMemoryTools(model="openai:gpt-4o-mini") as tools:
-            fns = await tools.get_tools()
-            print(f"Available tools: {[t.name for t in fns]}")
-
-    anyio.run(main)
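For projects still pinned to agentpool 2.2.3, the removed `SemanticMemoryTools` provider shown in the deleted file above can be exercised directly. Below is a minimal sketch based only on the 2.2.3 API visible in that file (the `agentpool_toolsets.semantic_memory_toolset` import path and an `openai:gpt-4o-mini` model are assumptions taken from its own `__main__` block); the module is gone in 2.5.0, so this will not run against the new release.

```python
# Sketch for agentpool 2.2.3 only: the module below was deleted in 2.5.0.
import anyio

from agentpool_toolsets.semantic_memory_toolset import SemanticMemoryTools


async def demo() -> None:
    # In-memory knowledge base; pass dbname="memory.db" for SQLite persistence.
    async with SemanticMemoryTools(model="openai:gpt-4o-mini") as memory:
        ingest = await memory.ingest_text(
            "The 2.5.0 release removes the semantic memory toolset.",
            speaker="release-notes",
        )
        print(ingest.messages_added, ingest.semantic_refs_added)

        reply = await memory.query_knowledge("What does the 2.5.0 release remove?")
        print(reply.success, reply.answer)


anyio.run(demo)
```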