python-base-agent 2026.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- base_agent/__init__.py +49 -0
- base_agent/agent.py +1060 -0
- base_agent/config.py +220 -0
- base_agent/contracts/__init__.py +209 -0
- base_agent/contracts/access_control_summary_part.py +42 -0
- base_agent/contracts/agent_loop_part.py +87 -0
- base_agent/contracts/agent_tool_part.py +15 -0
- base_agent/contracts/append_entity_class_part.py +15 -0
- base_agent/contracts/broadcast_message.py +30 -0
- base_agent/contracts/broadcast_message_type.py +13 -0
- base_agent/contracts/cache_stability_metadata.py +27 -0
- base_agent/contracts/code_generation.py +18 -0
- base_agent/contracts/context_management_notification.py +20 -0
- base_agent/contracts/context_management_pipeline.py +21 -0
- base_agent/contracts/cross_vector_similarity_search_request.py +14 -0
- base_agent/contracts/data_classification_tag.py +12 -0
- base_agent/contracts/data_lineage_part.py +46 -0
- base_agent/contracts/data_part.py +15 -0
- base_agent/contracts/data_pointer_part.py +20 -0
- base_agent/contracts/embedding_request.py +15 -0
- base_agent/contracts/embedding_response.py +21 -0
- base_agent/contracts/entity_matching_form_data.py +79 -0
- base_agent/contracts/entity_vector_text_search_match.py +12 -0
- base_agent/contracts/entity_vector_text_search_request.py +14 -0
- base_agent/contracts/entity_vector_text_search_result.py +24 -0
- base_agent/contracts/entity_vectorization_job_info.py +24 -0
- base_agent/contracts/entity_vectorization_jobs_result.py +30 -0
- base_agent/contracts/entity_vectorization_progress.py +14 -0
- base_agent/contracts/entity_vectorization_request.py +15 -0
- base_agent/contracts/entity_vectorization_result.py +17 -0
- base_agent/contracts/envelope.py +20 -0
- base_agent/contracts/error_part.py +24 -0
- base_agent/contracts/error_part_type.py +14 -0
- base_agent/contracts/file_part.py +17 -0
- base_agent/contracts/file_pointer_part.py +17 -0
- base_agent/contracts/file_revectorization_request_event.py +19 -0
- base_agent/contracts/file_vectorization_request_event.py +22 -0
- base_agent/contracts/for_each_part.py +30 -0
- base_agent/contracts/fuzzy_matching_params.py +21 -0
- base_agent/contracts/group_plan.py +16 -0
- base_agent/contracts/header.py +69 -0
- base_agent/contracts/human_input_request.py +16 -0
- base_agent/contracts/inspector_pipeline_item.py +19 -0
- base_agent/contracts/list_entity_vectorization_jobs_request.py +12 -0
- base_agent/contracts/llm_accounting_part.py +33 -0
- base_agent/contracts/llm_error_notification.py +13 -0
- base_agent/contracts/llm_judge_job.py +84 -0
- base_agent/contracts/llm_judge_params.py +15 -0
- base_agent/contracts/llm_judge_progress.py +20 -0
- base_agent/contracts/llm_model_part.py +72 -0
- base_agent/contracts/llm_reasoning_summary.py +23 -0
- base_agent/contracts/llm_response.py +25 -0
- base_agent/contracts/llm_response_part.py +54 -0
- base_agent/contracts/match_entities_job.py +43 -0
- base_agent/contracts/match_result.py +52 -0
- base_agent/contracts/notification_error_type.py +12 -0
- base_agent/contracts/notification_severity.py +14 -0
- base_agent/contracts/on_trigger_fired.py +17 -0
- base_agent/contracts/parallel_execution_plan.py +25 -0
- base_agent/contracts/part.py +30 -0
- base_agent/contracts/resume_entity_vectorization_request.py +11 -0
- base_agent/contracts/room_dataset_catalog.py +44 -0
- base_agent/contracts/room_dataset_catalog_entry.py +38 -0
- base_agent/contracts/room_dataset_column.py +12 -0
- base_agent/contracts/room_dataset_schema.py +24 -0
- base_agent/contracts/scratch_pad_content.py +14 -0
- base_agent/contracts/search_request.py +27 -0
- base_agent/contracts/search_response.py +39 -0
- base_agent/contracts/search_result.py +21 -0
- base_agent/contracts/search_type_result.py +13 -0
- base_agent/contracts/semantic_search_progress.py +18 -0
- base_agent/contracts/semantic_search_result.py +25 -0
- base_agent/contracts/stream_content_message.py +35 -0
- base_agent/contracts/stream_event_type.py +12 -0
- base_agent/contracts/stream_message.py +14 -0
- base_agent/contracts/stream_metadata.py +28 -0
- base_agent/contracts/task_artifact_update.py +20 -0
- base_agent/contracts/task_error_response.py +22 -0
- base_agent/contracts/task_ref.py +19 -0
- base_agent/contracts/task_request.py +21 -0
- base_agent/contracts/task_response.py +21 -0
- base_agent/contracts/task_status_update.py +31 -0
- base_agent/contracts/text_part.py +15 -0
- base_agent/contracts/tool_authorization_request.py +21 -0
- base_agent/contracts/tool_call_index.py +42 -0
- base_agent/contracts/tool_call_index_entry.py +32 -0
- base_agent/contracts/tool_call_initiation.py +27 -0
- base_agent/contracts/tool_learning_request.py +34 -0
- base_agent/contracts/tool_response_completion.py +19 -0
- base_agent/contracts/tool_response_part.py +31 -0
- base_agent/contracts/usage_metadata.py +11 -0
- base_agent/contracts/user_data_notification.py +13 -0
- base_agent/contracts/user_notification.py +21 -0
- base_agent/contracts/vector_similarity_search_request.py +13 -0
- base_agent/contracts/vector_similarity_search_result.py +25 -0
- base_agent/contracts/workflow_part.py +17 -0
- base_agent/exceptions.py +39 -0
- base_agent/health/__init__.py +5 -0
- base_agent/health/server.py +191 -0
- base_agent/messaging/__init__.py +5 -0
- base_agent/messaging/kafka_client.py +572 -0
- base_agent/ordering/__init__.py +5 -0
- base_agent/ordering/vector_clock.py +176 -0
- base_agent/prompts/__init__.py +10 -0
- base_agent/prompts/prompt_manager.py +213 -0
- base_agent/registration/__init__.py +5 -0
- base_agent/registration/registration_client.py +364 -0
- base_agent/schemas/__init__.py +14 -0
- base_agent/schemas/models.py +30 -0
- base_agent/schemas/schema_registry_client.py +493 -0
- base_agent/schemas/source_type.py +24 -0
- base_agent/schemas/technical_name_validator.py +147 -0
- base_agent/state/__init__.py +13 -0
- base_agent/state/logical_clock_tracker.py +50 -0
- base_agent/state/session_tracker.py +91 -0
- base_agent/state/store.py +333 -0
- base_agent/storage/__init__.py +27 -0
- base_agent/storage/azure_store.py +615 -0
- base_agent/storage/exceptions.py +26 -0
- base_agent/storage/models.py +73 -0
- base_agent/storage/object_store.py +248 -0
- base_agent/storage/object_store_factory.py +136 -0
- base_agent/storage/s3_store.py +411 -0
- base_agent/telemetry/__init__.py +1 -0
- base_agent/tools/__init__.py +12 -0
- base_agent/tools/models.py +66 -0
- base_agent/tools/tool_registry_client.py +607 -0
- base_agent/utils/__init__.py +5 -0
- base_agent/utils/logger.py +146 -0
- base_agent/utils/version_utils.py +92 -0
- python_base_agent-2026.2.13.dist-info/METADATA +536 -0
- python_base_agent-2026.2.13.dist-info/RECORD +134 -0
- python_base_agent-2026.2.13.dist-info/WHEEL +5 -0
- python_base_agent-2026.2.13.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,607 @@
|
|
|
1
|
+
"""Tool registry client for registering agent tools with the platform.
|
|
2
|
+
|
|
3
|
+
This is the Python equivalent of Java's ToolRegistryClient.
|
|
4
|
+
Reference: /base-agent/src/main/java/one/ai/platform/baseagent/agent/tools/ToolRegistryClient.java
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import threading
|
|
9
|
+
import time
|
|
10
|
+
|
|
11
|
+
import httpx
|
|
12
|
+
|
|
13
|
+
from base_agent.tools.models import (
|
|
14
|
+
OutputSchemaConfig,
|
|
15
|
+
OutputSchemaResponse,
|
|
16
|
+
ToolRegistration,
|
|
17
|
+
ToolResponse,
|
|
18
|
+
)
|
|
19
|
+
from base_agent.utils.logger import sanitize_for_logging
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ToolRegistryClient:
    """
    Client for registering tools with the platform tool registry.

    Provides feature parity with Java ToolRegistryClient:
    - Hash-based schema registration
    - Smart update logic (only updates when fields change)
    - Thread-safe operations
    - Health check integration via is_live property
    - Automatic deactivation of registered tools on close()

    Reference: Java ToolRegistryClient.java
    """

    # Base REST path for the tool registry on the admin gateway; all
    # requests in this client are made relative to this path.
    API_PATH = "/api/v1/ai-agent-tools"
|
|
39
|
+
|
|
40
|
+
def __init__(self, admin_gateway_url: str, agent_id: int):
|
|
41
|
+
"""
|
|
42
|
+
Initialize the tool registry client.
|
|
43
|
+
|
|
44
|
+
Args:
|
|
45
|
+
admin_gateway_url: Base URL of the admin gateway (e.g., "http://localhost:8888")
|
|
46
|
+
agent_id: ID of the agent registering tools
|
|
47
|
+
"""
|
|
48
|
+
if not admin_gateway_url:
|
|
49
|
+
raise ValueError("admin_gateway_url is required")
|
|
50
|
+
if agent_id <= 0:
|
|
51
|
+
raise ValueError("agent_id must be positive")
|
|
52
|
+
|
|
53
|
+
self._admin_gateway_url = admin_gateway_url.rstrip("/")
|
|
54
|
+
self._agent_id = agent_id
|
|
55
|
+
self._registered_tool_ids: set[int] = set()
|
|
56
|
+
self._is_live = True
|
|
57
|
+
self._lock = threading.Lock()
|
|
58
|
+
self._client = httpx.Client(
|
|
59
|
+
base_url=self._admin_gateway_url,
|
|
60
|
+
timeout=30.0,
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
logger.info(
|
|
64
|
+
f"ToolRegistryClient initialized for agent {agent_id} "
|
|
65
|
+
f"at {sanitize_for_logging(admin_gateway_url)}"
|
|
66
|
+
)
|
|
67
|
+
|
|
68
|
+
@property
|
|
69
|
+
def is_live(self) -> bool:
|
|
70
|
+
"""Check if the client is live (healthy)."""
|
|
71
|
+
return self._is_live
|
|
72
|
+
|
|
73
|
+
@property
|
|
74
|
+
def agent_id(self) -> int:
|
|
75
|
+
"""Get the agent ID."""
|
|
76
|
+
return self._agent_id
|
|
77
|
+
|
|
78
|
+
@property
|
|
79
|
+
def registered_tool_ids(self) -> set[int]:
|
|
80
|
+
"""Get the set of registered tool IDs."""
|
|
81
|
+
with self._lock:
|
|
82
|
+
return self._registered_tool_ids.copy()
|
|
83
|
+
|
|
84
|
+
def _request_with_retry(
|
|
85
|
+
self,
|
|
86
|
+
method: str,
|
|
87
|
+
path: str,
|
|
88
|
+
max_retries: int = 3,
|
|
89
|
+
**kwargs,
|
|
90
|
+
) -> httpx.Response | None:
|
|
91
|
+
"""
|
|
92
|
+
Make an HTTP request with retry logic.
|
|
93
|
+
|
|
94
|
+
Args:
|
|
95
|
+
method: HTTP method (GET, POST, PUT, PATCH, DELETE)
|
|
96
|
+
path: API path
|
|
97
|
+
max_retries: Maximum number of retries on connection errors
|
|
98
|
+
**kwargs: Additional arguments to pass to httpx
|
|
99
|
+
|
|
100
|
+
Returns:
|
|
101
|
+
Response object or None if all retries failed
|
|
102
|
+
"""
|
|
103
|
+
for attempt in range(max_retries):
|
|
104
|
+
try:
|
|
105
|
+
response = self._client.request(method, path, **kwargs)
|
|
106
|
+
return response
|
|
107
|
+
except (httpx.ConnectError, httpx.TimeoutException) as e:
|
|
108
|
+
if attempt == max_retries - 1:
|
|
109
|
+
self._is_live = False
|
|
110
|
+
logger.error(
|
|
111
|
+
f"Connection failed after {max_retries} attempts: "
|
|
112
|
+
f"{sanitize_for_logging(str(e))}"
|
|
113
|
+
)
|
|
114
|
+
return None
|
|
115
|
+
wait_time = 0.5 * (attempt + 1)
|
|
116
|
+
logger.warning(
|
|
117
|
+
f"Request failed (attempt {attempt + 1}/{max_retries}), "
|
|
118
|
+
f"retrying in {wait_time}s: {sanitize_for_logging(str(e))}"
|
|
119
|
+
)
|
|
120
|
+
time.sleep(wait_time)
|
|
121
|
+
return None
|
|
122
|
+
|
|
123
|
+
def _get_existing_tool(self, tool_technical_name: str) -> ToolResponse | None:
|
|
124
|
+
"""
|
|
125
|
+
Check if a tool exists for this agent.
|
|
126
|
+
|
|
127
|
+
Args:
|
|
128
|
+
tool_technical_name: Technical name of the tool
|
|
129
|
+
|
|
130
|
+
Returns:
|
|
131
|
+
ToolResponse if found, None otherwise
|
|
132
|
+
"""
|
|
133
|
+
response = self._request_with_retry(
|
|
134
|
+
"GET",
|
|
135
|
+
self.API_PATH,
|
|
136
|
+
params={
|
|
137
|
+
"agent_id": self._agent_id,
|
|
138
|
+
"tool_technical_name": tool_technical_name,
|
|
139
|
+
},
|
|
140
|
+
)
|
|
141
|
+
|
|
142
|
+
if response is None:
|
|
143
|
+
return None
|
|
144
|
+
|
|
145
|
+
if response.status_code == 200:
|
|
146
|
+
data = response.json()
|
|
147
|
+
if data and len(data) > 0:
|
|
148
|
+
return ToolResponse(**data[0])
|
|
149
|
+
elif response.status_code == 404:
|
|
150
|
+
return None
|
|
151
|
+
else:
|
|
152
|
+
logger.warning(
|
|
153
|
+
f"Unexpected status {response.status_code} checking for tool: "
|
|
154
|
+
f"{sanitize_for_logging(response.text)}"
|
|
155
|
+
)
|
|
156
|
+
|
|
157
|
+
return None
|
|
158
|
+
|
|
159
|
+
def _needs_update(
|
|
160
|
+
self,
|
|
161
|
+
existing: ToolResponse,
|
|
162
|
+
name: str,
|
|
163
|
+
description: str | None,
|
|
164
|
+
ai_model_prompt_guidance: str | None,
|
|
165
|
+
input_schema_hash: str | None,
|
|
166
|
+
output_schemas: list[OutputSchemaConfig] | None,
|
|
167
|
+
metadata: dict | None,
|
|
168
|
+
) -> bool:
|
|
169
|
+
"""
|
|
170
|
+
Check if an existing tool needs to be updated.
|
|
171
|
+
|
|
172
|
+
Args:
|
|
173
|
+
existing: Existing tool from API
|
|
174
|
+
name: New name
|
|
175
|
+
description: New description
|
|
176
|
+
ai_model_prompt_guidance: New AI guidance
|
|
177
|
+
input_schema_hash: New input schema hash
|
|
178
|
+
output_schemas: New output schemas
|
|
179
|
+
metadata: New metadata
|
|
180
|
+
|
|
181
|
+
Returns:
|
|
182
|
+
True if any field differs and update is needed
|
|
183
|
+
"""
|
|
184
|
+
if existing.name != name:
|
|
185
|
+
logger.debug(f"Tool name changed: {existing.name} -> {name}")
|
|
186
|
+
return True
|
|
187
|
+
|
|
188
|
+
if existing.description != description:
|
|
189
|
+
logger.debug("Tool description changed")
|
|
190
|
+
return True
|
|
191
|
+
|
|
192
|
+
if existing.ai_model_prompt_guidance != ai_model_prompt_guidance:
|
|
193
|
+
logger.debug("Tool AI guidance changed")
|
|
194
|
+
return True
|
|
195
|
+
|
|
196
|
+
if existing.input_schema_hash != input_schema_hash:
|
|
197
|
+
logger.debug(
|
|
198
|
+
f"Tool input schema hash changed: "
|
|
199
|
+
f"{existing.input_schema_hash} -> {input_schema_hash}"
|
|
200
|
+
)
|
|
201
|
+
return True
|
|
202
|
+
|
|
203
|
+
if existing.tool_metadata != metadata:
|
|
204
|
+
logger.debug("Tool metadata changed")
|
|
205
|
+
return True
|
|
206
|
+
|
|
207
|
+
# Compare output schemas by hash
|
|
208
|
+
existing_hashes = set()
|
|
209
|
+
if existing.output_schemas:
|
|
210
|
+
existing_hashes = {s.schema_hash for s in existing.output_schemas if s.schema_hash}
|
|
211
|
+
|
|
212
|
+
desired_hashes = set()
|
|
213
|
+
if output_schemas:
|
|
214
|
+
desired_hashes = {s.schema_hash for s in output_schemas}
|
|
215
|
+
|
|
216
|
+
if existing_hashes != desired_hashes:
|
|
217
|
+
logger.debug("Tool output schemas changed")
|
|
218
|
+
return True
|
|
219
|
+
|
|
220
|
+
return False
|
|
221
|
+
|
|
222
|
+
def _sync_output_schemas(
|
|
223
|
+
self,
|
|
224
|
+
tool_id: int,
|
|
225
|
+
existing_schemas: list[OutputSchemaResponse] | None,
|
|
226
|
+
desired_schemas: list[OutputSchemaConfig] | None,
|
|
227
|
+
) -> bool:
|
|
228
|
+
"""
|
|
229
|
+
Sync output schemas - add missing, remove stale.
|
|
230
|
+
|
|
231
|
+
Args:
|
|
232
|
+
tool_id: Tool ID to update
|
|
233
|
+
existing_schemas: Current output schemas
|
|
234
|
+
desired_schemas: Desired output schemas
|
|
235
|
+
|
|
236
|
+
Returns:
|
|
237
|
+
True if sync was successful
|
|
238
|
+
"""
|
|
239
|
+
existing_by_hash: dict[str, OutputSchemaResponse] = {}
|
|
240
|
+
if existing_schemas:
|
|
241
|
+
for schema in existing_schemas:
|
|
242
|
+
if schema.schema_hash:
|
|
243
|
+
existing_by_hash[schema.schema_hash] = schema
|
|
244
|
+
|
|
245
|
+
desired_by_hash: dict[str, OutputSchemaConfig] = {}
|
|
246
|
+
if desired_schemas:
|
|
247
|
+
for schema in desired_schemas:
|
|
248
|
+
desired_by_hash[schema.schema_hash] = schema
|
|
249
|
+
|
|
250
|
+
existing_hashes = set(existing_by_hash.keys())
|
|
251
|
+
desired_hashes = set(desired_by_hash.keys())
|
|
252
|
+
|
|
253
|
+
# Remove stale schemas FIRST (before adding new ones)
|
|
254
|
+
# This is important when is_primary=True, as only one primary can exist
|
|
255
|
+
for hash_value in existing_hashes - desired_hashes:
|
|
256
|
+
schema = existing_by_hash[hash_value]
|
|
257
|
+
if not self.remove_output_schema(tool_id, schema.id):
|
|
258
|
+
return False
|
|
259
|
+
|
|
260
|
+
# Add new schemas
|
|
261
|
+
for hash_value in desired_hashes - existing_hashes:
|
|
262
|
+
schema = desired_by_hash[hash_value]
|
|
263
|
+
if not self.add_output_schema(tool_id, schema):
|
|
264
|
+
return False
|
|
265
|
+
|
|
266
|
+
return True
|
|
267
|
+
|
|
268
|
+
    def register_tool_with_hash(
        self,
        name: str,
        tool_technical_name: str,
        description: str | None = None,
        ai_model_prompt_guidance: str | None = None,
        input_schema_hash: str | None = None,
        output_schemas: list[OutputSchemaConfig] | None = None,
        metadata: dict | None = None,
    ) -> bool:
        """
        Register a tool with hash-based schema references.

        This method implements smart update logic:
        1. Checks if tool exists
        2. If not exists, creates new tool
        3. If exists, compares all fields
        4. Only updates if fields have changed

        The entire operation (including the HTTP round trips) runs under the
        instance lock, so concurrent registrations are serialized.

        Args:
            name: Display name of the tool
            tool_technical_name: Technical identifier (unique per agent+version)
            description: Tool description
            ai_model_prompt_guidance: Guidance for AI models using this tool
            input_schema_hash: SHA-256 hash of input schema
            output_schemas: List of output schema configurations
            metadata: Additional metadata (can include icon)

        Returns:
            True if registration succeeded, False otherwise

        Reference: Java ToolRegistryClient.registerToolWithHash()
        """
        with self._lock:
            logger.info(
                f"Registering tool '{sanitize_for_logging(name)}' "
                f"(technical: {sanitize_for_logging(tool_technical_name)})"
            )

            # Check if tool exists
            existing = self._get_existing_tool(tool_technical_name)

            if existing is None:
                # Create new tool
                return self._create_tool(
                    name=name,
                    tool_technical_name=tool_technical_name,
                    description=description,
                    ai_model_prompt_guidance=ai_model_prompt_guidance,
                    input_schema_hash=input_schema_hash,
                    output_schemas=output_schemas,
                    metadata=metadata,
                )

            # Tool exists - check if update needed
            if not self._needs_update(
                existing=existing,
                name=name,
                description=description,
                ai_model_prompt_guidance=ai_model_prompt_guidance,
                input_schema_hash=input_schema_hash,
                output_schemas=output_schemas,
                metadata=metadata,
            ):
                logger.info(
                    f"Tool '{sanitize_for_logging(tool_technical_name)}' unchanged, skipping update"
                )
                # Track the ID even when unchanged so close() deactivates it.
                self._registered_tool_ids.add(existing.tool_id)
                return True

            # Update existing tool
            return self._update_tool(
                tool_id=existing.tool_id,
                name=name,
                description=description,
                ai_model_prompt_guidance=ai_model_prompt_guidance,
                input_schema_hash=input_schema_hash,
                output_schemas=output_schemas,
                existing_schemas=existing.output_schemas,
                metadata=metadata,
            )
|
|
349
|
+
|
|
350
|
+
    def _create_tool(
        self,
        name: str,
        tool_technical_name: str,
        description: str | None,
        ai_model_prompt_guidance: str | None,
        input_schema_hash: str | None,
        output_schemas: list[OutputSchemaConfig] | None,
        metadata: dict | None,
    ) -> bool:
        """Create a new tool.

        POSTs a ToolRegistration and records the returned tool_id so the
        tool can be deactivated on close(). A 409 conflict (creation race
        lost to another process) is treated as success.
        """
        registration = ToolRegistration(
            agent_id=self._agent_id,
            name=name,
            tool_technical_name=tool_technical_name,
            description=description,
            ai_model_prompt_guidance=ai_model_prompt_guidance,
            input_schema_hash=input_schema_hash,
            output_schemas=output_schemas,
            version_number=1,
            status="ACTIVE",
            tool_metadata=metadata,
        )

        response = self._request_with_retry(
            "POST",
            self.API_PATH,
            json=registration.model_dump(exclude_none=True),
        )

        if response is None:
            logger.error(f"Failed to create tool '{sanitize_for_logging(tool_technical_name)}'")
            return False

        if response.status_code in (200, 201):
            data = response.json()
            tool_id = data.get("tool_id")
            if tool_id:
                self._registered_tool_ids.add(tool_id)
                logger.info(
                    f"Successfully created tool '{sanitize_for_logging(tool_technical_name)}' "
                    f"(ID: {tool_id})"
                )
                return True
            # NOTE(review): a 2xx response without a tool_id falls through and
            # implicitly returns None (falsy) - confirm this is intended.
        elif response.status_code == 409:
            # Conflict - tool already exists (race condition), treat as success
            logger.info(f"Tool '{sanitize_for_logging(tool_technical_name)}' already exists (409)")
            # Try to fetch the existing tool to track its ID
            existing = self._get_existing_tool(tool_technical_name)
            if existing:
                self._registered_tool_ids.add(existing.tool_id)
            # NOTE(review): 409 is reported as success even when the follow-up
            # fetch fails and the ID could not be tracked - confirm intended.
            return True
        else:
            logger.error(
                f"Failed to create tool '{sanitize_for_logging(tool_technical_name)}': "
                f"{response.status_code} - {sanitize_for_logging(response.text)}"
            )
            return False
|
|
408
|
+
|
|
409
|
+
    def _update_tool(
        self,
        tool_id: int,
        name: str,
        description: str | None,
        ai_model_prompt_guidance: str | None,
        input_schema_hash: str | None,
        output_schemas: list[OutputSchemaConfig] | None,
        existing_schemas: list[OutputSchemaResponse] | None,
        metadata: dict | None,
    ) -> bool:
        """Update an existing tool via PUT, then reconcile its output schemas.

        Returns True when the PUT succeeds (200/204); schema-sync failures
        are logged but do not fail the update.
        """
        update_payload = {
            "name": name,
            "description": description,
            "ai_model_prompt_guidance": ai_model_prompt_guidance,
            "input_schema_hash": input_schema_hash,
            "tool_metadata": metadata,
        }

        # Remove None values
        # NOTE(review): dropping None values means a field can never be
        # cleared back to null through this client - confirm this matches
        # the gateway's PUT semantics.
        update_payload = {k: v for k, v in update_payload.items() if v is not None}

        response = self._request_with_retry(
            "PUT",
            f"{self.API_PATH}/{tool_id}",
            json=update_payload,
        )

        if response is None:
            logger.error(f"Failed to update tool {tool_id}")
            return False

        if response.status_code not in (200, 204):
            logger.error(
                f"Failed to update tool {tool_id}: "
                f"{response.status_code} - {sanitize_for_logging(response.text)}"
            )
            return False

        # Sync output schemas; a failure here is deliberately non-fatal -
        # it is logged and the update is still reported as successful.
        if not self._sync_output_schemas(tool_id, existing_schemas, output_schemas):
            logger.warning(f"Failed to sync output schemas for tool {tool_id}")

        self._registered_tool_ids.add(tool_id)
        logger.info(f"Successfully updated tool {tool_id}")
        return True
|
|
456
|
+
|
|
457
|
+
def add_output_schema(self, tool_id: int, schema: OutputSchemaConfig) -> bool:
|
|
458
|
+
"""
|
|
459
|
+
Add an output schema to a tool.
|
|
460
|
+
|
|
461
|
+
Args:
|
|
462
|
+
tool_id: Tool ID
|
|
463
|
+
schema: Output schema configuration
|
|
464
|
+
|
|
465
|
+
Returns:
|
|
466
|
+
True if successful
|
|
467
|
+
"""
|
|
468
|
+
response = self._request_with_retry(
|
|
469
|
+
"POST",
|
|
470
|
+
f"{self.API_PATH}/{tool_id}/output-schemas",
|
|
471
|
+
json={
|
|
472
|
+
"schema_hash": schema.schema_hash,
|
|
473
|
+
"schema_name": schema.schema_name,
|
|
474
|
+
"is_primary": schema.is_primary,
|
|
475
|
+
"description": schema.description,
|
|
476
|
+
},
|
|
477
|
+
)
|
|
478
|
+
|
|
479
|
+
if response is None:
|
|
480
|
+
return False
|
|
481
|
+
|
|
482
|
+
if response.status_code in (200, 201):
|
|
483
|
+
logger.debug(f"Added output schema '{schema.schema_name}' to tool {tool_id}")
|
|
484
|
+
return True
|
|
485
|
+
elif response.status_code == 409:
|
|
486
|
+
# Already exists
|
|
487
|
+
logger.debug(f"Output schema '{schema.schema_name}' already exists for tool {tool_id}")
|
|
488
|
+
return True
|
|
489
|
+
else:
|
|
490
|
+
logger.error(
|
|
491
|
+
f"Failed to add output schema to tool {tool_id}: "
|
|
492
|
+
f"{response.status_code} - {sanitize_for_logging(response.text)}"
|
|
493
|
+
)
|
|
494
|
+
return False
|
|
495
|
+
|
|
496
|
+
def remove_output_schema(self, tool_id: int, schema_id: int) -> bool:
|
|
497
|
+
"""
|
|
498
|
+
Remove an output schema from a tool.
|
|
499
|
+
|
|
500
|
+
Args:
|
|
501
|
+
tool_id: Tool ID
|
|
502
|
+
schema_id: Output schema ID to remove
|
|
503
|
+
|
|
504
|
+
Returns:
|
|
505
|
+
True if successful
|
|
506
|
+
"""
|
|
507
|
+
response = self._request_with_retry(
|
|
508
|
+
"DELETE",
|
|
509
|
+
f"{self.API_PATH}/{tool_id}/output-schemas/{schema_id}",
|
|
510
|
+
)
|
|
511
|
+
|
|
512
|
+
if response is None:
|
|
513
|
+
return False
|
|
514
|
+
|
|
515
|
+
if response.status_code in (200, 204):
|
|
516
|
+
logger.debug(f"Removed output schema {schema_id} from tool {tool_id}")
|
|
517
|
+
return True
|
|
518
|
+
elif response.status_code == 404:
|
|
519
|
+
# Already removed
|
|
520
|
+
logger.debug(f"Output schema {schema_id} already removed from tool {tool_id}")
|
|
521
|
+
return True
|
|
522
|
+
else:
|
|
523
|
+
logger.error(
|
|
524
|
+
f"Failed to remove output schema {schema_id} from tool {tool_id}: "
|
|
525
|
+
f"{response.status_code} - {sanitize_for_logging(response.text)}"
|
|
526
|
+
)
|
|
527
|
+
return False
|
|
528
|
+
|
|
529
|
+
def activate_tool(self, tool_id: int) -> bool:
|
|
530
|
+
"""
|
|
531
|
+
Activate a tool.
|
|
532
|
+
|
|
533
|
+
Args:
|
|
534
|
+
tool_id: Tool ID to activate
|
|
535
|
+
|
|
536
|
+
Returns:
|
|
537
|
+
True if successful
|
|
538
|
+
"""
|
|
539
|
+
response = self._request_with_retry(
|
|
540
|
+
"PATCH",
|
|
541
|
+
f"{self.API_PATH}/{tool_id}/activate",
|
|
542
|
+
)
|
|
543
|
+
|
|
544
|
+
if response is None:
|
|
545
|
+
return False
|
|
546
|
+
|
|
547
|
+
if response.status_code in (200, 204):
|
|
548
|
+
logger.info(f"Activated tool {tool_id}")
|
|
549
|
+
return True
|
|
550
|
+
else:
|
|
551
|
+
logger.error(
|
|
552
|
+
f"Failed to activate tool {tool_id}: "
|
|
553
|
+
f"{response.status_code} - {sanitize_for_logging(response.text)}"
|
|
554
|
+
)
|
|
555
|
+
return False
|
|
556
|
+
|
|
557
|
+
def deactivate_tool(self, tool_id: int) -> bool:
|
|
558
|
+
"""
|
|
559
|
+
Deactivate a tool.
|
|
560
|
+
|
|
561
|
+
Args:
|
|
562
|
+
tool_id: Tool ID to deactivate
|
|
563
|
+
|
|
564
|
+
Returns:
|
|
565
|
+
True if successful
|
|
566
|
+
"""
|
|
567
|
+
response = self._request_with_retry(
|
|
568
|
+
"PATCH",
|
|
569
|
+
f"{self.API_PATH}/{tool_id}/deactivate",
|
|
570
|
+
)
|
|
571
|
+
|
|
572
|
+
if response is None:
|
|
573
|
+
return False
|
|
574
|
+
|
|
575
|
+
if response.status_code in (200, 204):
|
|
576
|
+
logger.info(f"Deactivated tool {tool_id}")
|
|
577
|
+
return True
|
|
578
|
+
else:
|
|
579
|
+
logger.error(
|
|
580
|
+
f"Failed to deactivate tool {tool_id}: "
|
|
581
|
+
f"{response.status_code} - {sanitize_for_logging(response.text)}"
|
|
582
|
+
)
|
|
583
|
+
return False
|
|
584
|
+
|
|
585
|
+
def close(self) -> None:
|
|
586
|
+
"""
|
|
587
|
+
Close the client, deactivating all registered tools.
|
|
588
|
+
|
|
589
|
+
Reference: Java ToolRegistryClient deactivates all tools on shutdown.
|
|
590
|
+
"""
|
|
591
|
+
with self._lock:
|
|
592
|
+
logger.info(
|
|
593
|
+
f"Closing ToolRegistryClient, deactivating {len(self._registered_tool_ids)} tools"
|
|
594
|
+
)
|
|
595
|
+
|
|
596
|
+
for tool_id in self._registered_tool_ids:
|
|
597
|
+
try:
|
|
598
|
+
self.deactivate_tool(tool_id)
|
|
599
|
+
except Exception as e:
|
|
600
|
+
logger.warning(
|
|
601
|
+
f"Failed to deactivate tool {tool_id} during close: "
|
|
602
|
+
f"{sanitize_for_logging(str(e))}"
|
|
603
|
+
)
|
|
604
|
+
|
|
605
|
+
self._registered_tool_ids.clear()
|
|
606
|
+
self._client.close()
|
|
607
|
+
logger.info("ToolRegistryClient closed")
|