acontext 0.0.6__py3-none-any.whl → 0.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- acontext/agent/__init__.py +0 -0
- acontext/agent/base.py +89 -0
- acontext/agent/disk.py +325 -0
- acontext/resources/async_spaces.py +1 -58
- acontext/resources/spaces.py +1 -58
- {acontext-0.0.6.dist-info → acontext-0.0.7.dist-info}/METADATA +140 -37
- {acontext-0.0.6.dist-info → acontext-0.0.7.dist-info}/RECORD +8 -5
- {acontext-0.0.6.dist-info → acontext-0.0.7.dist-info}/WHEEL +1 -1
acontext/agent/__init__.py
ADDED

File without changes (empty file).
acontext/agent/base.py
ADDED

@@ -0,0 +1,89 @@
+class BaseContext:
+    pass
+
+
+class BaseConverter:
+    def to_openai_tool_schema(self) -> dict:
+        raise NotImplementedError
+
+    def to_anthropic_tool_schema(self) -> dict:
+        raise NotImplementedError
+
+
+class BaseTool(BaseConverter):
+    @property
+    def name(self) -> str:
+        raise NotImplementedError
+
+    @property
+    def description(self) -> str:
+        raise NotImplementedError
+
+    @property
+    def arguments(self) -> dict:
+        raise NotImplementedError
+
+    @property
+    def required_arguments(self) -> list[str]:
+        raise NotImplementedError
+
+    def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
+        raise NotImplementedError
+
+    def to_openai_tool_schema(self) -> dict:
+        return {
+            "type": "function",
+            "function": {
+                "name": self.name,
+                "description": self.description,
+                "parameters": {
+                    "type": "object",
+                    "properties": self.arguments,
+                    "required": self.required_arguments,
+                },
+            },
+        }
+
+    def to_anthropic_tool_schema(self) -> dict:
+        return {
+            "name": self.name,
+            "description": self.description,
+            "input_schema": {
+                "type": "object",
+                "properties": self.arguments,
+                "required": self.required_arguments,
+            },
+        }
+
+
+class BaseToolPool(BaseConverter):
+    def __init__(self):
+        self.tools: dict[str, BaseTool] = {}
+
+    def add_tool(self, tool: BaseTool):
+        self.tools[tool.name] = tool
+
+    def remove_tool(self, tool_name: str):
+        self.tools.pop(tool_name)
+
+    def extent_tool_pool(self, pool: "BaseToolPool"):
+        self.tools.update(pool.tools)
+
+    def execute_tool(
+        self, ctx: BaseContext, tool_name: str, llm_arguments: dict
+    ) -> str:
+        tool = self.tools[tool_name]
+        r = tool.execute(ctx, llm_arguments)
+        return r.strip()
+
+    def tool_exists(self, tool_name: str) -> bool:
+        return tool_name in self.tools
+
+    def to_openai_tool_schema(self) -> list[dict]:
+        return [tool.to_openai_tool_schema() for tool in self.tools.values()]
+
+    def to_anthropic_tool_schema(self) -> list[dict]:
+        return [tool.to_anthropic_tool_schema() for tool in self.tools.values()]
+
+    def format_context(self, *args, **kwargs) -> BaseContext:
+        raise NotImplementedError
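To make the new interface concrete, here is a minimal sketch of a tool built on these base classes and converted to both provider schemas. The `EchoTool` class below is hypothetical and not part of the package; only the `BaseTool`/`BaseToolPool` interface comes from the diff above.

```python
from acontext.agent.base import BaseContext, BaseTool, BaseToolPool


class EchoTool(BaseTool):
    """Hypothetical example tool; only the BaseTool interface comes from the package."""

    @property
    def name(self) -> str:
        return "echo"

    @property
    def description(self) -> str:
        return "Echo the provided text back to the model."

    @property
    def arguments(self) -> dict:
        return {"text": {"type": "string", "description": "Text to echo."}}

    @property
    def required_arguments(self) -> list[str]:
        return ["text"]

    def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
        return llm_arguments.get("text", "")


pool = BaseToolPool()
pool.add_tool(EchoTool())

# to_openai_tool_schema() wraps name/description/arguments into the
# {"type": "function", "function": {...}} shape; the Anthropic variant uses
# "input_schema" instead. The pool methods map the conversion over all tools.
print(pool.to_openai_tool_schema())
print(pool.to_anthropic_tool_schema())
```

Executing through the pool (`pool.execute_tool(ctx, "echo", {...})`) routes by tool name and strips the returned string.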
acontext/agent/disk.py
ADDED

@@ -0,0 +1,325 @@
+from dataclasses import dataclass
+
+from .base import BaseContext, BaseTool, BaseToolPool
+from ..client import AcontextClient
+from ..uploads import FileUpload
+
+
+@dataclass
+class DiskContext(BaseContext):
+    client: AcontextClient
+    disk_id: str
+
+
+def _normalize_path(path: str | None) -> str:
+    """Normalize a file path to ensure it starts with '/'."""
+    if not path:
+        return "/"
+    normalized = path if path.startswith("/") else f"/{path}"
+    if not normalized.endswith("/"):
+        normalized += "/"
+    return normalized
+
+
+class WriteFileTool(BaseTool):
+    """Tool for writing text content to a file on the Acontext disk."""
+
+    @property
+    def name(self) -> str:
+        return "write_file"
+
+    @property
+    def description(self) -> str:
+        return "Write text content to a file in the file system. Creates the file if it doesn't exist, overwrites if it does."
+
+    @property
+    def arguments(self) -> dict:
+        return {
+            "file_path": {
+                "type": "string",
+                "description": "Optional folder path to organize files, e.g. '/notes/' or '/documents/'. Defaults to root '/' if not specified.",
+            },
+            "filename": {
+                "type": "string",
+                "description": "Filename such as 'report.md' or 'demo.txt'.",
+            },
+            "content": {
+                "type": "string",
+                "description": "Text content to write to the file.",
+            },
+        }
+
+    @property
+    def required_arguments(self) -> list[str]:
+        return ["filename", "content"]
+
+    def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+        """Write text content to a file."""
+        filename = llm_arguments.get("filename")
+        content = llm_arguments.get("content")
+        file_path = llm_arguments.get("file_path")
+
+        if not filename:
+            raise ValueError("filename is required")
+        if not content:
+            raise ValueError("content is required")
+
+        normalized_path = _normalize_path(file_path)
+        payload = FileUpload(filename=filename, content=content.encode("utf-8"))
+        artifact = ctx.client.disks.artifacts.upsert(
+            ctx.disk_id,
+            file=payload,
+            file_path=normalized_path,
+        )
+        return f"File '{artifact.filename}' written successfully to '{artifact.path}'"
+
+
+class ReadFileTool(BaseTool):
+    """Tool for reading a text file from the Acontext disk."""
+
+    @property
+    def name(self) -> str:
+        return "read_file"
+
+    @property
+    def description(self) -> str:
+        return "Read a text file from the file system and return its content."
+
+    @property
+    def arguments(self) -> dict:
+        return {
+            "file_path": {
+                "type": "string",
+                "description": "Optional directory path where the file is located, e.g. '/notes/'. Defaults to root '/' if not specified.",
+            },
+            "filename": {
+                "type": "string",
+                "description": "Filename to read.",
+            },
+            "line_offset": {
+                "type": "integer",
+                "description": "The line number to start reading from. Default to 0",
+            },
+            "line_limit": {
+                "type": "integer",
+                "description": "The maximum number of lines to return. Default to 100",
+            },
+        }
+
+    @property
+    def required_arguments(self) -> list[str]:
+        return ["filename"]
+
+    def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+        """Read a text file and return its content preview."""
+        filename = llm_arguments.get("filename")
+        file_path = llm_arguments.get("file_path")
+        line_offset = llm_arguments.get("line_offset", 0)
+        line_limit = llm_arguments.get("line_limit", 100)
+
+        if not filename:
+            raise ValueError("filename is required")
+
+        normalized_path = _normalize_path(file_path)
+        result = ctx.client.disks.artifacts.get(
+            ctx.disk_id,
+            file_path=normalized_path,
+            filename=filename,
+            with_content=True,
+        )
+
+        if not result.content:
+            raise RuntimeError("Failed to read file: server did not return content.")
+
+        content_str = result.content.raw
+        lines = content_str.split("\n")
+        line_start = min(line_offset, len(lines) - 1)
+        line_end = min(line_start + line_limit, len(lines))
+        preview = "\n".join(lines[line_start:line_end])
+        return f"[{normalized_path}{filename} - showing L{line_start}-{line_end} of {len(lines)} lines]\n{preview}"
+
+
+class ReplaceStringTool(BaseTool):
+    """Tool for replacing an old string with a new string in a file on the Acontext disk."""
+
+    @property
+    def name(self) -> str:
+        return "replace_string"
+
+    @property
+    def description(self) -> str:
+        return "Replace an old string with a new string in a file. Reads the file, performs the replacement, and writes it back."
+
+    @property
+    def arguments(self) -> dict:
+        return {
+            "file_path": {
+                "type": "string",
+                "description": "Optional directory path where the file is located, e.g. '/notes/'. Defaults to root '/' if not specified.",
+            },
+            "filename": {
+                "type": "string",
+                "description": "Filename to modify.",
+            },
+            "old_string": {
+                "type": "string",
+                "description": "The string to be replaced.",
+            },
+            "new_string": {
+                "type": "string",
+                "description": "The string to replace the old_string with.",
+            },
+        }
+
+    @property
+    def required_arguments(self) -> list[str]:
+        return ["filename", "old_string", "new_string"]
+
+    def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+        """Replace an old string with a new string in a file."""
+        filename = llm_arguments.get("filename")
+        file_path = llm_arguments.get("file_path")
+        old_string = llm_arguments.get("old_string")
+        new_string = llm_arguments.get("new_string")
+
+        if not filename:
+            raise ValueError("filename is required")
+        if old_string is None:
+            raise ValueError("old_string is required")
+        if new_string is None:
+            raise ValueError("new_string is required")
+
+        normalized_path = _normalize_path(file_path)
+
+        # Read the file content
+        result = ctx.client.disks.artifacts.get(
+            ctx.disk_id,
+            file_path=normalized_path,
+            filename=filename,
+            with_content=True,
+        )
+
+        if not result.content:
+            raise RuntimeError("Failed to read file: server did not return content.")
+
+        content_str = result.content.raw
+
+        # Perform the replacement
+        if old_string not in content_str:
+            return f"String '{old_string}' not found in file '{filename}'"
+
+        updated_content = content_str.replace(old_string, new_string)
+        replacement_count = content_str.count(old_string)
+
+        # Write the updated content back
+        payload = FileUpload(filename=filename, content=updated_content.encode("utf-8"))
+        ctx.client.disks.artifacts.upsert(
+            ctx.disk_id,
+            file=payload,
+            file_path=normalized_path,
+        )
+
+        return f"Found {replacement_count} old_string in {normalized_path}{filename} and replaced it."
+
+
+class ListTool(BaseTool):
+    """Tool for listing files in a directory on the Acontext disk."""
+
+    @property
+    def name(self) -> str:
+        return "list_artifacts"
+
+    @property
+    def description(self) -> str:
+        return "List all files and directories in a specified path on the disk."
+
+    @property
+    def arguments(self) -> dict:
+        return {
+            "file_path": {
+                "type": "string",
+                "description": "Optional directory path to list, e.g. '/todo/' or '/notes/'. Root is '/'",
+            },
+        }
+
+    @property
+    def required_arguments(self) -> list[str]:
+        return ["file_path"]
+
+    def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+        """List all files in a specified path."""
+        file_path = llm_arguments.get("file_path")
+        normalized_path = _normalize_path(file_path)
+
+        result = ctx.client.disks.artifacts.list(
+            ctx.disk_id,
+            path=normalized_path,
+        )
+
+        artifacts_list = [artifact.filename for artifact in result.artifacts]
+
+        if not artifacts_list and not result.directories:
+            return f"No files or directories found in '{normalized_path}'"
+
+        output_parts = []
+        if artifacts_list:
+            output_parts.append(f"Files: {', '.join(artifacts_list)}")
+        if result.directories:
+            output_parts.append(f"Directories: {', '.join(result.directories)}")
+
+        ls_sect = "\n".join(output_parts)
+        return f"""[Listing in {normalized_path}]
+{ls_sect}"""
+
+
+class DiskToolPool(BaseToolPool):
+    """Tool pool for disk operations on Acontext disks."""
+
+    def format_context(self, client: AcontextClient, disk_id: str) -> DiskContext:
+        return DiskContext(client=client, disk_id=disk_id)
+
+
+DISK_TOOLS = DiskToolPool()
+DISK_TOOLS.add_tool(WriteFileTool())
+DISK_TOOLS.add_tool(ReadFileTool())
+DISK_TOOLS.add_tool(ReplaceStringTool())
+DISK_TOOLS.add_tool(ListTool())
+
+
+if __name__ == "__main__":
+    client = AcontextClient(
+        api_key="sk-ac-your-root-api-bearer-token",
+        base_url="http://localhost:8029/api/v1",
+    )
+    print(client.ping())
+    new_disk = client.disks.create()
+
+    ctx = DISK_TOOLS.format_context(client, new_disk.id)
+    r = DISK_TOOLS.execute_tool(
+        ctx,
+        "write_file",
+        {"filename": "test.txt", "file_path": "/try/", "content": "Hello, world!"},
+    )
+    print(r)
+    r = DISK_TOOLS.execute_tool(
+        ctx, "read_file", {"filename": "test.txt", "file_path": "/try/"}
+    )
+    print(r)
+    r = DISK_TOOLS.execute_tool(ctx, "list_artifacts", {"file_path": "/"})
+    print(r)
+
+    r = DISK_TOOLS.execute_tool(
+        ctx,
+        "replace_string",
+        {
+            "filename": "test.txt",
+            "file_path": "/try/",
+            "old_string": "Hello",
+            "new_string": "Hi",
+        },
+    )
+    print(r)
+    r = DISK_TOOLS.execute_tool(
+        ctx, "read_file", {"filename": "test.txt", "file_path": "/try/"}
+    )
+    print(r)
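The `__main__` block above drives the tools directly; in an agent loop the same pool would typically be wired to a model's tool calls. A rough sketch follows, assuming the OpenAI Python SDK, a tool-calling model name, and a running Acontext server (none of which the diff itself pins down):

```python
import json

import openai

from acontext import AcontextClient
from acontext.agent.disk import DISK_TOOLS

acontext = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
disk = acontext.disks.create()
ctx = DISK_TOOLS.format_context(acontext, disk.id)

oai = openai.OpenAI()  # assumes OPENAI_API_KEY is set in the environment
resp = oai.chat.completions.create(
    model="gpt-4o-mini",  # assumed model; any tool-calling model works
    messages=[{"role": "user", "content": "Save a note 'buy milk' to todo.txt"}],
    tools=DISK_TOOLS.to_openai_tool_schema(),
)

# Route each tool call the model produced back through the pool.
for call in resp.choices[0].message.tool_calls or []:
    if DISK_TOOLS.tool_exists(call.function.name):
        args = json.loads(call.function.arguments)  # arguments arrive as a JSON string
        print(DISK_TOOLS.execute_tool(ctx, call.function.name, args))
```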
acontext/resources/async_spaces.py
CHANGED

@@ -3,7 +3,7 @@ Spaces endpoints (async).
 """
 
 from collections.abc import Mapping
-from typing import Any, List
+from typing import Any
 
 from .._utils import build_params
 from ..client_types import AsyncRequesterProtocol
@@ -11,7 +11,6 @@ from ..types.space import (
     ExperienceConfirmation,
     ListExperienceConfirmationsOutput,
     ListSpacesOutput,
-    SearchResultBlockItem,
     Space,
     SpaceSearchResult,
 )
@@ -133,62 +132,6 @@ class AsyncSpacesAPI:
         )
         return SpaceSearchResult.model_validate(data)
 
-    async def semantic_glob(
-        self,
-        space_id: str,
-        *,
-        query: str,
-        limit: int | None = None,
-        threshold: float | None = None,
-    ) -> List[SearchResultBlockItem]:
-        """Perform semantic glob (glob) search for page/folder titles.
-
-        Searches specifically for page/folder titles using semantic similarity,
-        similar to a semantic version of the glob command.
-
-        Args:
-            space_id: The UUID of the space.
-            query: Search query for page/folder titles.
-            limit: Maximum number of results to return (1-50, default 10).
-            threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-        Returns:
-            List of SearchResultBlockItem objects matching the query.
-        """
-        params = build_params(query=query, limit=limit, threshold=threshold)
-        data = await self._requester.request(
-            "GET", f"/space/{space_id}/semantic_glob", params=params or None
-        )
-        return [SearchResultBlockItem.model_validate(item) for item in data]
-
-    async def semantic_grep(
-        self,
-        space_id: str,
-        *,
-        query: str,
-        limit: int | None = None,
-        threshold: float | None = None,
-    ) -> List[SearchResultBlockItem]:
-        """Perform semantic grep search for content blocks.
-
-        Searches through content blocks (actual text content) using semantic similarity,
-        similar to a semantic version of the grep command.
-
-        Args:
-            space_id: The UUID of the space.
-            query: Search query for content blocks.
-            limit: Maximum number of results to return (1-50, default 10).
-            threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-        Returns:
-            List of SearchResultBlockItem objects matching the query.
-        """
-        params = build_params(query=query, limit=limit, threshold=threshold)
-        data = await self._requester.request(
-            "GET", f"/space/{space_id}/semantic_grep", params=params or None
-        )
-        return [SearchResultBlockItem.model_validate(item) for item in data]
-
     async def get_unconfirmed_experiences(
         self,
         space_id: str,
acontext/resources/spaces.py
CHANGED

@@ -3,7 +3,7 @@ Spaces endpoints.
 """
 
 from collections.abc import Mapping
-from typing import Any, List
+from typing import Any
 
 from .._utils import build_params
 from ..client_types import RequesterProtocol
@@ -11,7 +11,6 @@ from ..types.space import (
     ExperienceConfirmation,
     ListExperienceConfirmationsOutput,
     ListSpacesOutput,
-    SearchResultBlockItem,
     Space,
     SpaceSearchResult,
 )
@@ -131,62 +130,6 @@ class SpacesAPI:
         )
         return SpaceSearchResult.model_validate(data)
 
-    def semantic_glob(
-        self,
-        space_id: str,
-        *,
-        query: str,
-        limit: int | None = None,
-        threshold: float | None = None,
-    ) -> List[SearchResultBlockItem]:
-        """Perform semantic glob (glob) search for page/folder titles.
-
-        Searches specifically for page/folder titles using semantic similarity,
-        similar to a semantic version of the glob command.
-
-        Args:
-            space_id: The UUID of the space.
-            query: Search query for page/folder titles.
-            limit: Maximum number of results to return (1-50, default 10).
-            threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-        Returns:
-            List of SearchResultBlockItem objects matching the query.
-        """
-        params = build_params(query=query, limit=limit, threshold=threshold)
-        data = self._requester.request(
-            "GET", f"/space/{space_id}/semantic_glob", params=params or None
-        )
-        return [SearchResultBlockItem.model_validate(item) for item in data]
-
-    def semantic_grep(
-        self,
-        space_id: str,
-        *,
-        query: str,
-        limit: int | None = None,
-        threshold: float | None = None,
-    ) -> List[SearchResultBlockItem]:
-        """Perform semantic grep search for content blocks.
-
-        Searches through content blocks (actual text content) using semantic similarity,
-        similar to a semantic version of the grep command.
-
-        Args:
-            space_id: The UUID of the space.
-            query: Search query for content blocks.
-            limit: Maximum number of results to return (1-50, default 10).
-            threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-        Returns:
-            List of SearchResultBlockItem objects matching the query.
-        """
-        params = build_params(query=query, limit=limit, threshold=threshold)
-        data = self._requester.request(
-            "GET", f"/space/{space_id}/semantic_grep", params=params or None
-        )
-        return [SearchResultBlockItem.model_validate(item) for item in data]
-
     def get_unconfirmed_experiences(
         self,
         space_id: str,
{acontext-0.0.6.dist-info → acontext-0.0.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: acontext
-Version: 0.0.6
+Version: 0.0.7
 Summary: Python SDK for the Acontext API
 Keywords: acontext,sdk,client,api
 Requires-Dist: httpx>=0.28.1
@@ -304,6 +304,144 @@ result = client.tools.rename_tool_name(
 print(result.status) # 0 for success
 ```
 
+### Agent Tools
+
+The SDK provides agent tools that allow LLMs (OpenAI, Anthropic) to interact with Acontext disks through function calling. These tools can be converted to OpenAI or Anthropic tool schemas and executed when the LLM calls them.
+
+#### Pre-configured Disk Tools
+
+The SDK includes a pre-configured `DISK_TOOLS` pool with four disk operation tools:
+
+- **`write_file`**: Write text content to a file
+- **`read_file`**: Read a text file with optional line offset and limit
+- **`replace_string`**: Replace strings in a file
+- **`list_artifacts`**: List files and directories in a path
+
+#### Getting Tool Schemas for LLM APIs
+
+Convert tools to the appropriate format for your LLM provider:
+
+```python
+from acontext import AcontextClient
+from acontext.agent.disk import DISK_TOOLS
+
+client = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
+
+# Get OpenAI-compatible tool schemas
+openai_tools = DISK_TOOLS.to_openai_tool_schema()
+
+# Get Anthropic-compatible tool schemas
+anthropic_tools = DISK_TOOLS.to_anthropic_tool_schema()
+
+# Use with OpenAI API
+import openai
+openai_client = openai.OpenAI(api_key="your-openai-key")
+completion = openai_client.chat.completions.create(
+    model="gpt-4",
+    messages=[{"role": "user", "content": 'Write a file called hello.txt with "Hello, World!"'}],
+    tools=openai_tools,
+)
+```
+
+#### Executing Tools
+
+When an LLM calls a tool, execute it using the tool pool:
+
+```python
+from acontext import AcontextClient
+from acontext.agent.disk import DISK_TOOLS
+
+client = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
+
+# Create a disk for the tools to operate on
+disk = client.disks.create()
+
+# Create a context for the tools
+ctx = DISK_TOOLS.format_context(client, disk.id)
+
+# Execute a tool (e.g., after LLM returns a tool call)
+result = DISK_TOOLS.execute_tool(
+    ctx,
+    "write_file",
+    {"filename": "hello.txt", "file_path": "/notes/", "content": "Hello, World!"}
+)
+print(result) # File 'hello.txt' written successfully to '/notes/hello.txt'
+
+# Read the file
+read_result = DISK_TOOLS.execute_tool(
+    ctx,
+    "read_file",
+    {"filename": "hello.txt", "file_path": "/notes/"}
+)
+print(read_result)
+
+# List files in a directory
+list_result = DISK_TOOLS.execute_tool(
+    ctx,
+    "list_artifacts",
+    {"file_path": "/notes/"}
+)
+print(list_result)
+
+# Replace a string in a file
+replace_result = DISK_TOOLS.execute_tool(
+    ctx,
+    "replace_string",
+    {
+        "filename": "hello.txt",
+        "file_path": "/notes/",
+        "old_string": "Hello",
+        "new_string": "Hi",
+    }
+)
+print(replace_result)
+```
+
+#### Creating Custom Tools
+
+You can create custom tools by extending `BaseTool`:
+
+```python
+from acontext.agent.base import BaseTool, BaseToolPool, BaseContext
+from typing import Dict, Any
+
+class MyCustomTool(BaseTool):
+    @property
+    def name(self) -> str:
+        return "my_custom_tool"
+
+    @property
+    def description(self) -> str:
+        return "A custom tool that does something"
+
+    @property
+    def arguments(self) -> dict:
+        return {
+            "param1": {
+                "type": "string",
+                "description": "First parameter",
+            }
+        }
+
+    @property
+    def required_arguments(self) -> list[str]:
+        return ["param1"]
+
+    def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
+        param1 = llm_arguments.get("param1")
+        # Your custom logic here
+        return f"Result: {param1}"
+
+# Create a custom tool pool
+class MyToolPool(BaseToolPool):
+    def format_context(self, *args, **kwargs) -> BaseContext:
+        # Create and return your context
+        return BaseContext()
+
+my_pool = MyToolPool()
+my_pool.add_tool(MyCustomTool())
+```
+
 ### Blocks API
 
 #### List blocks
@@ -484,7 +622,7 @@ for artifact in artifacts.items:
 
 ### Semantic search within spaces
 
-The SDK provides
+The SDK provides a powerful semantic search API for finding content within your spaces:
 
 #### 1. Experience Search (Advanced AI-powered search)
 
@@ -520,39 +658,4 @@ if result.final_answer:
     print(f"AI Answer: {result.final_answer}")
 ```
 
-#### 2. Semantic Glob (Search page/folder titles)
-
-Search for pages and folders by their titles using semantic similarity (like a semantic version of `glob`):
-
-```python
-# Find pages about authentication
-results = client.spaces.semantic_glob(
-    space_id="space-uuid",
-    query="authentication and authorization pages",
-    limit=10,
-    threshold=1.0,  # Only show results with distance < 1.0
-)
-
-for block in results:
-    print(f"{block.title} - {block.type}")
-```
-
-#### 3. Semantic Grep (Search content blocks)
-
-Search through actual content blocks using semantic similarity (like a semantic version of `grep`):
-
-```python
-# Find code examples for JWT validation
-results = client.spaces.semantic_grep(
-    space_id="space-uuid",
-    query="JWT token validation code examples",
-    limit=15,
-    threshold=0.7,
-)
-
-for block in results:
-    print(f"{block.title} - distance: {block.distance}")
-    print(f"Content: {block.props.get('text', '')[:100]}...")
-```
-
 See `examples/search_usage.py` for more detailed examples including async usage.
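The README's custom-tool example above stops at a standalone pool; `BaseToolPool.extent_tool_pool` (added in `agent/base.py` above) also lets a project-specific pool absorb the built-in disk tools. A small sketch follows; the `TimestampTool` class is hypothetical and not part of the package:

```python
from datetime import datetime, timezone

from acontext.agent.base import BaseContext, BaseTool, BaseToolPool
from acontext.agent.disk import DISK_TOOLS


class TimestampTool(BaseTool):
    """Hypothetical tool that reports the current UTC time."""

    @property
    def name(self) -> str:
        return "current_time"

    @property
    def description(self) -> str:
        return "Return the current UTC timestamp."

    @property
    def arguments(self) -> dict:
        return {}

    @property
    def required_arguments(self) -> list[str]:
        return []

    def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
        return datetime.now(timezone.utc).isoformat()


pool = BaseToolPool()
pool.add_tool(TimestampTool())
# extent_tool_pool copies the other pool's name -> tool mapping into this one,
# so write_file, read_file, replace_string and list_artifacts become available here too.
pool.extent_tool_pool(DISK_TOOLS)
print(sorted(pool.tools))
```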
{acontext-0.0.6.dist-info → acontext-0.0.7.dist-info}/RECORD
CHANGED

@@ -1,6 +1,9 @@
 acontext/__init__.py,sha256=CZzHIZ2VEtNQFj0MkH3eDx-A78fSCYsUklDvhYUsD2Y,965
 acontext/_constants.py,sha256=-OxfFwn4UdkQiNkyWhBmpM7KnZv6SgR-gMPEkjLKtDA,362
 acontext/_utils.py,sha256=GKQH45arKh0sDu64u-5jwrII_ctnU_oChYlgR5lRkfE,1250
+acontext/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+acontext/agent/base.py,sha256=3uuvC2Hhw1C97smooYX2301CQahYuz9nymMG-t8fsFc,2463
+acontext/agent/disk.py,sha256=tYSvdWKjJ6KIvAk3UzJaQABB4rOL7GAMvEVmI715-hw,10566
 acontext/async_client.py,sha256=mfyR-WVy_LEOXjrkTcXLNaPDC5euaZ9zFI_5e1gapGs,8153
 acontext/client.py,sha256=jFr5-Q8obdcZBamUNtY3fCklpOJiNiIKLzEApTmbnTg,7920
 acontext/client_types.py,sha256=uVBWzLbZyXrqkljG49ojdQL_xX6N3n_HGt4Bs_TEE48,987
@@ -11,12 +14,12 @@ acontext/resources/__init__.py,sha256=KSVQ3YJ-wuU6OWGWJ47gDGbjjetP102aEsVSR8JUb_
 acontext/resources/async_blocks.py,sha256=e_iJpgcAdwS2tXQ16MMCnySlddp5JV3BadN_EyqZSF4,5555
 acontext/resources/async_disks.py,sha256=2JjLpUkz5YZEkLt_jCts_khTG_b7lvf4cUMfoaJcnI8,6471
 acontext/resources/async_sessions.py,sha256=b5oha4np1-RmNf2eRsnuJ2UJjdPyztExYQF-7_1pIAc,11894
-acontext/resources/async_spaces.py,sha256=
+acontext/resources/async_spaces.py,sha256=s3CrIDmabh2tOOPtuDn40aAsFaG2oAVFonjoNPHtiV0,6410
 acontext/resources/async_tools.py,sha256=RbGaF2kX65Mun-q-Fp5H1J8waWTLIdCOfbdY19jpn1o,1091
 acontext/resources/blocks.py,sha256=HJdAy5HdyTcHCYCPmqNdvApYKZ6aWs-ORIi_wQt3TUM,5447
 acontext/resources/disks.py,sha256=BjVhVXoujHWhg6L0TG9GmW9HLTTldJYEPxCbuppRkc4,6336
 acontext/resources/sessions.py,sha256=9iVd_2ZCxwuUFIfdsp1lRSTHb9JoFTHFkhzT7065kwY,11715
-acontext/resources/spaces.py,sha256=
+acontext/resources/spaces.py,sha256=krPC836R9FDFrgdxZ-hmcF-mMwrGhI0Cac3zszG0URA,6269
 acontext/resources/tools.py,sha256=II_185B0HYKSP43hizE6C1zs7kjkkPLKihuEG8s-DRY,1046
 acontext/types/__init__.py,sha256=vrKXYUO8zZ4uIBFqRdnp-DRY0tzTDk7rNSUn0bdpTA8,1418
 acontext/types/block.py,sha256=CzKByunk642rWXNNnh8cx67UzKLKDAxODmC_whwjP90,1078
@@ -25,6 +28,6 @@ acontext/types/session.py,sha256=OprkkCsEbDHrQ_Kpd6yv8lcyoCfV54IcTmNft-gAyPI,565
 acontext/types/space.py,sha256=uxbPrOHYpsntPHqhMCLQ2KovM7BngHC5Q2j7qexVrN8,2537
 acontext/types/tool.py,sha256=-mVn-vgk2SENK0Ubt-ZgWFZxKa-ddABqcAgXQ69YY-E,805
 acontext/uploads.py,sha256=6twnqQOY_eerNuEjeSKsE_3S0IfJUiczXtAy4aXqDl8,1379
-acontext-0.0.
-acontext-0.0.
-acontext-0.0.
+acontext-0.0.7.dist-info/WHEEL,sha256=z-mOpxbJHqy3cq6SvUThBZdaLGFZzdZPtgWLcP2NKjQ,79
+acontext-0.0.7.dist-info/METADATA,sha256=pGwq18SKhhmwrXpVWKB7FHuTplaAmkxEEK4QwidULQI,14395
+acontext-0.0.7.dist-info/RECORD,,