acontext 0.0.6.tar.gz → 0.0.7.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
Files changed (33)
  1. {acontext-0.0.6 → acontext-0.0.7}/PKG-INFO +140 -37
  2. {acontext-0.0.6 → acontext-0.0.7}/README.md +139 -36
  3. {acontext-0.0.6 → acontext-0.0.7}/pyproject.toml +1 -1
  4. acontext-0.0.7/src/acontext/agent/base.py +89 -0
  5. acontext-0.0.7/src/acontext/agent/disk.py +325 -0
  6. acontext-0.0.7/src/acontext/py.typed +0 -0
  7. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/async_spaces.py +1 -58
  8. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/spaces.py +1 -58
  9. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/__init__.py +0 -0
  10. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/_constants.py +0 -0
  11. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/_utils.py +0 -0
  12. /acontext-0.0.6/src/acontext/py.typed → /acontext-0.0.7/src/acontext/agent/__init__.py +0 -0
  13. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/async_client.py +0 -0
  14. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/client.py +0 -0
  15. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/client_types.py +0 -0
  16. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/errors.py +0 -0
  17. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/messages.py +0 -0
  18. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/__init__.py +0 -0
  19. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/async_blocks.py +0 -0
  20. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/async_disks.py +0 -0
  21. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/async_sessions.py +0 -0
  22. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/async_tools.py +0 -0
  23. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/blocks.py +0 -0
  24. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/disks.py +0 -0
  25. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/sessions.py +0 -0
  26. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/tools.py +0 -0
  27. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/types/__init__.py +0 -0
  28. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/types/block.py +0 -0
  29. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/types/disk.py +0 -0
  30. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/types/session.py +0 -0
  31. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/types/space.py +0 -0
  32. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/types/tool.py +0 -0
  33. {acontext-0.0.6 → acontext-0.0.7}/src/acontext/uploads.py +0 -0

{acontext-0.0.6 → acontext-0.0.7}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: acontext
- Version: 0.0.6
+ Version: 0.0.7
  Summary: Python SDK for the Acontext API
  Keywords: acontext,sdk,client,api
  Requires-Dist: httpx>=0.28.1
@@ -304,6 +304,144 @@ result = client.tools.rename_tool_name(
  print(result.status) # 0 for success
  ```

+ ### Agent Tools
+
+ The SDK provides agent tools that allow LLMs (OpenAI, Anthropic) to interact with Acontext disks through function calling. These tools can be converted to OpenAI or Anthropic tool schemas and executed when the LLM calls them.
+
+ #### Pre-configured Disk Tools
+
+ The SDK includes a pre-configured `DISK_TOOLS` pool with four disk operation tools:
+
+ - **`write_file`**: Write text content to a file
+ - **`read_file`**: Read a text file with optional line offset and limit
+ - **`replace_string`**: Replace strings in a file
+ - **`list_artifacts`**: List files and directories in a path
+
+ #### Getting Tool Schemas for LLM APIs
+
+ Convert tools to the appropriate format for your LLM provider:
+
+ ```python
+ from acontext import AcontextClient
+ from acontext.agent.disk import DISK_TOOLS
+
+ client = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
+
+ # Get OpenAI-compatible tool schemas
+ openai_tools = DISK_TOOLS.to_openai_tool_schema()
+
+ # Get Anthropic-compatible tool schemas
+ anthropic_tools = DISK_TOOLS.to_anthropic_tool_schema()
+
+ # Use with OpenAI API
+ import openai
+ openai_client = openai.OpenAI(api_key="your-openai-key")
+ completion = openai_client.chat.completions.create(
+     model="gpt-4",
+     messages=[{"role": "user", "content": 'Write a file called hello.txt with "Hello, World!"'}],
+     tools=openai_tools,
+ )
+ ```
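+
+ A minimal dispatch sketch for routing the model's tool calls back into the pool (assuming the standard OpenAI Python client response shape, where each call's `function.arguments` is a JSON-encoded string) might look like this:
+
+ ```python
+ import json
+
+ # A disk and a context give the tools somewhere to operate.
+ disk = client.disks.create()
+ ctx = DISK_TOOLS.format_context(client, disk.id)
+
+ message = completion.choices[0].message
+ for tool_call in message.tool_calls or []:
+     name = tool_call.function.name
+     arguments = json.loads(tool_call.function.arguments)
+     if DISK_TOOLS.tool_exists(name):
+         # Execute the matching disk tool and print its textual result.
+         print(DISK_TOOLS.execute_tool(ctx, name, arguments))
+ ```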
+
+ #### Executing Tools
+
+ When an LLM calls a tool, execute it using the tool pool:
+
+ ```python
+ from acontext import AcontextClient
+ from acontext.agent.disk import DISK_TOOLS
+
+ client = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
+
+ # Create a disk for the tools to operate on
+ disk = client.disks.create()
+
+ # Create a context for the tools
+ ctx = DISK_TOOLS.format_context(client, disk.id)
+
+ # Execute a tool (e.g., after LLM returns a tool call)
+ result = DISK_TOOLS.execute_tool(
+     ctx,
+     "write_file",
+     {"filename": "hello.txt", "file_path": "/notes/", "content": "Hello, World!"}
+ )
+ print(result) # File 'hello.txt' written successfully to '/notes/hello.txt'
+
+ # Read the file
+ read_result = DISK_TOOLS.execute_tool(
+     ctx,
+     "read_file",
+     {"filename": "hello.txt", "file_path": "/notes/"}
+ )
+ print(read_result)
+
+ # List files in a directory
+ list_result = DISK_TOOLS.execute_tool(
+     ctx,
+     "list_artifacts",
+     {"file_path": "/notes/"}
+ )
+ print(list_result)
+
+ # Replace a string in a file
+ replace_result = DISK_TOOLS.execute_tool(
+     ctx,
+     "replace_string",
+     {
+         "filename": "hello.txt",
+         "file_path": "/notes/",
+         "old_string": "Hello",
+         "new_string": "Hi",
+     }
+ )
+ print(replace_result)
+ ```
+
+ #### Creating Custom Tools
+
+ You can create custom tools by extending `BaseTool`:
+
+ ```python
+ from acontext.agent.base import BaseTool, BaseToolPool, BaseContext
+
+ class MyCustomTool(BaseTool):
+     @property
+     def name(self) -> str:
+         return "my_custom_tool"
+
+     @property
+     def description(self) -> str:
+         return "A custom tool that does something"
+
+     @property
+     def arguments(self) -> dict:
+         return {
+             "param1": {
+                 "type": "string",
+                 "description": "First parameter",
+             }
+         }
+
+     @property
+     def required_arguments(self) -> list[str]:
+         return ["param1"]
+
+     def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
+         param1 = llm_arguments.get("param1")
+         # Your custom logic here
+         return f"Result: {param1}"
+
+ # Create a custom tool pool
+ class MyToolPool(BaseToolPool):
+     def format_context(self, *args, **kwargs) -> BaseContext:
+         # Create and return your context
+         return BaseContext()
+
+ my_pool = MyToolPool()
+ my_pool.add_tool(MyCustomTool())
+ ```
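+
+ A custom pool exposes the same schema conversion and dispatch helpers as `DISK_TOOLS`. A brief usage sketch (the argument value below is made up for illustration):
+
+ ```python
+ # Schemas for the custom pool, in OpenAI format.
+ tools_for_llm = my_pool.to_openai_tool_schema()
+
+ # Build a context and dispatch a call as the model would make it.
+ ctx = my_pool.format_context()
+ print(my_pool.execute_tool(ctx, "my_custom_tool", {"param1": "demo"}))  # Result: demo
+ ```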
+
  ### Blocks API

  #### List blocks
@@ -484,7 +622,7 @@ for artifact in artifacts.items:

  ### Semantic search within spaces

- The SDK provides three powerful semantic search APIs for finding content within your spaces:
+ The SDK provides a powerful semantic search API for finding content within your spaces:

  #### 1. Experience Search (Advanced AI-powered search)

@@ -520,39 +658,4 @@ if result.final_answer:
      print(f"AI Answer: {result.final_answer}")
  ```

- #### 2. Semantic Glob (Search page/folder titles)
-
- Search for pages and folders by their titles using semantic similarity (like a semantic version of `glob`):
-
- ```python
- # Find pages about authentication
- results = client.spaces.semantic_glob(
-     space_id="space-uuid",
-     query="authentication and authorization pages",
-     limit=10,
-     threshold=1.0, # Only show results with distance < 1.0
- )
-
- for block in results:
-     print(f"{block.title} - {block.type}")
- ```
-
- #### 3. Semantic Grep (Search content blocks)
-
- Search through actual content blocks using semantic similarity (like a semantic version of `grep`):
-
- ```python
- # Find code examples for JWT validation
- results = client.spaces.semantic_grep(
-     space_id="space-uuid",
-     query="JWT token validation code examples",
-     limit=15,
-     threshold=0.7,
- )
-
- for block in results:
-     print(f"{block.title} - distance: {block.distance}")
-     print(f"Content: {block.props.get('text', '')[:100]}...")
- ```
-
  See `examples/search_usage.py` for more detailed examples including async usage.

{acontext-0.0.6 → acontext-0.0.7}/README.md
@@ -289,6 +289,144 @@ result = client.tools.rename_tool_name(
  print(result.status) # 0 for success
  ```

+ ### Agent Tools
+
+ The SDK provides agent tools that allow LLMs (OpenAI, Anthropic) to interact with Acontext disks through function calling. These tools can be converted to OpenAI or Anthropic tool schemas and executed when the LLM calls them.
+
+ #### Pre-configured Disk Tools
+
+ The SDK includes a pre-configured `DISK_TOOLS` pool with four disk operation tools:
+
+ - **`write_file`**: Write text content to a file
+ - **`read_file`**: Read a text file with optional line offset and limit
+ - **`replace_string`**: Replace strings in a file
+ - **`list_artifacts`**: List files and directories in a path
+
+ #### Getting Tool Schemas for LLM APIs
+
+ Convert tools to the appropriate format for your LLM provider:
+
+ ```python
+ from acontext import AcontextClient
+ from acontext.agent.disk import DISK_TOOLS
+
+ client = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
+
+ # Get OpenAI-compatible tool schemas
+ openai_tools = DISK_TOOLS.to_openai_tool_schema()
+
+ # Get Anthropic-compatible tool schemas
+ anthropic_tools = DISK_TOOLS.to_anthropic_tool_schema()
+
+ # Use with OpenAI API
+ import openai
+ openai_client = openai.OpenAI(api_key="your-openai-key")
+ completion = openai_client.chat.completions.create(
+     model="gpt-4",
+     messages=[{"role": "user", "content": 'Write a file called hello.txt with "Hello, World!"'}],
+     tools=openai_tools,
+ )
+ ```
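+
+ A minimal dispatch sketch for routing the model's tool calls back into the pool (assuming the standard OpenAI Python client response shape, where each call's `function.arguments` is a JSON-encoded string) might look like this:
+
+ ```python
+ import json
+
+ # A disk and a context give the tools somewhere to operate.
+ disk = client.disks.create()
+ ctx = DISK_TOOLS.format_context(client, disk.id)
+
+ message = completion.choices[0].message
+ for tool_call in message.tool_calls or []:
+     name = tool_call.function.name
+     arguments = json.loads(tool_call.function.arguments)
+     if DISK_TOOLS.tool_exists(name):
+         # Execute the matching disk tool and print its textual result.
+         print(DISK_TOOLS.execute_tool(ctx, name, arguments))
+ ```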
+
+ #### Executing Tools
+
+ When an LLM calls a tool, execute it using the tool pool:
+
+ ```python
+ from acontext import AcontextClient
+ from acontext.agent.disk import DISK_TOOLS
+
+ client = AcontextClient(api_key="sk-ac-your-root-api-bearer-token")
+
+ # Create a disk for the tools to operate on
+ disk = client.disks.create()
+
+ # Create a context for the tools
+ ctx = DISK_TOOLS.format_context(client, disk.id)
+
+ # Execute a tool (e.g., after LLM returns a tool call)
+ result = DISK_TOOLS.execute_tool(
+     ctx,
+     "write_file",
+     {"filename": "hello.txt", "file_path": "/notes/", "content": "Hello, World!"}
+ )
+ print(result) # File 'hello.txt' written successfully to '/notes/hello.txt'
+
+ # Read the file
+ read_result = DISK_TOOLS.execute_tool(
+     ctx,
+     "read_file",
+     {"filename": "hello.txt", "file_path": "/notes/"}
+ )
+ print(read_result)
+
+ # List files in a directory
+ list_result = DISK_TOOLS.execute_tool(
+     ctx,
+     "list_artifacts",
+     {"file_path": "/notes/"}
+ )
+ print(list_result)
+
+ # Replace a string in a file
+ replace_result = DISK_TOOLS.execute_tool(
+     ctx,
+     "replace_string",
+     {
+         "filename": "hello.txt",
+         "file_path": "/notes/",
+         "old_string": "Hello",
+         "new_string": "Hi",
+     }
+ )
+ print(replace_result)
+ ```
+
+ #### Creating Custom Tools
+
+ You can create custom tools by extending `BaseTool`:
+
+ ```python
+ from acontext.agent.base import BaseTool, BaseToolPool, BaseContext
+
+ class MyCustomTool(BaseTool):
+     @property
+     def name(self) -> str:
+         return "my_custom_tool"
+
+     @property
+     def description(self) -> str:
+         return "A custom tool that does something"
+
+     @property
+     def arguments(self) -> dict:
+         return {
+             "param1": {
+                 "type": "string",
+                 "description": "First parameter",
+             }
+         }
+
+     @property
+     def required_arguments(self) -> list[str]:
+         return ["param1"]
+
+     def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
+         param1 = llm_arguments.get("param1")
+         # Your custom logic here
+         return f"Result: {param1}"
+
+ # Create a custom tool pool
+ class MyToolPool(BaseToolPool):
+     def format_context(self, *args, **kwargs) -> BaseContext:
+         # Create and return your context
+         return BaseContext()
+
+ my_pool = MyToolPool()
+ my_pool.add_tool(MyCustomTool())
+ ```
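+
+ A custom pool exposes the same schema conversion and dispatch helpers as `DISK_TOOLS`. A brief usage sketch (the argument value below is made up for illustration):
+
+ ```python
+ # Schemas for the custom pool, in OpenAI format.
+ tools_for_llm = my_pool.to_openai_tool_schema()
+
+ # Build a context and dispatch a call as the model would make it.
+ ctx = my_pool.format_context()
+ print(my_pool.execute_tool(ctx, "my_custom_tool", {"param1": "demo"}))  # Result: demo
+ ```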
+
  ### Blocks API

  #### List blocks
@@ -469,7 +607,7 @@ for artifact in artifacts.items:

  ### Semantic search within spaces

- The SDK provides three powerful semantic search APIs for finding content within your spaces:
+ The SDK provides a powerful semantic search API for finding content within your spaces:

  #### 1. Experience Search (Advanced AI-powered search)

@@ -505,39 +643,4 @@ if result.final_answer:
      print(f"AI Answer: {result.final_answer}")
  ```

- #### 2. Semantic Glob (Search page/folder titles)
-
- Search for pages and folders by their titles using semantic similarity (like a semantic version of `glob`):
-
- ```python
- # Find pages about authentication
- results = client.spaces.semantic_glob(
-     space_id="space-uuid",
-     query="authentication and authorization pages",
-     limit=10,
-     threshold=1.0, # Only show results with distance < 1.0
- )
-
- for block in results:
-     print(f"{block.title} - {block.type}")
- ```
-
- #### 3. Semantic Grep (Search content blocks)
-
- Search through actual content blocks using semantic similarity (like a semantic version of `grep`):
-
- ```python
- # Find code examples for JWT validation
- results = client.spaces.semantic_grep(
-     space_id="space-uuid",
-     query="JWT token validation code examples",
-     limit=15,
-     threshold=0.7,
- )
-
- for block in results:
-     print(f"{block.title} - distance: {block.distance}")
-     print(f"Content: {block.props.get('text', '')[:100]}...")
- ```
-
  See `examples/search_usage.py` for more detailed examples including async usage.

{acontext-0.0.6 → acontext-0.0.7}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "acontext"
- version = "0.0.6"
+ version = "0.0.7"
  description = "Python SDK for the Acontext API"
  readme = "README.md"
  requires-python = ">=3.10"

acontext-0.0.7/src/acontext/agent/base.py
@@ -0,0 +1,89 @@
+ class BaseContext:
+     pass
+
+
+ class BaseConverter:
+     def to_openai_tool_schema(self) -> dict:
+         raise NotImplementedError
+
+     def to_anthropic_tool_schema(self) -> dict:
+         raise NotImplementedError
+
+
+ class BaseTool(BaseConverter):
+     @property
+     def name(self) -> str:
+         raise NotImplementedError
+
+     @property
+     def description(self) -> str:
+         raise NotImplementedError
+
+     @property
+     def arguments(self) -> dict:
+         raise NotImplementedError
+
+     @property
+     def required_arguments(self) -> list[str]:
+         raise NotImplementedError
+
+     def execute(self, ctx: BaseContext, llm_arguments: dict) -> str:
+         raise NotImplementedError
+
+     def to_openai_tool_schema(self) -> dict:
+         return {
+             "type": "function",
+             "function": {
+                 "name": self.name,
+                 "description": self.description,
+                 "parameters": {
+                     "type": "object",
+                     "properties": self.arguments,
+                     "required": self.required_arguments,
+                 },
+             },
+         }
+
+     def to_anthropic_tool_schema(self) -> dict:
+         return {
+             "name": self.name,
+             "description": self.description,
+             "input_schema": {
+                 "type": "object",
+                 "properties": self.arguments,
+                 "required": self.required_arguments,
+             },
+         }
+
+
+ class BaseToolPool(BaseConverter):
+     def __init__(self):
+         self.tools: dict[str, BaseTool] = {}
+
+     def add_tool(self, tool: BaseTool):
+         self.tools[tool.name] = tool
+
+     def remove_tool(self, tool_name: str):
+         self.tools.pop(tool_name)
+
+     def extent_tool_pool(self, pool: "BaseToolPool"):
+         self.tools.update(pool.tools)
+
+     def execute_tool(
+         self, ctx: BaseContext, tool_name: str, llm_arguments: dict
+     ) -> str:
+         tool = self.tools[tool_name]
+         r = tool.execute(ctx, llm_arguments)
+         return r.strip()
+
+     def tool_exists(self, tool_name: str) -> bool:
+         return tool_name in self.tools
+
+     def to_openai_tool_schema(self) -> list[dict]:
+         return [tool.to_openai_tool_schema() for tool in self.tools.values()]
+
+     def to_anthropic_tool_schema(self) -> list[dict]:
+         return [tool.to_anthropic_tool_schema() for tool in self.tools.values()]
+
+     def format_context(self, *args, **kwargs) -> BaseContext:
+         raise NotImplementedError

acontext-0.0.7/src/acontext/agent/disk.py
@@ -0,0 +1,325 @@
+ from dataclasses import dataclass
+
+ from .base import BaseContext, BaseTool, BaseToolPool
+ from ..client import AcontextClient
+ from ..uploads import FileUpload
+
+
+ @dataclass
+ class DiskContext(BaseContext):
+     client: AcontextClient
+     disk_id: str
+
+
+ def _normalize_path(path: str | None) -> str:
+     """Normalize a file path to ensure it starts with '/'."""
+     if not path:
+         return "/"
+     normalized = path if path.startswith("/") else f"/{path}"
+     if not normalized.endswith("/"):
+         normalized += "/"
+     return normalized
+
+
+ class WriteFileTool(BaseTool):
+     """Tool for writing text content to a file on the Acontext disk."""
+
+     @property
+     def name(self) -> str:
+         return "write_file"
+
+     @property
+     def description(self) -> str:
+         return "Write text content to a file in the file system. Creates the file if it doesn't exist, overwrites if it does."
+
+     @property
+     def arguments(self) -> dict:
+         return {
+             "file_path": {
+                 "type": "string",
+                 "description": "Optional folder path to organize files, e.g. '/notes/' or '/documents/'. Defaults to root '/' if not specified.",
+             },
+             "filename": {
+                 "type": "string",
+                 "description": "Filename such as 'report.md' or 'demo.txt'.",
+             },
+             "content": {
+                 "type": "string",
+                 "description": "Text content to write to the file.",
+             },
+         }
+
+     @property
+     def required_arguments(self) -> list[str]:
+         return ["filename", "content"]
+
+     def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+         """Write text content to a file."""
+         filename = llm_arguments.get("filename")
+         content = llm_arguments.get("content")
+         file_path = llm_arguments.get("file_path")
+
+         if not filename:
+             raise ValueError("filename is required")
+         if not content:
+             raise ValueError("content is required")
+
+         normalized_path = _normalize_path(file_path)
+         payload = FileUpload(filename=filename, content=content.encode("utf-8"))
+         artifact = ctx.client.disks.artifacts.upsert(
+             ctx.disk_id,
+             file=payload,
+             file_path=normalized_path,
+         )
+         return f"File '{artifact.filename}' written successfully to '{artifact.path}'"
+
+
+ class ReadFileTool(BaseTool):
+     """Tool for reading a text file from the Acontext disk."""
+
+     @property
+     def name(self) -> str:
+         return "read_file"
+
+     @property
+     def description(self) -> str:
+         return "Read a text file from the file system and return its content."
+
+     @property
+     def arguments(self) -> dict:
+         return {
+             "file_path": {
+                 "type": "string",
+                 "description": "Optional directory path where the file is located, e.g. '/notes/'. Defaults to root '/' if not specified.",
+             },
+             "filename": {
+                 "type": "string",
+                 "description": "Filename to read.",
+             },
+             "line_offset": {
+                 "type": "integer",
+                 "description": "The line number to start reading from. Default to 0",
+             },
+             "line_limit": {
+                 "type": "integer",
+                 "description": "The maximum number of lines to return. Default to 100",
+             },
+         }
+
+     @property
+     def required_arguments(self) -> list[str]:
+         return ["filename"]
+
+     def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+         """Read a text file and return its content preview."""
+         filename = llm_arguments.get("filename")
+         file_path = llm_arguments.get("file_path")
+         line_offset = llm_arguments.get("line_offset", 0)
+         line_limit = llm_arguments.get("line_limit", 100)
+
+         if not filename:
+             raise ValueError("filename is required")
+
+         normalized_path = _normalize_path(file_path)
+         result = ctx.client.disks.artifacts.get(
+             ctx.disk_id,
+             file_path=normalized_path,
+             filename=filename,
+             with_content=True,
+         )
+
+         if not result.content:
+             raise RuntimeError("Failed to read file: server did not return content.")
+
+         content_str = result.content.raw
+         lines = content_str.split("\n")
+         line_start = min(line_offset, len(lines) - 1)
+         line_end = min(line_start + line_limit, len(lines))
+         preview = "\n".join(lines[line_start:line_end])
+         return f"[{normalized_path}{filename} - showing L{line_start}-{line_end} of {len(lines)} lines]\n{preview}"
+
+
+ class ReplaceStringTool(BaseTool):
+     """Tool for replacing an old string with a new string in a file on the Acontext disk."""
+
+     @property
+     def name(self) -> str:
+         return "replace_string"
+
+     @property
+     def description(self) -> str:
+         return "Replace an old string with a new string in a file. Reads the file, performs the replacement, and writes it back."
+
+     @property
+     def arguments(self) -> dict:
+         return {
+             "file_path": {
+                 "type": "string",
+                 "description": "Optional directory path where the file is located, e.g. '/notes/'. Defaults to root '/' if not specified.",
+             },
+             "filename": {
+                 "type": "string",
+                 "description": "Filename to modify.",
+             },
+             "old_string": {
+                 "type": "string",
+                 "description": "The string to be replaced.",
+             },
+             "new_string": {
+                 "type": "string",
+                 "description": "The string to replace the old_string with.",
+             },
+         }
+
+     @property
+     def required_arguments(self) -> list[str]:
+         return ["filename", "old_string", "new_string"]
+
+     def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+         """Replace an old string with a new string in a file."""
+         filename = llm_arguments.get("filename")
+         file_path = llm_arguments.get("file_path")
+         old_string = llm_arguments.get("old_string")
+         new_string = llm_arguments.get("new_string")
+
+         if not filename:
+             raise ValueError("filename is required")
+         if old_string is None:
+             raise ValueError("old_string is required")
+         if new_string is None:
+             raise ValueError("new_string is required")
+
+         normalized_path = _normalize_path(file_path)
+
+         # Read the file content
+         result = ctx.client.disks.artifacts.get(
+             ctx.disk_id,
+             file_path=normalized_path,
+             filename=filename,
+             with_content=True,
+         )
+
+         if not result.content:
+             raise RuntimeError("Failed to read file: server did not return content.")
+
+         content_str = result.content.raw
+
+         # Perform the replacement
+         if old_string not in content_str:
+             return f"String '{old_string}' not found in file '{filename}'"
+
+         updated_content = content_str.replace(old_string, new_string)
+         replacement_count = content_str.count(old_string)
+
+         # Write the updated content back
+         payload = FileUpload(filename=filename, content=updated_content.encode("utf-8"))
+         ctx.client.disks.artifacts.upsert(
+             ctx.disk_id,
+             file=payload,
+             file_path=normalized_path,
+         )
+
+         return f"Found {replacement_count} old_string in {normalized_path}{filename} and replaced it."
+
+
+ class ListTool(BaseTool):
+     """Tool for listing files in a directory on the Acontext disk."""
+
+     @property
+     def name(self) -> str:
+         return "list_artifacts"
+
+     @property
+     def description(self) -> str:
+         return "List all files and directories in a specified path on the disk."
+
+     @property
+     def arguments(self) -> dict:
+         return {
+             "file_path": {
+                 "type": "string",
+                 "description": "Optional directory path to list, e.g. '/todo/' or '/notes/'. Root is '/'",
+             },
+         }
+
+     @property
+     def required_arguments(self) -> list[str]:
+         return ["file_path"]
+
+     def execute(self, ctx: DiskContext, llm_arguments: dict) -> str:
+         """List all files in a specified path."""
+         file_path = llm_arguments.get("file_path")
+         normalized_path = _normalize_path(file_path)
+
+         result = ctx.client.disks.artifacts.list(
+             ctx.disk_id,
+             path=normalized_path,
+         )
+
+         artifacts_list = [artifact.filename for artifact in result.artifacts]
+
+         if not artifacts_list and not result.directories:
+             return f"No files or directories found in '{normalized_path}'"
+
+         output_parts = []
+         if artifacts_list:
+             output_parts.append(f"Files: {', '.join(artifacts_list)}")
+         if result.directories:
+             output_parts.append(f"Directories: {', '.join(result.directories)}")
+
+         ls_sect = "\n".join(output_parts)
+         return f"""[Listing in {normalized_path}]
+ {ls_sect}"""
+
+
+ class DiskToolPool(BaseToolPool):
+     """Tool pool for disk operations on Acontext disks."""
+
+     def format_context(self, client: AcontextClient, disk_id: str) -> DiskContext:
+         return DiskContext(client=client, disk_id=disk_id)
+
+
+ DISK_TOOLS = DiskToolPool()
+ DISK_TOOLS.add_tool(WriteFileTool())
+ DISK_TOOLS.add_tool(ReadFileTool())
+ DISK_TOOLS.add_tool(ReplaceStringTool())
+ DISK_TOOLS.add_tool(ListTool())
+
+
+ if __name__ == "__main__":
+     client = AcontextClient(
+         api_key="sk-ac-your-root-api-bearer-token",
+         base_url="http://localhost:8029/api/v1",
+     )
+     print(client.ping())
+     new_disk = client.disks.create()
+
+     ctx = DISK_TOOLS.format_context(client, new_disk.id)
+     r = DISK_TOOLS.execute_tool(
+         ctx,
+         "write_file",
+         {"filename": "test.txt", "file_path": "/try/", "content": "Hello, world!"},
+     )
+     print(r)
+     r = DISK_TOOLS.execute_tool(
+         ctx, "read_file", {"filename": "test.txt", "file_path": "/try/"}
+     )
+     print(r)
+     r = DISK_TOOLS.execute_tool(ctx, "list_artifacts", {"file_path": "/"})
+     print(r)
+
+     r = DISK_TOOLS.execute_tool(
+         ctx,
+         "replace_string",
+         {
+             "filename": "test.txt",
+             "file_path": "/try/",
+             "old_string": "Hello",
+             "new_string": "Hi",
+         },
+     )
+     print(r)
+     r = DISK_TOOLS.execute_tool(
+         ctx, "read_file", {"filename": "test.txt", "file_path": "/try/"}
+     )
+     print(r)
File without changes

{acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/async_spaces.py
@@ -3,7 +3,7 @@ Spaces endpoints (async).
  """

  from collections.abc import Mapping
- from typing import Any, List
+ from typing import Any

  from .._utils import build_params
  from ..client_types import AsyncRequesterProtocol
@@ -11,7 +11,6 @@ from ..types.space import (
      ExperienceConfirmation,
      ListExperienceConfirmationsOutput,
      ListSpacesOutput,
-     SearchResultBlockItem,
      Space,
      SpaceSearchResult,
  )
@@ -133,62 +132,6 @@ class AsyncSpacesAPI:
          )
          return SpaceSearchResult.model_validate(data)

-     async def semantic_glob(
-         self,
-         space_id: str,
-         *,
-         query: str,
-         limit: int | None = None,
-         threshold: float | None = None,
-     ) -> List[SearchResultBlockItem]:
-         """Perform semantic glob (glob) search for page/folder titles.
-
-         Searches specifically for page/folder titles using semantic similarity,
-         similar to a semantic version of the glob command.
-
-         Args:
-             space_id: The UUID of the space.
-             query: Search query for page/folder titles.
-             limit: Maximum number of results to return (1-50, default 10).
-             threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-         Returns:
-             List of SearchResultBlockItem objects matching the query.
-         """
-         params = build_params(query=query, limit=limit, threshold=threshold)
-         data = await self._requester.request(
-             "GET", f"/space/{space_id}/semantic_glob", params=params or None
-         )
-         return [SearchResultBlockItem.model_validate(item) for item in data]
-
-     async def semantic_grep(
-         self,
-         space_id: str,
-         *,
-         query: str,
-         limit: int | None = None,
-         threshold: float | None = None,
-     ) -> List[SearchResultBlockItem]:
-         """Perform semantic grep search for content blocks.
-
-         Searches through content blocks (actual text content) using semantic similarity,
-         similar to a semantic version of the grep command.
-
-         Args:
-             space_id: The UUID of the space.
-             query: Search query for content blocks.
-             limit: Maximum number of results to return (1-50, default 10).
-             threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-         Returns:
-             List of SearchResultBlockItem objects matching the query.
-         """
-         params = build_params(query=query, limit=limit, threshold=threshold)
-         data = await self._requester.request(
-             "GET", f"/space/{space_id}/semantic_grep", params=params or None
-         )
-         return [SearchResultBlockItem.model_validate(item) for item in data]
-
      async def get_unconfirmed_experiences(
          self,
          space_id: str,

{acontext-0.0.6 → acontext-0.0.7}/src/acontext/resources/spaces.py
@@ -3,7 +3,7 @@ Spaces endpoints.
  """

  from collections.abc import Mapping
- from typing import Any, List
+ from typing import Any

  from .._utils import build_params
  from ..client_types import RequesterProtocol
@@ -11,7 +11,6 @@ from ..types.space import (
      ExperienceConfirmation,
      ListExperienceConfirmationsOutput,
      ListSpacesOutput,
-     SearchResultBlockItem,
      Space,
      SpaceSearchResult,
  )
@@ -131,62 +130,6 @@ class SpacesAPI:
          )
          return SpaceSearchResult.model_validate(data)

-     def semantic_glob(
-         self,
-         space_id: str,
-         *,
-         query: str,
-         limit: int | None = None,
-         threshold: float | None = None,
-     ) -> List[SearchResultBlockItem]:
-         """Perform semantic glob (glob) search for page/folder titles.
-
-         Searches specifically for page/folder titles using semantic similarity,
-         similar to a semantic version of the glob command.
-
-         Args:
-             space_id: The UUID of the space.
-             query: Search query for page/folder titles.
-             limit: Maximum number of results to return (1-50, default 10).
-             threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-         Returns:
-             List of SearchResultBlockItem objects matching the query.
-         """
-         params = build_params(query=query, limit=limit, threshold=threshold)
-         data = self._requester.request(
-             "GET", f"/space/{space_id}/semantic_glob", params=params or None
-         )
-         return [SearchResultBlockItem.model_validate(item) for item in data]
-
-     def semantic_grep(
-         self,
-         space_id: str,
-         *,
-         query: str,
-         limit: int | None = None,
-         threshold: float | None = None,
-     ) -> List[SearchResultBlockItem]:
-         """Perform semantic grep search for content blocks.
-
-         Searches through content blocks (actual text content) using semantic similarity,
-         similar to a semantic version of the grep command.
-
-         Args:
-             space_id: The UUID of the space.
-             query: Search query for content blocks.
-             limit: Maximum number of results to return (1-50, default 10).
-             threshold: Cosine distance threshold (0=identical, 2=opposite).
-
-         Returns:
-             List of SearchResultBlockItem objects matching the query.
-         """
-         params = build_params(query=query, limit=limit, threshold=threshold)
-         data = self._requester.request(
-             "GET", f"/space/{space_id}/semantic_grep", params=params or None
-         )
-         return [SearchResultBlockItem.model_validate(item) for item in data]
-
      def get_unconfirmed_experiences(
          self,
          space_id: str,