@acontext/acontext 0.0.8 → 0.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +126 -39
- package/dist/agent/base.d.ts +36 -0
- package/dist/agent/base.js +68 -0
- package/dist/agent/disk.d.ts +93 -0
- package/dist/agent/disk.js +243 -0
- package/dist/agent/index.d.ts +5 -0
- package/dist/agent/index.js +21 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/messages.d.ts +2 -3
- package/dist/messages.js +3 -3
- package/dist/resources/sessions.d.ts +19 -1
- package/dist/resources/sessions.js +22 -0
- package/dist/resources/spaces.d.ts +1 -31
- package/dist/resources/spaces.js +1 -39
- package/dist/types/session.d.ts +63 -0
- package/dist/types/session.js +58 -1
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -136,9 +136,134 @@ const result = await client.tools.renameToolName({
|
|
|
136
136
|
console.log(result); // { status: 0, errmsg: '' }
|
|
137
137
|
```
|
|
138
138
|
|
|
139
|
+
## Agent Tools
|
|
140
|
+
|
|
141
|
+
The SDK provides agent tools that allow LLMs (OpenAI, Anthropic) to interact with Acontext disks through function calling. These tools can be converted to OpenAI or Anthropic tool schemas and executed when the LLM calls them.
|
|
142
|
+
|
|
143
|
+
### Pre-configured Disk Tools
|
|
144
|
+
|
|
145
|
+
The SDK includes a pre-configured `DISK_TOOLS` pool with four disk operation tools:
|
|
146
|
+
|
|
147
|
+
- **`write_file`**: Write text content to a file
|
|
148
|
+
- **`read_file`**: Read a text file with optional line offset and limit
|
|
149
|
+
- **`replace_string`**: Replace strings in a file
|
|
150
|
+
- **`list_artifacts`**: List files and directories in a path
|
|
151
|
+
|
|
152
|
+
### Getting Tool Schemas for LLM APIs
|
|
153
|
+
|
|
154
|
+
Convert tools to the appropriate format for your LLM provider:
|
|
155
|
+
|
|
156
|
+
```typescript
|
|
157
|
+
import { AcontextClient, DISK_TOOLS } from '@acontext/acontext';
|
|
158
|
+
|
|
159
|
+
const client = new AcontextClient({ apiKey: 'sk-ac-your-root-api-bearer-token' });
|
|
160
|
+
|
|
161
|
+
// Get OpenAI-compatible tool schemas
|
|
162
|
+
const openaiTools = DISK_TOOLS.toOpenAIToolSchema();
|
|
163
|
+
|
|
164
|
+
// Get Anthropic-compatible tool schemas
|
|
165
|
+
const anthropicTools = DISK_TOOLS.toAnthropicToolSchema();
|
|
166
|
+
|
|
167
|
+
// Use with OpenAI API
|
|
168
|
+
import OpenAI from 'openai';
|
|
169
|
+
const openai = new OpenAI({ apiKey: 'your-openai-key' });
|
|
170
|
+
const completion = await openai.chat.completions.create({
|
|
171
|
+
model: 'gpt-4',
|
|
172
|
+
messages: [{ role: 'user', content: 'Write a file called hello.txt with "Hello, World!"' }],
|
|
173
|
+
tools: openaiTools,
|
|
174
|
+
});
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
### Executing Tools
|
|
178
|
+
|
|
179
|
+
When an LLM calls a tool, execute it using the tool pool:
|
|
180
|
+
|
|
181
|
+
```typescript
|
|
182
|
+
import { AcontextClient, DISK_TOOLS } from '@acontext/acontext';
|
|
183
|
+
|
|
184
|
+
const client = new AcontextClient({ apiKey: 'sk-ac-your-root-api-bearer-token' });
|
|
185
|
+
|
|
186
|
+
// Create a disk for the tools to operate on
|
|
187
|
+
const disk = await client.disks.create();
|
|
188
|
+
|
|
189
|
+
// Create a context for the tools
|
|
190
|
+
const ctx = DISK_TOOLS.formatContext(client, disk.id);
|
|
191
|
+
|
|
192
|
+
// Execute a tool (e.g., after LLM returns a tool call)
|
|
193
|
+
const result = await DISK_TOOLS.executeTool(ctx, 'write_file', {
|
|
194
|
+
filename: 'hello.txt',
|
|
195
|
+
file_path: '/notes/',
|
|
196
|
+
content: 'Hello, World!',
|
|
197
|
+
});
|
|
198
|
+
console.log(result); // File 'hello.txt' written successfully to '/notes/hello.txt'
|
|
199
|
+
|
|
200
|
+
// Read the file
|
|
201
|
+
const readResult = await DISK_TOOLS.executeTool(ctx, 'read_file', {
|
|
202
|
+
filename: 'hello.txt',
|
|
203
|
+
file_path: '/notes/',
|
|
204
|
+
});
|
|
205
|
+
console.log(readResult);
|
|
206
|
+
|
|
207
|
+
// List files in a directory
|
|
208
|
+
const listResult = await DISK_TOOLS.executeTool(ctx, 'list_artifacts', {
|
|
209
|
+
file_path: '/notes/',
|
|
210
|
+
});
|
|
211
|
+
console.log(listResult);
|
|
212
|
+
|
|
213
|
+
// Replace a string in a file
|
|
214
|
+
const replaceResult = await DISK_TOOLS.executeTool(ctx, 'replace_string', {
|
|
215
|
+
filename: 'hello.txt',
|
|
216
|
+
file_path: '/notes/',
|
|
217
|
+
old_string: 'Hello',
|
|
218
|
+
new_string: 'Hi',
|
|
219
|
+
});
|
|
220
|
+
console.log(replaceResult);
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
### Creating Custom Tools
|
|
224
|
+
|
|
225
|
+
You can create custom tools by extending `AbstractBaseTool`:
|
|
226
|
+
|
|
227
|
+
```typescript
|
|
228
|
+
import { AbstractBaseTool, BaseContext, BaseToolPool } from '@acontext/acontext';
|
|
229
|
+
|
|
230
|
+
interface MyContext extends BaseContext {
|
|
231
|
+
// Your context properties
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
class MyCustomTool extends AbstractBaseTool {
|
|
235
|
+
readonly name = 'my_custom_tool';
|
|
236
|
+
readonly description = 'A custom tool that does something';
|
|
237
|
+
readonly arguments = {
|
|
238
|
+
param1: {
|
|
239
|
+
type: 'string',
|
|
240
|
+
description: 'First parameter',
|
|
241
|
+
},
|
|
242
|
+
};
|
|
243
|
+
readonly requiredArguments = ['param1'];
|
|
244
|
+
|
|
245
|
+
async execute(ctx: MyContext, llmArguments: Record<string, unknown>): Promise<string> {
|
|
246
|
+
const param1 = llmArguments.param1 as string;
|
|
247
|
+
// Your custom logic here
|
|
248
|
+
return `Result: ${param1}`;
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
// Create a custom tool pool
|
|
253
|
+
class MyToolPool extends BaseToolPool {
|
|
254
|
+
formatContext(...args: unknown[]): MyContext {
|
|
255
|
+
// Create and return your context
|
|
256
|
+
return {};
|
|
257
|
+
}
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
const myPool = new MyToolPool();
|
|
261
|
+
myPool.addTool(new MyCustomTool());
|
|
262
|
+
```
|
|
263
|
+
|
|
139
264
|
## Semantic search within spaces
|
|
140
265
|
|
|
141
|
-
The SDK provides
|
|
266
|
+
The SDK provides a powerful semantic search API for finding content within your spaces:
|
|
142
267
|
|
|
143
268
|
### 1. Experience Search (Advanced AI-powered search)
|
|
144
269
|
|
|
@@ -175,41 +300,3 @@ if (result.final_answer) {
|
|
|
175
300
|
}
|
|
176
301
|
```
|
|
177
302
|
|
|
178
|
-
### 2. Semantic Glob (Search page/folder titles)
|
|
179
|
-
|
|
180
|
-
Search for pages and folders by their titles using semantic similarity (like a semantic version of `glob`):
|
|
181
|
-
|
|
182
|
-
```typescript
|
|
183
|
-
// Find pages about authentication
|
|
184
|
-
const results = await client.spaces.semanticGlobal('space-uuid', {
|
|
185
|
-
query: 'authentication and authorization pages',
|
|
186
|
-
limit: 10,
|
|
187
|
-
threshold: 1.0, // Only show results with distance < 1.0
|
|
188
|
-
});
|
|
189
|
-
|
|
190
|
-
for (const block of results) {
|
|
191
|
-
console.log(`${block.title} - ${block.type}`);
|
|
192
|
-
}
|
|
193
|
-
```
|
|
194
|
-
|
|
195
|
-
### 3. Semantic Grep (Search content blocks)
|
|
196
|
-
|
|
197
|
-
Search through actual content blocks using semantic similarity (like a semantic version of `grep`):
|
|
198
|
-
|
|
199
|
-
```typescript
|
|
200
|
-
// Find code examples for JWT validation
|
|
201
|
-
const results = await client.spaces.semanticGrep('space-uuid', {
|
|
202
|
-
query: 'JWT token validation code examples',
|
|
203
|
-
limit: 15,
|
|
204
|
-
threshold: 0.7,
|
|
205
|
-
});
|
|
206
|
-
|
|
207
|
-
for (const block of results) {
|
|
208
|
-
console.log(`${block.title} - distance: ${block.distance}`);
|
|
209
|
-
const content = block.props.text || block.props.content;
|
|
210
|
-
if (content) {
|
|
211
|
-
console.log(`Content: ${String(content).substring(0, 100)}...`);
|
|
212
|
-
}
|
|
213
|
-
}
|
|
214
|
-
```
|
|
215
|
-
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
 * Base classes for agent tools.
 */
/** Opaque execution context handed to every tool; concrete pools extend it. */
export interface BaseContext {
}
/** Anything that can render itself as an LLM-provider tool schema. */
export interface BaseConverter {
    /** OpenAI function-calling schema for this object. */
    toOpenAIToolSchema(): Record<string, unknown>;
    /** Anthropic tool-use schema for this object. */
    toAnthropicToolSchema(): Record<string, unknown>;
}
/** Contract implemented by every agent tool. */
export interface BaseTool extends BaseConverter {
    /** Unique tool name the LLM uses to address the tool. */
    readonly name: string;
    /** Human/LLM readable description of what the tool does. */
    readonly description: string;
    /** JSON-Schema-style property map describing the tool arguments. */
    readonly arguments: Record<string, unknown>;
    /** Names of arguments the LLM must always provide. */
    readonly requiredArguments: string[];
    /** Run the tool with LLM-provided arguments and return a text result. */
    execute(ctx: BaseContext, llmArguments: Record<string, unknown>): Promise<string>;
}
/** Base class supplying the schema conversions; subclasses define the tool. */
export declare abstract class AbstractBaseTool implements BaseTool {
    abstract readonly name: string;
    abstract readonly description: string;
    abstract readonly arguments: Record<string, unknown>;
    abstract readonly requiredArguments: string[];
    abstract execute(ctx: BaseContext, llmArguments: Record<string, unknown>): Promise<string>;
    toOpenAIToolSchema(): Record<string, unknown>;
    toAnthropicToolSchema(): Record<string, unknown>;
}
/** Named collection of tools with lookup, execution and schema export. */
export declare abstract class BaseToolPool {
    protected tools: Map<string, BaseTool>;
    /** Register a tool, replacing any existing tool with the same name. */
    addTool(tool: BaseTool): void;
    /** Unregister a tool by name; no-op if absent. */
    removeTool(toolName: string): void;
    /** Copy every tool from another pool into this one (overwrites on clash). */
    extendToolPool(pool: BaseToolPool): void;
    /** Execute a registered tool; rejects if `toolName` is unknown. */
    executeTool(ctx: BaseContext, toolName: string, llmArguments: Record<string, unknown>): Promise<string>;
    toolExists(toolName: string): boolean;
    toOpenAIToolSchema(): Record<string, unknown>[];
    toAnthropicToolSchema(): Record<string, unknown>[];
    /** Build the concrete context this pool's tools expect. */
    abstract formatContext(...args: unknown[]): BaseContext;
}
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Base classes for agent tools.
|
|
4
|
+
*/
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.BaseToolPool = exports.AbstractBaseTool = void 0;
|
|
7
|
+
class AbstractBaseTool {
|
|
8
|
+
toOpenAIToolSchema() {
|
|
9
|
+
return {
|
|
10
|
+
type: 'function',
|
|
11
|
+
function: {
|
|
12
|
+
name: this.name,
|
|
13
|
+
description: this.description,
|
|
14
|
+
parameters: {
|
|
15
|
+
type: 'object',
|
|
16
|
+
properties: this.arguments,
|
|
17
|
+
required: this.requiredArguments,
|
|
18
|
+
},
|
|
19
|
+
},
|
|
20
|
+
};
|
|
21
|
+
}
|
|
22
|
+
toAnthropicToolSchema() {
|
|
23
|
+
return {
|
|
24
|
+
name: this.name,
|
|
25
|
+
description: this.description,
|
|
26
|
+
input_schema: {
|
|
27
|
+
type: 'object',
|
|
28
|
+
properties: this.arguments,
|
|
29
|
+
required: this.requiredArguments,
|
|
30
|
+
},
|
|
31
|
+
};
|
|
32
|
+
}
|
|
33
|
+
}
|
|
34
|
+
exports.AbstractBaseTool = AbstractBaseTool;
|
|
35
|
+
class BaseToolPool {
|
|
36
|
+
constructor() {
|
|
37
|
+
this.tools = new Map();
|
|
38
|
+
}
|
|
39
|
+
addTool(tool) {
|
|
40
|
+
this.tools.set(tool.name, tool);
|
|
41
|
+
}
|
|
42
|
+
removeTool(toolName) {
|
|
43
|
+
this.tools.delete(toolName);
|
|
44
|
+
}
|
|
45
|
+
extendToolPool(pool) {
|
|
46
|
+
for (const [name, tool] of pool.tools.entries()) {
|
|
47
|
+
this.tools.set(name, tool);
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
async executeTool(ctx, toolName, llmArguments) {
|
|
51
|
+
const tool = this.tools.get(toolName);
|
|
52
|
+
if (!tool) {
|
|
53
|
+
throw new Error(`Tool '${toolName}' not found`);
|
|
54
|
+
}
|
|
55
|
+
const result = await tool.execute(ctx, llmArguments);
|
|
56
|
+
return result.trim();
|
|
57
|
+
}
|
|
58
|
+
toolExists(toolName) {
|
|
59
|
+
return this.tools.has(toolName);
|
|
60
|
+
}
|
|
61
|
+
toOpenAIToolSchema() {
|
|
62
|
+
return Array.from(this.tools.values()).map((tool) => tool.toOpenAIToolSchema());
|
|
63
|
+
}
|
|
64
|
+
toAnthropicToolSchema() {
|
|
65
|
+
return Array.from(this.tools.values()).map((tool) => tool.toAnthropicToolSchema());
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
exports.BaseToolPool = BaseToolPool;
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
/**
 * Disk tools for agent operations.
 */
import { AcontextClient } from '../client';
import { AbstractBaseTool, BaseContext, BaseToolPool } from './base';
/** Execution context for disk tools: the API client plus the target disk id. */
export interface DiskContext extends BaseContext {
    client: AcontextClient;
    diskId: string;
}
/** Tool that writes (or overwrites) a text file on the disk. */
export declare class WriteFileTool extends AbstractBaseTool {
    readonly name = "write_file";
    readonly description = "Write text content to a file in the file system. Creates the file if it doesn't exist, overwrites if it does.";
    readonly arguments: {
        file_path: {
            type: string;
            description: string;
        };
        filename: {
            type: string;
            description: string;
        };
        content: {
            type: string;
            description: string;
        };
    };
    readonly requiredArguments: string[];
    execute(ctx: DiskContext, llmArguments: Record<string, unknown>): Promise<string>;
}
/** Tool that reads a text file, optionally windowed by line offset/limit. */
export declare class ReadFileTool extends AbstractBaseTool {
    readonly name = "read_file";
    readonly description = "Read a text file from the file system and return its content.";
    readonly arguments: {
        file_path: {
            type: string;
            description: string;
        };
        filename: {
            type: string;
            description: string;
        };
        line_offset: {
            type: string;
            description: string;
        };
        line_limit: {
            type: string;
            description: string;
        };
    };
    readonly requiredArguments: string[];
    execute(ctx: DiskContext, llmArguments: Record<string, unknown>): Promise<string>;
}
/** Tool that replaces a string inside an existing file and writes it back. */
export declare class ReplaceStringTool extends AbstractBaseTool {
    readonly name = "replace_string";
    readonly description = "Replace an old string with a new string in a file. Reads the file, performs the replacement, and writes it back.";
    readonly arguments: {
        file_path: {
            type: string;
            description: string;
        };
        filename: {
            type: string;
            description: string;
        };
        old_string: {
            type: string;
            description: string;
        };
        new_string: {
            type: string;
            description: string;
        };
    };
    readonly requiredArguments: string[];
    execute(ctx: DiskContext, llmArguments: Record<string, unknown>): Promise<string>;
}
/** Tool that lists files and directories under a disk path. */
export declare class ListTool extends AbstractBaseTool {
    readonly name = "list_artifacts";
    readonly description = "List all files and directories in a specified path on the disk.";
    readonly arguments: {
        file_path: {
            type: string;
            description: string;
        };
    };
    readonly requiredArguments: string[];
    execute(ctx: DiskContext, llmArguments: Record<string, unknown>): Promise<string>;
}
/** Tool pool whose context binds an AcontextClient to a specific disk. */
export declare class DiskToolPool extends BaseToolPool {
    formatContext(client: AcontextClient, diskId: string): DiskContext;
}
/** Ready-to-use pool pre-loaded with the four disk tools. */
export declare const DISK_TOOLS: DiskToolPool;
|
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Disk tools for agent operations.
|
|
4
|
+
*/
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.DISK_TOOLS = exports.DiskToolPool = exports.ListTool = exports.ReplaceStringTool = exports.ReadFileTool = exports.WriteFileTool = void 0;
|
|
7
|
+
const uploads_1 = require("../uploads");
|
|
8
|
+
const base_1 = require("./base");
|
|
9
|
+
/**
 * Normalize a user-supplied disk path to the canonical '/segment/.../' shape:
 * empty or missing input maps to the root '/', and both a leading and a
 * trailing slash are guaranteed on the result.
 */
function normalizePath(path) {
    if (!path) {
        return '/';
    }
    const withLeading = path.startsWith('/') ? path : `/${path}`;
    return withLeading.endsWith('/') ? withLeading : `${withLeading}/`;
}
|
|
19
|
+
/**
 * Agent tool that writes text content to a file on the disk, creating the
 * file if it does not exist and overwriting it if it does.
 */
class WriteFileTool extends base_1.AbstractBaseTool {
    constructor() {
        super(...arguments);
        this.name = 'write_file';
        this.description = "Write text content to a file in the file system. Creates the file if it doesn't exist, overwrites if it does.";
        this.arguments = {
            file_path: {
                type: 'string',
                description: "Optional folder path to organize files, e.g. '/notes/' or '/documents/'. Defaults to root '/' if not specified.",
            },
            filename: {
                type: 'string',
                description: "Filename such as 'report.md' or 'demo.txt'.",
            },
            content: {
                type: 'string',
                description: 'Text content to write to the file.',
            },
        };
        this.requiredArguments = ['filename', 'content'];
    }
    /**
     * Upload `content` as a UTF-8 text artifact named `filename` under
     * `file_path` (normalized; defaults to '/').
     *
     * @param ctx - Disk context carrying the API client and target disk id.
     * @param llmArguments - Raw arguments from the LLM tool call.
     * @returns A human-readable confirmation string.
     * @throws Error when `filename` or `content` is missing.
     */
    async execute(ctx, llmArguments) {
        const filename = llmArguments.filename;
        const content = llmArguments.content;
        const filePath = llmArguments.file_path || null;
        if (!filename) {
            throw new Error('filename is required');
        }
        // Bug fix: the previous `if (!content)` check also rejected the empty
        // string, making it impossible to write an empty file. Only a truly
        // missing argument is an error.
        if (content === undefined || content === null) {
            throw new Error('content is required');
        }
        const normalizedPath = normalizePath(filePath);
        const payload = new uploads_1.FileUpload({
            filename,
            content: Buffer.from(content, 'utf-8'),
            contentType: 'text/plain',
        });
        const artifact = await ctx.client.disks.artifacts.upsert(ctx.diskId, {
            file: payload,
            filePath: normalizedPath,
        });
        return `File '${artifact.filename}' written successfully to '${artifact.path}'`;
    }
}
exports.WriteFileTool = WriteFileTool;
|
|
64
|
+
/**
 * Agent tool that reads a text file from the disk and returns a line-windowed
 * view of its content.
 */
class ReadFileTool extends base_1.AbstractBaseTool {
    constructor() {
        super(...arguments);
        this.name = 'read_file';
        this.description = 'Read a text file from the file system and return its content.';
        this.arguments = {
            file_path: {
                type: 'string',
                description: "Optional directory path where the file is located, e.g. '/notes/'. Defaults to root '/' if not specified.",
            },
            filename: {
                type: 'string',
                description: 'Filename to read.',
            },
            line_offset: {
                type: 'number',
                description: 'The line number to start reading from. Default to 0',
            },
            line_limit: {
                type: 'number',
                description: 'The maximum number of lines to return. Default to 100',
            },
        };
        this.requiredArguments = ['filename'];
    }
    /**
     * Fetch the artifact with content and return up to `line_limit` lines
     * starting at `line_offset`, prefixed with a location header.
     *
     * @param ctx - Disk context carrying the API client and target disk id.
     * @param llmArguments - Raw arguments from the LLM tool call.
     * @returns Header line plus the selected slice of the file.
     * @throws Error when `filename` is missing or the server omits content.
     */
    async execute(ctx, llmArguments) {
        const filename = llmArguments.filename;
        const filePath = llmArguments.file_path || null;
        const lineOffset = llmArguments.line_offset || 0;
        const lineLimit = llmArguments.line_limit || 100;
        if (!filename) {
            throw new Error('filename is required');
        }
        const normalizedPath = normalizePath(filePath);
        const result = await ctx.client.disks.artifacts.get(ctx.diskId, {
            filePath: normalizedPath,
            filename,
            withContent: true,
        });
        if (!result.content) {
            throw new Error('Failed to read file: server did not return content.');
        }
        const contentStr = result.content.raw;
        const lines = contentStr.split('\n');
        // Bug fix: clamp the start line to >= 0 as well — a negative
        // line_offset from the LLM previously produced a negative slice start.
        const lineStart = Math.max(0, Math.min(lineOffset, Math.max(0, lines.length - 1)));
        const lineEnd = Math.min(lineStart + lineLimit, lines.length);
        const preview = lines.slice(lineStart, lineEnd).join('\n');
        // NOTE(review): the published dist shows a garbled '$(unknown)'
        // placeholder here; reconstructed as the filename interpolation.
        return `[${normalizedPath}${filename} - showing L${lineStart}-${lineEnd} of ${lines.length} lines]\n${preview}`;
    }
}
exports.ReadFileTool = ReadFileTool;
|
|
115
|
+
/**
 * Agent tool that replaces occurrences of a string inside an existing text
 * file and writes the updated content back to the disk.
 */
class ReplaceStringTool extends base_1.AbstractBaseTool {
    constructor() {
        super(...arguments);
        this.name = 'replace_string';
        this.description = 'Replace an old string with a new string in a file. Reads the file, performs the replacement, and writes it back.';
        this.arguments = {
            file_path: {
                type: 'string',
                description: "Optional directory path where the file is located, e.g. '/notes/'. Defaults to root '/' if not specified.",
            },
            filename: {
                type: 'string',
                description: 'Filename to modify.',
            },
            old_string: {
                type: 'string',
                description: 'The string to be replaced.',
            },
            new_string: {
                type: 'string',
                description: 'The string to replace the old_string with.',
            },
        };
        this.requiredArguments = ['filename', 'old_string', 'new_string'];
    }
    /**
     * Read the file, replace every occurrence of `old_string` with
     * `new_string`, and upload the result back to the same path.
     *
     * @param ctx - Disk context carrying the API client and target disk id.
     * @param llmArguments - Raw arguments from the LLM tool call.
     * @returns A summary naming the occurrence count, or a not-found notice.
     * @throws Error when a required argument is missing or content is absent.
     */
    async execute(ctx, llmArguments) {
        const filename = llmArguments.filename;
        const filePath = llmArguments.file_path || null;
        const oldString = llmArguments.old_string;
        const newString = llmArguments.new_string;
        if (!filename) {
            throw new Error('filename is required');
        }
        if (oldString === null || oldString === undefined) {
            throw new Error('old_string is required');
        }
        if (newString === null || newString === undefined) {
            throw new Error('new_string is required');
        }
        const normalizedPath = normalizePath(filePath);
        // Read the file content
        const result = await ctx.client.disks.artifacts.get(ctx.diskId, {
            filePath: normalizedPath,
            filename,
            withContent: true,
        });
        if (!result.content) {
            throw new Error('Failed to read file: server did not return content.');
        }
        const contentStr = result.content.raw;
        if (!contentStr.includes(oldString)) {
            // NOTE(review): dist showed a garbled '$(unknown)' placeholder;
            // reconstructed as the filename interpolation.
            return `String '${oldString}' not found in file '${filename}'`;
        }
        // Bug fix: the original counted every occurrence but then used
        // String.prototype.replace with a string pattern, which only replaces
        // the FIRST occurrence — so the reported count was wrong. split/join
        // both counts and replaces all occurrences consistently.
        const segments = contentStr.split(oldString);
        const replacementCount = segments.length - 1;
        const updatedContent = segments.join(newString);
        // Write the updated content back
        const payload = new uploads_1.FileUpload({
            filename,
            content: Buffer.from(updatedContent, 'utf-8'),
            contentType: 'text/plain',
        });
        await ctx.client.disks.artifacts.upsert(ctx.diskId, {
            file: payload,
            filePath: normalizedPath,
        });
        return `Found ${replacementCount} old_string in ${normalizedPath}${filename} and replaced it.`;
    }
}
exports.ReplaceStringTool = ReplaceStringTool;
|
|
195
|
+
/**
 * Agent tool that lists the files and directories under a disk path.
 */
class ListTool extends base_1.AbstractBaseTool {
    constructor() {
        super(...arguments);
        this.name = 'list_artifacts';
        this.description = 'List all files and directories in a specified path on the disk.';
        this.arguments = {
            file_path: {
                type: 'string',
                description: "Optional directory path to list, e.g. '/todo/' or '/notes/'. Root is '/'",
            },
        };
        this.requiredArguments = ['file_path'];
    }
    /**
     * List artifacts and sub-directories at `file_path` and format them as a
     * short text report for the LLM.
     */
    async execute(ctx, llmArguments) {
        const normalizedPath = normalizePath(llmArguments.file_path);
        const result = await ctx.client.disks.artifacts.list(ctx.diskId, {
            path: normalizedPath,
        });
        const filenames = result.artifacts.map((artifact) => artifact.filename);
        const directories = result.directories;
        if (filenames.length === 0 && directories.length === 0) {
            return `No files or directories found in '${normalizedPath}'`;
        }
        const sections = [];
        if (filenames.length > 0) {
            sections.push(`Files: ${filenames.join(', ')}`);
        }
        if (directories.length > 0) {
            sections.push(`Directories: ${directories.join(', ')}`);
        }
        return `[Listing in ${normalizedPath}]\n${sections.join('\n')}`;
    }
}
exports.ListTool = ListTool;
|
|
230
|
+
/**
 * Tool pool whose context binds an AcontextClient to one specific disk.
 */
class DiskToolPool extends base_1.BaseToolPool {
    /** Build the shared execution context for the disk tools. */
    formatContext(client, diskId) {
        return { client, diskId };
    }
}
exports.DiskToolPool = DiskToolPool;
// Pre-configured pool shipped with the SDK, loaded with the four disk tools.
exports.DISK_TOOLS = new DiskToolPool();
for (const tool of [new WriteFileTool(), new ReadFileTool(), new ReplaceStringTool(), new ListTool()]) {
    exports.DISK_TOOLS.addTool(tool);
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Agent tools for Acontext SDK.
|
|
4
|
+
*/
|
|
5
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
6
|
+
if (k2 === undefined) k2 = k;
|
|
7
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
8
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
9
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
10
|
+
}
|
|
11
|
+
Object.defineProperty(o, k2, desc);
|
|
12
|
+
}) : (function(o, m, k, k2) {
|
|
13
|
+
if (k2 === undefined) k2 = k;
|
|
14
|
+
o[k2] = m[k];
|
|
15
|
+
}));
|
|
16
|
+
var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
17
|
+
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
18
|
+
};
|
|
19
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
20
|
+
__exportStar(require("./base"), exports);
|
|
21
|
+
__exportStar(require("./disk"), exports);
|
package/dist/index.d.ts
CHANGED
package/dist/index.js
CHANGED
|
@@ -32,3 +32,4 @@ Object.defineProperty(exports, "TransportError", { enumerable: true, get: functi
|
|
|
32
32
|
Object.defineProperty(exports, "AcontextError", { enumerable: true, get: function () { return errors_1.AcontextError; } });
|
|
33
33
|
__exportStar(require("./types"), exports);
|
|
34
34
|
__exportStar(require("./resources"), exports);
|
|
35
|
+
__exportStar(require("./agent"), exports);
|
package/dist/messages.d.ts
CHANGED
|
@@ -27,7 +27,6 @@ export declare const AcontextMessageSchema: z.ZodObject<{
|
|
|
27
27
|
role: z.ZodEnum<{
|
|
28
28
|
user: "user";
|
|
29
29
|
assistant: "assistant";
|
|
30
|
-
system: "system";
|
|
31
30
|
}>;
|
|
32
31
|
parts: z.ZodArray<z.ZodObject<{
|
|
33
32
|
type: z.ZodString;
|
|
@@ -39,14 +38,14 @@ export declare const AcontextMessageSchema: z.ZodObject<{
|
|
|
39
38
|
}, z.core.$strip>;
|
|
40
39
|
export type AcontextMessageInput = z.infer<typeof AcontextMessageSchema>;
|
|
41
40
|
export declare class AcontextMessage {
|
|
42
|
-
role: 'user' | 'assistant'
|
|
41
|
+
role: 'user' | 'assistant';
|
|
43
42
|
parts: MessagePart[];
|
|
44
43
|
meta?: Record<string, unknown> | null;
|
|
45
44
|
constructor(data: AcontextMessageInput);
|
|
46
45
|
toJSON(): AcontextMessageInput;
|
|
47
46
|
}
|
|
48
47
|
export declare function buildAcontextMessage(options: {
|
|
49
|
-
role: 'user' | 'assistant'
|
|
48
|
+
role: 'user' | 'assistant';
|
|
50
49
|
parts: (MessagePart | string | MessagePartInput)[];
|
|
51
50
|
meta?: Record<string, unknown> | null;
|
|
52
51
|
}): AcontextMessage;
|
package/dist/messages.js
CHANGED
|
@@ -44,7 +44,7 @@ class MessagePart {
|
|
|
44
44
|
}
|
|
45
45
|
exports.MessagePart = MessagePart;
|
|
46
46
|
exports.AcontextMessageSchema = zod_1.z.object({
|
|
47
|
-
role: zod_1.z.enum(['user', 'assistant'
|
|
47
|
+
role: zod_1.z.enum(['user', 'assistant']),
|
|
48
48
|
parts: zod_1.z.array(exports.MessagePartSchema),
|
|
49
49
|
meta: zod_1.z.record(zod_1.z.string(), zod_1.z.unknown()).nullable().optional(),
|
|
50
50
|
});
|
|
@@ -64,8 +64,8 @@ class AcontextMessage {
|
|
|
64
64
|
}
|
|
65
65
|
exports.AcontextMessage = AcontextMessage;
|
|
66
66
|
function buildAcontextMessage(options) {
|
|
67
|
-
if (!['user', 'assistant'
|
|
68
|
-
throw new Error("role must be one of {'user', 'assistant'
|
|
67
|
+
if (!['user', 'assistant'].includes(options.role)) {
|
|
68
|
+
throw new Error("role must be one of {'user', 'assistant'}");
|
|
69
69
|
}
|
|
70
70
|
const normalizedParts = options.parts.map((part) => {
|
|
71
71
|
if (part instanceof MessagePart) {
|
|
@@ -4,7 +4,7 @@
|
|
|
4
4
|
import { RequesterProtocol } from '../client-types';
|
|
5
5
|
import { AcontextMessage } from '../messages';
|
|
6
6
|
import { FileUpload } from '../uploads';
|
|
7
|
-
import { GetMessagesOutput, GetTasksOutput, LearningStatus, ListSessionsOutput, Message, Session, TokenCounts } from '../types';
|
|
7
|
+
import { EditStrategy, GetMessagesOutput, GetTasksOutput, LearningStatus, ListSessionsOutput, Message, Session, TokenCounts } from '../types';
|
|
8
8
|
export type MessageBlob = AcontextMessage | Record<string, unknown>;
|
|
9
9
|
export declare class SessionsAPI {
|
|
10
10
|
private requester;
|
|
@@ -18,6 +18,7 @@ export declare class SessionsAPI {
|
|
|
18
18
|
}): Promise<ListSessionsOutput>;
|
|
19
19
|
create(options?: {
|
|
20
20
|
spaceId?: string | null;
|
|
21
|
+
disableTaskTracking?: boolean | null;
|
|
21
22
|
configs?: Record<string, unknown>;
|
|
22
23
|
}): Promise<Session>;
|
|
23
24
|
delete(sessionId: string): Promise<void>;
|
|
@@ -38,12 +39,29 @@ export declare class SessionsAPI {
|
|
|
38
39
|
fileField?: string | null;
|
|
39
40
|
file?: FileUpload | null;
|
|
40
41
|
}): Promise<Message>;
|
|
42
|
+
/**
|
|
43
|
+
* Get messages for a session.
|
|
44
|
+
*
|
|
45
|
+
* @param sessionId - The UUID of the session.
|
|
46
|
+
* @param options - Options for retrieving messages.
|
|
47
|
+
* @param options.limit - Maximum number of messages to return.
|
|
48
|
+
* @param options.cursor - Cursor for pagination.
|
|
49
|
+
* @param options.withAssetPublicUrl - Whether to include presigned URLs for assets.
|
|
50
|
+
* @param options.format - The format of the messages ('acontext', 'openai', or 'anthropic').
|
|
51
|
+
* @param options.timeDesc - Order by created_at descending if true, ascending if false.
|
|
52
|
+
* @param options.editStrategies - Optional list of edit strategies to apply before format conversion.
|
|
53
|
+
* Examples:
|
|
54
|
+
* - Remove tool results: [{ type: 'remove_tool_result', params: { keep_recent_n_tool_results: 3 } }]
|
|
55
|
+
* - Token limit: [{ type: 'token_limit', params: { limit_tokens: 20000 } }]
|
|
56
|
+
* @returns GetMessagesOutput containing the list of messages and pagination information.
|
|
57
|
+
*/
|
|
41
58
|
getMessages(sessionId: string, options?: {
|
|
42
59
|
limit?: number | null;
|
|
43
60
|
cursor?: string | null;
|
|
44
61
|
withAssetPublicUrl?: boolean | null;
|
|
45
62
|
format?: 'acontext' | 'openai' | 'anthropic';
|
|
46
63
|
timeDesc?: boolean | null;
|
|
64
|
+
editStrategies?: Array<EditStrategy> | null;
|
|
47
65
|
}): Promise<GetMessagesOutput>;
|
|
48
66
|
flush(sessionId: string): Promise<{
|
|
49
67
|
status: number;
|
|
@@ -32,6 +32,9 @@ class SessionsAPI {
|
|
|
32
32
|
if (options?.spaceId) {
|
|
33
33
|
payload.space_id = options.spaceId;
|
|
34
34
|
}
|
|
35
|
+
if (options?.disableTaskTracking !== undefined && options?.disableTaskTracking !== null) {
|
|
36
|
+
payload.disable_task_tracking = options.disableTaskTracking;
|
|
37
|
+
}
|
|
35
38
|
if (options?.configs !== undefined) {
|
|
36
39
|
payload.configs = options.configs;
|
|
37
40
|
}
|
|
@@ -115,6 +118,22 @@ class SessionsAPI {
|
|
|
115
118
|
return types_1.MessageSchema.parse(data);
|
|
116
119
|
}
|
|
117
120
|
}
|
|
121
|
+
/**
|
|
122
|
+
* Get messages for a session.
|
|
123
|
+
*
|
|
124
|
+
* @param sessionId - The UUID of the session.
|
|
125
|
+
* @param options - Options for retrieving messages.
|
|
126
|
+
* @param options.limit - Maximum number of messages to return.
|
|
127
|
+
* @param options.cursor - Cursor for pagination.
|
|
128
|
+
* @param options.withAssetPublicUrl - Whether to include presigned URLs for assets.
|
|
129
|
+
* @param options.format - The format of the messages ('acontext', 'openai', or 'anthropic').
|
|
130
|
+
* @param options.timeDesc - Order by created_at descending if true, ascending if false.
|
|
131
|
+
* @param options.editStrategies - Optional list of edit strategies to apply before format conversion.
|
|
132
|
+
* Examples:
|
|
133
|
+
* - Remove tool results: [{ type: 'remove_tool_result', params: { keep_recent_n_tool_results: 3 } }]
|
|
134
|
+
* - Token limit: [{ type: 'token_limit', params: { limit_tokens: 20000 } }]
|
|
135
|
+
* @returns GetMessagesOutput containing the list of messages and pagination information.
|
|
136
|
+
*/
|
|
118
137
|
async getMessages(sessionId, options) {
|
|
119
138
|
const params = {};
|
|
120
139
|
if (options?.format !== undefined) {
|
|
@@ -126,6 +145,9 @@ class SessionsAPI {
|
|
|
126
145
|
with_asset_public_url: options?.withAssetPublicUrl ?? null,
|
|
127
146
|
time_desc: options?.timeDesc ?? true, // Default to true
|
|
128
147
|
}));
|
|
148
|
+
if (options?.editStrategies !== undefined && options?.editStrategies !== null) {
|
|
149
|
+
params.edit_strategies = JSON.stringify(options.editStrategies);
|
|
150
|
+
}
|
|
129
151
|
const data = await this.requester.request('GET', `/session/${sessionId}/messages`, {
|
|
130
152
|
params: Object.keys(params).length > 0 ? params : undefined,
|
|
131
153
|
});
|
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
* Spaces endpoints.
|
|
3
3
|
*/
|
|
4
4
|
import { RequesterProtocol } from '../client-types';
|
|
5
|
-
import { ExperienceConfirmation, ListExperienceConfirmationsOutput, ListSpacesOutput,
|
|
5
|
+
import { ExperienceConfirmation, ListExperienceConfirmationsOutput, ListSpacesOutput, Space, SpaceSearchResult } from '../types';
|
|
6
6
|
export declare class SpacesAPI {
|
|
7
7
|
private requester;
|
|
8
8
|
constructor(requester: RequesterProtocol);
|
|
@@ -37,36 +37,6 @@ export declare class SpacesAPI {
|
|
|
37
37
|
semanticThreshold?: number | null;
|
|
38
38
|
maxIterations?: number | null;
|
|
39
39
|
}): Promise<SpaceSearchResult>;
|
|
40
|
-
/**
|
|
41
|
-
* Perform semantic glob (glob) search for page/folder titles.
|
|
42
|
-
*
|
|
43
|
-
* Searches specifically for page/folder titles using semantic similarity,
|
|
44
|
-
* similar to a semantic version of the glob command.
|
|
45
|
-
*
|
|
46
|
-
* @param spaceId - The UUID of the space
|
|
47
|
-
* @param options - Search options
|
|
48
|
-
* @returns List of SearchResultBlockItem objects matching the query
|
|
49
|
-
*/
|
|
50
|
-
semanticGlobal(spaceId: string, options: {
|
|
51
|
-
query: string;
|
|
52
|
-
limit?: number | null;
|
|
53
|
-
threshold?: number | null;
|
|
54
|
-
}): Promise<SearchResultBlockItem[]>;
|
|
55
|
-
/**
|
|
56
|
-
* Perform semantic grep search for content blocks.
|
|
57
|
-
*
|
|
58
|
-
* Searches through content blocks (actual text content) using semantic similarity,
|
|
59
|
-
* similar to a semantic version of the grep command.
|
|
60
|
-
*
|
|
61
|
-
* @param spaceId - The UUID of the space
|
|
62
|
-
* @param options - Search options
|
|
63
|
-
* @returns List of SearchResultBlockItem objects matching the query
|
|
64
|
-
*/
|
|
65
|
-
semanticGrep(spaceId: string, options: {
|
|
66
|
-
query: string;
|
|
67
|
-
limit?: number | null;
|
|
68
|
-
threshold?: number | null;
|
|
69
|
-
}): Promise<SearchResultBlockItem[]>;
|
|
70
40
|
/**
|
|
71
41
|
* Get all unconfirmed experiences in a space with cursor-based pagination.
|
|
72
42
|
*
|
package/dist/resources/spaces.js
CHANGED
|
@@ -66,44 +66,6 @@ class SpacesAPI {
|
|
|
66
66
|
const data = await this.requester.request('GET', `/space/${spaceId}/experience_search`, { params: Object.keys(params).length > 0 ? params : undefined });
|
|
67
67
|
return types_1.SpaceSearchResultSchema.parse(data);
|
|
68
68
|
}
|
|
69
|
-
/**
|
|
70
|
-
* Perform semantic glob (glob) search for page/folder titles.
|
|
71
|
-
*
|
|
72
|
-
* Searches specifically for page/folder titles using semantic similarity,
|
|
73
|
-
* similar to a semantic version of the glob command.
|
|
74
|
-
*
|
|
75
|
-
* @param spaceId - The UUID of the space
|
|
76
|
-
* @param options - Search options
|
|
77
|
-
* @returns List of SearchResultBlockItem objects matching the query
|
|
78
|
-
*/
|
|
79
|
-
async semanticGlobal(spaceId, options) {
|
|
80
|
-
const params = (0, utils_1.buildParams)({
|
|
81
|
-
query: options.query,
|
|
82
|
-
limit: options.limit ?? null,
|
|
83
|
-
threshold: options.threshold ?? null,
|
|
84
|
-
});
|
|
85
|
-
const data = await this.requester.request('GET', `/space/${spaceId}/semantic_glob`, { params: Object.keys(params).length > 0 ? params : undefined });
|
|
86
|
-
return data.map((item) => types_1.SearchResultBlockItemSchema.parse(item));
|
|
87
|
-
}
|
|
88
|
-
/**
|
|
89
|
-
* Perform semantic grep search for content blocks.
|
|
90
|
-
*
|
|
91
|
-
* Searches through content blocks (actual text content) using semantic similarity,
|
|
92
|
-
* similar to a semantic version of the grep command.
|
|
93
|
-
*
|
|
94
|
-
* @param spaceId - The UUID of the space
|
|
95
|
-
* @param options - Search options
|
|
96
|
-
* @returns List of SearchResultBlockItem objects matching the query
|
|
97
|
-
*/
|
|
98
|
-
async semanticGrep(spaceId, options) {
|
|
99
|
-
const params = (0, utils_1.buildParams)({
|
|
100
|
-
query: options.query,
|
|
101
|
-
limit: options.limit ?? null,
|
|
102
|
-
threshold: options.threshold ?? null,
|
|
103
|
-
});
|
|
104
|
-
const data = await this.requester.request('GET', `/space/${spaceId}/semantic_grep`, { params: Object.keys(params).length > 0 ? params : undefined });
|
|
105
|
-
return data.map((item) => types_1.SearchResultBlockItemSchema.parse(item));
|
|
106
|
-
}
|
|
107
69
|
/**
|
|
108
70
|
* Get all unconfirmed experiences in a space with cursor-based pagination.
|
|
109
71
|
*
|
|
@@ -133,7 +95,7 @@ class SpacesAPI {
|
|
|
133
95
|
*/
|
|
134
96
|
async confirmExperience(spaceId, experienceId, options) {
|
|
135
97
|
const payload = { save: options.save };
|
|
136
|
-
const data = await this.requester.request('
|
|
98
|
+
const data = await this.requester.request('PUT', `/space/${spaceId}/experience_confirmations/${experienceId}`, { jsonData: payload });
|
|
137
99
|
if (data === null || data === undefined) {
|
|
138
100
|
return null;
|
|
139
101
|
}
|
package/dist/types/session.d.ts
CHANGED
|
@@ -55,6 +55,7 @@ export type Message = z.infer<typeof MessageSchema>;
|
|
|
55
55
|
export declare const SessionSchema: z.ZodObject<{
|
|
56
56
|
id: z.ZodString;
|
|
57
57
|
project_id: z.ZodString;
|
|
58
|
+
disable_task_tracking: z.ZodBoolean;
|
|
58
59
|
space_id: z.ZodNullable<z.ZodString>;
|
|
59
60
|
configs: z.ZodNullable<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
|
|
60
61
|
created_at: z.ZodString;
|
|
@@ -78,6 +79,7 @@ export declare const ListSessionsOutputSchema: z.ZodObject<{
|
|
|
78
79
|
items: z.ZodArray<z.ZodObject<{
|
|
79
80
|
id: z.ZodString;
|
|
80
81
|
project_id: z.ZodString;
|
|
82
|
+
disable_task_tracking: z.ZodBoolean;
|
|
81
83
|
space_id: z.ZodNullable<z.ZodString>;
|
|
82
84
|
configs: z.ZodNullable<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
|
|
83
85
|
created_at: z.ZodString;
|
|
@@ -128,3 +130,64 @@ export declare const TokenCountsSchema: z.ZodObject<{
|
|
|
128
130
|
total_tokens: z.ZodNumber;
|
|
129
131
|
}, z.core.$strip>;
|
|
130
132
|
export type TokenCounts = z.infer<typeof TokenCountsSchema>;
|
|
133
|
+
/**
|
|
134
|
+
* Parameters for the remove_tool_result edit strategy.
|
|
135
|
+
*/
|
|
136
|
+
export declare const RemoveToolResultParamsSchema: z.ZodObject<{
|
|
137
|
+
keep_recent_n_tool_results: z.ZodOptional<z.ZodNumber>;
|
|
138
|
+
tool_result_placeholder: z.ZodOptional<z.ZodString>;
|
|
139
|
+
}, z.core.$strip>;
|
|
140
|
+
export type RemoveToolResultParams = z.infer<typeof RemoveToolResultParamsSchema>;
|
|
141
|
+
/**
|
|
142
|
+
* Edit strategy to replace old tool results with placeholder text.
|
|
143
|
+
*
|
|
144
|
+
* Example: { type: 'remove_tool_result', params: { keep_recent_n_tool_results: 5, tool_result_placeholder: 'Cleared' } }
|
|
145
|
+
*/
|
|
146
|
+
export declare const RemoveToolResultStrategySchema: z.ZodObject<{
|
|
147
|
+
type: z.ZodLiteral<"remove_tool_result">;
|
|
148
|
+
params: z.ZodObject<{
|
|
149
|
+
keep_recent_n_tool_results: z.ZodOptional<z.ZodNumber>;
|
|
150
|
+
tool_result_placeholder: z.ZodOptional<z.ZodString>;
|
|
151
|
+
}, z.core.$strip>;
|
|
152
|
+
}, z.core.$strip>;
|
|
153
|
+
export type RemoveToolResultStrategy = z.infer<typeof RemoveToolResultStrategySchema>;
|
|
154
|
+
/**
|
|
155
|
+
* Parameters for the token_limit edit strategy.
|
|
156
|
+
*/
|
|
157
|
+
export declare const TokenLimitParamsSchema: z.ZodObject<{
|
|
158
|
+
limit_tokens: z.ZodNumber;
|
|
159
|
+
}, z.core.$strip>;
|
|
160
|
+
export type TokenLimitParams = z.infer<typeof TokenLimitParamsSchema>;
|
|
161
|
+
/**
|
|
162
|
+
* Edit strategy to truncate messages based on token count.
|
|
163
|
+
*
|
|
164
|
+
* Removes oldest messages until the total token count is within the specified limit.
|
|
165
|
+
* Maintains tool-call/tool-result pairing - when removing a message with tool-calls,
|
|
166
|
+
* the corresponding tool-result messages are also removed.
|
|
167
|
+
*
|
|
168
|
+
* Example: { type: 'token_limit', params: { limit_tokens: 20000 } }
|
|
169
|
+
*/
|
|
170
|
+
export declare const TokenLimitStrategySchema: z.ZodObject<{
|
|
171
|
+
type: z.ZodLiteral<"token_limit">;
|
|
172
|
+
params: z.ZodObject<{
|
|
173
|
+
limit_tokens: z.ZodNumber;
|
|
174
|
+
}, z.core.$strip>;
|
|
175
|
+
}, z.core.$strip>;
|
|
176
|
+
export type TokenLimitStrategy = z.infer<typeof TokenLimitStrategySchema>;
|
|
177
|
+
/**
|
|
178
|
+
* Union schema for all edit strategies.
|
|
179
|
+
* When adding new strategies, extend this union: z.union([RemoveToolResultStrategySchema, OtherStrategySchema, ...])
|
|
180
|
+
*/
|
|
181
|
+
export declare const EditStrategySchema: z.ZodUnion<readonly [z.ZodObject<{
|
|
182
|
+
type: z.ZodLiteral<"remove_tool_result">;
|
|
183
|
+
params: z.ZodObject<{
|
|
184
|
+
keep_recent_n_tool_results: z.ZodOptional<z.ZodNumber>;
|
|
185
|
+
tool_result_placeholder: z.ZodOptional<z.ZodString>;
|
|
186
|
+
}, z.core.$strip>;
|
|
187
|
+
}, z.core.$strip>, z.ZodObject<{
|
|
188
|
+
type: z.ZodLiteral<"token_limit">;
|
|
189
|
+
params: z.ZodObject<{
|
|
190
|
+
limit_tokens: z.ZodNumber;
|
|
191
|
+
}, z.core.$strip>;
|
|
192
|
+
}, z.core.$strip>]>;
|
|
193
|
+
export type EditStrategy = z.infer<typeof EditStrategySchema>;
|
package/dist/types/session.js
CHANGED
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
* Type definitions for session, message, and task resources.
|
|
4
4
|
*/
|
|
5
5
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
-
exports.TokenCountsSchema = exports.LearningStatusSchema = exports.GetTasksOutputSchema = exports.GetMessagesOutputSchema = exports.PublicURLSchema = exports.ListSessionsOutputSchema = exports.TaskSchema = exports.SessionSchema = exports.MessageSchema = exports.PartSchema = exports.AssetSchema = void 0;
|
|
6
|
+
exports.EditStrategySchema = exports.TokenLimitStrategySchema = exports.TokenLimitParamsSchema = exports.RemoveToolResultStrategySchema = exports.RemoveToolResultParamsSchema = exports.TokenCountsSchema = exports.LearningStatusSchema = exports.GetTasksOutputSchema = exports.GetMessagesOutputSchema = exports.PublicURLSchema = exports.ListSessionsOutputSchema = exports.TaskSchema = exports.SessionSchema = exports.MessageSchema = exports.PartSchema = exports.AssetSchema = void 0;
|
|
7
7
|
const zod_1 = require("zod");
|
|
8
8
|
exports.AssetSchema = zod_1.z.object({
|
|
9
9
|
bucket: zod_1.z.string(),
|
|
@@ -35,6 +35,7 @@ exports.MessageSchema = zod_1.z.object({
|
|
|
35
35
|
exports.SessionSchema = zod_1.z.object({
|
|
36
36
|
id: zod_1.z.string(),
|
|
37
37
|
project_id: zod_1.z.string(),
|
|
38
|
+
disable_task_tracking: zod_1.z.boolean(),
|
|
38
39
|
space_id: zod_1.z.string().nullable(),
|
|
39
40
|
configs: zod_1.z.record(zod_1.z.string(), zod_1.z.unknown()).nullable(),
|
|
40
41
|
created_at: zod_1.z.string(),
|
|
@@ -79,3 +80,59 @@ exports.LearningStatusSchema = zod_1.z.object({
|
|
|
79
80
|
exports.TokenCountsSchema = zod_1.z.object({
|
|
80
81
|
total_tokens: zod_1.z.number(),
|
|
81
82
|
});
|
|
83
|
+
/**
|
|
84
|
+
* Parameters for the remove_tool_result edit strategy.
|
|
85
|
+
*/
|
|
86
|
+
exports.RemoveToolResultParamsSchema = zod_1.z.object({
|
|
87
|
+
/**
|
|
88
|
+
* Number of most recent tool results to keep with original content.
|
|
89
|
+
* @default 3
|
|
90
|
+
*/
|
|
91
|
+
keep_recent_n_tool_results: zod_1.z.number().optional(),
|
|
92
|
+
/**
|
|
93
|
+
* Custom text to replace old tool results with.
|
|
94
|
+
* @default "Done"
|
|
95
|
+
*/
|
|
96
|
+
tool_result_placeholder: zod_1.z.string().optional(),
|
|
97
|
+
});
|
|
98
|
+
/**
|
|
99
|
+
* Edit strategy to replace old tool results with placeholder text.
|
|
100
|
+
*
|
|
101
|
+
* Example: { type: 'remove_tool_result', params: { keep_recent_n_tool_results: 5, tool_result_placeholder: 'Cleared' } }
|
|
102
|
+
*/
|
|
103
|
+
exports.RemoveToolResultStrategySchema = zod_1.z.object({
|
|
104
|
+
type: zod_1.z.literal('remove_tool_result'),
|
|
105
|
+
params: exports.RemoveToolResultParamsSchema,
|
|
106
|
+
});
|
|
107
|
+
/**
|
|
108
|
+
* Parameters for the token_limit edit strategy.
|
|
109
|
+
*/
|
|
110
|
+
exports.TokenLimitParamsSchema = zod_1.z.object({
|
|
111
|
+
/**
|
|
112
|
+
* Maximum number of tokens to keep. Required parameter.
|
|
113
|
+
* Messages will be removed from oldest to newest until total tokens <= limit_tokens.
|
|
114
|
+
* Tool-call and tool-result pairs are always removed together.
|
|
115
|
+
*/
|
|
116
|
+
limit_tokens: zod_1.z.number(),
|
|
117
|
+
});
|
|
118
|
+
/**
|
|
119
|
+
* Edit strategy to truncate messages based on token count.
|
|
120
|
+
*
|
|
121
|
+
* Removes oldest messages until the total token count is within the specified limit.
|
|
122
|
+
* Maintains tool-call/tool-result pairing - when removing a message with tool-calls,
|
|
123
|
+
* the corresponding tool-result messages are also removed.
|
|
124
|
+
*
|
|
125
|
+
* Example: { type: 'token_limit', params: { limit_tokens: 20000 } }
|
|
126
|
+
*/
|
|
127
|
+
exports.TokenLimitStrategySchema = zod_1.z.object({
|
|
128
|
+
type: zod_1.z.literal('token_limit'),
|
|
129
|
+
params: exports.TokenLimitParamsSchema,
|
|
130
|
+
});
|
|
131
|
+
/**
|
|
132
|
+
* Union schema for all edit strategies.
|
|
133
|
+
* When adding new strategies, extend this union: z.union([RemoveToolResultStrategySchema, OtherStrategySchema, ...])
|
|
134
|
+
*/
|
|
135
|
+
exports.EditStrategySchema = zod_1.z.union([
|
|
136
|
+
exports.RemoveToolResultStrategySchema,
|
|
137
|
+
exports.TokenLimitStrategySchema,
|
|
138
|
+
]);
|