@augmentcode/auggie-sdk 0.1.13 → 0.1.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +94 -0
- package/dist/auggie/ai-sdk-provider.d.ts +106 -0
- package/dist/auggie/ai-sdk-provider.js +515 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +7 -1
- package/package.json +4 -3
package/README.md
CHANGED
|
@@ -4,6 +4,15 @@ A TypeScript SDK for building AI-powered developer tools with Auggie.
|
|
|
4
4
|
|
|
5
5
|
## Features
|
|
6
6
|
|
|
7
|
+
### 🎯 AI SDK Provider (Vercel AI SDK)
|
|
8
|
+
**Requires: API credentials only (no local Auggie needed)**
|
|
9
|
+
|
|
10
|
+
- Use Augment as a language model provider with Vercel's AI SDK
|
|
11
|
+
- Compatible with `generateText`, `streamText`, and other AI SDK functions
|
|
12
|
+
- Full support for tool calling (function calling) with automatic execution
|
|
13
|
+
- Multi-turn conversations with context retention
|
|
14
|
+
- Streaming responses for real-time output
|
|
15
|
+
|
|
7
16
|
### 🤖 Agent Interaction (ACP)
|
|
8
17
|
**Requires: Local Auggie installation**
|
|
9
18
|
|
|
@@ -55,6 +64,91 @@ pnpm add /path/to/auggie-sdk
|
|
|
55
64
|
|
|
56
65
|
## Usage
|
|
57
66
|
|
|
67
|
+
### AI SDK Provider (Vercel AI SDK)
|
|
68
|
+
|
|
69
|
+
**✅ No Local Auggie Required - API Only**
|
|
70
|
+
|
|
71
|
+
Use Augment as a language model provider with Vercel's AI SDK:
|
|
72
|
+
|
|
73
|
+
```typescript
|
|
74
|
+
import { AugmentLanguageModel, resolveAugmentCredentials } from "@augmentcode/auggie-sdk";
|
|
75
|
+
import { generateText } from "ai";
|
|
76
|
+
|
|
77
|
+
// Resolve credentials from environment or ~/.augment/session.json
|
|
78
|
+
const credentials = await resolveAugmentCredentials();
|
|
79
|
+
|
|
80
|
+
// Create the Augment language model
|
|
81
|
+
const model = new AugmentLanguageModel("claude-sonnet-4-5", credentials);
|
|
82
|
+
|
|
83
|
+
// Use with AI SDK functions
|
|
84
|
+
const { text } = await generateText({
|
|
85
|
+
model,
|
|
86
|
+
prompt: "Explain TypeScript in one sentence.",
|
|
87
|
+
});
|
|
88
|
+
|
|
89
|
+
console.log(text);
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
#### Streaming Responses
|
|
93
|
+
|
|
94
|
+
```typescript
|
|
95
|
+
import { streamText } from "ai";
|
|
96
|
+
|
|
97
|
+
const { textStream } = await streamText({
|
|
98
|
+
model,
|
|
99
|
+
prompt: "Write a haiku about coding.",
|
|
100
|
+
});
|
|
101
|
+
|
|
102
|
+
for await (const chunk of textStream) {
|
|
103
|
+
process.stdout.write(chunk);
|
|
104
|
+
}
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
#### Tool Calling (Function Calling)
|
|
108
|
+
|
|
109
|
+
```typescript
|
|
110
|
+
import { generateText, tool } from "ai";
|
|
111
|
+
import { z } from "zod";
|
|
112
|
+
|
|
113
|
+
const weatherTool = tool({
|
|
114
|
+
description: "Get the current weather in a location",
|
|
115
|
+
parameters: z.object({
|
|
116
|
+
location: z.string(),
|
|
117
|
+
}),
|
|
118
|
+
execute: async ({ location }) => {
|
|
119
|
+
// Your implementation
|
|
120
|
+
return { temperature: 72, conditions: "sunny" };
|
|
121
|
+
},
|
|
122
|
+
});
|
|
123
|
+
|
|
124
|
+
const { text } = await generateText({
|
|
125
|
+
model,
|
|
126
|
+
tools: {
|
|
127
|
+
getWeather: weatherTool,
|
|
128
|
+
},
|
|
129
|
+
maxSteps: 5, // Enable multi-step agentic loop
|
|
130
|
+
prompt: "What's the weather in San Francisco?",
|
|
131
|
+
});
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
#### Multi-turn Conversations
|
|
135
|
+
|
|
136
|
+
```typescript
|
|
137
|
+
import { type CoreMessage } from "ai";
|
|
138
|
+
|
|
139
|
+
const messages: CoreMessage[] = [
|
|
140
|
+
{ role: "user", content: "What is TypeScript?" },
|
|
141
|
+
];
|
|
142
|
+
|
|
143
|
+
const response1 = await generateText({ model, messages });
|
|
144
|
+
messages.push({ role: "assistant", content: response1.text });
|
|
145
|
+
|
|
146
|
+
messages.push({ role: "user", content: "What are its benefits?" });
|
|
147
|
+
const response2 = await generateText({ model, messages });
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
**See [examples/ai-sdk-README.md](examples/ai-sdk-README.md) for more detailed examples.**
|
|
151
|
+
|
|
58
152
|
### Agent Interaction (ACP)
|
|
59
153
|
|
|
60
154
|
**⚠️ Requires Local Auggie Installation**
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2StreamPart } from '@ai-sdk/provider';
|
|
2
|
+
import { ResolvedCredentials } from '../context/internal/credentials.js';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Augment Language Model - AI SDK LanguageModelV2 implementation using Augment's API.
|
|
6
|
+
*
|
|
7
|
+
* This module provides an implementation of the AI SDK's LanguageModelV2 interface
|
|
8
|
+
* that uses Augment's chat API. It enables using Augment as a model provider with
|
|
9
|
+
* generateText/streamText from the Vercel AI SDK.
|
|
10
|
+
*
|
|
11
|
+
* Supports:
|
|
12
|
+
* - Text generation
|
|
13
|
+
* - Tool calling (function tools)
|
|
14
|
+
* - Multi-turn conversations with tool results
|
|
15
|
+
* - Streaming responses
|
|
16
|
+
*
|
|
17
|
+
* @module auggie/ai-sdk-provider
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
/**
 * Configuration for the Augment language model.
 *
 * Passed as the second argument to the `AugmentLanguageModel` constructor.
 * The object returned by `resolveAugmentCredentials()` is used this way in
 * the examples, so it is expected to satisfy this shape.
 */
interface AugmentLanguageModelConfig {
    /** API key for authentication (sent as a Bearer token). */
    apiKey: string;
    /** Augment API base URL; a trailing slash is tolerated (stripped by the model). */
    apiUrl: string;
    /** Enable debug logging to the console. Defaults to false. */
    debug?: boolean;
}
|
|
29
|
+
/**
 * Resolved credentials for the Augment API (re-exported for convenience).
 * Alias of the internal `ResolvedCredentials` type returned by
 * `resolveAugmentCredentials()`.
 */
type AugmentCredentials = ResolvedCredentials;
|
|
31
|
+
/**
 * Resolve Augment API credentials from environment variables or session file.
 *
 * Priority order:
 * 1. AUGMENT_API_TOKEN and AUGMENT_API_URL environment variables
 * 2. ~/.augment/session.json (created by `auggie login`)
 *
 * Pass the result as the second constructor argument of
 * {@link AugmentLanguageModel}.
 *
 * @returns Resolved credentials
 * @throws Error if credentials cannot be resolved
 */
declare function resolveAugmentCredentials(): Promise<AugmentCredentials>;
|
|
42
|
+
/**
|
|
43
|
+
* Augment Language Model implementation for the AI SDK.
|
|
44
|
+
*
|
|
45
|
+
* Implements LanguageModelV2 to enable use with generateText/streamText.
|
|
46
|
+
* Supports text generation, tool calling, and multi-turn conversations.
|
|
47
|
+
*
|
|
48
|
+
* @example
|
|
49
|
+
* ```typescript
|
|
50
|
+
* import { AugmentLanguageModel, resolveAugmentCredentials } from "@augmentcode/auggie-sdk";
|
|
51
|
+
* import { generateText } from "ai";
|
|
52
|
+
*
|
|
53
|
+
* const credentials = await resolveAugmentCredentials();
|
|
54
|
+
* const model = new AugmentLanguageModel("sonnet4.5", credentials);
|
|
55
|
+
*
|
|
56
|
+
* const { text } = await generateText({
|
|
57
|
+
* model,
|
|
58
|
+
* prompt: "Hello, world!",
|
|
59
|
+
* });
|
|
60
|
+
* ```
|
|
61
|
+
*
|
|
62
|
+
* @example Tool calling with agentic loop
|
|
63
|
+
* ```typescript
|
|
64
|
+
* import { generateText, tool } from "ai";
|
|
65
|
+
* import { z } from "zod";
|
|
66
|
+
*
|
|
67
|
+
* const { text, toolCalls } = await generateText({
|
|
68
|
+
* model,
|
|
69
|
+
* tools: {
|
|
70
|
+
* search: tool({
|
|
71
|
+
* description: "Search for information",
|
|
72
|
+
* parameters: z.object({ query: z.string() }),
|
|
73
|
+
* execute: async ({ query }) => searchDatabase(query),
|
|
74
|
+
* }),
|
|
75
|
+
* },
|
|
76
|
+
* maxSteps: 10,
|
|
77
|
+
* prompt: "Find information about...",
|
|
78
|
+
* });
|
|
79
|
+
* ```
|
|
80
|
+
*/
|
|
81
|
+
declare class AugmentLanguageModel implements LanguageModelV2 {
    /** AI SDK specification version implemented by this provider. */
    readonly specificationVersion = "v2";
    /** Provider identifier reported to the AI SDK. */
    readonly provider = "augment";
    /** Model identifier forwarded to the Augment chat API. */
    readonly modelId: string;
    private readonly apiKey;
    private readonly apiUrl;
    /** Session/conversation id generated once per model instance. */
    private readonly sessionId;
    private readonly debug;
    constructor(modelId: string, config: AugmentLanguageModelConfig);
    private log;
    private getHeaders;
    private buildPayload;
    /** Non-streaming generation: drains the server's chat stream and returns the collected result. */
    doGenerate(options: LanguageModelV2CallOptions): ReturnType<LanguageModelV2["doGenerate"]>;
    /**
     * Parse streaming response and collect content, usage, and finish reason.
     */
    private parseStreamResponse;
    /** Streaming generation: emits AI SDK stream parts as server chunks arrive. */
    doStream(options: LanguageModelV2CallOptions): PromiseLike<{
        stream: ReadableStream<LanguageModelV2StreamPart>;
    }>;
    readonly supportsImageUrls = false;
    readonly supportsStructuredOutputs = false;
    readonly supportedUrls: {};
}
|
|
105
|
+
|
|
106
|
+
export { type AugmentCredentials, AugmentLanguageModel, type AugmentLanguageModelConfig, resolveAugmentCredentials };
|
|
@@ -0,0 +1,515 @@
|
|
|
1
|
+
import { v4 as uuidv4 } from "uuid";
|
|
2
|
+
import { resolveCredentials } from "../context/internal/credentials.js";
|
|
3
|
+
/**
 * Resolve Augment API credentials (env vars, then ~/.augment/session.json).
 * Thin public wrapper around the internal resolveCredentials helper.
 */
async function resolveAugmentCredentials() {
  const credentials = await resolveCredentials();
  return credentials;
}
|
|
6
|
+
// Reverse-mapped enum of request node kinds sent to the chat API
// (each name maps to its number and each number back to its name).
var ChatRequestNodeType = /* @__PURE__ */ ((registry) => {
  const table = {
    TEXT: 0,
    TOOL_RESULT: 1,
    IMAGE: 2,
    IMAGE_ID: 3,
    IDE_STATE: 4,
    EDIT_EVENTS: 5
  };
  for (const [name, value] of Object.entries(table)) {
    registry[name] = value;
    registry[value] = name;
  }
  return registry;
})(ChatRequestNodeType || {});
|
|
15
|
+
// Reverse-mapped enum of response node kinds received from the chat API.
// NOTE: the numeric value 6 is intentionally absent (TOOL_USE=5 jumps to
// TOOL_USE_START=7), matching the wire protocol as published.
var ChatResultNodeType = /* @__PURE__ */ ((registry) => {
  const table = {
    RAW_RESPONSE: 0,
    SUGGESTED_QUESTIONS: 1,
    MAIN_TEXT_FINISHED: 2,
    WORKSPACE_FILE_CHUNKS: 3,
    RELEVANT_SOURCES: 4,
    TOOL_USE: 5,
    TOOL_USE_START: 7,
    THINKING: 8,
    BILLING_METADATA: 9,
    TOKEN_USAGE: 10
  };
  for (const [name, value] of Object.entries(table)) {
    registry[name] = value;
    registry[value] = name;
  }
  return registry;
})(ChatResultNodeType || {});
|
|
28
|
+
// Reverse-mapped enum of stop reasons reported by the chat API.
var ChatStopReason = /* @__PURE__ */ ((registry) => {
  const table = {
    REASON_UNSPECIFIED: 0,
    END_TURN: 1,
    MAX_TOKENS: 2,
    TOOL_USE_REQUESTED: 3,
    SAFETY: 4,
    RECITATION: 5,
    MALFORMED_FUNCTION_CALL: 6
  };
  for (const [name, value] of Object.entries(table)) {
    registry[name] = value;
    registry[value] = name;
  }
  return registry;
})(ChatStopReason || {});
|
|
38
|
+
/**
 * Flatten AI SDK message content to plain text.
 * Strings pass through; arrays contribute only their `{ type: "text" }`
 * parts, concatenated in order; anything else yields "".
 */
function extractText(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return "";
  }
  const pieces = [];
  for (const part of content) {
    const isTextPart = typeof part === "object" && part !== null && part.type === "text";
    if (isTextPart) {
      pieces.push(part.text);
    }
  }
  return pieces.join("");
}
|
|
47
|
+
/**
 * Convert AI SDK tool descriptors into Augment tool definitions.
 * Only `type: "function"` tools are kept; the JSON schema is serialized
 * into the `input_schema_json` string the API expects.
 */
function toolsToDefinitions(tools) {
  const definitions = [];
  if (!tools) {
    return definitions;
  }
  for (const tool of tools) {
    if (tool.type !== "function") {
      continue;
    }
    definitions.push({
      name: tool.name,
      description: tool.description ?? "",
      input_schema_json: JSON.stringify(tool.inputSchema)
    });
  }
  return definitions;
}
|
|
55
|
+
/**
 * Convert a user message into Augment TEXT request nodes.
 * Non-text parts (files, images) are silently ignored. Node ids start at
 * `startId` and increase by one per emitted node.
 * Returns the nodes plus the concatenated plain text.
 */
function userMessageToNodes(msg, startId) {
  if (msg.role !== "user") {
    return { nodes: [], text: "" };
  }
  const textParts = msg.content.filter((part) => part.type === "text");
  const nodes = textParts.map((part, index) => ({
    id: startId + index,
    type: 0 /* TEXT */,
    text_node: { content: part.text }
  }));
  const text = textParts.map((part) => part.text).join("");
  return { nodes, text };
}
|
|
72
|
+
/**
 * Convert a tool-role message into Augment TOOL_RESULT request nodes.
 * Each `tool-result` part is serialized to a string and flagged as an error
 * for the `error-text` / `error-json` output variants. Node ids start at
 * `startId`.
 */
function toolMessageToNodes(msg, startId) {
  if (msg.role !== "tool") {
    return [];
  }
  // Render one tool-result output variant to (content string, error flag).
  const render = (output) => {
    switch (output.type) {
      case "text":
        return { content: output.value, isError: false };
      case "json":
        return { content: JSON.stringify(output.value), isError: false };
      case "error-text":
        return { content: output.value, isError: true };
      case "error-json":
        return { content: JSON.stringify(output.value), isError: true };
      case "content":
        // Multi-part output: keep only text parts, newline-separated.
        return {
          content: output.value.filter((v) => v.type === "text").map((v) => v.text).join("\n"),
          isError: false
        };
      default:
        // Unknown variant: empty, non-error (same as original fallthrough).
        return { content: "", isError: false };
    }
  };
  const nodes = [];
  let nextId = startId;
  for (const part of msg.content) {
    if (part.type !== "tool-result") {
      continue;
    }
    const { content, isError } = render(part.output);
    nodes.push({
      id: nextId++,
      type: 1 /* TOOL_RESULT */,
      tool_result_node: {
        tool_use_id: part.toolCallId,
        content,
        is_error: isError
      }
    });
  }
  return nodes;
}
|
|
107
|
+
/**
 * Convert an assistant message into Augment response nodes (for replaying
 * conversation history to the API). Text parts become RAW_RESPONSE nodes,
 * tool calls become TOOL_USE nodes, reasoning parts become THINKING nodes;
 * any other part type is skipped. Also returns the concatenated plain text.
 */
function assistantMessageToResponseNodes(msg) {
  if (msg.role !== "assistant") {
    return { nodes: [], text: "" };
  }
  const nodes = [];
  let text = "";
  for (const part of msg.content) {
    // Ids are sequential over emitted nodes, so nodes.length is the next id.
    switch (part.type) {
      case "text":
        text += part.text;
        nodes.push({ id: nodes.length, type: 0 /* RAW_RESPONSE */, content: part.text });
        break;
      case "tool-call":
        nodes.push({
          id: nodes.length,
          type: 5 /* TOOL_USE */,
          tool_use: {
            tool_use_id: part.toolCallId,
            tool_name: part.toolName,
            // Tool input may arrive pre-serialized; only stringify objects.
            input_json: typeof part.input === "string" ? part.input : JSON.stringify(part.input)
          }
        });
        break;
      case "reasoning":
        nodes.push({ id: nodes.length, type: 8 /* THINKING */, thinking: { content: part.text } });
        break;
    }
  }
  return { nodes, text };
}
|
|
140
|
+
/**
 * Convert an AI SDK prompt (system/user/tool/assistant messages) plus optional
 * tools into the Augment chat-stream request shape.
 *
 * Accumulates request nodes/text until an assistant message is seen; each
 * assistant message closes the pending request into one chat_history exchange
 * and resets the accumulators. Whatever remains pending at the end becomes the
 * current request (`message` / `nodes`).
 */
function buildChatRequest(prompt, tools) {
  const chatHistory = [];
  // Nodes/text gathered since the last assistant turn; they become the
  // "request" half of the next history exchange, or the final request.
  let pendingRequestNodes = [];
  let pendingRequestText = "";
  let nodeId = 0;
  for (const msg of prompt) {
    if (msg.role === "system") {
      const systemText = extractText(msg.content);
      if (systemText) {
        // System prompts are folded into the request as a prefixed text node.
        pendingRequestNodes.push({
          id: nodeId++,
          type: 0 /* TEXT */,
          text_node: { content: `System: ${systemText}` }
        });
        pendingRequestText += `System: ${systemText}

`;
      }
    } else if (msg.role === "user") {
      const { nodes, text } = userMessageToNodes(msg, nodeId);
      pendingRequestNodes.push(...nodes);
      nodeId += nodes.length;
      // Separate consecutive text segments with a newline, but avoid a
      // leading newline when the accumulator is still empty.
      if (pendingRequestText && text) {
        pendingRequestText += "\n" + text;
      } else {
        pendingRequestText += text;
      }
    } else if (msg.role === "tool") {
      // Tool results contribute nodes only; they have no plain-text form.
      const nodes = toolMessageToNodes(msg, nodeId);
      pendingRequestNodes.push(...nodes);
      nodeId += nodes.length;
    } else if (msg.role === "assistant") {
      // An assistant turn closes the pending request: pair request and
      // response as one completed exchange, then reset the accumulators.
      const { nodes: responseNodes, text: responseText } = assistantMessageToResponseNodes(msg);
      chatHistory.push({
        request_message: pendingRequestText,
        request_nodes: pendingRequestNodes,
        response_text: responseText,
        response_nodes: responseNodes
      });
      pendingRequestNodes = [];
      pendingRequestText = "";
      nodeId = 0;
    }
  }
  // The leftover pending nodes form the live request; renumber ids from 0.
  pendingRequestNodes.forEach((node, i) => {
    node.id = i;
  });
  return {
    message: pendingRequestText,
    nodes: pendingRequestNodes,
    chatHistory,
    toolDefinitions: toolsToDefinitions(tools)
  };
}
|
|
194
|
+
/**
 * Map an Augment ChatStopReason numeric code to an AI SDK finish reason.
 * Unrecognized or missing codes (including REASON_UNSPECIFIED = 0 and an
 * undefined stop reason) map to "unknown".
 */
function stopReasonToFinishReason(stopReason) {
  const mapping = {
    1: "stop",            // END_TURN
    2: "length",          // MAX_TOKENS
    3: "tool-calls",      // TOOL_USE_REQUESTED
    4: "content-filter",  // SAFETY
    5: "content-filter",  // RECITATION
    6: "error"            // MALFORMED_FUNCTION_CALL
  };
  return mapping[stopReason] ?? "unknown";
}
|
|
211
|
+
/**
 * Convert a single Augment response node to an AI SDK content part.
 * TOOL_USE nodes become tool-call parts (empty input defaults to "{}"),
 * THINKING nodes become reasoning parts when they carry any text; every
 * other node kind yields null.
 */
function responseNodeToContent(node) {
  const isToolUse = node.type === 5 /* TOOL_USE */ && node.tool_use;
  if (isToolUse) {
    const { tool_use_id, tool_name, input_json } = node.tool_use;
    return {
      type: "tool-call",
      toolCallId: tool_use_id,
      toolName: tool_name,
      input: input_json || "{}"
    };
  }
  const isThinking = node.type === 8 /* THINKING */ && node.thinking;
  if (isThinking) {
    const text = node.thinking.content || node.thinking.summary || "";
    return text ? { type: "reasoning", text } : null;
  }
  return null;
}
|
|
228
|
+
/**
 * AI SDK LanguageModelV2 implementation backed by Augment's chat-stream API.
 *
 * Both doGenerate and doStream POST the same newline-delimited-JSON endpoint;
 * doGenerate drains the stream into a single result, doStream re-emits server
 * chunks as AI SDK stream parts.
 */
class AugmentLanguageModel {
  specificationVersion = "v2";
  provider = "augment";
  modelId;
  apiKey;
  apiUrl;
  // One session/conversation id per model instance; reused for every request.
  sessionId;
  debug;
  constructor(modelId, config) {
    this.modelId = modelId;
    this.apiKey = config.apiKey;
    // Normalize the base URL so `${apiUrl}/chat-stream` never doubles a slash.
    this.apiUrl = config.apiUrl.endsWith("/") ? config.apiUrl.slice(0, -1) : config.apiUrl;
    this.sessionId = uuidv4();
    this.debug = config.debug ?? false;
  }
  // Debug-gated console logger.
  log(message) {
    if (this.debug) {
      console.log(`[Augment] ${message}`);
    }
  }
  // Request headers; session id doubles as the conversation id.
  getHeaders(requestId) {
    return {
      "Content-Type": "application/json",
      Authorization: `Bearer ${this.apiKey}`,
      "X-Request-Session-Id": this.sessionId,
      "X-Request-Id": requestId,
      "conversation-id": this.sessionId,
      "X-Mode": "sdk",
      "User-Agent": "auggie-sdk/1.0.0 (typescript)"
    };
  }
  // Translate AI SDK call options into the Augment chat request payload.
  buildPayload(options) {
    const { message, nodes, chatHistory, toolDefinitions } = buildChatRequest(
      options.prompt,
      options.tools
    );
    return {
      mode: "CLI_AGENT",
      model: this.modelId,
      message,
      nodes,
      chat_history: chatHistory,
      conversation_id: this.sessionId,
      // Omit the field entirely when no tools are defined.
      tool_definitions: toolDefinitions.length > 0 ? toolDefinitions : void 0
    };
  }
  // Non-streaming generation: issues the streaming request and collects it.
  doGenerate(options) {
    const run = async () => {
      const requestId = uuidv4();
      const url = `${this.apiUrl}/chat-stream`;
      const payload = this.buildPayload(options);
      this.log(`POST ${url}`);
      if (this.debug && payload.tool_definitions) {
        this.log(`Tools: ${payload.tool_definitions.map((t) => t.name).join(", ")}`);
      }
      const response = await fetch(url, {
        method: "POST",
        headers: this.getHeaders(requestId),
        body: JSON.stringify(payload),
        signal: options.abortSignal
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Augment API error: ${response.status} ${response.statusText} - ${errorText}`);
      }
      if (!response.body) {
        throw new Error("Response body is null");
      }
      const { content, usage, finishReason } = await this.parseStreamResponse(response.body);
      return {
        content,
        finishReason,
        usage,
        warnings: []
      };
    };
    return run();
  }
  /**
   * Parse streaming response and collect content, usage, and finish reason.
   * The body is newline-delimited JSON; each line may carry text, result
   * nodes, and/or a stop reason.
   */
  async parseStreamResponse(body) {
    const reader = body.getReader();
    const decoder = new TextDecoder();
    let textBuffer = "";
    let accumulatedText = "";
    const content = [];
    let usage = { inputTokens: void 0, outputTokens: void 0, totalTokens: void 0 };
    let stopReason;
    // Servers may repeat a tool_use node across chunks; emit each id once.
    const toolCallsEmitted = /* @__PURE__ */ new Set();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        textBuffer += decoder.decode(value, { stream: true });
        // Process every complete line currently buffered.
        while (textBuffer.includes("\n")) {
          const newLineIndex = textBuffer.indexOf("\n");
          const line = textBuffer.substring(0, newLineIndex);
          textBuffer = textBuffer.substring(newLineIndex + 1);
          const trimmed = line.trim();
          if (trimmed) {
            try {
              const chunk = JSON.parse(trimmed);
              if (chunk.text) {
                accumulatedText += chunk.text;
              }
              if (chunk.nodes) {
                for (const node of chunk.nodes) {
                  if (node.type === 5 /* TOOL_USE */ && node.tool_use) {
                    if (!toolCallsEmitted.has(node.tool_use.tool_use_id)) {
                      toolCallsEmitted.add(node.tool_use.tool_use_id);
                      const toolContent = responseNodeToContent(node);
                      if (toolContent) {
                        content.push(toolContent);
                      }
                    }
                  } else if (node.type === 8 /* THINKING */) {
                    const thinkingContent = responseNodeToContent(node);
                    if (thinkingContent) {
                      content.push(thinkingContent);
                    }
                  } else if (node.type === 10 /* TOKEN_USAGE */ && node.token_usage) {
                    // Last TOKEN_USAGE node wins; `|| void 0` leaves total
                    // undefined when both counts are absent/zero.
                    usage = {
                      inputTokens: node.token_usage.input_tokens,
                      outputTokens: node.token_usage.output_tokens,
                      totalTokens: (node.token_usage.input_tokens ?? 0) + (node.token_usage.output_tokens ?? 0) || void 0
                    };
                  }
                }
              }
              if (chunk.stop_reason !== void 0) {
                stopReason = chunk.stop_reason;
              }
            } catch {
              // Non-JSON lines are skipped (logged only in debug mode).
              if (this.debug) this.log(`JSON parse failed for line: ${trimmed}`);
            }
          }
        }
      }
      // Flush any bytes the decoder was still holding.
      const finalChunk = decoder.decode();
      if (finalChunk) textBuffer += finalChunk;
      if (textBuffer.trim()) {
        try {
          // NOTE(review): this trailing, non-newline-terminated chunk only
          // handles text and stop_reason — nodes (tool calls, usage) in a
          // final partial line would be dropped; presumably the server always
          // newline-terminates. Confirm against the API contract.
          const chunk = JSON.parse(textBuffer.trim());
          if (chunk.text) accumulatedText += chunk.text;
          if (chunk.stop_reason !== void 0) stopReason = chunk.stop_reason;
        } catch {
          if (this.debug) this.log(`JSON parse failed for remaining: ${textBuffer.trim()}`);
        }
      }
    } finally {
      reader.releaseLock();
    }
    // Text goes first in the content list, ahead of tool calls / reasoning.
    if (accumulatedText) {
      content.unshift({ type: "text", text: accumulatedText });
    }
    return {
      content,
      usage,
      finishReason: stopReasonToFinishReason(stopReason)
    };
  }
  // Streaming generation: re-emits server chunks as LanguageModelV2StreamParts.
  doStream(options) {
    const requestId = uuidv4();
    const url = `${this.apiUrl}/chat-stream`;
    const payload = this.buildPayload(options);
    this.log(`POST ${url} (streaming)`);
    const stream = new ReadableStream({
      // The entire fetch + parse loop runs inside start(); errors are both
      // enqueued as an "error" part and used to error the stream itself.
      start: async (controller) => {
        try {
          const response = await fetch(url, {
            method: "POST",
            headers: this.getHeaders(requestId),
            body: JSON.stringify(payload),
            signal: options.abortSignal
          });
          if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`Augment API error: ${response.status} ${response.statusText} - ${errorText}`);
          }
          if (!response.body) {
            throw new Error("Response body is null");
          }
          const reader = response.body.getReader();
          const decoder = new TextDecoder();
          let textBuffer = "";
          // All text deltas share one synthetic text id; text-start is
          // emitted lazily on the first delta.
          let textStarted = false;
          const textId = uuidv4();
          let stopReason;
          const toolCallsEmitted = /* @__PURE__ */ new Set();
          let usage = { inputTokens: void 0, outputTokens: void 0, totalTokens: void 0 };
          while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            textBuffer += decoder.decode(value, { stream: true });
            while (textBuffer.includes("\n")) {
              const newLineIndex = textBuffer.indexOf("\n");
              const line = textBuffer.substring(0, newLineIndex);
              textBuffer = textBuffer.substring(newLineIndex + 1);
              const trimmed = line.trim();
              if (trimmed) {
                try {
                  const chunk = JSON.parse(trimmed);
                  if (chunk.text) {
                    if (!textStarted) {
                      controller.enqueue({ type: "text-start", id: textId });
                      textStarted = true;
                    }
                    controller.enqueue({ type: "text-delta", id: textId, delta: chunk.text });
                  }
                  if (chunk.nodes) {
                    for (const node of chunk.nodes) {
                      if (node.type === 5 /* TOOL_USE */ && node.tool_use) {
                        if (!toolCallsEmitted.has(node.tool_use.tool_use_id)) {
                          toolCallsEmitted.add(node.tool_use.tool_use_id);
                          const inputJson = node.tool_use.input_json || "{}";
                          // Tool input arrives complete, so the start/delta/
                          // end triplet is emitted in one burst before the
                          // final tool-call part.
                          controller.enqueue({
                            type: "tool-input-start",
                            id: node.tool_use.tool_use_id,
                            toolName: node.tool_use.tool_name
                          });
                          controller.enqueue({
                            type: "tool-input-delta",
                            id: node.tool_use.tool_use_id,
                            delta: inputJson
                          });
                          controller.enqueue({
                            type: "tool-input-end",
                            id: node.tool_use.tool_use_id
                          });
                          controller.enqueue({
                            type: "tool-call",
                            toolCallId: node.tool_use.tool_use_id,
                            toolName: node.tool_use.tool_name,
                            input: inputJson
                          });
                        }
                      } else if (node.type === 8 /* THINKING */ && node.thinking) {
                        const text = node.thinking.content || node.thinking.summary || "";
                        if (text) {
                          // Each thinking node becomes its own reasoning span.
                          const reasoningId = uuidv4();
                          controller.enqueue({ type: "reasoning-start", id: reasoningId });
                          controller.enqueue({ type: "reasoning-delta", id: reasoningId, delta: text });
                          controller.enqueue({ type: "reasoning-end", id: reasoningId });
                        }
                      } else if (node.type === 10 /* TOKEN_USAGE */ && node.token_usage) {
                        usage = {
                          inputTokens: node.token_usage.input_tokens,
                          outputTokens: node.token_usage.output_tokens,
                          totalTokens: (node.token_usage.input_tokens ?? 0) + (node.token_usage.output_tokens ?? 0) || void 0
                        };
                      }
                    }
                  }
                  if (chunk.stop_reason !== void 0) {
                    stopReason = chunk.stop_reason;
                  }
                } catch {
                  if (this.debug) this.log(`JSON parse failed: ${trimmed}`);
                }
              }
            }
          }
          // NOTE(review): unlike parseStreamResponse, leftover buffered bytes
          // after the last newline are discarded here — confirm intended.
          if (textStarted) {
            controller.enqueue({ type: "text-end", id: textId });
          }
          controller.enqueue({
            type: "finish",
            usage,
            finishReason: stopReasonToFinishReason(stopReason)
          });
          controller.close();
        } catch (error) {
          controller.enqueue({ type: "error", error });
          controller.error(error);
        }
      }
    });
    return Promise.resolve({ stream });
  }
  supportsImageUrls = false;
  supportsStructuredOutputs = false;
  supportedUrls = {};
}
|
|
512
|
+
export {
|
|
513
|
+
AugmentLanguageModel,
|
|
514
|
+
resolveAugmentCredentials
|
|
515
|
+
};
|
package/dist/index.d.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
export { Auggie, PredefinedToolType, ToolIdentifier } from './auggie/sdk-acp-client.js';
|
|
2
|
+
export { AugmentCredentials, AugmentLanguageModel, AugmentLanguageModelConfig, resolveAugmentCredentials } from './auggie/ai-sdk-provider.js';
|
|
2
3
|
export { DirectContext } from './context/direct-context.js';
|
|
3
4
|
export { FileSystemContext } from './context/filesystem-context.js';
|
|
4
5
|
export { APIError } from './context/internal/api-client.js';
|
|
@@ -6,3 +7,5 @@ export { BlobTooLargeError } from './context/internal/blob-name-calculator.js';
|
|
|
6
7
|
export { AddToIndexOptions, BlobEntry, BlobInfo, Blobs, DirectContextOptions, DirectContextState, ExportOptions, File, FileSystemContextOptions, FullContextState, IndexingProgress, IndexingResult, SearchOnlyContextState } from './context/types.js';
|
|
7
8
|
import '@agentclientprotocol/sdk';
|
|
8
9
|
import 'ai';
|
|
10
|
+
import '@ai-sdk/provider';
|
|
11
|
+
import './context/internal/credentials.js';
|
package/dist/index.js
CHANGED
|
@@ -1,6 +1,10 @@
|
|
|
1
1
|
import {
|
|
2
2
|
Auggie
|
|
3
3
|
} from "./auggie/sdk-acp-client.js";
|
|
4
|
+
import {
|
|
5
|
+
AugmentLanguageModel,
|
|
6
|
+
resolveAugmentCredentials
|
|
7
|
+
} from "./auggie/ai-sdk-provider.js";
|
|
4
8
|
import { DirectContext } from "./context/direct-context.js";
|
|
5
9
|
import { FileSystemContext } from "./context/filesystem-context.js";
|
|
6
10
|
import { APIError } from "./context/internal/api-client.js";
|
|
@@ -8,7 +12,9 @@ import { BlobTooLargeError } from "./context/internal/blob-name-calculator.js";
|
|
|
8
12
|
export {
|
|
9
13
|
APIError,
|
|
10
14
|
Auggie,
|
|
15
|
+
AugmentLanguageModel,
|
|
11
16
|
BlobTooLargeError,
|
|
12
17
|
DirectContext,
|
|
13
|
-
FileSystemContext
|
|
18
|
+
FileSystemContext,
|
|
19
|
+
resolveAugmentCredentials
|
|
14
20
|
};
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@augmentcode/auggie-sdk",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.14",
|
|
4
4
|
"description": "TypeScript SDK for Auggie",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "./dist/index.js",
|
|
@@ -35,10 +35,11 @@
|
|
|
35
35
|
},
|
|
36
36
|
"dependencies": {
|
|
37
37
|
"@agentclientprotocol/sdk": "^0.5.1",
|
|
38
|
+
"@ai-sdk/provider": "^3.0.3",
|
|
38
39
|
"@mastra/mcp": "^0.14.1",
|
|
39
40
|
"ai": "^5.0.86",
|
|
40
41
|
"async-mutex": "^0.5.0",
|
|
41
|
-
"
|
|
42
|
-
"
|
|
42
|
+
"uuid": "^11.1.0",
|
|
43
|
+
"zod": "^4.1.12"
|
|
43
44
|
}
|
|
44
45
|
}
|