@lowire/loop 0.0.21 → 0.0.23
- package/lib/cache.js +2 -1
- package/lib/loop.d.ts +2 -1
- package/lib/loop.js +12 -5
- package/lib/providers/anthropic.js +12 -0
- package/lib/providers/google.js +21 -2
- package/lib/providers/openai.js +32 -10
- package/lib/providers/openaiCompatible.js +23 -8
- package/lib/types.d.ts +4 -0
- package/package.json +1 -1
- package/githubAuth.js +0 -17
package/lib/cache.js
CHANGED

@@ -28,7 +28,8 @@ async function cachedComplete(provider, conversation, caches, options) {
 async function cachedCompleteNoSecrets(provider, conversation, caches, options) {
     if (!caches)
         return await provider.complete(conversation, options);
-    const
+    const { maxTokens, reasoning, temperature } = options;
+    const key = calculateSha1(JSON.stringify({ conversation, maxTokens, reasoning, temperature }));
     if (!process.env.LOWIRE_NO_CACHE && caches.input[key]) {
         caches.output[key] = caches.input[key];
         return caches.input[key] ?? caches.output[key];
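
The net effect of the cache.js change is that the cache key is now derived from the sampling options as well as the conversation, so replays with a different temperature, reasoning setting, or token limit no longer hit a stale entry. A minimal sketch of the keying behavior, using Node's crypto as a stand-in for the module's calculateSha1 helper (the stand-in is an assumption; only the helper's name appears in the diff):

    import { createHash } from 'crypto';

    // Hypothetical stand-in for lib/cache.js's calculateSha1 helper.
    const calculateSha1 = (text: string): string =>
        createHash('sha1').update(text).digest('hex');

    const conversation = { messages: [{ role: 'user', content: 'hi' }] };
    const keyA = calculateSha1(JSON.stringify({ conversation, maxTokens: 1024, temperature: 0 }));
    const keyB = calculateSha1(JSON.stringify({ conversation, maxTokens: 1024, temperature: 1 }));
    console.log(keyA === keyB); // false: different sampling options map to different cache entries
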
package/lib/loop.d.ts
CHANGED
package/lib/loop.js
CHANGED

@@ -49,7 +49,7 @@ class Loop {
         const maxTurns = options.maxTurns || 100;
         for (let turns = 0; turns < maxTurns; ++turns) {
             if (options.maxTokens && budget.tokens !== undefined && budget.tokens <= 0)
-
+                return { status: 'error', error: `Budget tokens ${options.maxTokens} exhausted`, usage: totalUsage, turns };
             debug?.('lowire:loop')(`Turn ${turns + 1} of (max ${maxTurns})`);
             const caches = options.cache ? {
                 input: options.cache,
@@ -60,11 +60,18 @@
             if (abortController?.signal.aborted)
                 return { status: 'break', usage: totalUsage, turns };
             debug?.('lowire:loop')(`Request`, JSON.stringify({ ...summarizedConversation, tools: `${summarizedConversation.tools.length} tools` }, null, 2));
+            const tokenEstimate = Math.floor(JSON.stringify(summarizedConversation).length / 4);
+            if (budget.tokens !== undefined && tokenEstimate >= budget.tokens)
+                return { status: 'error', error: `Input token estimate ${tokenEstimate} exceeds budget ${budget.tokens}`, usage: totalUsage, turns };
             const { result: assistantMessage, usage } = await (0, cache_1.cachedComplete)(this._provider, summarizedConversation, caches, {
                 ...options,
-                maxTokens: budget.tokens,
+                maxTokens: budget.tokens !== undefined ? budget.tokens - tokenEstimate : undefined,
                 signal: abortController?.signal,
             });
+            if (assistantMessage.stopReason.code === 'max_tokens')
+                return { status: 'error', error: `Max tokens exhausted`, usage: totalUsage, turns };
+            if (assistantMessage.stopReason.code === 'other')
+                return { status: 'error', error: assistantMessage.stopReason.message, usage: totalUsage, turns };
             const intent = assistantMessage.content.filter(part => part.type === 'text').map(part => part.text).join('\n');
             totalUsage.input += usage.input;
             totalUsage.output += usage.output;
@@ -83,7 +90,7 @@
             }
             for (const toolCall of toolCalls) {
                 if (budget.toolCalls !== undefined && --budget.toolCalls < 0)
-
+                    return { status: 'error', error: `Failed to perform step, max tool calls (${options.maxToolCalls}) reached`, usage: totalUsage, turns };
                 const { name, arguments: args } = toolCall;
                 debug?.('lowire:loop')('Call tool', name, JSON.stringify(args, null, 2));
                 const status = await options.onBeforeToolCall?.({ assistantMessage, toolCall });
@@ -139,9 +146,9 @@
             if (!hasErrors)
                 budget.toolCallRetries = options.maxToolCallRetries;
             if (hasErrors && budget.toolCallRetries !== undefined && --budget.toolCallRetries < 0)
-
+                return { status: 'error', error: `Failed to perform action after ${options.maxToolCallRetries} tool call retries`, usage: totalUsage, turns };
         }
-
+        return { status: 'error', error: `Failed to perform step, max attempts reached`, usage: totalUsage, turns: maxTurns };
     }
     _summarizeConversation(task, conversation, options) {
         const { summary, lastMessage } = (0, summary_1.summarizeConversation)(task, conversation, options);
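
The new pre-flight check in loop.js estimates input size at roughly four characters per token and refuses to issue a request that could not leave room for output; what remains of the budget becomes the provider's maxTokens. A sketch of the arithmetic, assuming the same length/4 heuristic the diff introduces:

    // Heuristic from loop.js: roughly 4 characters per token.
    const estimateTokens = (conversation: unknown): number =>
        Math.floor(JSON.stringify(conversation).length / 4);

    const budgetTokens = 500;
    const tokenEstimate = estimateTokens({ messages: [{ role: 'user', content: 'x'.repeat(1200) }] });
    // ~300 estimated input tokens against a 500-token budget:
    if (tokenEstimate >= budgetTokens)
        throw new Error(`Input token estimate ${tokenEstimate} exceeds budget ${budgetTokens}`);
    const maxTokens = budgetTokens - tokenEstimate;
    console.log(maxTokens); // ~200 tokens left for the reply
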
package/lib/providers/anthropic.js
CHANGED

@@ -109,9 +109,21 @@ function toAnthropicResultParam(part) {
     throw new Error(`Unsupported content part type: ${part.type}`);
 }
 function toAssistantMessage(message) {
+    const stopReason = { code: 'ok' };
+    if (message.stop_reason === 'max_tokens') {
+        stopReason.code = 'max_tokens';
+    }
+    else if (message.stop_reason === 'tool_use') {
+        stopReason.code = 'ok';
+    }
+    else {
+        stopReason.code = 'other';
+        stopReason.message = `Unexpected stop reason: ${message.stop_reason}`;
+    }
     return {
         role: 'assistant',
         content: message.content.map(toContentPart).filter(Boolean),
+        stopReason,
     };
 }
 function toAnthropicTool(tool) {
package/lib/providers/google.js
CHANGED

@@ -30,7 +30,10 @@ class Google {
             },
             contents,
             tools: conversation.tools.length > 0 ? [{ functionDeclarations: conversation.tools.map(toGeminiTool) }] : undefined,
-            generationConfig: {
+            generationConfig: {
+                temperature: options.temperature,
+                maxOutputTokens: options.maxTokens
+            },
         }, options);
         const [candidate] = response.candidates ?? [];
         if (!candidate)
@@ -85,9 +88,25 @@ function stripUnsupportedSchemaFields(schema) {
     return cleaned;
 }
 function toAssistantMessage(candidate) {
+    const stopReason = { code: 'ok' };
+    const finishReason = candidate.finishReason;
+    if (finishReason === 'MAX_TOKENS') {
+        stopReason.code = 'max_tokens';
+    }
+    else if (!finishReason || finishReason === 'STOP') {
+        stopReason.code = 'ok';
+    }
+    else if (finishReason.includes('FUNCTION') || finishReason.includes('TOOL')) {
+        stopReason.code = 'ok';
+    }
+    else {
+        stopReason.code = 'other';
+        stopReason.message = `Unexpected finish reason: ${finishReason}`;
+    }
     return {
         role: 'assistant',
-        content: candidate.content.parts.map(toContentPart).filter(Boolean),
+        content: (candidate.content.parts || []).map(toContentPart).filter(Boolean),
+        stopReason,
     };
 }
 function toContentPart(part) {
package/lib/providers/openai.js
CHANGED

@@ -27,7 +27,7 @@ exports.OpenAI = OpenAI;
 async function complete(conversation, options) {
     const inputItems = conversation.messages.map(toResponseInputItems).flat();
     const tools = conversation.tools.map(toOpenAIFunctionTool);
-    const response = await create({
+    const { response, error } = await create({
         model: options.model,
         temperature: options.temperature,
         input: inputItems,
@@ -35,10 +35,31 @@ async function complete(conversation, options) {
         tools: tools.length > 0 ? tools : undefined,
         tool_choice: conversation.tools.length > 0 ? 'auto' : undefined,
         parallel_tool_calls: false,
+        max_output_tokens: options.maxTokens,
         reasoning: toOpenAIReasoning(options.reasoning),
     }, options);
+    if (!response || error)
+        return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: error } }, usage: { input: 0, output: 0 } };
     // Parse response output items
-    const
+    const stopReason = { code: 'ok' };
+    if (response.incomplete_details?.reason === 'max_output_tokens') {
+        stopReason.code = 'max_tokens';
+    }
+    else if (response.incomplete_details?.reason === 'content_filter') {
+        stopReason.code = 'other';
+        stopReason.message = 'Content filter triggered';
+    }
+    else if (response.incomplete_details?.reason) {
+        stopReason.code = 'other';
+        stopReason.message = `Unexpected incomplete reason: ${response.incomplete_details.reason}`;
+    }
+    const result = { role: 'assistant', content: [], stopReason };
+    const usage = {
+        input: response.usage?.input_tokens ?? 0,
+        output: response.usage?.output_tokens ?? 0,
+    };
+    if (stopReason.code !== 'ok')
+        return { result, usage };
     for (const item of response.output) {
         if (item.type === 'message' && item.role === 'assistant') {
             result.openaiId = item.id;
@@ -57,10 +78,6 @@
             result.content.push(toToolCall(item));
         }
     }
-    const usage = {
-        input: response.usage?.input_tokens ?? 0,
-        output: response.usage?.output_tokens ?? 0,
-    };
     return { result, usage };
 }
 async function create(createParams, options) {
@@ -77,13 +94,18 @@
         signal: options.signal,
         timeout: options.apiTimeout
     });
+    const responseText = await response.text();
+    const responseBody = JSON.parse(responseText);
     if (!response.ok) {
-
-
+        try {
+            return { error: responseBody.error.message };
+        }
+        catch {
+            return { error: responseText };
+        }
     }
-    const responseBody = await response.json();
     options.debug?.('lowire:openai-responses')('Response:', JSON.stringify(responseBody, null, 2));
-    return responseBody;
+    return { response: responseBody };
 }
 function toResultContentPart(part) {
     if (part.type === 'text') {
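
Both OpenAI-backed providers now route create() results through a { response, error } envelope so that complete() can surface HTTP failures as a stopReason instead of an unhandled throw. A condensed sketch of the pattern with a hypothetical helper name (note the sketch parses lazily inside the try; the published code parses the body before the response.ok check, so a non-JSON error body would still throw at the parse):

    type Envelope<T> = { response?: T; error?: string };

    // Hypothetical condensation of the new create() error handling.
    async function createEnvelope(res: Response): Promise<Envelope<unknown>> {
        const responseText = await res.text();
        if (!res.ok) {
            try {
                // Prefer the structured API error message when the body is JSON.
                const body = JSON.parse(responseText) as { error: { message: string } };
                return { error: body.error.message };
            } catch {
                return { error: responseText }; // fall back to the raw body
            }
        }
        return { response: JSON.parse(responseText) };
    }

Callers then branch on error instead of wrapping every request in try/catch, which is what lets complete() return a uniform stopReason of { code: 'other', message: error }.
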
package/lib/providers/openaiCompatible.js
CHANGED

@@ -32,9 +32,9 @@ async function complete(conversation, options) {
     };
     const openaiMessages = [systemMessage, ...conversation.messages.map(toCompletionsMessages).flat()];
     const openaiTools = conversation.tools.map(t => toCompletionsTool(t));
-    const response = await create({
+    const { response, error } = await create({
         model: options.model,
-
+        max_completion_tokens: options.maxTokens,
         temperature: options.temperature,
         messages: openaiMessages,
         tools: openaiTools,
@@ -42,9 +42,15 @@
         reasoning_effort: toCompletionsReasoning(options.reasoning),
         parallel_tool_calls: false,
     }, options);
+    if (error) {
+        if (error.type === 'invalid_request_error')
+            return { result: { role: 'assistant', content: [], stopReason: { code: 'max_tokens' } }, usage: { input: 0, output: 0 } };
+        return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: response.error.message } }, usage: { input: 0, output: 0 } };
+    }
     if (!response || !response.choices.length)
-
-    const result = { role: 'assistant', content: [] };
+        return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: 'Failed to get response from OpenAI completions' } }, usage: { input: 0, output: 0 } };
+    const result = { role: 'assistant', content: [], stopReason: { code: 'ok' } };
+    const finishReason = response.choices[0]?.finish_reason;
     for (const choice of response.choices) {
         const message = choice.message;
         if (message.content)
@@ -55,6 +61,10 @@ async function complete(conversation, options) {
             result.content.push(toToolCall(entry));
         }
     }
+    if (finishReason === 'length')
+        result.stopReason = { code: 'max_tokens' };
+    else if (finishReason !== 'tool_calls' && finishReason !== 'function_call' && finishReason !== 'stop')
+        result.stopReason = { code: 'other', message: `Unexpected finish reason: ${finishReason}` };
     const usage = {
         input: response.usage?.prompt_tokens ?? 0,
         output: response.usage?.completion_tokens ?? 0,
@@ -75,13 +85,18 @@ async function create(createParams, options) {
         signal: options.signal,
         timeout: options.apiTimeout
     });
+    const responseText = await response.text();
+    const responseBody = JSON.parse(responseText);
     if (!response.ok) {
-
-
+        try {
+            return { error: responseBody };
+        }
+        catch {
+            return { error: { type: 'unknown', message: responseText } };
+        }
     }
-    const responseBody = await response.json();
     options.debug?.('lowire:openai')('Response:', JSON.stringify(responseBody, null, 2));
-    return responseBody;
+    return { response: responseBody };
 }
 function toCopilotResultContentPart(part) {
     if (part.type === 'text') {
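
For the Chat Completions path, the provider folds finish_reason into the shared stopReason vocabulary: 'length' becomes 'max_tokens', the normal completion and tool-call reasons stay 'ok', and anything else is flagged. An illustrative condensation of that inline logic as a standalone function (the name is hypothetical; the package does this inline):

    function toStopReason(finishReason: string | undefined):
            { code: 'ok' | 'max_tokens' | 'other'; message?: string } {
        if (finishReason === 'length')
            return { code: 'max_tokens' }; // output budget exhausted
        if (finishReason === 'stop' || finishReason === 'tool_calls' || finishReason === 'function_call')
            return { code: 'ok' };        // normal completion or tool use
        return { code: 'other', message: `Unexpected finish reason: ${finishReason}` };
    }
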
package/lib/types.d.ts
CHANGED

@@ -38,6 +38,10 @@ export type UserMessage = BaseMessage & {
 export type AssistantMessage = BaseMessage & {
     role: 'assistant';
     content: (TextContentPart | ToolCallContentPart | ThinkingContentPart)[];
+    stopReason: {
+        code: 'max_tokens' | 'ok' | 'other';
+        message?: string;
+    };
     openaiId?: string;
     openaiStatus?: 'completed' | 'incomplete' | 'in_progress';
     toolError?: string;
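
With AssistantMessage carrying a normalized stopReason, downstream code can branch once regardless of which provider produced the message. A usage sketch against the new shape (types mirrored from the excerpt above; the caller is hypothetical):

    // Mirrored from the lib/types.d.ts excerpt above.
    type StopReason = { code: 'max_tokens' | 'ok' | 'other'; message?: string };
    type AssistantMessage = { role: 'assistant'; content: unknown[]; stopReason: StopReason };

    function checkStop(msg: AssistantMessage): void {
        switch (msg.stopReason.code) {
            case 'ok':
                return; // normal turn: text and/or tool calls follow
            case 'max_tokens':
                throw new Error('Response truncated; raise maxTokens or shrink the input');
            case 'other':
                throw new Error(msg.stopReason.message ?? 'Provider stopped for an unexpected reason');
        }
    }
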
package/package.json
CHANGED
package/githubAuth.js
DELETED

@@ -1,17 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-require('./lib/auth/githubAuth');