@lowire/loop 0.0.22 → 0.0.24

package/lib/cache.d.ts CHANGED
@@ -20,4 +20,5 @@ export type ReplayCaches = {
 };
 export declare function cachedComplete(provider: types.Provider, conversation: types.Conversation, caches: ReplayCaches | undefined, options: types.CompletionOptions & {
     secrets?: Record<string, string>;
+    cacheMode?: 'strict' | 'lax';
 }): ReturnType<types.Provider['complete']>;
package/lib/cache.js CHANGED
@@ -28,7 +28,13 @@ async function cachedComplete(provider, conversation, caches, options) {
 async function cachedCompleteNoSecrets(provider, conversation, caches, options) {
     if (!caches)
         return await provider.complete(conversation, options);
-    const key = calculateSha1(JSON.stringify(conversation));
+    const keyObject = {
+        conversation: options.cacheMode === 'lax' ? { ...conversation, tools: [] } : conversation,
+        maxTokens: options.maxTokens,
+        reasoning: options.reasoning,
+        temperature: options.temperature,
+    };
+    const key = calculateSha1(JSON.stringify(keyObject));
     if (!process.env.LOWIRE_NO_CACHE && caches.input[key]) {
         caches.output[key] = caches.input[key];
         return caches.input[key] ?? caches.output[key];
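The cache key now covers the sampling parameters as well as the conversation, and 'lax' mode blanks out the tool list before hashing, so edits to tool definitions no longer invalidate recorded replays. A minimal self-contained sketch of the keying behavior, using node:crypto's SHA-1 as a stand-in for the package's calculateSha1 helper (option values illustrative):

    import { createHash } from 'node:crypto';

    // Stand-in for calculateSha1 (assumed to be a hex SHA-1 of its input).
    const sha1 = (s: string) => createHash('sha1').update(s).digest('hex');

    type Conversation = { messages: unknown[]; tools: unknown[] };

    function cacheKey(conversation: Conversation, cacheMode?: 'strict' | 'lax') {
        return sha1(JSON.stringify({
            conversation: cacheMode === 'lax' ? { ...conversation, tools: [] } : conversation,
            maxTokens: 4096,      // illustrative option values
            reasoning: undefined,
            temperature: 0,
        }));
    }

    const a = { messages: ['hi'], tools: [{ name: 'grep' }] };
    const b = { messages: ['hi'], tools: [] };
    console.log(cacheKey(a, 'lax') === cacheKey(b, 'lax'));       // true: tools ignored
    console.log(cacheKey(a, 'strict') === cacheKey(b, 'strict')); // false: tools included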
package/lib/loop.d.ts CHANGED
@@ -48,6 +48,7 @@ export type LoopOptions = types.CompletionOptions & LoopEvents & {
     maxToolCalls?: number;
     maxToolCallRetries?: number;
     cache?: types.ReplayCache;
+    cacheMode?: 'strict' | 'lax';
     secrets?: Record<string, string>;
     summarize?: boolean;
 };
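At the loop level the same switch rides alongside the existing replay cache. A sketch of the relevant LoopOptions slice (values illustrative; the cache object stands in for one recorded on a previous run):

    const recordedCache = {}; // stands in for a previously recorded types.ReplayCache

    const loopOptions = {
        maxToolCalls: 16,
        maxToolCallRetries: 2,
        cache: recordedCache,
        cacheMode: 'lax' as const, // forwarded through to cachedComplete
        summarize: true,
    };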
package/lib/loop.js CHANGED
@@ -60,11 +60,18 @@ class Loop {
         if (abortController?.signal.aborted)
             return { status: 'break', usage: totalUsage, turns };
         debug?.('lowire:loop')(`Request`, JSON.stringify({ ...summarizedConversation, tools: `${summarizedConversation.tools.length} tools` }, null, 2));
+        const tokenEstimate = Math.floor(JSON.stringify(summarizedConversation).length / 4);
+        if (budget.tokens !== undefined && tokenEstimate >= budget.tokens)
+            return { status: 'error', error: `Input token estimate ${tokenEstimate} exceeds budget ${budget.tokens}`, usage: totalUsage, turns };
         const { result: assistantMessage, usage } = await (0, cache_1.cachedComplete)(this._provider, summarizedConversation, caches, {
             ...options,
-            maxTokens: budget.tokens,
+            maxTokens: budget.tokens !== undefined ? budget.tokens - tokenEstimate : undefined,
             signal: abortController?.signal,
         });
+        if (assistantMessage.stopReason.code === 'error')
+            return { status: 'error', error: assistantMessage.stopReason.message, usage: totalUsage, turns };
+        if (assistantMessage.stopReason.code === 'max_tokens')
+            return { status: 'error', error: `Max tokens exhausted`, usage: totalUsage, turns };
         const intent = assistantMessage.content.filter(part => part.type === 'text').map(part => part.text).join('\n');
         totalUsage.input += usage.input;
         totalUsage.output += usage.output;
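The loop now estimates input size at roughly four characters per token, refuses to call the provider when the estimate alone exhausts the budget, and otherwise hands the provider only the unspent remainder as maxTokens. The arithmetic, with illustrative numbers:

    // Illustrative: a serialized conversation of ~40,000 characters.
    const serializedLength = 40_000;
    const tokenEstimate = Math.floor(serializedLength / 4); // 10_000 — crude 4-chars/token heuristic

    const budgetTokens = 12_000;
    if (tokenEstimate >= budgetTokens) {
        // the loop returns { status: 'error', ... } without calling the provider
    }
    const maxTokens = budgetTokens - tokenEstimate; // 2_000 tokens left for the completion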
package/lib/providers/anthropic.js CHANGED
@@ -17,11 +17,12 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.Anthropic = void 0;
 const fetchWithTimeout_1 = require("../fetchWithTimeout");
+const types_1 = require("../types");
 class Anthropic {
     name = 'anthropic';
     async complete(conversation, options) {
         const maxTokens = Math.min(options.maxTokens ?? 32_768, 32_768);
-        const response = await create({
+        const { response, error } = await create({
             model: options.model,
             max_tokens: maxTokens,
             temperature: options.temperature,
@@ -33,6 +34,8 @@ class Anthropic {
                 budget_tokens: options.maxTokens ? Math.round(maxTokens / 10) : 1024,
             } : undefined,
         }, options);
+        if (error || !response)
+            return { result: (0, types_1.assistantMessageFromError)(error ?? 'No response from Anthropic API'), usage: (0, types_1.emptyUsage)() };
         const result = toAssistantMessage(response);
         const usage = {
             input: response.usage.input_tokens,
@@ -57,13 +60,14 @@ async function create(createParams, options) {
         signal: options.signal,
        timeout: options.apiTimeout
    });
+   const responseText = await response.text();
+   const responseBody = JSON.parse(responseText);
+   options.debug?.('lowire:anthropic')('Response:', responseText);
    if (!response.ok) {
        options.debug?.('lowire:anthropic')('Response:', response.status);
-       throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+       return { error: `API error: ${response.status} ${response.statusText} ${responseText}` };
    }
-   const responseBody = await response.json();
-   options.debug?.('lowire:anthropic')('Response:', JSON.stringify(responseBody, null, 2));
-   return responseBody;
+   return { response: responseBody };
 }
 function toContentPart(block) {
     if (block.type === 'text') {
@@ -109,9 +113,13 @@ function toAnthropicResultParam(part) {
     throw new Error(`Unsupported content part type: ${part.type}`);
 }
 function toAssistantMessage(message) {
+    const stopReason = { code: 'ok' };
+    if (message.stop_reason === 'max_tokens')
+        stopReason.code = 'max_tokens';
     return {
         role: 'assistant',
         content: message.content.map(toContentPart).filter(Boolean),
+        stopReason,
     };
 }
 function toAnthropicTool(tool) {
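Every provider now reports how the completion ended through stopReason instead of throwing. For Anthropic the mapping is read off the message's stop_reason field; a trimmed sketch of the handling in toAssistantMessage above:

    type StopReason = { code: 'ok' | 'max_tokens' | 'error'; message?: string };

    // Trimmed model of the stop_reason mapping; any other value counts as 'ok'.
    function mapAnthropicStopReason(stop_reason: string | null): StopReason {
        return stop_reason === 'max_tokens' ? { code: 'max_tokens' } : { code: 'ok' };
    }

    console.log(mapAnthropicStopReason('end_turn'));   // { code: 'ok' }
    console.log(mapAnthropicStopReason('max_tokens')); // { code: 'max_tokens' }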
package/lib/providers/google.js CHANGED
@@ -17,11 +17,12 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.Google = void 0;
 const fetchWithTimeout_1 = require("../fetchWithTimeout");
+const types_1 = require("../types");
 class Google {
     name = 'google';
     async complete(conversation, options) {
         const contents = conversation.messages.map(toGeminiContent).flat();
-        const response = await create(options.model ?? 'gemini-2.5-pro', {
+        const { response, error } = await create(options.model ?? 'gemini-2.5-pro', {
             systemInstruction: {
                 role: 'system',
                 parts: [
@@ -30,11 +31,14 @@ class Google {
             },
             contents,
             tools: conversation.tools.length > 0 ? [{ functionDeclarations: conversation.tools.map(toGeminiTool) }] : undefined,
-            generationConfig: { temperature: options.temperature },
+            generationConfig: {
+                temperature: options.temperature,
+                maxOutputTokens: options.maxTokens
+            },
         }, options);
-        const [candidate] = response.candidates ?? [];
-        if (!candidate)
-            throw new Error('No candidates in response');
+        const [candidate] = response?.candidates ?? [];
+        if (error || !response || !candidate)
+            return { result: (0, types_1.assistantMessageFromError)(error ?? 'No response from Google API'), usage: (0, types_1.emptyUsage)() };
         const usage = {
             input: response.usageMetadata?.promptTokenCount ?? 0,
             output: response.usageMetadata?.candidatesTokenCount ?? 0,
@@ -59,11 +63,11 @@ async function create(model, createParams, options) {
    });
    if (!response.ok) {
        options.debug?.('lowire:google')('Response:', response.status);
-       throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+       return { error: `API error: ${response.status} ${response.statusText} ${await response.text()}` };
    }
    const responseBody = await response.json();
    options.debug?.('lowire:google')('Response:', JSON.stringify(responseBody, null, 2));
-   return responseBody;
+   return { response: responseBody };
 }
 function toGeminiTool(tool) {
     return {
@@ -85,9 +89,13 @@ function stripUnsupportedSchemaFields(schema) {
     return cleaned;
 }
 function toAssistantMessage(candidate) {
+    const stopReason = { code: 'ok' };
+    if (candidate.finishReason === 'MAX_TOKENS')
+        stopReason.code = 'max_tokens';
     return {
         role: 'assistant',
-        content: candidate.content.parts.map(toContentPart).filter(Boolean),
+        content: (candidate.content.parts || []).map(toContentPart).filter(Boolean),
+        stopReason,
     };
 }
 function toContentPart(part) {
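The (candidate.content.parts || []) fallback matters because a Gemini candidate that stopped at MAX_TOKENS can come back with no parts array at all, which previously crashed the .map call. A sketch of the failure mode (candidate shape trimmed to the fields involved):

    // A truncated candidate as Gemini may return it: finishReason set, parts absent.
    const candidate = {
        finishReason: 'MAX_TOKENS',
        content: {} as { parts?: { text?: string }[] },
    };

    // Old: candidate.content.parts.map(...) -> TypeError when parts is undefined.
    // New: default to an empty list so the mapping stays safe.
    const parts = candidate.content.parts || [];
    console.log(parts.length); // 0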
package/lib/providers/openai.js CHANGED
@@ -17,6 +17,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAI = void 0;
 const fetchWithTimeout_1 = require("../fetchWithTimeout");
+const types_1 = require("../types");
 class OpenAI {
     name = 'openai';
     async complete(conversation, options) {
@@ -27,7 +28,7 @@ exports.OpenAI = OpenAI;
 async function complete(conversation, options) {
     const inputItems = conversation.messages.map(toResponseInputItems).flat();
     const tools = conversation.tools.map(toOpenAIFunctionTool);
-    const response = await create({
+    const { response, error } = await create({
         model: options.model,
         temperature: options.temperature,
         input: inputItems,
@@ -35,10 +36,22 @@ async function complete(conversation, options) {
         tools: tools.length > 0 ? tools : undefined,
         tool_choice: conversation.tools.length > 0 ? 'auto' : undefined,
         parallel_tool_calls: false,
+        max_output_tokens: options.maxTokens,
         reasoning: toOpenAIReasoning(options.reasoning),
     }, options);
+    if (!response || error)
+        return { result: (0, types_1.assistantMessageFromError)(error ?? 'No response from OpenAI API'), usage: (0, types_1.emptyUsage)() };
     // Parse response output items
-    const result = { role: 'assistant', content: [] };
+    const stopReason = { code: 'ok' };
+    if (response.incomplete_details?.reason === 'max_output_tokens')
+        stopReason.code = 'max_tokens';
+    const result = { role: 'assistant', content: [], stopReason };
+    const usage = {
+        input: response.usage?.input_tokens ?? 0,
+        output: response.usage?.output_tokens ?? 0,
+    };
+    if (stopReason.code !== 'ok')
+        return { result, usage };
     for (const item of response.output) {
         if (item.type === 'message' && item.role === 'assistant') {
             result.openaiId = item.id;
@@ -57,10 +70,6 @@ async function complete(conversation, options) {
             result.content.push(toToolCall(item));
         }
     }
-    const usage = {
-        input: response.usage?.input_tokens ?? 0,
-        output: response.usage?.output_tokens ?? 0,
-    };
     return { result, usage };
 }
 async function create(createParams, options) {
@@ -77,13 +86,18 @@ async function create(createParams, options) {
        signal: options.signal,
        timeout: options.apiTimeout
    });
+   const responseText = await response.text();
+   const responseBody = JSON.parse(responseText);
    if (!response.ok) {
-       options.debug?.('lowire:openai-responses')('Response:', response.status);
-       throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+       try {
+           return { error: responseBody.error.message };
+       }
+       catch {
+           return { error: responseText };
+       }
    }
-   const responseBody = await response.json();
    options.debug?.('lowire:openai-responses')('Response:', JSON.stringify(responseBody, null, 2));
-   return responseBody;
+   return { response: responseBody };
 }
 function toResultContentPart(part) {
     if (part.type === 'text') {
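All four create() helpers now resolve to a { response } / { error } union rather than throwing on HTTP failures, which is what lets complete() turn transport errors into assistant messages. A sketch of the pattern the callers rely on (shapes illustrative):

    type CreateResult<T> =
        | { response: T; error?: undefined }
        | { response?: undefined; error: string };

    // Stand-in for the HTTP call; the real code reads response.text() once,
    // parses it, and returns either the parsed body or the error string.
    async function createSketch(fail: boolean): Promise<CreateResult<{ output: unknown[] }>> {
        return fail ? { error: 'API error: 429 Too Many Requests' } : { response: { output: [] } };
    }

    async function main() {
        const { response, error } = await createSketch(true);
        if (!response || error) {
            // complete() wraps this in assistantMessageFromError(...) with emptyUsage()
            console.log('stopReason.code will be "error":', error);
        }
    }
    main();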
package/lib/providers/openaiCompatible.js CHANGED
@@ -17,6 +17,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAICompatible = void 0;
 const fetchWithTimeout_1 = require("../fetchWithTimeout");
+const types_1 = require("../types");
 class OpenAICompatible {
     name = 'openai-compatible';
     async complete(conversation, options) {
@@ -32,9 +33,9 @@ async function complete(conversation, options) {
     };
     const openaiMessages = [systemMessage, ...conversation.messages.map(toCompletionsMessages).flat()];
     const openaiTools = conversation.tools.map(t => toCompletionsTool(t));
-    const response = await create({
+    const { response, error } = await create({
         model: options.model,
-        max_tokens: options.maxTokens,
+        max_completion_tokens: options.maxTokens,
         temperature: options.temperature,
         messages: openaiMessages,
         tools: openaiTools,
@@ -42,9 +43,10 @@ async function complete(conversation, options) {
         reasoning_effort: toCompletionsReasoning(options.reasoning),
         parallel_tool_calls: false,
     }, options);
-    if (!response || !response.choices.length)
-        throw new Error('Failed to get response from OpenAI completions');
-    const result = { role: 'assistant', content: [] };
+    if (error || !response)
+        return { result: (0, types_1.assistantMessageFromError)(error?.message ?? 'No response from OpenAI compatible API'), usage: (0, types_1.emptyUsage)() };
+    const result = { role: 'assistant', content: [], stopReason: { code: 'ok' } };
+    const finishReason = response.choices[0]?.finish_reason;
     for (const choice of response.choices) {
         const message = choice.message;
         if (message.content)
@@ -55,6 +57,8 @@ async function complete(conversation, options) {
             result.content.push(toToolCall(entry));
         }
     }
+    if (finishReason === 'length')
+        result.stopReason = { code: 'max_tokens' };
     const usage = {
         input: response.usage?.prompt_tokens ?? 0,
         output: response.usage?.completion_tokens ?? 0,
@@ -75,13 +79,18 @@ async function create(createParams, options) {
        signal: options.signal,
        timeout: options.apiTimeout
    });
+   const responseText = await response.text();
+   const responseBody = JSON.parse(responseText);
    if (!response.ok) {
-       options.debug?.('lowire:openai')('Response:', response.status);
-       throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+       try {
+           return { error: responseBody };
+       }
+       catch {
+           return { error: { type: 'unknown', message: responseText } };
+       }
    }
-   const responseBody = await response.json();
    options.debug?.('lowire:openai')('Response:', JSON.stringify(responseBody, null, 2));
-   return responseBody;
+   return { response: responseBody };
 }
 function toCopilotResultContentPart(part) {
     if (part.type === 'text') {
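For Chat Completions-style backends, truncation is signaled through finish_reason, and the request now sends max_completion_tokens, matching OpenAI's current parameter name, instead of the deprecated max_tokens. A sketch of the finish-reason mapping:

    // Illustrative choice object from a Chat Completions response.
    const choice = { finish_reason: 'length' as string, message: { content: 'partial output' } };

    let stopReason: { code: 'ok' | 'max_tokens' } = { code: 'ok' };
    if (choice.finish_reason === 'length')
        stopReason = { code: 'max_tokens' }; // the reply was cut off at max_completion_tokens
    console.log(stopReason); // { code: 'max_tokens' }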
package/lib/types.d.ts CHANGED
@@ -38,6 +38,10 @@ export type UserMessage = BaseMessage & {
 export type AssistantMessage = BaseMessage & {
     role: 'assistant';
     content: (TextContentPart | ToolCallContentPart | ThinkingContentPart)[];
+    stopReason: {
+        code: 'ok' | 'max_tokens' | 'error';
+        message?: string;
+    };
     openaiId?: string;
     openaiStatus?: 'completed' | 'incomplete' | 'in_progress';
     toolError?: string;
@@ -116,3 +120,5 @@ export type ReplayCache = Record<string, {
     result: AssistantMessage;
     usage: Usage;
 }>;
+export declare function assistantMessageFromError(error: string): AssistantMessage;
+export declare function emptyUsage(): Usage;
package/lib/types.js CHANGED
@@ -15,3 +15,18 @@
  * limitations under the License.
  */
 Object.defineProperty(exports, "__esModule", { value: true });
+exports.assistantMessageFromError = assistantMessageFromError;
+exports.emptyUsage = emptyUsage;
+function assistantMessageFromError(error) {
+    return {
+        role: 'assistant',
+        content: [],
+        stopReason: { code: 'error', message: error },
+    };
+}
+function emptyUsage() {
+    return {
+        input: 0,
+        output: 0,
+    };
+}
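With these helpers, provider failures flow back to the loop as ordinary assistant messages carrying a zeroed Usage, so the loop's new checks are plain field reads. A self-contained sketch of the consuming side (types reduced to what the check touches):

    type Usage = { input: number; output: number };
    type AssistantMessage = {
        role: 'assistant';
        content: unknown[];
        stopReason: { code: 'ok' | 'max_tokens' | 'error'; message?: string };
    };

    // Mirrors the two helpers added above.
    const assistantMessageFromError = (error: string): AssistantMessage =>
        ({ role: 'assistant', content: [], stopReason: { code: 'error', message: error } });
    const emptyUsage = (): Usage => ({ input: 0, output: 0 });

    const result = assistantMessageFromError('socket hang up');
    const usage = emptyUsage();
    if (result.stopReason.code === 'error')
        console.log(result.stopReason.message, usage); // 'socket hang up' { input: 0, output: 0 }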
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lowire/loop",
-  "version": "0.0.22",
+  "version": "0.0.24",
   "description": "Small agentic loop",
   "repository": {
     "type": "git",