@lowire/loop 0.0.23 → 0.0.25

This diff shows the changes between publicly released versions of this package as published to a supported registry. The information is provided for informational purposes only and reflects the package contents as they appear in the public registry.
package/lib/cache.d.ts CHANGED
@@ -20,4 +20,5 @@ export type ReplayCaches = {
20
20
  };
21
21
  export declare function cachedComplete(provider: types.Provider, conversation: types.Conversation, caches: ReplayCaches | undefined, options: types.CompletionOptions & {
22
22
  secrets?: Record<string, string>;
23
+ cacheMode?: 'strict' | 'lax';
23
24
  }): ReturnType<types.Provider['complete']>;
package/lib/cache.js CHANGED
@@ -28,8 +28,13 @@ async function cachedComplete(provider, conversation, caches, options) {
28
28
  async function cachedCompleteNoSecrets(provider, conversation, caches, options) {
29
29
  if (!caches)
30
30
  return await provider.complete(conversation, options);
31
- const { maxTokens, reasoning, temperature } = options;
32
- const key = calculateSha1(JSON.stringify({ conversation, maxTokens, reasoning, temperature }));
31
+ const keyObject = {
32
+ conversation: options.cacheMode === 'lax' ? { ...conversation, tools: [] } : conversation,
33
+ maxTokens: options.maxTokens,
34
+ reasoning: options.reasoning,
35
+ temperature: options.temperature,
36
+ };
37
+ const key = calculateSha1(JSON.stringify(keyObject));
33
38
  if (!process.env.LOWIRE_NO_CACHE && caches.input[key]) {
34
39
  caches.output[key] = caches.input[key];
35
40
  return caches.input[key] ?? caches.output[key];
package/lib/loop.d.ts CHANGED
@@ -48,6 +48,7 @@ export type LoopOptions = types.CompletionOptions & LoopEvents & {
48
48
  maxToolCalls?: number;
49
49
  maxToolCallRetries?: number;
50
50
  cache?: types.ReplayCache;
51
+ cacheMode?: 'strict' | 'lax';
51
52
  secrets?: Record<string, string>;
52
53
  summarize?: boolean;
53
54
  };
@@ -58,7 +59,6 @@ export declare class Loop {
58
59
  constructor(options: LoopOptions);
59
60
  run(task: string, runOptions?: Omit<LoopOptions, 'model' | 'api' | 'apiKey'> & {
60
61
  model?: string;
61
- abortController?: AbortController;
62
62
  }): Promise<{
63
63
  result?: types.ToolResult;
64
64
  status: 'ok' | 'break' | 'error';
package/lib/loop.js CHANGED
@@ -29,7 +29,6 @@ class Loop {
29
29
  }
30
30
  async run(task, runOptions = {}) {
31
31
  const options = { ...this._loopOptions, ...runOptions };
32
- const abortController = runOptions.abortController;
33
32
  const allTools = [...(options.tools || []).map(wrapToolWithIsDone)];
34
33
  const conversation = {
35
34
  systemPrompt,
@@ -57,7 +56,7 @@ class Loop {
57
56
  } : undefined;
58
57
  const summarizedConversation = options.summarize ? this._summarizeConversation(task, conversation, options) : conversation;
59
58
  await options.onBeforeTurn?.({ conversation: summarizedConversation, totalUsage, budgetTokens: budget.tokens });
60
- if (abortController?.signal.aborted)
59
+ if (options.signal?.aborted)
61
60
  return { status: 'break', usage: totalUsage, turns };
62
61
  debug?.('lowire:loop')(`Request`, JSON.stringify({ ...summarizedConversation, tools: `${summarizedConversation.tools.length} tools` }, null, 2));
63
62
  const tokenEstimate = Math.floor(JSON.stringify(summarizedConversation).length / 4);
@@ -66,12 +65,12 @@ class Loop {
66
65
  const { result: assistantMessage, usage } = await (0, cache_1.cachedComplete)(this._provider, summarizedConversation, caches, {
67
66
  ...options,
68
67
  maxTokens: budget.tokens !== undefined ? budget.tokens - tokenEstimate : undefined,
69
- signal: abortController?.signal,
68
+ signal: options.signal,
70
69
  });
70
+ if (assistantMessage.stopReason.code === 'error')
71
+ return { status: 'error', error: assistantMessage.stopReason.message, usage: totalUsage, turns };
71
72
  if (assistantMessage.stopReason.code === 'max_tokens')
72
73
  return { status: 'error', error: `Max tokens exhausted`, usage: totalUsage, turns };
73
- if (assistantMessage.stopReason.code === 'other')
74
- return { status: 'error', error: assistantMessage.stopReason.message, usage: totalUsage, turns };
75
74
  const intent = assistantMessage.content.filter(part => part.type === 'text').map(part => part.text).join('\n');
76
75
  totalUsage.input += usage.input;
77
76
  totalUsage.output += usage.output;
@@ -80,7 +79,7 @@ class Loop {
80
79
  debug?.('lowire:loop')('Usage', `input: ${usage.input}, output: ${usage.output}`);
81
80
  debug?.('lowire:loop')('Assistant', intent, JSON.stringify(assistantMessage.content, null, 2));
82
81
  await options.onAfterTurn?.({ assistantMessage, totalUsage, budgetTokens: budget.tokens });
83
- if (abortController?.signal.aborted)
82
+ if (options.signal?.aborted)
84
83
  return { status: 'break', usage: totalUsage, turns };
85
84
  conversation.messages.push(assistantMessage);
86
85
  const toolCalls = assistantMessage.content.filter(part => part.type === 'tool_call');
@@ -94,7 +93,7 @@ class Loop {
94
93
  const { name, arguments: args } = toolCall;
95
94
  debug?.('lowire:loop')('Call tool', name, JSON.stringify(args, null, 2));
96
95
  const status = await options.onBeforeToolCall?.({ assistantMessage, toolCall });
97
- if (abortController?.signal.aborted)
96
+ if (options.signal?.aborted)
98
97
  return { status: 'break', usage: totalUsage, turns };
99
98
  if (status === 'disallow') {
100
99
  toolCall.result = {
@@ -118,7 +117,7 @@ class Loop {
118
117
  const text = result.content.filter(part => part.type === 'text').map(part => part.text).join('\n');
119
118
  debug?.('lowire:loop')('Tool result', text, JSON.stringify(result, null, 2));
120
119
  const status = await options.onAfterToolCall?.({ assistantMessage, toolCall, result });
121
- if (abortController?.signal.aborted)
120
+ if (options.signal?.aborted)
122
121
  return { status: 'break', usage: totalUsage, turns };
123
122
  if (status === 'disallow') {
124
123
  toolCall.result = {
@@ -134,7 +133,7 @@ class Loop {
134
133
  catch (error) {
135
134
  const errorMessage = `Error while executing tool "${name}": ${error instanceof Error ? error.message : String(error)}\n\nPlease try to recover and complete the task.`;
136
135
  await options.onToolCallError?.({ assistantMessage, toolCall, error });
137
- if (abortController?.signal.aborted)
136
+ if (options.signal?.aborted)
138
137
  return { status: 'break', usage: totalUsage, turns };
139
138
  toolCall.result = {
140
139
  content: [{ type: 'text', text: errorMessage }],
@@ -17,11 +17,12 @@
17
17
  Object.defineProperty(exports, "__esModule", { value: true });
18
18
  exports.Anthropic = void 0;
19
19
  const fetchWithTimeout_1 = require("../fetchWithTimeout");
20
+ const types_1 = require("../types");
20
21
  class Anthropic {
21
22
  name = 'anthropic';
22
23
  async complete(conversation, options) {
23
24
  const maxTokens = Math.min(options.maxTokens ?? 32_768, 32_768);
24
- const response = await create({
25
+ const { response, error } = await create({
25
26
  model: options.model,
26
27
  max_tokens: maxTokens,
27
28
  temperature: options.temperature,
@@ -33,6 +34,8 @@ class Anthropic {
33
34
  budget_tokens: options.maxTokens ? Math.round(maxTokens / 10) : 1024,
34
35
  } : undefined,
35
36
  }, options);
37
+ if (error || !response)
38
+ return { result: (0, types_1.assistantMessageFromError)(error ?? 'No response from Anthropic API'), usage: (0, types_1.emptyUsage)() };
36
39
  const result = toAssistantMessage(response);
37
40
  const usage = {
38
41
  input: response.usage.input_tokens,
@@ -57,13 +60,14 @@ async function create(createParams, options) {
57
60
  signal: options.signal,
58
61
  timeout: options.apiTimeout
59
62
  });
63
+ const responseText = await response.text();
64
+ const responseBody = JSON.parse(responseText);
65
+ options.debug?.('lowire:anthropic')('Response:', responseText);
60
66
  if (!response.ok) {
61
67
  options.debug?.('lowire:anthropic')('Response:', response.status);
62
- throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
68
+ return { error: `API error: ${response.status} ${response.statusText} ${responseText}` };
63
69
  }
64
- const responseBody = await response.json();
65
- options.debug?.('lowire:anthropic')('Response:', JSON.stringify(responseBody, null, 2));
66
- return responseBody;
70
+ return { response: responseBody };
67
71
  }
68
72
  function toContentPart(block) {
69
73
  if (block.type === 'text') {
@@ -110,16 +114,8 @@ function toAnthropicResultParam(part) {
110
114
  }
111
115
  function toAssistantMessage(message) {
112
116
  const stopReason = { code: 'ok' };
113
- if (message.stop_reason === 'max_tokens') {
117
+ if (message.stop_reason === 'max_tokens')
114
118
  stopReason.code = 'max_tokens';
115
- }
116
- else if (message.stop_reason === 'tool_use') {
117
- stopReason.code = 'ok';
118
- }
119
- else {
120
- stopReason.code = 'other';
121
- stopReason.message = `Unexpected stop reason: ${message.stop_reason}`;
122
- }
123
119
  return {
124
120
  role: 'assistant',
125
121
  content: message.content.map(toContentPart).filter(Boolean),
@@ -17,11 +17,12 @@
17
17
  Object.defineProperty(exports, "__esModule", { value: true });
18
18
  exports.Google = void 0;
19
19
  const fetchWithTimeout_1 = require("../fetchWithTimeout");
20
+ const types_1 = require("../types");
20
21
  class Google {
21
22
  name = 'google';
22
23
  async complete(conversation, options) {
23
24
  const contents = conversation.messages.map(toGeminiContent).flat();
24
- const response = await create(options.model ?? 'gemini-2.5-pro', {
25
+ const { response, error } = await create(options.model ?? 'gemini-2.5-pro', {
25
26
  systemInstruction: {
26
27
  role: 'system',
27
28
  parts: [
@@ -35,9 +36,9 @@ class Google {
35
36
  maxOutputTokens: options.maxTokens
36
37
  },
37
38
  }, options);
38
- const [candidate] = response.candidates ?? [];
39
- if (!candidate)
40
- throw new Error('No candidates in response');
39
+ const [candidate] = response?.candidates ?? [];
40
+ if (error || !response || !candidate)
41
+ return { result: (0, types_1.assistantMessageFromError)(error ?? 'No response from Google API'), usage: (0, types_1.emptyUsage)() };
41
42
  const usage = {
42
43
  input: response.usageMetadata?.promptTokenCount ?? 0,
43
44
  output: response.usageMetadata?.candidatesTokenCount ?? 0,
@@ -62,11 +63,11 @@ async function create(model, createParams, options) {
62
63
  });
63
64
  if (!response.ok) {
64
65
  options.debug?.('lowire:google')('Response:', response.status);
65
- throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
66
+ return { error: `API error: ${response.status} ${response.statusText} ${await response.text()}` };
66
67
  }
67
68
  const responseBody = await response.json();
68
69
  options.debug?.('lowire:google')('Response:', JSON.stringify(responseBody, null, 2));
69
- return responseBody;
70
+ return { response: responseBody };
70
71
  }
71
72
  function toGeminiTool(tool) {
72
73
  return {
@@ -89,20 +90,8 @@ function stripUnsupportedSchemaFields(schema) {
89
90
  }
90
91
  function toAssistantMessage(candidate) {
91
92
  const stopReason = { code: 'ok' };
92
- const finishReason = candidate.finishReason;
93
- if (finishReason === 'MAX_TOKENS') {
93
+ if (candidate.finishReason === 'MAX_TOKENS')
94
94
  stopReason.code = 'max_tokens';
95
- }
96
- else if (!finishReason || finishReason === 'STOP') {
97
- stopReason.code = 'ok';
98
- }
99
- else if (finishReason.includes('FUNCTION') || finishReason.includes('TOOL')) {
100
- stopReason.code = 'ok';
101
- }
102
- else {
103
- stopReason.code = 'other';
104
- stopReason.message = `Unexpected finish reason: ${finishReason}`;
105
- }
106
95
  return {
107
96
  role: 'assistant',
108
97
  content: (candidate.content.parts || []).map(toContentPart).filter(Boolean),
@@ -17,6 +17,7 @@
17
17
  Object.defineProperty(exports, "__esModule", { value: true });
18
18
  exports.OpenAI = void 0;
19
19
  const fetchWithTimeout_1 = require("../fetchWithTimeout");
20
+ const types_1 = require("../types");
20
21
  class OpenAI {
21
22
  name = 'openai';
22
23
  async complete(conversation, options) {
@@ -39,20 +40,11 @@ async function complete(conversation, options) {
39
40
  reasoning: toOpenAIReasoning(options.reasoning),
40
41
  }, options);
41
42
  if (!response || error)
42
- return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: error } }, usage: { input: 0, output: 0 } };
43
+ return { result: (0, types_1.assistantMessageFromError)(error ?? 'No response from OpenAI API'), usage: (0, types_1.emptyUsage)() };
43
44
  // Parse response output items
44
45
  const stopReason = { code: 'ok' };
45
- if (response.incomplete_details?.reason === 'max_output_tokens') {
46
+ if (response.incomplete_details?.reason === 'max_output_tokens')
46
47
  stopReason.code = 'max_tokens';
47
- }
48
- else if (response.incomplete_details?.reason === 'content_filter') {
49
- stopReason.code = 'other';
50
- stopReason.message = 'Content filter triggered';
51
- }
52
- else if (response.incomplete_details?.reason) {
53
- stopReason.code = 'other';
54
- stopReason.message = `Unexpected incomplete reason: ${response.incomplete_details.reason}`;
55
- }
56
48
  const result = { role: 'assistant', content: [], stopReason };
57
49
  const usage = {
58
50
  input: response.usage?.input_tokens ?? 0,
@@ -17,6 +17,7 @@
17
17
  Object.defineProperty(exports, "__esModule", { value: true });
18
18
  exports.OpenAICompatible = void 0;
19
19
  const fetchWithTimeout_1 = require("../fetchWithTimeout");
20
+ const types_1 = require("../types");
20
21
  class OpenAICompatible {
21
22
  name = 'openai-compatible';
22
23
  async complete(conversation, options) {
@@ -42,13 +43,8 @@ async function complete(conversation, options) {
42
43
  reasoning_effort: toCompletionsReasoning(options.reasoning),
43
44
  parallel_tool_calls: false,
44
45
  }, options);
45
- if (error) {
46
- if (error.type === 'invalid_request_error')
47
- return { result: { role: 'assistant', content: [], stopReason: { code: 'max_tokens' } }, usage: { input: 0, output: 0 } };
48
- return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: response.error.message } }, usage: { input: 0, output: 0 } };
49
- }
50
- if (!response || !response.choices.length)
51
- return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: 'Failed to get response from OpenAI completions' } }, usage: { input: 0, output: 0 } };
46
+ if (error || !response)
47
+ return { result: (0, types_1.assistantMessageFromError)(error?.message ?? 'No response from OpenAI compatible API'), usage: (0, types_1.emptyUsage)() };
52
48
  const result = { role: 'assistant', content: [], stopReason: { code: 'ok' } };
53
49
  const finishReason = response.choices[0]?.finish_reason;
54
50
  for (const choice of response.choices) {
@@ -63,8 +59,6 @@ async function complete(conversation, options) {
63
59
  }
64
60
  if (finishReason === 'length')
65
61
  result.stopReason = { code: 'max_tokens' };
66
- else if (finishReason !== 'tool_calls' && finishReason !== 'function_call' && finishReason !== 'stop')
67
- result.stopReason = { code: 'other', message: `Unexpected finish reason: ${finishReason}` };
68
62
  const usage = {
69
63
  input: response.usage?.prompt_tokens ?? 0,
70
64
  output: response.usage?.completion_tokens ?? 0,
package/lib/types.d.ts CHANGED
@@ -39,7 +39,7 @@ export type AssistantMessage = BaseMessage & {
39
39
  role: 'assistant';
40
40
  content: (TextContentPart | ToolCallContentPart | ThinkingContentPart)[];
41
41
  stopReason: {
42
- code: 'max_tokens' | 'ok' | 'other';
42
+ code: 'ok' | 'max_tokens' | 'error';
43
43
  message?: string;
44
44
  };
45
45
  openaiId?: string;
@@ -120,3 +120,5 @@ export type ReplayCache = Record<string, {
120
120
  result: AssistantMessage;
121
121
  usage: Usage;
122
122
  }>;
123
+ export declare function assistantMessageFromError(error: string): AssistantMessage;
124
+ export declare function emptyUsage(): Usage;
package/lib/types.js CHANGED
@@ -15,3 +15,18 @@
15
15
  * limitations under the License.
16
16
  */
17
17
  Object.defineProperty(exports, "__esModule", { value: true });
18
+ exports.assistantMessageFromError = assistantMessageFromError;
19
+ exports.emptyUsage = emptyUsage;
20
+ function assistantMessageFromError(error) {
21
+ return {
22
+ role: 'assistant',
23
+ content: [],
24
+ stopReason: { code: 'error', message: error },
25
+ };
26
+ }
27
+ function emptyUsage() {
28
+ return {
29
+ input: 0,
30
+ output: 0,
31
+ };
32
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lowire/loop",
3
- "version": "0.0.23",
3
+ "version": "0.0.25",
4
4
  "description": "Small agentic loop",
5
5
  "repository": {
6
6
  "type": "git",