@lowire/loop 0.0.22 → 0.0.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/cache.js CHANGED
@@ -28,7 +28,8 @@ async function cachedComplete(provider, conversation, caches, options) {
 async function cachedCompleteNoSecrets(provider, conversation, caches, options) {
     if (!caches)
         return await provider.complete(conversation, options);
-    const key = calculateSha1(JSON.stringify(conversation));
+    const { maxTokens, reasoning, temperature } = options;
+    const key = calculateSha1(JSON.stringify({ conversation, maxTokens, reasoning, temperature }));
     if (!process.env.LOWIRE_NO_CACHE && caches.input[key]) {
         caches.output[key] = caches.input[key];
         return caches.input[key] ?? caches.output[key];
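
The cache-key change above means completions are now cached per sampling configuration, not just per conversation. A minimal sketch of the effect, assuming calculateSha1 is a plain SHA-1 over the serialized input (the package's helper may differ in detail):

const crypto = require('crypto');

// Hypothetical stand-in for the package's calculateSha1 helper.
function calculateSha1(text) {
    return crypto.createHash('sha1').update(text).digest('hex');
}

const conversation = { messages: [{ role: 'user', content: 'hi' }] };
const options = { maxTokens: 1024, reasoning: undefined, temperature: 0 };

// 0.0.22 keyed on the conversation alone:
const oldKey = calculateSha1(JSON.stringify(conversation));
// 0.0.23 folds the option values that affect the completion into the key:
const { maxTokens, reasoning, temperature } = options;
const newKey = calculateSha1(JSON.stringify({ conversation, maxTokens, reasoning, temperature }));
// Re-running the same conversation at temperature 1 now misses the cache
// instead of returning the temperature-0 answer.
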
package/lib/loop.js CHANGED
@@ -60,11 +60,18 @@ class Loop {
         if (abortController?.signal.aborted)
             return { status: 'break', usage: totalUsage, turns };
         debug?.('lowire:loop')(`Request`, JSON.stringify({ ...summarizedConversation, tools: `${summarizedConversation.tools.length} tools` }, null, 2));
+        const tokenEstimate = Math.floor(JSON.stringify(summarizedConversation).length / 4);
+        if (budget.tokens !== undefined && tokenEstimate >= budget.tokens)
+            return { status: 'error', error: `Input token estimate ${tokenEstimate} exceeds budget ${budget.tokens}`, usage: totalUsage, turns };
         const { result: assistantMessage, usage } = await (0, cache_1.cachedComplete)(this._provider, summarizedConversation, caches, {
             ...options,
-            maxTokens: budget.tokens,
+            maxTokens: budget.tokens !== undefined ? budget.tokens - tokenEstimate : undefined,
             signal: abortController?.signal,
         });
+        if (assistantMessage.stopReason.code === 'max_tokens')
+            return { status: 'error', error: `Max tokens exhausted`, usage: totalUsage, turns };
+        if (assistantMessage.stopReason.code === 'other')
+            return { status: 'error', error: assistantMessage.stopReason.message, usage: totalUsage, turns };
         const intent = assistantMessage.content.filter(part => part.type === 'text').map(part => part.text).join('\n');
         totalUsage.input += usage.input;
         totalUsage.output += usage.output;
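
The loop now pre-checks the input against the token budget using a rough characters/4 estimate and hands the remainder to the provider as maxTokens. A small sketch of that arithmetic, extracted from the hunk above (planBudget is an illustrative name, not a package export):

// Illustrative helper; the real logic lives inline in Loop's run loop.
function planBudget(summarizedConversation, budgetTokens) {
    // ~4 characters per token is the package's heuristic, not an exact tokenizer.
    const tokenEstimate = Math.floor(JSON.stringify(summarizedConversation).length / 4);
    if (budgetTokens !== undefined && tokenEstimate >= budgetTokens)
        return { error: `Input token estimate ${tokenEstimate} exceeds budget ${budgetTokens}` };
    // Whatever the input did not consume becomes the provider's output cap.
    return { maxTokens: budgetTokens !== undefined ? budgetTokens - tokenEstimate : undefined };
}

console.log(planBudget({ messages: ['hi'] }, 50));            // { maxTokens: 46 }
console.log(planBudget({ messages: ['x'.repeat(400)] }, 50)); // { error: 'Input token estimate 104 exceeds budget 50' }
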
@@ -109,9 +109,21 @@ function toAnthropicResultParam(part) {
     throw new Error(`Unsupported content part type: ${part.type}`);
 }
 function toAssistantMessage(message) {
+    const stopReason = { code: 'ok' };
+    if (message.stop_reason === 'max_tokens') {
+        stopReason.code = 'max_tokens';
+    }
+    else if (message.stop_reason === 'tool_use') {
+        stopReason.code = 'ok';
+    }
+    else {
+        stopReason.code = 'other';
+        stopReason.message = `Unexpected stop reason: ${message.stop_reason}`;
+    }
     return {
         role: 'assistant',
         content: message.content.map(toContentPart).filter(Boolean),
+        stopReason,
     };
 }
 function toAnthropicTool(tool) {
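
Restated as a standalone function, the Anthropic mapping above reduces to the following (a sketch of the hunk, not a package export). Note that any stop reason other than max_tokens or tool_use, including a plain end_turn, lands in the 'other' bucket:

// Sketch of the mapping inside toAssistantMessage (illustrative name).
function mapAnthropicStopReason(stop_reason) {
    if (stop_reason === 'max_tokens')
        return { code: 'max_tokens' };
    if (stop_reason === 'tool_use')
        return { code: 'ok' };
    return { code: 'other', message: `Unexpected stop reason: ${stop_reason}` };
}

mapAnthropicStopReason('tool_use'); // { code: 'ok' }
mapAnthropicStopReason('end_turn'); // { code: 'other', message: 'Unexpected stop reason: end_turn' }
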
@@ -30,7 +30,10 @@ class Google {
         },
         contents,
         tools: conversation.tools.length > 0 ? [{ functionDeclarations: conversation.tools.map(toGeminiTool) }] : undefined,
-        generationConfig: { temperature: options.temperature },
+        generationConfig: {
+            temperature: options.temperature,
+            maxOutputTokens: options.maxTokens
+        },
     }, options);
     const [candidate] = response.candidates ?? [];
     if (!candidate)
@@ -85,9 +88,25 @@ function stripUnsupportedSchemaFields(schema) {
     return cleaned;
 }
 function toAssistantMessage(candidate) {
+    const stopReason = { code: 'ok' };
+    const finishReason = candidate.finishReason;
+    if (finishReason === 'MAX_TOKENS') {
+        stopReason.code = 'max_tokens';
+    }
+    else if (!finishReason || finishReason === 'STOP') {
+        stopReason.code = 'ok';
+    }
+    else if (finishReason.includes('FUNCTION') || finishReason.includes('TOOL')) {
+        stopReason.code = 'ok';
+    }
+    else {
+        stopReason.code = 'other';
+        stopReason.message = `Unexpected finish reason: ${finishReason}`;
+    }
     return {
         role: 'assistant',
-        content: candidate.content.parts.map(toContentPart).filter(Boolean),
+        content: (candidate.content.parts || []).map(toContentPart).filter(Boolean),
+        stopReason,
     };
 }
 function toContentPart(part) {
@@ -27,7 +27,7 @@ exports.OpenAI = OpenAI;
 async function complete(conversation, options) {
     const inputItems = conversation.messages.map(toResponseInputItems).flat();
     const tools = conversation.tools.map(toOpenAIFunctionTool);
-    const response = await create({
+    const { response, error } = await create({
         model: options.model,
         temperature: options.temperature,
         input: inputItems,
@@ -35,10 +35,31 @@ async function complete(conversation, options) {
         tools: tools.length > 0 ? tools : undefined,
         tool_choice: conversation.tools.length > 0 ? 'auto' : undefined,
         parallel_tool_calls: false,
+        max_output_tokens: options.maxTokens,
         reasoning: toOpenAIReasoning(options.reasoning),
     }, options);
+    if (!response || error)
+        return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: error } }, usage: { input: 0, output: 0 } };
     // Parse response output items
-    const result = { role: 'assistant', content: [] };
+    const stopReason = { code: 'ok' };
+    if (response.incomplete_details?.reason === 'max_output_tokens') {
+        stopReason.code = 'max_tokens';
+    }
+    else if (response.incomplete_details?.reason === 'content_filter') {
+        stopReason.code = 'other';
+        stopReason.message = 'Content filter triggered';
+    }
+    else if (response.incomplete_details?.reason) {
+        stopReason.code = 'other';
+        stopReason.message = `Unexpected incomplete reason: ${response.incomplete_details.reason}`;
+    }
+    const result = { role: 'assistant', content: [], stopReason };
+    const usage = {
+        input: response.usage?.input_tokens ?? 0,
+        output: response.usage?.output_tokens ?? 0,
+    };
+    if (stopReason.code !== 'ok')
+        return { result, usage };
     for (const item of response.output) {
         if (item.type === 'message' && item.role === 'assistant') {
             result.openaiId = item.id;
@@ -57,10 +78,6 @@ async function complete(conversation, options) {
             result.content.push(toToolCall(item));
         }
     }
-    const usage = {
-        input: response.usage?.input_tokens ?? 0,
-        output: response.usage?.output_tokens ?? 0,
-    };
     return { result, usage };
 }
 async function create(createParams, options) {
@@ -77,13 +94,18 @@ async function create(createParams, options) {
         signal: options.signal,
         timeout: options.apiTimeout
     });
+    const responseText = await response.text();
+    const responseBody = JSON.parse(responseText);
     if (!response.ok) {
-        options.debug?.('lowire:openai-responses')('Response:', response.status);
-        throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+        try {
+            return { error: responseBody.error.message };
+        }
+        catch {
+            return { error: responseText };
+        }
     }
-    const responseBody = await response.json();
     options.debug?.('lowire:openai-responses')('Response:', JSON.stringify(responseBody, null, 2));
-    return responseBody;
+    return { response: responseBody };
 }
 function toResultContentPart(part) {
     if (part.type === 'text') {
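
Both create() helpers in this release switch from throwing on non-2xx responses to returning an { response } / { error } pair that callers destructure. A self-contained sketch of the pattern (the URL and payload are placeholders; unlike the hunk above, this version parses the body inside the try, so a non-JSON error body falls back to raw text rather than throwing):

// Hypothetical endpoint and payload, for illustration only; assumes global fetch (Node 18+).
async function create(url, createParams, options = {}) {
    const response = await fetch(url, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(createParams),
        signal: options.signal,
    });
    const responseText = await response.text();
    if (!response.ok) {
        try {
            return { error: JSON.parse(responseText).error.message };
        }
        catch {
            return { error: responseText };
        }
    }
    return { response: JSON.parse(responseText) };
}

// Callers branch on the shape instead of wrapping the call in try/catch:
// const { response, error } = await create(url, params, options);
// if (!response || error) { /* surface the error as a stopReason instead of throwing */ }
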
@@ -32,9 +32,9 @@ async function complete(conversation, options) {
     };
     const openaiMessages = [systemMessage, ...conversation.messages.map(toCompletionsMessages).flat()];
     const openaiTools = conversation.tools.map(t => toCompletionsTool(t));
-    const response = await create({
+    const { response, error } = await create({
         model: options.model,
-        max_tokens: options.maxTokens,
+        max_completion_tokens: options.maxTokens,
         temperature: options.temperature,
         messages: openaiMessages,
         tools: openaiTools,
@@ -42,9 +42,15 @@ async function complete(conversation, options) {
         reasoning_effort: toCompletionsReasoning(options.reasoning),
         parallel_tool_calls: false,
     }, options);
+    if (error) {
+        if (error.type === 'invalid_request_error')
+            return { result: { role: 'assistant', content: [], stopReason: { code: 'max_tokens' } }, usage: { input: 0, output: 0 } };
+        return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: response.error.message } }, usage: { input: 0, output: 0 } };
+    }
     if (!response || !response.choices.length)
-        throw new Error('Failed to get response from OpenAI completions');
-    const result = { role: 'assistant', content: [] };
+        return { result: { role: 'assistant', content: [], stopReason: { code: 'other', message: 'Failed to get response from OpenAI completions' } }, usage: { input: 0, output: 0 } };
+    const result = { role: 'assistant', content: [], stopReason: { code: 'ok' } };
+    const finishReason = response.choices[0]?.finish_reason;
     for (const choice of response.choices) {
         const message = choice.message;
         if (message.content)
@@ -55,6 +61,10 @@ async function complete(conversation, options) {
             result.content.push(toToolCall(entry));
         }
     }
+    if (finishReason === 'length')
+        result.stopReason = { code: 'max_tokens' };
+    else if (finishReason !== 'tool_calls' && finishReason !== 'function_call' && finishReason !== 'stop')
+        result.stopReason = { code: 'other', message: `Unexpected finish reason: ${finishReason}` };
     const usage = {
         input: response.usage?.prompt_tokens ?? 0,
         output: response.usage?.completion_tokens ?? 0,
@@ -75,13 +85,18 @@ async function create(createParams, options) {
         signal: options.signal,
         timeout: options.apiTimeout
     });
+    const responseText = await response.text();
+    const responseBody = JSON.parse(responseText);
     if (!response.ok) {
-        options.debug?.('lowire:openai')('Response:', response.status);
-        throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+        try {
+            return { error: responseBody };
+        }
+        catch {
+            return { error: { type: 'unknown', message: responseText } };
+        }
     }
-    const responseBody = await response.json();
     options.debug?.('lowire:openai')('Response:', JSON.stringify(responseBody, null, 2));
-    return responseBody;
+    return { response: responseBody };
 }
 function toCopilotResultContentPart(part) {
     if (part.type === 'text') {
package/lib/types.d.ts CHANGED
@@ -38,6 +38,10 @@ export type UserMessage = BaseMessage & {
 export type AssistantMessage = BaseMessage & {
     role: 'assistant';
     content: (TextContentPart | ToolCallContentPart | ThinkingContentPart)[];
+    stopReason: {
+        code: 'max_tokens' | 'ok' | 'other';
+        message?: string;
+    };
     openaiId?: string;
     openaiStatus?: 'completed' | 'incomplete' | 'in_progress';
     toolError?: string;
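
Every provider now attaches stopReason to the assistant message, and the loop branches on it (see the loop.js hunk above). A minimal consumer sketch against the new type (checkStop is an illustrative name):

// `message` is an AssistantMessage as declared above.
function checkStop(message) {
    switch (message.stopReason.code) {
        case 'ok':
            return null; // keep looping
        case 'max_tokens':
            return 'Max tokens exhausted'; // loop.js converts this into a status: 'error' result
        case 'other':
            return message.stopReason.message ?? 'Unknown stop reason';
    }
}
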
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lowire/loop",
-  "version": "0.0.22",
+  "version": "0.0.23",
   "description": "Small agentic loop",
   "repository": {
     "type": "git",