@lowire/loop 0.0.14 → 0.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,14 +16,183 @@
   */
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAI = void 0;
- const openaiCompletions_1 = require("./openaiCompletions");
- const openaiResponses_1 = require("./openaiResponses");
  class OpenAI {
      name = 'openai';
      async complete(conversation, options) {
-         if (options.apiVersion === 'v1/chat/completions')
-             return (0, openaiCompletions_1.complete)(conversation, options);
-         return (0, openaiResponses_1.complete)(conversation, options);
+         return complete(conversation, options);
      }
  }
  exports.OpenAI = OpenAI;
+ async function complete(conversation, options) {
+     const inputItems = conversation.messages.map(toResponseInputItems).flat();
+     const tools = conversation.tools.map(toOpenAIFunctionTool);
+     const response = await create({
+         model: options.model,
+         temperature: options.temperature,
+         input: inputItems,
+         instructions: systemPrompt(conversation.systemPrompt),
+         tools: tools.length > 0 ? tools : undefined,
+         tool_choice: conversation.tools.length > 0 ? 'auto' : undefined,
+         parallel_tool_calls: false,
+         reasoning: toOpenAIReasoning(options.reasoning),
+     }, options);
+     // Parse response output items
+     const result = { role: 'assistant', content: [] };
+     for (const item of response.output) {
+         if (item.type === 'message' && item.role === 'assistant') {
+             result.openaiId = item.id;
+             result.openaiStatus = item.status;
+             for (const contentPart of item.content) {
+                 if (contentPart.type === 'output_text') {
+                     result.content.push({
+                         type: 'text',
+                         text: contentPart.text,
+                     });
+                 }
+             }
+         }
+         else if (item.type === 'function_call') {
+             // Add tool call
+             result.content.push(toToolCall(item));
+         }
+     }
+     const usage = {
+         input: response.usage?.input_tokens ?? 0,
+         output: response.usage?.output_tokens ?? 0,
+     };
+     return { result, usage };
+ }
+ async function create(createParams, options) {
+     const headers = {
+         'Content-Type': 'application/json',
+         'Authorization': `Bearer ${options.apiKey}`,
+     };
+     const debugBody = { ...createParams, tools: `${createParams.tools?.length ?? 0} tools` };
+     options.debug?.('lowire:openai-responses')('Request:', JSON.stringify(debugBody, null, 2));
+     const response = await fetch(options.apiEndpoint ?? `https://api.openai.com/v1/responses`, {
+         method: 'POST',
+         headers,
+         body: JSON.stringify(createParams)
+     });
+     if (!response.ok) {
+         options.debug?.('lowire:openai-responses')('Response:', response.status);
+         throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
+     }
+     const responseBody = await response.json();
+     options.debug?.('lowire:openai-responses')('Response:', JSON.stringify(responseBody, null, 2));
+     return responseBody;
+ }
+ function toResultContentPart(part) {
+     if (part.type === 'text') {
+         return {
+             type: 'input_text',
+             text: part.text,
+         };
+     }
+     if (part.type === 'image') {
+         return {
+             type: 'input_image',
+             image_url: `data:${part.mimeType};base64,${part.data}`,
+             detail: 'auto',
+         };
+     }
+     throw new Error(`Cannot convert content part of type ${part.type} to response content part`);
+ }
+ function toResponseInputItems(message) {
+     if (message.role === 'user') {
+         return [{
+             type: 'message',
+             role: 'user',
+             content: message.content
+         }];
+     }
+     if (message.role === 'assistant') {
+         const textParts = message.content.filter(part => part.type === 'text');
+         const toolCallParts = message.content.filter(part => part.type === 'tool_call');
+         const items = [];
+         // Add assistant message with text content
+         if (textParts.length > 0) {
+             const outputMessage = {
+                 id: message.openaiId,
+                 status: message.openaiStatus,
+                 type: 'message',
+                 role: 'assistant',
+                 content: textParts.map(part => ({
+                     type: 'output_text',
+                     text: part.text,
+                     annotations: [],
+                     logprobs: []
+                 }))
+             };
+             items.push(outputMessage);
+         }
+         if (message.toolError) {
+             items.push({
+                 type: 'message',
+                 role: 'user',
+                 content: message.toolError
+             });
+         }
+         items.push(...toolCallParts.map(toFunctionToolCall).flat());
+         return items;
+     }
+     throw new Error(`Unsupported message role: ${message.role}`);
+ }
+ function toOpenAIFunctionTool(tool) {
+     return {
+         type: 'function',
+         name: tool.name,
+         description: tool.description ?? null,
+         parameters: tool.inputSchema,
+         strict: null,
+     };
+ }
+ function toFunctionToolCall(toolCall) {
+     const result = [{
+         type: 'function_call',
+         call_id: toolCall.id,
+         name: toolCall.name,
+         arguments: JSON.stringify(toolCall.arguments),
+         id: toolCall.openaiId,
+         status: toolCall.openaiStatus,
+     }];
+     if (toolCall.result) {
+         result.push({
+             type: 'function_call_output',
+             call_id: toolCall.id,
+             output: toolCall.result.content.map(toResultContentPart),
+         });
+     }
+     return result;
+ }
+ function toToolCall(functionCall) {
+     return {
+         type: 'tool_call',
+         name: functionCall.name,
+         arguments: JSON.parse(functionCall.arguments),
+         id: functionCall.call_id,
+         openaiId: functionCall.id,
+         openaiStatus: functionCall.status,
+     };
+ }
+ function toOpenAIReasoning(reasoning) {
+     switch (reasoning) {
+         case 'none':
+             return { effort: 'none' };
+         case 'medium':
+             return { effort: 'medium' };
+         case 'high':
+             return { effort: 'high' };
+     }
+ }
+ const systemPrompt = (prompt) => `
+ ### System instructions
+
+ ${prompt}
+
+ ### Tool calling instructions
+ - Make sure every message contains a tool call.
+ - When you use a tool, you may provide a brief thought or explanation in the content field
+ immediately before the tool_call. Do not split this into separate messages.
+ - Every reply must include a tool call.
+ `;
@@ -14,7 +14,10 @@
   * limitations under the License.
   */
  import type * as types from '../types';
- export declare function complete(conversation: types.Conversation, options: types.CompletionOptions): Promise<{
-     result: types.AssistantMessage;
-     usage: types.Usage;
- }>;
+ export declare class OpenAICompatible implements types.Provider {
+     readonly name: string;
+     complete(conversation: types.Conversation, options: types.CompletionOptions): Promise<{
+         result: types.AssistantMessage;
+         usage: types.Usage;
+     }>;
+ }
@@ -15,7 +15,14 @@
   * limitations under the License.
   */
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.complete = complete;
+ exports.OpenAICompatible = void 0;
+ class OpenAICompatible {
+     name = 'openai-compatible';
+     async complete(conversation, options) {
+         return complete(conversation, options);
+     }
+ }
+ exports.OpenAICompatible = OpenAICompatible;
  async function complete(conversation, options) {
      // Convert generic messages to OpenAI format
      const systemMessage = {
@@ -14,4 +14,4 @@
   * limitations under the License.
   */
  import type * as types from '../types';
- export declare function getProvider(api: 'openai' | 'anthropic' | 'google'): types.Provider;
+ export declare function getProvider(api: 'openai' | 'openai-compatible' | 'anthropic' | 'google'): types.Provider;
@@ -19,9 +19,12 @@ exports.getProvider = getProvider;
  const anthropic_1 = require("./anthropic");
  const google_1 = require("./google");
  const openai_1 = require("./openai");
+ const openaiCompatible_1 = require("./openaiCompatible");
  function getProvider(api) {
      if (api === 'openai')
          return new openai_1.OpenAI();
+     if (api === 'openai-compatible')
+         return new openaiCompatible_1.OpenAICompatible();
      if (api === 'anthropic')
          return new anthropic_1.Anthropic();
      if (api === 'google')
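
Taken together, the hunks above split the old single 'openai' provider into two: 'openai' now always targets the Responses API, and the Chat Completions path moves behind the new 'openai-compatible' provider. A minimal usage sketch of the new selection, assuming the providers module is reachable at this path (the import path and call site are assumptions, not shown in this diff):

const { getProvider } = require('@lowire/loop/lib/providers'); // path is an assumption

// 0.0.14 routed via options.apiVersion === 'v1/chat/completions';
// 0.0.15 selects by provider name instead.
const responses = getProvider('openai');             // Responses API (api.openai.com/v1/responses)
const chatCompat = getProvider('openai-compatible'); // Chat Completions-style endpoints
console.log(responses.name, chatCompat.name);        // 'openai' 'openai-compatible'
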
package/lib/types.d.ts CHANGED
@@ -90,10 +90,9 @@ export type Conversation = {
  };
  export type Debug = (category: string) => (...args: any[]) => void;
  export type CompletionOptions = {
-     api: 'openai' | 'anthropic' | 'google';
+     api: 'openai' | 'openai-compatible' | 'anthropic' | 'google';
      apiEndpoint?: string;
      apiKey: string;
-     apiVersion?: string;
      model: string;
      maxTokens?: number;
      reasoning?: 'none' | 'medium' | 'high';
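
For callers, the practical effect of the CompletionOptions change is that apiVersion is removed and the api union gains 'openai-compatible'. A hypothetical before/after sketch (field values are placeholders, not taken from the package):

// 0.0.14: Chat Completions was chosen via apiVersion on the 'openai' provider.
// { api: 'openai', apiVersion: 'v1/chat/completions', apiKey: '...', model: '...' }

// 0.0.15: choose the provider explicitly; apiVersion is no longer part of the type.
const options = {
    api: 'openai-compatible',
    apiEndpoint: 'https://example.com/v1/chat/completions', // placeholder endpoint
    apiKey: 'sk-placeholder',
    model: 'gpt-4o-mini', // placeholder model name
};
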
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lowire/loop",
-   "version": "0.0.14",
+   "version": "0.0.15",
    "description": "Small agentic loop",
    "repository": {
      "type": "git",
@@ -1,20 +0,0 @@
- /**
-  * Copyright (c) Microsoft Corporation.
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  * http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
- import type * as types from '../types';
- export declare function complete(conversation: types.Conversation, options: types.CompletionOptions): Promise<{
-     result: types.AssistantMessage;
-     usage: types.Usage;
- }>;
@@ -1,191 +0,0 @@
- "use strict";
- /**
-  * Copyright (c) Microsoft Corporation.
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  * http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.complete = complete;
- async function complete(conversation, options) {
-     const inputItems = conversation.messages.map(toResponseInputItems).flat();
-     const tools = conversation.tools.map(toOpenAIFunctionTool);
-     const response = await create({
-         model: options.model,
-         temperature: options.temperature,
-         input: inputItems,
-         instructions: systemPrompt(conversation.systemPrompt),
-         tools: tools.length > 0 ? tools : undefined,
-         tool_choice: conversation.tools.length > 0 ? 'auto' : undefined,
-         parallel_tool_calls: false,
-         reasoning: toOpenAIReasoning(options.reasoning),
-     }, options);
-     // Parse response output items
-     const result = { role: 'assistant', content: [] };
-     for (const item of response.output) {
-         if (item.type === 'message' && item.role === 'assistant') {
-             result.openaiId = item.id;
-             result.openaiStatus = item.status;
-             for (const contentPart of item.content) {
-                 if (contentPart.type === 'output_text') {
-                     result.content.push({
-                         type: 'text',
-                         text: contentPart.text,
-                     });
-                 }
-             }
-         }
-         else if (item.type === 'function_call') {
-             // Add tool call
-             result.content.push(toToolCall(item));
-         }
-     }
-     const usage = {
-         input: response.usage?.input_tokens ?? 0,
-         output: response.usage?.output_tokens ?? 0,
-     };
-     return { result, usage };
- }
- async function create(createParams, options) {
-     const headers = {
-         'Content-Type': 'application/json',
-         'Authorization': `Bearer ${options.apiKey}`,
-     };
-     const debugBody = { ...createParams, tools: `${createParams.tools?.length ?? 0} tools` };
-     options.debug?.('lowire:openai-responses')('Request:', JSON.stringify(debugBody, null, 2));
-     const response = await fetch(options.apiEndpoint ?? `https://api.openai.com/v1/responses`, {
-         method: 'POST',
-         headers,
-         body: JSON.stringify(createParams)
-     });
-     if (!response.ok) {
-         options.debug?.('lowire:openai-responses')('Response:', response.status);
-         throw new Error(`API error: ${response.status} ${response.statusText} ${await response.text()}`);
-     }
-     const responseBody = await response.json();
-     options.debug?.('lowire:openai-responses')('Response:', JSON.stringify(responseBody, null, 2));
-     return responseBody;
- }
- function toResultContentPart(part) {
-     if (part.type === 'text') {
-         return {
-             type: 'input_text',
-             text: part.text,
-         };
-     }
-     if (part.type === 'image') {
-         return {
-             type: 'input_image',
-             image_url: `data:${part.mimeType};base64,${part.data}`,
-             detail: 'auto',
-         };
-     }
-     throw new Error(`Cannot convert content part of type ${part.type} to response content part`);
- }
- function toResponseInputItems(message) {
-     if (message.role === 'user') {
-         return [{
-             type: 'message',
-             role: 'user',
-             content: message.content
-         }];
-     }
-     if (message.role === 'assistant') {
-         const textParts = message.content.filter(part => part.type === 'text');
-         const toolCallParts = message.content.filter(part => part.type === 'tool_call');
-         const items = [];
-         // Add assistant message with text content
-         if (textParts.length > 0) {
-             const outputMessage = {
-                 id: message.openaiId,
-                 status: message.openaiStatus,
-                 type: 'message',
-                 role: 'assistant',
-                 content: textParts.map(part => ({
-                     type: 'output_text',
-                     text: part.text,
-                     annotations: [],
-                     logprobs: []
-                 }))
-             };
-             items.push(outputMessage);
-         }
-         if (message.toolError) {
-             items.push({
-                 type: 'message',
-                 role: 'user',
-                 content: message.toolError
-             });
-         }
-         items.push(...toolCallParts.map(toFunctionToolCall).flat());
-         return items;
-     }
-     throw new Error(`Unsupported message role: ${message.role}`);
- }
- function toOpenAIFunctionTool(tool) {
-     return {
-         type: 'function',
-         name: tool.name,
-         description: tool.description ?? null,
-         parameters: tool.inputSchema,
-         strict: null,
-     };
- }
- function toFunctionToolCall(toolCall) {
-     const result = [{
-         type: 'function_call',
-         call_id: toolCall.id,
-         name: toolCall.name,
-         arguments: JSON.stringify(toolCall.arguments),
-         id: toolCall.openaiId,
-         status: toolCall.openaiStatus,
-     }];
-     if (toolCall.result) {
-         result.push({
-             type: 'function_call_output',
-             call_id: toolCall.id,
-             output: toolCall.result.content.map(toResultContentPart),
-         });
-     }
-     return result;
- }
- function toToolCall(functionCall) {
-     return {
-         type: 'tool_call',
-         name: functionCall.name,
-         arguments: JSON.parse(functionCall.arguments),
-         id: functionCall.call_id,
-         openaiId: functionCall.id,
-         openaiStatus: functionCall.status,
-     };
- }
- function toOpenAIReasoning(reasoning) {
-     switch (reasoning) {
-         case 'none':
-             return { effort: 'none' };
-         case 'medium':
-             return { effort: 'medium' };
-         case 'high':
-             return { effort: 'high' };
-     }
- }
- const systemPrompt = (prompt) => `
- ### System instructions
-
- ${prompt}
-
- ### Tool calling instructions
- - Make sure every message contains a tool call.
- - When you use a tool, you may provide a brief thought or explanation in the content field
- immediately before the tool_call. Do not split this into separate messages.
- - Every reply must include a tool call.
- `;