lynkr 4.1.0 → 4.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
1
+ /**
2
+ * OpenAI Responses API ↔ Chat Completions API Conversion
3
+ *
4
+ * The Responses API is OpenAI's new format that uses 'input' instead of 'messages'.
5
+ * This module converts between the two formats for compatibility.
6
+ *
7
+ * @module clients/responses-format
8
+ */
9
+
10
+ const logger = require("../logger");
11
+
12
/**
 * Convert a Responses API request to Chat Completions format.
 *
 * Accepts `input` as either a plain string (wrapped into a single user
 * message) or an array of message-like objects, which are validated and
 * normalized into OpenAI Chat Completions `messages`.
 *
 * @param {Object} responsesRequest - Responses API format request
 * @returns {Object} Chat Completions format request
 * @throws {Error} If an array input yields no valid messages after filtering
 */
function convertResponsesToChat(responsesRequest) {
  const { input, model, max_tokens, temperature, top_p, tools, tool_choice, stream } = responsesRequest;

  logger.info({
    inputType: typeof input,
    inputIsArray: Array.isArray(input),
    inputLength: Array.isArray(input) ? input.length : input?.length || 0,
    model,
    hasTools: !!tools
  }, "Converting Responses API to Chat Completions");

  // Handle input as either string or array of messages
  let messages;

  if (typeof input === 'string') {
    // Simple string input - convert to a single user message
    messages = [{ role: "user", content: input }];
    logger.info({ messageCount: 1 }, "Converted string input to single user message");

  } else if (Array.isArray(input)) {
    // Array of messages - validate and clean each message
    logger.info({
      rawInputSample: input.slice(0, 3).map(m => ({
        role: m?.role,
        hasContent: !!m?.content,
        contentType: typeof m?.content,
        contentLength: m?.content?.length || 0,
        hasToolCalls: !!m?.tool_calls,
        hasToolCallId: !!m?.tool_call_id,
        allKeys: m ? Object.keys(m) : []
      }))
    }, "Processing Responses API message array");

    messages = input
      .filter(msg => {
        // Keep messages that have a valid role and either content or tool metadata
        const isValid = msg &&
          msg.role &&
          (msg.content || msg.tool_calls || msg.tool_call_id);

        if (!isValid) {
          logger.warn({
            msg: msg ? { role: msg.role, hasContent: !!msg.content, keys: Object.keys(msg) } : null
          }, "Filtering out invalid message");
        }

        return isValid;
      })
      .map(msg => {
        // Clean up message structure - only keep valid OpenAI Chat Completions fields
        let content = msg.content || null;

        // Handle content that's an array of content parts (multimodal format).
        // OpenAI accepts both: string OR array of {type, text/image_url}.
        // If it's an array with input_text/text types, extract the text.
        if (Array.isArray(content)) {
          const textParts = content
            .filter(part => part && (part.type === 'text' || part.type === 'input_text'))
            .map(part => part.text || part.input_text || '')
            .filter(text => text.length > 0);

          if (textParts.length > 0) {
            // FIX: capture the part count BEFORE reassigning `content`;
            // the previous code logged the joined string's character
            // length as `originalPartCount`.
            const originalPartCount = content.length;
            content = textParts.join('\n\n');
            logger.info({
              originalPartCount,
              extractedTextLength: content.length,
              sample: content.substring(0, 100)
            }, "Converted multimodal content array to string");
          }
          // else: no text parts found - keep `content` as the original
          // array (might be image-only). The old `content = content;`
          // self-assignment was a no-op and has been removed.
        }

        const cleaned = {
          role: msg.role,
          content: content
        };

        // Add optional Chat Completions fields only when present
        if (msg.name) cleaned.name = msg.name;
        if (msg.tool_calls) cleaned.tool_calls = msg.tool_calls;
        if (msg.tool_call_id) cleaned.tool_call_id = msg.tool_call_id;

        return cleaned;
      });

    logger.info({
      originalCount: input.length,
      filteredCount: messages.length,
      messageRoles: messages.map(m => m.role),
      sample: messages.slice(0, 2).map(m => ({
        role: m.role,
        contentType: typeof m.content,
        contentIsArray: Array.isArray(m.content),
        contentPreview: typeof m.content === 'string' ? m.content.substring(0, 50) : (Array.isArray(m.content) ? `[Array:${m.content.length}]` : m.content),
        hasToolCalls: !!m.tool_calls
      }))
    }, "Converted and cleaned Responses API message array");

    // Debug: log ALL messages to see what's actually being returned
    logger.info({
      allMessagesDetailed: messages.map((m, idx) => ({
        index: idx,
        role: m.role,
        contentType: typeof m.content,
        contentLength: typeof m.content === 'string' ? m.content.length : (Array.isArray(m.content) ? m.content.length : 'N/A'),
        contentSample: typeof m.content === 'string' ? m.content.substring(0, 100) : JSON.stringify(m.content).substring(0, 100)
      }))
    }, "ALL MESSAGES AFTER CONVERSION");

    // Validate we have at least one message
    if (messages.length === 0) {
      logger.error({ originalInput: input }, "All messages filtered out - no valid messages remaining");
      throw new Error("Responses API: No valid messages after filtering. All messages were invalid.");
    }

  } else {
    // Fallback for unexpected input (null, number, object, ...)
    logger.warn({
      inputType: typeof input,
      input: input
    }, "Unexpected input format in Responses API");
    messages = [{ role: "user", content: String(input || "") }];
  }

  const result = {
    model: model || "gpt-4o",
    messages: messages,
    max_tokens: max_tokens || 4096,
    temperature: temperature,
    top_p: top_p,
    tools: tools,
    tool_choice: tool_choice,
    stream: stream || false
  };

  logger.info({
    resultMessageCount: messages.length,
    resultHasTools: !!result.tools,
    resultStream: result.stream
  }, "Responses to Chat conversion complete");

  return result;
}
164
+
165
/**
 * Convert a Chat Completions response to Responses API format.
 *
 * @param {Object} chatResponse - Chat Completions format response
 * @returns {Object} Responses API format response
 * @throws {Error} If the response contains no choices/message to convert
 */
function convertChatToResponses(chatResponse) {
  logger.debug({
    hasContent: !!chatResponse.choices?.[0]?.message?.content,
    finishReason: chatResponse.choices?.[0]?.finish_reason
  }, "Converting Chat Completions to Responses API");

  // FIX: the debug log above already optional-chains `choices?.[0]`,
  // acknowledging it may be absent, yet the code below indexed it
  // unguarded and would throw an opaque TypeError. Fail with a
  // descriptive error instead.
  const choice = chatResponse.choices?.[0];
  if (!choice || !choice.message) {
    throw new Error("Chat Completions response has no choices to convert");
  }

  const message = choice.message;

  // Extract content and tool calls (both may legitimately be absent)
  const content = message.content || "";
  const toolCalls = message.tool_calls || [];

  return {
    id: chatResponse.id,
    object: "response",
    created: chatResponse.created,
    model: chatResponse.model,
    content: content,
    // Omit tool_calls entirely (undefined) when there are none
    tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
    stop_reason: mapFinishReason(choice.finish_reason),
    usage: chatResponse.usage
  };
}
193
+
194
/**
 * Map a Chat Completions finish_reason to a Responses API stop_reason.
 *
 * FIX: the previous implementation looked the reason up on a plain object
 * literal, so inherited Object.prototype keys ("toString", "constructor",
 * "hasOwnProperty", ...) resolved to functions instead of falling back to
 * "end_turn". A switch statement has no inherited keys.
 *
 * @param {string} finishReason - Chat Completions finish reason
 * @returns {string} Responses API stop reason ("end_turn" for unknown values)
 */
function mapFinishReason(finishReason) {
  switch (finishReason) {
    case "stop":
      return "end_turn";
    case "length":
      return "max_tokens";
    case "tool_calls":
      return "tool_use";
    case "content_filter":
      return "content_filter";
    default:
      return "end_turn";
  }
}
209
+
210
// Public API: Responses ↔ Chat Completions conversion helpers.
module.exports.convertResponsesToChat = convertResponsesToChat;
module.exports.convertChatToResponses = convertChatToResponses;
module.exports.mapFinishReason = mapFinishReason;
@@ -12,7 +12,7 @@ const STANDARD_TOOLS = [
12
12
  properties: {
13
13
  file_path: {
14
14
  type: "string",
15
- description: "The absolute path to the file to write (must be absolute, not relative)"
15
+ description: "Relative path within workspace (e.g., 'hello.cpp', 'src/main.py'). DO NOT use absolute paths."
16
16
  },
17
17
  content: {
18
18
  type: "string",
@@ -30,7 +30,7 @@ const STANDARD_TOOLS = [
30
30
  properties: {
31
31
  file_path: {
32
32
  type: "string",
33
- description: "The absolute path to the file to read"
33
+ description: "Relative path within workspace (e.g., 'config.js', 'src/index.ts'). DO NOT use absolute paths."
34
34
  },
35
35
  limit: {
36
36
  type: "number",
@@ -52,7 +52,7 @@ const STANDARD_TOOLS = [
52
52
  properties: {
53
53
  file_path: {
54
54
  type: "string",
55
- description: "The absolute path to the file to modify"
55
+ description: "Relative path within workspace (e.g., 'app.py', 'src/utils.js'). DO NOT use absolute paths."
56
56
  },
57
57
  old_string: {
58
58
  type: "string",
@@ -312,7 +312,7 @@ const STANDARD_TOOLS = [
312
312
  properties: {
313
313
  notebook_path: {
314
314
  type: "string",
315
- description: "The absolute path to the Jupyter notebook file to edit"
315
+ description: "Relative path to the Jupyter notebook within workspace (e.g., 'analysis.ipynb', 'notebooks/data.ipynb'). DO NOT use absolute paths."
316
316
  },
317
317
  new_source: {
318
318
  type: "string",
@@ -1052,6 +1052,38 @@ function sanitizePayload(payload) {
1052
1052
  }
1053
1053
  }
1054
1054
 
1055
+ // FIX: Prevent consecutive messages with the same role (causes llama.cpp 400 error)
1056
+ if (Array.isArray(clean.messages) && clean.messages.length > 0) {
1057
+ const deduplicated = [];
1058
+ let lastRole = null;
1059
+
1060
+ for (const msg of clean.messages) {
1061
+ // Skip if this message has the same role as the previous one
1062
+ if (msg.role === lastRole) {
1063
+ logger.debug({
1064
+ skippedRole: msg.role,
1065
+ contentPreview: typeof msg.content === 'string'
1066
+ ? msg.content.substring(0, 50)
1067
+ : JSON.stringify(msg.content).substring(0, 50)
1068
+ }, 'Skipping duplicate consecutive message with same role');
1069
+ continue;
1070
+ }
1071
+
1072
+ deduplicated.push(msg);
1073
+ lastRole = msg.role;
1074
+ }
1075
+
1076
+ if (deduplicated.length !== clean.messages.length) {
1077
+ logger.info({
1078
+ originalCount: clean.messages.length,
1079
+ deduplicatedCount: deduplicated.length,
1080
+ removed: clean.messages.length - deduplicated.length
1081
+ }, 'Removed consecutive duplicate roles from message sequence');
1082
+ }
1083
+
1084
+ clean.messages = deduplicated;
1085
+ }
1086
+
1055
1087
  return clean;
1056
1088
  }
1057
1089