@proteinjs/conversation 2.7.2 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +24 -0
  2. package/LICENSE +21 -0
  3. package/dist/index.d.ts +3 -0
  4. package/dist/index.d.ts.map +1 -1
  5. package/dist/index.js +3 -0
  6. package/dist/index.js.map +1 -1
  7. package/dist/src/CodegenConversation.js +1 -1
  8. package/dist/src/CodegenConversation.js.map +1 -1
  9. package/dist/src/Conversation.d.ts +173 -99
  10. package/dist/src/Conversation.d.ts.map +1 -1
  11. package/dist/src/Conversation.js +903 -502
  12. package/dist/src/Conversation.js.map +1 -1
  13. package/dist/src/OpenAi.d.ts +20 -0
  14. package/dist/src/OpenAi.d.ts.map +1 -1
  15. package/dist/src/OpenAi.js +16 -0
  16. package/dist/src/OpenAi.js.map +1 -1
  17. package/dist/src/OpenAiStreamProcessor.d.ts +9 -3
  18. package/dist/src/OpenAiStreamProcessor.d.ts.map +1 -1
  19. package/dist/src/OpenAiStreamProcessor.js +5 -3
  20. package/dist/src/OpenAiStreamProcessor.js.map +1 -1
  21. package/dist/src/UsageData.d.ts.map +1 -1
  22. package/dist/src/UsageData.js +22 -0
  23. package/dist/src/UsageData.js.map +1 -1
  24. package/dist/src/code_template/Code.d.ts.map +1 -1
  25. package/dist/src/code_template/Code.js +8 -2
  26. package/dist/src/code_template/Code.js.map +1 -1
  27. package/dist/src/resolveModel.d.ts +17 -0
  28. package/dist/src/resolveModel.d.ts.map +1 -0
  29. package/dist/src/resolveModel.js +121 -0
  30. package/dist/src/resolveModel.js.map +1 -0
  31. package/dist/test/conversation/conversation.generateObject.test.d.ts +2 -0
  32. package/dist/test/conversation/conversation.generateObject.test.d.ts.map +1 -0
  33. package/dist/test/conversation/conversation.generateObject.test.js +153 -0
  34. package/dist/test/conversation/conversation.generateObject.test.js.map +1 -0
  35. package/dist/test/conversation/conversation.generateResponse.test.d.ts +2 -0
  36. package/dist/test/conversation/conversation.generateResponse.test.d.ts.map +1 -0
  37. package/dist/test/conversation/conversation.generateResponse.test.js +167 -0
  38. package/dist/test/conversation/conversation.generateResponse.test.js.map +1 -0
  39. package/dist/test/conversation/conversation.generateStream.test.d.ts +2 -0
  40. package/dist/test/conversation/conversation.generateStream.test.d.ts.map +1 -0
  41. package/dist/test/conversation/conversation.generateStream.test.js +255 -0
  42. package/dist/test/conversation/conversation.generateStream.test.js.map +1 -0
  43. package/index.ts +5 -0
  44. package/package.json +7 -2
  45. package/src/CodegenConversation.ts +1 -1
  46. package/src/Conversation.ts +938 -496
  47. package/src/OpenAi.ts +20 -0
  48. package/src/OpenAiStreamProcessor.ts +9 -3
  49. package/src/UsageData.ts +25 -0
  50. package/src/code_template/Code.ts +5 -1
  51. package/src/resolveModel.ts +130 -0
  52. package/test/conversation/conversation.generateObject.test.ts +132 -0
  53. package/test/conversation/conversation.generateResponse.test.ts +132 -0
  54. package/test/conversation/conversation.generateStream.test.ts +173 -0
@@ -46,16 +46,24 @@ var __generator = (this && this.__generator) || function (thisArg, body) {
  if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
  }
  };
- var __rest = (this && this.__rest) || function (s, e) {
- var t = {};
- for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
- t[p] = s[p];
- if (s != null && typeof Object.getOwnPropertySymbols === "function")
- for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
- if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
- t[p[i]] = s[p[i]];
- }
- return t;
+ var __await = (this && this.__await) || function (v) { return this instanceof __await ? (this.v = v, this) : new __await(v); }
+ var __asyncGenerator = (this && this.__asyncGenerator) || function (thisArg, _arguments, generator) {
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+ var g = generator.apply(thisArg, _arguments || []), i, q = [];
+ return i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i;
+ function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }
+ function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }
+ function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }
+ function fulfill(value) { resume("next", value); }
+ function reject(value) { resume("throw", value); }
+ function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }
+ };
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+ var m = o[Symbol.asyncIterator], i;
+ return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+ function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+ function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
  };
  var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
@@ -67,54 +75,332 @@ var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
  return to.concat(ar || Array.prototype.slice.call(from));
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.summarizeConversationHistoryFunction = exports.summarizeConversationHistoryFunctionName = exports.Conversation = void 0;
- var OpenAi_1 = require("./OpenAi");
- var MessageHistory_1 = require("./history/MessageHistory");
+ exports.Conversation = void 0;
+ var ai_1 = require("ai");
  var logger_1 = require("@proteinjs/logger");
- var util_node_1 = require("@proteinjs/util-node");
- var tiktoken_1 = require("tiktoken");
- var PackageFunctions_1 = require("./fs/package/PackageFunctions");
+ var MessageHistory_1 = require("./history/MessageHistory");
  var UsageData_1 = require("./UsageData");
- var ai_1 = require("ai");
+ var resolveModel_1 = require("./resolveModel");
+ // ────────────────────────────────────────────────────────────────
+ // Default constants
+ // ────────────────────────────────────────────────────────────────
+ var DEFAULT_MODEL = 'gpt-4o';
+ var DEFAULT_TOKEN_LIMIT = 50000;
+ // ────────────────────────────────────────────────────────────────
+ // Conversation class
+ // ────────────────────────────────────────────────────────────────
  var Conversation = /** @class */ (function () {
  function Conversation(params) {
  var _a, _b, _c, _d;
- this.tokenLimit = 50000;
  this.systemMessages = [];
  this.functions = [];
  this.messageModerators = [];
- this.generatedCode = false;
- this.generatedList = false;
  this.modulesProcessed = false;
  this.processingModulesPromise = null;
  this.params = params;
+ this.tokenLimit = (_b = (_a = params.limits) === null || _a === void 0 ? void 0 : _a.tokenLimit) !== null && _b !== void 0 ? _b : DEFAULT_TOKEN_LIMIT;
  this.history = new MessageHistory_1.MessageHistory({
- maxMessages: (_a = params.limits) === null || _a === void 0 ? void 0 : _a.maxMessagesInHistory,
- enforceMessageLimit: (_b = params.limits) === null || _b === void 0 ? void 0 : _b.enforceLimits,
+ maxMessages: (_c = params.limits) === null || _c === void 0 ? void 0 : _c.maxMessagesInHistory,
+ enforceMessageLimit: (_d = params.limits) === null || _d === void 0 ? void 0 : _d.enforceLimits,
  });
  this.logger = new logger_1.Logger({ name: params.name, logLevel: params.logLevel });
- if ((_c = params === null || params === void 0 ? void 0 : params.limits) === null || _c === void 0 ? void 0 : _c.enforceLimits) {
- this.addFunctions('Conversation', [(0, exports.summarizeConversationHistoryFunction)(this)]);
+ }
+ // ────────────────────────────────────────────────────────────
+ // Public API
+ // ────────────────────────────────────────────────────────────
+ /**
+ * Stream a text response from the model.
+ *
+ * Returns a `StreamResult` with async iterables for text and reasoning chunks,
+ * plus promises that resolve when generation completes.
+ *
+ * For OpenAI models with high reasoning effort or pro models, this may
+ * fall back to background/polling mode via `OpenAiResponses` and return
+ * the full result as a single-chunk stream.
+ */
+ Conversation.prototype.generateStream = function (params) {
+ var _a, _b, _c, _d;
+ return __awaiter(this, void 0, void 0, function () {
+ var model, modelString, provider, messages, system, nonSystem, allFunctions, tools, providerOptions, result, usagePromise, toolInvocationsPromise, safeText, safeReasoning, safeSources, safeUsage, safeToolInvocations;
+ return __generator(this, function (_e) {
+ switch (_e.label) {
+ case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
+ case 1:
+ _e.sent();
+ model = this.resolveModelInstance(params.model);
+ modelString = this.getModelString(params.model);
+ provider = (0, resolveModel_1.inferProvider)((_b = (_a = params.model) !== null && _a !== void 0 ? _a : this.params.defaultModel) !== null && _b !== void 0 ? _b : DEFAULT_MODEL);
+ this.logger.info({
+ message: "generateStream",
+ obj: { model: modelString, provider: provider, reasoningEffort: params.reasoningEffort, webSearch: params.webSearch },
+ });
+ // Check if we should use background/polling mode (OpenAI-specific)
+ if (provider === 'openai' && this.shouldUseBackgroundMode(modelString, params)) {
+ return [2 /*return*/, this.generateStreamViaPolling(params, modelString)];
+ }
+ messages = this.buildAiSdkMessages(params.messages);
+ // Google requires all system messages at the beginning of the conversation.
+ // Reorder so system messages come first, preserving relative order within
+ // each group.
+ if (provider === 'google') {
+ system = messages.filter(function (m) { return m.role === 'system'; });
+ nonSystem = messages.filter(function (m) { return m.role !== 'system'; });
+ messages = __spreadArray(__spreadArray([], system, true), nonSystem, true);
+ }
+ allFunctions = __spreadArray(__spreadArray([], this.functions, true), ((_c = params.tools) !== null && _c !== void 0 ? _c : []), true);
+ tools = this.buildAiSdkTools(allFunctions);
+ providerOptions = this.buildProviderOptions(provider, params, modelString);
+ result = (0, ai_1.streamText)(__assign({ model: model, messages: messages, tools: Object.keys(tools).length > 0 ? tools : undefined, stopWhen: (0, ai_1.stepCountIs)((_d = params.maxToolCalls) !== null && _d !== void 0 ? _d : 50), abortSignal: params.abortSignal, providerOptions: providerOptions }, (params.webSearch && provider === 'openai' ? { toolChoice: 'auto' } : {})));
+ usagePromise = this.buildUsagePromise(result, modelString, params);
+ toolInvocationsPromise = this.buildToolInvocationsPromise(result);
+ safeText = Promise.resolve(result.text).catch(function () { return ''; });
+ safeReasoning = Promise.resolve(result.reasoning)
+ .then(function (parts) {
+ return parts
+ ? parts
+ .filter(function (part) { return part.type === 'reasoning'; })
+ .map(function (part) { return part.text; })
+ .join('')
+ : '';
+ })
+ .catch(function () { return ''; });
+ safeSources = Promise.resolve(result.sources)
+ .then(function (s) {
+ return (s !== null && s !== void 0 ? s : []).map(function (source) { return ({
+ url: source.sourceType === 'url' ? source.url : undefined,
+ title: source.sourceType === 'url' ? source.title : undefined,
+ }); });
+ })
+ .catch(function () { return []; });
+ safeUsage = usagePromise.catch(function () {
+ return ({
+ model: modelString,
+ initialRequestTokenUsage: {
+ inputTokens: 0,
+ cachedInputTokens: 0,
+ reasoningTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ },
+ initialRequestCostUsd: { inputUsd: 0, cachedInputUsd: 0, reasoningUsd: 0, outputUsd: 0, totalUsd: 0 },
+ totalTokenUsage: {
+ inputTokens: 0,
+ cachedInputTokens: 0,
+ reasoningTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ },
+ totalCostUsd: { inputUsd: 0, cachedInputUsd: 0, reasoningUsd: 0, outputUsd: 0, totalUsd: 0 },
+ totalRequestsToAssistant: 0,
+ callsPerTool: {},
+ totalToolCalls: 0,
+ });
+ });
+ safeToolInvocations = toolInvocationsPromise.catch(function () { return []; });
+ // Catch remaining AI SDK promises that are rejected by NoOutputGeneratedError
+ // on flush when the stream is aborted before any output. The AI SDK's internal
+ // flush rejects _finishReason, _rawFinishReason, _totalUsage, and _steps.
+ // We already catch totalUsage and steps above; these catch the rest so the
+ // unhandled rejections don't crash the Node process.
+ Promise.resolve(result.finishReason).catch(function () { });
+ Promise.resolve(result.rawFinishReason).catch(function () { });
+ Promise.resolve(result.response).catch(function () { });
+ return [2 /*return*/, {
+ textStream: result.textStream,
+ reasoningStream: (function () {
+ return __asyncGenerator(this, arguments, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ })(),
+ fullStream: this.mapFullStream(result.fullStream),
+ text: safeText,
+ reasoning: safeReasoning,
+ sources: safeSources,
+ usage: safeUsage,
+ toolInvocations: safeToolInvocations,
+ }];
+ }
+ });
+ });
+ };
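
A minimal usage sketch of the new streaming API (illustrative only: the constructor options shown are a subset and the model string is an example; see package/src/Conversation.ts and the tests under package/test/conversation for the authoritative surface):

    import { Conversation } from '@proteinjs/conversation';

    // Hypothetical setup mirroring the constructor params read above.
    const conversation = new Conversation({ name: 'demo', defaultModel: 'gpt-4o' });

    const stream = await conversation.generateStream({ messages: ['Summarize the release'] });
    for await (const chunk of stream.textStream) {
      process.stdout.write(chunk); // text arrives incrementally
    }
    const usage = await stream.usage; // resolves once generation completes
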
+ /**
+ * Generate a strongly-typed structured object from the model.
+ *
+ * This is promise-based (not streaming-first) to guarantee the
+ * type contract. Reasoning is available on the result after completion.
+ *
+ * For OpenAI models with high reasoning or pro models, this uses
+ * `OpenAiResponses` with background/polling mode.
+ */
+ Conversation.prototype.generateObject = function (params) {
+ var _a, _b, _c;
+ return __awaiter(this, void 0, void 0, function () {
+ var model, modelString, provider, messages, system, nonSystem, isZod, normalizedSchema, result, toRecord, usage, reasoning;
+ var _this = this;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
+ case 1:
+ _d.sent();
+ model = this.resolveModelInstance(params.model);
+ modelString = this.getModelString(params.model);
+ provider = (0, resolveModel_1.inferProvider)((_b = (_a = params.model) !== null && _a !== void 0 ? _a : this.params.defaultModel) !== null && _b !== void 0 ? _b : DEFAULT_MODEL);
+ // Check if we should use background/polling mode (OpenAI-specific)
+ if (provider === 'openai' && this.shouldUseBackgroundMode(modelString, params)) {
+ return [2 /*return*/, this.generateObjectViaPolling(params, modelString)];
+ }
+ messages = this.buildAiSdkMessages(params.messages);
+ // Google requires all system messages at the beginning
+ if (provider === 'google') {
+ system = messages.filter(function (m) { return m.role === 'system'; });
+ nonSystem = messages.filter(function (m) { return m.role !== 'system'; });
+ messages = __spreadArray(__spreadArray([], system, true), nonSystem, true);
+ }
+ isZod = this.isZodSchema(params.schema);
+ normalizedSchema = isZod ? params.schema : (0, ai_1.jsonSchema)(this.strictifyJsonSchema(params.schema));
+ return [4 /*yield*/, (0, ai_1.generateObject)({
+ model: model,
+ messages: messages,
+ schema: normalizedSchema,
+ abortSignal: params.abortSignal,
+ maxOutputTokens: params.maxTokens,
+ temperature: params.temperature,
+ topP: params.topP,
+ providerOptions: this.buildProviderOptions(provider, params, modelString),
+ experimental_repairText: (function (_a) {
+ var text = _a.text;
+ return __awaiter(_this, void 0, void 0, function () {
+ var cleaned;
+ return __generator(this, function (_b) {
+ cleaned = String(text !== null && text !== void 0 ? text : '')
+ .trim()
+ .replace(/^```(?:json)?/i, '')
+ .replace(/```$/, '');
+ try {
+ JSON.parse(cleaned);
+ return [2 /*return*/, cleaned];
+ }
+ catch (_c) {
+ return [2 /*return*/, null];
+ }
+ return [2 /*return*/];
+ });
+ });
+ }),
+ })];
+ case 2:
+ result = _d.sent();
+ // Record in history
+ if (params.recordInHistory !== false) {
+ try {
+ toRecord = typeof (result === null || result === void 0 ? void 0 : result.object) === 'object' ? JSON.stringify(result.object) : '';
+ if (toRecord) {
+ this.addAssistantMessagesToHistory([toRecord]);
+ }
+ }
+ catch (_e) {
+ /* ignore */
+ }
+ }
+ usage = this.processAiSdkUsage(result, modelString);
+ if (!params.onUsageData) return [3 /*break*/, 4];
+ return [4 /*yield*/, params.onUsageData(usage)];
+ case 3:
+ _d.sent();
+ _d.label = 4;
+ case 4:
+ reasoning = this.extractReasoningFromResult(result);
+ return [2 /*return*/, {
+ object: ((_c = result === null || result === void 0 ? void 0 : result.object) !== null && _c !== void 0 ? _c : {}),
+ usage: usage,
+ reasoning: reasoning || undefined,
+ toolInvocations: [],
+ }];
+ }
+ });
+ });
+ };
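
A sketch of the typed-object call (the Zod schema is illustrative; the isZodSchema check above also accepts plain JSON Schema, which gets strictified and wrapped with jsonSchema):

    import { z } from 'zod';

    const extraction = await conversation.generateObject({
      messages: ['Extract name and version from: @proteinjs/conversation 3.0.0'],
      schema: z.object({ name: z.string(), version: z.string() }),
    });
    // extraction.object carries the typed result; usage and optional reasoning ride along.
    console.log(extraction.object.name, extraction.usage.totalTokenUsage.totalTokens);
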
+ /**
+ * Non-streaming convenience: generates a text response and waits for completion.
+ */
+ Conversation.prototype.generateResponse = function (params) {
+ return __awaiter(this, void 0, void 0, function () {
+ var stream, _a, text, reasoning, sources, usage, toolInvocations;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0: return [4 /*yield*/, this.generateStream(params)];
+ case 1:
+ stream = _b.sent();
+ return [4 /*yield*/, Promise.all([
+ stream.text,
+ stream.reasoning,
+ stream.sources,
+ stream.usage,
+ stream.toolInvocations,
+ ])];
+ case 2:
+ _a = _b.sent(), text = _a[0], reasoning = _a[1], sources = _a[2], usage = _a[3], toolInvocations = _a[4];
+ return [2 /*return*/, { text: text, reasoning: reasoning || undefined, sources: sources, usage: usage, toolInvocations: toolInvocations }];
+ }
+ });
+ });
+ };
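
Since this method just awaits the stream's promises, the non-streaming call collapses to a single await (sketch):

    const { text, usage, toolInvocations } = await conversation.generateResponse({
      messages: ['Hello'],
    });
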
+ // ────────────────────────────────────────────────────────────
+ // History management (public, for callers like ThoughtConversation)
+ // ────────────────────────────────────────────────────────────
+ Conversation.prototype.addSystemMessagesToHistory = function (messages, unshift) {
+ if (unshift === void 0) { unshift = false; }
+ var formatted = messages.map(function (m) { return ({ role: 'system', content: m }); });
+ this.addMessagesToHistory(formatted, unshift);
+ };
+ Conversation.prototype.addAssistantMessagesToHistory = function (messages, unshift) {
+ if (unshift === void 0) { unshift = false; }
+ var formatted = messages.map(function (m) { return ({ role: 'assistant', content: m }); });
+ this.addMessagesToHistory(formatted, unshift);
+ };
+ Conversation.prototype.addUserMessagesToHistory = function (messages, unshift) {
+ if (unshift === void 0) { unshift = false; }
+ var formatted = messages.map(function (m) { return ({ role: 'user', content: m }); });
+ this.addMessagesToHistory(formatted, unshift);
+ };
+ Conversation.prototype.addMessagesToHistory = function (messages, unshift) {
+ var _a, _b, _c;
+ if (unshift === void 0) { unshift = false; }
+ // Convert to the format MessageHistory expects (ChatCompletionMessageParam-like)
+ var historyMessages = messages.map(function (m) {
+ if (typeof m === 'string') {
+ return { role: 'user', content: m };
+ }
+ return m;
+ });
+ var systemMsgs = historyMessages.filter(function (m) { return m.role === 'system'; });
+ if (unshift) {
+ (_a = this.history.getMessages()).unshift.apply(_a, historyMessages);
+ this.history.prune();
+ (_b = this.systemMessages).unshift.apply(_b, systemMsgs);
  }
- if ((_d = params.limits) === null || _d === void 0 ? void 0 : _d.tokenLimit) {
- this.tokenLimit = params.limits.tokenLimit;
+ else {
+ this.history.push(historyMessages);
+ (_c = this.systemMessages).push.apply(_c, systemMsgs);
  }
- }
+ };
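
A short sketch of the history helpers (messages are plain strings; unshift = true prepends instead of appending):

    conversation.addSystemMessagesToHistory(['You are a terse assistant.']);
    conversation.addUserMessagesToHistory(['What changed in 3.0.0?']);
    conversation.addAssistantMessagesToHistory(['The OpenAi wrapper was replaced by the AI SDK.']);
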
+ // ────────────────────────────────────────────────────────────
+ // Module system
+ // ────────────────────────────────────────────────────────────
  Conversation.prototype.ensureModulesProcessed = function () {
  return __awaiter(this, void 0, void 0, function () {
  var error_1;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
- // If modules are already processed, return immediately
  if (this.modulesProcessed) {
  return [2 /*return*/];
  }
- // If modules are currently being processed, wait for that to complete
  if (this.processingModulesPromise) {
  return [2 /*return*/, this.processingModulesPromise];
  }
- // Start processing modules and keep a reference to the promise
  this.processingModulesPromise = this.processModules();
  _a.label = 1;
  case 1:
@@ -127,7 +413,6 @@ var Conversation = /** @class */ (function () {
  case 3:
  error_1 = _a.sent();
  this.logger.error({ message: 'Error processing modules', obj: { error: error_1 } });
- // Reset the promise so we can try again
  this.processingModulesPromise = null;
  throw error_1;
  case 4: return [2 /*return*/];
@@ -137,339 +422,658 @@ var Conversation = /** @class */ (function () {
  };
  Conversation.prototype.processModules = function () {
  return __awaiter(this, void 0, void 0, function () {
- var _i, _a, module_1, moduleSystemMessagesResult, moduleSystemMessages, formattedSystemMessages;
- return __generator(this, function (_b) {
- switch (_b.label) {
+ var _i, _a, module_1, moduleName, rawSystem, sysArr, trimmed, formatted, moduleFunctions, functionInstructions, hasInstructions, _b, moduleFunctions_1, f, paragraph;
+ var _c, _d;
+ return __generator(this, function (_e) {
+ switch (_e.label) {
  case 0:
  if (!this.params.modules || this.params.modules.length === 0) {
  return [2 /*return*/];
  }
  _i = 0, _a = this.params.modules;
- _b.label = 1;
+ _e.label = 1;
  case 1:
- if (!(_i < _a.length)) return [3 /*break*/, 6];
+ if (!(_i < _a.length)) return [3 /*break*/, 4];
  module_1 = _a[_i];
- moduleSystemMessagesResult = module_1.getSystemMessages();
- moduleSystemMessages = void 0;
- if (!(moduleSystemMessagesResult instanceof Promise)) return [3 /*break*/, 3];
- return [4 /*yield*/, moduleSystemMessagesResult];
+ moduleName = module_1.getName();
+ return [4 /*yield*/, Promise.resolve(module_1.getSystemMessages())];
  case 2:
- moduleSystemMessages = _b.sent();
- return [3 /*break*/, 4];
- case 3:
- moduleSystemMessages = moduleSystemMessagesResult;
- _b.label = 4;
- case 4:
- if (!moduleSystemMessages || (Array.isArray(moduleSystemMessages) && moduleSystemMessages.length < 1)) {
- return [3 /*break*/, 5];
+ rawSystem = _e.sent();
+ sysArr = Array.isArray(rawSystem) ? rawSystem : rawSystem ? [rawSystem] : [];
+ trimmed = sysArr.map(function (s) { return String(s !== null && s !== void 0 ? s : '').trim(); }).filter(Boolean);
+ if (trimmed.length > 0) {
+ formatted = trimmed.join('. ');
+ this.addSystemMessagesToHistory([
+ "The following are instructions from the ".concat(moduleName, " module:\n").concat(formatted),
+ ]);
  }
- formattedSystemMessages = Array.isArray(moduleSystemMessages)
- ? moduleSystemMessages.join('. ')
- : moduleSystemMessages;
- this.addSystemMessagesToHistory([
- "The following are instructions from the ".concat(module_1.getName(), " module:\n").concat(formattedSystemMessages),
- ]);
- this.addFunctions(module_1.getName(), module_1.getFunctions());
- this.addMessageModerators(module_1.getMessageModerators());
- _b.label = 5;
- case 5:
+ moduleFunctions = module_1.getFunctions();
+ (_c = this.functions).push.apply(_c, moduleFunctions);
+ functionInstructions = "The following are instructions from functions in the ".concat(moduleName, " module:");
+ hasInstructions = false;
+ for (_b = 0, moduleFunctions_1 = moduleFunctions; _b < moduleFunctions_1.length; _b++) {
+ f = moduleFunctions_1[_b];
+ if (f.instructions && f.instructions.length > 0) {
+ hasInstructions = true;
+ paragraph = f.instructions.join('. ');
+ functionInstructions += " ".concat(f.definition.name, ": ").concat(paragraph, ".");
+ }
+ }
+ if (hasInstructions) {
+ this.addSystemMessagesToHistory([functionInstructions]);
+ }
+ // Message moderators
+ (_d = this.messageModerators).push.apply(_d, module_1.getMessageModerators());
+ _e.label = 3;
+ case 3:
  _i++;
  return [3 /*break*/, 1];
- case 6: return [2 /*return*/];
+ case 4: return [2 /*return*/];
  }
  });
  });
  };
- Conversation.prototype.addFunctions = function (moduleName, functions) {
- var _a;
- (_a = this.functions).push.apply(_a, functions);
- var functionInstructions = "The following are instructions from functions in the ".concat(moduleName, " module:");
- var functionInstructionsAdded = false;
- for (var _i = 0, functions_1 = functions; _i < functions_1.length; _i++) {
- var f = functions_1[_i];
- if (f.instructions) {
- if (!f.instructions || f.instructions.length < 1) {
- continue;
+ // ────────────────────────────────────────────────────────────
+ // AI SDK message building
+ // ────────────────────────────────────────────────────────────
+ Conversation.prototype.buildAiSdkMessages = function (callMessages) {
+ var _a, _b;
+ var result = [];
+ // Add history messages
+ for (var _i = 0, _c = this.history.getMessages(); _i < _c.length; _i++) {
+ var msg = _c[_i];
+ var m = msg;
+ var rawRole = String((_a = m.role) !== null && _a !== void 0 ? _a : 'user');
+ // Map non-standard roles to the closest AI SDK role
+ var role = (rawRole === 'system' ? 'system' : rawRole === 'assistant' ? 'assistant' : 'user');
+ var content = typeof m.content === 'string' ? m.content : this.extractTextFromContent(m.content);
+ if (content.trim()) {
+ result.push({ role: role, content: content });
+ }
+ }
+ // Add call messages
+ for (var _d = 0, callMessages_1 = callMessages; _d < callMessages_1.length; _d++) {
+ var msg = callMessages_1[_d];
+ if (typeof msg === 'string') {
+ result.push({ role: 'user', content: msg });
+ }
+ else {
+ var rawRole = String((_b = msg.role) !== null && _b !== void 0 ? _b : 'user');
+ var role = (rawRole === 'system' ? 'system' : rawRole === 'assistant' ? 'assistant' : 'user');
+ result.push({ role: role, content: typeof msg.content === 'string' ? msg.content : '' });
+ }
+ }
+ return result;
+ };
+ Conversation.prototype.extractTextFromContent = function (content) {
+ if (typeof content === 'string') {
+ return content;
+ }
+ if (Array.isArray(content)) {
+ return content
+ .map(function (p) {
+ if (typeof p === 'string') {
+ return p;
  }
- functionInstructionsAdded = true;
- var instructionsParagraph = f.instructions.join('. ');
- functionInstructions += " ".concat(f.definition.name, ": ").concat(instructionsParagraph, ".");
+ if ((p === null || p === void 0 ? void 0 : p.type) === 'text') {
+ return p.text;
+ }
+ return '';
+ })
+ .join('\n');
+ }
+ return '';
+ };
+ // ────────────────────────────────────────────────────────────
+ // AI SDK tool building
+ // ────────────────────────────────────────────────────────────
+ Conversation.prototype.buildAiSdkTools = function (functions) {
+ var _this = this;
+ var tools = {};
+ var _loop_1 = function (f) {
+ var def = f.definition;
+ if (!(def === null || def === void 0 ? void 0 : def.name)) {
+ return "continue";
  }
+ tools[def.name] = {
+ description: def.description,
+ inputSchema: (0, ai_1.jsonSchema)(this_1.normalizeToolParameters(def.parameters)),
+ execute: function (args) { return __awaiter(_this, void 0, void 0, function () {
+ var result;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, f.call(args)];
+ case 1:
+ result = _a.sent();
+ if (typeof result === 'undefined') {
+ return [2 /*return*/, { result: 'Function executed successfully' }];
+ }
+ return [2 /*return*/, result];
+ }
+ });
+ }); },
+ };
+ };
+ var this_1 = this;
+ for (var _i = 0, functions_1 = functions; _i < functions_1.length; _i++) {
+ var f = functions_1[_i];
+ _loop_1(f);
+ }
+ return tools;
+ };
+ /**
+ * Normalize tool parameter schemas to ensure they are valid JSON Schema
+ * with `type: "object"`. Handles missing, null, or invalid schemas
+ * (e.g. `type: "None"` which some functions produce).
+ */
+ Conversation.prototype.normalizeToolParameters = function (parameters) {
+ var emptySchema = { type: 'object', properties: {} };
+ if (!parameters || typeof parameters !== 'object') {
+ return emptySchema;
  }
- if (!functionInstructionsAdded) {
- return;
+ // If type is missing, not a string, or not a valid JSON Schema type, default to object
+ var validTypes = ['object', 'array', 'string', 'number', 'integer', 'boolean', 'null'];
+ if (!parameters.type ||
+ typeof parameters.type !== 'string' ||
+ !validTypes.includes(parameters.type.toLowerCase())) {
+ return __assign(__assign(__assign({}, emptySchema), parameters), { type: 'object' });
  }
- this.addSystemMessagesToHistory([functionInstructions]);
+ return parameters;
  };
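
Illustrative inputs and outputs for normalizeToolParameters, derived from the branches above (not part of the package):

    normalizeToolParameters(undefined);
    // → { type: 'object', properties: {} }
    normalizeToolParameters({ type: 'None' });
    // → { type: 'object', properties: {} } merged with the input, type forced to 'object'
    normalizeToolParameters({ type: 'object', properties: { path: { type: 'string' } } });
    // → returned unchanged
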
- Conversation.prototype.addMessageModerators = function (messageModerators) {
+ // ────────────────────────────────────────────────────────────
+ // Provider options
+ // ────────────────────────────────────────────────────────────
+ Conversation.prototype.buildProviderOptions = function (provider, params, modelString) {
  var _a;
- (_a = this.messageModerators).push.apply(_a, messageModerators);
+ var options = {};
+ var effort = params.reasoningEffort;
+ if (provider === 'openai') {
+ var openaiOpts = {};
+ if (effort) {
+ // OpenAI accepts: none | low | medium | high | xhigh
+ // 'max' → 'xhigh' (OpenAI's highest)
+ openaiOpts.reasoningEffort = effort === 'max' ? 'xhigh' : effort;
+ }
+ if (params.serviceTier) {
+ openaiOpts.serviceTier = params.serviceTier;
+ }
+ options.openai = openaiOpts;
+ }
+ if (provider === 'anthropic') {
+ var anthropicOpts = {};
+ if (effort && effort !== 'none') {
+ // Use adaptive thinking (Sonnet 4.6+, Opus 4.6+) with effort level.
+ // Anthropic accepts effort: low | medium | high | max
+ // 'xhigh' has no Anthropic equivalent → map to 'max'
+ anthropicOpts.thinking = { type: 'adaptive' };
+ anthropicOpts.effort = effort === 'xhigh' ? 'max' : effort;
+ }
+ options.anthropic = anthropicOpts;
+ }
+ if (provider === 'google') {
+ var googleOpts = {};
+ if (effort && effort !== 'none') {
+ // Google accepts thinkingLevel: minimal | low | medium | high
+ // Our 'max'/'xhigh' have no Google equivalent → map to 'high'
+ var levelMap = {
+ low: 'low',
+ medium: 'medium',
+ high: 'high',
+ xhigh: 'high',
+ max: 'high',
+ };
+ googleOpts.thinkingConfig = {
+ thinkingLevel: (_a = levelMap[effort]) !== null && _a !== void 0 ? _a : 'medium',
+ };
+ }
+ options.google = googleOpts;
+ }
+ if (provider === 'xai') {
+ var xaiOpts = {};
+ // Only models with reasoning support accept the reasoningEffort parameter.
+ // Models like grok-4 (no "-fast" suffix) reject it with a 400 error.
+ var xaiSupportsReasoning = modelString ? /fast/i.test(modelString) : false;
+ if (effort && effort !== 'none' && xaiSupportsReasoning) {
+ // xAI accepts: low | high
+ // Map everything to the closest valid value
+ var xaiEffort = effort === 'low' ? 'low' : 'high';
+ xaiOpts.reasoningEffort = xaiEffort;
+ }
+ options.xai = xaiOpts;
+ }
+ return options;
  };
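
The per-provider reasoning-effort mapping above, condensed as a reading aid (derived from the comments and branches; 'off' means the option is omitted):

    // effort:      'none'   'low'   'medium'   'high'   'xhigh'   'max'
    // openai    →   none     low     medium     high     xhigh     xhigh
    // anthropic →   off      low     medium     high     max       max    (plus thinking: { type: 'adaptive' })
    // google    →   off      low     medium     high     high      high   (via thinkingConfig.thinkingLevel)
    // xai       →   off      low     high       high     high      high   (only models matching /fast/i)
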
- Conversation.prototype.enforceTokenLimit = function (messages, model) {
- var _a;
+ // ────────────────────────────────────────────────────────────
+ // Background/polling escape hatch (OpenAI-specific)
+ // ────────────────────────────────────────────────────────────
+ Conversation.prototype.shouldUseBackgroundMode = function (modelString, params) {
+ if (typeof params.backgroundMode === 'boolean') {
+ return params.backgroundMode;
+ }
+ if (this.isProModel(modelString)) {
+ return true;
+ }
+ if (this.isHighReasoningEffort(params.reasoningEffort)) {
+ return true;
+ }
+ return false;
+ };
+ Conversation.prototype.isProModel = function (model) {
+ return /(^|[-_.])pro($|[-_.])/.test(String(model !== null && model !== void 0 ? model : '').toLowerCase());
+ };
+ Conversation.prototype.isHighReasoningEffort = function (effort) {
+ return effort === 'high' || effort === 'xhigh' || effort === 'max';
+ };
+ /**
+ * Map our ReasoningEffort to OpenAI's accepted values.
+ * OpenAI accepts: none | low | medium | high | xhigh
+ * 'max' → 'xhigh' (OpenAI's highest).
+ */
+ Conversation.prototype.mapReasoningEffortForOpenAi = function (effort) {
+ if (!effort) {
+ return undefined;
+ }
+ if (effort === 'max') {
+ return 'xhigh';
+ }
+ return effort;
+ };
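
A behavior sketch of the heuristic (these are private prototype methods; the calls are shown only to illustrate the decision table):

    // explicit flag always wins
    shouldUseBackgroundMode('gpt-4o', { backgroundMode: false });          // false
    // pro models match /(^|[-_.])pro($|[-_.])/ on the lowercased id
    shouldUseBackgroundMode('o3-pro', {});                                 // true
    // high/xhigh/max reasoning effort also opts in
    shouldUseBackgroundMode('gpt-4o', { reasoningEffort: 'max' });         // true
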
+ /**
+ * Fall back to OpenAiResponses for background/polling mode.
+ * Returns a StreamResult where the text arrives as a single chunk after polling completes.
+ */
+ Conversation.prototype.generateStreamViaPolling = function (params, modelString) {
  return __awaiter(this, void 0, void 0, function () {
- var resolvedModel, encoder, conversation, encoded, summarizeConversationRequest, referenceSummaryRequest;
- return __generator(this, function (_b) {
- switch (_b.label) {
+ var responses, messages, result, text, usage, toolInvocations;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
  case 0:
- if (!((_a = this.params.limits) === null || _a === void 0 ? void 0 : _a.enforceLimits)) {
- return [2 /*return*/];
- }
- resolvedModel = model ? model : OpenAi_1.DEFAULT_MODEL;
- encoder = (0, tiktoken_1.encoding_for_model)(resolvedModel);
- conversation = this.history.toString() +
- messages
- .map(function (message) {
- if (typeof message === 'string') {
- return message;
- }
- else {
- // Extract content from ChatCompletionMessageParam
- var contentParts = Array.isArray(message.content) ? message.content : [message.content];
- return contentParts
- .map(function (part) {
- if (typeof part === 'string') {
- return part;
- }
- else if ((part === null || part === void 0 ? void 0 : part.type) === 'text') {
- return part.text;
- }
- else {
- return ''; // Handle non-text content types as empty string
- }
- })
- .join(' ');
- }
- })
- .join('. ');
- encoded = encoder.encode(conversation);
- console.log("current tokens: ".concat(encoded.length));
- if (encoded.length < this.tokenLimit) {
- return [2 /*return*/];
- }
- summarizeConversationRequest = "First, call the ".concat(exports.summarizeConversationHistoryFunctionName, " function");
- return [4 /*yield*/, new OpenAi_1.OpenAi({
- history: this.history,
- functions: this.functions,
- messageModerators: this.messageModerators,
- logLevel: this.params.logLevel,
- }).generateResponse({ messages: [summarizeConversationRequest], model: model })];
+ responses = this.createOpenAiResponses(params);
+ messages = this.convertToOpenAiMessages(params.messages);
+ return [4 /*yield*/, responses.generateText({
+ messages: messages,
+ model: modelString,
+ abortSignal: params.abortSignal,
+ onToolInvocation: params.onToolInvocation,
+ onUsageData: params.onUsageData,
+ reasoningEffort: this.mapReasoningEffortForOpenAi(params.reasoningEffort),
+ maxToolCalls: params.maxToolCalls,
+ backgroundMode: params.backgroundMode,
+ maxBackgroundWaitMs: params.maxBackgroundWaitMs,
+ serviceTier: params.serviceTier,
+ })];
  case 1:
- _b.sent();
- referenceSummaryRequest = "If there's a file mentioned in the conversation summary, find and read the file to better respond to my next request. If that doesn't find anything, call the ".concat(PackageFunctions_1.searchLibrariesFunctionName, " function on other keywords in the conversation summary to find a file to read");
- return [4 /*yield*/, new OpenAi_1.OpenAi({
- history: this.history,
- functions: this.functions,
- messageModerators: this.messageModerators,
- logLevel: this.params.logLevel,
- }).generateResponse({ messages: [referenceSummaryRequest], model: model })];
- case 2:
- _b.sent();
- return [2 /*return*/];
+ result = _a.sent();
+ text = result.message;
+ usage = result.usagedata;
+ toolInvocations = result.toolInvocations;
+ return [2 /*return*/, {
+ textStream: (function () {
+ return __asyncGenerator(this, arguments, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, __await(text)];
+ case 1: return [4 /*yield*/, _a.sent()];
+ case 2:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ })(),
+ reasoningStream: (function () {
+ return __asyncGenerator(this, arguments, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ })(),
+ fullStream: (function () {
+ return __asyncGenerator(this, arguments, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, __await({ type: 'text-delta', textDelta: text })];
+ case 1: return [4 /*yield*/, _a.sent()];
+ case 2:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ })(),
+ text: Promise.resolve(text),
+ reasoning: Promise.resolve(''),
+ sources: Promise.resolve([]),
+ usage: Promise.resolve(usage),
+ toolInvocations: Promise.resolve(toolInvocations),
+ }];
  }
  });
  });
  };
- Conversation.prototype.summarizeConversationHistory = function (summary) {
- this.clearHistory();
- this.history.push([{ role: 'assistant', content: "Previous conversation summary: ".concat(summary) }]);
- };
- Conversation.prototype.clearHistory = function () {
- this.history = new MessageHistory_1.MessageHistory();
- this.history.push(this.systemMessages);
- };
- Conversation.prototype.addSystemMessagesToHistory = function (messages, unshift) {
- if (unshift === void 0) { unshift = false; }
- var chatCompletions = messages.map(function (message) {
- return { role: 'system', content: message };
+ /**
+ * Fall back to OpenAiResponses for generateObject with background/polling.
+ */
+ Conversation.prototype.generateObjectViaPolling = function (params, modelString) {
+ return __awaiter(this, void 0, void 0, function () {
+ var responses, messages, result;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ responses = this.createOpenAiResponses(params);
+ messages = this.convertToOpenAiMessages(params.messages);
+ return [4 /*yield*/, responses.generateObject({
+ messages: messages,
+ model: modelString,
+ schema: params.schema,
+ abortSignal: params.abortSignal,
+ onUsageData: params.onUsageData,
+ reasoningEffort: this.mapReasoningEffortForOpenAi(params.reasoningEffort),
+ temperature: params.temperature,
+ topP: params.topP,
+ maxTokens: params.maxTokens,
+ backgroundMode: params.backgroundMode,
+ maxBackgroundWaitMs: params.maxBackgroundWaitMs,
+ serviceTier: params.serviceTier,
+ })];
+ case 1:
+ result = _a.sent();
+ return [2 /*return*/, {
+ object: result.object,
+ usage: result.usageData,
+ reasoning: undefined,
+ toolInvocations: [],
+ }];
+ }
+ });
  });
- this.addMessagesToHistory(chatCompletions, unshift);
  };
- Conversation.prototype.addAssistantMessagesToHistory = function (messages, unshift) {
- if (unshift === void 0) { unshift = false; }
- var chatCompletions = messages.map(function (message) {
- return { role: 'assistant', content: message };
+ Conversation.prototype.createOpenAiResponses = function (params) {
+ // Lazy require to avoid circular dependency and keep OpenAiResponses optional
+ // eslint-disable-next-line @typescript-eslint/no-var-requires
+ var OAIResponses = require('./OpenAiResponses').OpenAiResponses;
+ return new OAIResponses({
+ modules: this.params.modules,
+ logLevel: this.params.logLevel,
+ defaultModel: this.getModelString(this.params.defaultModel),
  });
- this.addMessagesToHistory(chatCompletions, unshift);
  };
- Conversation.prototype.addUserMessagesToHistory = function (messages, unshift) {
- if (unshift === void 0) { unshift = false; }
- var chatCompletions = messages.map(function (message) {
- return { role: 'user', content: message };
- });
- this.addMessagesToHistory(chatCompletions, unshift);
+ Conversation.prototype.convertToOpenAiMessages = function (messages) {
+ var result = [];
+ // Include history
+ for (var _i = 0, _a = this.history.getMessages(); _i < _a.length; _i++) {
+ var msg = _a[_i];
+ var m = msg;
+ result.push({
+ role: m.role,
+ content: typeof m.content === 'string' ? m.content : this.extractTextFromContent(m.content),
+ });
+ }
+ // Include call messages
+ for (var _b = 0, messages_1 = messages; _b < messages_1.length; _b++) {
+ var msg = messages_1[_b];
+ if (typeof msg === 'string') {
+ result.push(msg);
+ }
+ else {
+ result.push({
+ role: msg.role,
+ content: msg.content,
+ });
+ }
+ }
+ return result;
  };
- Conversation.prototype.addMessagesToHistory = function (messages, unshift) {
- var _a, _b, _c;
- if (unshift === void 0) { unshift = false; }
- var systemMessages = messages.filter(function (message) { return message.role === 'system'; });
- if (unshift) {
- (_a = this.history.getMessages()).unshift.apply(_a, messages);
- this.history.prune();
- (_b = this.systemMessages).unshift.apply(_b, systemMessages);
+ // ────────────────────────────────────────────────────────────
+ // Model resolution
+ // ────────────────────────────────────────────────────────────
+ Conversation.prototype.resolveModelInstance = function (model) {
+ var _a;
+ var m = (_a = model !== null && model !== void 0 ? model : this.params.defaultModel) !== null && _a !== void 0 ? _a : DEFAULT_MODEL;
+ return (0, resolveModel_1.resolveModel)(m);
+ };
+ Conversation.prototype.getModelString = function (model) {
+ var _a, _b;
+ if (!model) {
+ var def = this.params.defaultModel;
+ if (!def) {
+ return DEFAULT_MODEL;
+ }
+ if (typeof def === 'string') {
+ return def;
+ }
+ return (_a = def.modelId) !== null && _a !== void 0 ? _a : DEFAULT_MODEL;
  }
- else {
- this.history.push(messages);
- (_c = this.systemMessages).push.apply(_c, systemMessages);
+ if (typeof model === 'string') {
+ return model;
  }
+ return (_b = model.modelId) !== null && _b !== void 0 ? _b : 'unknown';
  };
- Conversation.prototype.generateResponse = function (_a) {
- var messages = _a.messages, model = _a.model, maxToolCalls = _a.maxToolCalls, rest = __rest(_a, ["messages", "model", "maxToolCalls"]);
+ // ────────────────────────────────────────────────────────────
+ // Usage processing
+ // ────────────────────────────────────────────────────────────
+ /**
+ * Build a usage promise from a streaming result.
+ * Uses `totalUsage` (accumulated across all steps in a tool-call loop)
+ * and populates tool call stats from the steps.
+ */
+ Conversation.prototype.buildUsagePromise = function (result, modelString, params) {
  return __awaiter(this, void 0, void 0, function () {
+ var _a, sdkUsage, steps, usage;
  return __generator(this, function (_b) {
  switch (_b.label) {
- case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
+ case 0: return [4 /*yield*/, Promise.all([result.totalUsage, result.steps])];
  case 1:
- _b.sent();
- return [4 /*yield*/, this.enforceTokenLimit(messages, model)];
+ _a = _b.sent(), sdkUsage = _a[0], steps = _a[1];
+ usage = this.mapSdkUsage(sdkUsage, modelString, steps);
+ if (!params.onUsageData) return [3 /*break*/, 3];
+ return [4 /*yield*/, params.onUsageData(usage)];
  case 2:
  _b.sent();
- this.logger.debug({ message: "=============== Conversation.generateResponse (start) ===============" });
- this.logger.debug({ message: "Message history", obj: { history: this.history.getMessages(), messages: messages } });
- this.logger.debug({ message: "=============== Conversation.generateResponse (end) ===============" });
- return [4 /*yield*/, new OpenAi_1.OpenAi(__assign({ history: this.history, functions: this.functions, messageModerators: this.messageModerators, logLevel: this.params.logLevel }, (typeof maxToolCalls !== 'undefined' ? { maxFunctionCalls: maxToolCalls } : {}))).generateResponse(__assign({ messages: messages, model: model }, rest))];
- case 3: return [2 /*return*/, _b.sent()];
+ _b.label = 3;
+ case 3: return [2 /*return*/, usage];
  }
  });
  });
  };
- Conversation.prototype.generateStreamingResponse = function (_a) {
- var messages = _a.messages, model = _a.model, maxToolCalls = _a.maxToolCalls, rest = __rest(_a, ["messages", "model", "maxToolCalls"]);
+ Conversation.prototype.buildToolInvocationsPromise = function (result) {
+ var _a, _b, _c, _d, _e;
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_b) {
- switch (_b.label) {
- case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
+ var steps, invocations, _i, _f, step, _loop_2, _g, _h, toolCall;
+ return __generator(this, function (_j) {
+ switch (_j.label) {
+ case 0: return [4 /*yield*/, result.steps];
  case 1:
- _b.sent();
- return [4 /*yield*/, this.enforceTokenLimit(messages, model)];
- case 2:
- _b.sent();
- return [4 /*yield*/, new OpenAi_1.OpenAi(__assign({ history: this.history, functions: this.functions, messageModerators: this.messageModerators, logLevel: this.params.logLevel }, (typeof maxToolCalls !== 'undefined' ? { maxFunctionCalls: maxToolCalls } : {}))).generateStreamingResponse(__assign({ messages: messages, model: model }, rest))];
- case 3: return [2 /*return*/, _b.sent()];
+ steps = _j.sent();
+ invocations = [];
+ for (_i = 0, _f = steps !== null && steps !== void 0 ? steps : []; _i < _f.length; _i++) {
+ step = _f[_i];
+ _loop_2 = function (toolCall) {
+ invocations.push({
+ id: (_b = toolCall.toolCallId) !== null && _b !== void 0 ? _b : '',
+ name: (_c = toolCall.toolName) !== null && _c !== void 0 ? _c : '',
+ startedAt: new Date(),
+ finishedAt: new Date(),
+ input: toolCall.args,
+ ok: true,
+ data: (_e = ((_d = step.toolResults) !== null && _d !== void 0 ? _d : []).find(function (r) { return r.toolCallId === toolCall.toolCallId; })) === null || _e === void 0 ? void 0 : _e.result,
+ });
+ };
+ for (_g = 0, _h = (_a = step.toolCalls) !== null && _a !== void 0 ? _a : []; _g < _h.length; _g++) {
+ toolCall = _h[_g];
+ _loop_2(toolCall);
+ }
+ }
+ return [2 /*return*/, invocations];
  }
  });
  });
  };
  /**
- * Generate a validated JSON object (no tools in this run).
- * Uses AI SDK `generateObject` which leverages provider-native structured outputs when available.
+ * Map AI SDK's `LanguageModelUsage` to our `UsageData`.
+ *
+ * The AI SDK v6 provides cached/reasoning token breakdowns directly in
+ * `LanguageModelUsage.inputTokenDetails` and `outputTokenDetails`, so we
+ * use those first and only fall back to provider metadata for older providers.
  */
- Conversation.prototype.generateObject = function (_a) {
- var _b;
- var messages = _a.messages, model = _a.model, abortSignal = _a.abortSignal, schema = _a.schema, temperature = _a.temperature, topP = _a.topP, maxTokens = _a.maxTokens, onUsageData = _a.onUsageData, _c = _a.recordInHistory, recordInHistory = _c === void 0 ? true : _c, reasoningEffort = _a.reasoningEffort;
- return __awaiter(this, void 0, void 0, function () {
- var combined, isZod, normalizedSchema, result, chatCompletions, toRecord, usageData;
- var _this = this;
- return __generator(this, function (_d) {
- switch (_d.label) {
- case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
- case 1:
- _d.sent();
- combined = __spreadArray(__spreadArray([], this.toModelMessages(this.history.getMessages()), true), this.toModelMessages(messages), true);
- isZod = schema &&
- (typeof schema.safeParse === 'function' ||
- (!!schema._def && typeof schema._def.typeName === 'string'));
- normalizedSchema = isZod ? schema : (0, ai_1.jsonSchema)(this.strictifyJsonSchema(schema));
- this.logger.debug({ message: "=============== Conversation.generateObject (start) ===============" });
- this.logger.debug({ message: "Message history", obj: { messages: combined } });
- this.logger.debug({ message: "=============== Conversation.generateObject (end) ===============" });
- return [4 /*yield*/, (0, ai_1.generateObject)({
- model: model,
- abortSignal: abortSignal,
- messages: combined,
- schema: normalizedSchema,
- providerOptions: {
- openai: {
- strictJsonSchema: true,
- reasoningEffort: reasoningEffort,
- },
- },
- maxOutputTokens: maxTokens,
- temperature: temperature,
- topP: topP,
- experimental_repairText: function (_a) {
- var text = _a.text;
- return __awaiter(_this, void 0, void 0, function () {
- var cleaned;
- return __generator(this, function (_b) {
- cleaned = String(text !== null && text !== void 0 ? text : '')
- .trim()
- .replace(/^```(?:json)?/i, '')
- .replace(/```$/, '');
- try {
- JSON.parse(cleaned);
- return [2 /*return*/, cleaned];
- }
- catch (_c) {
- return [2 /*return*/, null];
- }
- return [2 /*return*/];
- });
- });
- },
- })];
- case 2:
- result = _d.sent();
- chatCompletions = messages.map(function (m) {
- return typeof m === 'string' ? { role: 'user', content: m } : m;
- });
- this.addMessagesToHistory(chatCompletions);
- // Optionally persist the final JSON in history
- if (recordInHistory) {
- try {
- toRecord = typeof (result === null || result === void 0 ? void 0 : result.object) === 'object' ? JSON.stringify(result.object) : '';
- if (toRecord) {
- this.addAssistantMessagesToHistory([toRecord]);
- }
- }
- catch (_e) {
- /* ignore */
- }
+ Conversation.prototype.mapSdkUsage = function (sdkUsage, modelString, steps) {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
+ var inputTokens = (_a = sdkUsage === null || sdkUsage === void 0 ? void 0 : sdkUsage.inputTokens) !== null && _a !== void 0 ? _a : 0;
+ var outputTokens = (_b = sdkUsage === null || sdkUsage === void 0 ? void 0 : sdkUsage.outputTokens) !== null && _b !== void 0 ? _b : 0;
+ var totalTokens = (_c = sdkUsage === null || sdkUsage === void 0 ? void 0 : sdkUsage.totalTokens) !== null && _c !== void 0 ? _c : inputTokens + outputTokens;
+ // AI SDK v6 provides structured token details
+ var cachedInputTokens = (_e = (_d = sdkUsage === null || sdkUsage === void 0 ? void 0 : sdkUsage.inputTokenDetails) === null || _d === void 0 ? void 0 : _d.cacheReadTokens) !== null && _e !== void 0 ? _e : 0;
+ var reasoningTokens = (_g = (_f = sdkUsage === null || sdkUsage === void 0 ? void 0 : sdkUsage.outputTokenDetails) === null || _f === void 0 ? void 0 : _f.reasoningTokens) !== null && _g !== void 0 ? _g : 0;
+ var tokenUsage = {
+ inputTokens: inputTokens,
+ cachedInputTokens: cachedInputTokens,
+ reasoningTokens: reasoningTokens,
+ outputTokens: outputTokens,
+ totalTokens: totalTokens,
+ };
+ // Count steps as individual requests to the assistant
+ var stepCount = (_h = steps === null || steps === void 0 ? void 0 : steps.length) !== null && _h !== void 0 ? _h : 1;
+ var acc = new UsageData_1.UsageDataAccumulator({ model: modelString });
+ acc.addTokenUsage(tokenUsage);
+ // Populate tool call stats from steps
+ var callsPerTool = {};
+ var totalToolCalls = 0;
+ for (var _i = 0, _m = steps !== null && steps !== void 0 ? steps : []; _i < _m.length; _i++) {
+ var step = _m[_i];
+ for (var _o = 0, _p = (_j = step.toolCalls) !== null && _j !== void 0 ? _j : []; _o < _p.length; _o++) {
+ var toolCall = _p[_o];
+ var name_1 = (_k = toolCall.toolName) !== null && _k !== void 0 ? _k : 'unknown';
+ callsPerTool[name_1] = ((_l = callsPerTool[name_1]) !== null && _l !== void 0 ? _l : 0) + 1;
+ totalToolCalls++;
+ }
+ }
+ return __assign(__assign({}, acc.usageData), { totalRequestsToAssistant: stepCount, callsPerTool: callsPerTool, totalToolCalls: totalToolCalls });
+ };
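
An input/output sketch for mapSdkUsage, assuming the AI SDK v6 field names referenced above (the numbers are made up):

    const sdkUsage = {
      inputTokens: 1200,
      outputTokens: 300,
      totalTokens: 1500,
      inputTokenDetails: { cacheReadTokens: 1000 },
      outputTokenDetails: { reasoningTokens: 120 },
    };
    // mapSdkUsage(sdkUsage, 'gpt-4o', steps) produces a UsageData whose totalTokenUsage is
    // { inputTokens: 1200, cachedInputTokens: 1000, reasoningTokens: 120, outputTokens: 300, totalTokens: 1500 },
    // with totalRequestsToAssistant = steps.length and callsPerTool keyed by toolName.
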
+ /**
953
+ * Process usage from a generateObject result (single-step, no tool calls).
954
+ */
955
+ Conversation.prototype.processAiSdkUsage = function (result, modelString) {
956
+ return this.mapSdkUsage(result.usage, modelString);
957
+ };
958
+     // ────────────────────────────────────────────────────────────
+     // Full stream mapping
+     // ────────────────────────────────────────────────────────────
+     /**
+      * Maps the AI SDK's `fullStream` (which emits all event types) into our
+      * `StreamPart` union. This is the primary way to consume streaming output
+      * in real-time, since it yields text, reasoning, and source events in the
+      * order the model produces them.
+      */
+     Conversation.prototype.mapFullStream = function (aiSdkFullStream) {
+         var _a;
+         return _a = {},
+             _a[Symbol.asyncIterator] = function () {
+                 return __asyncGenerator(this, arguments, function _a() {
+                     var _b, aiSdkFullStream_1, aiSdkFullStream_1_1, part, e_1_1;
+                     var _c, e_1, _d, _e;
+                     return __generator(this, function (_f) {
+                         switch (_f.label) {
+                             case 0:
+                                 _f.trys.push([0, 13, 14, 19]);
+                                 _b = true, aiSdkFullStream_1 = __asyncValues(aiSdkFullStream);
+                                 _f.label = 1;
+                             case 1: return [4 /*yield*/, __await(aiSdkFullStream_1.next())];
+                             case 2:
+                                 if (!(aiSdkFullStream_1_1 = _f.sent(), _c = aiSdkFullStream_1_1.done, !_c)) return [3 /*break*/, 12];
+                                 _e = aiSdkFullStream_1_1.value;
+                                 _b = false;
+                                 part = _e;
+                                 if (!(part.type === 'text-delta' && part.textDelta)) return [3 /*break*/, 5];
+                                 return [4 /*yield*/, __await({ type: 'text-delta', textDelta: part.textDelta })];
+                             case 3: return [4 /*yield*/, _f.sent()];
+                             case 4:
+                                 _f.sent();
+                                 return [3 /*break*/, 11];
+                             case 5:
+                                 if (!(part.type === 'reasoning' && part.textDelta)) return [3 /*break*/, 8];
+                                 return [4 /*yield*/, __await({ type: 'reasoning-delta', textDelta: part.textDelta })];
+                             case 6: return [4 /*yield*/, _f.sent()];
+                             case 7:
+                                 _f.sent();
+                                 return [3 /*break*/, 11];
+                             case 8:
+                                 if (!(part.type === 'source')) return [3 /*break*/, 11];
+                                 return [4 /*yield*/, __await({
+                                         type: 'source',
+                                         source: {
+                                             url: part.sourceType === 'url' ? part.url : undefined,
+                                             title: part.sourceType === 'url' ? part.title : undefined,
+                                         },
+                                     })];
+                             case 9: return [4 /*yield*/, _f.sent()];
+                             case 10:
+                                 _f.sent();
+                                 _f.label = 11;
+                             case 11:
+                                 _b = true;
+                                 return [3 /*break*/, 1];
+                             case 12: return [3 /*break*/, 19];
+                             case 13:
+                                 e_1_1 = _f.sent();
+                                 e_1 = { error: e_1_1 };
+                                 return [3 /*break*/, 19];
+                             case 14:
+                                 _f.trys.push([14, , 17, 18]);
+                                 if (!(!_b && !_c && (_d = aiSdkFullStream_1.return))) return [3 /*break*/, 16];
+                                 return [4 /*yield*/, __await(_d.call(aiSdkFullStream_1))];
+                             case 15:
+                                 _f.sent();
+                                 _f.label = 16;
+                             case 16: return [3 /*break*/, 18];
+                             case 17:
+                                 if (e_1) throw e_1.error;
+                                 return [7 /*endfinally*/];
+                             case 18: return [7 /*endfinally*/];
+                             case 19: return [2 /*return*/];
                          }
-                         usageData = this.processUsageData({
-                             result: result,
-                             model: model,
-                         });
-                         if (!onUsageData) return [3 /*break*/, 4];
-                         return [4 /*yield*/, onUsageData(usageData)];
-                     case 3:
-                         _d.sent();
-                         _d.label = 4;
-                     case 4: return [2 /*return*/, {
-                             object: ((_b = result === null || result === void 0 ? void 0 : result.object) !== null && _b !== void 0 ? _b : {}),
-                             usageData: usageData,
-                         }];
-                 }
-             });
-         });
+                     });
+                 });
+             },
+             _a;
      };
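
A minimal consumption sketch for the mapped stream, assuming only the three part shapes yielded above; the StreamPart union is reconstructed from those shapes, and printStream is a hypothetical caller rather than a package API:

// StreamPart union reconstructed from the yields in mapFullStream above.
type StreamPart =
  | { type: 'text-delta'; textDelta: string }
  | { type: 'reasoning-delta'; textDelta: string }
  | { type: 'source'; source: { url?: string; title?: string } };

// Hypothetical consumer: works for any AsyncIterable<StreamPart>.
async function printStream(parts: AsyncIterable<StreamPart>): Promise<void> {
  for await (const part of parts) {
    if (part.type === 'text-delta') {
      process.stdout.write(part.textDelta); // text arrives incrementally
    } else if (part.type === 'reasoning-delta') {
      // reasoning interleaves with text in model order; often kept off-screen
    } else {
      console.log('\n[source]', part.source.url ?? part.source.title ?? '');
    }
  }
}
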
-     /** Convert (string | ChatCompletionMessageParam)[] -> AI SDK ModelMessage[] */
-     Conversation.prototype.toModelMessages = function (input) {
-         return input.map(function (m) {
-             var _a;
-             if (typeof m === 'string') {
-                 return { role: 'user', content: m };
+     Conversation.prototype.extractReasoningFromResult = function (result) {
+         try {
+             // Try to get reasoning from provider metadata or response
+             var reasoning = result === null || result === void 0 ? void 0 : result.reasoning;
+             if (typeof reasoning === 'string') {
+                 return reasoning;
              }
-             var text = Array.isArray(m.content)
-                 ? m.content.map(function (p) { var _a; return (typeof p === 'string' ? p : (_a = p === null || p === void 0 ? void 0 : p.text) !== null && _a !== void 0 ? _a : ''); }).join('\n')
-                 : (_a = m.content) !== null && _a !== void 0 ? _a : '';
-             var role = m.role === 'system' || m.role === 'user' || m.role === 'assistant' ? m.role : 'user';
-             return { role: role, content: text };
-         });
+             if (Array.isArray(reasoning)) {
+                 return reasoning
+                     .filter(function (r) { return r.type === 'reasoning'; })
+                     .map(function (r) { return r.text; })
+                     .join('');
+             }
+         }
+         catch (_a) {
+             // ignore
+         }
+         return '';
+     };
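
In source-style TypeScript, the two result.reasoning shapes this helper tolerates look like the sketch below (type names are illustrative):

// Some providers expose reasoning as one string, others as typed parts.
type ReasoningPart = { type: 'reasoning'; text: string };

function extractReasoning(result: { reasoning?: string | ReasoningPart[] }): string {
  const r = result.reasoning;
  if (typeof r === 'string') return r;
  if (Array.isArray(r)) {
    // Keep only reasoning parts and concatenate their text.
    return r.filter((p) => p.type === 'reasoning').map((p) => p.text).join('');
  }
  return '';
}
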
+     // ────────────────────────────────────────────────────────────
+     // Schema utilities
+     // ────────────────────────────────────────────────────────────
+     Conversation.prototype.isZodSchema = function (schema) {
+         if (!schema || (typeof schema !== 'object' && typeof schema !== 'function')) {
+             return false;
+         }
+         return (typeof schema.safeParse === 'function' ||
+             (!!schema._def && typeof schema._def.typeName === 'string'));
      };
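
The same duck-typing check in source-style TypeScript; a sketch, not the package's exported API. A plausible use, given the surrounding code, is routing values that fail this check through strictifyJsonSchema below:

// A value counts as a Zod schema if it exposes safeParse() or carries the
// internal _def.typeName marker; anything else is treated as plain JSON Schema.
function looksLikeZodSchema(schema: unknown): boolean {
  if (!schema || (typeof schema !== 'object' && typeof schema !== 'function')) {
    return false;
  }
  const s = schema as { safeParse?: unknown; _def?: { typeName?: unknown } };
  return typeof s.safeParse === 'function' || typeof s._def?.typeName === 'string';
}
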
      /**
-      * Strictifies a plain JSON Schema for OpenAI Structured Outputs (strict mode):
-      * - Ensures every object has `additionalProperties: false`
-      * - Ensures every object has a `required` array that includes **all** keys in `properties`
-      * - Adds missing `type: "object"` / `type: "array"` where implied by keywords
+      * Strictifies a JSON Schema for OpenAI Structured Outputs (strict mode).
       */
      Conversation.prototype.strictifyJsonSchema = function (schema) {
-         var root = JSON.parse(JSON.stringify(schema));
+         var root = JSON.parse(JSON.stringify(schema !== null && schema !== void 0 ? schema : {}));
          var visit = function (node) {
              if (!node || typeof node !== 'object') {
                  return;
              }
-             // If keywords imply a type but it's missing, add it (helps downstream validators)
              if (!node.type) {
                  if (node.properties || node.additionalProperties || node.patternProperties) {
                      node.type = 'object';
@@ -479,32 +1083,25 @@ var Conversation = /** @class */ (function () {
                  }
              }
              var types = Array.isArray(node.type) ? node.type : node.type ? [node.type] : [];
-             // Objects: enforce strict requirements
              if (types.includes('object')) {
-                 // 1) additionalProperties: false
                  if (node.additionalProperties !== false) {
                      node.additionalProperties = false;
                  }
-                 // 2) required must exist and include every key in properties
                  if (node.properties && typeof node.properties === 'object') {
                      var propKeys = Object.keys(node.properties);
                      var currentReq = Array.isArray(node.required) ? node.required.slice() : [];
-                     var union = Array.from(new Set(__spreadArray(__spreadArray([], currentReq, true), propKeys, true)));
-                     node.required = union;
-                     // Recurse into each property schema
+                     node.required = Array.from(new Set(__spreadArray(__spreadArray([], currentReq, true), propKeys, true)));
                      for (var _i = 0, propKeys_1 = propKeys; _i < propKeys_1.length; _i++) {
                          var k = propKeys_1[_i];
                          visit(node.properties[k]);
                      }
                  }
-                 // Recurse into patternProperties
                  if (node.patternProperties && typeof node.patternProperties === 'object') {
                      for (var _a = 0, _b = Object.keys(node.patternProperties); _a < _b.length; _a++) {
                          var k = _b[_a];
                          visit(node.patternProperties[k]);
                      }
                  }
-                 // Recurse into $defs / definitions
                  for (var _c = 0, _d = ['$defs', 'definitions']; _c < _d.length; _c++) {
                      var defsKey = _d[_c];
                      if (node[defsKey] && typeof node[defsKey] === 'object') {
@@ -515,7 +1112,6 @@ var Conversation = /** @class */ (function () {
                      }
                  }
              }
-             // Arrays: recurse into items/prefixItems
              if (types.includes('array')) {
                  if (node.items) {
                      if (Array.isArray(node.items)) {
@@ -529,14 +1125,12 @@ var Conversation = /** @class */ (function () {
                      node.prefixItems.forEach(visit);
                  }
              }
-             // Combinators
              for (var _g = 0, _h = ['oneOf', 'anyOf', 'allOf']; _g < _h.length; _g++) {
                  var k = _h[_g];
                  if (Array.isArray(node[k])) {
                      node[k].forEach(visit);
                  }
              }
-             // Negation
              if (node.not) {
                  visit(node.not);
              }
@@ -544,200 +1138,7 @@ var Conversation = /** @class */ (function () {
          visit(root);
          return root;
      };
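
To make the strict-mode rules concrete, a worked before/after example; the input is invented, and the output follows only the behavior visible above:

// Before: a loose schema. `address` omits `type`; `required` lists only `name`.
const loose = {
  type: 'object',
  properties: {
    name: { type: 'string' },
    address: { properties: { city: { type: 'string' } } },
  },
  required: ['name'],
};

// After strictifyJsonSchema:
const strict = {
  type: 'object',
  additionalProperties: false, // every object is closed
  properties: {
    name: { type: 'string' },
    address: {
      type: 'object', // inferred from the `properties` keyword
      additionalProperties: false,
      properties: { city: { type: 'string' } },
      required: ['city'],
    },
  },
  required: ['name', 'address'], // union of existing `required` and all property keys
};
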
-     // ---- Usage + provider metadata normalization ----
-     Conversation.prototype.processUsageData = function (args) {
-         var _a, _b;
-         var result = args.result, model = args.model, toolCounts = args.toolCounts, toolLedgerLen = args.toolLedgerLen;
-         var u = result === null || result === void 0 ? void 0 : result.usage;
-         // Provider-specific extras (OpenAI Responses variants)
-         var _c = (_b = (_a = this.extractOpenAiUsageDetails) === null || _a === void 0 ? void 0 : _a.call(this, result)) !== null && _b !== void 0 ? _b : {}, cachedInputTokens = _c.cachedInputTokens, reasoningTokens = _c.reasoningTokens;
-         var input = Number.isFinite(u === null || u === void 0 ? void 0 : u.inputTokens) ? Number(u.inputTokens) : 0;
-         var reasoning = Number.isFinite(reasoningTokens) ? Number(reasoningTokens) : 0;
-         var output = Number.isFinite(u === null || u === void 0 ? void 0 : u.outputTokens) ? Number(u.outputTokens) : 0;
-         var total = Number.isFinite(u === null || u === void 0 ? void 0 : u.totalTokens) ? Number(u.totalTokens) : input + output;
-         var cached = Number.isFinite(cachedInputTokens) ? Number(cachedInputTokens) : 0;
-         // Resolve model id for pricing/telemetry
-         var modelId = model === null || model === void 0 ? void 0 : model.toString();
-         var resolvedModel = typeof modelId === 'string' && modelId.trim().length > 0 ? modelId : 'unknown';
-         var tokenUsage = {
-             inputTokens: input,
-             reasoningTokens: reasoning,
-             cachedInputTokens: cached,
-             outputTokens: output,
-             totalTokens: total,
-         };
-         var uda = new UsageData_1.UsageDataAccumulator({ model: modelId });
-         uda.addTokenUsage(tokenUsage);
-         var callsPerTool = toolCounts ? Object.fromEntries(toolCounts) : {};
-         var totalToolCalls = typeof toolLedgerLen === 'number' ? toolLedgerLen : Object.values(callsPerTool).reduce(function (a, b) { return a + (b || 0); }, 0);
-         return __assign(__assign({}, uda.usageData), { totalRequestsToAssistant: 1, totalToolCalls: totalToolCalls, callsPerTool: callsPerTool });
-     };
-     // Pull OpenAI-specific cached/extra usage from provider metadata or raw usage.
-     // Safe across providers; returns undefined if not available.
-     Conversation.prototype.extractOpenAiUsageDetails = function (result) {
-         var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
-         try {
-             var md = (_b = (_a = result === null || result === void 0 ? void 0 : result.providerMetadata) === null || _a === void 0 ? void 0 : _a.openai) !== null && _b !== void 0 ? _b : (_d = (_c = result === null || result === void 0 ? void 0 : result.response) === null || _c === void 0 ? void 0 : _c.providerMetadata) === null || _d === void 0 ? void 0 : _d.openai;
-             var usage = (_g = (_e = md === null || md === void 0 ? void 0 : md.usage) !== null && _e !== void 0 ? _e : (_f = result === null || result === void 0 ? void 0 : result.response) === null || _f === void 0 ? void 0 : _f.usage) !== null && _g !== void 0 ? _g : result === null || result === void 0 ? void 0 : result.usage;
-             // OpenAI Responses API has used different shapes over time; try both:
-             var cachedInputTokens = (_l = (_j = (_h = usage === null || usage === void 0 ? void 0 : usage.input_tokens_details) === null || _h === void 0 ? void 0 : _h.cached_tokens) !== null && _j !== void 0 ? _j : (_k = usage === null || usage === void 0 ? void 0 : usage.prompt_tokens_details) === null || _k === void 0 ? void 0 : _k.cached_tokens) !== null && _l !== void 0 ? _l : usage === null || usage === void 0 ? void 0 : usage.cached_input_tokens;
-             // Reasoning tokens (when available on reasoning models)
-             var reasoningTokens = (_q = (_o = (_m = usage === null || usage === void 0 ? void 0 : usage.output_tokens_details) === null || _m === void 0 ? void 0 : _m.reasoning_tokens) !== null && _o !== void 0 ? _o : (_p = usage === null || usage === void 0 ? void 0 : usage.completion_tokens_details) === null || _p === void 0 ? void 0 : _p.reasoning_tokens) !== null && _q !== void 0 ? _q : usage === null || usage === void 0 ? void 0 : usage.reasoning_tokens;
-             return {
-                 cachedInputTokens: typeof cachedInputTokens === 'number' ? cachedInputTokens : undefined,
-                 reasoningTokens: typeof reasoningTokens === 'number' ? reasoningTokens : undefined,
-             };
-         }
-         catch (_r) {
-             return {};
-         }
-     };
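
For contrast with the new mapSdkUsage, a short sketch of the raw usage paths the removed normalizer probed; the paths are copied from the code above, and the ordering reflects its own comment that the OpenAI Responses API has used different shapes over time:

// Priority order the removed extractOpenAiUsageDetails used.
const cachedTokenPaths = [
  'usage.input_tokens_details.cached_tokens',
  'usage.prompt_tokens_details.cached_tokens',
  'usage.cached_input_tokens',
];
const reasoningTokenPaths = [
  'usage.output_tokens_details.reasoning_tokens',
  'usage.completion_tokens_details.reasoning_tokens',
  'usage.reasoning_tokens',
];
// In 3.0.0 this probing is gone: mapSdkUsage reads the AI SDK's normalized
// inputTokenDetails.cacheReadTokens and outputTokenDetails.reasoningTokens.
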
-     Conversation.prototype.generateCode = function (_a) {
-         var description = _a.description, model = _a.model;
-         return __awaiter(this, void 0, void 0, function () {
-             var code;
-             return __generator(this, function (_b) {
-                 switch (_b.label) {
-                     case 0:
-                         this.logger.debug({ message: "Generating code", obj: { description: description } });
-                         return [4 /*yield*/, this.ensureModulesProcessed()];
-                     case 1:
-                         _b.sent();
-                         return [4 /*yield*/, new OpenAi_1.OpenAi({
-                                 history: this.history,
-                                 functions: this.functions,
-                                 messageModerators: this.messageModerators,
-                                 logLevel: this.params.logLevel,
-                             }).generateCode({
-                                 messages: description,
-                                 model: model,
-                                 includeSystemMessages: !this.generatedCode,
-                             })];
-                     case 2:
-                         code = _b.sent();
-                         this.logger.debug({ message: "Generated code", obj: { code: code } });
-                         this.generatedCode = true;
-                         return [2 /*return*/, code];
-                 }
-             });
-         });
-     };
-     Conversation.prototype.updateCodeFromFile = function (_a) {
-         var codeToUpdateFilePath = _a.codeToUpdateFilePath, dependencyCodeFilePaths = _a.dependencyCodeFilePaths, description = _a.description, model = _a.model;
-         return __awaiter(this, void 0, void 0, function () {
-             var codeToUpdate, dependencyDescription, _i, dependencyCodeFilePaths_1, dependencyCodeFilePath, dependencCode;
-             return __generator(this, function (_b) {
-                 switch (_b.label) {
-                     case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
-                     case 1:
-                         _b.sent();
-                         return [4 /*yield*/, util_node_1.Fs.readFile(codeToUpdateFilePath)];
-                     case 2:
-                         codeToUpdate = _b.sent();
-                         dependencyDescription = "Assume the following exists:\n";
-                         _i = 0, dependencyCodeFilePaths_1 = dependencyCodeFilePaths;
-                         _b.label = 3;
-                     case 3:
-                         if (!(_i < dependencyCodeFilePaths_1.length)) return [3 /*break*/, 6];
-                         dependencyCodeFilePath = dependencyCodeFilePaths_1[_i];
-                         return [4 /*yield*/, util_node_1.Fs.readFile(dependencyCodeFilePath)];
-                     case 4:
-                         dependencCode = _b.sent();
-                         dependencyDescription += dependencCode + '\n\n';
-                         _b.label = 5;
-                     case 5:
-                         _i++;
-                         return [3 /*break*/, 3];
-                     case 6:
-                         this.logger.debug({ message: "Updating code from file", obj: { codeToUpdateFilePath: codeToUpdateFilePath } });
-                         return [4 /*yield*/, this.updateCode({ code: codeToUpdate, description: dependencyDescription + description, model: model })];
-                     case 7: return [2 /*return*/, _b.sent()];
-                 }
-             });
-         });
-     };
-     Conversation.prototype.updateCode = function (_a) {
-         var code = _a.code, description = _a.description, model = _a.model;
-         return __awaiter(this, void 0, void 0, function () {
-             var updatedCode;
-             return __generator(this, function (_b) {
-                 switch (_b.label) {
-                     case 0:
-                         this.logger.debug({ message: "Updating code", obj: { description: description, code: code } });
-                         return [4 /*yield*/, this.ensureModulesProcessed()];
-                     case 1:
-                         _b.sent();
-                         return [4 /*yield*/, new OpenAi_1.OpenAi({
-                                 history: this.history,
-                                 functions: this.functions,
-                                 messageModerators: this.messageModerators,
-                                 logLevel: this.params.logLevel,
-                             }).updateCode({
-                                 code: code,
-                                 description: description,
-                                 model: model,
-                                 includeSystemMessages: !this.generatedCode,
-                             })];
-                     case 2:
-                         updatedCode = _b.sent();
-                         this.logger.debug({ message: "Updated code", obj: { updatedCode: updatedCode } });
-                         this.generatedCode = true;
-                         return [2 /*return*/, updatedCode];
-                 }
-             });
-         });
-     };
-     Conversation.prototype.generateList = function (_a) {
-         var description = _a.description, model = _a.model;
-         return __awaiter(this, void 0, void 0, function () {
-             var list;
-             return __generator(this, function (_b) {
-                 switch (_b.label) {
-                     case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
-                     case 1:
-                         _b.sent();
-                         return [4 /*yield*/, new OpenAi_1.OpenAi({
-                                 history: this.history,
-                                 functions: this.functions,
-                                 messageModerators: this.messageModerators,
-                                 logLevel: this.params.logLevel,
-                             }).generateList({
-                                 messages: description,
-                                 model: model,
-                                 includeSystemMessages: !this.generatedList,
-                             })];
-                     case 2:
-                         list = _b.sent();
-                         this.generatedList = true;
-                         return [2 /*return*/, list];
-                 }
-             });
-         });
-     };
      return Conversation;
  }());
  exports.Conversation = Conversation;
- exports.summarizeConversationHistoryFunctionName = 'summarizeConversationHistory';
- var summarizeConversationHistoryFunction = function (conversation) {
-     return {
-         definition: {
-             name: exports.summarizeConversationHistoryFunctionName,
-             description: 'Clear the conversation history and summarize what was in it',
-             parameters: {
-                 type: 'object',
-                 properties: {
-                     summary: {
-                         type: 'string',
-                         description: 'A 1-3 sentence summary of the current chat history',
-                     },
-                 },
-                 required: ['summary'],
-             },
-         },
-         call: function (params) { return __awaiter(void 0, void 0, void 0, function () { return __generator(this, function (_a) {
-             return [2 /*return*/, conversation.summarizeConversationHistory(params.summary)];
-         }); }); },
-     };
- };
- exports.summarizeConversationHistoryFunction = summarizeConversationHistoryFunction;
  //# sourceMappingURL=Conversation.js.map