@ax-llm/ax 12.0.16 → 12.0.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +970 -223
- package/index.cjs.map +1 -1
- package/index.d.cts +289 -14
- package/index.d.ts +289 -14
- package/index.js +966 -223
- package/index.js.map +1 -1
- package/package.json +1 -1
package/index.cjs
CHANGED
|
@@ -65,6 +65,7 @@ __export(index_exports, {
|
|
|
65
65
|
AxAIOpenAIResponsesBase: () => AxAIOpenAIResponsesBase,
|
|
66
66
|
AxAIOpenAIResponsesImpl: () => AxAIOpenAIResponsesImpl,
|
|
67
67
|
AxAIOpenAIResponsesModel: () => AxAIOpenAIResponsesModel,
|
|
68
|
+
AxAIRefusalError: () => AxAIRefusalError,
|
|
68
69
|
AxAIReka: () => AxAIReka,
|
|
69
70
|
AxAIRekaModel: () => AxAIRekaModel,
|
|
70
71
|
AxAIServiceAbortedError: () => AxAIServiceAbortedError,
|
|
@@ -96,6 +97,8 @@ __export(index_exports, {
|
|
|
96
97
|
AxDockerSession: () => AxDockerSession,
|
|
97
98
|
AxEmbeddingAdapter: () => AxEmbeddingAdapter,
|
|
98
99
|
AxEvalUtil: () => AxEvalUtil,
|
|
100
|
+
AxFlow: () => AxFlow,
|
|
101
|
+
AxFlowTypedSubContextImpl: () => AxFlowTypedSubContextImpl,
|
|
99
102
|
AxFunctionError: () => AxFunctionError,
|
|
100
103
|
AxFunctionProcessor: () => AxFunctionProcessor,
|
|
101
104
|
AxGen: () => AxGen,
|
|
@@ -173,6 +176,7 @@ __export(index_exports, {
|
|
|
173
176
|
axModelInfoHuggingFace: () => axModelInfoHuggingFace,
|
|
174
177
|
axModelInfoMistral: () => axModelInfoMistral,
|
|
175
178
|
axModelInfoOpenAI: () => axModelInfoOpenAI,
|
|
179
|
+
axModelInfoOpenAIResponses: () => axModelInfoOpenAIResponses,
|
|
176
180
|
axModelInfoReka: () => axModelInfoReka,
|
|
177
181
|
axModelInfoTogether: () => axModelInfoTogether,
|
|
178
182
|
axSpanAttributes: () => axSpanAttributes,
|
|
@@ -184,74 +188,6 @@ __export(index_exports, {
|
|
|
184
188
|
});
|
|
185
189
|
module.exports = __toCommonJS(index_exports);
|
|
186
190
|
|
|
187
|
-
// ai/base.ts
|
|
188
|
-
var import_crypto2 = __toESM(require("crypto"), 1);
|
|
189
|
-
var import_api2 = require("@opentelemetry/api");
|
|
190
|
-
|
|
191
|
-
// trace/trace.ts
|
|
192
|
-
var axSpanAttributes = {
|
|
193
|
-
// LLM
|
|
194
|
-
LLM_SYSTEM: "gen_ai.system",
|
|
195
|
-
LLM_OPERATION_NAME: "gen_ai.operation.name",
|
|
196
|
-
LLM_REQUEST_MODEL: "gen_ai.request.model",
|
|
197
|
-
LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
|
|
198
|
-
LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
|
|
199
|
-
LLM_REQUEST_TOP_K: "gen_ai.request.top_k",
|
|
200
|
-
LLM_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty",
|
|
201
|
-
LLM_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty",
|
|
202
|
-
LLM_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences",
|
|
203
|
-
LLM_REQUEST_LLM_IS_STREAMING: "gen_ai.request.llm_is_streaming",
|
|
204
|
-
LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
|
|
205
|
-
LLM_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
|
|
206
|
-
LLM_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
|
|
207
|
-
LLM_USAGE_TOTAL_TOKENS: "gen_ai.usage.total_tokens",
|
|
208
|
-
LLM_USAGE_THOUGHTS_TOKENS: "gen_ai.usage.thoughts_tokens",
|
|
209
|
-
// Vector DB
|
|
210
|
-
DB_SYSTEM: "db.system",
|
|
211
|
-
DB_TABLE: "db.table",
|
|
212
|
-
DB_NAMESPACE: "db.namespace",
|
|
213
|
-
DB_ID: "db.id",
|
|
214
|
-
DB_QUERY_TEXT: "db.query.text",
|
|
215
|
-
DB_VECTOR: "db.vector",
|
|
216
|
-
DB_OPERATION_NAME: "db.operation.name",
|
|
217
|
-
DB_VECTOR_QUERY_TOP_K: "db.vector.query.top_k",
|
|
218
|
-
DB_QUERY_EMBEDDINGS: "db.query.embeddings",
|
|
219
|
-
DB_QUERY_RESULT: "db.query.result",
|
|
220
|
-
// Query Embeddings
|
|
221
|
-
DB_QUERY_EMBEDDINGS_VECTOR: "db.query.embeddings.vector",
|
|
222
|
-
// Query Result (canonical format)
|
|
223
|
-
DB_QUERY_RESULT_ID: "db.query.result.id",
|
|
224
|
-
DB_QUERY_RESULT_SCORE: "db.query.result.score",
|
|
225
|
-
DB_QUERY_RESULT_DISTANCE: "db.query.result.distance",
|
|
226
|
-
DB_QUERY_RESULT_METADATA: "db.query.result.metadata",
|
|
227
|
-
DB_QUERY_RESULT_VECTOR: "db.query.result.vector",
|
|
228
|
-
DB_QUERY_RESULT_DOCUMENT: "db.query.result.document"
|
|
229
|
-
};
|
|
230
|
-
var axSpanEvents = {
|
|
231
|
-
GEN_AI_USER_MESSAGE: "gen_ai.user.message",
|
|
232
|
-
GEN_AI_SYSTEM_MESSAGE: "gen_ai.system.message",
|
|
233
|
-
GEN_AI_ASSISTANT_MESSAGE: "gen_ai.assistant.message",
|
|
234
|
-
GEN_AI_TOOL_MESSAGE: "gen_ai.tool.message",
|
|
235
|
-
// For tool messages in request & response tool calls
|
|
236
|
-
GEN_AI_CHOICE: "gen_ai.choice",
|
|
237
|
-
GEN_AI_USAGE: "gen_ai.usage"
|
|
238
|
-
};
|
|
239
|
-
var AxLLMRequestTypeValues = /* @__PURE__ */ ((AxLLMRequestTypeValues2) => {
|
|
240
|
-
AxLLMRequestTypeValues2["COMPLETION"] = "completion";
|
|
241
|
-
AxLLMRequestTypeValues2["CHAT"] = "chat";
|
|
242
|
-
AxLLMRequestTypeValues2["RERANK"] = "rerank";
|
|
243
|
-
AxLLMRequestTypeValues2["UNKNOWN"] = "unknown";
|
|
244
|
-
return AxLLMRequestTypeValues2;
|
|
245
|
-
})(AxLLMRequestTypeValues || {});
|
|
246
|
-
var AxSpanKindValues = /* @__PURE__ */ ((AxSpanKindValues2) => {
|
|
247
|
-
AxSpanKindValues2["WORKFLOW"] = "workflow";
|
|
248
|
-
AxSpanKindValues2["TASK"] = "task";
|
|
249
|
-
AxSpanKindValues2["AGENT"] = "agent";
|
|
250
|
-
AxSpanKindValues2["TOOL"] = "tool";
|
|
251
|
-
AxSpanKindValues2["UNKNOWN"] = "unknown";
|
|
252
|
-
return AxSpanKindValues2;
|
|
253
|
-
})(AxSpanKindValues || {});
|
|
254
|
-
|
|
255
191
|
// util/apicall.ts
|
|
256
192
|
var import_crypto = __toESM(require("crypto"), 1);
|
|
257
193
|
var import_web3 = require("stream/web");
|
|
@@ -500,6 +436,33 @@ var AxAIServiceAuthenticationError = class extends AxAIServiceError {
|
|
|
500
436
|
this.name = this.constructor.name;
|
|
501
437
|
}
|
|
502
438
|
};
|
|
439
|
+
var AxAIRefusalError = class extends Error {
|
|
440
|
+
constructor(refusalMessage, model, requestId) {
|
|
441
|
+
super(`Model refused to fulfill request: ${refusalMessage}`);
|
|
442
|
+
this.refusalMessage = refusalMessage;
|
|
443
|
+
this.model = model;
|
|
444
|
+
this.requestId = requestId;
|
|
445
|
+
this.name = "AxAIRefusalError";
|
|
446
|
+
this.timestamp = (/* @__PURE__ */ new Date()).toISOString();
|
|
447
|
+
this.errorId = import_crypto.default.randomUUID();
|
|
448
|
+
}
|
|
449
|
+
timestamp;
|
|
450
|
+
errorId;
|
|
451
|
+
toString() {
|
|
452
|
+
return [
|
|
453
|
+
`${this.name}: ${this.message}`,
|
|
454
|
+
`Refusal: ${this.refusalMessage}`,
|
|
455
|
+
this.model ? `Model: ${this.model}` : "",
|
|
456
|
+
this.requestId ? `Request ID: ${this.requestId}` : "",
|
|
457
|
+
`Timestamp: ${this.timestamp}`,
|
|
458
|
+
`Error ID: ${this.errorId}`
|
|
459
|
+
].filter(Boolean).join("\n");
|
|
460
|
+
}
|
|
461
|
+
// For Node.js, override the custom inspect method so console.log shows our custom string.
|
|
462
|
+
[Symbol.for("nodejs.util.inspect.custom")](_depth, _options) {
|
|
463
|
+
return this.toString();
|
|
464
|
+
}
|
|
465
|
+
};
|
|
503
466
|
async function safeReadResponseBody(response) {
|
|
504
467
|
try {
|
|
505
468
|
if (response.headers.get("content-type")?.includes("application/json")) {
|
|
@@ -803,6 +766,74 @@ var apiCall = async (api, json) => {
|
|
|
803
766
|
}
|
|
804
767
|
};
|
|
805
768
|
|
|
769
|
+
// ai/base.ts
|
|
770
|
+
var import_crypto2 = __toESM(require("crypto"), 1);
|
|
771
|
+
var import_api2 = require("@opentelemetry/api");
|
|
772
|
+
|
|
773
|
+
// trace/trace.ts
|
|
774
|
+
var axSpanAttributes = {
|
|
775
|
+
// LLM
|
|
776
|
+
LLM_SYSTEM: "gen_ai.system",
|
|
777
|
+
LLM_OPERATION_NAME: "gen_ai.operation.name",
|
|
778
|
+
LLM_REQUEST_MODEL: "gen_ai.request.model",
|
|
779
|
+
LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
|
|
780
|
+
LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
|
|
781
|
+
LLM_REQUEST_TOP_K: "gen_ai.request.top_k",
|
|
782
|
+
LLM_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty",
|
|
783
|
+
LLM_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty",
|
|
784
|
+
LLM_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences",
|
|
785
|
+
LLM_REQUEST_LLM_IS_STREAMING: "gen_ai.request.llm_is_streaming",
|
|
786
|
+
LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
|
|
787
|
+
LLM_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
|
|
788
|
+
LLM_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
|
|
789
|
+
LLM_USAGE_TOTAL_TOKENS: "gen_ai.usage.total_tokens",
|
|
790
|
+
LLM_USAGE_THOUGHTS_TOKENS: "gen_ai.usage.thoughts_tokens",
|
|
791
|
+
// Vector DB
|
|
792
|
+
DB_SYSTEM: "db.system",
|
|
793
|
+
DB_TABLE: "db.table",
|
|
794
|
+
DB_NAMESPACE: "db.namespace",
|
|
795
|
+
DB_ID: "db.id",
|
|
796
|
+
DB_QUERY_TEXT: "db.query.text",
|
|
797
|
+
DB_VECTOR: "db.vector",
|
|
798
|
+
DB_OPERATION_NAME: "db.operation.name",
|
|
799
|
+
DB_VECTOR_QUERY_TOP_K: "db.vector.query.top_k",
|
|
800
|
+
DB_QUERY_EMBEDDINGS: "db.query.embeddings",
|
|
801
|
+
DB_QUERY_RESULT: "db.query.result",
|
|
802
|
+
// Query Embeddings
|
|
803
|
+
DB_QUERY_EMBEDDINGS_VECTOR: "db.query.embeddings.vector",
|
|
804
|
+
// Query Result (canonical format)
|
|
805
|
+
DB_QUERY_RESULT_ID: "db.query.result.id",
|
|
806
|
+
DB_QUERY_RESULT_SCORE: "db.query.result.score",
|
|
807
|
+
DB_QUERY_RESULT_DISTANCE: "db.query.result.distance",
|
|
808
|
+
DB_QUERY_RESULT_METADATA: "db.query.result.metadata",
|
|
809
|
+
DB_QUERY_RESULT_VECTOR: "db.query.result.vector",
|
|
810
|
+
DB_QUERY_RESULT_DOCUMENT: "db.query.result.document"
|
|
811
|
+
};
|
|
812
|
+
var axSpanEvents = {
|
|
813
|
+
GEN_AI_USER_MESSAGE: "gen_ai.user.message",
|
|
814
|
+
GEN_AI_SYSTEM_MESSAGE: "gen_ai.system.message",
|
|
815
|
+
GEN_AI_ASSISTANT_MESSAGE: "gen_ai.assistant.message",
|
|
816
|
+
GEN_AI_TOOL_MESSAGE: "gen_ai.tool.message",
|
|
817
|
+
// For tool messages in request & response tool calls
|
|
818
|
+
GEN_AI_CHOICE: "gen_ai.choice",
|
|
819
|
+
GEN_AI_USAGE: "gen_ai.usage"
|
|
820
|
+
};
|
|
821
|
+
var AxLLMRequestTypeValues = /* @__PURE__ */ ((AxLLMRequestTypeValues2) => {
|
|
822
|
+
AxLLMRequestTypeValues2["COMPLETION"] = "completion";
|
|
823
|
+
AxLLMRequestTypeValues2["CHAT"] = "chat";
|
|
824
|
+
AxLLMRequestTypeValues2["RERANK"] = "rerank";
|
|
825
|
+
AxLLMRequestTypeValues2["UNKNOWN"] = "unknown";
|
|
826
|
+
return AxLLMRequestTypeValues2;
|
|
827
|
+
})(AxLLMRequestTypeValues || {});
|
|
828
|
+
var AxSpanKindValues = /* @__PURE__ */ ((AxSpanKindValues2) => {
|
|
829
|
+
AxSpanKindValues2["WORKFLOW"] = "workflow";
|
|
830
|
+
AxSpanKindValues2["TASK"] = "task";
|
|
831
|
+
AxSpanKindValues2["AGENT"] = "agent";
|
|
832
|
+
AxSpanKindValues2["TOOL"] = "tool";
|
|
833
|
+
AxSpanKindValues2["UNKNOWN"] = "unknown";
|
|
834
|
+
return AxSpanKindValues2;
|
|
835
|
+
})(AxSpanKindValues || {});
|
|
836
|
+
|
|
806
837
|
// util/transform.ts
|
|
807
838
|
var import_web4 = require("stream/web");
|
|
808
839
|
var TypeTransformer = class {
|
|
@@ -1405,6 +1436,14 @@ var AxBaseAI = class {
|
|
|
1405
1436
|
if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {
|
|
1406
1437
|
throw new Error(`Model ${model} does not support showThoughts.`);
|
|
1407
1438
|
}
|
|
1439
|
+
const modelInfo = this.modelInfo.find(
|
|
1440
|
+
(info) => info.name === model
|
|
1441
|
+
);
|
|
1442
|
+
if (modelInfo?.isExpensive && options?.useExpensiveModel !== "yes") {
|
|
1443
|
+
throw new Error(
|
|
1444
|
+
`Model ${model} is marked as expensive and requires explicit confirmation. Set useExpensiveModel: "yes" to proceed.`
|
|
1445
|
+
);
|
|
1446
|
+
}
|
|
1408
1447
|
modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
|
|
1409
1448
|
const canStream = this.getFeatures(model).streaming;
|
|
1410
1449
|
if (!canStream) {
|
|
@@ -2166,7 +2205,13 @@ var AxAIAnthropicImpl = class {
|
|
|
2166
2205
|
};
|
|
2167
2206
|
createChatResp = (resp) => {
|
|
2168
2207
|
if (resp.type === "error") {
|
|
2169
|
-
throw new
|
|
2208
|
+
throw new AxAIRefusalError(
|
|
2209
|
+
resp.error.message,
|
|
2210
|
+
void 0,
|
|
2211
|
+
// model not specified in error response
|
|
2212
|
+
void 0
|
|
2213
|
+
// requestId not specified in error response
|
|
2214
|
+
);
|
|
2170
2215
|
}
|
|
2171
2216
|
const finishReason = mapFinishReason(resp.stop_reason);
|
|
2172
2217
|
const showThoughts = this.currentPromptConfig?.thinkingTokenBudget !== "none" && this.currentPromptConfig?.showThoughts !== false;
|
|
@@ -2222,7 +2267,13 @@ var AxAIAnthropicImpl = class {
|
|
|
2222
2267
|
}
|
|
2223
2268
|
if (resp.type === "error") {
|
|
2224
2269
|
const { error } = resp;
|
|
2225
|
-
throw new
|
|
2270
|
+
throw new AxAIRefusalError(
|
|
2271
|
+
error.message,
|
|
2272
|
+
void 0,
|
|
2273
|
+
// model not specified in error event
|
|
2274
|
+
void 0
|
|
2275
|
+
// requestId not specified in error event
|
|
2276
|
+
);
|
|
2226
2277
|
}
|
|
2227
2278
|
const index = 0;
|
|
2228
2279
|
if (resp.type === "message_start") {
|
|
@@ -2527,8 +2578,6 @@ function mapFinishReason(stopReason) {
|
|
|
2527
2578
|
|
|
2528
2579
|
// ai/openai/chat_types.ts
|
|
2529
2580
|
var AxAIOpenAIModel = /* @__PURE__ */ ((AxAIOpenAIModel2) => {
|
|
2530
|
-
AxAIOpenAIModel2["O1"] = "o1";
|
|
2531
|
-
AxAIOpenAIModel2["O1Mini"] = "o1-mini";
|
|
2532
2581
|
AxAIOpenAIModel2["GPT4"] = "gpt-4";
|
|
2533
2582
|
AxAIOpenAIModel2["GPT41"] = "gpt-4.1";
|
|
2534
2583
|
AxAIOpenAIModel2["GPT41Mini"] = "gpt-4.1-mini";
|
|
@@ -2541,6 +2590,11 @@ var AxAIOpenAIModel = /* @__PURE__ */ ((AxAIOpenAIModel2) => {
|
|
|
2541
2590
|
AxAIOpenAIModel2["GPT35TextDavinci002"] = "text-davinci-002";
|
|
2542
2591
|
AxAIOpenAIModel2["GPT3TextBabbage002"] = "text-babbage-002";
|
|
2543
2592
|
AxAIOpenAIModel2["GPT3TextAda001"] = "text-ada-001";
|
|
2593
|
+
AxAIOpenAIModel2["O1"] = "o1";
|
|
2594
|
+
AxAIOpenAIModel2["O1Mini"] = "o1-mini";
|
|
2595
|
+
AxAIOpenAIModel2["O3"] = "o3";
|
|
2596
|
+
AxAIOpenAIModel2["O3Mini"] = "o3-mini";
|
|
2597
|
+
AxAIOpenAIModel2["O4Mini"] = "o4-mini";
|
|
2544
2598
|
return AxAIOpenAIModel2;
|
|
2545
2599
|
})(AxAIOpenAIModel || {});
|
|
2546
2600
|
var AxAIOpenAIEmbedModel = /* @__PURE__ */ ((AxAIOpenAIEmbedModel2) => {
|
|
@@ -2552,8 +2606,6 @@ var AxAIOpenAIEmbedModel = /* @__PURE__ */ ((AxAIOpenAIEmbedModel2) => {
|
|
|
2552
2606
|
|
|
2553
2607
|
// ai/openai/responses_types.ts
|
|
2554
2608
|
var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
|
|
2555
|
-
AxAIOpenAIResponsesModel2["O1"] = "o1";
|
|
2556
|
-
AxAIOpenAIResponsesModel2["O1Mini"] = "o1-mini";
|
|
2557
2609
|
AxAIOpenAIResponsesModel2["GPT4"] = "gpt-4";
|
|
2558
2610
|
AxAIOpenAIResponsesModel2["GPT41"] = "gpt-4.1";
|
|
2559
2611
|
AxAIOpenAIResponsesModel2["GPT41Mini"] = "gpt-4.1-mini";
|
|
@@ -2566,6 +2618,10 @@ var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
|
|
|
2566
2618
|
AxAIOpenAIResponsesModel2["GPT35TextDavinci002"] = "text-davinci-002";
|
|
2567
2619
|
AxAIOpenAIResponsesModel2["GPT3TextBabbage002"] = "text-babbage-002";
|
|
2568
2620
|
AxAIOpenAIResponsesModel2["GPT3TextAda001"] = "text-ada-001";
|
|
2621
|
+
AxAIOpenAIResponsesModel2["O1Pro"] = "o1-pro";
|
|
2622
|
+
AxAIOpenAIResponsesModel2["O1"] = "o1";
|
|
2623
|
+
AxAIOpenAIResponsesModel2["O1Mini"] = "o1-mini";
|
|
2624
|
+
AxAIOpenAIResponsesModel2["O3Pro"] = "o3-pro";
|
|
2569
2625
|
AxAIOpenAIResponsesModel2["O3"] = "o3";
|
|
2570
2626
|
AxAIOpenAIResponsesModel2["O3Mini"] = "o3-mini";
|
|
2571
2627
|
AxAIOpenAIResponsesModel2["O4Mini"] = "o4-mini";
|
|
@@ -2574,20 +2630,7 @@ var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
|
|
|
2574
2630
|
|
|
2575
2631
|
// ai/openai/info.ts
|
|
2576
2632
|
var axModelInfoOpenAI = [
|
|
2577
|
-
|
|
2578
|
-
name: "o1" /* O1 */,
|
|
2579
|
-
currency: "usd",
|
|
2580
|
-
promptTokenCostPer1M: 15,
|
|
2581
|
-
completionTokenCostPer1M: 60,
|
|
2582
|
-
hasThinkingBudget: true
|
|
2583
|
-
},
|
|
2584
|
-
{
|
|
2585
|
-
name: "o1-mini" /* O1Mini */,
|
|
2586
|
-
currency: "usd",
|
|
2587
|
-
promptTokenCostPer1M: 1.1,
|
|
2588
|
-
completionTokenCostPer1M: 14.4,
|
|
2589
|
-
hasThinkingBudget: true
|
|
2590
|
-
},
|
|
2633
|
+
// Not Reasoning models
|
|
2591
2634
|
{
|
|
2592
2635
|
name: "gpt-4" /* GPT4 */,
|
|
2593
2636
|
currency: "usd",
|
|
@@ -2636,30 +2679,36 @@ var axModelInfoOpenAI = [
|
|
|
2636
2679
|
promptTokenCostPer1M: 0.5,
|
|
2637
2680
|
completionTokenCostPer1M: 1.5
|
|
2638
2681
|
},
|
|
2639
|
-
//
|
|
2682
|
+
// Reasoning models
|
|
2683
|
+
{
|
|
2684
|
+
name: "o1" /* O1 */,
|
|
2685
|
+
currency: "usd",
|
|
2686
|
+
promptTokenCostPer1M: 15,
|
|
2687
|
+
completionTokenCostPer1M: 60
|
|
2688
|
+
},
|
|
2689
|
+
{
|
|
2690
|
+
name: "o1-mini" /* O1Mini */,
|
|
2691
|
+
currency: "usd",
|
|
2692
|
+
promptTokenCostPer1M: 1.1,
|
|
2693
|
+
completionTokenCostPer1M: 14.4
|
|
2694
|
+
},
|
|
2640
2695
|
{
|
|
2641
2696
|
name: "o3" /* O3 */,
|
|
2642
2697
|
currency: "usd",
|
|
2643
2698
|
promptTokenCostPer1M: 15,
|
|
2644
|
-
completionTokenCostPer1M: 60
|
|
2645
|
-
hasThinkingBudget: true,
|
|
2646
|
-
hasShowThoughts: true
|
|
2699
|
+
completionTokenCostPer1M: 60
|
|
2647
2700
|
},
|
|
2648
2701
|
{
|
|
2649
2702
|
name: "o3-mini" /* O3Mini */,
|
|
2650
2703
|
currency: "usd",
|
|
2651
2704
|
promptTokenCostPer1M: 1.1,
|
|
2652
|
-
completionTokenCostPer1M: 4.4
|
|
2653
|
-
hasThinkingBudget: true,
|
|
2654
|
-
hasShowThoughts: true
|
|
2705
|
+
completionTokenCostPer1M: 4.4
|
|
2655
2706
|
},
|
|
2656
2707
|
{
|
|
2657
2708
|
name: "o4-mini" /* O4Mini */,
|
|
2658
2709
|
currency: "usd",
|
|
2659
2710
|
promptTokenCostPer1M: 1.1,
|
|
2660
|
-
completionTokenCostPer1M: 4.4
|
|
2661
|
-
hasThinkingBudget: true,
|
|
2662
|
-
hasShowThoughts: true
|
|
2711
|
+
completionTokenCostPer1M: 4.4
|
|
2663
2712
|
},
|
|
2664
2713
|
// Embedding models
|
|
2665
2714
|
{
|
|
@@ -2681,8 +2730,123 @@ var axModelInfoOpenAI = [
|
|
|
2681
2730
|
completionTokenCostPer1M: 0.13
|
|
2682
2731
|
}
|
|
2683
2732
|
];
|
|
2733
|
+
var axModelInfoOpenAIResponses = [
|
|
2734
|
+
// Not Reasoning models
|
|
2735
|
+
{
|
|
2736
|
+
name: "gpt-4" /* GPT4 */,
|
|
2737
|
+
currency: "usd",
|
|
2738
|
+
promptTokenCostPer1M: 30,
|
|
2739
|
+
completionTokenCostPer1M: 60
|
|
2740
|
+
},
|
|
2741
|
+
{
|
|
2742
|
+
name: "gpt-4.1" /* GPT41 */,
|
|
2743
|
+
currency: "usd",
|
|
2744
|
+
promptTokenCostPer1M: 2,
|
|
2745
|
+
completionTokenCostPer1M: 8
|
|
2746
|
+
},
|
|
2747
|
+
{
|
|
2748
|
+
name: "gpt-4.1-mini" /* GPT41Mini */,
|
|
2749
|
+
currency: "usd",
|
|
2750
|
+
promptTokenCostPer1M: 0.4,
|
|
2751
|
+
completionTokenCostPer1M: 1.6
|
|
2752
|
+
},
|
|
2753
|
+
{
|
|
2754
|
+
name: "gpt-4o" /* GPT4O */,
|
|
2755
|
+
currency: "usd",
|
|
2756
|
+
promptTokenCostPer1M: 5,
|
|
2757
|
+
completionTokenCostPer1M: 15
|
|
2758
|
+
},
|
|
2759
|
+
{
|
|
2760
|
+
name: "gpt-4o-mini" /* GPT4OMini */,
|
|
2761
|
+
currency: "usd",
|
|
2762
|
+
promptTokenCostPer1M: 0.15,
|
|
2763
|
+
completionTokenCostPer1M: 0.6
|
|
2764
|
+
},
|
|
2765
|
+
{
|
|
2766
|
+
name: "chatgpt-4o-latest" /* GPT4ChatGPT4O */,
|
|
2767
|
+
currency: "usd",
|
|
2768
|
+
promptTokenCostPer1M: 5,
|
|
2769
|
+
completionTokenCostPer1M: 15
|
|
2770
|
+
},
|
|
2771
|
+
{
|
|
2772
|
+
name: "gpt-4-turbo" /* GPT4Turbo */,
|
|
2773
|
+
currency: "usd",
|
|
2774
|
+
promptTokenCostPer1M: 10,
|
|
2775
|
+
completionTokenCostPer1M: 30
|
|
2776
|
+
},
|
|
2777
|
+
{
|
|
2778
|
+
name: "gpt-3.5-turbo" /* GPT35Turbo */,
|
|
2779
|
+
currency: "usd",
|
|
2780
|
+
promptTokenCostPer1M: 0.5,
|
|
2781
|
+
completionTokenCostPer1M: 1.5
|
|
2782
|
+
},
|
|
2783
|
+
// Reasoning models
|
|
2784
|
+
{
|
|
2785
|
+
name: "o1-pro" /* O1Pro */,
|
|
2786
|
+
currency: "usd",
|
|
2787
|
+
promptTokenCostPer1M: 150,
|
|
2788
|
+
completionTokenCostPer1M: 600,
|
|
2789
|
+
hasThinkingBudget: true,
|
|
2790
|
+
hasShowThoughts: true,
|
|
2791
|
+
isExpensive: true
|
|
2792
|
+
},
|
|
2793
|
+
{
|
|
2794
|
+
name: "o1" /* O1 */,
|
|
2795
|
+
currency: "usd",
|
|
2796
|
+
promptTokenCostPer1M: 15,
|
|
2797
|
+
completionTokenCostPer1M: 60,
|
|
2798
|
+
hasThinkingBudget: true,
|
|
2799
|
+
hasShowThoughts: true
|
|
2800
|
+
},
|
|
2801
|
+
{
|
|
2802
|
+
name: "o3-pro" /* O3Pro */,
|
|
2803
|
+
currency: "usd",
|
|
2804
|
+
promptTokenCostPer1M: 20,
|
|
2805
|
+
completionTokenCostPer1M: 80,
|
|
2806
|
+
hasThinkingBudget: true,
|
|
2807
|
+
hasShowThoughts: true,
|
|
2808
|
+
isExpensive: true
|
|
2809
|
+
},
|
|
2810
|
+
{
|
|
2811
|
+
name: "o3" /* O3 */,
|
|
2812
|
+
currency: "usd",
|
|
2813
|
+
promptTokenCostPer1M: 15,
|
|
2814
|
+
completionTokenCostPer1M: 60,
|
|
2815
|
+
hasThinkingBudget: true,
|
|
2816
|
+
hasShowThoughts: true
|
|
2817
|
+
},
|
|
2818
|
+
{
|
|
2819
|
+
name: "o3-mini" /* O3Mini */,
|
|
2820
|
+
currency: "usd",
|
|
2821
|
+
promptTokenCostPer1M: 1.1,
|
|
2822
|
+
completionTokenCostPer1M: 4.4,
|
|
2823
|
+
hasThinkingBudget: true,
|
|
2824
|
+
hasShowThoughts: true
|
|
2825
|
+
},
|
|
2826
|
+
{
|
|
2827
|
+
name: "o4-mini" /* O4Mini */,
|
|
2828
|
+
currency: "usd",
|
|
2829
|
+
promptTokenCostPer1M: 1.1,
|
|
2830
|
+
completionTokenCostPer1M: 4.4,
|
|
2831
|
+
hasThinkingBudget: true,
|
|
2832
|
+
hasShowThoughts: true
|
|
2833
|
+
}
|
|
2834
|
+
];
|
|
2684
2835
|
|
|
2685
2836
|
// ai/openai/api.ts
|
|
2837
|
+
var isOpenAIThinkingModel = (model) => {
|
|
2838
|
+
const thinkingModels = [
|
|
2839
|
+
"o1" /* O1 */,
|
|
2840
|
+
"o1-mini" /* O1Mini */,
|
|
2841
|
+
"o3" /* O3 */,
|
|
2842
|
+
"o3-mini" /* O3Mini */,
|
|
2843
|
+
"o4-mini" /* O4Mini */,
|
|
2844
|
+
// Pro models (string values since they're not in the regular chat enum)
|
|
2845
|
+
"o1-pro",
|
|
2846
|
+
"o3-pro"
|
|
2847
|
+
];
|
|
2848
|
+
return thinkingModels.includes(model) || thinkingModels.includes(model);
|
|
2849
|
+
};
|
|
2686
2850
|
var axAIOpenAIDefaultConfig = () => structuredClone({
|
|
2687
2851
|
model: "gpt-4.1" /* GPT41 */,
|
|
2688
2852
|
embedModel: "text-embedding-3-small" /* TextEmbedding3Small */,
|
|
@@ -2746,20 +2910,24 @@ var AxAIOpenAIImpl = class {
|
|
|
2746
2910
|
const frequencyPenalty = req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty;
|
|
2747
2911
|
const stream = req.modelConfig?.stream ?? this.config.stream;
|
|
2748
2912
|
const store = this.config.store;
|
|
2913
|
+
const isThinkingModel = isOpenAIThinkingModel(model);
|
|
2749
2914
|
let reqValue = {
|
|
2750
2915
|
model,
|
|
2751
2916
|
messages,
|
|
2752
2917
|
response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
|
|
2753
2918
|
tools,
|
|
2754
2919
|
tool_choice: toolsChoice,
|
|
2755
|
-
|
|
2756
|
-
|
|
2757
|
-
|
|
2758
|
-
|
|
2920
|
+
// For thinking models, don't set these parameters as they're not supported
|
|
2921
|
+
...isThinkingModel ? {} : {
|
|
2922
|
+
max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
|
|
2923
|
+
temperature: req.modelConfig?.temperature ?? this.config.temperature,
|
|
2924
|
+
top_p: req.modelConfig?.topP ?? this.config.topP ?? 1,
|
|
2925
|
+
n: req.modelConfig?.n ?? this.config.n,
|
|
2926
|
+
presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty,
|
|
2927
|
+
...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {}
|
|
2928
|
+
},
|
|
2759
2929
|
stop: req.modelConfig?.stopSequences ?? this.config.stop,
|
|
2760
|
-
presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty,
|
|
2761
2930
|
logit_bias: this.config.logitBias,
|
|
2762
|
-
...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {},
|
|
2763
2931
|
...stream && this.streamingUsage ? { stream: true, stream_options: { include_usage: true } } : {},
|
|
2764
2932
|
...store ? { store } : {},
|
|
2765
2933
|
...this.config.serviceTier ? { service_tier: this.config.serviceTier } : {},
|
|
@@ -2850,6 +3018,9 @@ var AxAIOpenAIImpl = class {
|
|
|
2850
3018
|
totalTokens: usage.total_tokens
|
|
2851
3019
|
} : void 0;
|
|
2852
3020
|
const results = choices.map((choice) => {
|
|
3021
|
+
if (choice.message.refusal) {
|
|
3022
|
+
throw new AxAIRefusalError(choice.message.refusal, resp.model, resp.id);
|
|
3023
|
+
}
|
|
2853
3024
|
const finishReason = mapFinishReason2(choice.finish_reason);
|
|
2854
3025
|
const functionCalls = choice.message.tool_calls?.map(
|
|
2855
3026
|
({ id: id2, function: { arguments: params, name } }) => ({
|
|
@@ -2861,8 +3032,9 @@ var AxAIOpenAIImpl = class {
|
|
|
2861
3032
|
return {
|
|
2862
3033
|
index: choice.index,
|
|
2863
3034
|
id: `${choice.index}`,
|
|
2864
|
-
content: choice.message.content,
|
|
3035
|
+
content: choice.message.content ?? void 0,
|
|
2865
3036
|
thought: choice.message.reasoning_content,
|
|
3037
|
+
annotations: choice.message.annotations,
|
|
2866
3038
|
functionCalls,
|
|
2867
3039
|
finishReason
|
|
2868
3040
|
};
|
|
@@ -2889,11 +3061,16 @@ var AxAIOpenAIImpl = class {
|
|
|
2889
3061
|
delta: {
|
|
2890
3062
|
content,
|
|
2891
3063
|
role,
|
|
3064
|
+
refusal,
|
|
2892
3065
|
tool_calls: toolCalls,
|
|
2893
|
-
reasoning_content: thought
|
|
3066
|
+
reasoning_content: thought,
|
|
3067
|
+
annotations
|
|
2894
3068
|
},
|
|
2895
3069
|
finish_reason: oaiFinishReason
|
|
2896
3070
|
}) => {
|
|
3071
|
+
if (refusal) {
|
|
3072
|
+
throw new AxAIRefusalError(refusal, void 0, id);
|
|
3073
|
+
}
|
|
2897
3074
|
const finishReason = mapFinishReason2(oaiFinishReason);
|
|
2898
3075
|
const functionCalls = toolCalls?.map(({ id: _id, index: index2, function: { name, arguments: params } }) => {
|
|
2899
3076
|
if (typeof _id === "string" && typeof index2 === "number" && !sstate.indexIdMap[index2]) {
|
|
@@ -2911,9 +3088,10 @@ var AxAIOpenAIImpl = class {
|
|
|
2911
3088
|
}).filter((v) => v !== null);
|
|
2912
3089
|
return {
|
|
2913
3090
|
index,
|
|
2914
|
-
content,
|
|
3091
|
+
content: content ?? void 0,
|
|
2915
3092
|
role,
|
|
2916
3093
|
thought,
|
|
3094
|
+
annotations,
|
|
2917
3095
|
functionCalls,
|
|
2918
3096
|
finishReason,
|
|
2919
3097
|
id
|
|
@@ -4010,11 +4188,29 @@ var AxAIGoogleGeminiImpl = class {
|
|
|
4010
4188
|
result.finishReason = "stop";
|
|
4011
4189
|
break;
|
|
4012
4190
|
case "SAFETY":
|
|
4013
|
-
throw new
|
|
4191
|
+
throw new AxAIRefusalError(
|
|
4192
|
+
"Content was blocked due to safety settings",
|
|
4193
|
+
void 0,
|
|
4194
|
+
// model not available in candidate
|
|
4195
|
+
void 0
|
|
4196
|
+
// requestId not available
|
|
4197
|
+
);
|
|
4014
4198
|
case "RECITATION":
|
|
4015
|
-
throw new
|
|
4199
|
+
throw new AxAIRefusalError(
|
|
4200
|
+
"Content was blocked due to recitation policy",
|
|
4201
|
+
void 0,
|
|
4202
|
+
// model not available in candidate
|
|
4203
|
+
void 0
|
|
4204
|
+
// requestId not available
|
|
4205
|
+
);
|
|
4016
4206
|
case "MALFORMED_FUNCTION_CALL":
|
|
4017
|
-
throw new
|
|
4207
|
+
throw new AxAIRefusalError(
|
|
4208
|
+
"Function call was malformed and blocked",
|
|
4209
|
+
void 0,
|
|
4210
|
+
// model not available in candidate
|
|
4211
|
+
void 0
|
|
4212
|
+
// requestId not available
|
|
4213
|
+
);
|
|
4018
4214
|
}
|
|
4019
4215
|
if (!candidate.content || !candidate.content.parts) {
|
|
4020
4216
|
return result;
|
|
@@ -4623,6 +4819,18 @@ var AxAIOllama = class extends AxAIOpenAIBase {
|
|
|
4623
4819
|
};
|
|
4624
4820
|
|
|
4625
4821
|
// ai/openai/responses_api.ts
|
|
4822
|
+
var isOpenAIResponsesThinkingModel = (model) => {
|
|
4823
|
+
const thinkingModels = [
|
|
4824
|
+
"o1" /* O1 */,
|
|
4825
|
+
"o1-mini" /* O1Mini */,
|
|
4826
|
+
"o1-pro" /* O1Pro */,
|
|
4827
|
+
"o3" /* O3 */,
|
|
4828
|
+
"o3-mini" /* O3Mini */,
|
|
4829
|
+
"o3-pro" /* O3Pro */,
|
|
4830
|
+
"o4-mini" /* O4Mini */
|
|
4831
|
+
];
|
|
4832
|
+
return thinkingModels.includes(model);
|
|
4833
|
+
};
|
|
4626
4834
|
var AxAIOpenAIResponsesImpl = class {
|
|
4627
4835
|
constructor(config, streamingUsage, responsesReqUpdater) {
|
|
4628
4836
|
this.config = config;
|
|
@@ -4778,10 +4986,37 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
4778
4986
|
parameters: v.parameters ?? {}
|
|
4779
4987
|
})
|
|
4780
4988
|
);
|
|
4781
|
-
const includeFields =
|
|
4782
|
-
|
|
4783
|
-
|
|
4784
|
-
|
|
4989
|
+
const includeFields = (
|
|
4990
|
+
// | 'computer_call_output.output.image_url'
|
|
4991
|
+
// | 'reasoning.encrypted_content'
|
|
4992
|
+
// | 'code_interpreter_call.outputs'
|
|
4993
|
+
[]
|
|
4994
|
+
);
|
|
4995
|
+
const isThinkingModel = isOpenAIResponsesThinkingModel(model);
|
|
4996
|
+
let reasoningSummary = this.config.reasoningSummary;
|
|
4997
|
+
if (!config?.showThoughts) {
|
|
4998
|
+
reasoningSummary = void 0;
|
|
4999
|
+
} else if (!reasoningSummary) {
|
|
5000
|
+
reasoningSummary = "auto";
|
|
5001
|
+
}
|
|
5002
|
+
let reasoningEffort = this.config.reasoningEffort;
|
|
5003
|
+
if (config?.thinkingTokenBudget) {
|
|
5004
|
+
switch (config.thinkingTokenBudget) {
|
|
5005
|
+
case "none":
|
|
5006
|
+
reasoningEffort = void 0;
|
|
5007
|
+
break;
|
|
5008
|
+
case "minimal":
|
|
5009
|
+
reasoningEffort = "low";
|
|
5010
|
+
break;
|
|
5011
|
+
case "low":
|
|
5012
|
+
reasoningEffort = "medium";
|
|
5013
|
+
break;
|
|
5014
|
+
case "medium":
|
|
5015
|
+
case "high":
|
|
5016
|
+
case "highest":
|
|
5017
|
+
reasoningEffort = "high";
|
|
5018
|
+
break;
|
|
5019
|
+
}
|
|
4785
5020
|
}
|
|
4786
5021
|
let mutableReq = {
|
|
4787
5022
|
model,
|
|
@@ -4790,9 +5025,15 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
4790
5025
|
instructions: finalInstructions,
|
|
4791
5026
|
tools: tools?.length ? tools : void 0,
|
|
4792
5027
|
tool_choice: req.functionCall === "none" || req.functionCall === "auto" || req.functionCall === "required" ? req.functionCall : typeof req.functionCall === "object" && req.functionCall.function ? { type: "function", name: req.functionCall.function.name } : void 0,
|
|
4793
|
-
|
|
4794
|
-
|
|
4795
|
-
|
|
5028
|
+
// For thinking models, don't set these parameters as they're not supported
|
|
5029
|
+
...isThinkingModel ? {
|
|
5030
|
+
max_output_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? void 0
|
|
5031
|
+
} : {
|
|
5032
|
+
temperature: req.modelConfig?.temperature ?? this.config.temperature ?? void 0,
|
|
5033
|
+
top_p: req.modelConfig?.topP ?? this.config.topP ?? void 0,
|
|
5034
|
+
presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty ?? void 0,
|
|
5035
|
+
frequency_penalty: req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty ?? void 0
|
|
5036
|
+
},
|
|
4796
5037
|
stream: req.modelConfig?.stream ?? this.config.stream ?? false,
|
|
4797
5038
|
// Sourced from modelConfig or global config
|
|
4798
5039
|
// Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization
|
|
@@ -4801,7 +5042,12 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
4801
5042
|
metadata: void 0,
|
|
4802
5043
|
parallel_tool_calls: this.config.parallelToolCalls,
|
|
4803
5044
|
previous_response_id: void 0,
|
|
4804
|
-
|
|
5045
|
+
...reasoningEffort ? {
|
|
5046
|
+
reasoning: {
|
|
5047
|
+
effort: reasoningEffort,
|
|
5048
|
+
summary: reasoningSummary
|
|
5049
|
+
}
|
|
5050
|
+
} : {},
|
|
4805
5051
|
service_tier: this.config.serviceTier,
|
|
4806
5052
|
store: this.config.store,
|
|
4807
5053
|
text: void 0,
|
|
@@ -4892,7 +5138,7 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
4892
5138
|
switch (item.type) {
|
|
4893
5139
|
case "message":
|
|
4894
5140
|
currentResult.id = item.id;
|
|
4895
|
-
currentResult.content = contentToText(item.content);
|
|
5141
|
+
currentResult.content = contentToText(item.content, id);
|
|
4896
5142
|
currentResult.finishReason = item.status === "completed" ? "stop" : "content_filter";
|
|
4897
5143
|
break;
|
|
4898
5144
|
case "reasoning":
|
|
@@ -5065,7 +5311,10 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
5065
5311
|
switch (event.item.type) {
|
|
5066
5312
|
case "message":
|
|
5067
5313
|
baseResult.id = event.item.id;
|
|
5068
|
-
baseResult.content = contentToText(
|
|
5314
|
+
baseResult.content = contentToText(
|
|
5315
|
+
event.item.content,
|
|
5316
|
+
event.item.id
|
|
5317
|
+
);
|
|
5069
5318
|
break;
|
|
5070
5319
|
case "function_call":
|
|
5071
5320
|
baseResult.id = event.item.id;
|
|
@@ -5218,24 +5467,11 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
5218
5467
|
];
|
|
5219
5468
|
}
|
|
5220
5469
|
break;
|
|
5221
|
-
case "reasoning":
|
|
5222
|
-
{
|
|
5223
|
-
const reasoningItem = event.item;
|
|
5224
|
-
baseResult.id = event.item.id;
|
|
5225
|
-
if (reasoningItem.encrypted_content) {
|
|
5226
|
-
baseResult.thought = reasoningItem.encrypted_content;
|
|
5227
|
-
} else if (reasoningItem.summary) {
|
|
5228
|
-
baseResult.thought = reasoningItem.summary.map(
|
|
5229
|
-
(s2) => typeof s2 === "object" ? JSON.stringify(s2) : s2
|
|
5230
|
-
).join("\n");
|
|
5231
|
-
}
|
|
5232
|
-
}
|
|
5233
|
-
break;
|
|
5234
5470
|
}
|
|
5235
5471
|
break;
|
|
5236
5472
|
case "response.content_part.added":
|
|
5237
5473
|
baseResult.id = event.item_id;
|
|
5238
|
-
baseResult.content = contentToText([event.part]);
|
|
5474
|
+
baseResult.content = contentToText([event.part], event.item_id);
|
|
5239
5475
|
break;
|
|
5240
5476
|
case "response.output_text.delta":
|
|
5241
5477
|
baseResult.id = event.item_id;
|
|
@@ -5256,16 +5492,21 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
5256
5492
|
}
|
|
5257
5493
|
];
|
|
5258
5494
|
break;
|
|
5259
|
-
case
|
|
5260
|
-
|
|
5495
|
+
// case 'response.function_call_arguments.done':
|
|
5496
|
+
// // Function call arguments done - don't return function calls here
|
|
5497
|
+
// // The mergeFunctionCalls will handle combining name and arguments
|
|
5498
|
+
// baseResult.id = event.item_id
|
|
5499
|
+
// baseResult.finishReason = 'function_call'
|
|
5500
|
+
// break
|
|
5261
5501
|
case "response.reasoning_summary_text.delta":
|
|
5262
5502
|
baseResult.id = event.item_id;
|
|
5263
5503
|
baseResult.thought = event.delta;
|
|
5264
5504
|
break;
|
|
5265
|
-
case
|
|
5266
|
-
|
|
5267
|
-
|
|
5268
|
-
|
|
5505
|
+
// case 'response.reasoning_summary_text.done':
|
|
5506
|
+
// // Reasoning summary done
|
|
5507
|
+
// baseResult.id = event.item_id
|
|
5508
|
+
// baseResult.thought = event.text
|
|
5509
|
+
// break
|
|
5269
5510
|
// File search tool events
|
|
5270
5511
|
case "response.file_search_call.in_progress":
|
|
5271
5512
|
case "response.file_search_call.searching":
|
|
@@ -5359,10 +5600,6 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
5359
5600
|
baseResult.id = event.item.id;
|
|
5360
5601
|
baseResult.finishReason = "function_call";
|
|
5361
5602
|
break;
|
|
5362
|
-
case "reasoning":
|
|
5363
|
-
baseResult.id = event.item.id;
|
|
5364
|
-
baseResult.finishReason = "stop";
|
|
5365
|
-
break;
|
|
5366
5603
|
}
|
|
5367
5604
|
break;
|
|
5368
5605
|
case "response.completed":
|
|
@@ -5420,11 +5657,13 @@ var AxAIOpenAIResponsesImpl = class {
|
|
|
5420
5657
|
return [apiConfig, reqValue];
|
|
5421
5658
|
}
|
|
5422
5659
|
};
|
|
5423
|
-
var contentToText = (content) => {
|
|
5424
|
-
|
|
5425
|
-
|
|
5426
|
-
|
|
5427
|
-
|
|
5660
|
+
var contentToText = (content, responseId) => {
|
|
5661
|
+
const refusalContent = content.filter((c) => c.type === "refusal");
|
|
5662
|
+
if (refusalContent.length > 0) {
|
|
5663
|
+
const refusalMessage = refusalContent.map((c) => c.refusal).join("\n");
|
|
5664
|
+
throw new AxAIRefusalError(refusalMessage, void 0, responseId);
|
|
5665
|
+
}
|
|
5666
|
+
return content.filter((c) => c.type === "output_text").map((c) => c.text).join("\n");
|
|
5428
5667
|
};
|
|
5429
5668
|
|
|
5430
5669
|
// ai/openai/responses_api_base.ts
|
|
@@ -5488,7 +5727,7 @@ var AxAIOpenAIResponses = class extends AxAIOpenAIResponsesBase {
|
|
|
5488
5727
|
if (!apiKey || apiKey === "") {
|
|
5489
5728
|
throw new Error("OpenAI API key not set");
|
|
5490
5729
|
}
|
|
5491
|
-
modelInfo = [...
|
|
5730
|
+
modelInfo = [...axModelInfoOpenAIResponses, ...modelInfo ?? []];
|
|
5492
5731
|
const supportFor = (model) => {
|
|
5493
5732
|
const mi = getModelInfo({
|
|
5494
5733
|
model,
|
|
@@ -6008,213 +6247,279 @@ var import_api22 = require("@opentelemetry/api");
|
|
|
6008
6247
|
|
|
6009
6248
|
// ai/validate.ts
|
|
6010
6249
|
function axValidateChatRequestMessage(item) {
|
|
6250
|
+
const value = (v) => JSON.stringify(v, null, 2);
|
|
6011
6251
|
if (!item) {
|
|
6012
|
-
throw new Error(
|
|
6252
|
+
throw new Error(
|
|
6253
|
+
`Chat request message item cannot be null or undefined, received: ${value(item)}`
|
|
6254
|
+
);
|
|
6013
6255
|
}
|
|
6014
|
-
|
|
6015
|
-
|
|
6256
|
+
const role = item?.role;
|
|
6257
|
+
if (!role) {
|
|
6258
|
+
throw new Error(
|
|
6259
|
+
`Chat request message must have a role, received: ${value(role)}`
|
|
6260
|
+
);
|
|
6016
6261
|
}
|
|
6017
|
-
switch (
|
|
6018
|
-
case "system":
|
|
6019
|
-
|
|
6262
|
+
switch (role) {
|
|
6263
|
+
case "system": {
|
|
6264
|
+
const systemItem = item;
|
|
6265
|
+
if (!systemItem.content || systemItem.content.trim() === "") {
|
|
6020
6266
|
throw new Error(
|
|
6021
|
-
|
|
6267
|
+
`System message content cannot be empty or whitespace-only, received: ${value(systemItem.content)}`
|
|
6022
6268
|
);
|
|
6023
6269
|
}
|
|
6024
6270
|
break;
|
|
6025
|
-
|
|
6026
|
-
|
|
6027
|
-
|
|
6271
|
+
}
|
|
6272
|
+
case "user": {
|
|
6273
|
+
const userItem = item;
|
|
6274
|
+
if (!userItem.content) {
|
|
6275
|
+
throw new Error(
|
|
6276
|
+
`User message content cannot be undefined, received: ${value(userItem.content)}`
|
|
6277
|
+
);
|
|
6028
6278
|
}
|
|
6029
|
-
if (typeof
|
|
6030
|
-
if (
|
|
6279
|
+
if (typeof userItem.content === "string") {
|
|
6280
|
+
if (userItem.content.trim() === "") {
|
|
6031
6281
|
throw new Error(
|
|
6032
|
-
|
|
6282
|
+
`User message content cannot be empty or whitespace-only, received: ${value(userItem.content)}`
|
|
6033
6283
|
);
|
|
6034
6284
|
}
|
|
6035
|
-
} else if (Array.isArray(
|
|
6036
|
-
if (
|
|
6037
|
-
throw new Error(
|
|
6285
|
+
} else if (Array.isArray(userItem.content)) {
|
|
6286
|
+
if (userItem.content.length === 0) {
|
|
6287
|
+
throw new Error(
|
|
6288
|
+
`User message content array cannot be empty, received: ${value(userItem.content)}`
|
|
6289
|
+
);
|
|
6038
6290
|
}
|
|
6039
|
-
for (let index = 0; index <
|
|
6040
|
-
const contentItem =
|
|
6291
|
+
for (let index = 0; index < userItem.content.length; index++) {
|
|
6292
|
+
const contentItem = userItem.content[index];
|
|
6041
6293
|
if (!contentItem || typeof contentItem !== "object") {
|
|
6042
6294
|
throw new Error(
|
|
6043
|
-
`User message content item at index ${index} must be an object`
|
|
6295
|
+
`User message content item at index ${index} must be an object, received: ${value(contentItem)}`
|
|
6044
6296
|
);
|
|
6045
6297
|
}
|
|
6046
|
-
|
|
6298
|
+
const contentType = contentItem?.type;
|
|
6299
|
+
if (!contentType) {
|
|
6047
6300
|
throw new Error(
|
|
6048
|
-
`User message content item at index ${index} must have a type`
|
|
6301
|
+
`User message content item at index ${index} must have a type, received: ${value(contentType)}`
|
|
6049
6302
|
);
|
|
6050
6303
|
}
|
|
6051
|
-
switch (
|
|
6052
|
-
case "text":
|
|
6053
|
-
|
|
6304
|
+
switch (contentType) {
|
|
6305
|
+
case "text": {
|
|
6306
|
+
const textItem = contentItem;
|
|
6307
|
+
if (!textItem.text || textItem.text.trim() === "") {
|
|
6054
6308
|
throw new Error(
|
|
6055
|
-
`User message text content at index ${index} cannot be empty or whitespace-only`
|
|
6309
|
+
`User message text content at index ${index} cannot be empty or whitespace-only, received: ${value(textItem.text)}`
|
|
6056
6310
|
);
|
|
6057
6311
|
}
|
|
6058
6312
|
break;
|
|
6059
|
-
|
|
6060
|
-
|
|
6313
|
+
}
|
|
6314
|
+
case "image": {
|
|
6315
|
+
const imageItem = contentItem;
|
|
6316
|
+
if (!imageItem.image || imageItem.image.trim() === "") {
|
|
6061
6317
|
throw new Error(
|
|
6062
|
-
`User message image content at index ${index} cannot be empty`
|
|
6318
|
+
`User message image content at index ${index} cannot be empty, received: ${value(imageItem.image)}`
|
|
6063
6319
|
);
|
|
6064
6320
|
}
|
|
6065
|
-
if (!
|
|
6321
|
+
if (!imageItem.mimeType || imageItem.mimeType.trim() === "") {
|
|
6066
6322
|
throw new Error(
|
|
6067
|
-
`User message image content at index ${index} must have a mimeType`
|
|
6323
|
+
`User message image content at index ${index} must have a mimeType, received: ${value(imageItem.mimeType)}`
|
|
6068
6324
|
);
|
|
6069
6325
|
}
|
|
6070
6326
|
break;
|
|
6071
|
-
|
|
6072
|
-
|
|
6327
|
+
}
|
|
6328
|
+
case "audio": {
|
|
6329
|
+
const audioItem = contentItem;
|
|
6330
|
+
if (!audioItem.data || audioItem.data.trim() === "") {
|
|
6073
6331
|
throw new Error(
|
|
6074
|
-
`User message audio content at index ${index} cannot be empty`
|
|
6332
|
+
`User message audio content at index ${index} cannot be empty, received: ${value(audioItem.data)}`
|
|
6075
6333
|
);
|
|
6076
6334
|
}
|
|
6077
6335
|
break;
|
|
6336
|
+
}
|
|
6078
6337
|
default:
|
|
6079
6338
|
throw new Error(
|
|
6080
|
-
`User message content item at index ${index} has unsupported type: ${
|
|
6339
|
+
`User message content item at index ${index} has unsupported type: ${value(contentType)}`
|
|
6081
6340
|
);
|
|
6082
6341
|
}
|
|
6083
6342
|
}
|
|
6084
6343
|
} else {
|
|
6085
6344
|
throw new Error(
|
|
6086
|
-
|
|
6345
|
+
`User message content must be a string or array of content objects, received: ${value(userItem.content)}`
|
|
6087
6346
|
);
|
|
6088
6347
|
}
|
|
6089
6348
|
break;
|
|
6090
|
-
|
|
6091
|
-
|
|
6349
|
+
}
|
|
6350
|
+
case "assistant": {
|
|
6351
|
+
const assistantItem = item;
|
|
6352
|
+
if (!assistantItem.content && !assistantItem.functionCalls) {
|
|
6092
6353
|
throw new Error(
|
|
6093
|
-
|
|
6354
|
+
`Assistant message must have either content or function calls, received content: ${value(assistantItem.content)}, functionCalls: ${value(assistantItem.functionCalls)}`
|
|
6094
6355
|
);
|
|
6095
6356
|
}
|
|
6096
|
-
if (
|
|
6097
|
-
throw new Error(
|
|
6357
|
+
if (assistantItem.content && typeof assistantItem.content !== "string") {
|
|
6358
|
+
throw new Error(
|
|
6359
|
+
`Assistant message content must be a string, received: ${value(assistantItem.content)}`
|
|
6360
|
+
);
|
|
6098
6361
|
}
|
|
6099
|
-
if (
|
|
6100
|
-
throw new Error(
|
|
6362
|
+
if (assistantItem.functionCalls && !Array.isArray(assistantItem.functionCalls)) {
|
|
6363
|
+
throw new Error(
|
|
6364
|
+
`Assistant message function calls must be an array, received: ${value(assistantItem.functionCalls)}`
|
|
6365
|
+
);
|
|
6101
6366
|
}
|
|
6102
6367
|
break;
|
|
6103
|
-
|
|
6104
|
-
|
|
6105
|
-
|
|
6368
|
+
}
|
|
6369
|
+
case "function": {
|
|
6370
|
+
const functionItem = item;
|
|
6371
|
+
if (!functionItem.functionId || functionItem.functionId.trim() === "") {
|
|
6372
|
+
throw new Error(
|
|
6373
|
+
`Function message must have a non-empty functionId, received: ${value(functionItem.functionId)}`
|
|
6374
|
+
);
|
|
6106
6375
|
}
|
|
6107
|
-
if (
|
|
6108
|
-
throw new Error(
|
|
6376
|
+
if (functionItem.result === void 0 || functionItem.result === null) {
|
|
6377
|
+
throw new Error(
|
|
6378
|
+
`Function message must have a result, received: ${value(functionItem.result)}`
|
|
6379
|
+
);
|
|
6109
6380
|
}
|
|
6110
|
-
if (typeof
|
|
6111
|
-
throw new Error(
|
|
6381
|
+
if (typeof functionItem.result !== "string") {
|
|
6382
|
+
throw new Error(
|
|
6383
|
+
`Function message result must be a string, received: ${value(functionItem.result)}`
|
|
6384
|
+
);
|
|
6112
6385
|
}
|
|
6113
6386
|
break;
|
|
6387
|
+
}
|
|
6114
6388
|
default:
|
|
6115
|
-
throw new Error(
|
|
6116
|
-
`Unsupported message role: ${item.role}`
|
|
6117
|
-
);
|
|
6389
|
+
throw new Error(`Unsupported message role: ${value(role)}`);
|
|
6118
6390
|
}
|
|
6119
6391
|
}
|
|
6120
6392
|
function axValidateChatResponseResult(results) {
|
|
6393
|
+
const value = (v) => JSON.stringify(v, null, 2);
|
|
6121
6394
|
const resultsArray = Array.isArray(results) ? results : [results];
|
|
6122
6395
|
if (resultsArray.length === 0) {
|
|
6123
|
-
throw new Error(
|
|
6396
|
+
throw new Error(
|
|
6397
|
+
`Chat response results cannot be empty, received: ${value(resultsArray)}`
|
|
6398
|
+
);
|
|
6124
6399
|
}
|
|
6125
6400
|
for (let arrayIndex = 0; arrayIndex < resultsArray.length; arrayIndex++) {
|
|
6126
6401
|
const result = resultsArray[arrayIndex];
|
|
6127
6402
|
if (!result) {
|
|
6128
6403
|
throw new Error(
|
|
6129
|
-
`Chat response result at index ${arrayIndex} cannot be null or undefined`
|
|
6404
|
+
`Chat response result at index ${arrayIndex} cannot be null or undefined, received: ${value(result)}`
|
|
6130
6405
|
);
|
|
6131
6406
|
}
|
|
6132
6407
|
if (typeof result.index !== "number") {
|
|
6133
6408
|
throw new Error(
|
|
6134
|
-
`Chat response result at index ${arrayIndex} must have a numeric index`
|
|
6409
|
+
`Chat response result at index ${arrayIndex} must have a numeric index, received: ${value(result.index)}`
|
|
6135
6410
|
);
|
|
6136
6411
|
}
|
|
6137
6412
|
if (result.index < 0) {
|
|
6138
6413
|
throw new Error(
|
|
6139
|
-
`Chat response result at index ${arrayIndex} must have a non-negative index`
|
|
6414
|
+
`Chat response result at index ${arrayIndex} must have a non-negative index, received: ${value(result.index)}`
|
|
6140
6415
|
);
|
|
6141
6416
|
}
|
|
6142
6417
|
if (!result.content && !result.thought && !result.functionCalls && !result.finishReason) {
|
|
6143
6418
|
throw new Error(
|
|
6144
|
-
`Chat response result at index ${arrayIndex} must have at least one of: content, thought, functionCalls, or finishReason`
|
|
6419
|
+
`Chat response result at index ${arrayIndex} must have at least one of: content, thought, functionCalls, or finishReason, received: ${value({ content: result.content, thought: result.thought, functionCalls: result.functionCalls, finishReason: result.finishReason })}`
|
|
6145
6420
|
);
|
|
6146
6421
|
}
|
|
6147
6422
|
if (result.content !== void 0 && typeof result.content !== "string") {
|
|
6148
6423
|
throw new Error(
|
|
6149
|
-
`Chat response result content at index ${arrayIndex} must be a string`
|
|
6424
|
+
`Chat response result content at index ${arrayIndex} must be a string, received: ${value(result.content)}`
|
|
6150
6425
|
);
|
|
6151
6426
|
}
|
|
6152
6427
|
if (result.thought !== void 0 && typeof result.thought !== "string") {
|
|
6153
6428
|
throw new Error(
|
|
6154
|
-
`Chat response result thought at index ${arrayIndex} must be a string`
|
|
6429
|
+
`Chat response result thought at index ${arrayIndex} must be a string, received: ${value(result.thought)}`
|
|
6155
6430
|
);
|
|
6156
6431
|
}
|
|
6157
6432
|
if (result.name !== void 0) {
|
|
6158
6433
|
if (typeof result.name !== "string") {
|
|
6159
6434
|
throw new Error(
|
|
6160
|
-
`Chat response result name at index ${arrayIndex} must be a string`
|
|
6435
|
+
`Chat response result name at index ${arrayIndex} must be a string, received: ${value(result.name)}`
|
|
6161
6436
|
);
|
|
6162
6437
|
}
|
|
6163
6438
|
if (result.name.trim() === "") {
|
|
6164
6439
|
throw new Error(
|
|
6165
|
-
`Chat response result name at index ${arrayIndex} cannot be empty or whitespace-only`
|
|
6440
|
+
`Chat response result name at index ${arrayIndex} cannot be empty or whitespace-only, received: ${value(result.name)}`
|
|
6441
|
+
);
|
|
6442
|
+
}
|
|
6443
|
+
}
|
|
6444
|
+
if (result.annotations !== void 0) {
|
|
6445
|
+
if (!Array.isArray(result.annotations)) {
|
|
6446
|
+
throw new Error(
|
|
6447
|
+
`Chat response result annotations at index ${arrayIndex} must be an array, received: ${value(result.annotations)}`
|
|
6166
6448
|
);
|
|
6167
6449
|
}
|
|
6450
|
+
for (let i = 0; i < result.annotations.length; i++) {
|
|
6451
|
+
const annotation = result.annotations[i];
|
|
6452
|
+
if (!annotation || typeof annotation !== "object") {
|
|
6453
|
+
throw new Error(
|
|
6454
|
+
`Chat response result annotation at index ${arrayIndex}[${i}] must be an object, received: ${value(annotation)}`
|
|
6455
|
+
);
|
|
6456
|
+
}
|
|
6457
|
+
if (annotation.type !== "url_citation") {
|
|
6458
|
+
throw new Error(
|
|
6459
|
+
`Chat response result annotation at index ${arrayIndex}[${i}] must have type 'url_citation', received: ${value(annotation.type)}`
|
|
6460
|
+
);
|
|
6461
|
+
}
|
|
6462
|
+
if (!annotation.url_citation || typeof annotation.url_citation !== "object") {
|
|
6463
|
+
throw new Error(
|
|
6464
|
+
`Chat response result annotation at index ${arrayIndex}[${i}] must have a valid url_citation object, received: ${value(annotation.url_citation)}`
|
|
6465
|
+
);
|
|
6466
|
+
}
|
|
6467
|
+
if (typeof annotation.url_citation.url !== "string") {
|
|
6468
|
+
throw new Error(
|
|
6469
|
+
`Chat response result annotation at index ${arrayIndex}[${i}] url_citation.url must be a string, received: ${value(annotation.url_citation.url)}`
|
|
6470
|
+
);
|
|
6471
|
+
}
|
|
6472
|
+
}
|
|
6168
6473
|
}
|
|
6169
6474
|
if (result.id !== void 0) {
|
|
6170
6475
|
if (typeof result.id !== "string") {
|
|
6171
6476
|
throw new Error(
|
|
6172
|
-
`Chat response result id at index ${arrayIndex} must be a string`
|
|
6477
|
+
`Chat response result id at index ${arrayIndex} must be a string, received: ${value(result.id)}`
|
|
6173
6478
|
);
|
|
6174
6479
|
}
|
|
6175
6480
|
if (result.id.trim() === "") {
|
|
6176
6481
|
throw new Error(
|
|
6177
|
-
`Chat response result id at index ${arrayIndex} cannot be empty or whitespace-only`
|
|
6482
|
+
`Chat response result id at index ${arrayIndex} cannot be empty or whitespace-only, received: ${value(result.id)}`
|
|
6178
6483
|
);
|
|
6179
6484
|
}
|
|
6180
6485
|
}
|
|
6181
6486
|
if (result.functionCalls !== void 0) {
|
|
6182
6487
|
if (!Array.isArray(result.functionCalls)) {
|
|
6183
6488
|
throw new Error(
|
|
6184
|
-
`Chat response result functionCalls at index ${arrayIndex} must be an array`
|
|
6489
|
+
`Chat response result functionCalls at index ${arrayIndex} must be an array, received: ${value(result.functionCalls)}`
|
|
6185
6490
|
);
|
|
6186
6491
|
}
|
|
6187
6492
|
for (let callIndex = 0; callIndex < result.functionCalls.length; callIndex++) {
|
|
6188
6493
|
const functionCall = result.functionCalls[callIndex];
|
|
6189
6494
|
if (!functionCall) {
|
|
6190
6495
|
throw new Error(
|
|
6191
|
-
`Function call at index ${callIndex} in result ${arrayIndex} cannot be null or undefined`
|
|
6496
|
+
`Function call at index ${callIndex} in result ${arrayIndex} cannot be null or undefined, received: ${value(functionCall)}`
|
|
6192
6497
|
);
|
|
6193
6498
|
}
|
|
6194
6499
|
if (!functionCall.id || typeof functionCall.id !== "string" || functionCall.id.trim() === "") {
|
|
6195
6500
|
throw new Error(
|
|
6196
|
-
`Function call at index ${callIndex} in result ${arrayIndex} must have a non-empty string id`
|
|
6501
|
+
`Function call at index ${callIndex} in result ${arrayIndex} must have a non-empty string id, received: ${value(functionCall.id)}`
|
|
6197
6502
|
);
|
|
6198
6503
|
}
|
|
6199
6504
|
if (functionCall.type !== "function") {
|
|
6200
6505
|
throw new Error(
|
|
6201
|
-
`Function call at index ${callIndex} in result ${arrayIndex} must have type 'function'`
|
|
6506
|
+
`Function call at index ${callIndex} in result ${arrayIndex} must have type 'function', received: ${value(functionCall.type)}`
|
|
6202
6507
|
);
|
|
6203
6508
|
}
|
|
6204
6509
|
if (!functionCall.function) {
|
|
6205
6510
|
throw new Error(
|
|
6206
|
-
`Function call at index ${callIndex} in result ${arrayIndex} must have a function object`
|
|
6511
|
+
`Function call at index ${callIndex} in result ${arrayIndex} must have a function object, received: ${value(functionCall.function)}`
|
|
6207
6512
|
);
|
|
6208
6513
|
}
|
|
6209
6514
|
if (!functionCall.function.name || typeof functionCall.function.name !== "string" || functionCall.function.name.trim() === "") {
|
|
6210
6515
|
throw new Error(
|
|
6211
|
-
`Function call at index ${callIndex} in result ${arrayIndex} must have a non-empty function name`
|
|
6516
|
+
`Function call at index ${callIndex} in result ${arrayIndex} must have a non-empty function name, received: ${value(functionCall.function.name)}`
|
|
6212
6517
|
);
|
|
6213
6518
|
}
|
|
6214
6519
|
if (functionCall.function.params !== void 0) {
|
|
6215
6520
|
if (typeof functionCall.function.params !== "string" && typeof functionCall.function.params !== "object") {
|
|
6216
6521
|
throw new Error(
|
|
6217
|
-
`Function call params at index ${callIndex} in result ${arrayIndex} must be a string or object`
|
|
6522
|
+
`Function call params at index ${callIndex} in result ${arrayIndex} must be a string or object, received: ${value(functionCall.function.params)}`
|
|
6218
6523
|
);
|
|
6219
6524
|
}
|
|
6220
6525
|
}
|
|
@@ -6230,7 +6535,7 @@ function axValidateChatResponseResult(results) {
|
|
|
6230
6535
|
];
|
|
6231
6536
|
if (!validFinishReasons.includes(result.finishReason)) {
|
|
6232
6537
|
throw new Error(
|
|
6233
|
-
`Chat response result finishReason at index ${arrayIndex} must be one of: ${validFinishReasons.join(", ")}`
|
|
6538
|
+
`Chat response result finishReason at index ${arrayIndex} must be one of: ${validFinishReasons.join(", ")}, received: ${value(result.finishReason)}`
|
|
6234
6539
|
);
|
|
6235
6540
|
}
|
|
6236
6541
|
}
|
|
@@ -9002,7 +9307,7 @@ async function* processStreamingResponse({
|
|
|
9002
9307
|
usage.push(v.modelUsage);
|
|
9003
9308
|
}
|
|
9004
9309
|
for (const result of v.results) {
|
|
9005
|
-
if (result.content === "" && (!result.functionCalls || result.functionCalls.length === 0)) {
|
|
9310
|
+
if (result.content === "" && (!result.thought || result.thought === "") && (!result.functionCalls || result.functionCalls.length === 0)) {
|
|
9006
9311
|
continue;
|
|
9007
9312
|
}
|
|
9008
9313
|
const state = states.find((s2) => s2.index === result.index);
|
|
@@ -9714,7 +10019,7 @@ var toFieldType = (type) => {
|
|
|
9714
10019
|
case "number":
|
|
9715
10020
|
return "number";
|
|
9716
10021
|
case "boolean":
|
|
9717
|
-
return "boolean";
|
|
10022
|
+
return "boolean (true or false)";
|
|
9718
10023
|
case "date":
|
|
9719
10024
|
return 'date ("YYYY-MM-DD" format)';
|
|
9720
10025
|
case "datetime":
|
|
@@ -12872,6 +13177,444 @@ var AxDockerSession = class {
|
|
|
12872
13177
|
}
|
|
12873
13178
|
};
|
|
12874
13179
|
|
|
13180
|
+
// flow/flow.ts
|
|
13181
|
+
var AxFlow = class extends AxProgramWithSignature {
|
|
13182
|
+
nodes = /* @__PURE__ */ new Map();
|
|
13183
|
+
flowDefinition = [];
|
|
13184
|
+
nodeGenerators = /* @__PURE__ */ new Map();
|
|
13185
|
+
loopStack = [];
|
|
13186
|
+
stepLabels = /* @__PURE__ */ new Map();
|
|
13187
|
+
branchContext = null;
|
|
13188
|
+
constructor(signature = "userInput:string -> flowOutput:string") {
|
|
13189
|
+
super(signature);
|
|
13190
|
+
}
|
|
13191
|
+
/**
|
|
13192
|
+
* Declares a reusable computational node and its input/output signature.
|
|
13193
|
+
* Returns a new AxFlow type that tracks this node in the TNodes registry.
|
|
13194
|
+
*
|
|
13195
|
+
* @param name - The name of the node
|
|
13196
|
+
* @param signature - Signature string in the same format as AxSignature
|
|
13197
|
+
* @param options - Optional program forward options (same as AxGen)
|
|
13198
|
+
* @returns New AxFlow instance with updated TNodes type
|
|
13199
|
+
*
|
|
13200
|
+
* @example
|
|
13201
|
+
* ```typescript
|
|
13202
|
+
* flow.node('summarizer', 'text:string -> summary:string')
|
|
13203
|
+
* flow.node('analyzer', 'text:string -> analysis:string, confidence:number', { debug: true })
|
|
13204
|
+
* ```
|
|
13205
|
+
*/
|
|
13206
|
+
node(name, signature, options) {
|
|
13207
|
+
if (!signature) {
|
|
13208
|
+
throw new Error(
|
|
13209
|
+
`Invalid signature for node '${name}': signature cannot be empty`
|
|
13210
|
+
);
|
|
13211
|
+
}
|
|
13212
|
+
this.nodes.set(name, {
|
|
13213
|
+
inputs: {},
|
|
13214
|
+
outputs: {}
|
|
13215
|
+
});
|
|
13216
|
+
this.nodeGenerators.set(name, new AxGen(signature, options));
|
|
13217
|
+
return this;
|
|
13218
|
+
}
|
|
13219
|
+
/**
|
|
13220
|
+
* Applies a synchronous transformation to the state object.
|
|
13221
|
+
* Returns a new AxFlow type with the evolved state.
|
|
13222
|
+
*
|
|
13223
|
+
* @param transform - Function that takes the current state and returns a new state
|
|
13224
|
+
* @returns New AxFlow instance with updated TState type
|
|
13225
|
+
*
|
|
13226
|
+
* @example
|
|
13227
|
+
* ```typescript
|
|
13228
|
+
* flow.map(state => ({ ...state, processedText: state.text.toLowerCase() }))
|
|
13229
|
+
* ```
|
|
13230
|
+
*/
|
|
13231
|
+
map(transform) {
|
|
13232
|
+
const step = (state) => {
|
|
13233
|
+
return transform(state);
|
|
13234
|
+
};
|
|
13235
|
+
if (this.branchContext?.currentBranchValue !== void 0) {
|
|
13236
|
+
const currentBranch = this.branchContext.branches.get(
|
|
13237
|
+
this.branchContext.currentBranchValue
|
|
13238
|
+
) || [];
|
|
13239
|
+
currentBranch.push(step);
|
|
13240
|
+
this.branchContext.branches.set(
|
|
13241
|
+
this.branchContext.currentBranchValue,
|
|
13242
|
+
currentBranch
|
|
13243
|
+
);
|
|
13244
|
+
} else {
|
|
13245
|
+
this.flowDefinition.push(step);
|
|
13246
|
+
}
|
|
13247
|
+
return this;
|
|
13248
|
+
}
|
|
13249
|
+
/**
|
|
13250
|
+
* Labels a step for later reference (useful for feedback loops).
|
|
13251
|
+
*
|
|
13252
|
+
* @param label - The label to assign to the current step position
|
|
13253
|
+
* @returns this (for chaining, no type change)
|
|
13254
|
+
*
|
|
13255
|
+
* @example
|
|
13256
|
+
* ```typescript
|
|
13257
|
+
* flow.label('retry-point')
|
|
13258
|
+
* .execute('queryGen', ...)
|
|
13259
|
+
* ```
|
|
13260
|
+
*/
|
|
13261
|
+
label(label) {
|
|
13262
|
+
if (this.branchContext?.currentBranchValue !== void 0) {
|
|
13263
|
+
throw new Error("Cannot create labels inside branch blocks");
|
|
13264
|
+
}
|
|
13265
|
+
this.stepLabels.set(label, this.flowDefinition.length);
|
|
13266
|
+
return this;
|
|
13267
|
+
}
|
|
13268
|
+
/**
|
|
13269
|
+
* Executes a previously defined node with full type safety.
|
|
13270
|
+
* The node name must exist in TNodes, and the mapping function is typed based on the node's signature.
|
|
13271
|
+
*
|
|
13272
|
+
* @param nodeName - The name of the node to execute (must exist in TNodes)
|
|
13273
|
+
* @param mapping - Typed function that takes the current state and returns the input for the node
|
|
13274
|
+
* @param dynamicContext - Optional object to override the AI service or options for this specific step
|
|
13275
|
+
* @returns New AxFlow instance with TState augmented with the node's result
|
|
13276
|
+
*
|
|
13277
|
+
* @example
|
|
13278
|
+
* ```typescript
|
|
13279
|
+
* flow.execute('summarizer', state => ({ text: state.originalText }), { ai: cheapAI })
|
|
13280
|
+
* ```
|
|
13281
|
+
*/
|
|
13282
|
+
execute(nodeName, mapping, dynamicContext) {
|
|
13283
|
+
if (!this.nodes.has(nodeName)) {
|
|
13284
|
+
throw new Error(
|
|
13285
|
+
`Node '${nodeName}' not found. Make sure to define it with .node() first.`
|
|
13286
|
+
);
|
|
13287
|
+
}
|
|
13288
|
+
const nodeGenerator = this.nodeGenerators.get(nodeName);
|
|
13289
|
+
if (!nodeGenerator) {
|
|
13290
|
+
throw new Error(`Node generator for '${nodeName}' not found.`);
|
|
13291
|
+
}
|
|
13292
|
+
const step = async (state, context3) => {
|
|
13293
|
+
const ai = dynamicContext?.ai ?? context3.mainAi;
|
|
13294
|
+
const options = dynamicContext?.options ?? context3.mainOptions;
|
|
13295
|
+
const nodeInputs = mapping(state);
|
|
13296
|
+
const result = await nodeGenerator.forward(ai, nodeInputs, options);
|
|
13297
|
+
return {
|
|
13298
|
+
...state,
|
|
13299
|
+
[`${nodeName}Result`]: result
|
|
13300
|
+
};
|
|
13301
|
+
};
|
|
13302
|
+
if (this.branchContext?.currentBranchValue !== void 0) {
|
|
13303
|
+
const currentBranch = this.branchContext.branches.get(
|
|
13304
|
+
this.branchContext.currentBranchValue
|
|
13305
|
+
) || [];
|
|
13306
|
+
currentBranch.push(step);
|
|
13307
|
+
this.branchContext.branches.set(
|
|
13308
|
+
this.branchContext.currentBranchValue,
|
|
13309
|
+
currentBranch
|
|
13310
|
+
);
|
|
13311
|
+
} else {
|
|
13312
|
+
this.flowDefinition.push(step);
|
|
13313
|
+
}
|
|
13314
|
+
return this;
|
|
13315
|
+
}
|
|
13316
|
+
/**
|
|
13317
|
+
* Starts a conditional branch based on a predicate function.
|
|
13318
|
+
*
|
|
13319
|
+
* @param predicate - Function that takes state and returns a value to branch on
|
|
13320
|
+
* @returns this (for chaining)
|
|
13321
|
+
*
|
|
13322
|
+
* @example
|
|
13323
|
+
* ```typescript
|
|
13324
|
+
* flow.branch(state => state.qualityResult.needsMoreInfo)
|
|
13325
|
+
* .when(true)
|
|
13326
|
+
* .execute('queryGen', ...)
|
|
13327
|
+
* .when(false)
|
|
13328
|
+
* .execute('answer', ...)
|
|
13329
|
+
* .merge()
|
|
13330
|
+
* ```
|
|
13331
|
+
*/
|
|
13332
|
+
branch(predicate) {
|
|
13333
|
+
if (this.branchContext) {
|
|
13334
|
+
throw new Error("Nested branches are not supported");
|
|
13335
|
+
}
|
|
13336
|
+
this.branchContext = {
|
|
13337
|
+
predicate: (state) => predicate(state),
|
|
13338
|
+
branches: /* @__PURE__ */ new Map(),
|
|
13339
|
+
currentBranchValue: void 0
|
|
13340
|
+
};
|
|
13341
|
+
return this;
|
|
13342
|
+
}
|
|
13343
|
+
/**
|
|
13344
|
+
* Defines a branch case for the current branch context.
|
|
13345
|
+
*
|
|
13346
|
+
* @param value - The value to match against the branch predicate result
|
|
13347
|
+
* @returns this (for chaining)
|
|
13348
|
+
*/
|
|
13349
|
+
when(value) {
|
|
13350
|
+
if (!this.branchContext) {
|
|
13351
|
+
throw new Error("when() called without matching branch()");
|
|
13352
|
+
}
|
|
13353
|
+
this.branchContext.currentBranchValue = value;
|
|
13354
|
+
this.branchContext.branches.set(value, []);
|
|
13355
|
+
return this;
|
|
13356
|
+
}
|
|
13357
|
+
/**
|
|
13358
|
+
* Ends the current branch and merges all branch paths back into the main flow.
|
|
13359
|
+
*
|
|
13360
|
+
* @returns this (for chaining)
|
|
13361
|
+
*/
|
|
13362
|
+
merge() {
|
|
13363
|
+
if (!this.branchContext) {
|
|
13364
|
+
throw new Error("merge() called without matching branch()");
|
|
13365
|
+
}
|
|
13366
|
+
const branchContext = this.branchContext;
|
|
13367
|
+
this.branchContext = null;
|
|
13368
|
+
this.flowDefinition.push(async (state, context3) => {
|
|
13369
|
+
const branchValue = branchContext.predicate(state);
|
|
13370
|
+
const branchSteps = branchContext.branches.get(branchValue);
|
|
13371
|
+
if (!branchSteps) {
|
|
13372
|
+
return state;
|
|
13373
|
+
}
|
|
13374
|
+
let currentState = state;
|
|
13375
|
+
for (const step of branchSteps) {
|
|
13376
|
+
currentState = await step(currentState, context3);
|
|
13377
|
+
}
|
|
13378
|
+
return currentState;
|
|
13379
|
+
});
|
|
13380
|
+
return this;
|
|
13381
|
+
}
|
|
13382
|
+
/**
|
|
13383
|
+
* Executes multiple operations in parallel and merges their results.
|
|
13384
|
+
* Both typed and legacy untyped branches are supported.
|
|
13385
|
+
*
|
|
13386
|
+
* @param branches - Array of functions that define parallel operations
|
|
13387
|
+
* @returns Object with merge method for combining results
|
|
13388
|
+
*
|
|
13389
|
+
* @example
|
|
13390
|
+
* ```typescript
|
|
13391
|
+
* flow.parallel([
|
|
13392
|
+
* subFlow => subFlow.execute('retrieve1', state => ({ query: state.query1 })),
|
|
13393
|
+
* subFlow => subFlow.execute('retrieve2', state => ({ query: state.query2 })),
|
|
13394
|
+
* subFlow => subFlow.execute('retrieve3', state => ({ query: state.query3 }))
|
|
13395
|
+
* ]).merge('documents', (docs1, docs2, docs3) => [...docs1, ...docs2, ...docs3])
|
|
13396
|
+
* ```
|
|
13397
|
+
*/
|
|
13398
|
+
parallel(branches) {
|
|
13399
|
+
const parallelStep = async (state, context3) => {
|
|
13400
|
+
const promises = branches.map(async (branchFn) => {
|
|
13401
|
+
const subContext = new AxFlowSubContextImpl(this.nodeGenerators);
|
|
13402
|
+
const populatedSubContext = branchFn(
|
|
13403
|
+
subContext
|
|
13404
|
+
);
|
|
13405
|
+
return await populatedSubContext.executeSteps(state, context3);
|
|
13406
|
+
});
|
|
13407
|
+
const results = await Promise.all(promises);
|
|
13408
|
+
return {
|
|
13409
|
+
...state,
|
|
13410
|
+
_parallelResults: results
|
|
13411
|
+
};
|
|
13412
|
+
};
|
|
13413
|
+
this.flowDefinition.push(parallelStep);
|
|
13414
|
+
return {
|
|
13415
|
+
merge: (resultKey, mergeFunction) => {
|
|
13416
|
+
this.flowDefinition.push((state) => {
|
|
13417
|
+
const results = state._parallelResults;
|
|
13418
|
+
if (!Array.isArray(results)) {
|
|
13419
|
+
throw new Error("No parallel results found for merge");
|
|
13420
|
+
}
|
|
13421
|
+
const mergedValue = mergeFunction(...results);
|
|
13422
|
+
const newState = { ...state };
|
|
13423
|
+
delete newState._parallelResults;
|
|
13424
|
+
newState[resultKey] = mergedValue;
|
|
13425
|
+
return newState;
|
|
13426
|
+
});
|
|
13427
|
+
return this;
|
|
13428
|
+
}
|
|
13429
|
+
};
|
|
13430
|
+
}
|
|
13431
|
+
/**
 * Creates a feedback loop that jumps back to a labeled step if a condition is met.
 *
 * The loop re-runs every step between the label's position and this feedback
 * step (exclusive) while the condition holds, up to maxIterations total passes.
 * An iteration counter is written into state under `_feedback_<label>_iterations`
 * so downstream steps (or the condition itself) can observe progress.
 *
 * @param condition - Function that returns true to trigger the feedback loop
 * @param targetLabel - The label to jump back to
 * @param maxIterations - Maximum number of iterations to prevent infinite loops (default: 10)
 * @returns this (for chaining)
 * @throws Error if targetLabel was not previously registered via .label()
 *
 * @example
 * ```typescript
 * flow.label('retry-point')
 *   .execute('answer', ...)
 *   .execute('qualityCheck', ...)
 *   .feedback(state => state.qualityCheckResult.confidence < 0.7, 'retry-point')
 * ```
 */
feedback(condition, targetLabel, maxIterations = 10) {
  if (!this.stepLabels.has(targetLabel)) {
    throw new Error(
      `Label '${targetLabel}' not found. Make sure to define it with .label() before the feedback point.`
    );
  }
  const targetIndex = this.stepLabels.get(targetLabel);
  // Captured now, before this step is pushed, so the replay range covers
  // exactly the steps defined between the label and this feedback point.
  const feedbackStepIndex = this.flowDefinition.length;
  this.flowDefinition.push(async (state, context3) => {
    let currentState = state;
    // Counts total passes (initial run included), capped by maxIterations.
    let iterations = 1;
    const iterationKey = `_feedback_${targetLabel}_iterations`;
    if (typeof currentState[iterationKey] !== "number") {
      currentState = { ...currentState, [iterationKey]: 1 };
    }
    // NOTE(review): an existing numeric counter in state is preserved but not
    // loaded into `iterations`, so re-entering this step restarts the local
    // count at 1 — presumably intentional per-invocation capping; confirm.
    while (condition(currentState) && iterations < maxIterations) {
      iterations++;
      currentState = { ...currentState, [iterationKey]: iterations };
      // Replay the labeled section by index against the live flow definition.
      for (let i = targetIndex; i < feedbackStepIndex; i++) {
        const step = this.flowDefinition[i];
        if (step) {
          currentState = await step(currentState, context3);
        }
      }
    }
    return currentState;
  });
  return this;
}
|
|
13476
|
+
/**
|
|
13477
|
+
* Marks the beginning of a loop block.
|
|
13478
|
+
*
|
|
13479
|
+
* @param condition - Function that takes the current state and returns a boolean
|
|
13480
|
+
* @returns this (for chaining)
|
|
13481
|
+
*
|
|
13482
|
+
* @example
|
|
13483
|
+
* ```typescript
|
|
13484
|
+
* flow.while(state => state.iterations < 3)
|
|
13485
|
+
* .map(state => ({ ...state, iterations: (state.iterations || 0) + 1 }))
|
|
13486
|
+
* .endWhile()
|
|
13487
|
+
* ```
|
|
13488
|
+
*/
|
|
13489
|
+
while(condition) {
|
|
13490
|
+
const loopStartIndex = this.flowDefinition.length;
|
|
13491
|
+
this.loopStack.push(loopStartIndex);
|
|
13492
|
+
const placeholderStep = Object.assign(
|
|
13493
|
+
(state) => state,
|
|
13494
|
+
{
|
|
13495
|
+
_condition: condition,
|
|
13496
|
+
_isLoopStart: true
|
|
13497
|
+
}
|
|
13498
|
+
);
|
|
13499
|
+
this.flowDefinition.push(placeholderStep);
|
|
13500
|
+
return this;
|
|
13501
|
+
}
|
|
13502
|
+
/**
|
|
13503
|
+
* Marks the end of a loop block.
|
|
13504
|
+
*
|
|
13505
|
+
* @returns this (for chaining)
|
|
13506
|
+
*/
|
|
13507
|
+
endWhile() {
|
|
13508
|
+
if (this.loopStack.length === 0) {
|
|
13509
|
+
throw new Error("endWhile() called without matching while()");
|
|
13510
|
+
}
|
|
13511
|
+
const loopStartIndex = this.loopStack.pop();
|
|
13512
|
+
const placeholderStep = this.flowDefinition[loopStartIndex];
|
|
13513
|
+
if (!placeholderStep || !("_isLoopStart" in placeholderStep)) {
|
|
13514
|
+
throw new Error("Loop start step not found or invalid");
|
|
13515
|
+
}
|
|
13516
|
+
const condition = placeholderStep._condition;
|
|
13517
|
+
const loopBodySteps = this.flowDefinition.splice(loopStartIndex + 1);
|
|
13518
|
+
this.flowDefinition[loopStartIndex] = async (state, context3) => {
|
|
13519
|
+
let currentState = state;
|
|
13520
|
+
while (condition(currentState)) {
|
|
13521
|
+
for (const step of loopBodySteps) {
|
|
13522
|
+
currentState = await step(currentState, context3);
|
|
13523
|
+
}
|
|
13524
|
+
}
|
|
13525
|
+
return currentState;
|
|
13526
|
+
};
|
|
13527
|
+
return this;
|
|
13528
|
+
}
|
|
13529
|
+
/**
|
|
13530
|
+
* Executes the flow with the given AI service and input values.
|
|
13531
|
+
*
|
|
13532
|
+
* @param ai - The AI service to use as the default for all steps
|
|
13533
|
+
* @param values - The input values for the flow
|
|
13534
|
+
* @param options - Optional forward options to use as defaults
|
|
13535
|
+
* @returns Promise that resolves to the final output
|
|
13536
|
+
*/
|
|
13537
|
+
async forward(ai, values, options) {
|
|
13538
|
+
let state = { ...values };
|
|
13539
|
+
const context3 = {
|
|
13540
|
+
mainAi: ai,
|
|
13541
|
+
mainOptions: options
|
|
13542
|
+
};
|
|
13543
|
+
for (const step of this.flowDefinition) {
|
|
13544
|
+
state = await step(state, context3);
|
|
13545
|
+
}
|
|
13546
|
+
return state;
|
|
13547
|
+
}
|
|
13548
|
+
};
|
|
13549
|
+
var AxFlowSubContextImpl = class {
  // Steps queued for this sub-flow; run in order by executeSteps().
  steps = [];
  constructor(nodeGenerators) {
    this.nodeGenerators = nodeGenerators;
  }
  /**
   * Queues execution of a named node against inputs derived from the state.
   * The node's output is stored on the state under `<nodeName>Result`.
   * @throws Error immediately if no generator is registered for nodeName
   */
  execute(nodeName, mapping, dynamicContext) {
    const generator = this.nodeGenerators.get(nodeName);
    if (!generator) {
      throw new Error(`Node generator for '${nodeName}' not found.`);
    }
    this.steps.push(async (state, context3) => {
      // Per-step overrides win over the flow-level AI service/options.
      const ai = dynamicContext?.ai ?? context3.mainAi;
      const options = dynamicContext?.options ?? context3.mainOptions;
      const result = await generator.forward(ai, mapping(state), options);
      return { ...state, [`${nodeName}Result`]: result };
    });
    return this;
  }
  /** Queues a synchronous state transformation. */
  map(transform) {
    this.steps.push((state) => transform(state));
    return this;
  }
  /** Runs all queued steps sequentially, threading the state through. */
  async executeSteps(initialState, context3) {
    let current = initialState;
    for (const step of this.steps) {
      current = await step(current, context3);
    }
    return current;
  }
};
|
|
13583
|
+
var AxFlowTypedSubContextImpl = class {
  constructor(nodeGenerators) {
    this.nodeGenerators = nodeGenerators;
  }
  // Queued steps for this typed sub-flow, executed in insertion order.
  steps = [];
  /**
   * Queues a node execution; the node's result is written back onto the
   * state under the `<nodeName>Result` key.
   * @throws Error immediately if nodeName has no registered generator
   */
  execute(nodeName, mapping, dynamicContext) {
    const nodeGenerator = this.nodeGenerators.get(nodeName);
    if (!nodeGenerator) {
      throw new Error(`Node generator for '${nodeName}' not found.`);
    }
    const step = async (state, context3) => {
      // A dynamic context supplied at definition time overrides the
      // flow-level defaults carried in context3.
      const ai = dynamicContext?.ai ?? context3.mainAi;
      const options = dynamicContext?.options ?? context3.mainOptions;
      const inputs = mapping(state);
      const result = await nodeGenerator.forward(ai, inputs, options);
      const resultKey = `${nodeName}Result`;
      return Object.assign({}, state, { [resultKey]: result });
    };
    this.steps.push(step);
    return this;
  }
  /** Queues a synchronous state transform. */
  map(transform) {
    this.steps.push((state) => transform(state));
    return this;
  }
  /** Chains all queued steps as a sequential promise pipeline. */
  async executeSteps(initialState, context3) {
    return this.steps.reduce(
      (pending, step) => pending.then((s) => step(s, context3)),
      Promise.resolve(initialState)
    );
  }
};
|
|
13617
|
+
|
|
12875
13618
|
// dsp/loader.ts
|
|
12876
13619
|
var AxHFDataLoader = class {
|
|
12877
13620
|
rows = [];
|
|
@@ -16824,6 +17567,7 @@ var AxRAG = class extends AxChainOfThought {
|
|
|
16824
17567
|
AxAIOpenAIResponsesBase,
|
|
16825
17568
|
AxAIOpenAIResponsesImpl,
|
|
16826
17569
|
AxAIOpenAIResponsesModel,
|
|
17570
|
+
AxAIRefusalError,
|
|
16827
17571
|
AxAIReka,
|
|
16828
17572
|
AxAIRekaModel,
|
|
16829
17573
|
AxAIServiceAbortedError,
|
|
@@ -16855,6 +17599,8 @@ var AxRAG = class extends AxChainOfThought {
|
|
|
16855
17599
|
AxDockerSession,
|
|
16856
17600
|
AxEmbeddingAdapter,
|
|
16857
17601
|
AxEvalUtil,
|
|
17602
|
+
AxFlow,
|
|
17603
|
+
AxFlowTypedSubContextImpl,
|
|
16858
17604
|
AxFunctionError,
|
|
16859
17605
|
AxFunctionProcessor,
|
|
16860
17606
|
AxGen,
|
|
@@ -16932,6 +17678,7 @@ var AxRAG = class extends AxChainOfThought {
|
|
|
16932
17678
|
axModelInfoHuggingFace,
|
|
16933
17679
|
axModelInfoMistral,
|
|
16934
17680
|
axModelInfoOpenAI,
|
|
17681
|
+
axModelInfoOpenAIResponses,
|
|
16935
17682
|
axModelInfoReka,
|
|
16936
17683
|
axModelInfoTogether,
|
|
16937
17684
|
axSpanAttributes,
|