@ax-llm/ax 13.0.6 → 13.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these package versions as they appear in their public registries.
package/index.cjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../dsp/modelinfo.ts","../util/crypto.ts","../util/sse.ts","../util/stream.ts","../util/apicall.ts","../dsp/loggers.ts","../ai/debug.ts","../ai/metrics.ts","../ai/base.ts","../ai/huggingface/api.ts","../ai/mistral/types.ts","../ai/openai/responses_api.ts","../ai/reka/info.ts","../db/weaviate.ts","../docs/manager.ts","../mem/memory.ts","../dsp/jsonschema.ts","../dsp/functions.ts","../dsp/metrics.ts","../dsp/util.ts","../dsp/extract.ts","../dsp/processResponse.ts","../dsp/parser.ts","../dsp/prompt.ts","../dsp/samples.ts","../dsp/validate.ts","../dsp/generate.ts","../dsp/classifier.ts","../dsp/stopwords.ts","../dsp/evaluate.ts","../dsp/optimizer.ts","../dsp/optimizers/miproV2.ts","../mcp/httpTransport.ts","../prompts/agent.ts"],"names":["getModelInfo","model","modelInfo","models","modelEntry","v","mappedModel","exactMatch","normalizedName","normalizedMatch","webCrypto","randomUUID","sha256","data","encoder","inputData","hashBuffer","b","Hash","chunk","encoding","hash","i","char","createHash","algorithm","SSEParser","options","controller","error","rawData","lines","line","colonIndex","field","value","retryValue","parsedData","e","TextDecodeTransformer","text","TextDecoderStreamPolyfill","defaultRetryConfig","defaultTimeoutMs","textDecoderStream","AxAIServiceError","message","url","requestBody","responseBody","context","stringValue","resultItems","lastItem","instruction"],"mappings":"AAQO,o5CAASA,CAAAA,CAAuD,CACrE,KAAA,CAAAC,CAAAA,CACA,SAAA,CAAAC,CAAAA,CACA,MAAA,CAAAC,CACF,CAAA,CAEiC,CAE/B,IAAMC,CAAAA,iBAAaD,CAAAA,6BAAQ,IAAA,mBAAME,CAAAA,EAAMA,CAAAA,CAAE,GAAA,GAAQJ,CAAK,GAAA,CAChDK,CAAAA,CACJF,CAAAA,EAAc,OAAA,GAAWA,CAAAA,CACpBA,CAAAA,CAAW,KAAA,CACXH,CAAAA,CAGDM,CAAAA,CAAaL,CAAAA,CAAU,IAAA,CAAMG,CAAAA,EAAMA,CAAAA,CAAE,IAAA,GAASJ,CAAK,CAAA,CACzD,EAAA,CAAIM,CAAAA,CAAY,OAAOA,CAAAA,CAGvB,IAAMC,CAAAA,CAAiBF,CAAAA,CAEpB,OAAA,CAAQ,yBAAA,CAA2B,EAAE,CAAA,CAErC,OAAA,CAAQ,UAAA,CAAY,EAAE,CAAA,CACtB,OAAA,CAAQ,SAAA,CAAW,EAAE,CAAA,CACrB,OAAA,CAAQ,YAAA,CAAc,EAAE,CAAA,CACxB,OAAA,CAAQ,SAAA,CAAW,EAAE,CAAA,CACrB,OAAA,CAAQ,2BAAA,CAA6B,EAAE,CAAA,CACvC,OAAA,CAAQ,cAAA,CAAgB,EAAE,CAAA,CAC1B,OAAA,CAAQ,QAAA,CAAU,EAAE,CAAA,CAGjBG,CAAAA,CAAkBP,CAAAA,CAAU,IAAA,CAAMG,CAAAA,EAAMA,CAAAA,CAAE,IAAA,GAASG,CAAc,CAAA,CACvE,OAAIC,CAAAA,EAGG,IACT,CCvCA,IAAMC,EAAAA,CAAAA,CAAa,CAAA,CAAA,EAAM,CACvB,EAAA,CAAI,UAAA,CAAW,MAAA,EAAU,OAAO,UAAA,CAAW,MAAA,CAAO,UAAA,EAAe,UAAA,CAC/D,OAAO,UAAA,CAAW,MAAA,CAGpB,MAAM,IAAI,KAAA,CACR,+FACF,CACF,CAAA,CAAA,CAAG,CAAA,CAMI,SAASC,CAAAA,CAAAA,CAAqB,CACnC,OAAOD,EAAAA,CAAU,UAAA,CAAW,CAC9B,CAOA,MAAA,SAAsBE,EAAAA,CAAOC,CAAAA,CAA6C,CACxE,IAAMC,CAAAA,CAAU,IAAI,WAAA,CACdC,CAAAA,CAAY,OAAOF,CAAAA,EAAS,QAAA,CAAWC,CAAAA,CAAQ,MAAA,CAAOD,CAAI,CAAA,CAAIA,CAAAA,CAE9DG,CAAAA,CAAa,MAAMN,EAAAA,CAAU,MAAA,CAAO,MAAA,CAAO,SAAA,CAAWK,CAAS,CAAA,CAMrE,OALkB,KAAA,CAAM,IAAA,CAAK,IAAI,UAAA,CAAWC,CAAU,CAAC,CAAA,CAEpD,GAAA,CAAKC,CAAAA,EAAMA,CAAAA,CAAE,QAAA,CAAS,EAAE,CAAA,CAAE,QAAA,CAAS,CAAA,CAAG,GAAG,CAAC,CAAA,CAC1C,IAAA,CAAK,EAAE,CAGZ,CAMO,IAAMC,EAAAA,WAAN,KAAW,qEACR,IAAA,CAAO,GAAA,MAEf,CAAOC,CAAAA,CAAqB,CAC1B,OAAA,IAAA,CAAK,IAAA,EAAQA,CAAAA,CACN,IACT,CAEA,MAAA,CAAOC,CAAAA,CAAyB,CAC9B,EAAA,CAAIA,CAAAA,GAAa,KAAA,CACf,MAAM,IAAI,KAAA,CAAM,gCAAgC,CAAA,CAMlD,IAAML,CAAAA,CADU,IAAI,WAAA,CAAY,CAAA,CACN,MAAA,CAAO,IAAA,CAAK,IAAI,CAAA,CAEtCM,CAAAA,CAAO,CAAA,CACX,GAAA,CAAA,IAASC,CAAAA,CAAI,CAAA,CAAGA,CAAAA,CAAIP,CAAAA,CAAU,MAAA,CAAQO,CAAAA,EAAAA,CAAK,CACzC,IAAMC,CAAAA,CAAOR,CAAAA,CAAUO,CAAC,CAAA,CACxBD,CAAAA,CAAAA,CAAQA,CAAAA,EAAQ,CAAA,CAAA,CAAKA,CAAAA,CAAOE,CAAAA,CAC5BF,CAAAA,CAAOA,CAAAA,CAAOA,CAChB,CAGA,OAAO,IAAA,CAAK,GAAA
,CAAIA,CAAI,CAAA,CAAE,QAAA,CAAS,EAAE,CAAA,CAAE,QAAA,CAAS,CAAA,CAAG,GAAG,CACpD,CAEA,MAAM,WAAA,CAAA,CAA+B,CACnC,OAAOT,EAAAA,CAAO,IAAA,CAAK,IAAI,CACzB,CACF,UAAA,CAOO,SAASY,EAAAA,CAAWC,CAAAA,CAAyB,CAClD,EAAA,CAAIA,CAAAA,GAAc,QAAA,CAChB,MAAM,IAAI,KAAA,CAAM,qCAAqC,CAAA,CAEvD,OAAO,IAAIP,EACb,CC5EO,IAAMQ,EAAAA,YAAN,MAAA,QAAqC,eAA2B,iBAC7D,MAAA,CAAS,GAAA,gBACT,YAAA,CAAkC,CAAE,OAAA,CAAS,EAAG,EAAA,WAIxD,CAAYC,CAAAA,CAA+B,CAAC,CAAA,CAAG,CAC7C,KAAA,CAAM,CACJ,SAAA,CAAW,CAACR,CAAAA,CAAOS,CAAAA,CAAAA,EAAe,IAAA,CAAK,WAAA,CAAYT,CAAAA,CAAOS,CAAU,CAAA,CACpE,KAAA,CAAQA,CAAAA,EAAe,IAAA,CAAK,WAAA,CAAYA,CAAU,CACpD,CAAC,4EAAA,CAED,IAAA,CAAK,UAAA,CAAaD,CAAAA,CAAQ,UAAA,EAAc,IAAA,CAAK,KAAA,CAC7C,IAAA,CAAK,OAAA,CACHA,CAAAA,CAAQ,OAAA,EAAA,CACP,CAACE,CAAAA,CAAOC,CAAAA,CAAAA,EAAY,CACnB,OAAA,CAAQ,IAAA,CAAK,6BAAA,CAA+BD,CAAK,CAAA,CACjD,OAAA,CAAQ,GAAA,CAAI,gCAAA,CAAkCC,CAAO,CACvD,CAAA,CACJ,CAEQ,WAAA,CACNX,CAAAA,CACAS,CAAAA,CACM,CACN,IAAA,CAAK,MAAA,EAAUT,CAAAA,CACf,IAAA,CAAK,aAAA,CAAcS,CAAU,CAC/B,CAEQ,WAAA,CAAYA,CAAAA,CAAuD,CACzE,IAAA,CAAK,aAAA,CAAcA,CAAU,CAAA,CACzB,IAAA,CAAK,YAAA,CAAa,OAAA,EACpB,IAAA,CAAK,YAAA,CAAaA,CAAU,CAEhC,CAEQ,aAAA,CAAcA,CAAAA,CAAuD,CAG3E,IAAMG,CAAAA,CADmB,IAAA,CAAK,MAAA,CAAO,OAAA,CAAQ,UAAA,CAAY,CAAA;AAAA,CAAI,CAAA,CAC9B,KAAA,CAAM,CAAA;AAAA,CAAI,CAAA,CACzC,IAAA,CAAK,MAAA,CAASA,CAAAA,CAAM,GAAA,CAAI,CAAA,EAAK,EAAA,CAE7B,GAAA,CAAA,IAAWC,EAAAA,GAAQD,CAAAA,CACbC,CAAAA,GAAS,EAAA,CACX,IAAA,CAAK,YAAA,CAAaJ,CAAU,CAAA,CAE5B,IAAA,CAAK,SAAA,CAAUI,CAAI,CAGzB,CAEQ,SAAA,CAAUA,CAAAA,CAAoB,CACpC,EAAA,CAAIA,CAAAA,CAAK,UAAA,CAAW,GAAG,CAAA,CACrB,MAAA,CAGF,IAAMC,CAAAA,CAAaD,CAAAA,CAAK,OAAA,CAAQ,GAAG,CAAA,CACnC,EAAA,CAAIC,CAAAA,GAAe,CAAA,CAAA,CAAI,CACrB,IAAA,CAAK,YAAA,CAAa,OAAA,EAAA,CACf,IAAA,CAAK,YAAA,CAAa,OAAA,EAAW,CAAC,IAAA,CAAK,YAAA,CAAa,OAAA,CAAQ,QAAA,CAAS,CAAA;AAAA,CAAI,CAAA,CAClE,CAAA;AAAA,CAAA,CACA,EAAA,CAAA,CAAMD,CAAAA,CAAK,IAAA,CAAK,CAAA,CACtB,MACF,CAEA,IAAME,CAAAA,CAAQF,CAAAA,CAAK,KAAA,CAAM,CAAA,CAAGC,CAAU,CAAA,CAAE,IAAA,CAAK,CAAA,CACvCE,CAAAA,CAAQH,CAAAA,CAAK,KAAA,CAAMC,CAAAA,CAAa,CAAC,CAAA,CAAE,IAAA,CAAK,CAAA,CAE9C,MAAA,CAAQC,CAAAA,CAAO,CACb,IAAK,OAAA,CACH,IAAA,CAAK,YAAA,CAAa,KAAA,CAAQC,CAAAA,CAC1B,KAAA,CACF,IAAK,MAAA,CACH,IAAA,CAAK,YAAA,CAAa,OAAA,EAAA,CACf,IAAA,CAAK,YAAA,CAAa,OAAA,EACnB,CAAC,IAAA,CAAK,YAAA,CAAa,OAAA,CAAQ,QAAA,CAAS,CAAA;AAAA,CAAI,CAAA,CACpC,CAAA;AAAA,CAAA,CACA,EAAA,CAAA,CAAMA,CAAAA,CACZ,KAAA,CACF,IAAK,IAAA,CACH,IAAA,CAAK,YAAA,CAAa,EAAA,CAAKA,CAAAA,CACvB,KAAA,CACF,IAAK,OAAA,CAAS,CACZ,IAAMC,CAAAA,CAAa,MAAA,CAAO,QAAA,CAASD,CAAAA,CAAO,EAAE,CAAA,CACvC,MAAA,CAAO,KAAA,CAAMC,CAAU,CAAA,EAAA,CAC1B,IAAA,CAAK,YAAA,CAAa,KAAA,CAAQA,CAAAA,CAAAA,CAE5B,KACF,CACF,CACF,CAEQ,YAAA,CAAaR,CAAAA,CAAuD,CAC1E,EAAA,CAAI,IAAA,CAAK,YAAA,CAAa,OAAA,CAAS,CAK7B,EAAA,CAJK,IAAA,CAAK,YAAA,CAAa,KAAA,EAAA,CACrB,IAAA,CAAK,YAAA,CAAa,KAAA,CAAQ,SAAA,CAAA,CAGxB,IAAA,CAAK,YAAA,CAAa,OAAA,CAAQ,IAAA,CAAK,CAAA,GAAM,QAAA,CAAU,CAIjD,IAAA,CAAK,YAAA,CAAe,CAAE,OAAA,CAAS,EAAG,CAAA,CAClC,MACF,CAEA,GAAI,CACF,IAAMS,CAAAA,CAAgB,IAAA,CAAK,UAAA,CAAW,IAAA,CAAK,YAAA,CAAa,OAAO,CAAA,CAC/DT,CAAAA,CAAW,OAAA,CAAQS,CAAU,CAC/B,CAAA,KAAA,CAASC,CAAAA,CAAG,CACV,IAAA,CAAK,OAAA,CAAQA,CAAAA,CAAY,IAAA,CAAK,YAAA,CAAa,OAAO,CACpD,CAEA,IAAA,CAAK,YAAA,CAAe,CAAE,OAAA,CAAS,EAAG,CACpC,CACF,CACF,WAAA,CC1HA,IAAMC,EAAAA,CAAN,KAEA,CACU,WAER,CAAA,CAAc,CACZ,IAAA,CAAK,OAAA,CAAU,IAAI,WACrB,CAEA,SAAA,CACEpB,CAAAA,CACAS,CAAAA,CACA,CACA,EAAA,CAAI,CAAA,CAAET,EAAAA,WAAiB,WAAA,EAAe,WAAA,CAAY,MAAA,CAAOA,CAAK,CAAA,CAAA,CAC5D,MAAM,IAAI,SAAA,CAAU,mCAAmC,CAAA,CAEzD,IAAMqB,CAAAA,CAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAOrB,CAAAA,CAAO,CAAE,MAAA,CAAQ,CAAA,CAAK,CAAC,CAAA,CACpDqB,CAAAA,CAAK,MAAA,GAAW,CAAA,EAC
lBZ,CAAAA,CAAW,OAAA,CAAQY,CAAI,CAE3B,CAEA,KAAA,CAAMZ,CAAAA,CAAsD,CAC1D,IAAMY,CAAAA,CAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAO,CAAA,CAC7BA,CAAAA,CAAK,MAAA,GAAW,CAAA,EAClBZ,CAAAA,CAAW,OAAA,CAAQY,CAAI,CAE3B,CACF,CAAA,CAEaC,EAAAA,CAAN,MAAA,QAAwC,eAG7C,CACA,WAAA,CAAA,CAAc,CACZ,KAAA,CAAM,IAAIF,EAAuB,CACnC,CACF,CAAA,CCaO,IAAMG,EAAAA,CAAkC,CAC7C,UAAA,CAAY,CAAA,CACZ,cAAA,CAAgB,GAAA,CAChB,UAAA,CAAY,GAAA,CACZ,aAAA,CAAe,CAAA,CACf,oBAAA,CAAsB,CAAC,GAAA,CAAK,GAAA,CAAK,GAAA,CAAK,GAAA,CAAK,GAAA,CAAK,GAAG,CACrD,CAAA,CAEMC,EAAAA,CAAmB,GAAA,CACnBC,EAAAA,kBACH,UAAA,CAAmB,iBAAA,SAAqBH,IAAAA,CAG9BI,CAAAA,4BAAN,MAAA,QAA+B,KAAM,CAK1C,WAAA,CACEC,CAAAA,CACgBC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CAChBC,CAAAA,CAAmC,CAAC,CAAA,CACpC,CACA,KAAA,CAAMJ,CAAO,CAAA,CALG,IAAA,CAAA,GAAA,CAAAC,CAAAA,CACA,IAAA,CAAA,WAAA,CAAAC,CAAAA,CACA,IAAA,CAAA,YAAA,CAAAC,CAAAA,CAIhB,IAAA,CAAK,IAAA,CAAO,IAAA,CAAK,WAAA,CAAY,IAAA,CAC7B,IAAA,CAAK,SAAA,CAAY,IAAI,IAAA,CAAK,CAAA,CAAE,WAAA,CAAY,CAAA,CACxC,IAAA,CAAK,OAAA,CAAUtC,CAAAA,CAAW,CAAA,CAC1B,IAAA,CAAK,OAAA,CAAUuC,CAAAA,CAEf,IAAA,CAAK,KAAA,CAAQ,IAAA,CAAK,QAAA,CAAS,CAC7B,CAlBgB,QAoBP,CAAA,CAAmB,CAC1B,MAAO,CACL,CAAA,EAAA;AA2JU;AAmJR;AClVoC;AAAA;AA8BnB;AAImB;AAmCf;AAAqD,aAAA;AAErD;AAAqB,aAAA;AAU0B;AAId;AAIjC;AAEa;AAIA;AAGA;AAMI;AAEA;AAGjB;AAEa;AAGF;AAOqB;AAEnB;AAEb;AAIa;AAMU;AAErB;AAWkB;AAOxB;AAAc,OAAA;AAEd;AAAc,OAAA;ACrM1B;AAEA;AAGE;AAYF;AAA6B;AAW3B;AAAgC;AAElC;ACpCLC;AC+8CwC;ACt4Ca;AAsBzC;AAE0B;AAAA;AAUlC;AC9GW;AC0nBW;AAsfxB;ACzmCgB;ACqIiB,qBAAA;AAG5B,SAAA;AACmB,wBAAA;AAenB,SAAA;AAAA;AAEQ,YAAA;AACe,qBAAA;AAChB,cAAA;AAAA;AAEc,gBAAA;AAAK;AAAA;AAAA;AClHrB,SAAA;AAAA;AA+GHC;AA4Ba;AAOA;AAAA;AAkBuB;AAAA;AAKtB;AAAA;AC9GdC;ACVsB;AAwG5B;ACvLA;AAgDuF;AAAwB;AAW/G;AAgKJ;ACnFc;ACjBK;AA0NV;AA8EF,WAAA;AAGE,SAAA;AAgBF,cAAA;AAGE,YAAA;AAMA,SAAA;AAIF,UAAA;AClYwB,eAAA;AAoIb;AC6IjB,SAAA;AC/RkB,SAAA;AAyPX;AChTkB;AAAA;AAAA;AAMT,yDAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA6CZ,yBAAA;AAGI;AAIF;AAGA;AAgBQ;AAAA;AAkBiB;AACS;AAmBP;AAAA;AAAA;AA6HvB;AAsD+B;AAAA;AAKrB;AA0CA;AAmBF;AAgIhB;AAWsC;AA2B7B;AAgCA;ACnjBG;ACaX;ACmLQ;ACxKH,4GAAA;AC2Gd;ACnFI,aAAA;AC2HF;AC5BsB;AAAA;AAGI,mBAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA2CJ,QAAA;AAAA;AAAA;AAAA;AAFd;AAME;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA4CO,gBAAA;AAIA,iBAAA;AAIA,iBAAA;AAIS,yCAAA;AAAA;AAAA;AAGjB;AAAA;AAEa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAqGdC,YAAAA;ACpByB;ACrR9B;AA0XqB","file":"/home/runner/work/ax/ax/src/ax/dist/index.cjs","sourcesContent":["import type { AxAIInputModelList, AxModelInfo } from '../ai/types.js';\n\ninterface GetModelInfoParams<TModel = string, TEmbedModel = undefined> {\n model: TModel;\n modelInfo: readonly AxModelInfo[];\n models?: AxAIInputModelList<TModel, TEmbedModel>;\n}\n\nexport function getModelInfo<TModel = string, TEmbedModel = undefined>({\n model,\n modelInfo,\n models,\n}: Readonly<\n GetModelInfoParams<TModel, TEmbedModel>\n>): Readonly<AxModelInfo> | null {\n // First check if there's a mapping for this model\n const modelEntry = models?.find((v) => v.key === model);\n const mappedModel =\n modelEntry && 'model' in modelEntry\n ? 
(modelEntry.model as string)\n : (model as string);\n\n // Try exact match first\n const exactMatch = modelInfo.find((v) => v.name === model);\n if (exactMatch) return exactMatch;\n\n // Handle normalization if no exact match\n const normalizedName = mappedModel\n // Remove vendor prefixes\n .replace(/^(anthropic\\.|openai\\.)/, '')\n // Remove various postfixes one by one, stopping after first match\n .replace(/-latest$/, '')\n .replace(/-\\d{8}$/, '') // YYYYMMDD\n .replace(/-v\\d+:\\d+$/, '') // v2:0\n .replace(/@\\d{8}$/, '') // @YYYYMMDD\n .replace(/-\\d{2,}(-[a-zA-Z0-9-]+)?$/, '') // XX or XXXXX-something\n .replace(/-v\\d+@\\d{8}$/, '') // vX@YYYYMMDD\n .replace(/-v\\d+$/, ''); // Remove standalone version number\n\n // Try to find a match with the normalized name\n const normalizedMatch = modelInfo.find((v) => v.name === normalizedName);\n if (normalizedMatch) return normalizedMatch;\n\n // Return default if no match found\n return null;\n}\n","/**\n * Cross-platform crypto utilities that work in both Node.js and browser environments\n * using Web Crypto API standards\n */\n\n// Web Crypto API is available in both modern Node.js (16+) and browsers via globalThis.crypto\nconst webCrypto = (() => {\n if (globalThis.crypto && typeof globalThis.crypto.randomUUID === 'function') {\n return globalThis.crypto;\n }\n\n throw new Error(\n 'Web Crypto API with randomUUID support not available. Requires Node.js 16+ or modern browser.'\n );\n})();\n\n/**\n * Generate a random UUID using Web Crypto API\n * @returns A random UUID string\n */\nexport function randomUUID(): string {\n return webCrypto.randomUUID();\n}\n\n/**\n * Create a SHA-256 hash of the input data\n * @param data - The data to hash (string or ArrayBuffer)\n * @returns A promise that resolves to the hex-encoded hash\n */\nexport async function sha256(data: string | ArrayBuffer): Promise<string> {\n const encoder = new TextEncoder();\n const inputData = typeof data === 'string' ? 
encoder.encode(data) : data;\n\n const hashBuffer = await webCrypto.subtle.digest('SHA-256', inputData);\n const hashArray = Array.from(new Uint8Array(hashBuffer));\n const hashHex = hashArray\n .map((b) => b.toString(16).padStart(2, '0'))\n .join('');\n\n return hashHex;\n}\n\n/**\n * Create a hash instance that can be updated incrementally (similar to Node.js createHash)\n * Note: This is a synchronous wrapper around async Web Crypto API - uses simplified hash for compatibility\n */\nexport class Hash {\n private data = '';\n\n update(chunk: string): this {\n this.data += chunk;\n return this;\n }\n\n digest(encoding: 'hex'): string {\n if (encoding !== 'hex') {\n throw new Error('Only hex encoding is supported');\n }\n\n // For browser compatibility, we use a simple hash function\n // This maintains API compatibility but is not cryptographically secure\n const encoder = new TextEncoder();\n const inputData = encoder.encode(this.data);\n\n let hash = 0;\n for (let i = 0; i < inputData.length; i++) {\n const char = inputData[i]!;\n hash = (hash << 5) - hash + char;\n hash = hash & hash; // Convert to 32-bit integer\n }\n\n // Convert to hex string\n return Math.abs(hash).toString(16).padStart(8, '0');\n }\n\n async digestAsync(): Promise<string> {\n return sha256(this.data);\n }\n}\n\n/**\n * Create a hash instance (compatibility function)\n * @param algorithm - The hash algorithm (only 'sha256' supported)\n * @returns A Hash instance\n */\nexport function createHash(algorithm: string): Hash {\n if (algorithm !== 'sha256') {\n throw new Error('Only SHA-256 algorithm is supported');\n }\n return new Hash();\n}\n\n/**\n * Get the crypto object for use in JavaScript interpreter contexts\n * @returns The Web Crypto API object\n */\nexport function getCrypto() {\n return webCrypto;\n}\n","// Web Streams API types are now available globally via DOM types in tsconfig\n\ninterface CurrentEventState {\n event?: string;\n rawData: string;\n id?: string;\n retry?: number;\n}\n\ninterface SSEParserOptions<T> {\n dataParser?: (data: string) => T;\n onError?: (error: Error, rawData: string) => void;\n}\n\nexport class SSEParser<T = unknown> extends TransformStream<string, T> {\n private buffer = '';\n private currentEvent: CurrentEventState = { rawData: '' };\n private dataParser: (data: string) => T;\n private onError: (error: Error, rawData: string) => void;\n\n constructor(options: SSEParserOptions<T> = {}) {\n super({\n transform: (chunk, controller) => this.handleChunk(chunk, controller),\n flush: (controller) => this.handleFlush(controller),\n });\n\n this.dataParser = options.dataParser || JSON.parse;\n this.onError =\n options.onError ||\n ((error, rawData) => {\n console.warn('Failed to parse event data:', error);\n console.log('Raw data that failed to parse:', rawData);\n });\n }\n\n private handleChunk(\n chunk: string,\n controller: TransformStreamDefaultController<T>\n ): void {\n this.buffer += chunk;\n this.processBuffer(controller);\n }\n\n private handleFlush(controller: TransformStreamDefaultController<T>): void {\n this.processBuffer(controller);\n if (this.currentEvent.rawData) {\n this.processEvent(controller);\n }\n }\n\n private processBuffer(controller: TransformStreamDefaultController<T>): void {\n // Normalize newlines to \\n\n const normalizedBuffer = this.buffer.replace(/\\r\\n|\\r/g, '\\n');\n const lines = normalizedBuffer.split('\\n');\n this.buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line === '') {\n this.processEvent(controller);\n } else {\n 
this.parseLine(line);\n }\n }\n }\n\n private parseLine(line: string): void {\n if (line.startsWith(':')) {\n return; // Ignore comment lines\n }\n\n const colonIndex = line.indexOf(':');\n if (colonIndex === -1) {\n this.currentEvent.rawData +=\n (this.currentEvent.rawData && !this.currentEvent.rawData.endsWith('\\n')\n ? '\\n'\n : '') + line.trim();\n return;\n }\n\n const field = line.slice(0, colonIndex).trim();\n const value = line.slice(colonIndex + 1).trim();\n\n switch (field) {\n case 'event':\n this.currentEvent.event = value;\n break;\n case 'data':\n this.currentEvent.rawData +=\n (this.currentEvent.rawData &&\n !this.currentEvent.rawData.endsWith('\\n')\n ? '\\n'\n : '') + value;\n break;\n case 'id':\n this.currentEvent.id = value;\n break;\n case 'retry': {\n const retryValue = Number.parseInt(value, 10);\n if (!Number.isNaN(retryValue)) {\n this.currentEvent.retry = retryValue;\n }\n break;\n }\n }\n }\n\n private processEvent(controller: TransformStreamDefaultController<T>): void {\n if (this.currentEvent.rawData) {\n if (!this.currentEvent.event) {\n this.currentEvent.event = 'message';\n }\n\n if (this.currentEvent.rawData.trim() === '[DONE]') {\n // maybe we want to emit [DONE] to signal the end of the stream\n // controller.enqueue('[DONE]' as any)\n // Reset the current event\n this.currentEvent = { rawData: '' };\n return;\n }\n\n try {\n const parsedData: T = this.dataParser(this.currentEvent.rawData);\n controller.enqueue(parsedData);\n } catch (e) {\n this.onError(e as Error, this.currentEvent.rawData);\n }\n\n this.currentEvent = { rawData: '' };\n }\n }\n}\n","// Web Streams API types are now available globally via DOM types in tsconfig\n\nexport interface TextDecoderCommon {\n readonly encoding: string;\n readonly fatal: boolean;\n readonly ignoreBOM: boolean;\n}\n\nclass TextDecodeTransformer\n implements Transformer<ArrayBuffer | Uint8Array, string>\n{\n private decoder;\n\n constructor() {\n this.decoder = new TextDecoder();\n }\n\n transform(\n chunk: ArrayBuffer | Uint8Array,\n controller: TransformStreamDefaultController<string>\n ) {\n if (!(chunk instanceof ArrayBuffer || ArrayBuffer.isView(chunk))) {\n throw new TypeError('Input data must be a BufferSource');\n }\n const text = this.decoder.decode(chunk, { stream: true });\n if (text.length !== 0) {\n controller.enqueue(text);\n }\n }\n\n flush(controller: TransformStreamDefaultController<string>) {\n const text = this.decoder.decode();\n if (text.length !== 0) {\n controller.enqueue(text);\n }\n }\n}\n\nexport class TextDecoderStreamPolyfill extends TransformStream<\n ArrayBuffer | Uint8Array,\n string\n> {\n constructor() {\n super(new TextDecodeTransformer());\n }\n}\n","// Web Streams API types are now available globally via DOM types in tsconfig\nimport type { Span } from '@opentelemetry/api';\nimport { randomUUID } from './crypto.js';\n\nimport { SSEParser } from './sse.js';\nimport { TextDecoderStreamPolyfill } from './stream.js';\n\n// Configuration Types\nexport interface RetryConfig {\n maxRetries: number;\n initialDelayMs: number;\n maxDelayMs: number;\n backoffFactor: number;\n retryableStatusCodes: number[];\n}\n\nexport interface RequestMetrics {\n startTime: number;\n retryCount: number;\n lastRetryTime?: number;\n streamChunks?: number;\n lastChunkTime?: number;\n streamDuration?: number;\n errorTime?: number;\n}\n\n// Validation Interfaces\ninterface RequestValidation {\n validateRequest?: (request: unknown) => boolean | Promise<boolean>;\n}\n\ninterface ResponseValidation {\n 
validateResponse?: (response: unknown) => boolean | Promise<boolean>;\n}\n\n// API Base Types\nexport interface AxAPI {\n name?: string;\n headers?: Record<string, string>;\n put?: boolean;\n}\n\n// Enhanced API Configuration\nexport interface AxAPIConfig\n extends AxAPI,\n RequestValidation,\n ResponseValidation {\n url: string | URL;\n stream?: boolean;\n debug?: boolean;\n fetch?: typeof fetch;\n span?: Span;\n timeout?: number;\n retry?: Partial<RetryConfig>;\n abortSignal?: AbortSignal;\n}\n\n// Default Configurations\nexport const defaultRetryConfig: RetryConfig = {\n maxRetries: 3,\n initialDelayMs: 1000,\n maxDelayMs: 60000,\n backoffFactor: 2,\n retryableStatusCodes: [500, 408, 429, 502, 503, 504],\n};\n\nconst defaultTimeoutMs = 30000;\nconst textDecoderStream =\n (globalThis as any).TextDecoderStream ?? TextDecoderStreamPolyfill;\n\n// Error Classes\nexport class AxAIServiceError extends Error {\n public readonly timestamp: string;\n public readonly errorId: string;\n public readonly context: Record<string, unknown>;\n\n constructor(\n message: string,\n public readonly url: string,\n public readonly requestBody: unknown,\n public readonly responseBody: unknown,\n context: Record<string, unknown> = {}\n ) {\n super(message);\n this.name = this.constructor.name;\n this.timestamp = new Date().toISOString();\n this.errorId = randomUUID();\n this.context = context;\n\n this.stack = this.toString();\n }\n\n override toString(): string {\n return [\n `${this.name}: ${this.message}`,\n `URL: ${this.url}`,\n `Request Body: ${JSON.stringify(this.requestBody, null, 2)}`,\n `Response Body: ${JSON.stringify(this.responseBody, null, 2)}`,\n `Context: ${JSON.stringify(this.context, null, 2)}`,\n `Timestamp: ${this.timestamp}`,\n `Error ID: ${this.errorId}`,\n ].join('\\n');\n }\n\n // For Node.js, override the custom inspect method so console.log shows our custom string.\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\nexport class AxAIServiceStatusError extends AxAIServiceError {\n constructor(\n public readonly status: number,\n public readonly statusText: string,\n url: string,\n requestBody: unknown,\n responseBody: unknown,\n context?: Record<string, unknown>\n ) {\n super(`HTTP ${status} - ${statusText}`, url, requestBody, {\n httpStatus: status,\n httpStatusText: statusText,\n responseBody,\n ...context,\n });\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceNetworkError extends AxAIServiceError {\n constructor(\n public readonly originalError: Error,\n url: string,\n requestBody: unknown,\n responseBody: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n `Network Error: ${originalError.message}`,\n url,\n requestBody,\n responseBody,\n {\n originalErrorName: originalError.name,\n originalErrorStack: originalError.stack,\n ...context,\n }\n );\n this.name = this.constructor.name;\n this.stack = originalError.stack;\n }\n}\n\nexport class AxAIServiceResponseError extends AxAIServiceError {\n constructor(\n message: string,\n url: string,\n requestBody?: unknown,\n context?: Record<string, unknown>\n ) {\n super(message, url, requestBody, undefined, context);\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceStreamTerminatedError extends AxAIServiceError {\n constructor(\n url: string,\n requestBody?: unknown,\n 
public readonly lastChunk?: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n 'Stream terminated unexpectedly by remote host',\n url,\n requestBody,\n undefined,\n {\n lastChunk,\n ...context,\n }\n );\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceTimeoutError extends AxAIServiceError {\n constructor(\n url: string,\n timeoutMs: number,\n requestBody?: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n `Request timed out after ${timeoutMs}ms`,\n url,\n requestBody,\n undefined,\n { timeoutMs, ...context }\n );\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceAbortedError extends AxAIServiceError {\n constructor(\n url: string,\n reason?: string,\n requestBody?: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n `Request aborted${reason ? `: ${reason}` : ''}`,\n url,\n requestBody,\n undefined,\n { abortReason: reason, ...context }\n );\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceAuthenticationError extends AxAIServiceError {\n constructor(\n url: string,\n requestBody: unknown,\n responseBody: unknown,\n context?: Record<string, unknown>\n ) {\n super('Authentication failed', url, requestBody, responseBody, context);\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIRefusalError extends Error {\n public readonly timestamp: string;\n public readonly errorId: string;\n\n constructor(\n public readonly refusalMessage: string,\n public readonly model?: string,\n public readonly requestId?: string\n ) {\n super(`Model refused to fulfill request: ${refusalMessage}`);\n this.name = 'AxAIRefusalError';\n this.timestamp = new Date().toISOString();\n this.errorId = randomUUID();\n }\n\n override toString(): string {\n return [\n `${this.name}: ${this.message}`,\n `Refusal: ${this.refusalMessage}`,\n this.model ? `Model: ${this.model}` : '',\n this.requestId ? 
`Request ID: ${this.requestId}` : '',\n `Timestamp: ${this.timestamp}`,\n `Error ID: ${this.errorId}`,\n ]\n .filter(Boolean)\n .join('\\n');\n }\n\n // For Node.js, override the custom inspect method so console.log shows our custom string.\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\n// Utility Functions\nasync function safeReadResponseBody(response: Response): Promise<unknown> {\n try {\n if (response.headers.get('content-type')?.includes('application/json')) {\n return await response.json();\n }\n\n // Clone the response so we can read it without consuming the original\n const clonedResponse = response.clone();\n return await clonedResponse.text();\n } catch (e) {\n // If we can't read the body, return a descriptive message\n return `[ReadableStream - read failed: ${(e as Error).message}]`;\n }\n}\n\nfunction calculateRetryDelay(\n attempt: number,\n config: Readonly<RetryConfig>\n): number {\n const delay = Math.min(\n config.maxDelayMs,\n config.initialDelayMs * config.backoffFactor ** attempt\n );\n return delay * (0.75 + Math.random() * 0.5);\n}\n\nfunction createRequestMetrics(): RequestMetrics {\n return {\n startTime: Date.now(),\n retryCount: 0,\n };\n}\n\n// eslint-disable-next-line functional/prefer-immutable-types\nfunction updateRetryMetrics(metrics: RequestMetrics): void {\n metrics.retryCount++;\n metrics.lastRetryTime = Date.now();\n}\n\nfunction shouldRetry(\n error: Error,\n status: number | undefined,\n attempt: number,\n config: Readonly<RetryConfig>\n): boolean {\n if (attempt >= config.maxRetries) return false;\n if (status && config.retryableStatusCodes.includes(status)) return true;\n\n return (\n error instanceof AxAIServiceNetworkError &&\n !(error instanceof AxAIServiceAuthenticationError)\n );\n}\n\n// Enhanced API Call Function\nexport const apiCall = async <TRequest = unknown, TResponse = unknown>(\n api: Readonly<AxAPIConfig>,\n json: TRequest\n): Promise<TResponse | ReadableStream<TResponse>> => {\n const retryConfig: RetryConfig = { ...defaultRetryConfig, ...api.retry };\n const timeoutMs = api.timeout ?? defaultTimeoutMs;\n const metrics = createRequestMetrics();\n let timeoutId: NodeJS.Timeout;\n\n const baseUrl = new URL(process.env.PROXY ?? api.url);\n const apiPath = `${[baseUrl.pathname, api.name]\n .filter(Boolean)\n .join('/')\n .replace(/\\/+/g, '/')}${baseUrl.search}`;\n const apiUrl = new URL(apiPath, baseUrl);\n\n const requestId = randomUUID();\n\n // Validate request if validator is provided\n if (api.validateRequest) {\n const isValid = await api.validateRequest(json);\n if (!isValid) {\n throw new AxAIServiceResponseError(\n 'Invalid request data',\n apiUrl.href,\n json,\n { validation: 'request' }\n );\n }\n }\n\n // Set up telemetry\n api.span?.setAttributes({\n 'http.request.method': api.put ? 
'PUT' : 'POST',\n 'url.full': apiUrl.href,\n 'request.id': requestId,\n 'request.startTime': metrics.startTime,\n });\n\n let attempt = 0;\n\n while (true) {\n // Combine user abort signal with timeout signal\n const combinedAbortController = new AbortController();\n\n // Handle user abort signal\n if (api.abortSignal) {\n if (api.abortSignal.aborted) {\n throw new AxAIServiceAbortedError(\n apiUrl.href,\n api.abortSignal.reason,\n json,\n { metrics }\n );\n }\n\n const userAbortHandler = () => {\n combinedAbortController.abort(\n api.abortSignal!.reason || 'User aborted request'\n );\n };\n api.abortSignal.addEventListener('abort', userAbortHandler, {\n once: true,\n });\n\n // Clean up listener if we complete before abort\n const originalAbort = combinedAbortController.abort.bind(\n combinedAbortController\n );\n combinedAbortController.abort = (reason?: string) => {\n api.abortSignal!.removeEventListener('abort', userAbortHandler);\n originalAbort(reason);\n };\n }\n\n timeoutId = setTimeout(() => {\n combinedAbortController.abort('Request timeout');\n }, timeoutMs);\n\n try {\n // Set up timeout with proper cleanup\n\n const res = await (api.fetch ?? fetch)(apiUrl, {\n method: api.put ? 'PUT' : 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'X-Request-ID': requestId,\n 'X-Retry-Count': attempt.toString(),\n ...api.headers,\n },\n body: JSON.stringify(json),\n signal: combinedAbortController.signal,\n });\n\n clearTimeout(timeoutId);\n\n // Handle authentication errors\n if (res.status === 401 || res.status === 403) {\n const responseBody = await safeReadResponseBody(res);\n throw new AxAIServiceAuthenticationError(\n apiUrl.href,\n json,\n responseBody,\n {\n metrics,\n }\n );\n }\n\n // Handle retryable status codes\n if (\n res.status >= 400 &&\n shouldRetry(new Error(), res.status, attempt, retryConfig)\n ) {\n const delay = calculateRetryDelay(attempt, retryConfig);\n attempt++;\n updateRetryMetrics(metrics);\n\n api.span?.addEvent('retry', {\n attempt,\n delay,\n status: res.status,\n 'metrics.startTime': metrics.startTime,\n 'metrics.retryCount': metrics.retryCount,\n 'metrics.lastRetryTime': metrics.lastRetryTime,\n });\n\n await new Promise((resolve) => setTimeout(resolve, delay));\n continue;\n }\n\n if (res.status >= 400) {\n const responseBody = await safeReadResponseBody(res);\n throw new AxAIServiceStatusError(\n res.status,\n res.statusText,\n apiUrl.href,\n json,\n responseBody,\n { metrics }\n );\n }\n\n // Handle non-streaming response\n if (!api.stream) {\n const resJson = await res.json();\n\n // Validate response if validator is provided\n if (api.validateResponse) {\n const isValid = await api.validateResponse(resJson);\n if (!isValid) {\n throw new AxAIServiceResponseError(\n 'Invalid response data',\n apiUrl.href,\n json,\n { validation: 'response' }\n );\n }\n }\n\n api.span?.setAttributes({\n 'response.time': Date.now() - metrics.startTime,\n 'response.retries': metrics.retryCount,\n });\n\n return resJson as TResponse;\n }\n\n // Handle streaming response\n if (!res.body) {\n throw new AxAIServiceResponseError(\n 'Response body is null',\n apiUrl.href,\n json,\n { metrics }\n );\n }\n\n let lastChunk: TResponse | undefined;\n let chunkCount = 0;\n\n // Enhanced tracking stream\n const trackingStream = new TransformStream<TResponse, TResponse>({\n transform(chunk, controller) {\n lastChunk = chunk;\n chunkCount++;\n metrics.streamChunks = chunkCount;\n metrics.lastChunkTime = Date.now();\n controller.enqueue(chunk);\n\n 
api.span?.addEvent('stream.chunk', {\n 'stream.chunks': chunkCount,\n 'stream.duration': Date.now() - metrics.startTime,\n 'response.retries': metrics.retryCount,\n });\n },\n });\n\n // Flag to track if the controller is closed.\n let closed = false;\n\n // Enhanced wrapped stream\n return new ReadableStream<TResponse>({\n start(controller) {\n const reader = res\n .body!.pipeThrough(new textDecoderStream())\n .pipeThrough(new SSEParser<TResponse>())\n .pipeThrough(trackingStream)\n .getReader();\n\n async function read() {\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n if (!closed) {\n closed = true;\n controller.close();\n }\n break;\n }\n\n // Check if the controller is already closed before enqueuing.\n if (closed) break;\n controller.enqueue(value);\n }\n } catch (e) {\n const error = e as Error;\n const streamMetrics = {\n ...metrics,\n streamDuration: Date.now() - metrics.startTime,\n };\n\n if (\n error.name === 'AbortError' ||\n error.message?.includes('aborted')\n ) {\n controller.error(\n new AxAIServiceStreamTerminatedError(\n apiUrl.href,\n json,\n lastChunk,\n { streamMetrics }\n )\n );\n } else if (\n error instanceof TypeError &&\n error.message.includes('cancelled')\n ) {\n controller.error(\n new AxAIServiceStreamTerminatedError(\n apiUrl.href,\n json,\n lastChunk,\n {\n streamMetrics,\n cancelReason: 'Stream cancelled by client',\n }\n )\n );\n } else {\n controller.error(\n new AxAIServiceNetworkError(\n error,\n apiUrl.href,\n json,\n '[ReadableStream - consumed during streaming]',\n {\n streamMetrics,\n }\n )\n );\n }\n throw error;\n } finally {\n clearTimeout(timeoutId);\n reader.releaseLock();\n }\n }\n\n read();\n },\n // When the consumer cancels the stream, set our flag to stop processing further.\n cancel() {\n closed = true;\n },\n });\n } catch (error) {\n if (error instanceof Error && error.name === 'AbortError') {\n // Check if this was a user abort or timeout\n if (api.abortSignal?.aborted) {\n throw new AxAIServiceAbortedError(\n apiUrl.href,\n api.abortSignal.reason,\n json,\n { metrics }\n );\n }\n throw new AxAIServiceTimeoutError(apiUrl.href, timeoutMs, json, {\n metrics,\n });\n }\n\n if (api.span?.isRecording()) {\n api.span.recordException(error as Error);\n api.span.setAttributes({\n 'error.time': Date.now() - metrics.startTime,\n 'error.retries': metrics.retryCount,\n });\n }\n\n // Handle retryable network errors\n if (\n error instanceof AxAIServiceNetworkError &&\n shouldRetry(error, undefined, attempt, retryConfig)\n ) {\n const delay = calculateRetryDelay(attempt, retryConfig);\n attempt++;\n updateRetryMetrics(metrics);\n\n api.span?.addEvent('retry', {\n attempt,\n delay,\n error: error.message,\n 'metrics.startTime': metrics.startTime,\n 'metrics.retryCount': metrics.retryCount,\n 'metrics.lastRetryTime': metrics.lastRetryTime,\n });\n\n await new Promise((resolve) => setTimeout(resolve, delay));\n continue;\n }\n\n if (error instanceof AxAIServiceError) {\n error.context.metrics = metrics;\n }\n\n throw error;\n } finally {\n if (timeoutId !== undefined) {\n clearTimeout(timeoutId);\n }\n }\n }\n};\n\nexport function createApiConfig(\n config: Readonly<Partial<AxAPIConfig>>\n): AxAPIConfig {\n return {\n timeout: defaultTimeoutMs,\n retry: defaultRetryConfig,\n ...config,\n url: config.url!, // URL is required\n };\n}\n","import type { AxLoggerFunction, AxLoggerTag } from '../ai/types.js';\nimport { ColorLog } from '../util/log.js';\n\nconst colorLog = new ColorLog();\n\n// Default output 
function that writes to stdout\nconst defaultOutput = (message: string): void => {\n process.stdout.write(message);\n};\n\n// Factory function to create a default logger with customizable output\nexport const axCreateDefaultColorLogger = (\n output: (message: string) => void = defaultOutput\n): AxLoggerFunction => {\n return (message: string, options?: { tags?: AxLoggerTag[] }) => {\n const tags = options?.tags ?? [];\n let formattedMessage = message;\n\n // Step 1: Pick color function based on semantic tags\n let colorFunction: (text: string) => string = (text) => text; // default no color\n\n if (tags.includes('systemContent')) {\n colorFunction = (text) => colorLog.white(text);\n } else if (tags.includes('userContent')) {\n colorFunction = (text) => colorLog.white(text);\n } else if (tags.includes('functionName')) {\n colorFunction = (text) => colorLog.greenBright(text);\n } else if (tags.includes('functionArg')) {\n colorFunction = (text) => colorLog.greenBright(text);\n } else if (tags.includes('assistantContent')) {\n colorFunction = (text) => colorLog.white(text);\n } else if (tags.includes('responseContent')) {\n colorFunction = (text) => colorLog.greenBright(text);\n } else if (tags.includes('functionResult')) {\n colorFunction = (text) => colorLog.blueBright(text);\n }\n\n if (tags.includes('error')) {\n colorFunction = (text) => colorLog.redBright(text);\n } else if (tags.includes('warning')) {\n colorFunction = (text) => colorLog.red(text);\n }\n\n // Step 2: Add prefix based on tag type\n if (\n tags.includes('systemContent') ||\n tags.includes('userContent') ||\n tags.includes('functionName') ||\n tags.includes('functionArg') ||\n tags.includes('functionResult') ||\n tags.includes('assistantStart') ||\n tags.includes('start') ||\n tags.includes('end')\n ) {\n formattedMessage = `\\n${formattedMessage}`;\n }\n\n if (tags.includes('responseEnd')) {\n formattedMessage = `${formattedMessage}\\n───\\n`;\n }\n // Step 4: Apply color function and output\n output(colorFunction(formattedMessage));\n };\n};\n\nexport const defaultLogger: AxLoggerFunction = axCreateDefaultColorLogger();\n\n// Factory function to create a text-only logger (no colors) with customizable output\nexport const axCreateDefaultTextLogger = (\n output: (message: string) => void = defaultOutput\n): AxLoggerFunction => {\n return (message: string, options?: { tags?: AxLoggerTag[] }) => {\n const tags = options?.tags ?? 
[];\n let formattedMessage = message;\n\n // Step 1: No color function needed for text logger\n\n // Step 2: Add prefix based on tag type\n if (\n tags.includes('systemContent') ||\n tags.includes('userContent') ||\n tags.includes('functionName') ||\n tags.includes('functionArg') ||\n tags.includes('functionResult') ||\n tags.includes('assistantStart') ||\n tags.includes('start') ||\n tags.includes('end')\n ) {\n formattedMessage = `\\n${formattedMessage}`;\n }\n\n if (tags.includes('responseEnd')) {\n formattedMessage = `${formattedMessage}───\\n`;\n }\n\n // Step 4: Output without color\n output(formattedMessage);\n };\n};\n\n/**\n * Factory function to create an enhanced optimizer logger with clean visual formatting\n * that works for all optimizer types using semantic tags for proper categorization\n */\nexport const axCreateOptimizerLogger = (\n output: (message: string) => void = (msg) => process.stdout.write(msg)\n): AxLoggerFunction => {\n const baseLogger = axCreateDefaultColorLogger(output);\n\n // Track state for better visual flow\n let isFirstPhase = true;\n\n return (message: string, options) => {\n const tags = options?.tags ?? [];\n let formattedMessage = message;\n\n // Use tags for semantic formatting instead of string pattern matching\n if (tags.includes('optimizer')) {\n if (tags.includes('start')) {\n const trialsMatch =\n message.match(/with (\\d+) trials?/) || message.match(/(\\d+) trials?/);\n const optimizerMatch = message.match(\n /(MIPROv2|BootstrapFewshot|[A-Z][a-zA-Z]+)/\n );\n const optimizerName = optimizerMatch ? optimizerMatch[1] : 'Optimizer';\n\n if (trialsMatch?.[1]) {\n formattedMessage = `\\n┌─ ${optimizerName} optimization (${trialsMatch[1]} trials)\\n`;\n } else {\n formattedMessage = `\\n┌─ ${optimizerName} optimization\\n`;\n }\n isFirstPhase = true;\n } else if (tags.includes('config')) {\n if (message.includes('examples') && message.includes('training')) {\n const match =\n message.match(\n /(\\d+) examples for training and (\\d+) for validation/\n ) || message.match(/(\\d+) training.*?(\\d+) validation/);\n if (match?.[1] && match[2]) {\n formattedMessage = `│ Dataset: ${match[1]} training, ${match[2]} validation\\n`;\n } else {\n const simpleMatch = message.match(/(\\d+) examples/);\n if (simpleMatch?.[1]) {\n formattedMessage = `│ Dataset: ${simpleMatch[1]} examples\\n`;\n }\n }\n } else if (message.includes('teacher')) {\n formattedMessage = '│ Using teacher model\\n';\n } else {\n formattedMessage = `│ ${message}\\n`;\n }\n } else if (tags.includes('phase')) {\n if (isFirstPhase) {\n formattedMessage = `├─ ${message}\\n`;\n isFirstPhase = false;\n } else {\n formattedMessage = `├─ ${message}\\n`;\n }\n } else if (tags.includes('result')) {\n if (message.includes('Generated') || message.includes('Selected')) {\n const match = message.match(/(\\d+)/);\n if (match?.[1]) {\n formattedMessage = `│ ✓ ${message}\\n`;\n } else {\n formattedMessage = `│ ✓ ${message}\\n`;\n }\n } else if (message.includes('configuration')) {\n formattedMessage = '│ Applied best configuration\\n';\n } else {\n formattedMessage = `│ ${message}\\n`;\n }\n } else if (tags.includes('progress')) {\n formattedMessage = `│ ${message}\\n`;\n } else if (tags.includes('complete')) {\n const scoreMatch = message.match(/(score|performance):\\s*([\\d.]+)/);\n if (scoreMatch?.[2]) {\n const score = Number.parseFloat(scoreMatch[2]);\n const percentage =\n score <= 1 ? `${(score * 100).toFixed(1)}%` : score.toFixed(3);\n formattedMessage = `├─ Complete! 
Best: ${percentage}\\n`;\n } else if (message.includes('Bootstrap')) {\n formattedMessage = `├─ ${message}\\n`;\n } else {\n formattedMessage = '├─ Optimization complete\\n';\n }\n } else if (tags.includes('checkpoint')) {\n if (message.includes('Resuming')) {\n formattedMessage = `│ ${message}\\n`;\n } else {\n const match =\n message.match(/checkpoint:\\s*(.+)/) ||\n message.match(/Saved\\s+(.+)/);\n if (match?.[1]) {\n formattedMessage = `└─ Saved: ${match[1]}\\n`;\n } else {\n formattedMessage = '└─ Checkpoint saved\\n';\n }\n }\n }\n }\n\n // Handle non-optimizer messages with basic formatting\n else if (tags.includes('discovery')) {\n if (message.includes('Found') && message.includes('examples')) {\n const match = message.match(/Found (\\d+)/);\n if (match?.[1]) {\n formattedMessage = `│ Found ${match[1]} examples\\n`;\n }\n }\n }\n\n // Handle errors and warnings\n if (tags.includes('error')) {\n formattedMessage = `\\n✗ ${message}\\n`;\n } else if (tags.includes('warning')) {\n formattedMessage = `\\n⚠ ${message}\\n`;\n }\n\n // Use the base logger for color formatting and output\n baseLogger(formattedMessage, options);\n };\n};\n\n/**\n * Default optimizer logger instance\n */\nexport const axDefaultOptimizerLogger = axCreateOptimizerLogger();\n","import { defaultLogger } from '../dsp/loggers.js';\nimport type {\n AxChatRequest,\n AxChatResponse,\n AxLoggerFunction,\n AxLoggerTag,\n} from './types.js';\n\nconst formatChatMessage = (\n msg: AxChatRequest['chatPrompt'][number],\n hideContent?: boolean,\n hideSystemPrompt?: boolean\n) => {\n switch (msg.role) {\n case 'system':\n if (hideSystemPrompt) {\n return '';\n }\n return `─── System: ───\\n${msg.content}`;\n case 'function':\n return `─── Function Result: ───\\n${msg.result}`;\n case 'user': {\n if (typeof msg.content === 'string') {\n return `─── User: ───\\n${msg.content}`;\n }\n const items = msg.content.map((v) => {\n switch (v.type) {\n case 'text':\n return v.text;\n case 'image':\n return `(Image, ${v.mimeType}) ${v.image.substring(0, 10)}`;\n default:\n throw new Error('Invalid content type');\n }\n });\n return `─── User: ───\\n${items.join('\\n')}`;\n }\n case 'assistant': {\n if (msg.functionCalls) {\n const fns = msg.functionCalls?.map(({ function: fn }) => {\n const args =\n typeof fn.params !== 'string'\n ? JSON.stringify(fn.params, null, 2)\n : fn.params;\n return `${fn.name}(${args})`;\n });\n return `─── Functions: ───\\n${fns.join('\\n')}`;\n }\n return `─── Assistant: ───\\n${hideContent ? '' : (msg.content ?? '<empty>')}`;\n }\n default:\n throw new Error('Invalid role');\n }\n};\n\nexport const logChatRequestMessage = (\n msg: AxChatRequest['chatPrompt'][number],\n hideSystemPrompt?: boolean,\n logger: AxLoggerFunction = defaultLogger\n) => {\n logChatRequest([msg], hideSystemPrompt, logger);\n};\n\nexport const logChatRequest = (\n chatPrompt: Readonly<AxChatRequest['chatPrompt']>,\n hideSystemPrompt?: boolean,\n logger: AxLoggerFunction = defaultLogger\n) => {\n for (const msg of chatPrompt ?? 
[]) {\n const formattedMessage = formatChatMessage(msg, false, hideSystemPrompt);\n if (formattedMessage) {\n const tags: AxLoggerTag[] = [];\n\n switch (msg.role) {\n case 'system':\n tags.push('systemContent');\n break;\n case 'function':\n tags.push('functionName');\n break;\n case 'user':\n tags.push('userContent');\n break;\n }\n\n logger(formattedMessage, { tags });\n }\n }\n\n logger('─── Assistant: ───', { tags: ['assistantStart'] });\n};\n\nexport const logResponseResult = (\n r: Readonly<AxChatResponse['results'][number] & { index: number }>,\n logger: AxLoggerFunction = defaultLogger\n) => {\n if (r.content) {\n logger(r.content, { tags: ['responseContent'] });\n }\n\n const loggedFunctionCalls = new Set<string>();\n\n if (r.functionCalls && r.functionCalls.length > 0) {\n for (const [i, f] of r.functionCalls.entries()) {\n if (f.id) {\n if (loggedFunctionCalls.has(f.id)) {\n continue;\n }\n loggedFunctionCalls.add(f.id);\n\n const tags: AxLoggerTag[] = ['functionName'];\n if (i === 0) {\n tags.push('firstFunction');\n }\n if (r.functionCalls.length > 1) {\n tags.push('multipleFunctions');\n }\n logger(`[${i + 1}] ${f.function.name} [${f.id}]`, { tags });\n }\n\n if (f.function.params) {\n const params =\n typeof f.function.params === 'string'\n ? f.function.params\n : JSON.stringify(f.function.params, null, 2);\n logger(params, { tags: ['functionArg'] });\n }\n }\n }\n};\n\nexport const logResponse = (\n resp: Readonly<AxChatResponse>,\n logger: AxLoggerFunction = defaultLogger\n) => {\n if (!resp.results) {\n return;\n }\n for (const r of resp.results) {\n logResponseResult(r, logger);\n }\n};\n\nexport const logResponseDelta = (\n delta: string,\n logger: AxLoggerFunction = defaultLogger\n) => {\n logger(delta, { tags: ['responseContent', 'responseDelta'] });\n};\n\nexport const logFunctionResults = (\n results: Readonly<\n { result: string; functionId: string; isError?: boolean; index: number }[]\n >,\n logger: AxLoggerFunction = defaultLogger\n) => {\n for (const result of results) {\n logger(`Function Result [${result.functionId}]:`, {\n tags: ['functionResult'],\n });\n\n if (result.isError) {\n logger(result.result, { tags: ['functionResult', 'error'] });\n } else {\n logger(result.result, { tags: ['functionResult'] });\n }\n }\n};\n","import type { Counter, Gauge, Histogram, Meter } from '@opentelemetry/api';\n\n// Utility function to sanitize metric labels\nconst sanitizeLabels = (\n labels: Record<string, unknown>\n): Record<string, string> => {\n const sanitized: Record<string, string> = {};\n for (const [key, value] of Object.entries(labels)) {\n if (value !== undefined && value !== null) {\n const stringValue = String(value);\n // Limit label length to prevent excessive memory usage\n sanitized[key] =\n stringValue.length > 100 ? 
stringValue.substring(0, 100) : stringValue;\n }\n }\n return sanitized;\n};\n\nexport interface AxAIMetricsInstruments {\n latencyHistogram?: Histogram;\n errorCounter?: Counter;\n requestCounter?: Counter;\n tokenCounter?: Counter;\n inputTokenCounter?: Counter;\n outputTokenCounter?: Counter;\n errorRateGauge?: Gauge;\n meanLatencyGauge?: Gauge;\n p95LatencyGauge?: Gauge;\n p99LatencyGauge?: Gauge;\n\n streamingRequestsCounter?: Counter;\n\n functionCallsCounter?: Counter;\n functionCallLatencyHistogram?: Histogram;\n\n requestSizeHistogram?: Histogram;\n responseSizeHistogram?: Histogram;\n\n temperatureGauge?: Gauge;\n maxTokensGauge?: Gauge;\n\n estimatedCostCounter?: Counter;\n\n promptLengthHistogram?: Histogram;\n contextWindowUsageGauge?: Gauge;\n\n timeoutsCounter?: Counter;\n abortsCounter?: Counter;\n\n thinkingBudgetUsageCounter?: Counter;\n multimodalRequestsCounter?: Counter;\n}\n\n// Singleton instance for AI metrics instruments\nlet globalAIMetricsInstruments: AxAIMetricsInstruments | undefined;\n\n// Function to get or create AI metrics instruments (singleton pattern)\nexport const getOrCreateAIMetricsInstruments = (\n meter?: Meter\n): AxAIMetricsInstruments | undefined => {\n // Return existing instance if available\n if (globalAIMetricsInstruments) {\n return globalAIMetricsInstruments;\n }\n\n if (meter) {\n globalAIMetricsInstruments = createMetricsInstruments(meter);\n return globalAIMetricsInstruments;\n }\n\n return undefined;\n};\n\n// Function to reset the AI metrics singleton (useful for testing)\nexport const resetAIMetricsInstruments = (): void => {\n globalAIMetricsInstruments = undefined;\n};\n\nexport const createMetricsInstruments = (\n meter: Meter\n): AxAIMetricsInstruments => {\n return {\n latencyHistogram: meter.createHistogram('ax_llm_request_duration_ms', {\n description: 'Duration of LLM requests in milliseconds',\n unit: 'ms',\n }),\n\n errorCounter: meter.createCounter('ax_llm_errors_total', {\n description: 'Total number of LLM request errors',\n }),\n\n requestCounter: meter.createCounter('ax_llm_requests_total', {\n description: 'Total number of LLM requests',\n }),\n\n tokenCounter: meter.createCounter('ax_llm_tokens_total', {\n description: 'Total number of LLM tokens consumed',\n }),\n\n inputTokenCounter: meter.createCounter('ax_llm_input_tokens_total', {\n description: 'Total number of input/prompt tokens consumed',\n }),\n\n outputTokenCounter: meter.createCounter('ax_llm_output_tokens_total', {\n description: 'Total number of output/completion tokens generated',\n }),\n\n errorRateGauge: meter.createGauge('ax_llm_error_rate', {\n description: 'Current error rate as a percentage (0-100)',\n }),\n\n meanLatencyGauge: meter.createGauge('ax_llm_mean_latency_ms', {\n description: 'Mean latency of LLM requests in milliseconds',\n unit: 'ms',\n }),\n\n p95LatencyGauge: meter.createGauge('ax_llm_p95_latency_ms', {\n description: '95th percentile latency of LLM requests in milliseconds',\n unit: 'ms',\n }),\n\n p99LatencyGauge: meter.createGauge('ax_llm_p99_latency_ms', {\n description: '99th percentile latency of LLM requests in milliseconds',\n unit: 'ms',\n }),\n\n streamingRequestsCounter: meter.createCounter(\n 'ax_llm_streaming_requests_total',\n {\n description: 'Total number of streaming LLM requests',\n }\n ),\n\n functionCallsCounter: meter.createCounter('ax_llm_function_calls_total', {\n description: 'Total number of function/tool calls made',\n }),\n\n functionCallLatencyHistogram: meter.createHistogram(\n 
'ax_llm_function_call_latency_ms',\n {\n description: 'Latency of function calls in milliseconds',\n unit: 'ms',\n }\n ),\n\n requestSizeHistogram: meter.createHistogram('ax_llm_request_size_bytes', {\n description: 'Size of LLM request payloads in bytes',\n unit: 'By',\n }),\n\n responseSizeHistogram: meter.createHistogram('ax_llm_response_size_bytes', {\n description: 'Size of LLM response payloads in bytes',\n unit: 'By',\n }),\n\n temperatureGauge: meter.createGauge('ax_llm_temperature_gauge', {\n description: 'Temperature setting used for LLM requests',\n }),\n\n maxTokensGauge: meter.createGauge('ax_llm_max_tokens_gauge', {\n description: 'Maximum tokens setting used for LLM requests',\n }),\n\n estimatedCostCounter: meter.createCounter('ax_llm_estimated_cost_total', {\n description: 'Estimated cost of LLM requests in USD',\n unit: '$',\n }),\n\n promptLengthHistogram: meter.createHistogram('ax_llm_prompt_length_chars', {\n description: 'Length of prompts in characters',\n }),\n\n contextWindowUsageGauge: meter.createGauge(\n 'ax_llm_context_window_usage_ratio',\n {\n description: 'Context window utilization ratio (0-1)',\n }\n ),\n\n timeoutsCounter: meter.createCounter('ax_llm_timeouts_total', {\n description: 'Total number of timed out LLM requests',\n }),\n\n abortsCounter: meter.createCounter('ax_llm_aborts_total', {\n description: 'Total number of aborted LLM requests',\n }),\n\n thinkingBudgetUsageCounter: meter.createCounter(\n 'ax_llm_thinking_budget_usage_total',\n {\n description: 'Total thinking budget tokens used',\n }\n ),\n\n multimodalRequestsCounter: meter.createCounter(\n 'ax_llm_multimodal_requests_total',\n {\n description: 'Total number of multimodal requests (with images/audio)',\n }\n ),\n };\n};\n\nexport const recordLatencyMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n duration: number,\n aiService: string,\n model?: string\n): void => {\n try {\n if (instruments.latencyHistogram) {\n const labels = sanitizeLabels({\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n instruments.latencyHistogram.record(duration, labels);\n }\n } catch (error) {\n console.warn('Failed to record latency metric:', error);\n }\n};\n\nexport const recordLatencyStatsMetrics = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n meanLatency: number,\n p95Latency: number,\n p99Latency: number,\n aiService: string,\n model?: string\n): void => {\n const labels = {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n };\n\n if (instruments.meanLatencyGauge) {\n instruments.meanLatencyGauge.record(meanLatency, labels);\n }\n\n if (instruments.p95LatencyGauge) {\n instruments.p95LatencyGauge.record(p95Latency, labels);\n }\n\n if (instruments.p99LatencyGauge) {\n instruments.p99LatencyGauge.record(p99Latency, labels);\n }\n};\n\nexport const recordErrorMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n aiService: string,\n model?: string\n): void => {\n try {\n if (instruments.errorCounter) {\n const labels = sanitizeLabels({\n operation: type,\n ai_service: aiService,\n ...(model ? 
{ model } : {}),\n });\n instruments.errorCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record error metric:', error);\n }\n};\n\nexport const recordErrorRateMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n errorRate: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.errorRateGauge) {\n instruments.errorRateGauge.record(errorRate * 100, {\n // Convert to percentage\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordRequestMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n aiService: string,\n model?: string\n): void => {\n if (instruments.requestCounter) {\n instruments.requestCounter.add(1, {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordTokenMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'input' | 'output' | 'total' | 'thoughts',\n tokens: number,\n aiService: string,\n model?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n\n // Record in the general token counter with type label\n if (instruments.tokenCounter) {\n instruments.tokenCounter.add(tokens, {\n token_type: type,\n ...labels,\n });\n }\n\n // Also record in specific counters for input/output\n if (type === 'input' && instruments.inputTokenCounter) {\n instruments.inputTokenCounter.add(tokens, labels);\n }\n\n if (type === 'output' && instruments.outputTokenCounter) {\n instruments.outputTokenCounter.add(tokens, labels);\n }\n } catch (error) {\n console.warn('Failed to record token metric:', error);\n }\n};\n\nexport const recordStreamingRequestMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n isStreaming: boolean,\n aiService: string,\n model?: string\n): void => {\n if (isStreaming && instruments.streamingRequestsCounter) {\n instruments.streamingRequestsCounter.add(1, {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordFunctionCallMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n functionName: string,\n latency?: number,\n aiService?: string,\n model?: string\n): void => {\n const labels = {\n function_name: functionName,\n ...(aiService ? { ai_service: aiService } : {}),\n ...(model ? { model } : {}),\n };\n\n if (instruments.functionCallsCounter) {\n instruments.functionCallsCounter.add(1, labels);\n }\n\n if (latency && instruments.functionCallLatencyHistogram) {\n instruments.functionCallLatencyHistogram.record(latency, labels);\n }\n};\n\nexport const recordRequestSizeMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n sizeBytes: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.requestSizeHistogram) {\n instruments.requestSizeHistogram.record(sizeBytes, {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordResponseSizeMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n sizeBytes: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.responseSizeHistogram) {\n instruments.responseSizeHistogram.record(sizeBytes, {\n operation: type,\n ai_service: aiService,\n ...(model ? 
{ model } : {}),\n });\n }\n};\n\nexport const recordModelConfigMetrics = (\n instruments: Readonly<AxAIMetricsInstruments>,\n temperature?: number,\n maxTokens?: number,\n aiService?: string,\n model?: string\n): void => {\n const labels = {\n ...(aiService ? { ai_service: aiService } : {}),\n ...(model ? { model } : {}),\n };\n\n if (temperature !== undefined && instruments.temperatureGauge) {\n instruments.temperatureGauge.record(temperature, labels);\n }\n\n if (maxTokens !== undefined && instruments.maxTokensGauge) {\n instruments.maxTokensGauge.record(maxTokens, labels);\n }\n};\n\nexport const recordEstimatedCostMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n costUSD: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.estimatedCostCounter) {\n instruments.estimatedCostCounter.add(costUSD, {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordPromptLengthMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n lengthChars: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.promptLengthHistogram) {\n instruments.promptLengthHistogram.record(lengthChars, {\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordContextWindowUsageMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n usageRatio: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.contextWindowUsageGauge) {\n instruments.contextWindowUsageGauge.record(usageRatio, {\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordTimeoutMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n aiService: string,\n model?: string\n): void => {\n if (instruments.timeoutsCounter) {\n instruments.timeoutsCounter.add(1, {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordAbortMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n type: 'chat' | 'embed',\n aiService: string,\n model?: string\n): void => {\n if (instruments.abortsCounter) {\n instruments.abortsCounter.add(1, {\n operation: type,\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordThinkingBudgetUsageMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n tokensUsed: number,\n aiService: string,\n model?: string\n): void => {\n if (instruments.thinkingBudgetUsageCounter) {\n instruments.thinkingBudgetUsageCounter.add(tokensUsed, {\n ai_service: aiService,\n ...(model ? { model } : {}),\n });\n }\n};\n\nexport const recordMultimodalRequestMetric = (\n instruments: Readonly<AxAIMetricsInstruments>,\n hasImages: boolean,\n hasAudio: boolean,\n aiService: string,\n model?: string\n): void => {\n if ((hasImages || hasAudio) && instruments.multimodalRequestsCounter) {\n instruments.multimodalRequestsCounter.add(1, {\n ai_service: aiService,\n has_images: hasImages.toString(),\n has_audio: hasAudio.toString(),\n ...(model ? 
{ model } : {}),\n });\n }\n};\n","// ReadableStream is available globally in modern browsers and Node.js 16+ via DOM types\nimport { type Span, SpanKind, context } from '@opentelemetry/api';\nimport { randomUUID } from '../util/crypto.js';\n\nimport { axGlobals } from '../dsp/globals.js';\nimport { axSpanAttributes, axSpanEvents } from '../trace/trace.js';\nimport { apiCall } from '../util/apicall.js';\nimport { RespTransformStream } from '../util/transform.js';\n\nimport { defaultLogger } from '../dsp/loggers.js';\nimport { logChatRequest, logResponse } from './debug.js';\nimport {\n type AxAIMetricsInstruments,\n getOrCreateAIMetricsInstruments,\n recordAbortMetric,\n recordContextWindowUsageMetric,\n recordErrorMetric,\n recordErrorRateMetric,\n recordEstimatedCostMetric,\n recordFunctionCallMetric,\n recordLatencyMetric,\n recordLatencyStatsMetrics,\n recordModelConfigMetrics,\n recordMultimodalRequestMetric,\n recordPromptLengthMetric,\n recordRequestMetric,\n recordRequestSizeMetric,\n recordResponseSizeMetric,\n recordStreamingRequestMetric,\n recordThinkingBudgetUsageMetric,\n recordTimeoutMetric,\n recordTokenMetric,\n} from './metrics.js';\nimport type {\n AxAIInputModelList,\n AxAIModelList,\n AxAIPromptConfig,\n AxAIService,\n AxAIServiceActionOptions,\n AxAIServiceImpl,\n AxAIServiceMetrics,\n AxAIServiceOptions,\n AxChatRequest,\n AxChatResponse,\n AxEmbedRequest,\n AxEmbedResponse,\n AxLoggerFunction,\n AxModelConfig,\n AxModelInfo,\n AxModelUsage,\n} from './types.js';\n\nexport interface AxAIFeatures {\n functions: boolean;\n streaming: boolean;\n functionCot?: boolean;\n hasThinkingBudget?: boolean;\n hasShowThoughts?: boolean;\n}\n\nexport interface AxBaseAIArgs<TModel, TEmbedModel> {\n name: string;\n apiURL: string;\n headers: () => Promise<Record<string, string>>;\n modelInfo: Readonly<AxModelInfo[]>;\n defaults: Readonly<{ model: TModel; embedModel?: TEmbedModel }>;\n options?: Readonly<AxAIServiceOptions>;\n supportFor: AxAIFeatures | ((model: TModel) => AxAIFeatures);\n models?: AxAIInputModelList<TModel, TEmbedModel>;\n}\n\nexport const axBaseAIDefaultConfig = (): AxModelConfig =>\n structuredClone({\n temperature: 0,\n topK: 40,\n topP: 0.9,\n });\n\nexport const axBaseAIDefaultCreativeConfig = (): AxModelConfig =>\n structuredClone({\n temperature: 0.4,\n topP: 0.7,\n frequencyPenalty: 0.2,\n });\n\nexport class AxBaseAI<\n TModel,\n TEmbedModel,\n TChatRequest,\n TEmbedRequest,\n TChatResponse,\n TChatResponseDelta,\n TEmbedResponse,\n> implements AxAIService<TModel, TEmbedModel>\n{\n private debug = false;\n\n private rt?: AxAIServiceOptions['rateLimiter'];\n private fetch?: AxAIServiceOptions['fetch'];\n private tracer?: AxAIServiceOptions['tracer'];\n private meter?: AxAIServiceOptions['meter'];\n private timeout?: AxAIServiceOptions['timeout'];\n private excludeContentFromTrace?: boolean;\n private models?: AxAIInputModelList<TModel, TEmbedModel>;\n private abortSignal?: AbortSignal;\n private logger: AxLoggerFunction = defaultLogger;\n\n private modelInfo: readonly AxModelInfo[];\n private modelUsage?: AxModelUsage;\n private embedModelUsage?: AxModelUsage;\n private defaults: AxBaseAIArgs<TModel, TEmbedModel>['defaults'];\n private lastUsedModelConfig?: AxModelConfig;\n private lastUsedChatModel?: TModel;\n private lastUsedEmbedModel?: TEmbedModel;\n\n protected apiURL: string;\n protected name: string;\n protected id: string;\n protected headers: () => Promise<Record<string, string>>;\n protected supportFor: AxAIFeatures | ((model: TModel) => 
AxAIFeatures);\n\n // Add private metrics tracking properties\n private metrics: AxAIServiceMetrics = {\n latency: {\n chat: {\n mean: 0,\n p95: 0,\n p99: 0,\n samples: [],\n },\n embed: {\n mean: 0,\n p95: 0,\n p99: 0,\n samples: [],\n },\n },\n errors: {\n chat: {\n count: 0,\n rate: 0,\n total: 0,\n },\n embed: {\n count: 0,\n rate: 0,\n total: 0,\n },\n },\n };\n\n constructor(\n private readonly aiImpl: Readonly<\n AxAIServiceImpl<\n TModel,\n TEmbedModel,\n TChatRequest,\n TEmbedRequest,\n TChatResponse,\n TChatResponseDelta,\n TEmbedResponse\n >\n >,\n {\n name,\n apiURL,\n headers,\n modelInfo,\n defaults,\n options = {},\n supportFor,\n models,\n }: Readonly<AxBaseAIArgs<TModel, TEmbedModel>>\n ) {\n this.name = name;\n this.apiURL = apiURL;\n this.headers = headers;\n this.supportFor = supportFor;\n this.tracer = options.tracer ?? axGlobals.tracer;\n this.meter = options.meter ?? axGlobals.meter;\n this.modelInfo = modelInfo;\n this.models = models;\n this.id = randomUUID();\n\n const model = this.getModel(defaults.model) ?? defaults.model;\n const embedModel =\n this.getEmbedModel(defaults.embedModel) ?? defaults.embedModel;\n\n this.defaults = { model, embedModel };\n\n if (\n !defaults.model ||\n typeof defaults.model !== 'string' ||\n defaults.model === ''\n ) {\n throw new Error('No model defined');\n }\n\n this.setOptions(options);\n\n if (models) {\n validateModels(models);\n }\n }\n\n private getMetricsInstruments(): AxAIMetricsInstruments | undefined {\n return getOrCreateAIMetricsInstruments(this.meter);\n }\n\n public setName(name: string): void {\n this.name = name;\n }\n\n public getId(): string {\n return this.id;\n }\n\n public setAPIURL(apiURL: string): void {\n this.apiURL = apiURL;\n }\n\n public setHeaders(headers: () => Promise<Record<string, string>>): void {\n this.headers = headers;\n }\n\n setOptions(options: Readonly<AxAIServiceOptions>): void {\n this.debug = options.debug ?? false;\n this.rt = options.rateLimiter;\n this.fetch = options.fetch;\n this.timeout = options.timeout;\n this.tracer = options.tracer ?? axGlobals.tracer;\n this.meter = options.meter ?? axGlobals.meter;\n this.excludeContentFromTrace = options.excludeContentFromTrace;\n this.abortSignal = options.abortSignal;\n this.logger = options.logger ?? defaultLogger;\n }\n\n getOptions(): Readonly<AxAIServiceOptions> {\n return {\n debug: this.debug,\n rateLimiter: this.rt,\n fetch: this.fetch,\n tracer: this.tracer,\n meter: this.meter,\n timeout: this.timeout,\n excludeContentFromTrace: this.excludeContentFromTrace,\n abortSignal: this.abortSignal,\n logger: this.logger,\n };\n }\n\n getLogger(): AxLoggerFunction {\n return this.logger;\n }\n\n getModelList(): AxAIModelList | undefined {\n const models: AxAIModelList = [];\n for (const model of this.models ?? []) {\n if (model.isInternal) {\n continue;\n }\n\n if ('model' in model && model.model) {\n models.push({\n key: model.key,\n description: model.description,\n model: model.model as string,\n });\n }\n\n if ('embedModel' in model && model.embedModel) {\n models.push({\n key: model.key,\n description: model.description,\n embedModel: model.embedModel as string,\n });\n }\n }\n\n return models;\n }\n\n getName(): string {\n return this.name;\n }\n\n getFeatures(model?: TModel): AxAIFeatures {\n return typeof this.supportFor === 'function'\n ? this.supportFor(model ?? 
this.defaults.model)\n : this.supportFor;\n }\n\n getLastUsedChatModel(): TModel | undefined {\n return this.lastUsedChatModel;\n }\n\n getLastUsedEmbedModel(): TEmbedModel | undefined {\n return this.lastUsedEmbedModel;\n }\n\n getLastUsedModelConfig(): AxModelConfig | undefined {\n return this.lastUsedModelConfig;\n }\n\n // Method to calculate percentiles\n private calculatePercentile(\n samples: readonly number[],\n percentile: number\n ): number {\n if (samples.length === 0) return 0;\n const sorted = [...samples].sort((a, b) => a - b);\n const index = Math.ceil((percentile / 100) * sorted.length) - 1;\n return sorted[index] ?? 0;\n }\n\n // Method to update latency metrics\n private updateLatencyMetrics(type: 'chat' | 'embed', duration: number): void {\n const metrics = this.metrics.latency[type];\n metrics.samples.push(duration);\n\n // Keep only last 1000 samples to prevent memory issues\n if (metrics.samples.length > 1000) {\n metrics.samples.shift();\n }\n\n // Update statistics\n metrics.mean =\n metrics.samples.reduce((a, b) => a + b, 0) / metrics.samples.length;\n metrics.p95 = this.calculatePercentile(metrics.samples, 95);\n metrics.p99 = this.calculatePercentile(metrics.samples, 99);\n\n // Export to OpenTelemetry metrics\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? (this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n\n // Record individual latency measurement\n recordLatencyMetric(metricsInstruments, type, duration, this.name, model);\n\n // Record latency statistics as gauges\n recordLatencyStatsMetrics(\n metricsInstruments,\n type,\n metrics.mean,\n metrics.p95,\n metrics.p99,\n this.name,\n model\n );\n }\n }\n\n // Method to update error metrics\n private updateErrorMetrics(type: 'chat' | 'embed', isError: boolean): void {\n const metrics = this.metrics.errors[type];\n metrics.total++;\n if (isError) {\n metrics.count++;\n }\n metrics.rate = metrics.count / metrics.total;\n\n // Export to OpenTelemetry metrics\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? 
(this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n\n // Always record request count\n recordRequestMetric(metricsInstruments, type, this.name, model);\n\n // Record error count if there was an error\n if (isError) {\n recordErrorMetric(metricsInstruments, type, this.name, model);\n }\n\n // Record current error rate as a gauge\n recordErrorRateMetric(\n metricsInstruments,\n type,\n metrics.rate,\n this.name,\n model\n );\n }\n }\n\n // Method to record token usage metrics\n private recordTokenUsage(modelUsage?: AxModelUsage): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments && modelUsage?.tokens) {\n const { promptTokens, completionTokens, totalTokens, thoughtsTokens } =\n modelUsage.tokens;\n\n if (promptTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'input',\n promptTokens,\n this.name,\n modelUsage.model\n );\n }\n\n if (completionTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'output',\n completionTokens,\n this.name,\n modelUsage.model\n );\n }\n\n if (totalTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'total',\n totalTokens,\n this.name,\n modelUsage.model\n );\n }\n\n if (thoughtsTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'thoughts',\n thoughtsTokens,\n this.name,\n modelUsage.model\n );\n }\n }\n }\n\n // Helper method to calculate request size in bytes\n private calculateRequestSize(req: unknown): number {\n try {\n return new TextEncoder().encode(JSON.stringify(req)).length;\n } catch {\n return 0;\n }\n }\n\n // Helper method to calculate response size in bytes\n private calculateResponseSize(response: unknown): number {\n try {\n return new TextEncoder().encode(JSON.stringify(response)).length;\n } catch {\n return 0;\n }\n }\n\n // Helper method to detect multimodal content\n private detectMultimodalContent(req: Readonly<AxChatRequest<TModel>>): {\n hasImages: boolean;\n hasAudio: boolean;\n } {\n let hasImages = false;\n let hasAudio = false;\n\n if (req.chatPrompt && Array.isArray(req.chatPrompt)) {\n for (const message of req.chatPrompt) {\n if (message.role === 'user' && Array.isArray(message.content)) {\n for (const part of message.content) {\n if (part.type === 'image') {\n hasImages = true;\n } else if (part.type === 'audio') {\n hasAudio = true;\n }\n }\n }\n }\n }\n\n return { hasImages, hasAudio };\n }\n\n // Helper method to calculate prompt length\n private calculatePromptLength(req: Readonly<AxChatRequest<TModel>>): number {\n let totalLength = 0;\n\n if (req.chatPrompt && Array.isArray(req.chatPrompt)) {\n for (const message of req.chatPrompt) {\n if (message.role === 'system' || message.role === 'assistant') {\n if (message.content) {\n totalLength += message.content.length;\n }\n } else if (message.role === 'user') {\n if (typeof message.content === 'string') {\n totalLength += message.content.length;\n } else if (Array.isArray(message.content)) {\n for (const part of message.content) {\n if (part.type === 'text') {\n totalLength += part.text.length;\n }\n }\n }\n } else if (message.role === 'function') {\n if (message.result) {\n totalLength += message.result.length;\n }\n }\n }\n }\n\n return totalLength;\n }\n\n // Helper method to calculate context window usage\n private calculateContextWindowUsage(\n model: TModel,\n modelUsage?: AxModelUsage\n ): number {\n if (!modelUsage?.tokens?.promptTokens) return 0;\n\n // Get model info to find context window size\n const modelInfo = this.modelInfo.find(\n (info) => info.name === (model as string)\n );\n if 
(!modelInfo?.contextWindow) return 0;\n\n return modelUsage.tokens.promptTokens / modelInfo.contextWindow;\n }\n\n // Helper method to estimate cost\n private estimateCost(model: TModel, modelUsage?: AxModelUsage): number {\n if (!modelUsage?.tokens) return 0;\n\n // Get model info to find pricing\n const modelInfo = this.modelInfo.find(\n (info) => info.name === (model as string)\n );\n if (\n !modelInfo ||\n (!modelInfo.promptTokenCostPer1M && !modelInfo.completionTokenCostPer1M)\n )\n return 0;\n\n const { promptTokens = 0, completionTokens = 0 } = modelUsage.tokens;\n const promptCostPer1M = modelInfo.promptTokenCostPer1M || 0;\n const completionCostPer1M = modelInfo.completionTokenCostPer1M || 0;\n\n return (\n (promptTokens * promptCostPer1M) / 1000000 +\n (completionTokens * completionCostPer1M) / 1000000\n );\n }\n\n // Helper method to estimate cost by model name\n private estimateCostByName(\n modelName: string,\n modelUsage?: AxModelUsage\n ): number {\n if (!modelUsage?.tokens) return 0;\n\n // Get model info to find pricing\n const modelInfo = this.modelInfo.find((info) => info.name === modelName);\n if (\n !modelInfo ||\n (!modelInfo.promptTokenCostPer1M && !modelInfo.completionTokenCostPer1M)\n )\n return 0;\n\n const { promptTokens = 0, completionTokens = 0 } = modelUsage.tokens;\n const promptCostPer1M = modelInfo.promptTokenCostPer1M || 0;\n const completionCostPer1M = modelInfo.completionTokenCostPer1M || 0;\n\n return (\n (promptTokens * promptCostPer1M) / 1000000 +\n (completionTokens * completionCostPer1M) / 1000000\n );\n }\n\n // Helper method to record function call metrics\n private recordFunctionCallMetrics(\n functionCalls?: readonly unknown[],\n model?: TModel\n ): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (!metricsInstruments || !functionCalls) return;\n\n for (const call of functionCalls) {\n if (\n call &&\n typeof call === 'object' &&\n 'function' in call &&\n call.function &&\n typeof call.function === 'object' &&\n 'name' in call.function\n ) {\n recordFunctionCallMetric(\n metricsInstruments,\n (call.function as { name: string }).name,\n undefined, // latency would need to be tracked separately\n this.name,\n model as string\n );\n }\n }\n }\n\n // Helper method to record timeout metrics\n private recordTimeoutMetric(type: 'chat' | 'embed'): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? (this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n recordTimeoutMetric(metricsInstruments, type, this.name, model);\n }\n }\n\n // Helper method to record abort metrics\n private recordAbortMetric(type: 'chat' | 'embed'): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? 
(this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n recordAbortMetric(metricsInstruments, type, this.name, model);\n }\n }\n\n // Comprehensive method to record all chat-related metrics\n private recordChatMetrics(\n req: Readonly<AxChatRequest<TModel>>,\n options?: Readonly<\n AxAIPromptConfig & AxAIServiceActionOptions<TModel, TEmbedModel>\n >,\n result?: AxChatResponse | ReadableStream<AxChatResponse>\n ): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (!metricsInstruments) return;\n\n const model = this.lastUsedChatModel as string;\n const modelConfig = this.lastUsedModelConfig;\n\n // Record streaming request metric\n const isStreaming = modelConfig?.stream ?? false;\n recordStreamingRequestMetric(\n metricsInstruments,\n 'chat',\n isStreaming,\n this.name,\n model\n );\n\n // Record multimodal request metric\n const { hasImages, hasAudio } = this.detectMultimodalContent(req);\n recordMultimodalRequestMetric(\n metricsInstruments,\n hasImages,\n hasAudio,\n this.name,\n model\n );\n\n // Record prompt length metric\n const promptLength = this.calculatePromptLength(req);\n recordPromptLengthMetric(\n metricsInstruments,\n promptLength,\n this.name,\n model\n );\n\n // Record model configuration metrics\n recordModelConfigMetrics(\n metricsInstruments,\n modelConfig?.temperature,\n modelConfig?.maxTokens,\n this.name,\n model\n );\n\n // Record thinking budget usage if applicable\n if (\n options?.thinkingTokenBudget &&\n this.modelUsage?.tokens?.thoughtsTokens\n ) {\n recordThinkingBudgetUsageMetric(\n metricsInstruments,\n this.modelUsage.tokens.thoughtsTokens,\n this.name,\n model\n );\n }\n\n // Record request size\n const requestSize = this.calculateRequestSize(req);\n recordRequestSizeMetric(\n metricsInstruments,\n 'chat',\n requestSize,\n this.name,\n model\n );\n\n // Record response size and function calls for non-streaming responses\n if (result && !isStreaming) {\n const chatResponse = result as AxChatResponse;\n const responseSize = this.calculateResponseSize(chatResponse);\n recordResponseSizeMetric(\n metricsInstruments,\n 'chat',\n responseSize,\n this.name,\n model\n );\n\n // Record function call metrics\n if (chatResponse.results) {\n for (const chatResult of chatResponse.results) {\n if (chatResult.functionCalls) {\n this.recordFunctionCallMetrics(\n chatResult.functionCalls,\n this.lastUsedChatModel\n );\n }\n }\n }\n\n // Record context window usage\n const contextUsage = this.calculateContextWindowUsage(\n this.lastUsedChatModel!,\n chatResponse.modelUsage\n );\n if (contextUsage > 0) {\n recordContextWindowUsageMetric(\n metricsInstruments,\n contextUsage,\n this.name,\n model\n );\n }\n\n // Record estimated cost\n const estimatedCost = this.estimateCost(\n this.lastUsedChatModel!,\n chatResponse.modelUsage\n );\n if (estimatedCost > 0) {\n recordEstimatedCostMetric(\n metricsInstruments,\n 'chat',\n estimatedCost,\n this.name,\n model\n );\n }\n }\n }\n\n // Comprehensive method to record all embed-related metrics\n private recordEmbedMetrics(\n req: Readonly<AxEmbedRequest<TEmbedModel>>,\n result: Readonly<AxEmbedResponse>\n ): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (!metricsInstruments) return;\n\n const model = this.lastUsedEmbedModel as string;\n\n // Record request size\n const requestSize = this.calculateRequestSize(req);\n recordRequestSizeMetric(\n metricsInstruments,\n 'embed',\n requestSize,\n this.name,\n model\n );\n\n // Record response size\n const responseSize = 
this.calculateResponseSize(result);\n recordResponseSizeMetric(\n metricsInstruments,\n 'embed',\n responseSize,\n this.name,\n model\n );\n\n // Record estimated cost\n const estimatedCost = this.estimateCostByName(model, result.modelUsage);\n if (estimatedCost > 0) {\n recordEstimatedCostMetric(\n metricsInstruments,\n 'embed',\n estimatedCost,\n this.name,\n model\n );\n }\n }\n\n // Public method to get metrics\n public getMetrics(): AxAIServiceMetrics {\n return structuredClone(this.metrics);\n }\n\n async chat(\n req: Readonly<AxChatRequest<TModel>>,\n options?: Readonly<\n AxAIPromptConfig & AxAIServiceActionOptions<TModel, TEmbedModel>\n >\n ): Promise<AxChatResponse | ReadableStream<AxChatResponse>> {\n const startTime = performance.now();\n let isError = false;\n let result: AxChatResponse | ReadableStream<AxChatResponse>;\n\n try {\n result = await this._chat1(req, options);\n return result;\n } catch (error) {\n isError = true;\n // Check for specific error types\n if (error instanceof Error) {\n if (\n error.message.includes('timeout') ||\n error.name === 'TimeoutError'\n ) {\n this.recordTimeoutMetric('chat');\n } else if (\n error.message.includes('abort') ||\n error.name === 'AbortError'\n ) {\n this.recordAbortMetric('chat');\n }\n }\n throw error;\n } finally {\n const duration = performance.now() - startTime;\n this.updateLatencyMetrics('chat', duration);\n this.updateErrorMetrics('chat', isError);\n\n // Record additional metrics if successful\n if (!isError) {\n this.recordChatMetrics(req, options, result!);\n }\n }\n }\n\n private async _chat1(\n req: Readonly<AxChatRequest<TModel>>,\n options?: Readonly<\n AxAIPromptConfig & AxAIServiceActionOptions<TModel, TEmbedModel>\n >\n ): Promise<AxChatResponse | ReadableStream<AxChatResponse>> {\n const model = this.getModel(req.model) ?? req.model ?? this.defaults.model;\n\n // Validate chat prompt messages for empty content\n if (req.chatPrompt && Array.isArray(req.chatPrompt)) {\n validateAxMessageArray(req.chatPrompt);\n }\n\n const modelConfig = {\n ...this.aiImpl.getModelConfig(),\n ...req.modelConfig,\n };\n\n // Check for thinkingTokenBudget support\n if (\n options?.thinkingTokenBudget &&\n !this.getFeatures(model).hasThinkingBudget\n ) {\n throw new Error(\n `Model ${model as string} does not support thinkingTokenBudget.`\n );\n }\n\n // Check for showThoughts support\n if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {\n throw new Error(\n `Model ${model as string} does not support showThoughts.`\n );\n }\n\n // Check for expensive model usage\n const modelInfo = this.modelInfo.find(\n (info) => info.name === (model as string)\n );\n if (modelInfo?.isExpensive && options?.useExpensiveModel !== 'yes') {\n throw new Error(\n `Model ${model as string} is marked as expensive and requires explicit confirmation. Set useExpensiveModel: \"yes\" to proceed.`\n );\n }\n\n // stream is true by default unless explicitly set to false\n modelConfig.stream =\n (options?.stream !== undefined ? options.stream : modelConfig.stream) ??\n true;\n\n const canStream = this.getFeatures(model).streaming;\n if (!canStream) {\n modelConfig.stream = false;\n }\n\n if (this.tracer) {\n return await this.tracer.startActiveSpan(\n 'AI Chat Request',\n {\n kind: SpanKind.SERVER,\n attributes: {\n [axSpanAttributes.LLM_SYSTEM]: this.name,\n [axSpanAttributes.LLM_OPERATION_NAME]: 'chat',\n [axSpanAttributes.LLM_REQUEST_MODEL]: model as string,\n [axSpanAttributes.LLM_REQUEST_MAX_TOKENS]:\n modelConfig.maxTokens ?? 
'Not set',\n [axSpanAttributes.LLM_REQUEST_TEMPERATURE]: modelConfig.temperature,\n [axSpanAttributes.LLM_REQUEST_TOP_P]: modelConfig.topP ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_TOP_K]: modelConfig.topK ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY]:\n modelConfig.frequencyPenalty ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_PRESENCE_PENALTY]:\n modelConfig.presencePenalty ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_STOP_SEQUENCES]:\n modelConfig.stopSequences?.join(', ') ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_LLM_IS_STREAMING]:\n modelConfig.stream ?? 'Not set',\n },\n },\n options?.traceContext ?? context.active(),\n async (span) => {\n return await this._chat2(model, modelConfig, req, options, span);\n }\n );\n }\n return await this._chat2(model, modelConfig, req, options);\n }\n\n private cleanupFunctionSchema(\n fn: Readonly<NonNullable<AxChatRequest['functions']>[number]>\n ): NonNullable<AxChatRequest['functions']>[number] {\n const cleanFn = { ...fn };\n if (cleanFn.parameters) {\n const cleanParams = { ...cleanFn.parameters };\n\n // Remove empty required array\n if (\n Array.isArray(cleanParams.required) &&\n cleanParams.required.length === 0\n ) {\n delete cleanParams.required;\n }\n\n // Remove empty properties object\n if (\n cleanParams.properties &&\n Object.keys(cleanParams.properties).length === 0\n ) {\n delete cleanParams.properties;\n }\n\n // After cleaning, remove the entire parameters object if it's effectively empty\n // i.e., either no keys left or just { type: 'object' } remaining.\n if (\n Object.keys(cleanParams).length === 0 ||\n (Object.keys(cleanParams).length === 1 && cleanParams.type === 'object')\n ) {\n delete cleanFn.parameters;\n } else {\n cleanFn.parameters = cleanParams;\n }\n }\n return cleanFn;\n }\n\n private async _chat2(\n model: TModel,\n modelConfig: Readonly<AxModelConfig>,\n chatReq: Readonly<Omit<AxChatRequest<TModel>, 'modelConfig'>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>,\n span?: Span\n ): Promise<AxChatResponse | ReadableStream<AxChatResponse>> {\n if (!this.aiImpl.createChatReq) {\n throw new Error('generateChatReq not implemented');\n }\n\n const debug = options?.debug ?? this.debug;\n\n let functions: NonNullable<AxChatRequest['functions']> | undefined;\n\n if (chatReq.functions && chatReq.functions.length > 0) {\n functions = chatReq.functions.map((fn) => this.cleanupFunctionSchema(fn));\n }\n\n const req = {\n ...chatReq,\n model,\n functions,\n modelConfig,\n };\n\n // Store the last used model and config\n this.lastUsedChatModel = model;\n this.lastUsedModelConfig = modelConfig;\n\n const fn = async () => {\n const [apiConfig, reqValue] = await this.aiImpl.createChatReq(\n req,\n options as AxAIPromptConfig\n );\n\n if (span?.isRecording()) {\n setChatRequestEvents(chatReq, span, this.excludeContentFromTrace);\n }\n\n const res = await apiCall(\n {\n name: apiConfig.name,\n url: this.apiURL,\n headers: await this.buildHeaders(apiConfig.headers),\n stream: modelConfig.stream,\n timeout: this.timeout,\n debug,\n fetch: this.fetch,\n span,\n abortSignal: options?.abortSignal ?? this.abortSignal,\n },\n reqValue\n );\n return res;\n };\n\n if (debug) {\n logChatRequest(\n req.chatPrompt,\n options?.debugHideSystemPrompt,\n options?.logger ?? this.logger\n );\n }\n\n const rt = options?.rateLimiter ?? this.rt;\n const rv = rt ? 
await rt(fn, { modelUsage: this.modelUsage }) : await fn();\n\n if (modelConfig.stream) {\n if (!this.aiImpl.createChatStreamResp) {\n throw new Error('generateChatResp not implemented');\n }\n\n const respFn = this.aiImpl.createChatStreamResp.bind(this);\n const wrappedRespFn =\n (state: object) => (resp: Readonly<TChatResponseDelta>) => {\n const res = respFn(resp, state);\n res.sessionId = options?.sessionId;\n\n // Only call getTokenUsage if modelUsage is not already provided by the service\n if (!res.modelUsage) {\n const tokenUsage = this.aiImpl.getTokenUsage();\n if (tokenUsage) {\n res.modelUsage = {\n ai: this.name,\n model: model as string,\n tokens: tokenUsage,\n };\n }\n }\n this.modelUsage = res.modelUsage;\n this.recordTokenUsage(res.modelUsage);\n\n if (span?.isRecording()) {\n setChatResponseEvents(res, span, this.excludeContentFromTrace);\n }\n\n if (debug) {\n logResponse(res, options?.logger ?? this.logger);\n }\n return res;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n const doneCb = async (_values: readonly AxChatResponse[]) => {\n if (span?.isRecording()) {\n span.end();\n }\n };\n\n const st = (rv as ReadableStream<TChatResponseDelta>).pipeThrough(\n new RespTransformStream<TChatResponseDelta, AxChatResponse>(\n wrappedRespFn({}),\n doneCb\n )\n );\n return st;\n }\n\n if (!this.aiImpl.createChatResp) {\n throw new Error('generateChatResp not implemented');\n }\n\n const res = this.aiImpl.createChatResp(rv as TChatResponse);\n res.sessionId = options?.sessionId;\n\n // Only call getTokenUsage if modelUsage is not already provided by the service\n if (!res.modelUsage) {\n const tokenUsage = this.aiImpl.getTokenUsage();\n if (tokenUsage) {\n res.modelUsage = {\n ai: this.name,\n model: model as string,\n tokens: tokenUsage,\n };\n }\n }\n\n if (res.modelUsage) {\n this.modelUsage = res.modelUsage;\n this.recordTokenUsage(res.modelUsage);\n }\n\n if (span?.isRecording()) {\n setChatResponseEvents(res, span, this.excludeContentFromTrace);\n span.end();\n }\n\n if (debug) {\n logResponse(res, options?.logger ?? 
this.logger);\n }\n\n return res;\n }\n\n async embed(\n req: Readonly<AxEmbedRequest<TEmbedModel>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>\n ): Promise<AxEmbedResponse> {\n const startTime = performance.now();\n let isError = false;\n let result: AxEmbedResponse;\n\n try {\n result = await this._embed1(req, options);\n return result;\n } catch (error) {\n isError = true;\n // Check for specific error types\n if (error instanceof Error) {\n if (\n error.message.includes('timeout') ||\n error.name === 'TimeoutError'\n ) {\n this.recordTimeoutMetric('embed');\n } else if (\n error.message.includes('abort') ||\n error.name === 'AbortError'\n ) {\n this.recordAbortMetric('embed');\n }\n }\n throw error;\n } finally {\n const duration = performance.now() - startTime;\n this.updateLatencyMetrics('embed', duration);\n this.updateErrorMetrics('embed', isError);\n\n // Record additional metrics if successful\n if (!isError) {\n this.recordEmbedMetrics(req, result!);\n }\n }\n }\n\n private async _embed1(\n req: Readonly<AxEmbedRequest<TEmbedModel>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>\n ): Promise<AxEmbedResponse> {\n const embedModel =\n this.getEmbedModel(req.embedModel) ??\n req.embedModel ??\n this.defaults.embedModel;\n\n if (!embedModel) {\n throw new Error('No embed model defined');\n }\n\n if (this.tracer) {\n await this.tracer?.startActiveSpan(\n 'AI Embed Request',\n {\n kind: SpanKind.SERVER,\n attributes: {\n [axSpanAttributes.LLM_SYSTEM]: this.name,\n [axSpanAttributes.LLM_OPERATION_NAME]: 'embeddings',\n [axSpanAttributes.LLM_REQUEST_MODEL]: embedModel as string,\n },\n },\n options?.traceContext ?? context.active(),\n async (span) => {\n try {\n return await this._embed2(embedModel, req, options, span);\n } finally {\n span.end();\n }\n }\n );\n }\n return this._embed2(embedModel, req, options);\n }\n\n private async _embed2(\n embedModel: TEmbedModel,\n embedReq: Readonly<AxEmbedRequest<TEmbedModel>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>,\n span?: Span\n ): Promise<AxEmbedResponse> {\n if (!this.aiImpl.createEmbedReq) {\n throw new Error('generateEmbedReq not implemented');\n }\n if (!this.aiImpl.createEmbedResp) {\n throw new Error('generateEmbedResp not implemented');\n }\n\n const debug = options?.debug ?? this.debug;\n\n const req = {\n ...embedReq,\n embedModel,\n };\n\n // Store the last used embed model\n this.lastUsedEmbedModel = embedModel;\n\n const fn = async () => {\n const [apiConfig, reqValue] = await this.aiImpl.createEmbedReq!(req);\n\n const res = await apiCall(\n {\n name: apiConfig.name,\n url: this.apiURL,\n headers: await this.buildHeaders(apiConfig.headers),\n debug,\n fetch: this.fetch,\n timeout: this.timeout,\n span,\n abortSignal: options?.abortSignal ?? this.abortSignal,\n },\n reqValue\n );\n return res;\n };\n\n const resValue = this.rt\n ? 
await this.rt(fn, { modelUsage: this.embedModelUsage })\n : await fn();\n const res = this.aiImpl.createEmbedResp!(resValue as TEmbedResponse);\n\n res.sessionId = options?.sessionId;\n\n // Only call getTokenUsage if modelUsage is not already provided by the service\n if (!res.modelUsage) {\n const tokenUsage = this.aiImpl.getTokenUsage();\n if (tokenUsage) {\n res.modelUsage = {\n ai: this.name,\n model: embedModel as string,\n tokens: tokenUsage,\n };\n }\n }\n this.embedModelUsage = res.modelUsage;\n this.recordTokenUsage(res.modelUsage);\n\n if (span?.isRecording() && res.modelUsage?.tokens) {\n span.addEvent(axSpanEvents.GEN_AI_USAGE, {\n [axSpanAttributes.LLM_USAGE_INPUT_TOKENS]:\n res.modelUsage.tokens.promptTokens,\n [axSpanAttributes.LLM_USAGE_OUTPUT_TOKENS]:\n res.modelUsage.tokens.completionTokens ?? 0,\n [axSpanAttributes.LLM_USAGE_TOTAL_TOKENS]:\n res.modelUsage.tokens.totalTokens,\n });\n }\n\n span?.end();\n return res;\n }\n\n private async buildHeaders(\n headers: Record<string, string> = {}\n ): Promise<Record<string, string>> {\n return { ...headers, ...(await this.headers()) };\n }\n\n private getModelByKey(\n modelName?: TModel | TEmbedModel\n ): AxAIInputModelList<TModel, TEmbedModel>[number] | undefined {\n if (!modelName) {\n return undefined;\n }\n const item = this.models?.find((v) => v.key === modelName);\n return item;\n }\n\n private getModel(modelName?: TModel): TModel | undefined {\n const item = this.getModelByKey(modelName);\n return item && 'model' in item ? item.model : undefined;\n }\n\n private getEmbedModel(modelName?: TEmbedModel): TEmbedModel | undefined {\n const item = this.getModelByKey(modelName);\n return item && 'embedModel' in item ? item.embedModel : undefined;\n }\n}\n\nexport function setChatRequestEvents(\n req: Readonly<AxChatRequest<unknown>>,\n span: Span,\n excludeContentFromTrace?: boolean\n): void {\n const userMessages: string[] = [];\n\n if (\n req.chatPrompt &&\n Array.isArray(req.chatPrompt) &&\n req.chatPrompt.length > 0\n ) {\n for (const prompt of req.chatPrompt) {\n switch (prompt.role) {\n case 'system':\n if (prompt.content) {\n const eventData: { content?: string } = {};\n if (!excludeContentFromTrace) {\n eventData.content = prompt.content;\n }\n span.addEvent(axSpanEvents.GEN_AI_SYSTEM_MESSAGE, eventData);\n }\n break;\n case 'user':\n if (typeof prompt.content === 'string') {\n userMessages.push(prompt.content);\n } else if (Array.isArray(prompt.content)) {\n for (const part of prompt.content) {\n if (part.type === 'text') {\n userMessages.push(part.text);\n }\n }\n }\n break;\n case 'assistant': {\n const functionCalls = prompt.functionCalls?.map((call) => {\n return {\n id: call.id,\n type: call.type,\n function: call.function.name,\n arguments: call.function.params,\n };\n });\n\n if (functionCalls && functionCalls.length > 0) {\n const eventData: { content?: string; function_calls: string } = {\n function_calls: JSON.stringify(functionCalls, null, 2),\n };\n if (!excludeContentFromTrace && prompt.content) {\n eventData.content = prompt.content;\n }\n span.addEvent(axSpanEvents.GEN_AI_ASSISTANT_MESSAGE, eventData);\n } else if (prompt.content) {\n const eventData: { content?: string } = {};\n if (!excludeContentFromTrace) {\n eventData.content = prompt.content;\n }\n span.addEvent(axSpanEvents.GEN_AI_ASSISTANT_MESSAGE, eventData);\n }\n break;\n }\n\n case 'function': {\n const eventData: { content?: string; id: string } = {\n id: prompt.functionId,\n };\n if (!excludeContentFromTrace) {\n eventData.content = 
prompt.result;\n }\n span.addEvent(axSpanEvents.GEN_AI_TOOL_MESSAGE, eventData);\n break;\n }\n }\n }\n }\n\n // Always add user message event, even if empty\n const userEventData: { content?: string } = {};\n if (!excludeContentFromTrace) {\n userEventData.content = userMessages.join('\\n');\n }\n span.addEvent(axSpanEvents.GEN_AI_USER_MESSAGE, userEventData);\n}\n\nexport function setChatResponseEvents(\n res: Readonly<AxChatResponse>,\n span: Span,\n excludeContentFromTrace?: boolean\n) {\n if (res.modelUsage?.tokens) {\n const thoughTokens = res.modelUsage.tokens.thoughtsTokens\n ? {\n [axSpanAttributes.LLM_USAGE_THOUGHTS_TOKENS]:\n res.modelUsage.tokens.thoughtsTokens,\n }\n : {};\n span.addEvent(axSpanEvents.GEN_AI_USAGE, {\n [axSpanAttributes.LLM_USAGE_INPUT_TOKENS]:\n res.modelUsage.tokens.promptTokens,\n [axSpanAttributes.LLM_USAGE_OUTPUT_TOKENS]:\n res.modelUsage.tokens.completionTokens ?? 0,\n [axSpanAttributes.LLM_USAGE_TOTAL_TOKENS]:\n res.modelUsage.tokens.totalTokens,\n ...thoughTokens,\n });\n }\n\n if (!res.results) {\n return;\n }\n\n for (let index = 0; index < res.results.length; index++) {\n const result = res.results[index];\n if (!result) {\n continue;\n }\n\n // Skip empty results that have no meaningful content to avoid empty GEN_AI_CHOICE events\n if (\n !result.content &&\n !result.thought &&\n !result.functionCalls?.length &&\n !result.finishReason\n ) {\n continue;\n }\n\n const toolCalls = result.functionCalls?.map((call) => {\n return {\n id: call.id,\n type: call.type,\n function: call.function.name,\n arguments: call.function.params,\n };\n });\n\n const message: { content?: string; tool_calls?: unknown[] } = {};\n\n if (toolCalls && toolCalls.length > 0) {\n if (!excludeContentFromTrace) {\n message.content = result.content;\n }\n message.tool_calls = toolCalls;\n } else {\n if (!excludeContentFromTrace) {\n message.content = result.content ?? '';\n }\n }\n\n span.addEvent(axSpanEvents.GEN_AI_CHOICE, {\n finish_reason: result.finishReason,\n index,\n message: JSON.stringify(message, null, 2),\n });\n }\n}\n\nexport function validateAxMessageArray<T>(values: T[]): void {\n // Validate AxMessage array items\n for (let i = 0; i < values.length; i++) {\n const message = values[i];\n if (!message || typeof message !== 'object') {\n throw new Error(\n `AxMessage array validation failed: Item at index ${i} is not a valid message object`\n );\n }\n if (\n 'content' in message &&\n typeof message.content === 'string' &&\n message.content.trim() === ''\n ) {\n throw new Error(\n `AxMessage array validation failed: Item at index ${i} has empty content`\n );\n }\n }\n}\n\nfunction validateModels<TModel, TEmbedModel>(\n models: Readonly<AxAIInputModelList<TModel, TEmbedModel>>\n): void {\n // Validate duplicate keys in models.\n const keys = new Set<string>();\n for (const model of models) {\n if (keys.has(model.key)) {\n throw new Error(\n `Duplicate model key detected: \"${model.key}\". 
Each model key must be unique.`\n );\n }\n keys.add(model.key);\n }\n}\n","import type { AxAPI } from '../../util/apicall.js';\nimport {\n AxBaseAI,\n axBaseAIDefaultConfig,\n axBaseAIDefaultCreativeConfig,\n} from '../base.js';\nimport type {\n AxAIInputModelList,\n AxAIPromptConfig,\n AxAIServiceImpl,\n AxAIServiceOptions,\n AxChatResponse,\n AxInternalChatRequest,\n AxModelConfig,\n AxTokenUsage,\n} from '../types.js';\n\nimport { axModelInfoHuggingFace } from './info.js';\nimport {\n type AxAIHuggingFaceConfig,\n AxAIHuggingFaceModel,\n type AxAIHuggingFaceRequest,\n type AxAIHuggingFaceResponse,\n} from './types.js';\n\nexport const axAIHuggingFaceDefaultConfig = (): AxAIHuggingFaceConfig =>\n structuredClone({\n model: AxAIHuggingFaceModel.MetaLlama270BChatHF,\n ...axBaseAIDefaultConfig(),\n });\n\nexport const axAIHuggingFaceCreativeConfig = (): AxAIHuggingFaceConfig =>\n structuredClone({\n model: AxAIHuggingFaceModel.MetaLlama270BChatHF,\n ...axBaseAIDefaultCreativeConfig(),\n });\n\nexport interface AxAIHuggingFaceArgs {\n name: 'huggingface';\n apiKey: string;\n config?: Readonly<Partial<AxAIHuggingFaceConfig>>;\n options?: Readonly<AxAIServiceOptions>;\n models?: AxAIInputModelList<AxAIHuggingFaceModel, undefined>;\n}\n\nclass AxAIHuggingFaceImpl\n implements\n AxAIServiceImpl<\n AxAIHuggingFaceModel,\n unknown,\n AxAIHuggingFaceRequest,\n unknown,\n AxAIHuggingFaceResponse,\n unknown,\n unknown\n >\n{\n private tokensUsed: AxTokenUsage | undefined;\n\n constructor(private config: AxAIHuggingFaceConfig) {}\n\n getTokenUsage(): AxTokenUsage | undefined {\n return this.tokensUsed;\n }\n\n getModelConfig(): AxModelConfig {\n const { config } = this;\n return {\n maxTokens: config.maxTokens,\n temperature: config.temperature,\n topP: config.topP,\n topK: config.topK,\n n: config.n,\n presencePenalty: config.presencePenalty,\n } as AxModelConfig;\n }\n\n createChatReq = (\n req: Readonly<AxInternalChatRequest<AxAIHuggingFaceModel>>,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _config: Readonly<AxAIPromptConfig>\n ): [AxAPI, AxAIHuggingFaceRequest] => {\n const model = req.model;\n\n const functionsList = req.functions\n ? `Functions:\\n${JSON.stringify(req.functions, null, 2)}\\n`\n : '';\n\n const prompt = req.chatPrompt\n ?.map((msg) => {\n switch (msg.role) {\n case 'user':\n return `User: ${msg.content}`;\n case 'system':\n return `System: ${msg.content}`;\n case 'function':\n return `Function Result: ${msg.result}`;\n case 'assistant': {\n const fc = msg.functionCalls\n ?.map((fc) => {\n const args =\n typeof fc.function.params === 'string'\n ? fc.function.params\n : JSON.stringify(fc.function.params);\n\n return `${fc.function.name}(${args})`;\n })\n .join('\\n');\n if (fc) {\n return `Assistant: ${msg.content}\\n Functions:\\n${fc}`;\n }\n return `Assistant: ${msg.content}`;\n }\n default:\n throw new Error('Unknown role');\n }\n\n //return `${msg.role}: ${msg.content}`;\n })\n .join('\\n');\n\n const inputs = `${functionsList} ${prompt}`.trim();\n\n const apiConfig = {\n name: '/models',\n };\n\n const reqValue: AxAIHuggingFaceRequest = {\n model,\n inputs,\n parameters: {\n max_new_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,\n repetition_penalty:\n req.modelConfig?.presencePenalty ?? this.config.presencePenalty,\n temperature: req.modelConfig?.temperature ?? this.config.temperature,\n top_p: req.modelConfig?.topP ?? this.config.topP,\n top_k: req.modelConfig?.topK ?? 
this.config.topK,\n return_full_text: this.config.returnFullText,\n num_return_sequences: this.config.n,\n do_sample: this.config.doSample,\n max_time: this.config.maxTime,\n },\n options: {\n use_cache: this.config.useCache,\n wait_for_model: this.config.waitForModel,\n },\n };\n\n return [apiConfig, reqValue];\n };\n\n createChatResp = (\n resp: Readonly<AxAIHuggingFaceResponse>\n ): AxChatResponse => {\n return {\n results: [\n {\n index: 0,\n content: resp.generated_text,\n },\n ],\n };\n };\n}\n\nexport class AxAIHuggingFace extends AxBaseAI<\n AxAIHuggingFaceModel,\n unknown,\n AxAIHuggingFaceRequest,\n unknown,\n AxAIHuggingFaceResponse,\n unknown,\n unknown\n> {\n constructor({\n apiKey,\n config,\n options,\n models,\n }: Readonly<Omit<AxAIHuggingFaceArgs, 'name'>>) {\n if (!apiKey || apiKey === '') {\n throw new Error('HuggingFace API key not set');\n }\n const Config = {\n ...axAIHuggingFaceDefaultConfig(),\n ...config,\n };\n\n const aiImpl = new AxAIHuggingFaceImpl(Config);\n\n super(aiImpl, {\n name: 'HuggingFace',\n apiURL: 'https://api-inference.huggingface.co',\n headers: async () => ({ Authorization: `Bearer ${apiKey}` }),\n modelInfo: axModelInfoHuggingFace,\n defaults: { model: Config.model },\n options,\n supportFor: { functions: false, streaming: false },\n models,\n });\n }\n}\n","// cspell:ignore mistral, mixtral, codestral, nemo\n\nexport enum AxAIMistralModel {\n Mistral7B = 'open-mistral-7b',\n Mistral8x7B = 'open-mixtral-8x7b',\n MistralSmall = 'mistral-small-latest',\n MistralNemo = 'mistral-nemo-latest',\n MistralLarge = 'mistral-large-latest',\n Codestral = 'codestral-latest',\n OpenCodestralMamba = 'open-codestral-mamba',\n OpenMistralNemo = 'open-mistral-nemo-latest',\n}\n\nexport enum AxAIMistralEmbedModels {\n MistralEmbed = 'mistral-embed',\n}\n","import type {\n AxAIOpenAIEmbedRequest,\n AxAIOpenAIEmbedResponse,\n AxAPI,\n} from '@ax-llm/ax/index.js';\nimport { AxAIRefusalError } from '../../util/apicall.js';\nimport type {\n AxAIPromptConfig,\n AxAIServiceImpl,\n AxChatRequest,\n AxChatResponse,\n AxChatResponseResult,\n AxInternalChatRequest,\n AxInternalEmbedRequest,\n AxModelConfig,\n AxTokenUsage,\n} from '../types.js';\nimport type {\n AxAIOpenAIResponsesCodeInterpreterToolCall,\n AxAIOpenAIResponsesComputerToolCall,\n AxAIOpenAIResponsesConfig,\n AxAIOpenAIResponsesDefineFunctionTool,\n AxAIOpenAIResponsesFileSearchToolCall,\n AxAIOpenAIResponsesImageGenerationToolCall,\n AxAIOpenAIResponsesInputContentPart,\n AxAIOpenAIResponsesInputItem,\n AxAIOpenAIResponsesInputMessageItem,\n AxAIOpenAIResponsesLocalShellToolCall,\n AxAIOpenAIResponsesMCPToolCall,\n AxAIOpenAIResponsesOutputRefusalContentPart,\n AxAIOpenAIResponsesOutputTextContentPart,\n AxAIOpenAIResponsesRequest,\n AxAIOpenAIResponsesResponse,\n AxAIOpenAIResponsesResponseDelta,\n AxAIOpenAIResponsesStreamEvent,\n AxAIOpenAIResponsesToolDefinition,\n AxAIOpenAIResponsesWebSearchToolCall,\n Mutable,\n RequestFunctionDefinition,\n ResponsesReqUpdater,\n UserMessageContentItem,\n} from './responses_types.js';\nimport { AxAIOpenAIResponsesModel } from './responses_types.js';\n\n/**\n * Checks if the given OpenAI Responses model is a thinking/reasoning model.\n * Thinking models (o1, o3, o4 series) have different parameter restrictions.\n */\nexport const isOpenAIResponsesThinkingModel = (model: string): boolean => {\n const thinkingModels = [\n AxAIOpenAIResponsesModel.O1,\n AxAIOpenAIResponsesModel.O1Mini,\n AxAIOpenAIResponsesModel.O1Pro,\n AxAIOpenAIResponsesModel.O3,\n 
AxAIOpenAIResponsesModel.O3Mini,\n AxAIOpenAIResponsesModel.O3Pro,\n AxAIOpenAIResponsesModel.O4Mini,\n ];\n return thinkingModels.includes(model as AxAIOpenAIResponsesModel);\n};\n\nexport class AxAIOpenAIResponsesImpl<\n TModel,\n TEmbedModel, // Kept for interface compatibility, but not used by this impl.\n TResponsesReq extends AxAIOpenAIResponsesRequest<TModel>,\n> implements\n AxAIServiceImpl<\n TModel,\n TEmbedModel,\n Readonly<AxAIOpenAIResponsesRequest<TModel>>, // ChatReq (now ResponsesReq)\n Readonly<AxAIOpenAIEmbedRequest<TEmbedModel>>, // EmbedReq\n Readonly<AxAIOpenAIResponsesResponse>, // ChatResp (now ResponsesResp)\n Readonly<AxAIOpenAIResponsesResponseDelta>, // ChatRespDelta (now ResponsesRespDelta)\n Readonly<AxAIOpenAIEmbedResponse> // EmbedResp\n >\n{\n private tokensUsed: AxTokenUsage | undefined;\n\n constructor(\n private readonly config: Readonly<\n AxAIOpenAIResponsesConfig<TModel, TEmbedModel>\n >,\n private readonly streamingUsage: boolean, // If /v1/responses supports include_usage for streams\n private readonly responsesReqUpdater?: ResponsesReqUpdater<\n TModel,\n TResponsesReq\n >\n ) {}\n\n getTokenUsage(): Readonly<AxTokenUsage> | undefined {\n return this.tokensUsed;\n }\n\n getModelConfig(): Readonly<AxModelConfig> {\n const { config } = this;\n return {\n maxTokens: config.maxTokens, // maps to max_output_tokens\n temperature: config.temperature,\n // presencePenalty, frequencyPenalty are not direct params in /v1/responses\n stopSequences: config.stopSequences, // /v1/responses uses 'truncation' or relies on item structure\n topP: config.topP,\n // n: config.n, // Not a direct parameter in /v1/responses\n stream: config.stream,\n };\n }\n\n private mapInternalContentToResponsesInput(\n content: ReadonlyArray<UserMessageContentItem> // Expects an array of content items, string case handled by caller\n ): ReadonlyArray<AxAIOpenAIResponsesInputContentPart> {\n const mappedParts: Mutable<AxAIOpenAIResponsesInputContentPart>[] =\n content.map((part: UserMessageContentItem) => {\n // AxUserMessageContentItem ensures part is one of {type: text}, {type: image}, {type: audio}\n if (part.type === 'text') {\n return { type: 'text', text: part.text };\n }\n if (part.type === 'image') {\n const url = `data:${part.mimeType};base64,${part.image}`;\n return {\n type: 'image_url',\n image_url: { url, details: part.details ?? 'auto' },\n };\n }\n if (part.type === 'audio') {\n return {\n type: 'input_audio',\n input_audio: { data: part.data, format: part.format ?? 
'wav' },\n };\n }\n // This should be exhaustive given AxUserMessageContentItem's definition\n const ExhaustiveCheck: never = part;\n throw new Error(\n `Unsupported content part: ${JSON.stringify(ExhaustiveCheck)}`\n );\n });\n return mappedParts as ReadonlyArray<AxAIOpenAIResponsesInputContentPart>;\n }\n\n private createResponsesReqInternalInput(\n chatPrompt: ReadonlyArray<AxChatRequest<TModel>['chatPrompt'][number]>,\n excludeSystemMessages = false // New parameter\n ): ReadonlyArray<AxAIOpenAIResponsesInputItem> {\n // Map from AxChatPromptItemType roles to AxAIOpenAI /v1/responses API roles:\n // - 'system' -> 'system' (may be skipped if excludeSystemMessages is true)\n // - 'user' -> 'user'\n // - 'assistant' -> 'assistant'\n // - 'function' -> Special handling for function call outputs (different structure)\n //\n // Note: AxAIOpenAI's /v1/responses API also supports a 'developer' role that isn't\n // currently mapped from our AxChatPromptItemType structure.\n\n const items: Mutable<AxAIOpenAIResponsesInputItem>[] = [];\n for (const msg of chatPrompt) {\n if (excludeSystemMessages && msg.role === 'system') {\n continue; // Skip system messages if they are handled by top-level 'instructions'\n }\n\n let mappedContent:\n | string\n | ReadonlyArray<AxAIOpenAIResponsesInputContentPart>;\n // Type guard for content based on role\n if (\n msg.role === 'system' ||\n msg.role === 'user' ||\n (msg.role === 'assistant' && msg.content)\n ) {\n if (typeof msg.content === 'string') {\n mappedContent = msg.content;\n } else if (Array.isArray(msg.content)) {\n // Only for user role typically\n mappedContent = this.mapInternalContentToResponsesInput(\n msg.content as ReadonlyArray<UserMessageContentItem>\n );\n } else {\n // Handle cases where content might be undefined for assistant, or unexpected type\n if (msg.role === 'assistant' && !msg.content && msg.functionCalls) {\n // This is fine, assistant message can be just functionCalls\n } else {\n throw new Error(`Invalid content type for role ${msg.role}`);\n }\n mappedContent = ''; // Default or skip\n }\n } else if (msg.role === 'function') {\n // Function role does not have 'content' in the same way, it has 'result'\n mappedContent = ''; // Placeholder, not directly used for content field in function_call_output\n } else {\n mappedContent = ''; // Default for roles that might not have content or are handled differently\n }\n\n switch (msg.role) {\n case 'system': // Will be skipped if excludeSystemMessages is true\n items.push({\n type: 'message',\n role: 'system',\n content: mappedContent as string,\n });\n break;\n case 'user':\n items.push({\n type: 'message',\n role: 'user',\n content: mappedContent,\n name: msg.name,\n });\n break;\n case 'assistant':\n if (msg.content || msg.functionCalls) {\n // Assistant can have content, functionCalls, or both\n const assistantMessage: Mutable<AxAIOpenAIResponsesInputMessageItem> =\n {\n type: 'message',\n role: 'assistant',\n content: '',\n }; // Start with empty content\n if (msg.content) {\n assistantMessage.content = mappedContent;\n }\n if (msg.name) {\n assistantMessage.name = msg.name;\n }\n // If only function calls, content might remain empty or not be applicable in the same way for AxAIOpenAI item\n // AxAIOpenAI /v1/responses expects assistant messages with tool calls to be structured carefully.\n // For now, pushing the textual content if present. 
Tool calls are separate items.\n if (msg.content)\n items.push(\n assistantMessage as AxAIOpenAIResponsesInputMessageItem\n );\n\n if (msg.functionCalls) {\n for (const call of msg.functionCalls) {\n items.push({\n type: 'function_call',\n call_id: call.id,\n name: call.function.name,\n arguments:\n typeof call.function.params === 'object'\n ? JSON.stringify(call.function.params)\n : call.function.params || '',\n });\n }\n }\n }\n break;\n case 'function': // This is a tool result\n items.push({\n type: 'function_call_output',\n call_id: msg.functionId!,\n output: msg.result!,\n });\n break;\n default: {\n // Fix for any type\n const invalidRole = (msg as { role: string }).role;\n throw new Error(`Invalid role in chat prompt: ${invalidRole}`);\n }\n }\n }\n return items as ReadonlyArray<AxAIOpenAIResponsesInputItem>;\n }\n\n createChatReq(\n req: Readonly<AxInternalChatRequest<TModel>>,\n config: Readonly<AxAIPromptConfig>\n ): [Readonly<AxAPI>, Readonly<AxAIOpenAIResponsesRequest<TModel>>] {\n const model = req.model;\n const apiConfig: Readonly<AxAPI> = { name: '/responses' };\n\n let instructionsFromPrompt: string | null = null;\n let systemMessageFoundAndUsed = false;\n if (req.chatPrompt) {\n for (const item of req.chatPrompt) {\n if (item.role === 'system' && typeof item.content === 'string') {\n instructionsFromPrompt = item.content;\n systemMessageFoundAndUsed = true;\n break;\n }\n }\n }\n\n const finalInstructions =\n instructionsFromPrompt ?? this.config.systemPrompt ?? null;\n\n const tools: ReadonlyArray<AxAIOpenAIResponsesToolDefinition> | undefined =\n req.functions?.map(\n (\n v: Readonly<RequestFunctionDefinition>\n ): AxAIOpenAIResponsesDefineFunctionTool => ({\n type: 'function' as const,\n name: v.name,\n description: v.description,\n parameters: v.parameters ?? {},\n })\n );\n\n // Set include field based on showThoughts option, but override if thinkingTokenBudget is 'none'\n const includeFields: // | 'file_search_call.results'\n 'message.input_image.image_url'[] =\n // | 'computer_call_output.output.image_url'\n // | 'reasoning.encrypted_content'\n // | 'code_interpreter_call.outputs'\n [];\n\n const isThinkingModel = isOpenAIResponsesThinkingModel(model as string);\n\n let reasoningSummary = this.config.reasoningSummary;\n\n if (!config?.showThoughts) {\n reasoningSummary = undefined;\n } else if (!reasoningSummary) {\n reasoningSummary = 'auto';\n }\n\n let reasoningEffort = this.config.reasoningEffort;\n\n // Handle thinkingTokenBudget config parameter\n if (config?.thinkingTokenBudget) {\n switch (config.thinkingTokenBudget) {\n case 'none':\n reasoningEffort = undefined;\n break;\n case 'minimal':\n reasoningEffort = 'low';\n break;\n case 'low':\n reasoningEffort = 'medium';\n break;\n case 'medium':\n case 'high':\n case 'highest':\n reasoningEffort = 'high';\n break;\n }\n }\n\n const mutableReq: Mutable<AxAIOpenAIResponsesRequest<TModel>> = {\n model,\n input: '', // Will be set below\n instructions: finalInstructions,\n tools: tools?.length ? tools : undefined,\n tool_choice:\n req.functionCall === 'none' ||\n req.functionCall === 'auto' ||\n req.functionCall === 'required'\n ? req.functionCall\n : typeof req.functionCall === 'object' && req.functionCall.function\n ? { type: 'function', name: req.functionCall.function.name }\n : undefined,\n // For thinking models, don't set these parameters as they're not supported\n ...(isThinkingModel\n ? {\n max_output_tokens:\n req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 
undefined,\n }\n : {\n temperature:\n req.modelConfig?.temperature ??\n this.config.temperature ??\n undefined,\n top_p: req.modelConfig?.topP ?? this.config.topP ?? undefined,\n presence_penalty:\n req.modelConfig?.presencePenalty ??\n this.config.presencePenalty ??\n undefined,\n frequency_penalty:\n req.modelConfig?.frequencyPenalty ??\n this.config.frequencyPenalty ??\n undefined,\n }),\n stream: req.modelConfig?.stream ?? this.config.stream ?? false, // Sourced from modelConfig or global config\n // Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization\n background: undefined,\n include: includeFields.length > 0 ? includeFields : undefined,\n metadata: undefined,\n parallel_tool_calls: this.config.parallelToolCalls,\n previous_response_id: undefined,\n ...(reasoningEffort\n ? {\n reasoning: {\n effort: reasoningEffort,\n summary: reasoningSummary,\n },\n }\n : {}),\n service_tier: this.config.serviceTier,\n store: this.config.store,\n text: undefined,\n truncation: undefined,\n user: this.config.user,\n seed: this.config.seed,\n };\n\n // Populate from this.config if properties exist on AxAIOpenAIConfig\n if (this.config.user) mutableReq.user = this.config.user;\n if (this.config.parallelToolCalls !== undefined)\n mutableReq.parallel_tool_calls = this.config.parallelToolCalls;\n if (this.config.responseFormat)\n mutableReq.text = {\n format: {\n type: this.config.responseFormat as\n | 'text'\n | 'json_object'\n | 'json_schema',\n },\n };\n if (this.config.seed) mutableReq.seed = this.config.seed;\n // TODO: Check AxAIOpenAIConfig for other fields like store, background, include, metadata, service_tier, truncation\n\n const inputItems = req.chatPrompt\n ? this.createResponsesReqInternalInput(\n req.chatPrompt,\n systemMessageFoundAndUsed\n )\n : [];\n\n if (inputItems.length > 0) {\n mutableReq.input = inputItems;\n } else if (\n req.chatPrompt &&\n req.chatPrompt.length === 1 &&\n req.chatPrompt[0]?.role === 'user' &&\n req.chatPrompt[0]?.content &&\n typeof req.chatPrompt[0].content === 'string' &&\n !finalInstructions\n ) {\n // Fallback to simple string input if only one user message and no instructions\n mutableReq.input = req.chatPrompt[0].content;\n } else if (inputItems.length === 0 && !finalInstructions) {\n throw new Error('Responses API request must have input or instructions.');\n }\n\n let currentReasoning = mutableReq.reasoning ?? 
{};\n if (this.config.reasoningEffort) {\n currentReasoning = {\n ...currentReasoning,\n effort: this.config.reasoningEffort,\n };\n }\n\n // Handle thinkingTokenBudget config parameter\n if (config?.thinkingTokenBudget) {\n switch (config.thinkingTokenBudget) {\n case 'none':\n // When thinkingTokenBudget is 'none', remove reasoning entirely\n currentReasoning = {};\n break;\n case 'minimal':\n currentReasoning = {\n ...currentReasoning,\n effort: 'low',\n };\n break;\n case 'low':\n currentReasoning = {\n ...currentReasoning,\n effort: 'medium',\n };\n break;\n case 'medium':\n case 'high':\n case 'highest':\n currentReasoning = {\n ...currentReasoning,\n effort: 'high',\n };\n break;\n }\n }\n\n if (Object.keys(currentReasoning).length > 0 && currentReasoning.effort) {\n mutableReq.reasoning = currentReasoning;\n } else {\n mutableReq.reasoning = undefined; // Ensure reasoning is not sent if empty or only has non-effort keys by mistake\n }\n\n let finalReqToProcess: Readonly<AxAIOpenAIResponsesRequest<TModel>> =\n mutableReq as Readonly<AxAIOpenAIResponsesRequest<TModel>>;\n\n if (this.responsesReqUpdater) {\n finalReqToProcess = this.responsesReqUpdater(\n finalReqToProcess as Readonly<TResponsesReq>\n );\n }\n\n return [apiConfig, finalReqToProcess];\n }\n\n // Create Chat Response from /v1/responses (non-streaming)\n createChatResp(\n resp: Readonly<AxAIOpenAIResponsesResponse>\n ): Readonly<AxChatResponse> {\n const { id, output, usage } = resp;\n\n if (usage) {\n this.tokensUsed = {\n promptTokens: usage.prompt_tokens,\n completionTokens: usage.completion_tokens,\n totalTokens: usage.total_tokens,\n };\n }\n\n const currentResult: Partial<AxChatResponseResult> = {};\n\n for (const item of output ?? []) {\n switch (item.type) {\n case 'message':\n currentResult.id = item.id;\n currentResult.content = contentToText(item.content, id);\n currentResult.finishReason =\n item.status === 'completed' ? 'stop' : 'content_filter';\n break;\n\n case 'reasoning':\n currentResult.id = item.id;\n // Use encrypted_content if available (when showThoughts is enabled), otherwise use summary\n if (item.encrypted_content) {\n currentResult.thought = item.encrypted_content;\n } else {\n currentResult.thought = item.summary\n .map((s: string | object) =>\n typeof s === 'object' ? 
JSON.stringify(s) : s\n )\n .join('\\n');\n }\n break;\n\n case 'file_search_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'file_search',\n params: {\n queries: item.queries,\n results: item.results,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'web_search_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'web_search',\n params: {\n queries: item.queries,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'computer_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'computer_use',\n params: {\n action: item.action,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'code_interpreter_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'code_interpreter',\n params: {\n code: item.code,\n results: item.results,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'image_generation_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'image_generation',\n params: {\n result: item.result,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'local_shell_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'local_shell',\n params: {\n action: item.action,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'mcp_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'mcp',\n params: {\n name: item.name,\n args: item.args,\n serverLabel: item.server_label,\n output: item.output,\n error: item.error,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'function_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: item.name,\n params: item.arguments,\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n }\n }\n\n return {\n results: [{ ...currentResult, index: 0 }],\n remoteId: id,\n };\n }\n\n // Create Chat Stream Response from /v1/responses stream events\n createChatStreamResp(\n streamEvent: Readonly<AxAIOpenAIResponsesResponseDelta>\n ): Readonly<AxChatResponse> {\n // Handle new streaming event format\n const event = streamEvent as AxAIOpenAIResponsesStreamEvent;\n\n // Create a basic result structure\n const baseResult: AxChatResponseResult = {\n index: 0,\n id: '',\n content: '',\n finishReason: 'stop',\n };\n\n let remoteId: string | undefined;\n\n switch (event.type) {\n case 'response.created':\n case 'response.in_progress':\n case 'response.queued':\n // Response lifecycle events - return empty content with metadata\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_res_0`;\n break;\n\n case 'response.output_item.added':\n // New output item added\n switch (event.item.type) {\n case 'message':\n baseResult.id = event.item.id;\n baseResult.content = contentToText(\n event.item.content,\n event.item.id\n );\n break;\n 
case 'function_call':\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: event.item.id,\n type: 'function' as const,\n function: {\n name: event.item.name,\n params: event.item.arguments,\n },\n },\n ];\n break;\n case 'file_search_call':\n {\n const fileSearchItem =\n event.item as AxAIOpenAIResponsesFileSearchToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: fileSearchItem.id,\n type: 'function' as const,\n function: {\n name: 'file_search',\n params: {\n queries: fileSearchItem.queries || [],\n results: fileSearchItem.results?.map((r) => ({\n fileId: r.file_id,\n filename: r.filename,\n score: r.score,\n text: r.text,\n attributes: r.attributes,\n })),\n },\n },\n },\n ];\n }\n break;\n case 'web_search_call':\n {\n const webSearchItem =\n event.item as AxAIOpenAIResponsesWebSearchToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: webSearchItem.id,\n type: 'function' as const,\n function: {\n name: 'web_search',\n params: {\n queries: webSearchItem.queries || [],\n },\n },\n },\n ];\n }\n break;\n case 'computer_call':\n {\n const computerItem =\n event.item as AxAIOpenAIResponsesComputerToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: computerItem.id,\n type: 'function' as const,\n function: {\n name: 'computer_use',\n params: {\n action: computerItem.action || {},\n },\n },\n },\n ];\n }\n break;\n case 'code_interpreter_call':\n {\n const codeItem =\n event.item as AxAIOpenAIResponsesCodeInterpreterToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: codeItem.id,\n type: 'function' as const,\n function: {\n name: 'code_interpreter',\n params: {\n code: codeItem.code || '',\n results: codeItem.results,\n },\n },\n },\n ];\n }\n break;\n case 'image_generation_call':\n {\n const imageItem =\n event.item as AxAIOpenAIResponsesImageGenerationToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: imageItem.id,\n type: 'function' as const,\n function: {\n name: 'image_generation',\n params: {\n result: imageItem.result,\n },\n },\n },\n ];\n }\n break;\n case 'local_shell_call':\n {\n const shellItem =\n event.item as AxAIOpenAIResponsesLocalShellToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: shellItem.id,\n type: 'function' as const,\n function: {\n name: 'local_shell',\n params: {\n action: shellItem.action || {},\n },\n },\n },\n ];\n }\n break;\n case 'mcp_call':\n {\n const mcpItem = event.item as AxAIOpenAIResponsesMCPToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: mcpItem.id,\n type: 'function' as const,\n function: {\n name: 'mcp',\n params: {\n name: mcpItem.name || '',\n args: mcpItem.args || '',\n serverLabel: mcpItem.server_label || '',\n output: mcpItem.output,\n error: mcpItem.error,\n },\n },\n },\n ];\n }\n break;\n // case 'reasoning':\n // {\n // const reasoningItem =\n // event.item as AxAIOpenAIResponsesReasoningItem\n // baseResult.id = event.item.id\n // // Use encrypted_content if available (when showThoughts is enabled), otherwise use summary\n // if (reasoningItem.encrypted_content) {\n // baseResult.thought = reasoningItem.encrypted_content\n // } else if (reasoningItem.summary) {\n // baseResult.thought = reasoningItem.summary\n // .map((s: string | object) =>\n // typeof s === 'object' ? 
JSON.stringify(s) : s\n // )\n // .join('\\n')\n // }\n // }\n // break\n }\n break;\n\n case 'response.content_part.added':\n // Content part added - return the initial text if any\n baseResult.id = event.item_id;\n baseResult.content = contentToText([event.part], event.item_id);\n break;\n\n case 'response.output_text.delta':\n // Text delta - return just the delta content\n baseResult.id = event.item_id;\n baseResult.content = event.delta;\n break;\n\n case 'response.output_text.done':\n break;\n\n case 'response.function_call_arguments.delta':\n // Function call arguments delta - return delta with empty name\n baseResult.id = event.item_id;\n baseResult.functionCalls = [\n {\n id: event.item_id,\n type: 'function' as const,\n function: {\n name: '',\n params: event.delta,\n },\n },\n ];\n break;\n\n // case 'response.function_call_arguments.done':\n // // Function call arguments done - don't return function calls here\n // // The mergeFunctionCalls will handle combining name and arguments\n // baseResult.id = event.item_id\n // baseResult.finishReason = 'function_call'\n // break\n\n case 'response.reasoning_summary_text.delta':\n // Reasoning summary delta\n baseResult.id = event.item_id;\n baseResult.thought = event.delta;\n break;\n\n // case 'response.reasoning_summary_text.done':\n // // Reasoning summary done\n // baseResult.id = event.item_id\n // baseResult.thought = event.text\n // break\n\n // File search tool events\n case 'response.file_search_call.in_progress':\n case 'response.file_search_call.searching':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.file_search_call.completed':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n // Web search tool events\n case 'response.web_search_call.in_progress':\n case 'response.web_search_call.searching':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.web_search_call.completed':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n // Image generation tool events\n case 'response.image_generation_call.in_progress':\n case 'response.image_generation_call.generating':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.image_generation_call.completed':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.image_generation_call.partial_image':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n // Could potentially add partial image data to content or a special field\n break;\n\n // MCP tool events\n case 'response.mcp_call.in_progress':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.mcp_call.arguments.delta':\n baseResult.id = event.item_id;\n baseResult.functionCalls = [\n {\n id: event.item_id,\n type: 'function' as const,\n function: {\n name: '',\n params: event.delta,\n },\n },\n ];\n break;\n\n case 'response.mcp_call.arguments.done':\n baseResult.id = event.item_id;\n baseResult.functionCalls = [\n {\n id: event.item_id,\n type: 'function' as const,\n function: {\n name: '',\n params: event.arguments,\n },\n },\n ];\n break;\n\n case 'response.mcp_call.completed':\n case 'response.mcp_call.failed':\n // These events don't have item_id, use a generic ID\n baseResult.id = 'mcp_call_event';\n baseResult.finishReason = 'function_call';\n break;\n\n 
case 'response.mcp_list_tools.in_progress':\n case 'response.mcp_list_tools.completed':\n case 'response.mcp_list_tools.failed':\n // MCP list tools events don't have item_id\n baseResult.id = 'mcp_list_tools_event';\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.output_item.done':\n // Item completion\n\n switch (event.item.type) {\n case 'message':\n baseResult.id = event.item.id;\n baseResult.finishReason =\n event.item.status === 'completed' ? 'stop' : 'error';\n break;\n case 'function_call':\n case 'file_search_call':\n case 'web_search_call':\n case 'computer_call':\n case 'code_interpreter_call':\n case 'image_generation_call':\n case 'local_shell_call':\n case 'mcp_call':\n // Tool calls completed - finishReason indicates function execution needed\n baseResult.id = event.item.id;\n baseResult.finishReason = 'function_call';\n break;\n // case 'reasoning':\n // // Reasoning completed\n // baseResult.id = event.item.id\n // break\n }\n break;\n\n case 'response.completed':\n // Response completion - handle usage\n if (event.response.usage) {\n this.tokensUsed = {\n promptTokens: event.response.usage.prompt_tokens,\n completionTokens: event.response.usage.completion_tokens,\n totalTokens: event.response.usage.total_tokens,\n };\n }\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_completed`;\n baseResult.finishReason = 'stop';\n break;\n\n case 'response.failed':\n // Response failure\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_failed`;\n baseResult.finishReason = 'error';\n break;\n\n case 'response.incomplete':\n // Response incomplete\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_incomplete`;\n baseResult.finishReason = 'length';\n break;\n\n case 'error':\n // Error event\n baseResult.id = 'error';\n baseResult.content = `Error: ${event.message}`;\n baseResult.finishReason = 'error';\n break;\n\n default:\n // For unhandled events, return empty result\n baseResult.id = 'unknown';\n break;\n }\n\n return {\n results: [baseResult],\n remoteId,\n } as Readonly<AxChatResponse>;\n }\n\n createEmbedReq(\n req: Readonly<AxInternalEmbedRequest<TEmbedModel>>\n ): [AxAPI, AxAIOpenAIEmbedRequest<TEmbedModel>] {\n const model = req.embedModel;\n\n if (!model) {\n throw new Error('Embed model not set');\n }\n\n if (!req.texts || req.texts.length === 0) {\n throw new Error('Embed texts is empty');\n }\n\n const apiConfig = {\n name: '/embeddings',\n };\n\n const reqValue = {\n model: model,\n input: req.texts,\n dimensions: this.config.dimensions,\n };\n\n return [apiConfig, reqValue];\n }\n}\n\n// const getThought = (item: AxAIOpenAIResponsesReasoningItem): string => {\n// if (item.encrypted_content) {\n// return item.encrypted_content\n// }\n// return item.summary.map((s) => s.text).join('\\n')\n// }\n\nconst contentToText = (\n content: ReadonlyArray<\n | AxAIOpenAIResponsesOutputTextContentPart\n | AxAIOpenAIResponsesOutputRefusalContentPart\n >,\n responseId?: string\n): string => {\n // Check for refusal content and throw exception\n const refusalContent = content.filter((c) => c.type === 'refusal');\n if (refusalContent.length > 0) {\n const refusalMessage = refusalContent.map((c) => c.refusal).join('\\n');\n throw new AxAIRefusalError(refusalMessage, undefined, responseId);\n }\n\n // Return only text content\n return content\n .filter((c) => c.type === 'output_text')\n .map((c) => c.text)\n .join('\\n');\n};\n","import type { AxModelInfo } from '../types.js';\n\nimport { 
AxAIRekaModel } from './types.js';\n/**\n * OpenAI: Model information\n */\nexport const axModelInfoReka: AxModelInfo[] = [\n {\n name: AxAIRekaModel.RekaCore,\n currency: 'usd',\n promptTokenCostPer1M: 3,\n completionTokenCostPer1M: 15,\n },\n {\n name: AxAIRekaModel.RekaFlash,\n currency: 'usd',\n promptTokenCostPer1M: 0.8,\n completionTokenCostPer1M: 2,\n },\n {\n name: AxAIRekaModel.RekaEdge,\n currency: 'usd',\n promptTokenCostPer1M: 0.4,\n completionTokenCostPer1M: 1,\n },\n];\n","import { apiCall } from '../util/apicall.js';\n\nimport { AxDBBase, type AxDBBaseArgs, type AxDBBaseOpOptions } from './base.js';\nimport type {\n AxDBQueryRequest,\n AxDBQueryResponse,\n AxDBUpsertRequest,\n AxDBUpsertResponse,\n} from './types.js';\n\nexport type AxDBWeaviateOpOptions = AxDBBaseOpOptions;\n\ntype AxWeaviateUpsertResponse = {\n id: string;\n result?: { errors?: { error: { message: string }[] } };\n};\n\ntype AxWeaviateQueryResponse = {\n errors?: { location: string; message: string; path: string }[];\n data: {\n Get: {\n [key: string]: {\n [key: string]: unknown;\n }[];\n };\n };\n};\n\nexport interface AxDBWeaviateArgs extends AxDBBaseArgs {\n name: 'weaviate';\n apiKey: string;\n host: string;\n fetch?: typeof fetch;\n}\n\n/**\n * Weaviate: DB Service\n */\nexport class AxDBWeaviate extends AxDBBase {\n private apiKey: string;\n private apiURL: string;\n\n constructor({\n apiKey,\n host,\n fetch,\n tracer,\n }: Readonly<Omit<AxDBWeaviateArgs, 'name'>>) {\n if (!apiKey || apiKey === '') {\n throw new Error('Weaviate API key not set');\n }\n super({ name: 'Weaviate', fetch, tracer });\n this.apiKey = apiKey;\n this.apiURL = host;\n }\n\n override _upsert = async (\n req: Readonly<AxDBUpsertRequest>,\n update?: boolean,\n options?: Readonly<AxDBWeaviateOpOptions>\n ): Promise<AxDBUpsertResponse> => {\n const res = (await apiCall(\n {\n url: this.apiURL,\n headers: { Authorization: `Bearer ${this.apiKey}` },\n name: `/v1/objects/${req.table}/${req.id}`,\n put: !!update,\n fetch: this.fetch,\n span: options?.span,\n },\n {\n id: req.id,\n class: req.table,\n tenant: req.namespace,\n vector: req.values,\n properties: req.metadata ?? {},\n }\n )) as AxWeaviateUpsertResponse;\n\n if (res?.result?.errors) {\n throw new Error(\n `Weaviate upsert failed: ${res.result.errors.error\n .map(({ message }) => message)\n .join(', ')}`\n );\n }\n\n return {\n ids: [res.id],\n };\n };\n\n override _batchUpsert = async (\n batchReq: Readonly<AxDBUpsertRequest[]>,\n update?: boolean,\n options?: Readonly<AxDBWeaviateOpOptions>\n ): Promise<AxDBUpsertResponse> => {\n if (update) {\n throw new Error('Weaviate does not support batch update');\n }\n if (batchReq.length === 0) {\n throw new Error('Batch request is empty');\n }\n const objects = batchReq.map((req) => ({\n id: req.id,\n class: req.table,\n tenant: req.namespace,\n vector: req.values,\n properties: req.metadata ?? 
{},\n }));\n\n const res = (await apiCall(\n {\n url: this.apiURL,\n headers: { Authorization: `Bearer ${this.apiKey}` },\n name: '/v1/batch/objects',\n fetch: this.fetch,\n span: options?.span,\n },\n { objects }\n )) as AxWeaviateUpsertResponse[];\n\n if (res?.some(({ result }) => result?.errors)) {\n throw new Error(\n `Weaviate batch upsert failed: ${res\n .map(({ result }) =>\n result?.errors?.error.map(({ message }) => message).join(', ')\n )\n .join(', ')}`\n );\n }\n\n return {\n ids: res.map(({ id }) => id),\n };\n };\n\n override _query = async (\n req: Readonly<AxDBQueryRequest>,\n options?: Readonly<AxDBWeaviateOpOptions>\n ): Promise<AxDBQueryResponse> => {\n let filter = '';\n\n if (req.columns && req.columns.length === 0) {\n throw new Error('Weaviate requires at least one column');\n }\n\n if (req.values) {\n filter = `nearVector: {\n vector: [${req.values.join(',')}],\n }`;\n } else if (req.text) {\n filter = `nearText: {\n concepts: ['${req.text}'],\n }`;\n } else {\n throw new Error('Weaviate requires either text or values');\n }\n\n const res = (await apiCall(\n {\n url: this.apiURL,\n headers: { Authorization: `Bearer ${this.apiKey}` },\n name: '/v1/graphql',\n fetch: this.fetch,\n span: options?.span,\n },\n {\n query: `{\n Get {\n ${req.table} (\n limit: ${req.limit || 10},\n ${filter}\n ) {\n ${req.columns?.join('\\n')}\n }\n }\n }`,\n }\n )) as AxWeaviateQueryResponse;\n\n if (res.errors) {\n throw new Error(\n `Weaviate query failed: ${res.errors\n .map(({ message }) => message)\n .join(', ')}`\n );\n }\n\n const resMatches = res.data.Get[req.table];\n\n if (!resMatches) {\n return { matches: [] };\n }\n\n const matches = resMatches.map((match) => {\n return {\n id: match.id as string,\n score: 1,\n metadata: match,\n };\n });\n return { matches } as AxDBQueryResponse;\n };\n}\n","import type { AxAIService } from '../ai/types.js';\nimport type { AxDBQueryResponse, AxDBService } from '../db/types.js';\nimport type { AxProgram } from '../dsp/program.js';\n\nexport type AxRewriteIn = { query: string };\nexport type AxRewriteOut = { rewrittenQuery: string };\n\nexport type AxRerankerIn = { query: string; items: string[] };\nexport type AxRerankerOut = { rankedItems: string[] };\n\nexport interface AxDBLoaderOptions {\n chunker?: (text: string) => string[];\n rewriter?: AxProgram<AxRewriteIn, AxRewriteOut>;\n reranker?: AxProgram<AxRerankerIn, AxRerankerOut>;\n}\n\nexport interface AxDBManagerArgs {\n ai: AxAIService;\n db: AxDBService;\n config?: AxDBLoaderOptions;\n}\n\nexport interface AxDBMatch {\n score: number;\n text: string;\n}\n\nconst table = '_internal';\n\nexport class AxDBManager {\n private ai: AxAIService;\n private db: AxDBService;\n private chunker: (text: string) => string[];\n private rewriter?: AxProgram<AxRewriteIn, AxRewriteOut>;\n private reranker?: AxProgram<AxRerankerIn, AxRerankerOut>;\n\n constructor({ ai, db, config }: Readonly<AxDBManagerArgs>) {\n this.ai = ai;\n this.db = db;\n this.chunker = config?.chunker ?? this.defaultChunker;\n this.reranker = config?.reranker;\n this.rewriter = config?.rewriter;\n }\n\n private defaultChunker = (text: string): string[] => {\n // Default chunking by paragraphs\n return text.split(/\\n\\n+/);\n };\n\n insert = async (\n text: Readonly<string | string[]>,\n options?: Readonly<{\n batchSize?: number;\n maxWordsPerChunk?: number;\n minWordsPerChunk?: number;\n abortSignal?: AbortSignal;\n }>\n ): Promise<void> => {\n try {\n const chunkerInput = Array.isArray(text)\n ? 
text.join('\\n\\n')\n : (text as string);\n\n // Chunk the text using the specified or default chunking function\n const initialChunks = this.chunker(chunkerInput).filter(\n (chunk) => chunk.length > 0\n );\n\n const maxWordsPerChunk = options?.maxWordsPerChunk;\n const minWordsPerChunk = options?.minWordsPerChunk;\n\n const chunks = processChunks({\n initialChunks,\n minWordsPerChunk,\n maxWordsPerChunk,\n });\n\n const bs = options?.batchSize ?? 10;\n\n // Process chunks in batches of 10\n for (let i = 0; i < chunks.length; i += bs) {\n const batch = chunks.slice(i, i + bs);\n\n // Get embeddings for the whole batch from the AI service in one call\n const ret = await this.ai.embed(\n { texts: batch },\n {\n abortSignal: options?.abortSignal,\n }\n );\n\n // Prepare batch for bulk upsert\n const embeddings = ret.embeddings\n .map((embedding, index) => ({\n id: `chunk_${Date.now() + index}`, // Unique ID for each chunk, adjusted by index\n table,\n values: embedding,\n metadata: { text: batch[index] ?? '' },\n }))\n .filter((v) => v.metadata?.text && v.metadata?.text.length > 0);\n\n // Batch upsert embeddings\n await this.db.batchUpsert(embeddings);\n }\n } catch (error) {\n throw new Error(`Error processing text: ${error}`);\n }\n };\n\n query = async (\n query: Readonly<string | string[] | number | number[]>,\n {\n topPercent,\n abortSignal,\n }:\n | Readonly<{ topPercent?: number; abortSignal?: AbortSignal }>\n | undefined = {}\n ): Promise<AxDBMatch[][]> => {\n const texts = Array.isArray(query) ? query : [query];\n\n if (typeof texts[0] === 'string' && this.rewriter) {\n for (const [i, text] of texts.entries()) {\n const { rewrittenQuery } = await this.rewriter.forward(this.ai, {\n query: text,\n });\n texts[i] = rewrittenQuery;\n }\n }\n\n let queries: Promise<AxDBQueryResponse>[];\n\n if (typeof texts[0] === 'string') {\n const embedResults = await this.ai.embed(\n { texts },\n {\n abortSignal,\n }\n );\n queries = embedResults.embeddings.map((values) =>\n this.db.query({ table, values })\n );\n } else {\n queries = texts.map((values) => this.db.query({ table, values }));\n }\n\n const queryResults = await Promise.all(queries);\n const res: AxDBMatch[][] = [];\n\n for (const { matches } of queryResults) {\n const m = matches\n .filter((v) => v.metadata?.text && v.metadata?.text.length > 0)\n .map(({ score, metadata }) => ({\n score,\n text: metadata?.text ?? '',\n }));\n\n const tp = topPercent && topPercent > 1 ? topPercent / 100 : topPercent;\n const resultItems = tp ? 
getTopInPercent(m, tp) : m;\n\n if (this.reranker) {\n const { rankedItems } = await this.reranker.forward(this.ai, {\n query: texts[0] as string,\n items: resultItems.map((item) => item.text),\n });\n\n const items = rankedItems\n .map((item) => resultItems.find((r) => r.text === item))\n .filter((v) => v !== undefined) as AxDBMatch[];\n\n res.push(items);\n } else {\n res.push(resultItems);\n }\n }\n\n return res;\n };\n}\n\nconst processChunks = ({\n initialChunks,\n maxWordsPerChunk = 350,\n minWordsPerChunk = 250,\n}: Readonly<{\n initialChunks: readonly string[];\n maxWordsPerChunk?: number;\n minWordsPerChunk?: number;\n}>): string[] => {\n const chunks: string[] = [];\n\n let currentChunk = '';\n let currentWordCount = 0;\n\n initialChunks.forEach((chunk) => {\n const words = chunk.split(/\\s+/); // Split the chunk into words\n const wordCount = words.length; // Count words in the current chunk\n\n if (currentWordCount + wordCount <= maxWordsPerChunk) {\n // Add to the current chunk if within the max size limit\n currentChunk += `${chunk}\\n\\n`;\n currentWordCount += wordCount;\n } else if (\n currentWordCount > 0 &&\n currentWordCount + wordCount <= maxWordsPerChunk * 1.5\n ) {\n // If the total word count exceeds the limit but is less than 150% of the maxWordsPerChunk\n currentChunk += `${chunk}\\n\\n`;\n currentWordCount += wordCount;\n } else {\n // If the current chunk is not empty and adding the new chunk exceeds the adjusted limit\n if (currentWordCount > minWordsPerChunk) {\n chunks.push(currentChunk.trim());\n currentChunk = '';\n currentWordCount = 0;\n }\n // Handle the case where the chunk itself is larger than the limit\n if (wordCount > maxWordsPerChunk) {\n const remainingWords = words;\n while (remainingWords.length > maxWordsPerChunk * 1.5) {\n const slice = remainingWords.splice(0, maxWordsPerChunk);\n chunks.push(slice.join(' '));\n }\n // Add the last portion if it fits the condition of being within 150% of maxWordsPerChunk\n if (remainingWords.length > 0) {\n currentChunk += `${remainingWords.join(' ')}\\n\\n`;\n currentWordCount += remainingWords.length;\n }\n } else {\n // If the new chunk is smaller than the maximum words per chunk\n currentChunk = `${chunk}\\n\\n`;\n currentWordCount = wordCount;\n }\n }\n });\n\n // Push the last chunk if it exists and meets the minimum words condition\n if (currentWordCount > minWordsPerChunk || chunks.length === 0) {\n chunks.push(currentChunk.trim());\n }\n return chunks;\n};\n\nconst getTopInPercent = (\n entries: readonly AxDBMatch[],\n percent = 0.1\n): AxDBMatch[] => {\n // Sort entries by score in ascending order\n const sortedEntries = [...entries].sort((a, b) => a.score - b.score);\n\n // Calculate the number of entries to take (top 10%)\n const topTenPercentCount = Math.ceil(sortedEntries.length * percent);\n\n // Return the top 10% of entries\n return sortedEntries.slice(0, topTenPercentCount);\n};\n","import {\n logChatRequest,\n logChatRequestMessage,\n logFunctionResults,\n logResponseDelta,\n logResponseResult,\n} from '../ai/debug.js';\nimport type {\n AxChatRequest,\n AxChatResponseResult,\n AxFunctionResult,\n AxLoggerFunction,\n} from '../ai/types.js';\nimport {\n axValidateChatRequestMessage,\n axValidateChatResponseResult,\n} from '../ai/validate.js';\n\nimport type { AxAIMemory, AxMemoryData } from './types.js';\n\nexport class MemoryImpl {\n private data: AxMemoryData = [];\n\n constructor(\n private options?: {\n debug?: boolean;\n debugHideSystemPrompt?: boolean;\n logger?: AxLoggerFunction;\n 
}\n ) {}\n\n addRequest(items: AxChatRequest['chatPrompt'], index: number): void {\n this.data.push(\n ...items.map((item) => {\n const value = structuredClone(item);\n return {\n role: item.role,\n chat: [{ index, value }],\n };\n })\n );\n\n if (this.options?.debug) {\n debugRequest(\n items,\n this.options?.debugHideSystemPrompt,\n this.options?.logger\n );\n }\n }\n\n addFunctionResults(results: Readonly<AxFunctionResult[]>): void {\n const chat = results.map(({ index, ...value }) => ({\n index,\n value: structuredClone(value),\n }));\n\n const lastItem = this.getLast();\n if (lastItem?.role === 'function') {\n lastItem.chat.push(...chat);\n } else {\n this.data.push({ role: 'function', chat });\n }\n\n if (this.options?.debug) {\n debugFunctionResults(results, this.options?.logger);\n }\n }\n\n addResponse(results: Readonly<AxChatResponseResult[]>): void {\n const chat = results.map(({ index, ...value }) => ({\n index,\n value: structuredClone(value),\n }));\n\n this.data.push({ role: 'assistant', chat });\n\n if (this.options?.debug) {\n for (const result of results) {\n debugResponse(result, this.options?.logger);\n }\n }\n }\n\n updateResult({\n content,\n name,\n functionCalls,\n delta,\n index,\n }: Readonly<AxChatResponseResult & { delta?: string; index: number }>): void {\n const lastItem = this.data.at(-1);\n\n const log = (logger?: AxLoggerFunction) => {\n if (this.options?.debug) {\n if (delta && typeof delta === 'string') {\n debugResponseDelta(delta, logger);\n } else {\n debugResponse({ content, name, functionCalls, index }, logger);\n }\n }\n };\n\n if (\n !lastItem ||\n lastItem.role !== 'assistant' ||\n (lastItem.role === 'assistant' && !lastItem.updatable)\n ) {\n this.data.push({\n role: 'assistant',\n updatable: true,\n chat: [\n { index, value: structuredClone({ content, name, functionCalls }) },\n ],\n });\n log(this.options?.logger);\n return;\n }\n\n const chat = lastItem.chat.find((v) => v.index === index);\n\n if (!chat) {\n lastItem.chat.push({\n index,\n value: structuredClone({ content, name, functionCalls }),\n });\n log(this.options?.logger);\n return;\n }\n\n if (typeof content === 'string' && content.trim() !== '') {\n (chat.value as { content: string }).content = content;\n }\n\n if (typeof name === 'string' && name.trim() !== '') {\n (chat.value as { name: string }).name = name;\n }\n\n if (Array.isArray(functionCalls) && functionCalls.length > 0) {\n (chat.value as { functionCalls: typeof functionCalls }).functionCalls =\n functionCalls;\n }\n\n log(this.options?.logger);\n }\n\n addTag(name: string): void {\n const lastItem = this.data.at(-1);\n if (!lastItem) {\n return;\n }\n\n if (!lastItem.tags) {\n lastItem.tags = [];\n }\n\n if (!lastItem.tags.includes(name)) {\n lastItem.tags.push(name);\n }\n }\n\n rewindToTag(name: string): AxMemoryData {\n const tagIndex = this.data.findIndex((item) => item.tags?.includes(name));\n if (tagIndex === -1) {\n throw new Error(`Tag \"${name}\" not found`);\n }\n\n // Remove and return the tagged item and everything after it\n return this.data.splice(tagIndex);\n }\n\n removeByTag(name: string): AxMemoryData {\n const indices = this.data.reduce<number[]>((acc, item, index) => {\n if (item.tags?.includes(name)) {\n acc.push(index);\n }\n return acc;\n }, []);\n\n if (indices.length === 0) {\n throw new Error(`No items found with tag \"${name}\"`);\n }\n\n return indices\n .reverse()\n .map((index) => this.data.splice(index, 1).at(0))\n .filter((item) => item !== undefined)\n .reverse();\n }\n\n history(index: 
number): AxChatRequest['chatPrompt'] {\n const result: AxChatRequest['chatPrompt'] = [];\n\n for (const { role, chat } of this.data) {\n let values: unknown;\n\n if (role === 'function') {\n values = chat.filter((v) => v.index === index).map((v) => v.value);\n } else {\n values = chat.find((v) => v.index === index)?.value;\n }\n\n if (Array.isArray(values) && values.length > 0) {\n result.push(\n ...values.map(\n (v) => ({ ...v, role }) as AxChatRequest['chatPrompt'][number]\n )\n );\n } else if (typeof values === 'object' && values !== null) {\n result.push({ ...values, role } as AxChatRequest['chatPrompt'][number]);\n }\n // Skip when values is undefined (no matching index found)\n }\n return result;\n }\n\n getLast(): AxMemoryData[number] | undefined {\n return this.data.at(-1);\n }\n\n reset(): void {\n this.data = [];\n }\n}\n\nexport class AxMemory implements AxAIMemory {\n private memories = new Map<string, MemoryImpl>();\n private defaultMemory: MemoryImpl;\n\n constructor(\n private options?: {\n debug?: boolean;\n debugHideSystemPrompt?: boolean;\n }\n ) {\n this.defaultMemory = new MemoryImpl(options);\n }\n\n private getMemory(sessionId?: string): MemoryImpl {\n if (!sessionId) {\n return this.defaultMemory;\n }\n\n if (!this.memories.has(sessionId)) {\n this.memories.set(sessionId, new MemoryImpl(this.options));\n }\n\n return this.memories.get(sessionId) as MemoryImpl;\n }\n\n addRequest(value: AxChatRequest['chatPrompt'], sessionId?: string): void {\n for (const item of value) {\n axValidateChatRequestMessage(item);\n }\n this.getMemory(sessionId).addRequest(value, 0);\n }\n\n addResponse(\n results: Readonly<AxChatResponseResult[]>,\n sessionId?: string\n ): void {\n axValidateChatResponseResult(results);\n this.getMemory(sessionId).addResponse(results);\n }\n\n addFunctionResults(\n results: Readonly<AxFunctionResult[]>,\n sessionId?: string\n ): void {\n this.getMemory(sessionId).addFunctionResults(results);\n }\n\n updateResult(\n result: Readonly<AxChatResponseResult & { delta?: string }>,\n sessionId?: string\n ): void {\n this.getMemory(sessionId).updateResult(result);\n }\n\n addTag(name: string, sessionId?: string) {\n this.getMemory(sessionId).addTag(name);\n }\n\n rewindToTag(name: string, sessionId?: string) {\n return this.getMemory(sessionId).rewindToTag(name);\n }\n\n history(index: number, sessionId?: string) {\n return this.getMemory(sessionId).history(index);\n }\n\n getLast(sessionId?: string) {\n return this.getMemory(sessionId).getLast();\n }\n\n reset(sessionId?: string): void {\n if (!sessionId) {\n this.defaultMemory.reset();\n } else {\n this.memories.set(sessionId, new MemoryImpl(this.options));\n }\n }\n}\n\nfunction debugRequest(\n value: AxChatRequest['chatPrompt'][number] | AxChatRequest['chatPrompt'],\n hideSystemPrompt?: boolean,\n logger?: AxLoggerFunction\n) {\n if (Array.isArray(value)) {\n logChatRequest(value, hideSystemPrompt, logger);\n } else {\n logChatRequestMessage(value, hideSystemPrompt, logger);\n }\n}\n\nfunction debugResponse(\n value: Readonly<AxChatResponseResult & { index: number }>,\n logger?: AxLoggerFunction\n) {\n logResponseResult(value, logger);\n}\n\nfunction debugResponseDelta(delta: string, logger?: AxLoggerFunction) {\n logResponseDelta(delta, logger);\n}\n\nfunction debugFunctionResults(\n results: Readonly<AxFunctionResult[]>,\n logger?: AxLoggerFunction\n) {\n logFunctionResults(results, logger);\n}\n","import type { AxFunctionJSONSchema } from '../ai/types.js';\n\n// Extended type to handle flexible JSON 
schemas with union types\ntype FlexibleJSONSchema = AxFunctionJSONSchema & {\n anyOf?: FlexibleJSONSchema[];\n oneOf?: FlexibleJSONSchema[];\n allOf?: FlexibleJSONSchema[];\n properties?: Record<string, FlexibleJSONSchema | undefined>;\n};\n\ninterface ValidationError {\n path: string;\n issue: string;\n fix: string;\n example?: string;\n}\n\nexport const validateJSONSchema = (\n schema: Readonly<AxFunctionJSONSchema>\n): void => {\n const errors: ValidationError[] = [];\n\n const validateSchemaObject = (\n schema: Readonly<FlexibleJSONSchema | undefined>,\n path = ''\n ): void => {\n // Skip validation if schema is undefined or null\n if (!schema || typeof schema !== 'object') {\n return;\n }\n\n const validTypes = [\n 'array',\n 'integer',\n 'number',\n 'string',\n 'boolean',\n 'null',\n 'object',\n ];\n\n // Handle schemas with anyOf (union types)\n if (schema.anyOf && Array.isArray(schema.anyOf)) {\n if (schema.anyOf.length === 0) {\n errors.push({\n path: path || 'root',\n issue: 'anyOf array is empty',\n fix: 'Add at least one schema to the anyOf array',\n example: 'anyOf: [{ type: \"string\" }, { type: \"null\" }]',\n });\n }\n // Validate each schema in anyOf\n schema.anyOf.forEach((subSchema: FlexibleJSONSchema, index: number) => {\n validateSchemaObject(subSchema, `${path}anyOf[${index}].`);\n });\n return;\n }\n\n // Handle schemas with oneOf\n if (schema.oneOf && Array.isArray(schema.oneOf)) {\n if (schema.oneOf.length === 0) {\n errors.push({\n path: path || 'root',\n issue: 'oneOf array is empty',\n fix: 'Add at least one schema to the oneOf array',\n example: 'oneOf: [{ type: \"string\" }, { type: \"number\" }]',\n });\n }\n schema.oneOf.forEach((subSchema: FlexibleJSONSchema, index: number) => {\n validateSchemaObject(subSchema, `${path}oneOf[${index}].`);\n });\n return;\n }\n\n // Handle schemas with allOf\n if (schema.allOf && Array.isArray(schema.allOf)) {\n if (schema.allOf.length === 0) {\n errors.push({\n path: path || 'root',\n issue: 'allOf array is empty',\n fix: 'Add at least one schema to the allOf array',\n example:\n 'allOf: [{ type: \"object\" }, { properties: { name: { type: \"string\" } } }]',\n });\n }\n schema.allOf.forEach((subSchema: FlexibleJSONSchema, index: number) => {\n validateSchemaObject(subSchema, `${path}allOf[${index}].`);\n });\n return;\n }\n\n // Skip validation if no type is specified (might be a reference or other valid schema)\n if (!schema.type) {\n return;\n }\n\n if (!validTypes.includes(schema.type)) {\n errors.push({\n path: path || 'root',\n issue: `Invalid type '${schema.type}'`,\n fix: `Change type to one of: ${validTypes.join(', ')}`,\n example: `{ type: \"string\" } or { type: \"object\" }`,\n });\n return;\n }\n\n if (schema.type === 'object') {\n if (schema.properties) {\n if (\n typeof schema.properties !== 'object' ||\n Array.isArray(schema.properties)\n ) {\n errors.push({\n path: path || 'root',\n issue: 'properties must be an object, not an array or primitive',\n fix: 'Change properties to be an object with property names as keys',\n example:\n 'properties: { name: { type: \"string\" }, age: { type: \"number\" } }',\n });\n } else {\n for (const key in schema.properties) {\n const value = schema.properties[key];\n // Skip undefined or null properties\n if (value === undefined || value === null) {\n continue;\n }\n if (typeof value !== 'object') {\n errors.push({\n path: `${path}${key}`,\n issue: `Property schema must be an object, got ${typeof value}`,\n fix: 'Define the property as a proper schema object',\n example: 
`${key}: { type: \"string\", description: \"...\" }`,\n });\n continue;\n }\n validateSchemaObject(value, `${path}${key}.`);\n }\n }\n }\n\n if (schema.required) {\n if (!Array.isArray(schema.required)) {\n errors.push({\n path: path || 'root',\n issue: `'required' must be an array, got ${typeof schema.required}`,\n fix: 'Change required to be an array of property names',\n example:\n 'required: [\"name\", \"email\"] instead of required: \"name,email\"',\n });\n } else if (schema.required.length === 0) {\n // This is valid but might be worth noting\n } else {\n // Validate that required properties exist in properties\n if (schema.properties) {\n for (const requiredProp of schema.required) {\n if (typeof requiredProp !== 'string') {\n errors.push({\n path: `${path}required`,\n issue: `Required property names must be strings, got ${typeof requiredProp}`,\n fix: 'Ensure all items in required array are strings',\n example:\n 'required: [\"name\", \"email\"] not required: [123, \"email\"]',\n });\n } else if (!(requiredProp in schema.properties)) {\n errors.push({\n path: `${path}required`,\n issue: `Required property '${requiredProp}' is not defined in properties`,\n fix: `Either add '${requiredProp}' to properties or remove it from required`,\n example: `properties: { ${requiredProp}: { type: \"string\" } }`,\n });\n }\n }\n }\n }\n }\n }\n\n if (schema.type === 'array') {\n if (schema.items) {\n if (typeof schema.items !== 'object') {\n errors.push({\n path: `${path}items`,\n issue: `Array items schema must be an object, got ${typeof schema.items}`,\n fix: 'Define items as a proper schema object',\n example:\n 'items: { type: \"string\" } or items: { type: \"object\", properties: {...} }',\n });\n } else {\n validateSchemaObject(schema.items, `${path}items.`);\n }\n }\n }\n };\n\n validateSchemaObject(schema);\n\n if (errors.length > 0) {\n const errorMessage = [\n 'JSON Schema validation failed:',\n '',\n ...errors.map((error, index) => {\n const parts = [\n `${index + 1}. 
Path: ${error.path}`,\n ` Issue: ${error.issue}`,\n ` Fix: ${error.fix}`,\n ];\n if (error.example) {\n parts.push(` Example: ${error.example}`);\n }\n return parts.join('\\n');\n }),\n '',\n 'Please fix these issues and try again.',\n ].join('\\n');\n\n throw new Error(errorMessage);\n }\n};\n\n// Example Usage:\n\n/*\nconst validSchema: AxFunctionJSONSchema = {\n type: 'object',\n properties: {\n id: { type: 'integer' },\n name: { type: 'string' },\n email: { type: 'string' },\n isActive: { type: 'boolean' },\n tags: {\n type: 'array',\n items: { type: 'string' }\n },\n optionalField: {\n anyOf: [\n { type: 'string' },\n { type: 'null' }\n ]\n }\n },\n required: ['id', 'name', 'email']\n};\n\nconst invalidSchema: any = {\n type: 'object',\n properties: {\n id: { type: 'integer' },\n name: { type: 'string' },\n email: { type: 'unknownType' }, // Invalid type\n isActive: { type: 'boolean' },\n tags: {\n type: 'array',\n items: { type: 'string' }\n }\n },\n required: 'id,name,email' // Invalid 'required' field\n};\n\ntry {\n validateJSONSchema(validSchema);\n} catch (error) {\n console.error('Schema validation failed:', error.message);\n}\n\ntry {\n validateJSONSchema(invalidSchema);\n} catch (error) {\n console.error('Schema validation failed:', error.message);\n}\n*/\n","import type {\n AxAIService,\n AxAIServiceActionOptions,\n AxChatRequest,\n AxChatResponseResult,\n AxFunction,\n AxFunctionResult,\n} from '../ai/types.js';\nimport type { AxMemory } from '../mem/memory.js';\n\nimport { axGlobals } from './globals.js';\nimport { validateJSONSchema } from './jsonschema.js';\n\nexport class AxFunctionError extends Error {\n constructor(\n private fields: {\n field: string;\n message: string;\n }[]\n ) {\n super();\n this.name = this.constructor.name;\n }\n\n getFields = () => this.fields;\n\n override toString(): string {\n return [\n `${this.name}: Function validation error`,\n ...this.fields.map((field) => ` - ${field.field}: ${field.message}`),\n ].join('\\n');\n }\n\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\ntype FunctionFieldErrors = ConstructorParameters<typeof AxFunctionError>[0];\n\nexport class FunctionError extends Error {\n constructor(\n private readonly fields: FunctionFieldErrors,\n private readonly func: Readonly<AxFunction>,\n private readonly funcId?: string\n ) {\n super();\n }\n\n getFunctionId = () => this.funcId;\n\n private getFieldDescription(fieldName: string): string {\n if (!this.func.parameters?.properties?.[fieldName]) {\n return '';\n }\n\n const fieldSchema = this.func.parameters.properties[fieldName];\n let description = fieldSchema.description;\n\n if (fieldSchema.enum?.length) {\n description += ` Allowed values are: ${fieldSchema.enum.join(', ')}`;\n }\n\n return description;\n }\n\n public getFixingInstructions = () => {\n const bulletPoints = this.fields.map((fieldError) => {\n const schemaDescription =\n this.getFieldDescription(fieldError.field) || '';\n return `- \\`${fieldError.field}\\` - ${fieldError.message} (${schemaDescription}).`;\n });\n\n return `Errors In Function Arguments: Fix the following invalid arguments to '${this.func.name}'\\n${bulletPoints.join('\\n')}`;\n };\n\n override toString(): string {\n return [\n `${this.name}: Function execution error in '${this.func.name}'`,\n ...this.fields.map((field) => {\n 
const description = this.getFieldDescription(field.field);\n return ` - ${field.field}: ${field.message}${description ? ` (${description})` : ''}`;\n }),\n this.funcId ? ` Function ID: ${this.funcId}` : '',\n ].join('\\n');\n }\n\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\nexport type AxChatResponseFunctionCall = {\n id: string;\n name: string;\n args: string;\n};\n\nexport class AxFunctionProcessor {\n private funcList: Readonly<AxFunction[]> = [];\n\n constructor(funcList: Readonly<AxFunction[]>) {\n this.funcList = funcList;\n }\n\n private executeFunction = async (\n fnSpec: Readonly<AxFunction>,\n func: Readonly<AxChatResponseFunctionCall>,\n options?: Readonly<AxAIServiceActionOptions>\n ) => {\n let args: unknown;\n\n if (typeof func.args === 'string' && func.args.length > 0) {\n args = JSON.parse(func.args);\n } else {\n args = func.args;\n }\n\n const opt = options\n ? {\n sessionId: options.sessionId,\n traceId: options.traceId,\n ai: options.ai,\n }\n : undefined;\n\n let res: unknown;\n if (!fnSpec.parameters) {\n res =\n fnSpec.func.length === 1 ? await fnSpec.func(opt) : await fnSpec.func();\n } else {\n res =\n fnSpec.func.length === 2\n ? await fnSpec.func(args, opt)\n : await fnSpec.func(args);\n }\n\n // Use the formatter from options or fall back to globals\n const formatter =\n options?.functionResultFormatter ?? axGlobals.functionResultFormatter;\n return formatter(res);\n };\n\n public execute = async (\n func: Readonly<AxChatResponseFunctionCall>,\n options?: Readonly<AxAIServiceActionOptions>\n ) => {\n const fnSpec = this.funcList.find(\n (v) => v.name.localeCompare(func.name) === 0\n );\n if (!fnSpec) {\n throw new Error(`Function not found: ${func.name}`);\n }\n if (!fnSpec.func) {\n throw new Error(`No handler for function: ${func.name}`);\n }\n\n // execute value function calls\n try {\n return await this.executeFunction(fnSpec, func, options);\n } catch (e) {\n if (e instanceof AxFunctionError) {\n throw new FunctionError(e.getFields(), fnSpec, func.id);\n }\n throw e;\n }\n };\n}\n\nexport type AxInputFunctionType = (\n | AxFunction\n | {\n toFunction: () => AxFunction | AxFunction[];\n }\n)[];\n\nexport const parseFunctions = (\n newFuncs: Readonly<AxInputFunctionType>,\n existingFuncs?: readonly AxFunction[]\n): AxFunction[] => {\n if (newFuncs.length === 0) {\n return [...(existingFuncs ?? [])];\n }\n\n // biome-ignore lint/complexity/useFlatMap: cannot use flatMap here\n const functions = newFuncs\n .map((f) => {\n if ('toFunction' in f) {\n return f.toFunction();\n }\n return f;\n })\n .flat();\n\n for (const fn of functions.filter((v) => v.parameters)) {\n if (fn.parameters) {\n validateJSONSchema(fn.parameters);\n }\n }\n\n return [...(existingFuncs ?? 
[]), ...functions];\n};\n\ntype ProcessFunctionsArgs = {\n ai: Readonly<AxAIService>;\n functionList: Readonly<AxFunction[]>;\n functionCalls: readonly AxChatResponseFunctionCall[];\n mem: Readonly<AxMemory>;\n sessionId?: string;\n traceId?: string;\n span?: import('@opentelemetry/api').Span;\n excludeContentFromTrace?: boolean;\n index: number;\n functionResultFormatter?: (result: unknown) => string;\n};\n\nexport const processFunctions = async ({\n ai,\n functionList,\n functionCalls,\n mem,\n sessionId,\n traceId,\n span,\n excludeContentFromTrace,\n index,\n functionResultFormatter,\n}: Readonly<ProcessFunctionsArgs>) => {\n const funcProc = new AxFunctionProcessor(functionList);\n const functionsExecuted = new Set<string>();\n\n // Map each function call to a promise that resolves to the function result or null\n const promises = functionCalls.map((func) => {\n if (!func.id) {\n throw new Error(`Function ${func.name} did not return an ID`);\n }\n\n const promise: Promise<AxFunctionResult | undefined> = funcProc\n .execute(func, { sessionId, traceId, ai, functionResultFormatter })\n .then((functionResult) => {\n functionsExecuted.add(func.name.toLowerCase());\n\n // Add telemetry event for successful function call\n if (span) {\n const eventData: { name: string; args?: string; result?: string } = {\n name: func.name,\n };\n if (!excludeContentFromTrace) {\n eventData.args = func.args;\n eventData.result = functionResult ?? '';\n }\n span.addEvent('function.call', eventData);\n }\n\n return {\n result: functionResult ?? '',\n role: 'function' as const,\n functionId: func.id,\n index,\n };\n })\n .catch((e) => {\n if (!(e instanceof FunctionError)) {\n throw e;\n }\n const result = e.getFixingInstructions();\n\n // Add telemetry event for function error\n if (span) {\n const errorEventData: {\n name: string;\n args?: string;\n message: string;\n fixing_instructions?: string;\n } = {\n name: func.name,\n message: e.toString(),\n };\n if (!excludeContentFromTrace) {\n errorEventData.args = func.args;\n errorEventData.fixing_instructions = result;\n }\n span.addEvent('function.error', errorEventData);\n }\n\n if (ai.getOptions().debug) {\n const logger = ai.getLogger();\n logger(`❌ Function Error Correction:\\n${result}`, {\n tags: ['error'],\n });\n }\n\n return {\n functionId: func.id,\n isError: true,\n index,\n result,\n role: 'function' as const,\n };\n });\n\n return promise;\n });\n\n // Wait for all promises to resolve\n const results = await Promise.all(promises);\n const functionResults = results.filter((result) => result !== undefined);\n\n mem.addFunctionResults(functionResults, sessionId);\n\n if (functionResults.some((result) => result.isError)) {\n mem.addTag('error', sessionId);\n }\n\n return functionsExecuted;\n};\n\nexport function parseFunctionCalls(\n ai: Readonly<AxAIService>,\n functionCalls: Readonly<AxChatResponseResult['functionCalls']>,\n _values: Record<string, unknown>,\n model?: string\n): AxChatResponseFunctionCall[] | undefined {\n if (!functionCalls || functionCalls.length === 0) {\n return;\n }\n if (!ai.getFeatures(model).functions) {\n throw new Error('Functions are not supported by the AI service');\n }\n\n const funcs: AxChatResponseFunctionCall[] = functionCalls.map((f) => ({\n id: f.id,\n name: f.function.name,\n args: f.function.params as string,\n }));\n\n // for (const [i, f] of funcs.entries()) {\n // values['functionName' + i] = f.name;\n // values['functionArguments' + i] =\n // typeof f.args === 'object' ? 
JSON.stringify(f.args) : f.args;\n // }\n return funcs;\n}\n\ntype FunctionCall = AxChatRequest['functionCall'] | undefined;\n\n/**\n * Utility function to parse a list of functions into AxFunction array\n */\nexport function createFunctionConfig(\n functionList?: AxInputFunctionType,\n definedFunctionCall?: FunctionCall,\n firstStep?: boolean\n): { functions: AxFunction[]; functionCall: FunctionCall } {\n const functionCall = definedFunctionCall;\n\n if (\n !firstStep &&\n (functionCall === 'required' || typeof functionCall === 'function')\n ) {\n return { functions: [], functionCall: undefined };\n }\n\n if (!functionList) {\n return { functions: [], functionCall: functionCall };\n }\n\n // biome-ignore lint/complexity/useFlatMap: you cannot use flatMap here\n const functions = functionList\n .map((f) => {\n if ('toFunction' in f) {\n return f.toFunction();\n }\n return f;\n })\n .flat();\n\n return { functions, functionCall };\n}\n","import type { Counter, Gauge, Histogram, Meter } from '@opentelemetry/api';\n\nimport { axGlobals } from './globals.js';\n\n// Metrics configuration interface\nexport interface AxMetricsConfig {\n enabled: boolean;\n enabledCategories: (\n | 'generation'\n | 'streaming'\n | 'functions'\n | 'errors'\n | 'performance'\n )[];\n maxLabelLength: number;\n samplingRate: number;\n}\n\n// Default metrics configuration\nexport const axDefaultMetricsConfig: AxMetricsConfig = {\n enabled: true,\n enabledCategories: [\n 'generation',\n 'streaming',\n 'functions',\n 'errors',\n 'performance',\n ],\n maxLabelLength: 100,\n samplingRate: 1.0,\n};\n\n// Standardized error categories for consistent error classification\nexport type AxErrorCategory =\n | 'validation_error'\n | 'assertion_error'\n | 'timeout_error'\n | 'abort_error'\n | 'network_error'\n | 'auth_error'\n | 'rate_limit_error'\n | 'function_error'\n | 'parsing_error'\n | 'unknown_error';\n\nexport interface AxGenMetricsInstruments {\n // Generation flow metrics\n generationLatencyHistogram?: Histogram;\n generationRequestsCounter?: Counter;\n generationErrorsCounter?: Counter;\n\n // Multi-step flow metrics\n multiStepGenerationsCounter?: Counter;\n stepsPerGenerationHistogram?: Histogram;\n maxStepsReachedCounter?: Counter;\n\n // Error correction metrics\n validationErrorsCounter?: Counter;\n assertionErrorsCounter?: Counter;\n errorCorrectionAttemptsHistogram?: Histogram;\n errorCorrectionSuccessCounter?: Counter;\n errorCorrectionFailureCounter?: Counter;\n maxRetriesReachedCounter?: Counter;\n\n // Function calling metrics\n functionsEnabledGenerationsCounter?: Counter;\n functionCallStepsCounter?: Counter;\n functionsExecutedPerGenerationHistogram?: Histogram;\n functionErrorCorrectionCounter?: Counter;\n\n // Field processing metrics\n fieldProcessorsExecutedCounter?: Counter;\n streamingFieldProcessorsExecutedCounter?: Counter;\n\n // Streaming specific metrics\n streamingGenerationsCounter?: Counter;\n streamingDeltasEmittedCounter?: Counter;\n streamingFinalizationLatencyHistogram?: Histogram;\n\n // Memory and samples metrics\n samplesGeneratedHistogram?: Histogram;\n resultPickerUsageCounter?: Counter;\n resultPickerLatencyHistogram?: Histogram;\n\n // Signature complexity metrics\n inputFieldsGauge?: Gauge;\n outputFieldsGauge?: Gauge;\n examplesUsedGauge?: Gauge;\n demosUsedGauge?: Gauge;\n\n // Performance metrics\n promptRenderLatencyHistogram?: Histogram;\n extractionLatencyHistogram?: Histogram;\n assertionLatencyHistogram?: Histogram;\n\n // State management\n stateCreationLatencyHistogram?: 
Histogram;\n memoryUpdateLatencyHistogram?: Histogram;\n}\n\n// Singleton instance for metrics instruments\nlet globalGenMetricsInstruments: AxGenMetricsInstruments | undefined;\n\n// Function to get or create metrics instruments (singleton pattern)\nexport const getOrCreateGenMetricsInstruments = (\n meter?: Meter\n): AxGenMetricsInstruments | undefined => {\n // Return existing instance if available\n if (globalGenMetricsInstruments) {\n return globalGenMetricsInstruments;\n }\n\n // Try to use provided meter or fall back to global\n const activeMeter = meter ?? axGlobals.meter;\n if (activeMeter) {\n globalGenMetricsInstruments = createGenMetricsInstruments(activeMeter);\n return globalGenMetricsInstruments;\n }\n\n return undefined;\n};\n\n// Function to reset the singleton (useful for testing)\nexport const resetGenMetricsInstruments = (): void => {\n globalGenMetricsInstruments = undefined;\n};\n\n// Health check for metrics system\nexport const axCheckMetricsHealth = (): {\n healthy: boolean;\n issues: string[];\n} => {\n const issues: string[] = [];\n\n if (!axGlobals.meter) {\n issues.push('Global meter not initialized');\n }\n\n if (!globalGenMetricsInstruments && axGlobals.meter) {\n issues.push('Metrics instruments not created despite available meter');\n }\n\n return {\n healthy: issues.length === 0,\n issues,\n };\n};\n\nexport const createGenMetricsInstruments = (\n meter: Meter\n): AxGenMetricsInstruments => {\n return {\n // Generation flow metrics\n // Note: Histogram buckets should be configured at the exporter level\n // Recommended buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000] ms\n generationLatencyHistogram: meter.createHistogram(\n 'ax_gen_generation_duration_ms',\n {\n description: 'End-to-end duration of AxGen generation requests',\n unit: 'ms',\n }\n ),\n\n generationRequestsCounter: meter.createCounter(\n 'ax_gen_generation_requests_total',\n {\n description: 'Total number of AxGen generation requests',\n }\n ),\n\n generationErrorsCounter: meter.createCounter(\n 'ax_gen_generation_errors_total',\n {\n description: 'Total number of failed AxGen generations',\n }\n ),\n\n // Multi-step flow metrics\n multiStepGenerationsCounter: meter.createCounter(\n 'ax_gen_multistep_generations_total',\n {\n description: 'Total number of generations that required multiple steps',\n }\n ),\n\n stepsPerGenerationHistogram: meter.createHistogram(\n 'ax_gen_steps_per_generation',\n {\n description: 'Number of steps taken per generation',\n }\n ),\n\n maxStepsReachedCounter: meter.createCounter(\n 'ax_gen_max_steps_reached_total',\n {\n description: 'Total number of generations that hit max steps limit',\n }\n ),\n\n // Error correction metrics\n validationErrorsCounter: meter.createCounter(\n 'ax_gen_validation_errors_total',\n {\n description: 'Total number of validation errors encountered',\n }\n ),\n\n assertionErrorsCounter: meter.createCounter(\n 'ax_gen_assertion_errors_total',\n {\n description: 'Total number of assertion errors encountered',\n }\n ),\n\n errorCorrectionAttemptsHistogram: meter.createHistogram(\n 'ax_gen_error_correction_attempts',\n {\n description: 'Number of error correction attempts per generation',\n }\n ),\n\n errorCorrectionSuccessCounter: meter.createCounter(\n 'ax_gen_error_correction_success_total',\n {\n description: 'Total number of successful error corrections',\n }\n ),\n\n errorCorrectionFailureCounter: meter.createCounter(\n 'ax_gen_error_correction_failure_total',\n {\n description: 'Total number of failed error 
corrections',\n }\n ),\n\n maxRetriesReachedCounter: meter.createCounter(\n 'ax_gen_max_retries_reached_total',\n {\n description: 'Total number of generations that hit max retries limit',\n }\n ),\n\n // Function calling metrics\n functionsEnabledGenerationsCounter: meter.createCounter(\n 'ax_gen_functions_enabled_generations_total',\n {\n description: 'Total number of generations with functions enabled',\n }\n ),\n\n functionCallStepsCounter: meter.createCounter(\n 'ax_gen_function_call_steps_total',\n {\n description: 'Total number of steps that included function calls',\n }\n ),\n\n functionsExecutedPerGenerationHistogram: meter.createHistogram(\n 'ax_gen_functions_executed_per_generation',\n {\n description: 'Number of unique functions executed per generation',\n }\n ),\n\n functionErrorCorrectionCounter: meter.createCounter(\n 'ax_gen_function_error_correction_total',\n {\n description: 'Total number of function-related error corrections',\n }\n ),\n\n // Field processing metrics\n fieldProcessorsExecutedCounter: meter.createCounter(\n 'ax_gen_field_processors_executed_total',\n {\n description: 'Total number of field processors executed',\n }\n ),\n\n streamingFieldProcessorsExecutedCounter: meter.createCounter(\n 'ax_gen_streaming_field_processors_executed_total',\n {\n description: 'Total number of streaming field processors executed',\n }\n ),\n\n // Streaming specific metrics\n streamingGenerationsCounter: meter.createCounter(\n 'ax_gen_streaming_generations_total',\n {\n description: 'Total number of streaming generations',\n }\n ),\n\n streamingDeltasEmittedCounter: meter.createCounter(\n 'ax_gen_streaming_deltas_emitted_total',\n {\n description: 'Total number of streaming deltas emitted',\n }\n ),\n\n streamingFinalizationLatencyHistogram: meter.createHistogram(\n 'ax_gen_streaming_finalization_duration_ms',\n {\n description: 'Duration of streaming response finalization',\n unit: 'ms',\n }\n ),\n\n // Memory and samples metrics\n samplesGeneratedHistogram: meter.createHistogram(\n 'ax_gen_samples_generated',\n {\n description: 'Number of samples generated per request',\n }\n ),\n\n resultPickerUsageCounter: meter.createCounter(\n 'ax_gen_result_picker_usage_total',\n {\n description: 'Total number of times result picker was used',\n }\n ),\n\n resultPickerLatencyHistogram: meter.createHistogram(\n 'ax_gen_result_picker_duration_ms',\n {\n description: 'Duration of result picker execution',\n unit: 'ms',\n }\n ),\n\n // Signature complexity metrics\n inputFieldsGauge: meter.createGauge('ax_gen_input_fields', {\n description: 'Number of input fields in signature',\n }),\n\n outputFieldsGauge: meter.createGauge('ax_gen_output_fields', {\n description: 'Number of output fields in signature',\n }),\n\n examplesUsedGauge: meter.createGauge('ax_gen_examples_used', {\n description: 'Number of examples used in generation',\n }),\n\n demosUsedGauge: meter.createGauge('ax_gen_demos_used', {\n description: 'Number of demos used in generation',\n }),\n\n // Performance metrics\n promptRenderLatencyHistogram: meter.createHistogram(\n 'ax_gen_prompt_render_duration_ms',\n {\n description: 'Duration of prompt template rendering',\n unit: 'ms',\n }\n ),\n\n extractionLatencyHistogram: meter.createHistogram(\n 'ax_gen_extraction_duration_ms',\n {\n description: 'Duration of value extraction from responses',\n unit: 'ms',\n }\n ),\n\n assertionLatencyHistogram: meter.createHistogram(\n 'ax_gen_assertion_duration_ms',\n {\n description: 'Duration of assertion checking',\n unit: 'ms',\n }\n 
),\n\n // State management\n stateCreationLatencyHistogram: meter.createHistogram(\n 'ax_gen_state_creation_duration_ms',\n {\n description: 'Duration of state creation for multiple samples',\n unit: 'ms',\n }\n ),\n\n memoryUpdateLatencyHistogram: meter.createHistogram(\n 'ax_gen_memory_update_duration_ms',\n {\n description: 'Duration of memory updates during generation',\n unit: 'ms',\n }\n ),\n };\n};\n\n// Global metrics configuration\nlet currentMetricsConfig: AxMetricsConfig = axDefaultMetricsConfig;\n\n// Function to update metrics configuration\nexport const axUpdateMetricsConfig = (\n config: Readonly<Partial<AxMetricsConfig>>\n): void => {\n currentMetricsConfig = { ...currentMetricsConfig, ...config };\n};\n\n// Function to get current metrics configuration\nexport const axGetMetricsConfig = (): AxMetricsConfig => {\n return { ...currentMetricsConfig };\n};\n\n// Utility function to sanitize metric labels\nconst sanitizeLabels = (\n labels: Record<string, unknown>\n): Record<string, string> => {\n const sanitized: Record<string, string> = {};\n for (const [key, value] of Object.entries(labels)) {\n if (value !== undefined && value !== null) {\n const stringValue = String(value);\n // Limit label length based on configuration\n const maxLength = currentMetricsConfig.maxLabelLength;\n sanitized[key] =\n stringValue.length > maxLength\n ? stringValue.substring(0, maxLength)\n : stringValue;\n }\n }\n return sanitized;\n};\n\n// Recording functions for generation flow metrics\nexport const recordGenerationMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n duration: number,\n success: boolean,\n signatureName?: string,\n aiService?: string,\n model?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n success: success.toString(),\n ...(signatureName ? { signature: signatureName } : {}),\n ...(aiService ? { ai_service: aiService } : {}),\n ...(model ? { model } : {}),\n });\n\n if (instruments.generationLatencyHistogram) {\n instruments.generationLatencyHistogram.record(duration, labels);\n }\n\n if (instruments.generationRequestsCounter) {\n instruments.generationRequestsCounter.add(1, labels);\n }\n\n if (!success && instruments.generationErrorsCounter) {\n instruments.generationErrorsCounter.add(1, labels);\n }\n } catch (error) {\n // Log error but don't propagate to avoid breaking the main flow\n console.warn('Failed to record generation metric:', error);\n }\n};\n\n// Recording functions for multi-step metrics\nexport const recordMultiStepMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n stepsUsed: number,\n maxSteps: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n ...(signatureName ? 
{ signature: signatureName } : {}),\n });\n\n if (stepsUsed > 1 && instruments.multiStepGenerationsCounter) {\n instruments.multiStepGenerationsCounter.add(1, labels);\n }\n\n if (instruments.stepsPerGenerationHistogram) {\n instruments.stepsPerGenerationHistogram.record(stepsUsed, labels);\n }\n\n if (stepsUsed >= maxSteps && instruments.maxStepsReachedCounter) {\n instruments.maxStepsReachedCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record multi-step metric:', error);\n }\n};\n\n// Recording functions for error correction metrics\nexport const recordValidationErrorMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n errorType: 'validation' | 'assertion',\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n error_type: errorType,\n ...(signatureName ? { signature: signatureName } : {}),\n });\n\n if (errorType === 'validation' && instruments.validationErrorsCounter) {\n instruments.validationErrorsCounter.add(1, labels);\n }\n\n if (errorType === 'assertion' && instruments.assertionErrorsCounter) {\n instruments.assertionErrorsCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record validation error metric:', error);\n }\n};\n\nexport const recordErrorCorrectionMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n attempts: number,\n success: boolean,\n maxRetries: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n success: success.toString(),\n ...(signatureName ? { signature: signatureName } : {}),\n });\n\n if (instruments.errorCorrectionAttemptsHistogram) {\n instruments.errorCorrectionAttemptsHistogram.record(attempts, labels);\n }\n\n if (success && instruments.errorCorrectionSuccessCounter) {\n instruments.errorCorrectionSuccessCounter.add(1, labels);\n }\n\n if (!success) {\n if (instruments.errorCorrectionFailureCounter) {\n instruments.errorCorrectionFailureCounter.add(1, labels);\n }\n if (attempts >= maxRetries && instruments.maxRetriesReachedCounter) {\n instruments.maxRetriesReachedCounter.add(1, labels);\n }\n }\n } catch (error) {\n console.warn('Failed to record error correction metric:', error);\n }\n};\n\n// Recording functions for function calling metrics\nexport const recordFunctionCallingMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n functionsEnabled: boolean,\n functionsExecuted: number,\n hadFunctionCalls: boolean,\n functionErrorCorrection = false,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n functions_enabled: functionsEnabled.toString(),\n had_function_calls: hadFunctionCalls.toString(),\n ...(signatureName ? 
{ signature: signatureName } : {}),\n });\n\n if (functionsEnabled && instruments.functionsEnabledGenerationsCounter) {\n instruments.functionsEnabledGenerationsCounter.add(1, labels);\n }\n\n if (hadFunctionCalls && instruments.functionCallStepsCounter) {\n instruments.functionCallStepsCounter.add(1, labels);\n }\n\n if (\n functionsExecuted > 0 &&\n instruments.functionsExecutedPerGenerationHistogram\n ) {\n instruments.functionsExecutedPerGenerationHistogram.record(\n functionsExecuted,\n labels\n );\n }\n\n if (functionErrorCorrection && instruments.functionErrorCorrectionCounter) {\n instruments.functionErrorCorrectionCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record function calling metric:', error);\n }\n};\n\n// Recording functions for field processing metrics\nexport const recordFieldProcessingMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n fieldProcessorsExecuted: number,\n streamingFieldProcessorsExecuted: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n ...(signatureName ? { signature: signatureName } : {}),\n });\n\n if (\n fieldProcessorsExecuted > 0 &&\n instruments.fieldProcessorsExecutedCounter\n ) {\n instruments.fieldProcessorsExecutedCounter.add(\n fieldProcessorsExecuted,\n labels\n );\n }\n\n if (\n streamingFieldProcessorsExecuted > 0 &&\n instruments.streamingFieldProcessorsExecutedCounter\n ) {\n instruments.streamingFieldProcessorsExecutedCounter.add(\n streamingFieldProcessorsExecuted,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record field processing metric:', error);\n }\n};\n\n// Recording functions for streaming metrics\nexport const recordStreamingMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n isStreaming: boolean,\n deltasEmitted: number,\n finalizationDuration?: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n is_streaming: isStreaming.toString(),\n ...(signatureName ? { signature: signatureName } : {}),\n });\n\n if (isStreaming && instruments.streamingGenerationsCounter) {\n instruments.streamingGenerationsCounter.add(1, labels);\n }\n\n if (deltasEmitted > 0 && instruments.streamingDeltasEmittedCounter) {\n instruments.streamingDeltasEmittedCounter.add(deltasEmitted, labels);\n }\n\n if (\n finalizationDuration &&\n instruments.streamingFinalizationLatencyHistogram\n ) {\n instruments.streamingFinalizationLatencyHistogram.record(\n finalizationDuration,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record streaming metric:', error);\n }\n};\n\n// Recording functions for samples metrics\nexport const recordSamplesMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n samplesCount: number,\n resultPickerUsed: boolean,\n resultPickerLatency?: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n result_picker_used: resultPickerUsed.toString(),\n ...(signatureName ? 
{ signature: signatureName } : {}),\n });\n\n if (instruments.samplesGeneratedHistogram) {\n instruments.samplesGeneratedHistogram.record(samplesCount, labels);\n }\n\n if (resultPickerUsed && instruments.resultPickerUsageCounter) {\n instruments.resultPickerUsageCounter.add(1, labels);\n }\n\n if (resultPickerLatency && instruments.resultPickerLatencyHistogram) {\n instruments.resultPickerLatencyHistogram.record(\n resultPickerLatency,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record samples metric:', error);\n }\n};\n\n// Recording functions for signature complexity metrics\nexport const recordSignatureComplexityMetrics = (\n instruments: Readonly<AxGenMetricsInstruments>,\n inputFields: number,\n outputFields: number,\n examplesCount: number,\n demosCount: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n ...(signatureName ? { signature: signatureName } : {}),\n });\n\n if (instruments.inputFieldsGauge) {\n instruments.inputFieldsGauge.record(inputFields, labels);\n }\n\n if (instruments.outputFieldsGauge) {\n instruments.outputFieldsGauge.record(outputFields, labels);\n }\n\n if (instruments.examplesUsedGauge) {\n instruments.examplesUsedGauge.record(examplesCount, labels);\n }\n\n if (instruments.demosUsedGauge) {\n instruments.demosUsedGauge.record(demosCount, labels);\n }\n } catch (error) {\n console.warn('Failed to record signature complexity metrics:', error);\n }\n};\n\n// Recording functions for performance metrics\nexport const recordPerformanceMetric = (\n instruments: Readonly<AxGenMetricsInstruments>,\n metricType:\n | 'prompt_render'\n | 'extraction'\n | 'assertion'\n | 'state_creation'\n | 'memory_update',\n duration: number,\n signatureName?: string\n): void => {\n try {\n const labels = sanitizeLabels({\n metric_type: metricType,\n ...(signatureName ? 
{ signature: signatureName } : {}),\n });\n\n switch (metricType) {\n case 'prompt_render':\n if (instruments.promptRenderLatencyHistogram) {\n instruments.promptRenderLatencyHistogram.record(duration, labels);\n }\n break;\n case 'extraction':\n if (instruments.extractionLatencyHistogram) {\n instruments.extractionLatencyHistogram.record(duration, labels);\n }\n break;\n case 'assertion':\n if (instruments.assertionLatencyHistogram) {\n instruments.assertionLatencyHistogram.record(duration, labels);\n }\n break;\n case 'state_creation':\n if (instruments.stateCreationLatencyHistogram) {\n instruments.stateCreationLatencyHistogram.record(duration, labels);\n }\n break;\n case 'memory_update':\n if (instruments.memoryUpdateLatencyHistogram) {\n instruments.memoryUpdateLatencyHistogram.record(duration, labels);\n }\n break;\n }\n } catch (error) {\n console.warn('Failed to record performance metric:', error);\n }\n};\n","/* eslint-disable functional/prefer-immutable-types */\nimport { ColorLog } from '../util/log.js';\n\nimport type { AxExample, AxOptimizationStats } from './optimizer.js';\nimport type { AxGenDeltaOut, AxProgramUsage } from './program.js';\nimport type { AxField } from './sig.js';\nimport type { AxFieldValue, AxGenOut } from './types.js';\n\nconst colorLog = new ColorLog();\n\nexport const updateProgressBar = (\n current: number,\n total: number,\n success: number,\n _elapsedTime: number, // in seconds\n msg: string,\n progressBarWidth = 20 // Default width of the progress bar\n): void => {\n const percentage = ((current / total) * 100).toFixed(1);\n const filledBarLength = Math.round((progressBarWidth * current) / total);\n const emptyBarLength = progressBarWidth - filledBarLength;\n const filledBar = colorLog.blueBright('█'.repeat(filledBarLength));\n const emptyBar = ' '.repeat(emptyBarLength);\n const successRate = total > 0 ? ((success / total) * 100).toFixed(1) : '0.0';\n\n // More user-friendly message\n const friendlyMsg = msg.includes('Running MIPROv2 optimization')\n ? 'Testing prompt variations'\n : msg.includes('Tuning Prompt')\n ? 'Generating training examples'\n : msg;\n\n // Use newline instead of carriage return to avoid overwriting structured logs\n process.stdout.write(\n `│ ${friendlyMsg}: ${current}/${total} (${colorLog.yellow(percentage)}%) |${filledBar}${emptyBar}| Success rate: ${colorLog.greenBright(successRate)}%\\n`\n );\n};\n\nexport const validateValue = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n): void => {\n const ft = field.type ?? 
{ name: 'string', isArray: false };\n\n const validateSingleValue = (\n expectedType: string,\n val: Readonly<AxFieldValue>\n ): boolean => {\n switch (expectedType) {\n case 'class':\n return typeof val === 'string';\n case 'code':\n return typeof val === 'string';\n case 'string':\n return typeof val === 'string';\n case 'number':\n return typeof val === 'number';\n case 'boolean':\n return typeof val === 'boolean';\n case 'date':\n return val instanceof Date || typeof val === 'string';\n case 'datetime':\n return val instanceof Date || typeof val === 'string';\n case 'json':\n return typeof val === 'object' || typeof val === 'string';\n default:\n return false; // Unknown or unsupported type\n }\n };\n\n const validImage = (val: Readonly<AxFieldValue>): boolean => {\n if (\n !val ||\n typeof val !== 'object' ||\n !('mimeType' in val) ||\n !('data' in val)\n ) {\n return false;\n }\n return true;\n };\n\n if (field.type?.name === 'image') {\n let msg: string | undefined;\n if (Array.isArray(value)) {\n for (const item of value) {\n if (!validImage(item)) {\n msg = 'object ({ mimeType: string; data: string })';\n break;\n }\n }\n } else if (!validImage(value)) {\n msg = 'object ({ mimeType: string; data: string })';\n }\n\n if (msg) {\n throw new Error(\n `Validation failed: Expected '${field.name}' to be type '${msg}' instead got '${value}'`\n );\n }\n return;\n }\n\n const validAudio = (val: Readonly<AxFieldValue>): boolean => {\n if (!val || typeof val !== 'object' || !('data' in val)) {\n return false;\n }\n return true;\n };\n\n if (field.type?.name === 'audio') {\n let msg: string | undefined;\n if (Array.isArray(value)) {\n for (const item of value) {\n if (!validAudio(item)) {\n msg = 'object ({ data: string; format?: string })';\n break;\n }\n }\n } else if (!validAudio(value)) {\n msg = 'object ({ data: string; format?: string })';\n }\n\n if (msg) {\n throw new Error(\n `Validation failed: Expected '${field.name}' to be type '${msg}' instead got '${value}'`\n );\n }\n return;\n }\n\n let isValid = true;\n\n if (ft.isArray) {\n if (!Array.isArray(value)) {\n isValid = false;\n } else {\n for (const item of value) {\n if (!validateSingleValue(ft.name, item)) {\n isValid = false;\n break;\n }\n }\n }\n } else {\n isValid = validateSingleValue(ft.name, value);\n }\n\n if (!isValid) {\n const gotType = Array.isArray(value) ? 'array' : typeof value;\n throw new Error(\n `Validation failed: Expected '${field.name}' to be a ${field.type?.isArray ? 'an array of ' : ''}${ft.name} instead got '${gotType}' (${JSON.stringify(value)})`\n );\n }\n};\n\nexport function mergeProgramUsage(\n usages: readonly AxProgramUsage[]\n): AxProgramUsage[] {\n const usageMap: { [key: string]: AxProgramUsage } = {};\n\n for (const usage of usages) {\n const key = `${usage.ai}:${usage.model}`;\n\n if (!usageMap[key]) {\n usageMap[key] = { ...usage };\n continue;\n }\n\n const currentUsage = usageMap[key];\n if (currentUsage) {\n const tokens = currentUsage.tokens ?? {\n promptTokens: 0,\n completionTokens: 0,\n totalTokens: 0,\n };\n tokens.promptTokens += usage?.tokens?.promptTokens ?? 0;\n tokens.completionTokens += usage?.tokens?.completionTokens ?? 0;\n tokens.totalTokens += usage?.tokens?.totalTokens ?? 0;\n currentUsage.tokens = tokens;\n }\n }\n\n return Object.values(usageMap);\n}\n\n/**\n * Parses a markdown list from a string. 
This is a very forgiving parser that\n * will try to handle anything that looks vaguely like a markdown list.\n */\nexport const parseMarkdownList = (input: string): string[] => {\n // Handle empty input\n if (!input.trim()) {\n return [];\n }\n\n const listBullets = new Set(['-', '*', '+']);\n const numberedListRegex = /^\\d+[\\s]*[.)\\]]\\s*/;\n\n const lines = input.split('\\n');\n const list = [];\n\n for (const line of lines) {\n const trimmedLine = line.trim();\n // Skip empty lines\n if (!trimmedLine) {\n continue;\n }\n\n // Check for bullet points\n if (trimmedLine[0] && listBullets.has(trimmedLine[0])) {\n list.push(trimmedLine.slice(1).trim());\n }\n // Check for numbered lists (e.g., \"1.\", \"2.\", etc.)\n else if (numberedListRegex.test(trimmedLine)) {\n list.push(trimmedLine.replace(numberedListRegex, '').trim());\n }\n // If it's not a list item and we haven't collected any items yet, do nothing\n else if (list.length === 0) {\n // Skip non-list lines at the beginning\n }\n // If we've already started collecting list items, then this non-list line\n //is an error\n else {\n throw new Error('Could not parse markdown list: mixed content detected');\n }\n }\n\n // If we didn't find any list items, throw error\n if (list.length === 0) {\n throw new Error('Could not parse markdown list: no valid list items found');\n }\n\n return list;\n};\n\nexport function mergeDeltas<OUT extends AxGenOut>(\n base: AxGenDeltaOut<OUT>[],\n currentDelta: AxGenDeltaOut<OUT>\n) {\n type ValueTypeOfAxGenOut = AxGenOut[keyof AxGenOut];\n\n const { index, delta, version } = currentDelta;\n\n // Cast once for mutation – safe because we'll only assign validated keys\n const target = base.find((b) => b.index === index)?.delta as Record<\n string,\n ValueTypeOfAxGenOut\n >;\n\n if (!target) {\n base.push({ index, delta, version });\n return base;\n }\n\n for (const key of Object.keys(delta)) {\n const baseValue = target[key];\n const deltaValue = (delta as Record<string, unknown>)[key];\n\n if (baseValue === undefined && Array.isArray(deltaValue)) {\n target[key] = [...deltaValue];\n } else if (Array.isArray(baseValue) && Array.isArray(deltaValue)) {\n // Concatenate arrays\n target[key] = [...(baseValue as unknown[]), ...deltaValue];\n } else if (\n (baseValue === undefined || typeof baseValue === 'string') &&\n typeof deltaValue === 'string'\n ) {\n // Concatenate strings\n target[key] = `${baseValue ?? 
''}${deltaValue}`;\n } else {\n // For all other types, overwrite with the new value\n target[key] = deltaValue as ValueTypeOfAxGenOut;\n }\n }\n return base;\n}\n\nexport class LRUCache<K, V> {\n private cache = new Map<K, V>();\n private readonly maxSize: number;\n\n constructor(maxSize: number) {\n this.maxSize = maxSize;\n }\n\n get(key: K): V | undefined {\n const value = this.cache.get(key);\n if (value) {\n // Refresh position by deleting and re-adding\n this.cache.delete(key);\n this.cache.set(key, value);\n }\n return value;\n }\n\n set(key: K, value: V): void {\n if (this.cache.has(key)) {\n this.cache.delete(key);\n } else if (this.cache.size >= this.maxSize) {\n // Remove oldest entry (first item in map)\n const firstKey = this.cache.keys().next().value;\n if (firstKey) {\n this.cache.delete(firstKey);\n }\n }\n this.cache.set(key, value);\n }\n}\n\nconst globalPrefixCache = new LRUCache<string, string[]>(500);\n\n/**\n * Checks if a streaming string matches a prefix, either fully or partially from the end.\n * For streaming content, partial matches are checked from shortest to longest since\n * the content grows at the end and we want to detect partial prefixes as they form.\n * @param content The string to check (potentially streaming)\n * @param prefix The prefix to look for\n * @param startIndex Optional starting index for the search\n * @returns\n * - index >= 0: Position of full match\n * - -1: No match found\n * - -2: Partial match from the end\n * - -3: String is only whitespace\n */\nexport function matchesContent(\n content: string,\n prefix: string,\n startIndex = 0,\n prefixCache: LRUCache<string, string[]> = globalPrefixCache\n): number {\n // Check if string starts with a markdown block with optional language\n if (/^```[a-zA-Z]*\\s*$/.test(content)) {\n return -4;\n }\n\n // Check if string is only whitespace\n if (/^[\\s`]*$/.test(content)) {\n return -3;\n }\n\n // First check if the complete prefix exists anywhere after startIndex\n const exactMatchIndex = content.indexOf(prefix, startIndex);\n\n if (exactMatchIndex !== -1) {\n return exactMatchIndex;\n }\n\n // Get or create cached prefixes\n const prefixes =\n prefixCache.get(prefix) ??\n Array.from({ length: prefix.length }, (_, i) => prefix.slice(0, i + 1));\n\n // Set in cache if it wasn't there\n if (!prefixCache.get(prefix)) {\n prefixCache.set(prefix, prefixes);\n }\n\n // Check for partial matches at the end (for streaming content)\n // We want to find the longest partial prefix that the content ends with\n let longestPartialMatch = -1;\n\n // Start from the longest prefix and work backwards to find the longest match\n for (let i = prefixes.length - 1; i >= 0; i--) {\n const partialPrefix = prefixes[i] as string;\n\n // Check if content ends with this partial prefix\n if (content.endsWith(partialPrefix)) {\n longestPartialMatch = i;\n break; // Found the longest match, no need to continue\n }\n }\n\n // Return -2 for partial match, -1 for no match\n return longestPartialMatch >= 0 ? 
-2 : -1;\n}\n\nexport const formatTime = (ms: number): string => {\n const seconds = Math.floor(ms / 1000);\n if (seconds < 60) return `${seconds}s`;\n\n const minutes = Math.floor(seconds / 60);\n const remainingSeconds = seconds % 60;\n if (minutes < 60) return `${minutes}m ${remainingSeconds}s`;\n\n const hours = Math.floor(minutes / 60);\n const remainingMinutes = minutes % 60;\n return `${hours}h ${remainingMinutes}m ${remainingSeconds}s`;\n};\n\nexport const calculateETA = (\n current: number,\n total: number,\n elapsedMs: number\n): string => {\n if (current === 0) return 'calculating...';\n\n const msPerItem = elapsedMs / current;\n const remainingItems = total - current;\n const etaMs = msPerItem * remainingItems;\n\n return formatTime(etaMs);\n};\n\ninterface ProgressConfigInfo {\n maxRounds: number;\n batchSize: number;\n earlyStoppingPatience: number;\n costMonitoring: boolean;\n verboseMode: boolean;\n debugMode: boolean;\n}\n\nexport const updateDetailedProgress = <T extends AxGenOut = AxGenOut>(\n roundIndex: number,\n current: number,\n total: number,\n elapsedTime: number,\n example: Readonly<AxExample>,\n stats: Readonly<AxOptimizationStats>,\n configInfo: Readonly<ProgressConfigInfo>,\n result?: T,\n error?: Error\n): void => {\n // Clear line and create a formatted output\n process.stdout.write('\\r\\x1b[K');\n\n const percentage = ((current / total) * 100).toFixed(1);\n const formattedTime = formatTime(elapsedTime);\n const eta = calculateETA(current, total, elapsedTime);\n\n // Basic progress info (always shown) - more user-friendly\n let output = `Training round ${roundIndex + 1}/${configInfo.maxRounds}: ${current}/${total} (${percentage}%) [${formattedTime}, ETA: ${eta}]`;\n\n // Add success stats in a cleaner format\n const successRate =\n stats.totalCalls > 0 ? (stats.successfulDemos / stats.totalCalls) * 100 : 0;\n output += ` | Success rate: ${successRate.toFixed(1)}% (${stats.successfulDemos}/${stats.totalCalls})`;\n\n // Additional info for verbose mode\n if (configInfo.verboseMode || configInfo.debugMode) {\n if (configInfo.costMonitoring) {\n output += `\\n Tokens: ~${stats.estimatedTokenUsage.toLocaleString()} total`;\n }\n\n output += `\\n Batch: ${Math.floor(current / configInfo.batchSize) + 1}/${Math.ceil(total / configInfo.batchSize)}`;\n\n if (configInfo.earlyStoppingPatience > 0 && stats.earlyStopping) {\n output += `\\n Best round: ${stats.earlyStopping.bestScoreRound + 1}, Patience: ${configInfo.earlyStoppingPatience}`;\n }\n }\n\n // Debug mode gets even more info\n if (configInfo.debugMode) {\n // Truncate example keys for display\n const exampleKeys = Object.keys(example)\n .map((k) => {\n const valueStr = JSON.stringify(example[k]);\n const truncated =\n valueStr.length > 30 ? `${valueStr.substring(0, 30)}...` : valueStr;\n return `${k}: ${truncated}`;\n })\n .join(', ');\n\n output += `\\n Example: {${exampleKeys}}`;\n\n if (error) {\n output += `\\n ERROR: ${error.message}`;\n } else if (result) {\n // Truncate result for display\n const resultStr = JSON.stringify(result);\n const truncatedResult =\n resultStr.length > 50 ? 
`${resultStr.substring(0, 50)}...` : resultStr;\n output += `\\n Result: ${truncatedResult}`;\n }\n\n // Add temperature info\n output += `\\n Temperature: ${(0.7 + 0.001 * current).toFixed(3)}`;\n }\n\n console.log(output);\n};\n","/* eslint-disable @typescript-eslint/naming-convention */\n\nimport { parseLLMFriendlyDate, parseLLMFriendlyDateTime } from './datetime.js';\nimport { ValidationError } from './errors.js';\nimport type { GenDeltaOut } from './program.js';\nimport type { AxField, AxSignature } from './sig.js';\nimport type { AxGenOut } from './types.js';\nimport { matchesContent, parseMarkdownList } from './util.js';\n\nexport const extractValues = (\n sig: Readonly<AxSignature>,\n values: Record<string, unknown>,\n content: string,\n strictMode = false\n) => {\n const xstate = { extractedFields: [], streamedIndex: {}, s: -1 };\n streamingExtractValues(sig, values, xstate, content, { strictMode });\n streamingExtractFinalValue(sig, values, xstate, content);\n\n // Filter out internal fields\n for (const field of sig.getOutputFields()) {\n if (field.isInternal) {\n delete values[field.name];\n }\n }\n};\n\nexport interface extractionState {\n prevFields?: { field: AxField; s: number; e: number }[];\n currField?: AxField;\n currFieldIndex?: number;\n inAssumedField?: boolean;\n extractedFields: AxField[];\n streamedIndex: Record<string, number>;\n s: number;\n inBlock?: boolean;\n}\n\n// Helper function to check for missing required fields\nconst checkMissingRequiredFields = (\n _xstate: Readonly<extractionState>,\n values: Record<string, unknown>,\n outputFields: Readonly<AxField[]>\n) => {\n const missingFields: AxField[] = [];\n\n for (const field of outputFields) {\n if (field && !field.isOptional && values[field.name] === undefined) {\n missingFields.push(field);\n }\n }\n\n if (missingFields.length > 0) {\n throw new ValidationError({\n message: `Required ${missingFields.length === 1 ? 'field' : 'fields'} not found`,\n fields: missingFields,\n });\n }\n};\n\nexport interface StreamingExtractValuesOptions {\n strictMode?: boolean;\n skipEarlyFail?: boolean;\n}\n\nexport const streamingExtractValues = (\n sig: Readonly<AxSignature>,\n values: Record<string, unknown>,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n content: string,\n { strictMode, skipEarlyFail }: StreamingExtractValuesOptions = {}\n) => {\n const fields = sig.getOutputFields();\n let expectedField: AxField | undefined;\n\n for (const [index, field] of fields.entries()) {\n // If the field is the current field and it's not assumed, skip it\n if (index === xstate.currFieldIndex && !xstate.inAssumedField) {\n continue;\n }\n\n // If field is already in values and it's not the current field and it's not assumed, skip it\n if (\n field.name in values &&\n !(index === xstate.currFieldIndex && xstate.inAssumedField)\n ) {\n continue;\n }\n\n const isFirst = xstate.extractedFields.length === 0;\n const prefix = `${(isFirst ? 
'' : '\\n') + field.title}:`;\n\n let e = matchesContent(content, prefix, xstate.s);\n let prefixLen = prefix.length;\n\n switch (e) {\n case -1:\n if (skipEarlyFail) {\n continue;\n }\n\n // If there is only one field then we assume the content is streaming to the first field\n // Note: optimization for single field responses\n if (\n !strictMode &&\n fields.length === 1 &&\n xstate.currField === undefined\n ) {\n xstate.inAssumedField = true;\n expectedField = field;\n prefixLen = 0;\n e = 0;\n break;\n }\n\n // if multiple fields, we need to validate the field name of the first required field\n if (xstate.currField === undefined && !field.isOptional) {\n throw new ValidationError({\n message: 'Expected (Required) field not found',\n fields: [field],\n });\n }\n\n expectedField = field.isOptional ? undefined : field;\n continue; // Field is not found, continue to the next field\n case -2:\n return true; // Partial match at end, skip and gather more content\n case -3:\n return true; // String is only whitespace, skip and gather more content\n case -4:\n xstate.inBlock = true;\n return true; // String is only backticks, skip and gather more content\n }\n // We found a field!!!\n\n // If the field we found is not the expected field, throw an error\n if (expectedField && expectedField.name !== field.name) {\n throw new ValidationError({\n message: 'Expected (Required) field not found',\n fields: [expectedField],\n });\n }\n\n if (xstate.currField !== undefined && xstate.inAssumedField) {\n xstate.inAssumedField = false;\n xstate.streamedIndex[xstate.currField.name] = 0;\n xstate.currField = undefined;\n }\n\n // Lets wrap up the last field which is still the current field\n if (xstate.currField) {\n const val = content.substring(xstate.s, e).trim();\n const parsedValue = validateAndParseFieldValue(xstate.currField, val);\n if (parsedValue !== undefined) {\n values[xstate.currField.name] = parsedValue;\n }\n if (xstate.prevFields) {\n xstate.prevFields?.push({ field: xstate.currField, s: xstate.s, e });\n } else {\n xstate.prevFields = [{ field: xstate.currField, s: xstate.s, e }];\n }\n }\n\n // Lets update the state for the new current field\n\n xstate.s = e + prefixLen;\n xstate.currField = field;\n xstate.currFieldIndex = index;\n\n if (!xstate.extractedFields.includes(field)) {\n xstate.extractedFields.push(field);\n }\n\n if (xstate.streamedIndex[field.name] === undefined) {\n xstate.streamedIndex[field.name] = 0;\n }\n }\n};\n\nexport const streamingExtractFinalValue = (\n sig: Readonly<AxSignature>,\n values: Record<string, unknown>,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n content: string\n) => {\n if (xstate.currField) {\n const val = content.substring(xstate.s).trim();\n\n const parsedValue = validateAndParseFieldValue(xstate.currField, val);\n if (parsedValue !== undefined) {\n values[xstate.currField.name] = parsedValue;\n }\n }\n // Check all previous required fields before processing current field\n checkMissingRequiredFields(xstate, values, sig.getOutputFields());\n};\n\nconst convertValueToType = (\n field: Readonly<AxField>,\n val: string,\n required = false\n) => {\n switch (field.type?.name) {\n case 'code':\n return extractBlock(val);\n\n case 'string':\n return val;\n\n case 'number': {\n const v = Number(val);\n if (Number.isNaN(v)) {\n if (field.isOptional && !required) {\n return;\n }\n throw new Error('Invalid number');\n }\n return v;\n }\n\n case 'boolean': {\n if (typeof val === 'boolean') {\n return val;\n }\n 
const v = val.toLowerCase();\n if (v === 'true') {\n return true;\n }\n if (v === 'false') {\n return false;\n }\n if (field.isOptional && !required) {\n return;\n }\n throw new Error('Invalid boolean');\n }\n case 'date':\n return parseLLMFriendlyDate(field, val, required);\n\n case 'datetime':\n return parseLLMFriendlyDateTime(field, val, required);\n\n case 'class': {\n const className = val;\n if (field.type.options && !field.type.options.includes(className)) {\n if (field.isOptional) {\n return;\n }\n throw new Error(\n `Invalid class '${val}', expected one of the following: ${field.type.options.join(', ')}`\n );\n }\n return className as string;\n }\n\n default:\n return val as string; // Unknown type\n }\n};\n\nexport function* yieldDelta<OUT extends AxGenOut>(\n content: string,\n field: Readonly<AxField>,\n s: number,\n e: number,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n index: number\n): GenDeltaOut<OUT> {\n const { name: fieldName, isInternal } = field;\n const { isArray: fieldIsArray, name: fieldTypeName } = field.type ?? {};\n\n if (\n isInternal ||\n fieldIsArray ||\n (fieldTypeName && fieldTypeName !== 'string' && fieldTypeName !== 'code')\n ) {\n return;\n }\n\n const pos = xstate.streamedIndex[fieldName] ?? 0;\n const isFirstChunk = pos === 0;\n\n const d1 = content.substring(s + pos, e);\n if (d1.length === 0) {\n return;\n }\n\n // Remove trailing whitespace, tabs, and newlines\n let d2 = d1.replace(/\\s+$/, '');\n\n // If this field is a \"code\" type, remove trailing backticks\n if (xstate.currField?.type?.name === 'code') {\n d2 = d2.replace(/\\s*```\\s*$/, '');\n }\n\n // Only trim start for the first chunk\n let d3 = isFirstChunk ? d2.trimStart() : d2;\n\n if (xstate.currField?.type?.name === 'code') {\n // Remove any leading triple-backtick fences (with optional language specifier)\n d3 = d3.replace(/^[ ]*```[a-zA-Z0-9]*\\n\\s*/, '');\n }\n\n if (d3.length > 0) {\n yield { index, delta: { [fieldName]: d3 } as unknown as Partial<OUT> };\n xstate.streamedIndex[fieldName] = pos + d2.length;\n }\n}\n\nexport function* streamValues<OUT extends AxGenOut>(\n sig: Readonly<AxSignature>,\n content: string,\n values: Readonly<Record<string, OUT>>,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n index: number\n): GenDeltaOut<OUT> {\n for (const prevField of xstate.prevFields ?? []) {\n const { field, s, e } = prevField;\n yield* yieldDelta<OUT>(content, field, s, e, xstate, index);\n }\n xstate.prevFields = undefined;\n\n if (!xstate.currField || xstate.currField.isInternal) {\n return;\n }\n\n yield* yieldDelta<OUT>(\n content,\n xstate.currField,\n xstate.s,\n content.length,\n xstate,\n index\n );\n\n const outputFields = sig.getOutputFields();\n\n for (const key of Object.keys(values)) {\n const field = outputFields.find((f) => f.name === key);\n if (!field || field.isInternal) {\n continue;\n }\n\n const value = values[key];\n\n if (Array.isArray(value)) {\n const s = xstate.streamedIndex?.[key] ?? 
0;\n const v = value.slice(s);\n if (v && v.length > 0) {\n yield { index, delta: { [key]: v } as unknown as Partial<OUT> };\n xstate.streamedIndex[key] = s + v.length;\n }\n continue;\n }\n\n if (!xstate.streamedIndex[key]) {\n yield { index, delta: { [key]: value } as unknown as Partial<OUT> };\n xstate.streamedIndex[key] = 1;\n }\n }\n}\n\nfunction validateAndParseFieldValue(\n field: Readonly<AxField>,\n fieldValue: string | undefined\n): unknown {\n if (\n !fieldValue ||\n fieldValue === '' ||\n /^(null|undefined)\\s*$/i.test(fieldValue)\n ) {\n if (field.isOptional) {\n return;\n }\n throw new ValidationError({\n message: 'Required field is missing',\n fields: [field],\n value: fieldValue,\n });\n }\n\n let value: unknown | undefined;\n\n if (field.type?.name === 'json') {\n try {\n const text = extractBlock(fieldValue);\n value = JSON.parse(text);\n return value;\n } catch (e) {\n throw new ValidationError({\n message: `Invalid JSON: ${(e as Error).message}`,\n fields: [field],\n value: fieldValue,\n });\n }\n }\n\n if (field.type?.isArray) {\n try {\n try {\n value = JSON.parse(fieldValue);\n } catch {\n // If JSON parsing fails, try markdown parsing\n value = parseMarkdownList(fieldValue);\n }\n if (!Array.isArray(value)) {\n throw new Error('Expected an array');\n }\n } catch (e) {\n throw new ValidationError({\n message: `Invalid Array: ${(e as Error).message}`,\n fields: [field],\n value: fieldValue,\n });\n }\n }\n\n try {\n if (Array.isArray(value)) {\n for (const [index, item] of value.entries()) {\n if (item !== undefined) {\n const v = typeof item === 'string' ? item.trim() : item;\n value[index] = convertValueToType(field, v, true);\n }\n }\n } else {\n value = convertValueToType(field, fieldValue);\n }\n } catch (e) {\n throw new ValidationError({\n message: (e as Error).message,\n fields: [field],\n value: fieldValue,\n });\n }\n\n if (typeof value === 'string' && value === '') {\n return undefined;\n }\n\n return value;\n}\n\nexport const extractBlock = (input: string): string => {\n const markdownBlockPattern = /```([A-Za-z]*)\\n([\\s\\S]*?)\\n```/g;\n const match = markdownBlockPattern.exec(input);\n if (!match) {\n return input;\n }\n if (match.length === 3) {\n return match[2] as string;\n }\n if (match.length === 2) {\n return match[1] as string;\n }\n return input;\n};\n","// ReadableStream is available globally in modern browsers and Node.js 16+\n\nimport type { AxChatResponse, AxModelUsage } from '../ai/types.js';\nimport { mergeFunctionCalls } from '../ai/util.js';\nimport type { AxAIMemory } from '../mem/types.js';\n\nimport {\n type AxAssertion,\n type AxStreamingAssertion,\n assertAssertions,\n assertStreamingAssertions,\n} from './asserts.js';\nimport {\n extractValues,\n streamingExtractFinalValue,\n streamingExtractValues,\n streamValues,\n} from './extract.js';\nimport {\n type AxFieldProcessor,\n processFieldProcessors,\n processStreamingFieldProcessors,\n} from './fieldProcessor.js';\nimport { parseFunctionCalls, processFunctions } from './functions.js';\nimport type { AxResponseHandlerArgs, InternalAxGenState } from './generate.js';\nimport type { AsyncGenDeltaOut, DeltaOut } from './program.js';\nimport type { AxSignature } from './sig.js';\nimport type { AxGenOut } from './types.js';\n\ntype ProcessStreamingResponseArgs = Readonly<\n AxResponseHandlerArgs<ReadableStream<AxChatResponse>>\n> & {\n states: InternalAxGenState[];\n usage: AxModelUsage[];\n asserts: AxAssertion[];\n streamingAsserts: AxStreamingAssertion[];\n fieldProcessors: 
AxFieldProcessor[];\n streamingFieldProcessors: AxFieldProcessor[];\n thoughtFieldName: string;\n signature: AxSignature;\n excludeContentFromTrace: boolean;\n functionResultFormatter?: (result: unknown) => string;\n};\n\nexport async function* processStreamingResponse<OUT extends AxGenOut>({\n res,\n usage,\n states,\n ...args\n}: ProcessStreamingResponseArgs): AsyncGenDeltaOut<OUT> {\n const skipEarlyFail =\n (args.ai.getFeatures().functionCot ?? false) &&\n args.functions !== undefined &&\n args.functions.length > 0;\n\n // Handle ReadableStream async iteration for browser compatibility\n const reader = res.getReader();\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n const v = value;\n if (v.modelUsage) {\n usage.push(v.modelUsage);\n }\n\n for (const result of v.results) {\n if (\n (!result.content || result.content === '') &&\n (!result.thought || result.thought === '') &&\n (!result.functionCalls || result.functionCalls.length === 0)\n ) {\n continue;\n }\n\n const state = states.find((s) => s.index === result.index);\n if (!state) {\n throw new Error(`No state found for result (index: ${result.index})`);\n }\n\n yield* ProcessStreamingResponse<OUT>({\n ...args,\n result,\n skipEarlyFail,\n state,\n });\n }\n }\n } finally {\n reader.releaseLock();\n }\n\n // Finalize the streams\n for (const state of states) {\n yield* finalizeStreamingResponse<OUT>({\n ...args,\n state,\n });\n }\n}\n\ntype ProcessStreamingResponseArgs2 = Readonly<\n Omit<\n ProcessStreamingResponseArgs,\n | 'res'\n | 'states'\n | 'usage'\n | 'excludeContentFromTrace'\n | 'ai'\n | 'model'\n | 'traceId'\n | 'functions'\n | 'span'\n | 'fieldProcessors'\n > & {\n result: AxChatResponse['results'][number];\n skipEarlyFail: boolean;\n state: InternalAxGenState;\n }\n>;\n\nasync function* ProcessStreamingResponse<OUT extends AxGenOut>({\n result,\n mem,\n sessionId,\n strictMode,\n skipEarlyFail,\n state,\n signature,\n streamingFieldProcessors,\n thoughtFieldName,\n streamingAsserts,\n asserts,\n}: ProcessStreamingResponseArgs2): AsyncGenDeltaOut<OUT> {\n if (result.functionCalls && result.functionCalls.length > 0) {\n mergeFunctionCalls(state.functionCalls, result.functionCalls);\n mem.updateResult(\n {\n name: result.name,\n content: result.content,\n functionCalls: state.functionCalls,\n delta: result.functionCalls?.[0]?.function?.params as string,\n index: result.index,\n },\n sessionId\n );\n } else if (result.content && result.content.length > 0) {\n if (result.thought && result.thought.length > 0) {\n yield {\n index: result.index,\n delta: { [thoughtFieldName]: result.thought } as Partial<OUT>,\n };\n }\n\n state.content += result.content;\n mem.updateResult(\n {\n name: result.name,\n content: state.content,\n delta: result.content,\n index: result.index,\n },\n sessionId\n );\n\n const skip = streamingExtractValues(\n signature,\n state.values,\n state.xstate,\n state.content,\n { strictMode, skipEarlyFail }\n );\n\n if (skip) {\n return;\n }\n\n if (streamingAsserts.length !== 0) {\n await assertStreamingAssertions(\n streamingAsserts,\n state.xstate,\n state.content\n );\n }\n\n if (streamingFieldProcessors.length !== 0) {\n await processStreamingFieldProcessors(\n streamingFieldProcessors,\n state.content,\n state.xstate,\n mem,\n state.values,\n sessionId\n );\n }\n\n yield* streamValues<OUT>(\n signature,\n state.content,\n state.values as Record<string, OUT>,\n state.xstate,\n result.index\n );\n\n await assertAssertions(asserts, state.values);\n } else if 
(result.thought && result.thought.length > 0) {\n state.values[thoughtFieldName] =\n (state.values[thoughtFieldName] ?? '') + result.thought;\n\n yield {\n index: result.index,\n delta: { [thoughtFieldName]: result.thought } as Partial<OUT>,\n };\n }\n\n if (result.finishReason === 'length') {\n throw new Error(\n `Max tokens reached before completion\\nContent: ${state.content}`\n );\n }\n}\n\ntype FinalizeStreamingResponseArgs = Readonly<\n Omit<ProcessStreamingResponseArgs, 'res' | 'states' | 'usage'> & {\n state: InternalAxGenState;\n }\n>;\n\nexport async function* finalizeStreamingResponse<OUT extends AxGenOut>({\n state,\n signature,\n ai,\n model,\n functions,\n mem,\n sessionId,\n traceId,\n span,\n excludeContentFromTrace,\n streamingAsserts,\n asserts,\n fieldProcessors,\n streamingFieldProcessors,\n functionResultFormatter,\n}: FinalizeStreamingResponseArgs) {\n const funcs = parseFunctionCalls(\n ai,\n state.functionCalls,\n state.values,\n model\n );\n if (funcs) {\n if (!functions) {\n throw new Error('Functions are not defined');\n }\n const fx = await processFunctions({\n ai,\n functionList: functions,\n functionCalls: funcs,\n mem,\n sessionId,\n traceId,\n span,\n index: state.index,\n excludeContentFromTrace,\n functionResultFormatter,\n });\n state.functionsExecuted = new Set([...state.functionsExecuted, ...fx]);\n } else {\n streamingExtractFinalValue(\n signature,\n state.values,\n state.xstate,\n state.content\n );\n\n await assertStreamingAssertions(\n streamingAsserts,\n state.xstate,\n state.content,\n true\n );\n await assertAssertions(asserts, state.values);\n\n if (fieldProcessors.length) {\n await processFieldProcessors(\n fieldProcessors,\n state.values,\n mem,\n sessionId\n );\n }\n\n if (streamingFieldProcessors.length !== 0) {\n await processStreamingFieldProcessors(\n streamingFieldProcessors,\n state.content,\n state.xstate,\n mem,\n state.values,\n sessionId,\n true\n );\n }\n\n yield* streamValues<OUT>(\n signature,\n state.content,\n state.values as Record<string, OUT>,\n state.xstate,\n state.index\n );\n }\n}\n\nexport async function* processResponse<OUT extends AxGenOut>({\n ai,\n res,\n mem,\n sessionId,\n traceId,\n functions,\n span,\n strictMode,\n states,\n usage,\n excludeContentFromTrace,\n asserts,\n fieldProcessors,\n thoughtFieldName,\n signature,\n functionResultFormatter,\n}: Readonly<AxResponseHandlerArgs<AxChatResponse>> & {\n states: InternalAxGenState[];\n usage: AxModelUsage[];\n excludeContentFromTrace: boolean;\n asserts: AxAssertion[];\n fieldProcessors: AxFieldProcessor[];\n thoughtFieldName: string;\n signature: AxSignature;\n functionResultFormatter?: (result: unknown) => string;\n}): AsyncGenDeltaOut<OUT> {\n const results = res.results ?? 
[];\n\n mem.addResponse(results, sessionId);\n\n for (const result of results) {\n const state = states[result.index];\n\n if (!state) {\n throw new Error(`No state found for result (index: ${result.index})`);\n }\n\n if (res.modelUsage) {\n usage.push(res.modelUsage);\n }\n\n if (result.functionCalls?.length) {\n const funcs = parseFunctionCalls(ai, result.functionCalls, state.values);\n if (funcs) {\n if (!functions) {\n throw new Error('Functions are not defined');\n }\n\n const fx = await processFunctions({\n ai,\n functionList: functions,\n functionCalls: funcs,\n mem,\n sessionId,\n traceId,\n span,\n excludeContentFromTrace,\n index: result.index,\n functionResultFormatter,\n });\n\n state.functionsExecuted = new Set([...state.functionsExecuted, ...fx]);\n }\n } else if (result.content) {\n if (result.thought && result.thought.length > 0) {\n state.values[thoughtFieldName] = result.thought;\n }\n\n extractValues(signature, state.values, result.content, strictMode);\n await assertAssertions(asserts, state.values);\n\n if (fieldProcessors.length) {\n await processFieldProcessors(\n fieldProcessors,\n state.values,\n mem,\n sessionId\n );\n }\n }\n\n if (result.finishReason === 'length') {\n throw new Error(\n `Max tokens reached before completion\\nContent: ${result.content}`\n );\n }\n }\n\n const values = states.map((s) => s.values);\n\n // Strip out values whose signature fields have isInternal: true\n for (const v of values) {\n for (const field of signature.getOutputFields()) {\n if (field.isInternal) {\n delete v[field.name];\n }\n }\n }\n\n const outputFields = signature.getOutputFields();\n const deltas: DeltaOut<OUT>[] = values.map((v, index) => {\n const delta: Record<string, unknown> = {};\n for (const field of outputFields) {\n if (field.isInternal) {\n continue;\n }\n delta[field.name] = v[field.name];\n }\n // Include thought field if it exists in the values\n if (v[thoughtFieldName] !== undefined) {\n delta[thoughtFieldName] = v[thoughtFieldName];\n }\n return { index, delta: delta as Partial<OUT> };\n });\n\n for (const delta of deltas) {\n yield delta;\n }\n}\n\nexport function shouldContinueSteps(\n mem: AxAIMemory,\n stopFunction: string | undefined,\n states: InternalAxGenState[],\n sessionId?: string\n) {\n const lastMemItem = mem.getLast(sessionId);\n\n if (!lastMemItem) {\n return true;\n }\n\n for (const [index, state] of states.entries()) {\n const stopFunctionExecuted =\n stopFunction && state.functionsExecuted.has(stopFunction);\n\n const chat = lastMemItem.chat[index];\n\n if (!chat) {\n throw new Error(`No chat message found for result (index: ${index})`);\n }\n\n const isFunction = lastMemItem.role === 'function';\n const isProcessor = lastMemItem.tags\n ? 
lastMemItem.tags.some((tag) => tag === 'processor')\n : false;\n\n // If any state has stop function executed, return false immediately\n if (isFunction && stopFunction && stopFunctionExecuted) {\n return false;\n }\n\n // If this state doesn't meet continuation criteria, return false\n if (!(isFunction || isProcessor)) {\n return false;\n }\n }\n\n // All states meet continuation criteria\n return true;\n}\n","// Updated type definitions\n\nexport type TypeNotClass =\n | 'string'\n | 'number'\n | 'boolean'\n | 'json'\n | 'image'\n | 'audio'\n | 'datetime'\n | 'date'\n | 'code';\nexport type Type = TypeNotClass | 'class';\nexport type ParsedIdentifier = string;\nexport type ParsedString = string;\n\nexport type ParsedSignature = {\n desc?: string;\n inputs: InputParsedField[];\n outputs: OutputParsedField[];\n};\n\nexport type InputParsedField = {\n name: ParsedIdentifier;\n desc?: string;\n type?: { name: TypeNotClass; isArray: boolean };\n isOptional?: boolean;\n};\n\nexport type OutputParsedField = {\n name: ParsedIdentifier;\n desc?: string;\n type?:\n | { name: TypeNotClass; isArray: boolean; options?: string[] }\n | { name: 'class'; isArray: boolean; options: string[] };\n isOptional?: boolean;\n isInternal?: boolean;\n};\n\nimport { axGlobals } from './globals.js';\n\nclass SignatureValidationError extends Error {\n constructor(\n message: string,\n public readonly position: number,\n public readonly context: string,\n public readonly suggestion?: string\n ) {\n super(message);\n this.name = 'SignatureValidationError';\n }\n}\n\nclass SignatureParser {\n private input: string;\n private position: number;\n private currentFieldName: string | null = null;\n private currentSection: 'description' | 'inputs' | 'outputs' = 'description';\n\n constructor(input: string) {\n this.input = input.trim();\n this.position = 0;\n\n if (!this.input) {\n throw new SignatureValidationError(\n 'Empty signature provided',\n 0,\n '',\n 'A signature must contain at least input and output fields separated by \"->\". Example: \"userQuery:string -> aiResponse:string\"'\n );\n }\n }\n\n parse(): ParsedSignature {\n try {\n this.skipWhitespace();\n const optionalDesc = this.parseParsedString();\n this.skipWhitespace();\n\n this.currentSection = 'inputs';\n // Use the specialized input field parser\n const inputs = this.parseFieldList(\n this.parseInputField.bind(this),\n 'input'\n );\n this.skipWhitespace();\n\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n 'Incomplete signature: Missing output section',\n this.position,\n this.getErrorContext(),\n 'Add \"->\" followed by output fields. Example: \"-> responseText:string\"'\n );\n }\n\n this.expectArrow();\n this.skipWhitespace();\n\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n 'Incomplete signature: No output fields specified after \"->\"',\n this.position,\n this.getErrorContext(),\n 'Add at least one output field. 
Example: \"-> responseText:string\"'\n );\n }\n\n this.currentSection = 'outputs';\n // Use the specialized output field parser\n const outputs = this.parseFieldList(\n this.parseOutputField.bind(this),\n 'output'\n );\n\n // Check for any remaining content that shouldn't be there\n this.skipWhitespace();\n if (this.position < this.input.length) {\n const remaining = this.input.slice(this.position);\n throw new SignatureValidationError(\n `Unexpected content after signature: \"${remaining}\"`,\n this.position,\n this.getErrorContext(),\n 'Remove any extra content after the output fields'\n );\n }\n\n // Validate the parsed signature\n this.validateParsedSignature({\n desc: optionalDesc?.trim(),\n inputs,\n outputs,\n });\n\n return {\n desc: optionalDesc?.trim(),\n inputs,\n outputs,\n };\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n\n // Wrap other errors with better context\n const errorMessage =\n error instanceof Error ? error.message : 'Unknown error';\n throw new SignatureValidationError(\n errorMessage,\n this.position,\n this.getErrorContext()\n );\n }\n }\n\n private validateParsedSignature(signature: Readonly<ParsedSignature>): void {\n // Check for duplicate field names within inputs\n const inputNames = new Set<string>();\n for (const field of signature.inputs) {\n if (inputNames.has(field.name)) {\n throw new SignatureValidationError(\n `Duplicate input field name: \"${field.name}\"`,\n 0,\n '',\n 'Each field name must be unique within the signature'\n );\n }\n inputNames.add(field.name);\n }\n\n // Check for duplicate field names within outputs\n const outputNames = new Set<string>();\n for (const field of signature.outputs) {\n if (outputNames.has(field.name)) {\n throw new SignatureValidationError(\n `Duplicate output field name: \"${field.name}\"`,\n 0,\n '',\n 'Each field name must be unique within the signature'\n );\n }\n outputNames.add(field.name);\n }\n\n // Check for field names that appear in both inputs and outputs\n for (const outputField of signature.outputs) {\n if (inputNames.has(outputField.name)) {\n throw new SignatureValidationError(\n `Field name \"${outputField.name}\" appears in both inputs and outputs`,\n 0,\n '',\n 'Use different names for input and output fields to avoid confusion'\n );\n }\n }\n\n // Validate that we have at least one input and one output\n if (signature.inputs.length === 0) {\n throw new SignatureValidationError(\n 'Signature must have at least one input field',\n 0,\n '',\n 'Add an input field before \"->\". Example: \"userInput:string -> ...\"'\n );\n }\n\n if (signature.outputs.length === 0) {\n throw new SignatureValidationError(\n 'Signature must have at least one output field',\n 0,\n '',\n 'Add an output field after \"->\". Example: \"... 
-> responseText:string\"'\n );\n }\n }\n\n private getErrorContext(): string {\n const start = Math.max(0, this.position - 25);\n const end = Math.min(this.input.length, this.position + 25);\n const before = this.input.slice(start, this.position);\n const after = this.input.slice(this.position, end);\n const pointer = `${' '.repeat(before.length)}^`;\n\n const lines = [\n `Position ${this.position} in signature:`,\n `\"${before}${after}\"`,\n ` ${pointer}`,\n ];\n\n return lines.join('\\n');\n }\n\n private parseFieldList<T extends InputParsedField | OutputParsedField>(\n parseFieldFn: () => T,\n section: 'input' | 'output'\n ): T[] {\n const fields: T[] = [];\n this.skipWhitespace();\n\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n `Empty ${section} section: Expected at least one field`,\n this.position,\n this.getErrorContext(),\n `Add a ${section} field. Example: ${section === 'input' ? 'userInput:string' : 'responseText:string'}`\n );\n }\n\n // Parse first field\n try {\n fields.push(parseFieldFn());\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Invalid first ${section} field: ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n\n this.skipWhitespace();\n\n // Parse remaining fields\n while (this.position < this.input.length) {\n if (\n this.input[this.position] === '-' &&\n this.position + 1 < this.input.length &&\n this.input[this.position + 1] === '>'\n ) {\n break;\n }\n\n if (this.match(',')) {\n this.skipWhitespace();\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n `Unexpected end of input after comma in ${section} section`,\n this.position,\n this.getErrorContext(),\n `Add another ${section} field after the comma`\n );\n }\n try {\n fields.push(parseFieldFn());\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Invalid ${section} field after comma: ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n this.skipWhitespace();\n } else {\n break;\n }\n }\n\n return fields;\n }\n\n // -------------------------------\n // Parse input fields (no \"class\" type and no internal flag)\n // -------------------------------\n private parseInputField(): InputParsedField {\n this.skipWhitespace();\n const name = this.parseParsedIdentifier();\n this.currentFieldName = name;\n\n // Validate field name for inputs\n this.validateFieldName(name, 'input');\n\n // Only the optional marker is allowed\n let isOptional: boolean | undefined;\n while (true) {\n if (this.match('?')) {\n isOptional = true;\n continue;\n }\n if (this.match('!')) {\n throw new SignatureValidationError(\n `Input field \"${name}\" cannot use the internal marker \"!\"`,\n this.position - 1,\n this.getErrorContext(),\n 'Internal markers (!) are only allowed on output fields'\n );\n }\n break;\n }\n\n let type: { name: TypeNotClass; isArray: boolean } | undefined;\n this.skipWhitespace();\n if (this.match(':')) {\n this.skipWhitespace();\n // Disallow the \"class\" type in input fields\n if (/^class\\b/.test(this.input.slice(this.position))) {\n throw new SignatureValidationError(\n `Input field \"${name}\" cannot use the \"class\" type`,\n this.position,\n this.getErrorContext(),\n 'Class types are only allowed on output fields. 
Use \"string\" type for input classifications'\n );\n }\n try {\n const typeName = this.parseTypeNotClass();\n const isArray = this.match('[]');\n type = { name: typeName, isArray };\n\n // Validate specific type constraints for input fields\n if ((typeName === 'image' || typeName === 'audio') && isArray) {\n throw new SignatureValidationError(\n `Input field \"${name}\": Arrays of ${typeName} are not supported`,\n this.position,\n this.getErrorContext(),\n `Use a single ${typeName} type instead: \"${typeName}\"`\n );\n }\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Input field \"${name}\": ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n }\n\n this.skipWhitespace();\n const desc = this.parseParsedString();\n\n return {\n name,\n desc: desc?.trim(),\n type,\n isOptional,\n };\n }\n\n // -------------------------------\n // Parse output fields (supports both \"class\" type and the internal marker)\n // -------------------------------\n private parseOutputField(): OutputParsedField {\n this.skipWhitespace();\n const name = this.parseParsedIdentifier();\n this.currentFieldName = name;\n\n // Validate field name for outputs\n this.validateFieldName(name, 'output');\n\n let isOptional = false;\n let isInternal = false;\n while (true) {\n if (this.match('?')) {\n isOptional = true;\n continue;\n }\n if (this.match('!')) {\n isInternal = true;\n continue;\n }\n break;\n }\n\n let type:\n | { name: TypeNotClass; isArray: boolean; options?: string[] }\n | { name: 'class'; isArray: boolean; options: string[] }\n | undefined;\n this.skipWhitespace();\n if (this.match(':')) {\n this.skipWhitespace();\n if (this.match('class')) {\n const isArray = this.match('[]');\n this.skipWhitespace();\n const classNamesString = this.parseParsedString();\n if (!classNamesString) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Missing class options after \"class\" type`,\n this.position,\n this.getErrorContext(),\n 'Add class names in quotes. Example: class \"positive, negative, neutral\"'\n );\n }\n const options = classNamesString\n .split(/[,|]/)\n .map((s) => s.trim())\n .filter((s) => s.length > 0);\n\n if (options.length === 0) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Empty class list provided`,\n this.position,\n this.getErrorContext(),\n 'Provide at least one class option. 
Example: \"positive, negative\"'\n );\n }\n\n type = { name: 'class', isArray, options };\n } else {\n try {\n const typeName = this.parseTypeNotClass();\n const isArray = this.match('[]');\n type = { name: typeName, isArray };\n\n // Validate specific type constraints\n if (typeName === 'image' && isArray) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Arrays of images are not supported`,\n this.position,\n this.getErrorContext(),\n 'Use a single image type instead: \"image\"'\n );\n }\n\n if (typeName === 'audio' && isArray) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Arrays of audio are not supported`,\n this.position,\n this.getErrorContext(),\n 'Use a single audio type instead: \"audio\"'\n );\n }\n\n if (typeName === 'image') {\n throw new SignatureValidationError(\n `Output field \"${name}\": Image type is not supported in output fields`,\n this.position,\n this.getErrorContext(),\n 'Image types can only be used in input fields'\n );\n }\n\n if (typeName === 'audio') {\n throw new SignatureValidationError(\n `Output field \"${name}\": Audio type is not supported in output fields`,\n this.position,\n this.getErrorContext(),\n 'Audio types can only be used in input fields'\n );\n }\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Output field \"${name}\": ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n }\n }\n\n this.skipWhitespace();\n const desc = this.parseParsedString();\n\n return {\n name,\n desc: desc?.trim(),\n type,\n isOptional,\n isInternal,\n };\n }\n\n private validateFieldName(name: string, fieldType: 'input' | 'output'): void {\n // Check for reserved/generic names that should be more descriptive\n if (axGlobals.signatureStrict) {\n const reservedNames = [\n 'text',\n 'object',\n 'image',\n 'string',\n 'number',\n 'boolean',\n 'json',\n 'array',\n 'datetime',\n 'date',\n 'time',\n 'type',\n 'class',\n 'input',\n 'output',\n 'data',\n 'value',\n 'result',\n 'response',\n 'request',\n 'item',\n 'element',\n ];\n\n if (reservedNames.includes(name.toLowerCase())) {\n const suggestions =\n fieldType === 'input'\n ? ['userInput', 'questionText', 'documentContent', 'messageText']\n : ['responseText', 'analysisResult', 'categoryType', 'summaryText'];\n\n throw new SignatureValidationError(\n `Field name \"${name}\" is too generic`,\n this.position,\n this.getErrorContext(),\n `Use a more descriptive name. 
Examples: ${suggestions.join(', ')}`\n );\n }\n }\n\n // Check naming convention\n const camelCaseRegex = /^[a-z][a-zA-Z0-9]*$/;\n const snakeCaseRegex = /^[a-z]+(_[a-z0-9]+)*$/;\n\n if (!camelCaseRegex.test(name) && !snakeCaseRegex.test(name)) {\n throw new SignatureValidationError(\n `Invalid field name \"${name}\"`,\n this.position,\n this.getErrorContext(),\n 'Field names must be in camelCase (e.g., \"userInput\") or snake_case (e.g., \"user_input\")'\n );\n }\n\n // Check for minimum length\n if (name.length < 2) {\n throw new SignatureValidationError(\n `Field name \"${name}\" is too short`,\n this.position,\n this.getErrorContext(),\n 'Field names must be at least 2 characters long'\n );\n }\n\n // Check for maximum length\n if (name.length > 50) {\n throw new SignatureValidationError(\n `Field name \"${name}\" is too long (${name.length} characters)`,\n this.position,\n this.getErrorContext(),\n 'Field names should be 50 characters or less'\n );\n }\n }\n\n private parseTypeNotClass(): TypeNotClass {\n const types: TypeNotClass[] = [\n 'string',\n 'number',\n 'boolean',\n 'json',\n 'image',\n 'audio',\n 'datetime',\n 'date',\n 'code',\n ];\n\n const foundType = types.find((type) => this.match(type));\n if (!foundType) {\n const currentWord =\n this.input.slice(this.position).match(/^\\w+/)?.[0] || '';\n const suggestion = this.suggestType(currentWord);\n\n const baseMessage = `Invalid type \"${currentWord || 'empty'}\"`;\n const suggestionPart = suggestion\n ? `. Did you mean \"${suggestion}\"?`\n : '';\n const fullMessage = `${baseMessage}${suggestionPart}`;\n\n throw new SignatureValidationError(\n fullMessage,\n this.position,\n this.getErrorContext(),\n `Expected one of: ${types.join(', ')}`\n );\n }\n return foundType;\n }\n\n private suggestType(input: string): string | null {\n const suggestions: Record<string, string> = {\n str: 'string',\n text: 'string',\n int: 'number',\n integer: 'number',\n float: 'number',\n double: 'number',\n bool: 'boolean',\n object: 'json',\n dict: 'json',\n timestamp: 'datetime',\n time: 'datetime',\n img: 'image',\n picture: 'image',\n sound: 'audio',\n voice: 'audio',\n classification: 'class',\n category: 'class',\n };\n\n return suggestions[input.toLowerCase()] || null;\n }\n\n private parseParsedIdentifier(): ParsedIdentifier {\n this.skipWhitespace();\n const match = /^[a-zA-Z_][a-zA-Z_0-9]*/.exec(\n this.input.slice(this.position)\n );\n if (match) {\n this.position += match[0].length;\n return match[0];\n }\n\n const invalidMatch = /^\\S+/.exec(this.input.slice(this.position));\n const invalidId = invalidMatch ? invalidMatch[0] : '';\n\n if (invalidId === '') {\n throw new SignatureValidationError(\n 'Expected field name but found end of input',\n this.position,\n this.getErrorContext(),\n 'Add a field name. Field names must start with a letter or underscore'\n );\n }\n\n if (/^\\d/.test(invalidId)) {\n throw new SignatureValidationError(\n `Invalid field name \"${invalidId}\" - cannot start with a number`,\n this.position,\n this.getErrorContext(),\n 'Field names must start with a letter or underscore. 
Example: \"userInput\" or \"_internal\"'\n );\n }\n\n throw new SignatureValidationError(\n `Invalid field name \"${invalidId}\"`,\n this.position,\n this.getErrorContext(),\n 'Field names must start with a letter or underscore and contain only letters, numbers, or underscores'\n );\n }\n\n private parseParsedString(): string | undefined {\n const quoteChars = [\"'\", '\"'];\n for (const quoteChar of quoteChars) {\n if (this.match(quoteChar)) {\n let content = '';\n let escaped = false;\n const startPos = this.position - 1;\n\n while (this.position < this.input.length) {\n const char = this.input[this.position];\n this.position++;\n if (escaped) {\n content += char;\n escaped = false;\n } else if (char === '\\\\') {\n escaped = true;\n } else if (char === quoteChar) {\n return content;\n } else {\n content += char;\n }\n }\n\n const partialString = this.input.slice(\n startPos,\n Math.min(this.position, startPos + 20)\n );\n throw new SignatureValidationError(\n `Unterminated string starting at position ${startPos}`,\n startPos,\n this.getErrorContext(),\n `Add closing ${quoteChar} to complete the string: ${partialString}${quoteChar}`\n );\n }\n }\n return undefined;\n }\n\n private skipWhitespace() {\n const match = /^[\\s\\t\\r\\n]+/.exec(this.input.slice(this.position));\n if (match) {\n this.position += match[0].length;\n }\n }\n\n private match(strOrRegex: string | RegExp): boolean {\n let match: RegExpExecArray | null;\n if (typeof strOrRegex === 'string') {\n if (this.input.startsWith(strOrRegex, this.position)) {\n this.position += strOrRegex.length;\n return true;\n }\n } else {\n match = strOrRegex.exec(this.input.slice(this.position));\n if (match) {\n this.position += match[0].length;\n return true;\n }\n }\n return false;\n }\n\n private expectArrow() {\n if (!this.match('->')) {\n const found = this.input.slice(this.position, this.position + 10);\n const suggestion = found.includes('>')\n ? 'Use \"->\" (dash followed by greater-than)'\n : found.includes('-')\n ? 'Add \">\" after the dash'\n : 'Add \"->\" to separate input and output fields';\n\n throw new SignatureValidationError(\n `Expected \"->\" but found \"${found}...\"`,\n this.position,\n this.getErrorContext(),\n suggestion\n );\n }\n }\n}\n\nexport function parseSignature(input: string): ParsedSignature {\n const parser = new SignatureParser(input);\n return parser.parse();\n}\n","import type { AxChatRequest } from '../ai/types.js';\n\nimport { formatDateWithTimezone } from './datetime.js';\nimport type { AxInputFunctionType } from './functions.js';\nimport type { AxField, AxIField, AxSignature } from './sig.js';\nimport type { AxFieldValue, AxGenIn, AxMessage } from './types.js';\nimport { validateValue } from './util.js';\n\ntype Writeable<T> = { -readonly [P in keyof T]: T[P] };\n\n// Define options type for AxPromptTemplate constructor\nexport interface AxPromptTemplateOptions {\n functions?: Readonly<AxInputFunctionType>;\n thoughtFieldName?: string;\n}\ntype AxChatRequestChatPrompt = Writeable<AxChatRequest['chatPrompt'][0]>;\n\ntype ChatRequestUserMessage = Exclude<\n Extract<AxChatRequestChatPrompt, { role: 'user' }>['content'],\n string\n>;\n\nconst functionCallInstructions = `\n## Function Call Instructions\n- Complete the task, using the functions defined earlier in this prompt. 
\n- Output fields should only be generated after all functions have been called.\n- Use the function results to generate the output fields.`;\n\nconst formattingRules = `\n## Strict Output Formatting Rules\n- Output must strictly follow the defined plain-text \\`field name: value\\` field format.\n- Output field, values must strictly adhere to the specified output field formatting rules.\n- No formatting rules should override these **Strict Output Formatting Rules**\n- Do not add any text before or after the output fields, just the field name and value.\n- Do not use code blocks.`;\n\nexport type AxFieldTemplateFn = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n) => ChatRequestUserMessage;\n\nexport class AxPromptTemplate {\n private sig: Readonly<AxSignature>;\n private fieldTemplates?: Record<string, AxFieldTemplateFn>;\n private task: { type: 'text'; text: string };\n private readonly thoughtFieldName: string;\n private readonly functions?: Readonly<AxInputFunctionType>;\n\n constructor(\n sig: Readonly<AxSignature>,\n options?: Readonly<AxPromptTemplateOptions>,\n fieldTemplates?: Record<string, AxFieldTemplateFn>\n ) {\n this.sig = sig;\n this.fieldTemplates = fieldTemplates;\n this.thoughtFieldName = options?.thoughtFieldName ?? 'thought';\n this.functions = options?.functions;\n\n const task = [];\n\n const inArgs = renderDescFields(this.sig.getInputFields());\n const outArgs = renderDescFields(this.sig.getOutputFields());\n task.push(\n `You will be provided with the following fields: ${inArgs}. Your task is to generate new fields: ${outArgs}.`\n );\n\n // biome-ignore lint/complexity/useFlatMap: you cannot use flatMap here\n const funcs = this.functions\n ?.map((f) => ('toFunction' in f ? f.toFunction() : f))\n ?.flat();\n\n const funcList = funcs\n ?.map((fn) => `- \\`${fn.name}\\`: ${formatDescription(fn.description)}`)\n .join('\\n');\n\n if (funcList && funcList.length > 0) {\n task.push(`## Available Functions\\n${funcList}`);\n }\n\n const inputFields = renderInputFields(this.sig.getInputFields());\n task.push(`## Input Fields\\n${inputFields}`);\n\n const outputFields = renderOutputFields(this.sig.getOutputFields());\n task.push(`## Output Fields\\n${outputFields}`);\n\n if (funcList && funcList.length > 0) {\n task.push(functionCallInstructions.trim());\n }\n\n task.push(formattingRules.trim());\n\n const desc = this.sig.getDescription();\n if (desc) {\n const text = formatDescription(desc);\n task.push(text);\n }\n\n this.task = {\n type: 'text' as const,\n text: task.join('\\n\\n'),\n };\n }\n\n private renderSingleValueUserContent = <T extends AxGenIn>(\n values: T,\n renderedExamples: ChatRequestUserMessage,\n renderedDemos: ChatRequestUserMessage,\n examplesInSystemPrompt: boolean\n ): string | ChatRequestUserMessage => {\n const completion = this.renderInputFields(values);\n const promptList: ChatRequestUserMessage = examplesInSystemPrompt\n ? completion\n : [...renderedExamples, ...renderedDemos, ...completion];\n\n const prompt = promptList.filter((v) => v !== undefined);\n\n return prompt.every((v) => v.type === 'text')\n ? 
prompt.map((v) => v.text).join('\\n')\n : prompt.reduce(combineConsecutiveStrings('\\n'), []);\n };\n\n public render = <T extends AxGenIn>(\n values: T | ReadonlyArray<AxMessage<T>>, // Allow T (AxGenIn) or array of AxMessages\n {\n examples,\n demos,\n }: Readonly<{\n skipSystemPrompt?: boolean;\n examples?: Record<string, AxFieldValue>[]; // Keep as is, examples are specific structures\n demos?: Record<string, AxFieldValue>[]; // Keep as is\n }>\n ): Extract<\n AxChatRequest['chatPrompt'][number],\n { role: 'user' | 'system' | 'assistant' }\n >[] => {\n const renderedExamples = examples\n ? [\n { type: 'text' as const, text: '\\n\\n## Examples\\n' },\n ...this.renderExamples(examples),\n ]\n : [];\n\n const renderedDemos = demos ? this.renderDemos(demos) : [];\n\n // Check if demos and examples are all text type\n const allTextExamples = renderedExamples.every((v) => v.type === 'text');\n const allTextDemos = renderedDemos.every((v) => v.type === 'text');\n const examplesInSystemPrompt = allTextExamples && allTextDemos;\n\n let systemContent = this.task.text;\n\n if (examplesInSystemPrompt) {\n const combinedItems = [\n { type: 'text' as const, text: systemContent },\n ...renderedExamples,\n ...renderedDemos,\n ];\n combinedItems.reduce(combineConsecutiveStrings(''), []);\n\n if (combinedItems?.[0]) {\n systemContent = combinedItems[0].text;\n }\n }\n\n const systemPrompt = {\n role: 'system' as const,\n content: systemContent,\n };\n\n if (Array.isArray(values)) {\n const messages: Extract<\n AxChatRequest['chatPrompt'][number],\n { role: 'user' } | { role: 'assistant' }\n >[] = [];\n\n const history = values as ReadonlyArray<AxMessage<T>>;\n\n let firstItem = true;\n for (const message of history) {\n let content: string | ChatRequestUserMessage;\n\n if (firstItem) {\n content = this.renderSingleValueUserContent(\n message.values,\n renderedExamples,\n renderedDemos,\n examplesInSystemPrompt\n );\n firstItem = false;\n } else {\n content = this.renderSingleValueUserContent(\n message.values,\n [],\n [],\n false\n );\n }\n\n if (message.role === 'user') {\n messages.push({ role: 'user', content });\n continue;\n }\n\n if (message.role !== 'assistant') {\n throw new Error('Invalid message role');\n }\n\n if (typeof content !== 'string') {\n throw new Error(\n 'Assistant message cannot contain non-text content like images, files,etc'\n );\n }\n\n messages.push({ role: 'assistant', content });\n }\n\n return [systemPrompt, ...messages];\n }\n\n // values is T (AxGenIn) - existing logic path\n const userContent = this.renderSingleValueUserContent(\n values as T,\n renderedExamples,\n renderedDemos,\n examplesInSystemPrompt\n );\n\n return [systemPrompt, { role: 'user' as const, content: userContent }];\n };\n\n public renderExtraFields = (extraFields: readonly AxIField[]) => {\n const prompt: ChatRequestUserMessage = [];\n\n if (!extraFields || extraFields.length === 0) {\n return prompt;\n }\n\n const groupedFields = extraFields.reduce(\n (acc, field) => {\n const title = field.title;\n if (!acc[title]) {\n acc[title] = [];\n }\n acc[title].push(field);\n return acc;\n },\n {} as Record<string, AxIField[]>\n );\n\n const formattedGroupedFields = Object.entries(groupedFields)\n .map(([title, fields]) => {\n if (fields.length === 1) {\n const field = fields[0]!;\n return {\n title,\n name: field.name,\n description: field.description,\n };\n }\n if (fields.length > 1) {\n const valuesList = fields\n .map((field) => `- ${field.description}`)\n .join('\\n');\n return {\n title,\n name: 
fields[0]!.name,\n description: valuesList,\n };\n }\n })\n .filter(Boolean) as AxIField[];\n\n formattedGroupedFields.forEach((field) => {\n const fn = this.fieldTemplates?.[field.name] ?? this.defaultRenderInField;\n prompt.push(...fn(field, field.description));\n });\n\n return prompt;\n };\n\n private renderExamples = (data: Readonly<Record<string, AxFieldValue>[]>) => {\n const list: ChatRequestUserMessage = [];\n const exampleContext = {\n isExample: true,\n };\n\n for (const [index, item] of data.entries()) {\n const renderedInputItem = this.sig\n .getInputFields()\n .map((field) =>\n this.renderInField(field, item, {\n ...exampleContext,\n isInputField: true,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const renderedOutputItem = this.sig\n .getOutputFields()\n .map((field) =>\n this.renderInField(field, item, {\n ...exampleContext,\n isInputField: false,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const renderedItem = [...renderedInputItem, ...renderedOutputItem];\n\n if (\n index > 0 &&\n renderedItem.length > 0 &&\n renderedItem[0]?.type === 'text'\n ) {\n list.push({ type: 'text' as const, text: '---\\n\\n' });\n }\n\n renderedItem.forEach((v) => {\n if ('text' in v) {\n v.text = `${v.text}\\n`;\n }\n list.push(v);\n });\n }\n\n return list;\n };\n\n private renderDemos = (data: Readonly<Record<string, AxFieldValue>[]>) => {\n const list: ChatRequestUserMessage = [];\n const inputFields = this.sig.getInputFields();\n const outputFields = this.sig.getOutputFields();\n const demoContext = {\n isExample: true,\n };\n\n for (const item of data) {\n const inputRenderedItems = inputFields\n .map((field) =>\n this.renderInField(field, item, {\n ...demoContext,\n isInputField: true,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const outputRenderedItems = outputFields\n .map((field) =>\n this.renderInField(field, item, {\n ...demoContext,\n isInputField: false,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const renderedItem = [...inputRenderedItems, ...outputRenderedItems];\n\n renderedItem.slice(0, -1).forEach((v) => {\n if ('text' in v) {\n v.text = `${v.text}\\n`;\n }\n list.push(v);\n });\n }\n\n return list;\n };\n\n private renderInputFields = <T extends AxGenIn>(values: T) => {\n const renderedItems = this.sig\n .getInputFields()\n .map((field) => this.renderInField(field, values, undefined))\n .filter((v) => v !== undefined)\n .flat();\n\n renderedItems\n .filter((v) => v.type === 'text')\n .forEach((v) => {\n v.text = `${v.text}\\n`;\n });\n\n return renderedItems;\n };\n\n private renderInField = (\n field: Readonly<AxField>,\n values: Readonly<Record<string, AxFieldValue>>,\n context?: {\n isExample?: boolean;\n strictExamples?: boolean;\n optionalOutputFields?: string[];\n isInputField?: boolean;\n }\n ) => {\n const value = values[field.name];\n\n if (isEmptyValue(field, value, context)) {\n return;\n }\n\n if (field.type) {\n validateValue(field, value!);\n }\n\n const processedValue = processValue(field, value!);\n\n const textFieldFn: AxFieldTemplateFn =\n this.fieldTemplates?.[field.name] ?? 
this.defaultRenderInField;\n\n return textFieldFn(field, processedValue);\n };\n\n private defaultRenderInField = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n ): ChatRequestUserMessage => {\n if (field.type?.name === 'image') {\n const validateImage = (\n value: Readonly<AxFieldValue>\n ): { mimeType: string; data: string } => {\n if (!value) {\n throw new Error('Image field value is required.');\n }\n\n if (typeof value !== 'object') {\n throw new Error('Image field value must be an object.');\n }\n if (!('mimeType' in value)) {\n throw new Error('Image field must have mimeType');\n }\n if (!('data' in value)) {\n throw new Error('Image field must have data');\n }\n return value as { mimeType: string; data: string };\n };\n\n let result: ChatRequestUserMessage = [\n { type: 'text', text: `${field.title}: ` as string },\n ];\n\n if (field.type.isArray) {\n if (!Array.isArray(value)) {\n throw new Error('Image field value must be an array.');\n }\n result = result.concat(\n (value as unknown[]).map((v) => {\n // Cast to unknown[] before map\n const validated = validateImage(v as AxFieldValue);\n return {\n type: 'image',\n mimeType: validated.mimeType,\n image: validated.data,\n };\n })\n );\n } else {\n const validated = validateImage(value);\n result.push({\n type: 'image',\n mimeType: validated.mimeType,\n image: validated.data,\n });\n }\n return result;\n }\n\n if (field.type?.name === 'audio') {\n const validateAudio = (\n value: Readonly<AxFieldValue>\n ): { format?: 'wav'; data: string } => {\n if (!value) {\n throw new Error('Audio field value is required.');\n }\n\n if (typeof value !== 'object') {\n throw new Error('Audio field value must be an object.');\n }\n if (!('data' in value)) {\n throw new Error('Audio field must have data');\n }\n return value as { format?: 'wav'; data: string };\n };\n\n let result: ChatRequestUserMessage = [\n { type: 'text', text: `${field.title}: ` as string },\n ];\n\n if (field.type.isArray) {\n if (!Array.isArray(value)) {\n throw new Error('Audio field value must be an array.');\n }\n result = result.concat(\n (value as unknown[]).map((v) => {\n // Cast to unknown[] before map\n const validated = validateAudio(v as AxFieldValue);\n return {\n type: 'audio',\n format: validated.format ?? 'wav',\n data: validated.data,\n };\n })\n );\n } else {\n const validated = validateAudio(value);\n result.push({\n type: 'audio',\n format: validated.format ?? 'wav',\n data: validated.data,\n });\n }\n return result;\n }\n\n const text = [field.title, ': '];\n\n if (Array.isArray(value)) {\n text.push('\\n');\n text.push(value.map((v) => `- ${v}`).join('\\n'));\n } else {\n text.push(value as string);\n }\n return [{ type: 'text', text: text.join('') }];\n };\n}\n\nconst renderDescFields = (list: readonly AxField[]) =>\n list.map((v) => `\\`${v.title}\\``).join(', ');\n\nconst renderInputFields = (fields: readonly AxField[]) => {\n const rows = fields.map((field) => {\n const name = field.title;\n const type = field.type?.name ? toFieldType(field.type) : 'string';\n\n const requiredMsg = field.isOptional\n ? `This optional ${type} field may be omitted`\n : `A ${type} field`;\n\n const description = field.description\n ? ` ${formatDescription(field.description)}`\n : '';\n\n return `${name}: (${requiredMsg})${description}`.trim();\n });\n\n return rows.join('\\n');\n};\n\nconst renderOutputFields = (fields: readonly AxField[]) => {\n const rows = fields.map((field) => {\n const name = field.title;\n const type = field.type?.name ? 
toFieldType(field.type) : 'string';\n\n const requiredMsg = field.isOptional\n ? `Only include this ${type} field if its value is available`\n : `This ${type} field must be included`;\n\n let description = '';\n\n if (field.description && field.description.length > 0) {\n const value =\n field.type?.name === 'class'\n ? field.description\n : formatDescription(field.description);\n description = ` ${value}`;\n }\n\n if (field.type?.options && field.type.options.length > 0) {\n if (description.length > 0) {\n description += '. ';\n }\n description += `Allowed values: ${field.type.options.join(', ')}`;\n }\n\n return `${name}: (${requiredMsg})${description}`.trim();\n });\n\n return rows.join('\\n');\n};\n\nconst processValue = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n): AxFieldValue => {\n if (field.type?.name === 'date' && value instanceof Date) {\n const v = value.toISOString();\n return v.slice(0, v.indexOf('T'));\n }\n if (field.type?.name === 'datetime' && value instanceof Date) {\n return formatDateWithTimezone(value);\n }\n if (field.type?.name === 'image' && typeof value === 'object') {\n return value;\n }\n if (field.type?.name === 'audio' && typeof value === 'object') {\n return value;\n }\n if (typeof value === 'string') {\n return value;\n }\n return JSON.stringify(value, null, 2);\n};\n\nexport const toFieldType = (type: Readonly<AxField['type']>) => {\n const baseType = (() => {\n switch (type?.name) {\n case 'string':\n return 'string';\n case 'number':\n return 'number';\n case 'boolean':\n return 'boolean (true or false)';\n case 'date':\n return 'date (\"YYYY-MM-DD\" format)';\n case 'datetime':\n return 'date time (\"YYYY-MM-DD HH:mm Timezone\" format)';\n case 'json':\n return 'JSON object';\n case 'class':\n return 'classification class';\n case 'code':\n return 'code';\n default:\n return 'string';\n }\n })();\n\n return type?.isArray ? `json array of ${baseType} items` : baseType;\n};\n\nfunction combineConsecutiveStrings(separator: string) {\n return (acc: ChatRequestUserMessage, current: ChatRequestUserMessage[0]) => {\n if (current.type === 'text') {\n const previous = acc.length > 0 ? acc[acc.length - 1] : null;\n if (previous && previous.type === 'text') {\n previous.text += separator + current.text;\n } else {\n acc.push(current);\n }\n } else {\n acc.push(current);\n }\n return acc;\n };\n}\n\nconst isEmptyValue = (\n field: Readonly<AxField>,\n value?: Readonly<AxFieldValue>,\n context?: {\n isExample?: boolean;\n isInputField?: boolean;\n }\n) => {\n if (typeof value === 'boolean') {\n return false;\n }\n\n if (\n !value ||\n ((Array.isArray(value) || typeof value === 'string') && value.length === 0)\n ) {\n // Handle examples case - all fields can be missing in examples\n if (context?.isExample) {\n return true;\n }\n\n // Handle non-examples case (regular field validation)\n if (field.isOptional || field.isInternal) {\n return true;\n }\n\n const fieldType = context?.isInputField !== false ? 'input' : 'output';\n throw new Error(\n `Value for ${fieldType} field '${field.name}' is required.`\n );\n }\n return false;\n};\n\nfunction formatDescription(str: string) {\n const value = str.trim();\n return value.length > 0\n ? `${value.charAt(0).toUpperCase()}${value.slice(1)}${value.endsWith('.') ? 
'' : '.'}`\n : '';\n}\n","import type { AxAIMemory } from '../mem/types.js';\n\nimport type {\n AxGenDeltaOut,\n AxResultPickerFunction,\n AxResultPickerFunctionFunctionResults,\n} from './program.js';\nimport type { AxGenOut } from './types.js';\n\nexport interface AxSamplePickerOptions<OUT extends AxGenOut> {\n resultPicker?: AxResultPickerFunction<OUT>;\n}\n\n/**\n * Checks if there are function calls in memory\n */\nfunction checkForFunctionCalls(mem: AxAIMemory, sessionId?: string): boolean {\n const history = mem.history(0, sessionId);\n\n // Check for both function calls and function results\n const hasFunctionResults = history.some((msg) => msg.role === 'function');\n const hasFunctionCalls = history.some(\n (msg) =>\n msg.role === 'assistant' &&\n 'functionCalls' in msg &&\n Array.isArray(msg.functionCalls) &&\n msg.functionCalls.length > 0\n );\n\n return hasFunctionCalls && hasFunctionResults;\n}\n\n/**\n * Extracts function execution results from memory\n */\nfunction extractFunctionResults(\n mem: AxAIMemory,\n sessionId?: string\n): AxResultPickerFunctionFunctionResults['results'] {\n const history = mem.history(0, sessionId);\n const results: {\n index: number;\n functionName: string;\n functionId: string;\n args: string | object;\n result: string;\n isError?: boolean;\n }[] = [];\n\n // Find assistant messages with function calls\n const assistantMessages = history.filter(\n (msg) =>\n msg.role === 'assistant' &&\n 'functionCalls' in msg &&\n Array.isArray(msg.functionCalls) &&\n msg.functionCalls.length > 0\n );\n\n // Find function result messages\n const functionMessages = history.filter((msg) => msg.role === 'function');\n\n // Match function calls with their results\n for (const assistantMsg of assistantMessages) {\n if ('functionCalls' in assistantMsg && assistantMsg.functionCalls) {\n for (const funcCall of assistantMsg.functionCalls) {\n // Find the corresponding function result\n const funcResult = functionMessages.find(\n (msg) => 'functionId' in msg && msg.functionId === funcCall.id\n );\n\n if (\n funcResult &&\n 'result' in funcResult &&\n 'functionId' in funcResult\n ) {\n results.push({\n index: results.length, // Use sequential index for function results\n functionName: funcCall.function.name,\n functionId: funcCall.id,\n args: funcCall.function.params || '',\n result: String(funcResult.result),\n isError:\n 'isError' in funcResult ? Boolean(funcResult.isError) : false,\n });\n }\n }\n }\n }\n return results;\n}\n\n/**\n * Selects a result from multiple samples using the provided result picker function.\n * If no result picker is provided or only one result exists, returns the first result.\n */\nexport async function selectFromSamples<OUT extends AxGenOut>(\n buffer: AxGenDeltaOut<OUT>[],\n options?: AxSamplePickerOptions<OUT>,\n mem?: AxAIMemory,\n sessionId?: string\n): Promise<number> {\n // If no result picker or only one result, use index 0\n if (!options?.resultPicker || buffer.length <= 1) {\n return 0;\n }\n\n const resultPicker = options.resultPicker;\n\n // Check if there are function calls in memory to determine data type\n const hasFunctionCalls = mem ? 
checkForFunctionCalls(mem, sessionId) : false;\n\n if (hasFunctionCalls && mem) {\n // Extract function execution data from memory\n const functionResults = extractFunctionResults(mem, sessionId);\n const selectedIndex = await resultPicker({\n type: 'function',\n results: functionResults,\n });\n\n // Validate the selected index\n if (selectedIndex < 0 || selectedIndex >= functionResults.length) {\n throw new Error(\n `Result picker returned invalid index: ${selectedIndex}. Must be between 0 and ${functionResults.length - 1}`\n );\n }\n\n return selectedIndex;\n }\n // Use field results\n const fieldResults = buffer.map((b, index) => ({\n index,\n sample: b.delta,\n }));\n\n const selectedIndex = await resultPicker({\n type: 'fields',\n results: fieldResults,\n });\n\n // Validate the selected index\n if (selectedIndex < 0 || selectedIndex >= buffer.length) {\n throw new Error(\n `Result picker returned invalid index: ${selectedIndex}. Must be between 0 and ${buffer.length - 1}`\n );\n }\n\n return selectedIndex;\n}\n\n/**\n * Selects a result index from memory using the provided result picker function.\n * If no result picker is provided or only one result exists, returns 0.\n * If the last memory is not from an assistant role, returns 0.\n */\nexport async function selectFromSamplesInMemory<OUT extends AxGenOut>(\n mem: AxAIMemory,\n sessionId?: string,\n options?: AxSamplePickerOptions<OUT>\n): Promise<number> {\n const lastMemory = mem?.getLast(sessionId);\n\n // If no memory or not from assistant role, return 0\n if (!lastMemory || lastMemory.role !== 'assistant') {\n return 0;\n }\n\n // If only one chat sample, return 0\n if (lastMemory.chat.length <= 1) {\n return 0;\n }\n\n // Convert memory chat to buffer format for selectFromSamples\n const buffer = lastMemory.chat.map((chat) => ({\n version: 0,\n index: chat.index,\n delta: chat.value as OUT,\n }));\n\n const selectedIndex = await selectFromSamples(\n buffer,\n options,\n mem,\n sessionId\n );\n return selectedIndex;\n}\n","import type { AxAIService } from '../ai/types.js';\nimport type { AxAIMemory } from '../mem/types.js';\n\nimport type { AxPromptTemplate } from './prompt.js';\nimport type { AxIField } from './sig.js';\n\nexport function handleValidationError(\n mem: AxAIMemory,\n errorFields: AxIField[],\n ai: Readonly<AxAIService>,\n promptTemplate: Readonly<AxPromptTemplate>,\n sessionId?: string\n) {\n mem.addRequest(\n [\n {\n role: 'user' as const,\n content: promptTemplate.renderExtraFields(errorFields),\n },\n ],\n sessionId\n );\n mem.addTag('error', sessionId);\n\n if (ai.getOptions().debug) {\n const errors = errorFields\n .map((field) => `- ${field.title}: ${field.description}`)\n .join('\\n');\n\n const logger = ai.getLogger();\n logger(`❌ Error Correction:\\n${errors}`, {\n tags: ['error'],\n });\n }\n}\n","// ReadableStream is available globally in modern browsers and Node.js 16+\n\nimport {\n type Context,\n context,\n type Meter,\n type Span,\n SpanKind,\n trace,\n} from '@opentelemetry/api';\n\nimport { validateAxMessageArray } from '../ai/base.js';\nimport type {\n AxAIService,\n AxChatRequest,\n AxChatResponseResult,\n AxFunction,\n} from '../ai/types.js';\nimport { AxMemory } from '../mem/memory.js';\nimport type { AxAIMemory } from '../mem/types.js';\nimport { AxAIServiceStreamTerminatedError } from '../util/apicall.js';\n\nimport {\n type AxAssertion,\n AxAssertionError,\n type AxStreamingAssertion,\n} from './asserts.js';\nimport { ValidationError } from './errors.js';\nimport type { extractionState } 
from './extract.js';\nimport type { AxFieldProcessor } from './fieldProcessor.js';\nimport {\n type AxChatResponseFunctionCall,\n createFunctionConfig,\n parseFunctions,\n} from './functions.js';\nimport {\n type AxGenMetricsInstruments,\n getOrCreateGenMetricsInstruments,\n recordErrorCorrectionMetric,\n recordFieldProcessingMetric,\n recordFunctionCallingMetric,\n recordGenerationMetric,\n recordMultiStepMetric,\n recordPerformanceMetric,\n recordSamplesMetric,\n recordSignatureComplexityMetrics,\n recordStreamingMetric,\n recordValidationErrorMetric,\n} from './metrics.js';\nimport {\n processResponse,\n processStreamingResponse,\n shouldContinueSteps,\n} from './processResponse.js';\nimport {\n type AsyncGenDeltaOut,\n type AxGenDeltaOut,\n type AxGenStreamingOut,\n AxProgram,\n type AxProgramExamples,\n type AxProgramForwardOptions,\n type AxProgramStreamingForwardOptions,\n type AxResultPickerFunction,\n type AxSetExamplesOptions,\n} from './program.js';\nimport { AxPromptTemplate } from './prompt.js';\nimport { selectFromSamples, selectFromSamplesInMemory } from './samples.js';\nimport type { AxIField, AxSignature } from './sig.js';\nimport type {\n AxGenIn,\n AxGenIn as AxGenInType,\n AxGenOut,\n AxGenOut as AxGenOutType,\n AxMessage,\n} from './types.js';\nimport { mergeDeltas } from './util.js';\nimport { handleValidationError } from './validate.js';\n\nexport type AxGenerateResult<OUT extends AxGenOutType> = OUT & {\n thought?: string;\n};\n\nexport interface AxResponseHandlerArgs<T> {\n ai: Readonly<AxAIService>;\n model?: string;\n res: T;\n mem: AxAIMemory;\n sessionId?: string;\n traceId?: string;\n functions: Readonly<AxFunction[]>;\n strictMode?: boolean;\n span?: Span;\n}\n\nexport interface AxStreamingEvent<T> {\n event: 'delta' | 'done' | 'error';\n data: {\n contentDelta?: string;\n partialValues?: Partial<T>;\n error?: string;\n functions?: AxChatResponseFunctionCall[];\n };\n}\n\nexport type InternalAxGenState = {\n index: number;\n values: AxGenOutType;\n content: string;\n functionsExecuted: Set<string>;\n functionCalls: NonNullable<AxChatResponseResult['functionCalls']>;\n xstate: extractionState;\n};\n\nexport class AxGen<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> extends AxProgram<IN, OUT> {\n private promptTemplate: AxPromptTemplate;\n private asserts: AxAssertion[];\n private streamingAsserts: AxStreamingAssertion[];\n private options?: Omit<AxProgramForwardOptions, 'functions'>;\n private functions?: AxFunction[];\n private fieldProcessors: AxFieldProcessor[] = [];\n private streamingFieldProcessors: AxFieldProcessor[] = [];\n private excludeContentFromTrace = false;\n private thoughtFieldName: string;\n\n constructor(\n signature: NonNullable<ConstructorParameters<typeof AxSignature>[0]>,\n options?: Readonly<AxProgramForwardOptions>\n ) {\n super(signature, {\n description: options?.description,\n traceLabel: options?.traceLabel,\n });\n\n this.options = options;\n this.thoughtFieldName = options?.thoughtFieldName ?? 'thought';\n const promptTemplateOptions = {\n functions: options?.functions,\n thoughtFieldName: this.thoughtFieldName,\n };\n this.promptTemplate = new (options?.promptTemplate ?? AxPromptTemplate)(\n this.signature,\n promptTemplateOptions\n );\n this.asserts = this.options?.asserts ?? [];\n this.streamingAsserts = this.options?.streamingAsserts ?? [];\n this.excludeContentFromTrace = options?.excludeContentFromTrace ?? 
false;\n this.usage = [];\n\n if (options?.functions) {\n this.functions = parseFunctions(options.functions);\n }\n }\n\n private getSignatureName(): string {\n return this.signature.getDescription() || 'unknown_signature';\n }\n\n private getMetricsInstruments(): AxGenMetricsInstruments | undefined {\n return getOrCreateGenMetricsInstruments();\n }\n\n public updateMeter(meter?: Meter): void {\n // This now just updates the global singleton, no need to store locally\n getOrCreateGenMetricsInstruments(meter);\n }\n\n private createStates(n: number) {\n return Array.from({ length: n }, (_, index) => ({\n index,\n functionCalls: [],\n values: {},\n content: '',\n functionsExecuted: new Set<string>(),\n xstate: {\n extractedFields: [],\n streamedIndex: {},\n s: -1,\n },\n }));\n }\n\n public addAssert = (fn: AxAssertion['fn'], message?: string) => {\n this.asserts.push({ fn, message });\n };\n\n public addStreamingAssert = (\n fieldName: string,\n fn: AxStreamingAssertion['fn'],\n message?: string\n ) => {\n this.streamingAsserts.push({ fieldName, fn, message });\n };\n\n private addFieldProcessorInternal = (\n fieldName: string,\n fn: AxFieldProcessor['process'],\n streaming = false\n ) => {\n const field = this.signature\n .getOutputFields()\n .find((f) => f.name === fieldName);\n\n if (!field) {\n throw new Error(`addFieldProcessor: field ${fieldName} not found`);\n }\n\n if (streaming) {\n const ft = field.type?.name;\n const isText = !ft || ft === 'string' || ft === 'code';\n\n if (!isText) {\n throw new Error(\n `addFieldProcessor: field ${fieldName} is must be a text field`\n );\n }\n this.streamingFieldProcessors.push({ field, process: fn });\n } else {\n this.fieldProcessors.push({ field, process: fn });\n }\n };\n\n public addStreamingFieldProcessor = (\n fieldName: string,\n fn: AxFieldProcessor['process']\n ) => {\n this.addFieldProcessorInternal(fieldName, fn, true);\n };\n\n public addFieldProcessor = (\n fieldName: string,\n fn: AxFieldProcessor['process']\n ) => {\n this.addFieldProcessorInternal(fieldName, fn, false);\n };\n\n private async forwardSendRequest({\n ai,\n mem,\n options,\n traceContext,\n functions,\n functionCall,\n }: Readonly<{\n ai: Readonly<AxAIService>;\n mem: AxAIMemory;\n options?: Omit<AxProgramForwardOptions, 'ai' | 'mem'>;\n traceContext?: Context;\n functions: AxFunction[];\n functionCall: AxChatRequest['functionCall'] | undefined;\n }>) {\n const {\n sessionId,\n traceId,\n model,\n rateLimiter,\n stream,\n thinkingTokenBudget,\n showThoughts,\n } = options ?? {};\n\n // Use selectFromSamplesInMemory to choose the best sample before getting history\n const selectedIndex = await selectFromSamplesInMemory<OUT>(mem, sessionId, {\n resultPicker: options?.resultPicker as\n | AxResultPickerFunction<OUT>\n | undefined,\n });\n\n const chatPrompt = mem?.history(selectedIndex, sessionId) ?? [];\n\n if (chatPrompt.length === 0) {\n throw new Error('No chat prompt found');\n }\n const modelConfig = {\n ...options?.modelConfig,\n ...(options?.sampleCount ? { n: options.sampleCount } : {}),\n ...(options?.sampleCount && options?.modelConfig?.temperature === 1\n ? 
{ temperature: 0.8 }\n : {}),\n };\n\n const res = await ai.chat(\n {\n chatPrompt,\n functions,\n functionCall,\n modelConfig,\n model,\n },\n {\n sessionId,\n traceId,\n rateLimiter,\n stream,\n debug: false, // we do our own debug logging\n thinkingTokenBudget,\n showThoughts,\n traceContext,\n abortSignal: options?.abortSignal,\n }\n );\n\n return res;\n }\n\n private async *forwardCore({\n ai,\n mem,\n options,\n firstStep,\n span,\n traceContext,\n }: Readonly<{\n ai: Readonly<AxAIService>;\n mem: AxAIMemory;\n options: Omit<AxProgramForwardOptions, 'ai' | 'mem'>;\n firstStep: boolean;\n span?: Span;\n traceContext?: Context;\n }>): AsyncGenDeltaOut<OUT> {\n const { sessionId, traceId, functions: functionList } = options ?? {};\n const definedFunctionCall =\n options?.functionCall ?? this.options?.functionCall;\n const strictMode = options?.strictMode ?? false;\n const model = options.model;\n const states = this.createStates(options.sampleCount ?? 1);\n const usage = this.usage;\n\n const { functions, functionCall } = createFunctionConfig(\n functionList,\n definedFunctionCall,\n firstStep\n );\n\n const res = await this.forwardSendRequest({\n ai,\n mem,\n options,\n traceContext,\n functions,\n functionCall,\n });\n\n if (res instanceof ReadableStream) {\n yield* processStreamingResponse({\n ai,\n model,\n res,\n mem,\n traceId,\n sessionId,\n functions,\n strictMode,\n span,\n states,\n usage,\n asserts: this.asserts,\n streamingAsserts: this.streamingAsserts,\n fieldProcessors: this.fieldProcessors,\n streamingFieldProcessors: this.streamingFieldProcessors,\n thoughtFieldName: this.thoughtFieldName,\n excludeContentFromTrace: this.excludeContentFromTrace,\n signature: this.signature,\n functionResultFormatter:\n options?.functionResultFormatter ??\n this.options?.functionResultFormatter,\n });\n } else {\n yield* processResponse({\n ai,\n model,\n res,\n mem,\n traceId,\n sessionId,\n functions,\n span,\n strictMode,\n states,\n usage,\n asserts: this.asserts,\n fieldProcessors: this.fieldProcessors,\n thoughtFieldName: this.thoughtFieldName,\n excludeContentFromTrace: this.excludeContentFromTrace,\n signature: this.signature,\n functionResultFormatter:\n options?.functionResultFormatter ??\n this.options?.functionResultFormatter,\n });\n }\n\n this.getLogger(ai, options)?.('', { tags: ['responseEnd'] });\n }\n\n private async *_forward2(\n ai: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n states: InternalAxGenState[],\n options: Readonly<AxProgramForwardOptions>,\n span?: Span,\n traceContext?: Context\n ): AxGenStreamingOut<OUT> {\n const stopFunction = (\n options?.stopFunction ?? this.options?.stopFunction\n )?.toLowerCase();\n\n const maxRetries = options.maxRetries ?? this.options?.maxRetries ?? 10;\n const maxSteps = options.maxSteps ?? this.options?.maxSteps ?? 10;\n const debugHideSystemPrompt = options.debugHideSystemPrompt;\n const memOptions = {\n debug: this.isDebug(ai, options),\n debugHideSystemPrompt,\n logger: this.getLogger(ai, options),\n };\n\n const mem = options.mem ?? this.options?.mem ?? new AxMemory(memOptions);\n\n let err: ValidationError | AxAssertionError | undefined;\n\n if (options?.functions && options.functions.length > 0) {\n const promptTemplateClass =\n this.options?.promptTemplate ?? 
AxPromptTemplate;\n const currentPromptTemplateOptions = {\n functions: options.functions,\n thoughtFieldName: this.thoughtFieldName,\n };\n this.promptTemplate = new promptTemplateClass(\n this.signature,\n currentPromptTemplateOptions\n );\n }\n\n // New logic:\n let prompt: AxChatRequest['chatPrompt'];\n\n // Track prompt rendering performance\n const promptRenderStart = performance.now();\n\n if (Array.isArray(values)) {\n // Validate AxMessage array items\n validateAxMessageArray(values);\n\n // We'll need to decide how to get the 'individual' IN for demos/examples if needed by render.\n // For now, assume render will handle the array directly.\n // The generic type for render might need to be T (from render<T extends ...>)\n // and T will be inferred as ReadonlyArray<AxMessage>\n prompt = this.promptTemplate.render(values, {\n examples: this.examples,\n demos: this.demos,\n });\n } else {\n // Ensure `values` here is correctly inferred as AxGenInType\n prompt = this.promptTemplate.render(values as AxGenInType, {\n // Cast if necessary\n examples: this.examples,\n demos: this.demos,\n });\n }\n\n const promptRenderDuration = performance.now() - promptRenderStart;\n\n // Record prompt render performance metric\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordPerformanceMetric(\n metricsInstruments,\n 'prompt_render',\n promptRenderDuration,\n this.getSignatureName()\n );\n }\n\n // Track memory update performance\n const memoryUpdateStart = performance.now();\n mem.addRequest(prompt, options.sessionId);\n const memoryUpdateDuration = performance.now() - memoryUpdateStart;\n\n // Record memory update performance metric\n if (metricsInstruments) {\n recordPerformanceMetric(\n metricsInstruments,\n 'memory_update',\n memoryUpdateDuration,\n this.getSignatureName()\n );\n }\n\n multiStepLoop: for (let n = 0; n < maxSteps; n++) {\n const firstStep = n === 0;\n for (let errCount = 0; errCount < maxRetries; errCount++) {\n try {\n const generator = this.forwardCore({\n options,\n ai,\n mem,\n firstStep,\n span,\n traceContext,\n });\n\n for await (const result of generator) {\n if (result !== undefined) {\n yield {\n version: errCount,\n index: result.index,\n delta: result.delta,\n };\n }\n }\n\n const shouldContinue = shouldContinueSteps(\n mem,\n stopFunction,\n states,\n options?.sessionId\n );\n\n if (shouldContinue) {\n // Record multi-step generation metric\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordMultiStepMetric(\n metricsInstruments,\n n + 1,\n maxSteps,\n this.getSignatureName()\n );\n }\n continue multiStepLoop;\n }\n\n // Record successful completion metrics\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordMultiStepMetric(\n metricsInstruments,\n n + 1,\n maxSteps,\n this.getSignatureName()\n );\n\n // Count unique functions executed across all states\n const allFunctionsExecuted = new Set<string>();\n states.forEach((state) => {\n state.functionsExecuted.forEach((func) =>\n allFunctionsExecuted.add(func)\n );\n });\n\n // Record function metrics if functions were used\n if (allFunctionsExecuted.size > 0) {\n recordFunctionCallingMetric(\n metricsInstruments,\n true,\n allFunctionsExecuted.size,\n true,\n false,\n this.getSignatureName()\n );\n }\n\n // Record field processing metrics\n recordFieldProcessingMetric(\n metricsInstruments,\n this.fieldProcessors.length,\n this.streamingFieldProcessors.length,\n this.getSignatureName()\n 
);\n }\n\n return;\n } catch (e) {\n let errorFields: AxIField[] | undefined;\n\n span?.recordException(e as Error);\n\n if (e instanceof ValidationError) {\n errorFields = e.getFixingInstructions();\n err = e;\n\n // Record validation error metric\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordValidationErrorMetric(\n metricsInstruments,\n 'validation',\n this.getSignatureName()\n );\n }\n\n // Add telemetry event for validation error\n if (span) {\n span.addEvent('validation.error', {\n message: e.toString(),\n fixing_instructions:\n errorFields?.map((f) => f.title).join(', ') ?? '',\n });\n }\n } else if (e instanceof AxAssertionError) {\n const e1 = e as AxAssertionError;\n errorFields = e1.getFixingInstructions();\n err = e;\n\n // Record assertion error metric\n const assertionMetricsInstruments = this.getMetricsInstruments();\n if (assertionMetricsInstruments) {\n recordValidationErrorMetric(\n assertionMetricsInstruments,\n 'assertion',\n this.getSignatureName()\n );\n }\n\n // Add telemetry event for assertion error\n if (span) {\n span.addEvent('assertion.error', {\n message: e1.toString(),\n fixing_instructions:\n errorFields?.map((f) => f.title).join(', ') ?? '',\n });\n }\n } else if (e instanceof AxAIServiceStreamTerminatedError) {\n // Do nothing allow error correction to happen\n } else {\n throw enhanceError(e, ai, this.signature);\n }\n\n if (errorFields) {\n handleValidationError(\n mem,\n errorFields,\n ai,\n this.promptTemplate,\n options.sessionId\n );\n }\n }\n }\n\n // Record max retries reached\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordErrorCorrectionMetric(\n metricsInstruments,\n maxRetries,\n false, // failed\n maxRetries,\n this.getSignatureName()\n );\n }\n\n throw enhanceError(\n new Error(`Unable to fix validation error: ${err?.toString()}`),\n ai,\n this.signature\n );\n }\n\n // Record max steps reached\n if (metricsInstruments) {\n recordMultiStepMetric(\n metricsInstruments,\n maxSteps,\n maxSteps,\n this.getSignatureName()\n );\n }\n\n throw enhanceError(\n new Error(`Max steps reached: ${maxSteps}`),\n ai,\n this.signature\n );\n }\n\n public async *_forward1(\n ai: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options: Readonly<AxProgramForwardOptions>\n ): AxGenStreamingOut<OUT> {\n // Track state creation performance\n const stateCreationStart = performance.now();\n const states = this.createStates(options.sampleCount ?? 1);\n const stateCreationDuration = performance.now() - stateCreationStart;\n\n // Record state creation performance metric\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordPerformanceMetric(\n metricsInstruments,\n 'state_creation',\n stateCreationDuration,\n this.getSignatureName()\n );\n }\n\n const tracer =\n options?.tracer ?? this.options?.tracer ?? ai.getOptions().tracer;\n\n let functions: AxFunction[] | undefined = this.functions;\n\n if (options?.functions) {\n functions = parseFunctions(options.functions, this.functions);\n }\n\n if (!tracer) {\n yield* this._forward2(ai, values, states, {\n ...options,\n functions,\n });\n return;\n }\n\n const funcNames = functions?.map((f) => f.name).join(',');\n\n const attributes = {\n signature: JSON.stringify(this.signature.toJSON(), null, 2),\n ...(this.examples\n ? { examples: JSON.stringify(this.examples, null, 2) }\n : {}),\n ...(funcNames ? { provided_functions: funcNames } : {}),\n ...(options?.model ? 
{ model: options.model } : {}),\n ...(options?.thinkingTokenBudget\n ? { thinking_token_budget: options.thinkingTokenBudget }\n : {}),\n ...(options?.showThoughts ? { show_thoughts: options.showThoughts } : {}),\n ...(options?.maxSteps ? { max_steps: options.maxSteps } : {}),\n ...(options?.maxRetries ? { max_retries: options.maxRetries } : {}),\n };\n\n const traceLabel =\n this.traceLabel && options.traceLabel\n ? `${this.traceLabel} > ${options.traceLabel}`\n : (options.traceLabel ?? this.traceLabel);\n const spanName = traceLabel ? `AxGen > ${traceLabel}` : 'AxGen';\n\n const span = tracer.startSpan(spanName, {\n kind: SpanKind.SERVER,\n attributes,\n });\n\n const currentContext = context.active();\n const traceContext = trace.setSpan(currentContext, span);\n\n try {\n if (!this.excludeContentFromTrace) {\n span.addEvent('input', { content: JSON.stringify(values, null, 2) });\n }\n\n yield* this._forward2(\n ai,\n values,\n states,\n {\n ...options,\n functions,\n },\n span,\n traceContext\n );\n\n if (!this.excludeContentFromTrace) {\n const valuesList = states.map((s) => s.values);\n const values = valuesList.length === 1 ? valuesList[0] : valuesList;\n span.addEvent('output', {\n content: JSON.stringify(values, null, 2),\n });\n }\n } finally {\n span.end();\n }\n }\n\n public override async forward(\n ai: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options?: Readonly<AxProgramForwardOptions>\n ): Promise<OUT> {\n const startTime = performance.now();\n const signatureName = this.getSignatureName();\n const isStreaming = options?.stream ?? false;\n let success = false;\n let errorCorrectionAttempts = 0;\n let functionsEnabled = false;\n const functionsExecuted = 0;\n let resultPickerUsed = false;\n\n try {\n // Record signature complexity metrics\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n recordSignatureComplexityMetrics(\n metricsInstruments,\n this.signature.getInputFields().length,\n this.signature.getOutputFields().length,\n this.examples?.length ?? 0,\n this.demos?.length ?? 0,\n signatureName\n );\n }\n\n // Check if functions are enabled\n functionsEnabled = !!(options?.functions || this.functions);\n\n const generator = this._forward1(ai, values, options ?? {});\n\n let buffer: AxGenDeltaOut<OUT>[] = [];\n let currentVersion = 0;\n let deltasEmitted = 0;\n\n for await (const delta of generator) {\n if (delta.version !== currentVersion) {\n buffer = [];\n }\n currentVersion = delta.version;\n buffer = mergeDeltas<OUT>(buffer, delta);\n deltasEmitted++;\n }\n\n // Track error correction attempts from the version count\n errorCorrectionAttempts = currentVersion;\n\n // Use result picker to select from multiple samples\n const resultPickerStart = performance.now();\n resultPickerUsed = !!options?.resultPicker;\n\n const selectedIndex = await selectFromSamples(\n buffer,\n {\n resultPicker: options?.resultPicker as\n | AxResultPickerFunction<OUT>\n | undefined,\n },\n // Pass memory to enable function result selection\n options?.mem,\n options?.sessionId\n );\n\n const resultPickerLatency = performance.now() - resultPickerStart;\n\n const selectedResult = buffer[selectedIndex];\n const result = selectedResult?.delta ?? {};\n this.trace = { ...values, ...result } as unknown as OUT;\n\n success = true;\n\n // Record samples metrics\n if (metricsInstruments) {\n recordSamplesMetric(\n metricsInstruments,\n buffer.length,\n resultPickerUsed,\n resultPickerUsed ? 
resultPickerLatency : undefined,\n signatureName\n );\n\n // Record streaming metrics\n recordStreamingMetric(\n metricsInstruments,\n isStreaming,\n deltasEmitted,\n undefined, // finalization latency not applicable here\n signatureName\n );\n }\n\n return result as unknown as OUT;\n } catch (error) {\n success = false;\n throw error;\n } finally {\n const duration = performance.now() - startTime;\n\n // Record generation metrics\n const finalMetricsInstruments = this.getMetricsInstruments();\n if (finalMetricsInstruments) {\n recordGenerationMetric(\n finalMetricsInstruments,\n duration,\n success,\n signatureName,\n ai.getName(),\n options?.model\n );\n\n // Record function calling metrics if functions were used\n if (functionsEnabled) {\n recordFunctionCallingMetric(\n finalMetricsInstruments,\n functionsEnabled,\n functionsExecuted,\n functionsExecuted > 0,\n false, // function error correction tracking would need more complex logic\n signatureName\n );\n }\n\n // Record error correction metrics\n if (errorCorrectionAttempts > 0) {\n recordErrorCorrectionMetric(\n finalMetricsInstruments,\n errorCorrectionAttempts,\n success,\n options?.maxRetries ?? 10,\n signatureName\n );\n }\n }\n }\n }\n\n override async *streamingForward(\n ai: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options?: Readonly<AxProgramStreamingForwardOptions>\n ): AxGenStreamingOut<OUT> {\n // If no result picker, use normal streaming\n if (!options?.resultPicker) {\n yield* this._forward1(ai, values, {\n ...options,\n stream: true,\n });\n return;\n }\n\n // For result picker, we need to buffer all results first\n const generator = this._forward1(ai, values, {\n ...options,\n stream: true,\n });\n\n let buffer: AxGenDeltaOut<OUT>[] = [];\n let currentVersion = 0;\n\n for await (const delta of generator) {\n if (delta.version !== currentVersion) {\n buffer = [];\n }\n currentVersion = delta.version;\n buffer = mergeDeltas<OUT>(buffer, delta);\n }\n\n // Use result picker to select from samples\n const selectedIndex = await selectFromSamples(\n buffer,\n {\n resultPicker: options?.resultPicker as\n | AxResultPickerFunction<OUT>\n | undefined,\n },\n // Pass memory to enable function result selection\n options?.mem,\n options?.sessionId\n );\n\n // Yield the selected result\n const selectedResult = buffer[selectedIndex];\n if (selectedResult) {\n yield {\n version: currentVersion,\n index: selectedIndex,\n delta: selectedResult.delta,\n };\n }\n }\n\n public override setExamples(\n examples: Readonly<AxProgramExamples<IN, OUT>>,\n options?: Readonly<AxSetExamplesOptions>\n ) {\n super.setExamples(examples, options);\n // No need to update prompt template - all fields can be missing in examples\n }\n\n private isDebug(\n ai: Readonly<AxAIService>,\n options?: Readonly<AxProgramForwardOptions>\n ) {\n return (\n options?.debug ?? this.options?.debug ?? ai.getOptions().debug ?? false\n );\n }\n\n private getLogger(\n ai: Readonly<AxAIService>,\n options?: Readonly<AxProgramForwardOptions>\n ) {\n return options?.logger ?? this.options?.logger ?? 
ai.getLogger();\n }\n}\n\nexport type AxGenerateErrorDetails = {\n model?: string;\n maxTokens?: number;\n streaming: boolean;\n signature: {\n input: Readonly<AxIField[]>;\n output: Readonly<AxIField[]>;\n description?: string;\n };\n};\n\ntype ErrorOptions = { cause?: Error };\n\nexport class AxGenerateError extends Error {\n public readonly details: AxGenerateErrorDetails;\n\n constructor(\n message: string,\n details: Readonly<AxGenerateErrorDetails>,\n options?: ErrorOptions\n ) {\n super(message);\n this.name = 'AxGenerateError';\n this.details = details;\n // Set cause property dynamically to avoid TypeScript issues\n if (options?.cause) {\n (this as ErrorOptions).cause = options.cause;\n }\n }\n}\n\nfunction enhanceError(\n e: unknown,\n ai: Readonly<AxAIService>,\n signature: Readonly<AxSignature>\n): Error {\n const originalError = e instanceof Error ? e : new Error(String(e));\n const model = ai.getLastUsedChatModel() as string | undefined;\n const modelConfig = ai.getLastUsedModelConfig();\n\n const details = {\n model: model,\n maxTokens: modelConfig?.maxTokens,\n streaming: modelConfig?.stream ?? false,\n signature: {\n input: signature.getInputFields(),\n output: signature.getOutputFields(),\n description: signature.getDescription(),\n },\n };\n\n // Return custom error with short message and details as object property\n return new AxGenerateError('Generate failed', details, {\n cause: originalError,\n });\n}\n","import type { AxAIService } from '../ai/types.js';\nimport { AxDBMemory, type AxDBState } from '../db/memory.js';\nimport { ColorLog } from '../util/log.js';\n\nconst colorLog = new ColorLog();\n\nexport interface AxSimpleClassifierForwardOptions {\n cutoff?: number;\n abortSignal?: AbortSignal;\n}\n\nexport class AxSimpleClassifierClass {\n private readonly name: string;\n private readonly context: readonly string[];\n\n constructor(name: string, context: readonly string[]) {\n this.name = name;\n this.context = context;\n }\n\n public getName(): string {\n return this.name;\n }\n\n public getContext(): readonly string[] {\n return this.context;\n }\n}\n\nexport class AxSimpleClassifier {\n private readonly ai: AxAIService;\n\n private db: AxDBMemory;\n private debug?: boolean;\n\n public constructor(ai: AxAIService) {\n this.db = new AxDBMemory();\n this.ai = ai;\n }\n\n public getState(): AxDBState | undefined {\n return this.db.getDB();\n }\n\n public setState(state: AxDBState) {\n this.db.setDB(state);\n }\n\n public setClasses = async (\n classes: readonly AxSimpleClassifierClass[],\n options?: Readonly<{ abortSignal?: AbortSignal }>\n ): Promise<void> => {\n for (const c of classes) {\n const ret = await this.ai.embed(\n { texts: c.getContext() },\n {\n abortSignal: options?.abortSignal,\n }\n );\n await this.db.upsert({\n id: c.getName(),\n table: 'classes',\n values: ret.embeddings[0],\n });\n }\n };\n\n public async forward(\n text: string,\n options?: Readonly<AxSimpleClassifierForwardOptions>\n ): Promise<string> {\n const { embeddings } = await this.ai.embed(\n { texts: [text] },\n {\n abortSignal: options?.abortSignal,\n }\n );\n\n const matches = await this.db.query({\n table: 'classes',\n values: embeddings[0],\n });\n\n let m = matches.matches;\n if (typeof options?.cutoff === 'number') {\n const { cutoff } = options;\n m = m.filter((m) => m.score <= cutoff);\n }\n\n if (this.debug) {\n console.log(\n `${colorLog.whiteBright(`query: ${text}`)}\\n${colorLog.greenBright(\n JSON.stringify(m.map((m) => `${m.id}, ${m.score}`))\n )}`\n );\n }\n\n const 
matchedClass = m.at(0);\n if (!matchedClass) {\n return '';\n }\n\n return matchedClass.id;\n }\n\n public setOptions(options: Readonly<{ debug?: boolean }>): void {\n if (typeof options.debug === 'boolean') {\n this.debug = options.debug;\n }\n }\n}\n","export const stopwords = new Set([\n '0o',\n '0s',\n '3a',\n '3b',\n '3d',\n '6b',\n '6o',\n 'a',\n 'a1',\n 'a2',\n 'a3',\n 'a4',\n 'ab',\n 'able',\n 'about',\n 'above',\n 'abst',\n 'ac',\n 'accordance',\n 'according',\n 'accordingly',\n 'across',\n 'act',\n 'actually',\n 'ad',\n 'added',\n 'adj',\n 'ae',\n 'af',\n 'affected',\n 'affecting',\n 'affects',\n 'after',\n 'afterwards',\n 'ag',\n 'again',\n 'against',\n 'ah',\n 'ain',\n \"ain't\",\n 'aj',\n 'al',\n 'all',\n 'allow',\n 'allows',\n 'almost',\n 'alone',\n 'along',\n 'already',\n 'also',\n 'although',\n 'always',\n 'am',\n 'among',\n 'amongst',\n 'amoungst',\n 'amount',\n 'an',\n 'and',\n 'announce',\n 'another',\n 'any',\n 'anybody',\n 'anyhow',\n 'anymore',\n 'anyone',\n 'anything',\n 'anyway',\n 'anyways',\n 'anywhere',\n 'ao',\n 'ap',\n 'apart',\n 'apparently',\n 'appear',\n 'appreciate',\n 'appropriate',\n 'approximately',\n 'ar',\n 'are',\n 'aren',\n 'arent',\n \"aren't\",\n 'arise',\n 'around',\n 'as',\n \"a's\",\n 'aside',\n 'ask',\n 'asking',\n 'associated',\n 'at',\n 'au',\n 'auth',\n 'av',\n 'available',\n 'aw',\n 'away',\n 'awfully',\n 'ax',\n 'ay',\n 'az',\n 'b',\n 'b1',\n 'b2',\n 'b3',\n 'ba',\n 'back',\n 'bc',\n 'bd',\n 'be',\n 'became',\n 'because',\n 'become',\n 'becomes',\n 'becoming',\n 'been',\n 'before',\n 'beforehand',\n 'begin',\n 'beginning',\n 'beginnings',\n 'begins',\n 'behind',\n 'being',\n 'believe',\n 'below',\n 'beside',\n 'besides',\n 'best',\n 'better',\n 'between',\n 'beyond',\n 'bi',\n 'bill',\n 'biol',\n 'bj',\n 'bk',\n 'bl',\n 'bn',\n 'both',\n 'bottom',\n 'bp',\n 'br',\n 'brief',\n 'briefly',\n 'bs',\n 'bt',\n 'bu',\n 'but',\n 'bx',\n 'by',\n 'c',\n 'c1',\n 'c2',\n 'c3',\n 'ca',\n 'call',\n 'came',\n 'can',\n 'cannot',\n 'cant',\n \"can't\",\n 'cause',\n 'causes',\n 'cc',\n 'cd',\n 'ce',\n 'certain',\n 'certainly',\n 'cf',\n 'cg',\n 'ch',\n 'changes',\n 'ci',\n 'cit',\n 'cj',\n 'cl',\n 'clearly',\n 'cm',\n \"c'mon\",\n 'cn',\n 'co',\n 'com',\n 'come',\n 'comes',\n 'con',\n 'concerning',\n 'consequently',\n 'consider',\n 'considering',\n 'contain',\n 'containing',\n 'contains',\n 'corresponding',\n 'could',\n 'couldn',\n 'couldnt',\n \"couldn't\",\n 'course',\n 'cp',\n 'cq',\n 'cr',\n 'cry',\n 'cs',\n \"c's\",\n 'ct',\n 'cu',\n 'currently',\n 'cv',\n 'cx',\n 'cy',\n 'cz',\n 'd',\n 'd2',\n 'da',\n 'date',\n 'dc',\n 'dd',\n 'de',\n 'definitely',\n 'describe',\n 'described',\n 'despite',\n 'detail',\n 'df',\n 'di',\n 'did',\n 'didn',\n \"didn't\",\n 'different',\n 'dj',\n 'dk',\n 'dl',\n 'do',\n 'does',\n 'doesn',\n \"doesn't\",\n 'doing',\n 'don',\n 'done',\n \"don't\",\n 'down',\n 'downwards',\n 'dp',\n 'dr',\n 'ds',\n 'dt',\n 'du',\n 'due',\n 'during',\n 'dx',\n 'dy',\n 'e',\n 'e2',\n 'e3',\n 'ea',\n 'each',\n 'ec',\n 'ed',\n 'edu',\n 'ee',\n 'ef',\n 'effect',\n 'eg',\n 'ei',\n 'eight',\n 'eighty',\n 'either',\n 'ej',\n 'el',\n 'eleven',\n 'else',\n 'elsewhere',\n 'em',\n 'empty',\n 'en',\n 'end',\n 'ending',\n 'enough',\n 'entirely',\n 'eo',\n 'ep',\n 'eq',\n 'er',\n 'es',\n 'especially',\n 'est',\n 'et',\n 'et-al',\n 'etc',\n 'eu',\n 'ev',\n 'even',\n 'ever',\n 'every',\n 'everybody',\n 'everyone',\n 'everything',\n 'everywhere',\n 'ex',\n 'exactly',\n 'example',\n 'except',\n 'ey',\n 'f',\n 'f2',\n 'fa',\n 'far',\n 'fc',\n 'few',\n 'ff',\n 
'fi',\n 'fifteen',\n 'fifth',\n 'fify',\n 'fill',\n 'find',\n 'fire',\n 'first',\n 'five',\n 'fix',\n 'fj',\n 'fl',\n 'fn',\n 'fo',\n 'followed',\n 'following',\n 'follows',\n 'for',\n 'former',\n 'formerly',\n 'forth',\n 'forty',\n 'found',\n 'four',\n 'fr',\n 'from',\n 'front',\n 'ft',\n 'fu',\n 'full',\n 'further',\n 'furthermore',\n 'fy',\n 'g',\n 'ga',\n 'gave',\n 'ge',\n 'get',\n 'gets',\n 'getting',\n 'gi',\n 'give',\n 'given',\n 'gives',\n 'giving',\n 'gj',\n 'gl',\n 'go',\n 'goes',\n 'going',\n 'gone',\n 'got',\n 'gotten',\n 'gr',\n 'greetings',\n 'gs',\n 'gy',\n 'h',\n 'h2',\n 'h3',\n 'had',\n 'hadn',\n \"hadn't\",\n 'happens',\n 'hardly',\n 'has',\n 'hasn',\n 'hasnt',\n \"hasn't\",\n 'have',\n 'haven',\n \"haven't\",\n 'having',\n 'he',\n 'hed',\n \"he'd\",\n \"he'll\",\n 'hello',\n 'help',\n 'hence',\n 'her',\n 'here',\n 'hereafter',\n 'hereby',\n 'herein',\n 'heres',\n \"here's\",\n 'hereupon',\n 'hers',\n 'herself',\n 'hes',\n \"he's\",\n 'hh',\n 'hi',\n 'hid',\n 'him',\n 'himself',\n 'his',\n 'hither',\n 'hj',\n 'ho',\n 'home',\n 'hopefully',\n 'how',\n 'howbeit',\n 'however',\n \"how's\",\n 'hr',\n 'hs',\n 'http',\n 'hu',\n 'hundred',\n 'hy',\n 'i',\n 'i2',\n 'i3',\n 'i4',\n 'i6',\n 'i7',\n 'i8',\n 'ia',\n 'ib',\n 'ibid',\n 'ic',\n 'id',\n \"i'd\",\n 'ie',\n 'if',\n 'ig',\n 'ignored',\n 'ih',\n 'ii',\n 'ij',\n 'il',\n \"i'll\",\n 'im',\n \"i'm\",\n 'immediate',\n 'immediately',\n 'importance',\n 'important',\n 'in',\n 'inasmuch',\n 'inc',\n 'indeed',\n 'index',\n 'indicate',\n 'indicated',\n 'indicates',\n 'information',\n 'inner',\n 'insofar',\n 'instead',\n 'interest',\n 'into',\n 'invention',\n 'inward',\n 'io',\n 'ip',\n 'iq',\n 'ir',\n 'is',\n 'isn',\n \"isn't\",\n 'it',\n 'itd',\n \"it'd\",\n \"it'll\",\n 'its',\n \"it's\",\n 'itself',\n 'iv',\n \"i've\",\n 'ix',\n 'iy',\n 'iz',\n 'j',\n 'jj',\n 'jr',\n 'js',\n 'jt',\n 'ju',\n 'just',\n 'k',\n 'ke',\n 'keep',\n 'keeps',\n 'kept',\n 'kg',\n 'kj',\n 'km',\n 'know',\n 'known',\n 'knows',\n 'ko',\n 'l',\n 'l2',\n 'la',\n 'largely',\n 'last',\n 'lately',\n 'later',\n 'latter',\n 'latterly',\n 'lb',\n 'lc',\n 'le',\n 'least',\n 'les',\n 'less',\n 'lest',\n 'let',\n 'lets',\n \"let's\",\n 'lf',\n 'like',\n 'liked',\n 'likely',\n 'line',\n 'little',\n 'lj',\n 'll',\n 'll',\n 'ln',\n 'lo',\n 'look',\n 'looking',\n 'looks',\n 'los',\n 'lr',\n 'ls',\n 'lt',\n 'ltd',\n 'm',\n 'm2',\n 'ma',\n 'made',\n 'mainly',\n 'make',\n 'makes',\n 'many',\n 'may',\n 'maybe',\n 'me',\n 'mean',\n 'means',\n 'meantime',\n 'meanwhile',\n 'merely',\n 'mg',\n 'might',\n 'mightn',\n \"mightn't\",\n 'mill',\n 'million',\n 'mine',\n 'miss',\n 'ml',\n 'mn',\n 'mo',\n 'more',\n 'moreover',\n 'most',\n 'mostly',\n 'move',\n 'mr',\n 'mrs',\n 'ms',\n 'mt',\n 'mu',\n 'much',\n 'mug',\n 'must',\n 'mustn',\n \"mustn't\",\n 'my',\n 'myself',\n 'model',\n 'n',\n 'n2',\n 'na',\n 'name',\n 'namely',\n 'nay',\n 'nc',\n 'nd',\n 'ne',\n 'near',\n 'nearly',\n 'necessarily',\n 'necessary',\n 'need',\n 'needn',\n \"needn't\",\n 'needs',\n 'neither',\n 'never',\n 'nevertheless',\n 'new',\n 'next',\n 'ng',\n 'ni',\n 'nine',\n 'ninety',\n 'nj',\n 'nl',\n 'nn',\n 'no',\n 'nobody',\n 'non',\n 'none',\n 'nonetheless',\n 'noone',\n 'nor',\n 'normally',\n 'nos',\n 'not',\n 'noted',\n 'nothing',\n 'novel',\n 'now',\n 'nowhere',\n 'nr',\n 'ns',\n 'nt',\n 'ny',\n 'o',\n 'oa',\n 'ob',\n 'obtain',\n 'obtained',\n 'obviously',\n 'oc',\n 'od',\n 'of',\n 'off',\n 'often',\n 'og',\n 'oh',\n 'oi',\n 'oj',\n 'ok',\n 'okay',\n 'ol',\n 'old',\n 'om',\n 'omitted',\n 'on',\n 'once',\n 
'one',\n 'ones',\n 'only',\n 'onto',\n 'oo',\n 'op',\n 'oq',\n 'or',\n 'ord',\n 'os',\n 'ot',\n 'other',\n 'others',\n 'otherwise',\n 'ou',\n 'ought',\n 'our',\n 'ours',\n 'ourselves',\n 'out',\n 'outside',\n 'over',\n 'overall',\n 'ow',\n 'owing',\n 'own',\n 'ox',\n 'oz',\n 'p',\n 'p1',\n 'p2',\n 'p3',\n 'page',\n 'pagecount',\n 'pages',\n 'par',\n 'part',\n 'particular',\n 'particularly',\n 'pas',\n 'past',\n 'pc',\n 'pd',\n 'pe',\n 'per',\n 'perhaps',\n 'pf',\n 'ph',\n 'pi',\n 'pj',\n 'pk',\n 'pl',\n 'placed',\n 'please',\n 'plus',\n 'pm',\n 'pn',\n 'po',\n 'poorly',\n 'possible',\n 'possibly',\n 'potentially',\n 'pp',\n 'pq',\n 'pr',\n 'predominantly',\n 'present',\n 'presumably',\n 'previously',\n 'primarily',\n 'probably',\n 'promptly',\n 'proud',\n 'provides',\n 'ps',\n 'pt',\n 'pu',\n 'put',\n 'py',\n 'q',\n 'qj',\n 'qu',\n 'que',\n 'quickly',\n 'quite',\n 'qv',\n 'r',\n 'r2',\n 'ra',\n 'ran',\n 'rather',\n 'rc',\n 'rd',\n 're',\n 'readily',\n 'really',\n 'reasonably',\n 'recent',\n 'recently',\n 'ref',\n 'refs',\n 'regarding',\n 'regardless',\n 'regards',\n 'related',\n 'relatively',\n 'research',\n 'research-articl',\n 'respectively',\n 'resulted',\n 'resulting',\n 'results',\n 'rf',\n 'rh',\n 'ri',\n 'right',\n 'rj',\n 'rl',\n 'rm',\n 'rn',\n 'ro',\n 'rq',\n 'rr',\n 'rs',\n 'rt',\n 'ru',\n 'run',\n 'rv',\n 'ry',\n 's',\n 's2',\n 'sa',\n 'said',\n 'same',\n 'saw',\n 'say',\n 'saying',\n 'says',\n 'sc',\n 'sd',\n 'se',\n 'sec',\n 'second',\n 'secondly',\n 'section',\n 'see',\n 'seeing',\n 'seem',\n 'seemed',\n 'seeming',\n 'seems',\n 'seen',\n 'self',\n 'selves',\n 'sensible',\n 'sent',\n 'serious',\n 'seriously',\n 'seven',\n 'several',\n 'sf',\n 'shall',\n 'shan',\n \"shan't\",\n 'she',\n 'shed',\n \"she'd\",\n \"she'll\",\n 'shes',\n \"she's\",\n 'should',\n 'shouldn',\n \"shouldn't\",\n \"should've\",\n 'show',\n 'showed',\n 'shown',\n 'showns',\n 'shows',\n 'si',\n 'side',\n 'significant',\n 'significantly',\n 'similar',\n 'similarly',\n 'since',\n 'sincere',\n 'six',\n 'sixty',\n 'sj',\n 'sl',\n 'slightly',\n 'sm',\n 'sn',\n 'so',\n 'some',\n 'somebody',\n 'somehow',\n 'someone',\n 'somethan',\n 'something',\n 'sometime',\n 'sometimes',\n 'somewhat',\n 'somewhere',\n 'soon',\n 'sorry',\n 'sp',\n 'specifically',\n 'specified',\n 'specify',\n 'specifying',\n 'sq',\n 'sr',\n 'ss',\n 'st',\n 'still',\n 'stop',\n 'strongly',\n 'sub',\n 'substantially',\n 'successfully',\n 'such',\n 'sufficiently',\n 'suggest',\n 'sup',\n 'sure',\n 'sy',\n 'system',\n 'sz',\n 't',\n 't1',\n 't2',\n 't3',\n 'take',\n 'taken',\n 'taking',\n 'tb',\n 'tc',\n 'td',\n 'te',\n 'tell',\n 'ten',\n 'tends',\n 'tf',\n 'th',\n 'than',\n 'thank',\n 'thanks',\n 'thanx',\n 'that',\n \"that'll\",\n 'thats',\n \"that's\",\n \"that've\",\n 'the',\n 'their',\n 'theirs',\n 'them',\n 'themselves',\n 'then',\n 'thence',\n 'there',\n 'thereafter',\n 'thereby',\n 'thered',\n 'therefore',\n 'therein',\n \"there'll\",\n 'thereof',\n 'therere',\n 'theres',\n \"there's\",\n 'thereto',\n 'thereupon',\n \"there've\",\n 'these',\n 'they',\n 'theyd',\n \"they'd\",\n \"they'll\",\n 'theyre',\n \"they're\",\n \"they've\",\n 'thickv',\n 'thin',\n 'think',\n 'third',\n 'this',\n 'thorough',\n 'thoroughly',\n 'those',\n 'thou',\n 'though',\n 'thoughh',\n 'thousand',\n 'three',\n 'throug',\n 'through',\n 'throughout',\n 'thru',\n 'thus',\n 'ti',\n 'til',\n 'tip',\n 'tj',\n 'tl',\n 'tm',\n 'tn',\n 'to',\n 'together',\n 'too',\n 'took',\n 'top',\n 'toward',\n 'towards',\n 'tp',\n 'tq',\n 'tr',\n 'tried',\n 'tries',\n 'truly',\n 'try',\n 
'trying',\n 'ts',\n \"t's\",\n 'tt',\n 'tv',\n 'twelve',\n 'twenty',\n 'twice',\n 'two',\n 'tx',\n 'u',\n 'u201d',\n 'ue',\n 'ui',\n 'uj',\n 'uk',\n 'um',\n 'un',\n 'under',\n 'unfortunately',\n 'unless',\n 'unlike',\n 'unlikely',\n 'until',\n 'unto',\n 'uo',\n 'up',\n 'upon',\n 'ups',\n 'ur',\n 'us',\n 'use',\n 'used',\n 'useful',\n 'usefully',\n 'usefulness',\n 'uses',\n 'using',\n 'usually',\n 'ut',\n 'v',\n 'va',\n 'value',\n 'various',\n 'vd',\n 've',\n 've',\n 'very',\n 'via',\n 'viz',\n 'vj',\n 'vo',\n 'vol',\n 'vols',\n 'volumtype',\n 'vq',\n 'vs',\n 'vt',\n 'vu',\n 'w',\n 'wa',\n 'want',\n 'wants',\n 'was',\n 'wasn',\n 'wasnt',\n \"wasn't\",\n 'way',\n 'we',\n 'wed',\n \"we'd\",\n 'welcome',\n 'well',\n \"we'll\",\n 'well-b',\n 'went',\n 'were',\n \"we're\",\n 'weren',\n 'werent',\n \"weren't\",\n \"we've\",\n 'what',\n 'whatever',\n \"what'll\",\n 'whats',\n \"what's\",\n 'when',\n 'whence',\n 'whenever',\n \"when's\",\n 'where',\n 'whereafter',\n 'whereas',\n 'whereby',\n 'wherein',\n 'wheres',\n \"where's\",\n 'whereupon',\n 'wherever',\n 'whether',\n 'which',\n 'while',\n 'whim',\n 'whither',\n 'who',\n 'whod',\n 'whoever',\n 'whole',\n \"who'll\",\n 'whom',\n 'whomever',\n 'whos',\n \"who's\",\n 'whose',\n 'why',\n \"why's\",\n 'wi',\n 'widely',\n 'will',\n 'willing',\n 'wish',\n 'with',\n 'within',\n 'without',\n 'wo',\n 'won',\n 'wonder',\n 'wont',\n \"won't\",\n 'words',\n 'world',\n 'would',\n 'wouldn',\n 'wouldnt',\n \"wouldn't\",\n 'www',\n 'x',\n 'x1',\n 'x2',\n 'x3',\n 'xf',\n 'xi',\n 'xj',\n 'xk',\n 'xl',\n 'xn',\n 'xo',\n 'xs',\n 'xt',\n 'xv',\n 'xx',\n 'y',\n 'y2',\n 'yes',\n 'yet',\n 'yj',\n 'yl',\n 'you',\n 'youd',\n \"you'd\",\n \"you'll\",\n 'your',\n 'youre',\n \"you're\",\n 'yours',\n 'yourself',\n 'yourselves',\n \"you've\",\n 'yr',\n 'ys',\n 'yt',\n 'z',\n 'zero',\n 'zi',\n 'zz',\n 'task',\n]);\n","import type { AxAIService } from '../ai/types.js';\n\nimport type { AxExample, AxMetricFn } from './optimizer.js';\nimport type { AxProgram } from './program.js';\nimport type { AxGenIn, AxGenOut } from './types.js';\nimport { updateProgressBar } from './util.js';\n\nexport type AxEvaluateArgs<IN extends AxGenIn, OUT extends AxGenOut> = {\n ai: AxAIService;\n program: Readonly<AxProgram<IN, OUT>>;\n examples: Readonly<AxExample[]>;\n};\n\nexport class AxTestPrompt<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> {\n private ai: AxAIService;\n private program: Readonly<AxProgram<IN, OUT>>;\n private examples: Readonly<AxExample[]>;\n\n constructor({\n ai,\n program,\n examples = [],\n }: Readonly<AxEvaluateArgs<IN, OUT>>) {\n if (examples.length === 0) {\n throw new Error('No examples found');\n }\n this.ai = ai;\n this.program = program;\n this.examples = examples;\n }\n\n public async run(metricFn: AxMetricFn) {\n const st = Date.now();\n const total = this.examples.length;\n let sumOfScores = 0;\n\n for (let i = 0; i < total; i++) {\n const ex = this.examples[i];\n if (!ex) {\n throw new Error('Invalid example');\n }\n\n const res = await this.program.forward(this.ai, ex as IN);\n const score = await metricFn({ prediction: res, example: ex });\n sumOfScores += score;\n\n const et = Date.now() - st;\n // Assuming updateProgressBar's 3rd argument is a count/value that represents progress.\n // If it specifically needs a 'success count', this might need adjustment.\n // For now, using sumOfScores, but it might represent total score, not #successes.\n // If AxMetricFn is always 0 or 1, sumOfScores is equivalent to successCount.\n 
updateProgressBar(i, total, sumOfScores, et, 'Testing Prompt', 30);\n }\n\n const averageScore = total > 0 ? sumOfScores / total : 0;\n console.log(\n '\\nPerformance: ',\n sumOfScores,\n '/',\n total,\n 'Average Score: ',\n averageScore,\n '\\n'\n );\n }\n}\n","import type { Counter, Gauge, Histogram, Meter } from '@opentelemetry/api';\n\nimport type { AxAIService, AxLoggerFunction } from '../ai/types.js';\n\nimport { axGlobals } from './globals.js';\nimport { axDefaultOptimizerLogger } from './loggers.js';\nimport type { AxProgram, AxProgramDemos } from './program.js';\nimport type { AxFieldValue, AxGenIn, AxGenOut } from './types.js';\n\n// Logger utilities are now exported from ./loggers.js\n\n// Common types used by optimizers\nexport type AxExample = Record<string, AxFieldValue>;\n\nexport type AxMetricFn = <T extends AxGenOut = AxGenOut>(\n arg0: Readonly<{ prediction: T; example: AxExample }>\n) => number | Promise<number>;\n\nexport type AxMetricFnArgs = Parameters<AxMetricFn>[0];\n\n// Multi-objective metric function for Pareto optimization\nexport type AxMultiMetricFn = <T extends AxGenOut = AxGenOut>(\n arg0: Readonly<{ prediction: T; example: AxExample }>\n) => Record<string, number>;\n\n// Progress tracking interface for real-time updates\nexport interface AxOptimizationProgress {\n round: number;\n totalRounds: number;\n currentScore: number;\n bestScore: number;\n tokensUsed: number;\n timeElapsed: number;\n successfulExamples: number;\n totalExamples: number;\n currentConfiguration?: Record<string, unknown>;\n convergenceInfo?: {\n improvement: number;\n stagnationRounds: number;\n isConverging: boolean;\n };\n}\n\n// Cost tracking interface for monitoring resource usage\nexport interface AxCostTracker {\n trackTokens(count: number, model: string): void;\n getCurrentCost(): number;\n getTokenUsage(): Record<string, number>;\n getTotalTokens(): number;\n isLimitReached(): boolean;\n reset(): void;\n}\n\n// Checkpoint interface for saving/loading optimization state\nexport interface AxOptimizationCheckpoint {\n version: string;\n timestamp: number;\n optimizerType: string;\n optimizerConfig: Record<string, unknown>;\n\n // Current optimization state\n currentRound: number;\n totalRounds: number;\n bestScore: number;\n bestConfiguration?: Record<string, unknown>;\n\n // Historical data\n scoreHistory: number[];\n configurationHistory: Record<string, unknown>[];\n\n // Resource usage\n stats: AxOptimizationStats;\n\n // Optimizer-specific state\n optimizerState: Record<string, unknown>;\n\n // Examples and validation data\n examples: readonly AxExample[];\n validationSet?: readonly AxExample[];\n}\n\n// Simple checkpoint functions - users implement these as needed\nexport type AxCheckpointSaveFn = (\n checkpoint: Readonly<AxOptimizationCheckpoint>\n) => Promise<string>;\nexport type AxCheckpointLoadFn = (\n checkpointId: string\n) => Promise<AxOptimizationCheckpoint | null>;\n\n// Cost tracker configuration options\nexport interface AxCostTrackerOptions {\n // Cost-based limits\n costPerModel?: Record<string, number>;\n maxCost?: number;\n\n // Token-based limits\n maxTokens?: number;\n}\n\n// Enhanced optimizer arguments - no longer includes program\nexport type AxOptimizerArgs = {\n studentAI: AxAIService;\n teacherAI?: AxAIService; // For generating high-quality examples/corrections\n examples: readonly AxExample[];\n\n // Evaluation strategy\n validationSet?: readonly AxExample[];\n\n // Quality thresholds\n minSuccessRate?: number;\n targetScore?: number;\n\n // 
Monitoring & callbacks\n onProgress?: (progress: Readonly<AxOptimizationProgress>) => void;\n onEarlyStop?: (reason: string, stats: Readonly<AxOptimizationStats>) => void;\n costTracker?: AxCostTracker;\n\n // Checkpointing\n checkpointSave?: AxCheckpointSaveFn;\n checkpointLoad?: AxCheckpointLoadFn;\n checkpointInterval?: number; // Save checkpoint every N rounds\n resumeFromCheckpoint?: string; // Checkpoint ID to resume from\n\n // Logging\n logger?: AxLoggerFunction;\n verbose?: boolean;\n\n // Reproducibility\n seed?: number;\n};\n\n// Enhanced optimization statistics\nexport interface AxOptimizationStats {\n totalCalls: number;\n successfulDemos: number;\n estimatedTokenUsage: number;\n earlyStopped: boolean;\n earlyStopping?: {\n bestScoreRound: number;\n patienceExhausted: boolean;\n reason: string;\n };\n\n // Resource usage tracking\n resourceUsage: {\n totalTokens: number;\n totalTime: number;\n avgLatencyPerEval: number;\n peakMemoryUsage?: number;\n costByModel: Record<string, number>;\n };\n\n // Quality metrics\n convergenceInfo: {\n converged: boolean;\n finalImprovement: number;\n stagnationRounds: number;\n convergenceThreshold: number;\n };\n\n // Evaluation breakdown\n evaluationBreakdown?: {\n trainingScore: number;\n validationScore: number;\n crossValidationScores?: number[];\n standardDeviation?: number;\n };\n}\n\n// Optimizer metrics configuration interface\nexport interface AxOptimizerMetricsConfig {\n enabled: boolean;\n enabledCategories: (\n | 'optimization'\n | 'convergence'\n | 'resource_usage'\n | 'teacher_student'\n | 'checkpointing'\n | 'pareto'\n )[];\n maxLabelLength: number;\n samplingRate: number;\n}\n\n// Default optimizer metrics configuration\nexport const axDefaultOptimizerMetricsConfig: AxOptimizerMetricsConfig = {\n enabled: true,\n enabledCategories: [\n 'optimization',\n 'convergence',\n 'resource_usage',\n 'teacher_student',\n 'checkpointing',\n 'pareto',\n ],\n maxLabelLength: 100,\n samplingRate: 1.0,\n};\n\n// Optimizer metrics instruments interface\nexport interface AxOptimizerMetricsInstruments {\n // Optimization flow metrics\n optimizationLatencyHistogram?: Histogram;\n optimizationRequestsCounter?: Counter;\n optimizationErrorsCounter?: Counter;\n\n // Convergence metrics\n convergenceRoundsHistogram?: Histogram;\n convergenceScoreGauge?: Gauge;\n convergenceImprovementGauge?: Gauge;\n stagnationRoundsGauge?: Gauge;\n earlyStoppingCounter?: Counter;\n\n // Resource usage metrics\n tokenUsageCounter?: Counter;\n costUsageCounter?: Counter;\n memoryUsageGauge?: Gauge;\n optimizationDurationHistogram?: Histogram;\n\n // Teacher-student metrics\n teacherStudentUsageCounter?: Counter;\n teacherStudentLatencyHistogram?: Histogram;\n teacherStudentScoreImprovementGauge?: Gauge;\n\n // Checkpointing metrics\n checkpointSaveCounter?: Counter;\n checkpointLoadCounter?: Counter;\n checkpointSaveLatencyHistogram?: Histogram;\n checkpointLoadLatencyHistogram?: Histogram;\n\n // Pareto optimization metrics\n paretoOptimizationsCounter?: Counter;\n paretoFrontSizeHistogram?: Histogram;\n paretoHypervolumeGauge?: Gauge;\n paretoSolutionsGeneratedHistogram?: Histogram;\n\n // Program complexity metrics\n programInputFieldsGauge?: Gauge;\n programOutputFieldsGauge?: Gauge;\n examplesCountGauge?: Gauge;\n validationSetSizeGauge?: Gauge;\n\n // Performance metrics\n evaluationLatencyHistogram?: Histogram;\n demoGenerationLatencyHistogram?: Histogram;\n metricComputationLatencyHistogram?: Histogram;\n\n // Configuration metrics\n optimizerTypeGauge?: 
Gauge;\n targetScoreGauge?: Gauge;\n maxRoundsGauge?: Gauge;\n}\n\n// Singleton instance for optimizer metrics instruments\nlet globalOptimizerMetricsInstruments:\n | AxOptimizerMetricsInstruments\n | undefined;\n\n// Function to get or create optimizer metrics instruments (singleton pattern)\nexport const getOrCreateOptimizerMetricsInstruments = (\n meter?: Meter\n): AxOptimizerMetricsInstruments | undefined => {\n // Return existing instance if available\n if (globalOptimizerMetricsInstruments) {\n return globalOptimizerMetricsInstruments;\n }\n\n if (meter) {\n globalOptimizerMetricsInstruments =\n createOptimizerMetricsInstruments(meter);\n return globalOptimizerMetricsInstruments;\n }\n\n return undefined;\n};\n\n// Function to reset the optimizer metrics singleton (useful for testing)\nexport const resetOptimizerMetricsInstruments = (): void => {\n globalOptimizerMetricsInstruments = undefined;\n};\n\n// Global optimizer metrics configuration\nlet currentOptimizerMetricsConfig: AxOptimizerMetricsConfig =\n axDefaultOptimizerMetricsConfig;\n\n// Function to update optimizer metrics configuration\nexport const axUpdateOptimizerMetricsConfig = (\n config: Readonly<Partial<AxOptimizerMetricsConfig>>\n): void => {\n currentOptimizerMetricsConfig = {\n ...currentOptimizerMetricsConfig,\n ...config,\n };\n};\n\n// Function to get current optimizer metrics configuration\nexport const axGetOptimizerMetricsConfig = (): AxOptimizerMetricsConfig => {\n return { ...currentOptimizerMetricsConfig };\n};\n\nexport const createOptimizerMetricsInstruments = (\n meter: Meter\n): AxOptimizerMetricsInstruments => {\n return {\n // Optimization flow metrics\n optimizationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_optimization_duration_ms',\n {\n description: 'End-to-end duration of optimization runs',\n unit: 'ms',\n }\n ),\n\n optimizationRequestsCounter: meter.createCounter(\n 'ax_optimizer_optimization_requests_total',\n {\n description: 'Total number of optimization requests',\n }\n ),\n\n optimizationErrorsCounter: meter.createCounter(\n 'ax_optimizer_optimization_errors_total',\n {\n description: 'Total number of failed optimizations',\n }\n ),\n\n // Convergence metrics\n convergenceRoundsHistogram: meter.createHistogram(\n 'ax_optimizer_convergence_rounds',\n {\n description: 'Number of rounds until convergence',\n }\n ),\n\n convergenceScoreGauge: meter.createGauge('ax_optimizer_convergence_score', {\n description: 'Current best score during optimization',\n }),\n\n convergenceImprovementGauge: meter.createGauge(\n 'ax_optimizer_convergence_improvement',\n {\n description: 'Improvement in score from baseline',\n }\n ),\n\n stagnationRoundsGauge: meter.createGauge('ax_optimizer_stagnation_rounds', {\n description: 'Number of rounds without improvement',\n }),\n\n earlyStoppingCounter: meter.createCounter(\n 'ax_optimizer_early_stopping_total',\n {\n description: 'Total number of early stopping events',\n }\n ),\n\n // Resource usage metrics\n tokenUsageCounter: meter.createCounter('ax_optimizer_token_usage_total', {\n description: 'Total tokens used during optimization',\n }),\n\n costUsageCounter: meter.createCounter('ax_optimizer_cost_usage_total', {\n description: 'Total cost incurred during optimization',\n unit: '$',\n }),\n\n memoryUsageGauge: meter.createGauge('ax_optimizer_memory_usage_bytes', {\n description: 'Peak memory usage during optimization',\n unit: 'By',\n }),\n\n optimizationDurationHistogram: meter.createHistogram(\n 'ax_optimizer_duration_ms',\n {\n description: 
'Duration of optimization runs',\n unit: 'ms',\n }\n ),\n\n // Teacher-student metrics\n teacherStudentUsageCounter: meter.createCounter(\n 'ax_optimizer_teacher_student_usage_total',\n {\n description: 'Total number of teacher-student interactions',\n }\n ),\n\n teacherStudentLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_teacher_student_latency_ms',\n {\n description: 'Latency of teacher-student interactions',\n unit: 'ms',\n }\n ),\n\n teacherStudentScoreImprovementGauge: meter.createGauge(\n 'ax_optimizer_teacher_student_score_improvement',\n {\n description: 'Score improvement from teacher-student interactions',\n }\n ),\n\n // Checkpointing metrics\n checkpointSaveCounter: meter.createCounter(\n 'ax_optimizer_checkpoint_save_total',\n {\n description: 'Total number of checkpoint saves',\n }\n ),\n\n checkpointLoadCounter: meter.createCounter(\n 'ax_optimizer_checkpoint_load_total',\n {\n description: 'Total number of checkpoint loads',\n }\n ),\n\n checkpointSaveLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_checkpoint_save_latency_ms',\n {\n description: 'Latency of checkpoint save operations',\n unit: 'ms',\n }\n ),\n\n checkpointLoadLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_checkpoint_load_latency_ms',\n {\n description: 'Latency of checkpoint load operations',\n unit: 'ms',\n }\n ),\n\n // Pareto optimization metrics\n paretoOptimizationsCounter: meter.createCounter(\n 'ax_optimizer_pareto_optimizations_total',\n {\n description: 'Total number of Pareto optimizations',\n }\n ),\n\n paretoFrontSizeHistogram: meter.createHistogram(\n 'ax_optimizer_pareto_front_size',\n {\n description: 'Size of Pareto frontier',\n }\n ),\n\n paretoHypervolumeGauge: meter.createGauge(\n 'ax_optimizer_pareto_hypervolume',\n {\n description: 'Hypervolume of Pareto frontier',\n }\n ),\n\n paretoSolutionsGeneratedHistogram: meter.createHistogram(\n 'ax_optimizer_pareto_solutions_generated',\n {\n description: 'Number of solutions generated for Pareto optimization',\n }\n ),\n\n // Program complexity metrics\n programInputFieldsGauge: meter.createGauge(\n 'ax_optimizer_program_input_fields',\n {\n description: 'Number of input fields in optimized program',\n }\n ),\n\n programOutputFieldsGauge: meter.createGauge(\n 'ax_optimizer_program_output_fields',\n {\n description: 'Number of output fields in optimized program',\n }\n ),\n\n examplesCountGauge: meter.createGauge('ax_optimizer_examples_count', {\n description: 'Number of training examples used',\n }),\n\n validationSetSizeGauge: meter.createGauge(\n 'ax_optimizer_validation_set_size',\n {\n description: 'Size of validation set used',\n }\n ),\n\n // Performance metrics\n evaluationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_evaluation_latency_ms',\n {\n description: 'Latency of program evaluations',\n unit: 'ms',\n }\n ),\n\n demoGenerationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_demo_generation_latency_ms',\n {\n description: 'Latency of demo generation',\n unit: 'ms',\n }\n ),\n\n metricComputationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_metric_computation_latency_ms',\n {\n description: 'Latency of metric computation',\n unit: 'ms',\n }\n ),\n\n // Configuration metrics\n optimizerTypeGauge: meter.createGauge('ax_optimizer_type', {\n description: 'Type of optimizer being used',\n }),\n\n targetScoreGauge: meter.createGauge('ax_optimizer_target_score', {\n description: 'Target score for optimization',\n }),\n\n maxRoundsGauge: 
meter.createGauge('ax_optimizer_max_rounds', {\n description: 'Maximum rounds for optimization',\n }),\n };\n};\n\n// Utility function to sanitize optimizer metric labels\nconst sanitizeOptimizerLabels = (\n labels: Record<string, unknown>\n): Record<string, string> => {\n const sanitized: Record<string, string> = {};\n for (const [key, value] of Object.entries(labels)) {\n if (value !== undefined && value !== null) {\n const stringValue = String(value);\n // Limit label length based on configuration\n const maxLength = currentOptimizerMetricsConfig.maxLabelLength;\n sanitized[key] =\n stringValue.length > maxLength\n ? stringValue.substring(0, maxLength)\n : stringValue;\n }\n }\n return sanitized;\n};\n\n// Recording functions for optimization flow metrics\nexport const recordOptimizationMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n duration: number,\n success: boolean,\n optimizerType: string,\n programSignature?: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n success: success.toString(),\n optimizer_type: optimizerType,\n ...(programSignature ? { program_signature: programSignature } : {}),\n });\n\n if (instruments.optimizationLatencyHistogram) {\n instruments.optimizationLatencyHistogram.record(duration, labels);\n }\n\n if (instruments.optimizationRequestsCounter) {\n instruments.optimizationRequestsCounter.add(1, labels);\n }\n\n if (!success && instruments.optimizationErrorsCounter) {\n instruments.optimizationErrorsCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record optimization metric:', error);\n }\n};\n\n// Recording functions for convergence metrics\nexport const recordConvergenceMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n rounds: number,\n currentScore: number,\n improvement: number,\n stagnationRounds: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.convergenceRoundsHistogram) {\n instruments.convergenceRoundsHistogram.record(rounds, labels);\n }\n\n if (instruments.convergenceScoreGauge) {\n instruments.convergenceScoreGauge.record(currentScore, labels);\n }\n\n if (instruments.convergenceImprovementGauge) {\n instruments.convergenceImprovementGauge.record(improvement, labels);\n }\n\n if (instruments.stagnationRoundsGauge) {\n instruments.stagnationRoundsGauge.record(stagnationRounds, labels);\n }\n } catch (error) {\n console.warn('Failed to record convergence metric:', error);\n }\n};\n\nexport const recordEarlyStoppingMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n reason: string,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n reason,\n optimizer_type: optimizerType,\n });\n\n if (instruments.earlyStoppingCounter) {\n instruments.earlyStoppingCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record early stopping metric:', error);\n }\n};\n\n// Recording functions for resource usage metrics\nexport const recordResourceUsageMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n tokensUsed: number,\n costIncurred: number,\n optimizerType: string,\n memoryUsage?: number\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.tokenUsageCounter) {\n instruments.tokenUsageCounter.add(tokensUsed, labels);\n }\n\n if (instruments.costUsageCounter) {\n instruments.costUsageCounter.add(costIncurred, 
labels);\n }\n\n if (memoryUsage !== undefined && instruments.memoryUsageGauge) {\n instruments.memoryUsageGauge.record(memoryUsage, labels);\n }\n } catch (error) {\n console.warn('Failed to record resource usage metric:', error);\n }\n};\n\nexport const recordOptimizationDurationMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n duration: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.optimizationDurationHistogram) {\n instruments.optimizationDurationHistogram.record(duration, labels);\n }\n } catch (error) {\n console.warn('Failed to record optimization duration metric:', error);\n }\n};\n\n// Recording functions for teacher-student metrics\nexport const recordTeacherStudentMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n latency: number,\n scoreImprovement: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.teacherStudentUsageCounter) {\n instruments.teacherStudentUsageCounter.add(1, labels);\n }\n\n if (instruments.teacherStudentLatencyHistogram) {\n instruments.teacherStudentLatencyHistogram.record(latency, labels);\n }\n\n if (instruments.teacherStudentScoreImprovementGauge) {\n instruments.teacherStudentScoreImprovementGauge.record(\n scoreImprovement,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record teacher-student metric:', error);\n }\n};\n\n// Recording functions for checkpointing metrics\nexport const recordCheckpointMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n operation: 'save' | 'load',\n latency: number,\n success: boolean,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n operation,\n success: success.toString(),\n optimizer_type: optimizerType,\n });\n\n if (operation === 'save') {\n if (instruments.checkpointSaveCounter) {\n instruments.checkpointSaveCounter.add(1, labels);\n }\n if (instruments.checkpointSaveLatencyHistogram) {\n instruments.checkpointSaveLatencyHistogram.record(latency, labels);\n }\n } else {\n if (instruments.checkpointLoadCounter) {\n instruments.checkpointLoadCounter.add(1, labels);\n }\n if (instruments.checkpointLoadLatencyHistogram) {\n instruments.checkpointLoadLatencyHistogram.record(latency, labels);\n }\n }\n } catch (error) {\n console.warn('Failed to record checkpoint metric:', error);\n }\n};\n\n// Recording functions for Pareto optimization metrics\nexport const recordParetoMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n frontSize: number,\n solutionsGenerated: number,\n optimizerType: string,\n hypervolume?: number\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.paretoOptimizationsCounter) {\n instruments.paretoOptimizationsCounter.add(1, labels);\n }\n\n if (instruments.paretoFrontSizeHistogram) {\n instruments.paretoFrontSizeHistogram.record(frontSize, labels);\n }\n\n if (hypervolume !== undefined && instruments.paretoHypervolumeGauge) {\n instruments.paretoHypervolumeGauge.record(hypervolume, labels);\n }\n\n if (instruments.paretoSolutionsGeneratedHistogram) {\n instruments.paretoSolutionsGeneratedHistogram.record(\n solutionsGenerated,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record Pareto metric:', error);\n }\n};\n\n// Recording functions for program complexity metrics\nexport 
const recordProgramComplexityMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n inputFields: number,\n outputFields: number,\n examplesCount: number,\n validationSetSize: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.programInputFieldsGauge) {\n instruments.programInputFieldsGauge.record(inputFields, labels);\n }\n\n if (instruments.programOutputFieldsGauge) {\n instruments.programOutputFieldsGauge.record(outputFields, labels);\n }\n\n if (instruments.examplesCountGauge) {\n instruments.examplesCountGauge.record(examplesCount, labels);\n }\n\n if (instruments.validationSetSizeGauge) {\n instruments.validationSetSizeGauge.record(validationSetSize, labels);\n }\n } catch (error) {\n console.warn('Failed to record program complexity metric:', error);\n }\n};\n\n// Recording functions for performance metrics\nexport const recordOptimizerPerformanceMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n metricType: 'evaluation' | 'demo_generation' | 'metric_computation',\n duration: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n metric_type: metricType,\n optimizer_type: optimizerType,\n });\n\n switch (metricType) {\n case 'evaluation':\n if (instruments.evaluationLatencyHistogram) {\n instruments.evaluationLatencyHistogram.record(duration, labels);\n }\n break;\n case 'demo_generation':\n if (instruments.demoGenerationLatencyHistogram) {\n instruments.demoGenerationLatencyHistogram.record(duration, labels);\n }\n break;\n case 'metric_computation':\n if (instruments.metricComputationLatencyHistogram) {\n instruments.metricComputationLatencyHistogram.record(\n duration,\n labels\n );\n }\n break;\n }\n } catch (error) {\n console.warn('Failed to record optimizer performance metric:', error);\n }\n};\n\n// Recording functions for configuration metrics\nexport const recordOptimizerConfigurationMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n optimizerType: string,\n targetScore?: number,\n maxRounds?: number\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.optimizerTypeGauge) {\n instruments.optimizerTypeGauge.record(1, labels);\n }\n\n if (targetScore !== undefined && instruments.targetScoreGauge) {\n instruments.targetScoreGauge.record(targetScore, labels);\n }\n\n if (maxRounds !== undefined && instruments.maxRoundsGauge) {\n instruments.maxRoundsGauge.record(maxRounds, labels);\n }\n } catch (error) {\n console.warn('Failed to record optimizer configuration metric:', error);\n }\n};\n\n// Simplified result - no program since it's passed to compile\nexport interface AxOptimizerResult<OUT extends AxGenOut> {\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n stats: AxOptimizationStats;\n bestScore: number;\n finalConfiguration?: Record<string, unknown>;\n\n // Optimization history for analysis\n scoreHistory?: number[];\n configurationHistory?: Record<string, unknown>[];\n}\n\n// Pareto optimization result for multi-objective optimization\nexport interface AxParetoResult<OUT extends AxGenOut = AxGenOut>\n extends AxOptimizerResult<OUT> {\n paretoFront: ReadonlyArray<{\n demos: readonly AxProgramDemos<AxGenIn, OUT>[];\n scores: Readonly<Record<string, number>>;\n configuration: Readonly<Record<string, unknown>>;\n dominatedSolutions: number;\n }>;\n\n // Multi-objective specific stats\n hypervolume?: number;\n 
paretoFrontSize: number;\n convergenceMetrics?: Record<string, number>;\n}\n\n// Compile options that can override constructor arguments\nexport interface AxCompileOptions {\n // Method-specific options\n maxIterations?: number;\n earlyStoppingPatience?: number;\n verbose?: boolean;\n\n // Override args for this specific run\n overrideValidationSet?: readonly AxExample[];\n overrideTargetScore?: number;\n overrideCostTracker?: AxCostTracker;\n overrideTeacherAI?: AxAIService;\n\n // Progress monitoring overrides\n overrideOnProgress?: (progress: Readonly<AxOptimizationProgress>) => void;\n overrideOnEarlyStop?: (\n reason: string,\n stats: Readonly<AxOptimizationStats>\n ) => void;\n\n // Checkpointing overrides\n overrideCheckpointSave?: AxCheckpointSaveFn;\n overrideCheckpointLoad?: AxCheckpointLoadFn;\n overrideCheckpointInterval?: number;\n saveCheckpointOnComplete?: boolean;\n}\n\n// Enhanced base optimizer interface\nexport interface AxOptimizer<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> {\n /**\n * Optimize a program using the provided metric function\n * @param program The program to optimize (moved from constructor)\n * @param metricFn Evaluation metric function to assess program performance\n * @param options Optional configuration options that can override constructor settings\n * @returns Optimization result containing demos, stats, and configuration\n */\n compile(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<AxOptimizerResult<OUT>>;\n\n /**\n * Optimize a program with real-time streaming updates\n * @param program The program to optimize\n * @param metricFn Evaluation metric function\n * @param options Optional configuration options\n * @returns Async iterator yielding optimization progress\n */\n compileStream?(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): AsyncIterableIterator<AxOptimizationProgress>;\n\n /**\n * Multi-objective optimization using Pareto frontier\n * @param program The program to optimize\n * @param metricFn Multi-objective metric function\n * @param options Optional configuration options\n * @returns Pareto optimization result\n */\n compilePareto?(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<AxParetoResult<OUT>>;\n\n /**\n * Get current optimization statistics\n * @returns Current optimization statistics\n */\n getStats(): AxOptimizationStats;\n\n /**\n * Cancel ongoing optimization gracefully\n * @returns Promise that resolves when cancellation is complete\n */\n cancel?(): Promise<void>;\n\n /**\n * Reset optimizer state for reuse with different programs\n */\n reset?(): void;\n\n /**\n * Get optimizer-specific configuration\n * @returns Current optimizer configuration\n */\n getConfiguration?(): Record<string, unknown>;\n\n /**\n * Update optimizer configuration\n * @param config New configuration to merge with existing\n */\n updateConfiguration?(config: Readonly<Record<string, unknown>>): void;\n\n /**\n * Validate that the optimizer can handle the given program\n * @param program Program to validate\n * @returns Validation result with any issues found\n */\n validateProgram?(program: Readonly<AxProgram<IN, OUT>>): {\n isValid: boolean;\n issues: string[];\n suggestions: string[];\n };\n}\n\n// Specific optimizer options interfaces\n\nexport interface AxBootstrapOptimizerOptions {\n maxRounds?: number;\n maxExamples?: number;\n maxDemos?: 
number;\n batchSize?: number;\n earlyStoppingPatience?: number;\n teacherAI?: AxAIService;\n costMonitoring?: boolean;\n maxTokensPerGeneration?: number;\n verboseMode?: boolean;\n debugMode?: boolean;\n\n // Enhanced options\n adaptiveBatching?: boolean;\n dynamicTemperature?: boolean;\n qualityThreshold?: number;\n diversityWeight?: number;\n}\n\nexport interface AxMiPROOptimizerOptions {\n numCandidates?: number;\n initTemperature?: number;\n maxBootstrappedDemos?: number;\n maxLabeledDemos?: number;\n numTrials?: number;\n minibatch?: boolean;\n minibatchSize?: number;\n minibatchFullEvalSteps?: number;\n programAwareProposer?: boolean;\n dataAwareProposer?: boolean;\n viewDataBatchSize?: number;\n tipAwareProposer?: boolean;\n fewshotAwareProposer?: boolean;\n verbose?: boolean;\n earlyStoppingTrials?: number;\n minImprovementThreshold?: number;\n\n // Enhanced options\n bayesianOptimization?: boolean;\n acquisitionFunction?:\n | 'expected_improvement'\n | 'upper_confidence_bound'\n | 'probability_improvement';\n explorationWeight?: number;\n\n // New option: number of samples to generate per forward call for self-consistency\n sampleCount?: number;\n}\n\n// Legacy compile options (for backward compatibility)\nexport interface AxBootstrapCompileOptions extends AxCompileOptions {\n validationExamples?: readonly AxExample[];\n maxDemos?: number;\n teacherProgram?: Readonly<AxProgram<AxGenIn, AxGenOut>>;\n}\n\nexport interface AxMiPROCompileOptions extends AxCompileOptions {\n validationExamples?: readonly AxExample[];\n teacher?: Readonly<AxProgram<AxGenIn, AxGenOut>>;\n auto?: 'light' | 'medium' | 'heavy';\n\n // Enhanced MiPRO options\n instructionCandidates?: string[];\n customProposer?: (\n context: Readonly<{\n programSummary: string;\n dataSummary: string;\n previousInstructions: string[];\n }>\n ) => Promise<string[]>;\n}\n\n// Default cost tracker implementation\nexport class AxDefaultCostTracker implements AxCostTracker {\n private tokenUsage: Record<string, number> = {};\n private totalTokens = 0;\n\n // Configuration options\n private readonly costPerModel: Record<string, number>;\n private readonly maxCost?: number;\n private readonly maxTokens?: number;\n\n constructor(options?: AxCostTrackerOptions) {\n this.costPerModel = options?.costPerModel ?? 
{};\n this.maxCost = options?.maxCost;\n this.maxTokens = options?.maxTokens;\n }\n\n trackTokens(count: number, model: string): void {\n this.tokenUsage[model] = (this.tokenUsage[model] || 0) + count;\n this.totalTokens += count;\n }\n\n getCurrentCost(): number {\n // Calculate cost on-demand\n let totalCost = 0;\n for (const [model, tokens] of Object.entries(this.tokenUsage)) {\n const costPer1K = this.costPerModel[model] || 0.001; // Default fallback\n totalCost += (tokens / 1000) * costPer1K;\n }\n return totalCost;\n }\n\n getTokenUsage(): Record<string, number> {\n return { ...this.tokenUsage };\n }\n\n getTotalTokens(): number {\n return this.totalTokens;\n }\n\n isLimitReached(): boolean {\n // Check token limit if configured\n if (this.maxTokens !== undefined && this.totalTokens >= this.maxTokens) {\n return true;\n }\n\n // Check cost limit if configured (calculate cost on-demand)\n if (this.maxCost !== undefined) {\n const currentCost = this.getCurrentCost();\n if (currentCost >= this.maxCost) {\n return true;\n }\n }\n\n return false;\n }\n\n reset(): void {\n this.tokenUsage = {};\n this.totalTokens = 0;\n }\n}\n\n/**\n * Abstract base class for optimizers that provides common functionality\n * and standardized handling of AxOptimizerArgs\n */\nexport abstract class AxBaseOptimizer<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> implements AxOptimizer<IN, OUT>\n{\n // Common AxOptimizerArgs fields\n protected readonly studentAI: AxAIService;\n protected readonly teacherAI?: AxAIService;\n protected readonly examples: readonly AxExample[];\n protected readonly validationSet?: readonly AxExample[];\n protected readonly targetScore?: number;\n protected readonly minSuccessRate?: number;\n protected readonly onProgress?: (\n progress: Readonly<AxOptimizationProgress>\n ) => void;\n protected readonly onEarlyStop?: (\n reason: string,\n stats: Readonly<AxOptimizationStats>\n ) => void;\n protected readonly costTracker?: AxCostTracker;\n protected readonly seed?: number;\n\n // Checkpointing fields\n protected readonly checkpointSave?: AxCheckpointSaveFn;\n protected readonly checkpointLoad?: AxCheckpointLoadFn;\n protected readonly checkpointInterval?: number;\n protected readonly resumeFromCheckpoint?: string;\n\n // Logging fields\n protected readonly logger?: AxLoggerFunction;\n protected readonly verbose?: boolean;\n\n // Checkpoint state\n private currentRound = 0;\n private scoreHistory: number[] = [];\n private configurationHistory: Record<string, unknown>[] = [];\n\n // Common optimization statistics\n protected stats: AxOptimizationStats;\n\n // Metrics instruments\n protected readonly metricsInstruments?: AxOptimizerMetricsInstruments;\n\n constructor(args: Readonly<AxOptimizerArgs>) {\n if (args.examples.length === 0) {\n throw new Error('No examples found');\n }\n\n // Set common fields from AxOptimizerArgs\n this.studentAI = args.studentAI;\n this.teacherAI = args.teacherAI;\n this.examples = args.examples;\n this.validationSet = args.validationSet;\n this.targetScore = args.targetScore;\n this.minSuccessRate = args.minSuccessRate;\n this.onProgress = args.onProgress;\n this.onEarlyStop = args.onEarlyStop;\n this.seed = args.seed;\n\n // Set up checkpointing\n this.checkpointSave = args.checkpointSave;\n this.checkpointLoad = args.checkpointLoad;\n this.checkpointInterval = args.checkpointInterval ?? 
10; // Default: checkpoint every 10 rounds\n this.resumeFromCheckpoint = args.resumeFromCheckpoint;\n\n // Set up logging\n this.logger = args.logger;\n this.verbose = args.verbose;\n\n // Set up cost tracker with default if not provided\n const costTracker = new AxDefaultCostTracker({\n maxTokens: 1000000,\n });\n this.costTracker = args.costTracker ?? costTracker;\n\n // Initialize metrics instruments\n this.metricsInstruments = getOrCreateOptimizerMetricsInstruments(\n axGlobals.meter\n );\n\n // Initialize common stats structure\n this.stats = this.initializeStats();\n }\n\n /**\n * Initialize the optimization statistics structure\n */\n protected initializeStats(): AxOptimizationStats {\n return {\n totalCalls: 0,\n successfulDemos: 0,\n estimatedTokenUsage: 0,\n earlyStopped: false,\n resourceUsage: {\n totalTokens: 0,\n totalTime: 0,\n avgLatencyPerEval: 0,\n costByModel: {},\n },\n convergenceInfo: {\n converged: false,\n finalImprovement: 0,\n stagnationRounds: 0,\n convergenceThreshold: 0.01,\n },\n };\n }\n\n /**\n * Set up reproducible random seed if provided\n */\n protected setupRandomSeed(): void {\n if (this.seed !== undefined) {\n // Note: For full reproducibility, we'd need a proper PRNG\n Math.random = (() => {\n let seed = this.seed!;\n return () => {\n seed = (seed * 9301 + 49297) % 233280;\n return seed / 233280;\n };\n })();\n }\n }\n\n /**\n * Check if optimization should stop early due to cost limits\n */\n protected checkCostLimits(): boolean {\n return this.costTracker?.isLimitReached() ?? false;\n }\n\n /**\n * Check if target score has been reached\n */\n protected checkTargetScore(currentScore: number): boolean {\n return this.targetScore !== undefined && currentScore >= this.targetScore;\n }\n\n /**\n * Update resource usage statistics\n */\n protected updateResourceUsage(startTime: number, tokensUsed = 0): void {\n this.stats.resourceUsage.totalTime = Date.now() - startTime;\n this.stats.resourceUsage.totalTokens += tokensUsed;\n\n if (this.stats.totalCalls > 0) {\n this.stats.resourceUsage.avgLatencyPerEval =\n this.stats.resourceUsage.totalTime / this.stats.totalCalls;\n }\n }\n\n /**\n * Trigger early stopping with appropriate callbacks\n */\n protected triggerEarlyStopping(reason: string, bestScoreRound: number): void {\n this.stats.earlyStopped = true;\n this.stats.earlyStopping = {\n bestScoreRound,\n patienceExhausted: reason.includes('improvement'),\n reason,\n };\n\n // Record early stopping metrics (use a default optimizer type)\n this.recordEarlyStoppingMetrics(reason, 'unknown');\n\n if (this.onEarlyStop) {\n this.onEarlyStop(reason, this.stats);\n }\n }\n\n /**\n * Get the validation set, with fallback to a split of examples\n */\n protected getValidationSet(options?: AxCompileOptions): readonly AxExample[] {\n return (\n options?.overrideValidationSet ||\n this.validationSet ||\n this.examples.slice(0, Math.floor(this.examples.length * 0.2))\n );\n }\n\n /**\n * Get the AI service to use for a specific task, preferring teacher when available\n * @param preferTeacher Whether to prefer teacher AI over student AI\n * @param options Optional compile options that may override teacher AI\n * @returns The appropriate AI service to use\n */\n protected getAIService(\n preferTeacher = false,\n options?: AxCompileOptions\n ): AxAIService {\n // Check for override teacher AI first\n if (preferTeacher && options?.overrideTeacherAI) {\n return options.overrideTeacherAI;\n }\n\n // Then check for configured teacher AI\n if (preferTeacher && this.teacherAI) 
{\n return this.teacherAI;\n }\n\n return this.studentAI;\n }\n\n /**\n * Check if teacher AI is available (including overrides)\n * @param options Optional compile options that may override teacher AI\n * @returns True if teacher AI is configured or overridden\n */\n protected hasTeacherAI(options?: AxCompileOptions): boolean {\n return (\n options?.overrideTeacherAI !== undefined || this.teacherAI !== undefined\n );\n }\n\n /**\n * Get teacher AI if available, otherwise return student AI\n * @param options Optional compile options that may override teacher AI\n * @returns Teacher AI if available, otherwise student AI\n */\n protected getTeacherOrStudentAI(options?: AxCompileOptions): AxAIService {\n return options?.overrideTeacherAI || this.teacherAI || this.studentAI;\n }\n\n /**\n * Execute a task with teacher AI if available, otherwise use student AI\n * @param task Function that takes an AI service and returns a promise\n * @param preferTeacher Whether to prefer teacher AI (default: true)\n * @param options Optional compile options that may override teacher AI\n * @returns Result of the task execution\n */\n protected async executeWithTeacher<T>(\n task: (ai: AxAIService) => Promise<T>,\n preferTeacher = true,\n options?: AxCompileOptions\n ): Promise<T> {\n const ai = this.getAIService(preferTeacher, options);\n return await task(ai);\n }\n\n /**\n * Abstract method that must be implemented by concrete optimizers\n */\n public abstract compile(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<AxOptimizerResult<OUT>>;\n\n /**\n * Get current optimization statistics\n */\n public getStats(): AxOptimizationStats {\n return { ...this.stats };\n }\n\n /**\n * Reset optimizer state for reuse with different programs\n */\n public reset(): void {\n this.stats = this.initializeStats();\n this.costTracker?.reset();\n this.currentRound = 0;\n this.scoreHistory = [];\n this.configurationHistory = [];\n }\n\n /**\n * Basic program validation that can be extended by concrete optimizers\n */\n public validateProgram(program: Readonly<AxProgram<IN, OUT>>): {\n isValid: boolean;\n issues: string[];\n suggestions: string[];\n } {\n const issues: string[] = [];\n const suggestions: string[] = [];\n\n // Check if program has required methods for optimization\n if (!('forward' in program) || typeof program.forward !== 'function') {\n issues.push('Program must have a forward method');\n }\n\n // Check if we have enough examples\n if (this.examples.length < 2) {\n issues.push('Need at least 2 examples for optimization');\n suggestions.push('Provide more training examples');\n }\n\n // Check if validation set is reasonable\n const valSetSize = this.getValidationSet().length;\n if (valSetSize < 1) {\n issues.push('Validation set is empty');\n suggestions.push('Provide examples or a validation set');\n }\n\n return {\n isValid: issues.length === 0,\n issues,\n suggestions,\n };\n }\n\n /**\n * Multi-objective optimization using Pareto frontier\n * Default implementation that leverages the single-objective compile method\n * @param program The program to optimize\n * @param metricFn Multi-objective metric function that returns multiple scores\n * @param options Optional configuration options\n * @returns Pareto optimization result with frontier of non-dominated solutions\n */\n public async compilePareto(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<AxParetoResult<OUT>> {\n const 
startTime = Date.now();\n\n if (options?.verbose) {\n this.getLogger(options)?.(\n 'Starting Pareto optimization using base implementation',\n { tags: ['discovery'] }\n );\n this.getLogger(options)?.(\n 'This will run multiple single-objective optimizations',\n { tags: ['discovery'] }\n );\n }\n\n // Strategy 1: Generate different weighted combinations of objectives\n const solutions = await this.generateWeightedSolutions(\n program,\n metricFn,\n options\n );\n\n // Strategy 2: Generate constraint-based solutions (optimize one objective while constraining others)\n const constraintSolutions = await this.generateConstraintSolutions(\n program,\n metricFn,\n options\n );\n\n // Combine all solutions\n const allSolutions = [...solutions, ...constraintSolutions];\n\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Generated ${allSolutions.length} candidate solutions`,\n { tags: ['discovery'] }\n );\n }\n\n // Find Pareto frontier\n const paretoFront = this.findParetoFrontier(allSolutions);\n\n // Calculate hypervolume if possible\n const hypervolume = this.calculateHypervolume(paretoFront);\n\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Found ${paretoFront.length} non-dominated solutions`,\n { tags: ['discovery'] }\n );\n this.getLogger(options)?.(\n `Hypervolume: ${hypervolume?.toFixed(4) || 'N/A'}`,\n { tags: ['discovery'] }\n );\n }\n\n // Update stats\n this.updateResourceUsage(startTime);\n this.stats.convergenceInfo.converged = true;\n\n // Record Pareto optimization metrics\n this.recordParetoMetrics(\n paretoFront.length,\n allSolutions.length,\n 'base_optimizer',\n hypervolume\n );\n\n // Calculate best score as the maximum across all objectives and solutions\n const bestScore =\n paretoFront.length > 0\n ? Math.max(\n ...paretoFront.map((sol) => Math.max(...Object.values(sol.scores)))\n )\n : 0;\n\n return {\n demos: paretoFront.length > 0 ? 
[...paretoFront[0]!.demos] : undefined,\n stats: this.stats,\n bestScore,\n paretoFront,\n hypervolume,\n paretoFrontSize: paretoFront.length,\n finalConfiguration: {\n paretoFrontSize: paretoFront.length,\n hypervolume,\n strategy: 'weighted_combinations_and_constraints',\n numSolutions: allSolutions.length,\n },\n };\n }\n\n /**\n * Generate solutions using different weighted combinations of objectives\n */\n private async generateWeightedSolutions(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<\n Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }>\n > {\n const solutions: Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }> = [];\n\n // First, determine the objectives by running the metric on a sample\n const sampleExample = this.examples[0]!;\n const samplePrediction = await program.forward(\n this.studentAI,\n sampleExample as IN\n );\n const sampleScores = await metricFn({\n prediction: samplePrediction,\n example: sampleExample,\n });\n const objectives = Object.keys(sampleScores);\n\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Detected objectives: ${objectives.join(', ')}`,\n { tags: ['discovery'] }\n );\n }\n\n // Generate different weight combinations\n const weightCombinations = this.generateWeightCombinations(objectives);\n\n for (let i = 0; i < weightCombinations.length; i++) {\n const weights = weightCombinations[i]!;\n\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Optimizing with weights: ${JSON.stringify(weights)}`,\n { tags: ['discovery'] }\n );\n }\n\n // Create a weighted single-objective metric\n const weightedMetric: AxMetricFn = async ({ prediction, example }) => {\n const scores = await metricFn({ prediction, example });\n let weightedScore = 0;\n for (const [objective, score] of Object.entries(scores)) {\n weightedScore += score * (weights[objective] || 0);\n }\n return weightedScore;\n };\n\n try {\n // Use the concrete optimizer's compile method\n const result = await this.compile(program, weightedMetric, {\n ...options,\n verbose: false, // Suppress inner optimization logs\n });\n\n // Evaluate the result with the multi-objective metric\n const scores = await this.evaluateWithMultiObjective(\n program,\n result,\n metricFn\n );\n\n solutions.push({\n scores,\n demos: result.demos,\n configuration: {\n ...result.finalConfiguration,\n weights,\n strategy: 'weighted_combination',\n },\n });\n } catch (error) {\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Failed optimization with weights ${JSON.stringify(weights)}: ${error}`,\n { tags: ['warning'] }\n );\n }\n }\n }\n\n return solutions;\n }\n\n /**\n * Generate solutions using constraint-based optimization\n */\n private async generateConstraintSolutions(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<\n Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }>\n > {\n const solutions: Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }> = [];\n\n // Get objectives from a sample evaluation\n const sampleExample = this.examples[0]!;\n const samplePrediction = await program.forward(\n this.studentAI,\n sampleExample as IN\n );\n const sampleScores = 
await metricFn({\n prediction: samplePrediction,\n example: sampleExample,\n });\n const objectives = Object.keys(sampleScores);\n\n // For each objective, optimize it while constraining others\n for (const primaryObjective of objectives) {\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Optimizing ${primaryObjective} with constraints on other objectives`,\n { tags: ['discovery'] }\n );\n }\n\n // Create a constraint-based metric\n const constraintMetric: AxMetricFn = async ({ prediction, example }) => {\n const scores = await metricFn({ prediction, example });\n\n // Primary objective score\n const primaryScore = scores[primaryObjective] || 0;\n\n // Penalty for violating constraints on other objectives\n let penalty = 0;\n for (const [objective, score] of Object.entries(scores)) {\n if (objective !== primaryObjective) {\n // Simple constraint: other objectives should be at least 0.3\n // This is a heuristic - in practice you'd set domain-specific thresholds\n if (score < 0.3) {\n penalty += (0.3 - score) * 2; // Penalty factor\n }\n }\n }\n\n return primaryScore - penalty;\n };\n\n try {\n const result = await this.compile(program, constraintMetric, {\n ...options,\n verbose: false,\n });\n\n const scores = await this.evaluateWithMultiObjective(\n program,\n result,\n metricFn\n );\n\n solutions.push({\n scores,\n demos: result.demos,\n configuration: {\n ...result.finalConfiguration,\n primaryObjective,\n strategy: 'constraint_based',\n },\n });\n } catch (error) {\n if (options?.verbose) {\n this.getLogger(options)?.(\n `Failed constraint optimization for ${primaryObjective}: ${error}`,\n { tags: ['warning'] }\n );\n }\n }\n }\n\n return solutions;\n }\n\n /**\n * Generate different weight combinations for objectives\n */\n private generateWeightCombinations(\n objectives: string[]\n ): Record<string, number>[] {\n const combinations: Record<string, number>[] = [];\n\n // Single-objective focus (one objective gets weight 1, others get 0)\n for (const objective of objectives) {\n const weights: Record<string, number> = {};\n for (const obj of objectives) {\n weights[obj] = obj === objective ? 
1 : 0;\n }\n combinations.push(weights);\n }\n\n // Equal weights\n const equalWeights: Record<string, number> = {};\n for (const objective of objectives) {\n equalWeights[objective] = 1 / objectives.length;\n }\n combinations.push(equalWeights);\n\n // If we have 2 objectives, generate more granular combinations\n if (objectives.length === 2) {\n const [obj1, obj2] = objectives;\n for (let w1 = 0.1; w1 <= 0.9; w1 += 0.2) {\n const w2 = 1 - w1;\n combinations.push({ [obj1!]: w1, [obj2!]: w2 });\n }\n }\n\n // If we have 3 objectives, generate some key combinations\n if (objectives.length === 3) {\n const [obj1, obj2, obj3] = objectives;\n combinations.push(\n { [obj1!]: 0.5, [obj2!]: 0.3, [obj3!]: 0.2 },\n { [obj1!]: 0.3, [obj2!]: 0.5, [obj3!]: 0.2 },\n { [obj1!]: 0.2, [obj2!]: 0.3, [obj3!]: 0.5 }\n );\n }\n\n return combinations;\n }\n\n /**\n * Evaluate a single-objective result with multi-objective metrics\n */\n private async evaluateWithMultiObjective(\n program: Readonly<AxProgram<IN, OUT>>,\n result: Readonly<AxOptimizerResult<OUT>>,\n metricFn: AxMultiMetricFn\n ): Promise<Record<string, number>> {\n const valSet = this.getValidationSet();\n const allScores: Record<string, number[]> = {};\n\n // Apply the optimized configuration to the program\n const testProgram = { ...program };\n if (result.demos && 'setDemos' in testProgram) {\n (\n testProgram as unknown as { setDemos: (demos: unknown) => void }\n ).setDemos(result.demos);\n }\n\n // Evaluate on validation set\n const evalSet = valSet.slice(0, Math.min(5, valSet.length));\n\n for (const example of evalSet) {\n try {\n const prediction = await testProgram.forward(\n this.studentAI,\n example as IN\n );\n const scores = await metricFn({ prediction, example });\n\n // Collect scores for each objective\n for (const [objective, score] of Object.entries(scores)) {\n if (!allScores[objective]) {\n allScores[objective] = [];\n }\n allScores[objective]!.push(score);\n }\n } catch {}\n }\n\n // Calculate average scores for each objective\n const avgScores: Record<string, number> = {};\n for (const [objective, scores] of Object.entries(allScores)) {\n avgScores[objective] =\n scores.length > 0\n ? 
scores.reduce((sum, score) => sum + score, 0) / scores.length\n : 0;\n }\n\n return avgScores;\n }\n\n /**\n * Find the Pareto frontier from a set of solutions\n */\n private findParetoFrontier(\n solutions: Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }>\n ): Array<{\n demos: readonly AxProgramDemos<AxGenIn, OUT>[];\n scores: Readonly<Record<string, number>>;\n configuration: Readonly<Record<string, unknown>>;\n dominatedSolutions: number;\n }> {\n const paretoFront: Array<{\n demos: readonly AxProgramDemos<AxGenIn, OUT>[];\n scores: Readonly<Record<string, number>>;\n configuration: Readonly<Record<string, unknown>>;\n dominatedSolutions: number;\n }> = [];\n\n // For each solution, check if it's dominated by any other solution\n for (let i = 0; i < solutions.length; i++) {\n const solutionA = solutions[i]!;\n let isDominated = false;\n let dominatedCount = 0;\n\n for (let j = 0; j < solutions.length; j++) {\n if (i === j) continue;\n\n const solutionB = solutions[j]!;\n\n // Check if B dominates A\n if (this.dominates(solutionB.scores, solutionA.scores)) {\n isDominated = true;\n break;\n }\n\n // Count how many solutions A dominates\n if (this.dominates(solutionA.scores, solutionB.scores)) {\n dominatedCount++;\n }\n }\n\n // If A is not dominated by any solution, it's on the Pareto frontier\n if (!isDominated) {\n paretoFront.push({\n demos: solutionA.demos || [],\n scores: solutionA.scores,\n configuration: solutionA.configuration,\n dominatedSolutions: dominatedCount,\n });\n }\n }\n\n return paretoFront;\n }\n\n /**\n * Check if solution A dominates solution B\n * A dominates B if A is better or equal in all objectives and strictly better in at least one\n */\n private dominates(\n scoresA: Record<string, number>,\n scoresB: Record<string, number>\n ): boolean {\n const objectives = Object.keys(scoresA);\n\n // Check if A is at least as good as B in all objectives\n let atLeastAsGood = true;\n let strictlyBetter = false;\n\n for (const objective of objectives) {\n const scoreA = scoresA[objective] || 0;\n const scoreB = scoresB[objective] || 0;\n\n if (scoreA < scoreB) {\n atLeastAsGood = false;\n break;\n }\n\n if (scoreA > scoreB) {\n strictlyBetter = true;\n }\n }\n\n return atLeastAsGood && strictlyBetter;\n }\n\n /**\n * Calculate hypervolume of the Pareto frontier\n * Simplified implementation using reference point at origin\n */\n private calculateHypervolume(\n paretoFront: Array<{\n scores: Readonly<Record<string, number>>;\n }>\n ): number | undefined {\n if (paretoFront.length === 0) return undefined;\n\n // For simplicity, calculate 2D hypervolume if we have exactly 2 objectives\n const firstSolution = paretoFront[0]!;\n const objectives = Object.keys(firstSolution.scores);\n\n if (objectives.length === 2) {\n const [obj1, obj2] = objectives;\n let hypervolume = 0;\n\n // Sort solutions by first objective (descending)\n const sortedSolutions = [...paretoFront].sort(\n (a, b) => (b.scores[obj1!] || 0) - (a.scores[obj1!] || 0)\n );\n\n let prevScore2 = 0;\n for (const solution of sortedSolutions) {\n const score1 = solution.scores[obj1!] || 0;\n const score2 = solution.scores[obj2!] 
|| 0;\n\n // Calculate area contribution\n hypervolume += score1 * (score2 - prevScore2);\n prevScore2 = Math.max(prevScore2, score2);\n }\n\n return hypervolume;\n }\n\n // For higher dimensions, return undefined (would need more complex algorithm)\n return undefined;\n }\n\n /**\n * Save current optimization state to checkpoint\n */\n protected async saveCheckpoint(\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration?: Record<string, unknown>,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ): Promise<string | undefined> {\n const saveFn = options?.overrideCheckpointSave || this.checkpointSave;\n if (!saveFn) return undefined;\n\n const startTime = Date.now();\n let success = false;\n let checkpointId: string | undefined;\n\n try {\n const checkpoint: AxOptimizationCheckpoint = {\n version: '1.0.0',\n timestamp: Date.now(),\n optimizerType,\n optimizerConfig,\n currentRound: this.currentRound,\n totalRounds:\n this.stats.resourceUsage.totalTime > 0 ? this.currentRound : 0,\n bestScore,\n bestConfiguration,\n scoreHistory: [...this.scoreHistory],\n configurationHistory: [...this.configurationHistory],\n stats: { ...this.stats },\n optimizerState,\n examples: this.examples,\n validationSet: this.validationSet,\n };\n\n checkpointId = await saveFn(checkpoint);\n success = true;\n } catch (error) {\n success = false;\n throw error;\n } finally {\n const latency = Date.now() - startTime;\n this.recordCheckpointMetrics('save', latency, success, optimizerType);\n }\n\n return checkpointId;\n }\n\n /**\n * Load optimization state from checkpoint\n */\n protected async loadCheckpoint(\n checkpointId: string,\n options?: AxCompileOptions\n ): Promise<AxOptimizationCheckpoint | null> {\n const loadFn = options?.overrideCheckpointLoad || this.checkpointLoad;\n if (!loadFn) return null;\n\n const startTime = Date.now();\n let success = false;\n let checkpoint: AxOptimizationCheckpoint | null = null;\n\n try {\n checkpoint = await loadFn(checkpointId);\n success = checkpoint !== null;\n } catch (error) {\n success = false;\n throw error;\n } finally {\n const latency = Date.now() - startTime;\n // Use a default optimizer type since we don't know it at load time\n this.recordCheckpointMetrics('load', latency, success, 'unknown');\n }\n\n return checkpoint;\n }\n\n /**\n * Restore optimizer state from checkpoint\n */\n protected restoreFromCheckpoint(\n checkpoint: Readonly<AxOptimizationCheckpoint>\n ): void {\n this.currentRound = checkpoint.currentRound;\n this.scoreHistory = [...checkpoint.scoreHistory];\n this.configurationHistory = [...checkpoint.configurationHistory];\n this.stats = { ...checkpoint.stats };\n }\n\n /**\n * Check if checkpoint should be saved\n */\n protected shouldSaveCheckpoint(\n round: number,\n options?: AxCompileOptions\n ): boolean {\n const interval =\n options?.overrideCheckpointInterval || this.checkpointInterval;\n return interval !== undefined && round % interval === 0;\n }\n\n /**\n * Update optimization progress and handle checkpointing\n */\n protected async updateOptimizationProgress(\n round: number,\n score: number,\n configuration: Record<string, unknown>,\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration?: Record<string, unknown>,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ): Promise<void> {\n this.currentRound = round;\n this.scoreHistory.push(score);\n 
this.configurationHistory.push(configuration);\n\n // Save checkpoint if needed\n if (this.shouldSaveCheckpoint(round, options)) {\n await this.saveCheckpoint(\n optimizerType,\n optimizerConfig,\n bestScore,\n bestConfiguration,\n optimizerState,\n options\n );\n }\n }\n\n /**\n * Save final checkpoint on completion\n */\n protected async saveFinalCheckpoint(\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration?: Record<string, unknown>,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ): Promise<void> {\n if (options?.saveCheckpointOnComplete !== false) {\n await this.saveCheckpoint(\n optimizerType,\n optimizerConfig,\n bestScore,\n bestConfiguration,\n { ...optimizerState, final: true },\n options\n );\n }\n }\n\n /**\n * Get the logger function with fallback hierarchy:\n * 1. Explicit logger passed to optimizer\n * 2. Logger from student AI service\n * 3. Default optimizer logger\n * 4. undefined if verbose is false\n */\n protected getLogger(\n options?: AxCompileOptions\n ): AxLoggerFunction | undefined {\n // Check if logging should be disabled\n const isVerbose = this.isLoggingEnabled(options);\n if (!isVerbose) {\n return undefined;\n }\n\n // Use explicit logger if provided\n if (this.logger) {\n return this.logger;\n }\n\n // Fall back to default optimizer logger\n return axDefaultOptimizerLogger;\n }\n\n /**\n * Check if logging is enabled based on verbose settings\n */\n protected isLoggingEnabled(options?: AxCompileOptions): boolean {\n // Explicit verbose setting in options takes precedence\n if (options?.verbose !== undefined) {\n return options.verbose;\n }\n\n // Use optimizer's verbose setting\n return this.verbose ?? true; // Default to true if not specified\n }\n\n /**\n * Record optimization start metrics\n */\n protected recordOptimizationStart(\n optimizerType: string,\n programSignature?: string\n ): void {\n if (!this.metricsInstruments) return;\n\n // Record program complexity metrics\n if (programSignature) {\n // Extract field counts from signature (simplified)\n const inputFields = (programSignature.match(/input:/g) || []).length;\n const outputFields = (programSignature.match(/output:/g) || []).length;\n\n recordProgramComplexityMetric(\n this.metricsInstruments,\n inputFields,\n outputFields,\n this.examples.length,\n this.getValidationSet().length,\n optimizerType\n );\n }\n\n // Record configuration metrics\n recordOptimizerConfigurationMetric(\n this.metricsInstruments,\n optimizerType,\n this.targetScore,\n undefined // maxRounds would be set by concrete optimizers\n );\n }\n\n /**\n * Record optimization completion metrics\n */\n protected recordOptimizationComplete(\n duration: number,\n success: boolean,\n optimizerType: string,\n programSignature?: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordOptimizationMetric(\n this.metricsInstruments,\n duration,\n success,\n optimizerType,\n programSignature\n );\n\n recordOptimizationDurationMetric(\n this.metricsInstruments,\n duration,\n optimizerType\n );\n\n // Record resource usage\n const currentCost = this.costTracker?.getCurrentCost() ?? 0;\n const totalTokens = this.costTracker?.getTotalTokens() ?? 
0;\n recordResourceUsageMetric(\n this.metricsInstruments,\n totalTokens,\n currentCost,\n optimizerType\n );\n }\n\n /**\n * Record convergence metrics\n */\n protected recordConvergenceMetrics(\n rounds: number,\n currentScore: number,\n improvement: number,\n stagnationRounds: number,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordConvergenceMetric(\n this.metricsInstruments,\n rounds,\n currentScore,\n improvement,\n stagnationRounds,\n optimizerType\n );\n }\n\n /**\n * Record early stopping metrics\n */\n protected recordEarlyStoppingMetrics(\n reason: string,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordEarlyStoppingMetric(this.metricsInstruments, reason, optimizerType);\n }\n\n /**\n * Record teacher-student interaction metrics\n */\n protected recordTeacherStudentMetrics(\n latency: number,\n scoreImprovement: number,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordTeacherStudentMetric(\n this.metricsInstruments,\n latency,\n scoreImprovement,\n optimizerType\n );\n }\n\n /**\n * Record checkpoint metrics\n */\n protected recordCheckpointMetrics(\n operation: 'save' | 'load',\n latency: number,\n success: boolean,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordCheckpointMetric(\n this.metricsInstruments,\n operation,\n latency,\n success,\n optimizerType\n );\n }\n\n /**\n * Record Pareto optimization metrics\n */\n protected recordParetoMetrics(\n frontSize: number,\n solutionsGenerated: number,\n optimizerType: string,\n hypervolume?: number\n ): void {\n if (!this.metricsInstruments) return;\n\n recordParetoMetric(\n this.metricsInstruments,\n frontSize,\n solutionsGenerated,\n optimizerType,\n hypervolume\n );\n }\n\n /**\n * Record performance metrics\n */\n protected recordPerformanceMetrics(\n metricType: 'evaluation' | 'demo_generation' | 'metric_computation',\n duration: number,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordOptimizerPerformanceMetric(\n this.metricsInstruments,\n metricType,\n duration,\n optimizerType\n );\n }\n}\n","import type { AxAIService } from '../../ai/types.js';\nimport { AxGen } from '../generate.js';\nimport {\n AxBaseOptimizer,\n type AxCompileOptions,\n type AxExample,\n type AxMetricFn,\n type AxMiPROCompileOptions,\n type AxMiPROOptimizerOptions,\n type AxOptimizerArgs,\n type AxOptimizerResult,\n} from '../optimizer.js';\nimport type {\n AxProgram,\n AxProgramDemos,\n AxResultPickerFunction,\n} from '../program.js';\nimport type { AxGenIn, AxGenOut } from '../types.js';\nimport { updateProgressBar } from '../util.js';\n\nimport { AxBootstrapFewShot } from './bootstrapFewshot.js';\n\ninterface ConfigType extends Record<string, unknown> {\n instruction: string;\n bootstrappedDemos: number;\n labeledExamples: number;\n}\n\n// Extended result interface to include the optimized AxGen\nexport interface AxMiPROResult<IN extends AxGenIn, OUT extends AxGenOut>\n extends AxOptimizerResult<OUT> {\n optimizedGen?: AxGen<IN, OUT>;\n}\n\nexport class AxMiPRO<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> extends AxBaseOptimizer<IN, OUT> {\n // MiPRO-specific options\n private maxBootstrappedDemos: number;\n private maxLabeledDemos: number;\n private numCandidates: number;\n private initTemperature: number;\n private numTrials: number;\n private minibatch: boolean;\n private minibatchSize: number;\n private minibatchFullEvalSteps: 
number;\n private programAwareProposer: boolean;\n private dataAwareProposer: boolean;\n private viewDataBatchSize: number;\n private tipAwareProposer: boolean;\n private fewshotAwareProposer: boolean;\n private earlyStoppingTrials: number;\n private minImprovementThreshold: number;\n private bayesianOptimization: boolean;\n private acquisitionFunction:\n | 'expected_improvement'\n | 'upper_confidence_bound'\n | 'probability_improvement';\n private explorationWeight: number;\n\n // Self-consistency / multiple sampling\n private sampleCount: number;\n\n // Surrogate model state for Bayesian optimization\n private miproConfigHistory: { config: ConfigType; score: number }[] = [];\n private surrogateModel: Map<string, { mean: number; variance: number }> =\n new Map();\n\n constructor(\n args: Readonly<AxOptimizerArgs & { options?: AxMiPROOptimizerOptions }>\n ) {\n // Call parent constructor with base args\n super(args);\n\n const options = args.options || {};\n\n // MiPRO-specific options with proper defaults\n this.numCandidates = options.numCandidates ?? 5;\n this.initTemperature = options.initTemperature ?? 0.7;\n this.maxBootstrappedDemos = options.maxBootstrappedDemos ?? 3;\n this.maxLabeledDemos = options.maxLabeledDemos ?? 4;\n this.numTrials = options.numTrials ?? 30;\n this.minibatch = options.minibatch ?? true;\n this.minibatchSize = options.minibatchSize ?? 25;\n this.minibatchFullEvalSteps = options.minibatchFullEvalSteps ?? 10;\n this.programAwareProposer = options.programAwareProposer ?? true;\n this.dataAwareProposer = options.dataAwareProposer ?? true;\n this.viewDataBatchSize = options.viewDataBatchSize ?? 10;\n this.tipAwareProposer = options.tipAwareProposer ?? true;\n this.fewshotAwareProposer = options.fewshotAwareProposer ?? true;\n this.earlyStoppingTrials = options.earlyStoppingTrials ?? 5;\n this.minImprovementThreshold = options.minImprovementThreshold ?? 0.01;\n this.bayesianOptimization = options.bayesianOptimization ?? false;\n this.acquisitionFunction =\n options.acquisitionFunction ?? 'expected_improvement';\n this.explorationWeight = options.explorationWeight ?? 0.1;\n\n // Self-consistency options\n this.sampleCount = options.sampleCount ?? 
1;\n\n // Update convergence threshold in stats\n this.stats.convergenceInfo.convergenceThreshold =\n this.minImprovementThreshold;\n }\n\n /**\n * Configures the optimizer for light, medium, or heavy optimization\n * @param level The optimization level: \"light\", \"medium\", or \"heavy\"\n */\n public configureAuto(level: 'light' | 'medium' | 'heavy'): void {\n switch (level) {\n case 'light':\n this.numCandidates = 3;\n this.numTrials = 10;\n this.minibatch = true;\n this.minibatchSize = 20;\n break;\n case 'medium':\n this.numCandidates = 5;\n this.numTrials = 20;\n this.minibatch = true;\n this.minibatchSize = 25;\n break;\n case 'heavy':\n this.numCandidates = 7;\n this.numTrials = 30;\n this.minibatch = true;\n this.minibatchSize = 30;\n break;\n }\n }\n\n /**\n * Generates creative tips for instruction generation\n */\n private generateTips(): string[] {\n return [\n 'Be very specific and detailed in your instructions.',\n 'Focus on step-by-step reasoning in your instructions.',\n 'Provide clear constraints and guidelines in your instructions.',\n 'Keep your instructions concise and to the point.',\n 'Emphasize accuracy and precision in your instructions.',\n 'Include examples of good outputs in your instructions.',\n 'Focus on handling edge cases in your instructions.',\n 'Explicitly outline the reasoning process in your instructions.',\n ];\n }\n\n /**\n * Generates program summary for context-aware instruction generation\n */\n private async generateProgramSummary(\n program: Readonly<AxProgram<IN, OUT>>,\n ai: Readonly<AxAIService>\n ): Promise<string> {\n // Extract program structure information\n const signature = program.getSignature();\n\n // Create program summary prompt based on paper's Appendix C.5\n const summaryPrompt = `\nAnalyze this language model program and provide a concise summary of its purpose and structure.\n\nProgram Signature: ${signature}\n\nProvide a 2-3 sentence summary focusing on:\n1. The main task or purpose of this program\n2. The input-output relationship\n3. Any special constraints or requirements\n\nSummary:`;\n\n try {\n const response = await ai.chat({\n chatPrompt: [{ role: 'user', content: summaryPrompt }],\n });\n if ('results' in response) {\n return (\n response.results[0]?.content?.trim() ||\n 'General language model program'\n );\n }\n return 'General language model program';\n } catch {\n return 'General language model program';\n }\n }\n\n /**\n * Generates dataset summary for context-aware instruction generation\n */\n private async generateDatasetSummary(\n examples: readonly AxExample[],\n ai: Readonly<AxAIService>\n ): Promise<string> {\n if (examples.length === 0) return 'No examples available';\n\n // Sample a few examples for analysis (based on paper's approach)\n const sampleSize = Math.min(this.viewDataBatchSize, examples.length);\n const sampledExamples = examples.slice(0, sampleSize);\n\n // Create dataset summary prompt based on paper's Appendix C.3\n const exampleTexts = sampledExamples\n .map((ex, i) => `Example ${i + 1}: ${JSON.stringify(ex)}`)\n .join('\\n');\n\n const summaryPrompt = `\nAnalyze this dataset and provide a concise summary of its characteristics.\n\nSample Examples:\n${exampleTexts}\n\nProvide a 2-3 sentence summary focusing on:\n1. The type of data and domain\n2. Common patterns or structures in the examples\n3. 
Key challenges or requirements for processing this data\n\nDataset Summary:`;\n\n try {\n const response = await ai.chat({\n chatPrompt: [{ role: 'user', content: summaryPrompt }],\n });\n if ('results' in response) {\n return response.results[0]?.content?.trim() || 'General dataset';\n }\n return 'General dataset';\n } catch {\n return 'General dataset';\n }\n }\n\n /**\n * Enhanced instruction generation using AI with program and data awareness\n */\n private async generateInstruction({\n tip,\n candidateIndex,\n ai,\n programSummary,\n datasetSummary,\n previousInstructions = [],\n }: Readonly<{\n tip: string | undefined;\n candidateIndex: number;\n ai: Readonly<AxAIService>;\n programSummary?: string;\n datasetSummary?: string;\n previousInstructions?: string[];\n }>): Promise<string> {\n // Build context-aware instruction generation prompt based on paper\n let contextInfo = '';\n\n if (this.programAwareProposer && programSummary) {\n contextInfo += `\\nProgram Context: ${programSummary}`;\n }\n\n if (this.dataAwareProposer && datasetSummary) {\n contextInfo += `\\nDataset Context: ${datasetSummary}`;\n }\n\n if (this.fewshotAwareProposer && previousInstructions.length > 0) {\n contextInfo += `\\nPrevious Instructions (avoid repeating): ${previousInstructions.slice(-3).join('; ')}`;\n }\n\n // Core instruction generation prompt inspired by paper's Appendix C.1\n const instructionPrompt = `\nGenerate a high-quality instruction for a language model program.\n\n${contextInfo}\n\n${tip ? `Tip: ${tip}` : ''}\n\nRequirements:\n1. Be specific and actionable\n2. Focus on accuracy and clarity\n3. Consider the program's purpose and data characteristics\n4. Make the instruction distinct from previous ones\n5. Keep it concise but comprehensive\n\nGenerate a single, well-crafted instruction:\nInstruction:`;\n\n try {\n const response = await ai.chat({\n chatPrompt: [\n {\n role: 'user',\n content: instructionPrompt,\n },\n ],\n });\n\n if ('results' in response) {\n const instruction = response.results[0]?.content?.trim();\n if (instruction && instruction.length > 10) {\n return instruction;\n }\n }\n } catch (error) {\n if (this.isLoggingEnabled()) {\n this.getLogger()?.(`Failed to generate AI instruction: ${error}`, {\n tags: ['optimizer', 'warning'],\n });\n }\n }\n\n // Fallback to enhanced templates if AI generation fails\n const enhancedTemplates = [\n 'Analyze the input systematically and provide a precise, well-reasoned response.',\n 'Think through this step-by-step, considering all relevant factors before responding.',\n 'Examine the input carefully and generate an accurate, detailed answer.',\n 'Process the information methodically and deliver a clear, comprehensive response.',\n 'Consider the context thoroughly and provide a thoughtful, accurate answer.',\n ];\n\n let instruction =\n enhancedTemplates[candidateIndex % enhancedTemplates.length] ||\n enhancedTemplates[0]!;\n\n if (tip) {\n instruction = `${instruction} ${tip}`;\n }\n\n return instruction;\n }\n\n /**\n * Generates instruction candidates using enhanced AI-powered generation\n * @param options Optional compile options that may override teacher AI\n * @returns Array of generated instruction candidates\n */\n private async proposeInstructionCandidates(\n program: Readonly<AxProgram<IN, OUT>>,\n options?: AxCompileOptions\n ): Promise<string[]> {\n const instructions: string[] = [];\n const aiToUse = this.getTeacherOrStudentAI(options);\n\n // Generate contextual information if enabled\n let programSummary: string | 
undefined;\n let datasetSummary: string | undefined;\n\n if (this.programAwareProposer) {\n programSummary = await this.generateProgramSummary(program, aiToUse);\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(`Program summary: ${programSummary}`, {\n tags: ['optimizer', 'config'],\n });\n }\n }\n\n if (this.dataAwareProposer) {\n datasetSummary = await this.generateDatasetSummary(\n this.examples,\n aiToUse\n );\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(`Dataset summary: ${datasetSummary}`, {\n tags: ['optimizer', 'config'],\n });\n }\n }\n\n // Generate creative tips for tip-aware proposing\n const tips = this.tipAwareProposer ? this.generateTips() : [];\n\n // Generate instructions for each candidate\n for (let i = 0; i < this.numCandidates; i++) {\n const tipIndex = tips.length > 0 ? i % tips.length : -1;\n const tipToUse = tipIndex >= 0 ? tips[tipIndex] : undefined;\n\n const instruction = await this.generateInstruction({\n tip: tipToUse,\n candidateIndex: i,\n ai: aiToUse,\n programSummary,\n datasetSummary,\n previousInstructions: instructions, // Pass previous instructions for diversity\n });\n\n instructions.push(instruction);\n }\n\n return instructions;\n }\n\n /**\n * Bootstraps few-shot examples for the program\n */\n private async bootstrapFewShotExamples(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMetricFn\n ): Promise<AxProgramDemos<IN, OUT>[]> {\n if (this.isLoggingEnabled()) {\n this.getLogger()?.('Bootstrapping few-shot examples...', {\n tags: ['optimizer', 'phase'],\n });\n }\n\n // Initialize the bootstrapper for this program\n const bootstrapper = new AxBootstrapFewShot<IN, OUT>({\n studentAI: this.studentAI,\n examples: this.examples,\n options: {\n maxDemos: this.maxBootstrappedDemos,\n maxRounds: 3,\n verboseMode: this.isLoggingEnabled(),\n },\n });\n\n const result = await bootstrapper.compile(program, metricFn, {\n maxDemos: this.maxBootstrappedDemos,\n });\n\n return (result.demos || []) as AxProgramDemos<IN, OUT>[];\n }\n\n /**\n * Selects labeled examples directly from the training set\n */\n private selectLabeledExamples(): AxExample[] {\n const selectedExamples: AxExample[] = [];\n\n // Random sampling from the training set\n const indices = new Set<number>();\n while (\n indices.size < this.maxLabeledDemos &&\n indices.size < this.examples.length\n ) {\n const idx = Math.floor(Math.random() * this.examples.length);\n if (!indices.has(idx)) {\n indices.add(idx);\n const example = this.examples[idx];\n if (example) {\n selectedExamples.push(example);\n }\n }\n }\n\n return selectedExamples;\n }\n\n /**\n * Runs optimization to find the best combination of few-shot examples and instructions\n */\n private async runOptimization(\n program: Readonly<AxProgram<IN, OUT>>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[],\n instructions: readonly string[],\n validationExamples: readonly AxExample[],\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<{ bestConfig: ConfigType; bestScore: number }> {\n let bestConfig: ConfigType = {\n instruction: instructions[0] || '',\n bootstrappedDemos: Math.min(1, bootstrappedDemos.length),\n labeledExamples: Math.min(1, labeledExamples.length),\n };\n let bestScore = 0;\n let stagnationRounds = 0;\n const scoreHistory: number[] = [];\n\n // Check for checkpoint resume\n let startRound = 0;\n if (this.resumeFromCheckpoint) {\n const checkpoint = await this.loadCheckpoint(\n this.resumeFromCheckpoint,\n 
options\n );\n if (checkpoint && checkpoint.optimizerType === 'MiPRO') {\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Resuming from checkpoint at round ${checkpoint.currentRound}`,\n { tags: ['optimizer', 'checkpoint'] }\n );\n }\n\n this.restoreFromCheckpoint(checkpoint);\n startRound = checkpoint.currentRound;\n bestScore = checkpoint.bestScore;\n bestConfig = (checkpoint.bestConfiguration as ConfigType) || bestConfig;\n stagnationRounds =\n checkpoint.stats.convergenceInfo?.stagnationRounds || 0;\n }\n }\n\n // Optimization loop with early stopping and checkpointing\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Running optimization trials (${this.numTrials} total)`,\n { tags: ['optimizer', 'phase'] }\n );\n }\n\n for (let i = startRound; i < this.numTrials; i++) {\n let config: ConfigType;\n\n if (this.bayesianOptimization && this.miproConfigHistory.length > 2) {\n // Use Bayesian optimization with acquisition function\n config = await this.selectConfigurationViaBayesianOptimization(\n instructions,\n bootstrappedDemos,\n labeledExamples\n );\n } else {\n // Random or round-robin selection (exploration phase)\n config = {\n instruction:\n instructions[i % instructions.length] || instructions[0] || '',\n bootstrappedDemos: Math.min(\n Math.floor(Math.random() * (bootstrappedDemos.length + 1)),\n this.maxBootstrappedDemos\n ),\n labeledExamples: Math.min(\n Math.floor(Math.random() * (labeledExamples.length + 1)),\n this.maxLabeledDemos\n ),\n };\n }\n\n const score = await this.evaluateConfig(\n program,\n config,\n bootstrappedDemos,\n labeledExamples,\n validationExamples,\n metricFn,\n i + 1 // Pass current trial number for adaptive evaluation\n );\n\n // Update surrogate model with observed score\n this.updateSurrogateModel(config, score);\n\n scoreHistory.push(score);\n\n // Check for improvement\n const improvement = score - bestScore;\n if (improvement > this.minImprovementThreshold) {\n bestScore = score;\n bestConfig = config;\n stagnationRounds = 0;\n\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Trial ${i + 1}/${this.numTrials}: New best score ${bestScore.toFixed(3)}`,\n { tags: ['optimizer', 'progress'] }\n );\n }\n } else {\n stagnationRounds++;\n }\n\n // Update optimization progress with checkpointing\n await this.updateOptimizationProgress(\n i + 1,\n score,\n config,\n 'MiPRO',\n this.getConfiguration(),\n bestScore,\n bestConfig,\n {\n stagnationRounds,\n bootstrappedDemos: bootstrappedDemos.length,\n labeledExamples: labeledExamples.length,\n instructions: instructions.length,\n },\n options\n );\n\n // Progress callback\n if (this.onProgress) {\n this.onProgress({\n round: i + 1,\n totalRounds: this.numTrials,\n currentScore: score,\n bestScore,\n tokensUsed: this.stats.resourceUsage.totalTokens,\n timeElapsed: Date.now(),\n successfulExamples: this.stats.successfulDemos,\n totalExamples: this.examples.length,\n currentConfiguration: config,\n convergenceInfo: {\n improvement,\n stagnationRounds,\n isConverging: stagnationRounds < this.earlyStoppingTrials,\n },\n });\n }\n\n // Update progress bar\n updateProgressBar(\n i + 1,\n this.numTrials,\n Math.round(bestScore * 100),\n 0,\n 'Running MIPROv2 optimization',\n 30\n );\n\n // Cost tracking check (handles token/time/cost budgets)\n if (this.checkCostLimits()) {\n this.triggerEarlyStopping('Cost limit reached', i + 1);\n break;\n }\n\n // Early stopping check\n if (stagnationRounds >= this.earlyStoppingTrials) {\n 
this.triggerEarlyStopping(\n `No improvement for ${this.earlyStoppingTrials} trials`,\n i - stagnationRounds + 1\n );\n break;\n }\n\n // Target score check\n if (this.checkTargetScore(bestScore)) {\n this.triggerEarlyStopping(\n `Target score ${this.targetScore} reached`,\n i + 1\n );\n break;\n }\n }\n\n // Update convergence info\n this.stats.convergenceInfo.stagnationRounds = stagnationRounds;\n this.stats.convergenceInfo.finalImprovement =\n scoreHistory.length > 1 ? bestScore - scoreHistory[0]! : 0;\n this.stats.convergenceInfo.converged =\n stagnationRounds < this.earlyStoppingTrials;\n\n return { bestConfig, bestScore };\n }\n\n private async evaluateConfig(\n program: Readonly<AxProgram<IN, OUT>>,\n config: Readonly<ConfigType>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[],\n validationExamples: readonly AxExample[],\n metricFn: AxMetricFn,\n currentTrial = 0\n ): Promise<number> {\n // Create a copy of the program and apply the configuration\n const testProgram = { ...program };\n this.applyConfigToProgram(\n testProgram,\n config,\n bootstrappedDemos,\n labeledExamples\n );\n\n let totalScore = 0;\n let count = 0;\n\n // Adaptive minibatch size based on paper's approach\n let evalSize: number;\n if (this.minibatch) {\n // Start with smaller batches and increase for more promising configurations\n const baseSize = Math.min(this.minibatchSize, validationExamples.length);\n\n // Use full evaluation for top configurations in later trials\n const isFullEvalTrial = currentTrial % this.minibatchFullEvalSteps === 0;\n if (isFullEvalTrial || currentTrial > this.numTrials * 0.8) {\n evalSize = Math.min(validationExamples.length, baseSize * 2);\n } else {\n // Stochastic minibatch evaluation\n evalSize = Math.max(3, Math.min(baseSize, validationExamples.length));\n }\n } else {\n evalSize = validationExamples.length;\n }\n\n // Randomly sample evaluation examples for stochastic evaluation\n const evalIndices = this.shuffleArray([\n ...Array(validationExamples.length).keys(),\n ]).slice(0, evalSize);\n const evalSet = evalIndices.map((i) => validationExamples[i]!);\n\n for (const example of evalSet) {\n try {\n const prediction = await testProgram.forward(\n this.studentAI,\n example as IN,\n this.sampleCount > 1\n ? {\n sampleCount: this.sampleCount,\n resultPicker:\n axMajorityVotePicker<OUT>() as AxResultPickerFunction<AxGenOut>,\n }\n : undefined\n );\n const score = await metricFn({ prediction, example });\n totalScore += score;\n count++;\n this.stats.totalCalls++;\n } catch {}\n }\n\n return count > 0 ? 
totalScore / count : 0;\n }\n\n /**\n * Fisher-Yates shuffle for stochastic evaluation\n */\n private shuffleArray<T>(array: T[]): T[] {\n const shuffled = [...array];\n for (let i = shuffled.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [shuffled[i], shuffled[j]] = [shuffled[j]!, shuffled[i]!];\n }\n return shuffled;\n }\n\n private applyConfigToProgram(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n program: any,\n config: Readonly<ConfigType>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[]\n ): void {\n // Set instruction if the program supports it\n if (program.setInstruction) {\n program.setInstruction(config.instruction);\n }\n\n // Set demos if needed\n if (config.bootstrappedDemos > 0 && program.setDemos) {\n program.setDemos(bootstrappedDemos.slice(0, config.bootstrappedDemos));\n }\n\n // Set examples if needed\n if (config.labeledExamples > 0 && program.setExamples) {\n program.setExamples(labeledExamples.slice(0, config.labeledExamples));\n }\n }\n\n /**\n * The main compile method to run MIPROv2 optimization\n */\n public async compile(\n program: Readonly<AxProgram<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<AxMiPROResult<IN, OUT>> {\n const startTime = Date.now();\n\n // Initialize random seed if provided\n this.setupRandomSeed();\n\n // Configure auto settings if provided (cast to access MiPRO-specific options)\n const miproOptions = options as AxMiPROCompileOptions;\n if (miproOptions?.auto) {\n this.configureAuto(miproOptions.auto);\n }\n\n // Use validation set from parent class method\n const validationExamples =\n this.getValidationSet(options) ||\n (miproOptions?.validationExamples ??\n this.examples.slice(0, Math.floor(this.examples.length * 0.2)));\n\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Starting MIPROv2 optimization with ${this.numTrials} trials`,\n { tags: ['optimizer', 'start'] }\n );\n this.getLogger(options)?.(\n `Using ${this.examples.length} examples for training and ${validationExamples.length} for validation`,\n { tags: ['optimizer', 'config'] }\n );\n if (this.teacherAI) {\n this.getLogger(options)?.(\n 'Using separate teacher model for instruction generation',\n { tags: ['optimizer', 'config'] }\n );\n }\n }\n\n // Step 1: Bootstrap few-shot examples\n let bootstrappedDemos: AxProgramDemos<IN, OUT>[] = [];\n if (this.maxBootstrappedDemos > 0) {\n bootstrappedDemos = await this.bootstrapFewShotExamples(\n program,\n metricFn\n );\n\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Generated ${bootstrappedDemos.length} bootstrapped demonstrations`,\n { tags: ['optimizer', 'result'] }\n );\n }\n }\n\n // Step 2: Select labeled examples from training set\n let labeledExamples: AxExample[] = [];\n if (this.maxLabeledDemos > 0) {\n labeledExamples = this.selectLabeledExamples();\n\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Selected ${labeledExamples.length} labeled examples from training set`,\n { tags: ['optimizer', 'result'] }\n );\n }\n }\n\n // Step 3: Generate instruction candidates\n const instructions = await this.proposeInstructionCandidates(\n program,\n options\n );\n\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Generated ${instructions.length} instruction candidates`,\n { tags: ['optimizer', 'result'] }\n );\n if (this.hasTeacherAI(options)) {\n this.getLogger(options)?.(\n 'Using teacher AI for instruction 
generation',\n { tags: ['optimizer', 'config'] }\n );\n }\n }\n\n // Step 4: Run optimization to find the best configuration\n const { bestConfig, bestScore } = await this.runOptimization(\n program,\n bootstrappedDemos,\n labeledExamples,\n instructions,\n validationExamples,\n metricFn,\n options\n );\n\n if (this.isLoggingEnabled(options)) {\n this.getLogger(options)?.(\n `Optimization complete. Best score: ${bestScore}`,\n { tags: ['optimizer', 'complete'] }\n );\n this.getLogger(options)?.(\n `Best configuration: ${JSON.stringify(bestConfig)}`,\n { tags: ['optimizer', 'result'] }\n );\n }\n\n // Check if target score was reached\n if (this.checkTargetScore(bestScore)) {\n this.triggerEarlyStopping(\n `Target score ${this.targetScore} reached with score ${bestScore}`,\n this.numTrials\n );\n }\n\n // Create a new AxGen instance with the optimized configuration\n let signature: any;\n if (\n 'getSignature' in program &&\n typeof program.getSignature === 'function'\n ) {\n signature = program.getSignature();\n } else {\n // Fallback: create a basic signature\n signature = 'input -> output';\n }\n\n const optimizedGen = new AxGen<IN, OUT>(signature);\n\n // Apply the best configuration to the new AxGen\n this.applyConfigToAxGen(\n optimizedGen,\n bestConfig,\n bootstrappedDemos,\n labeledExamples\n );\n\n // Update stats using parent class method\n this.updateResourceUsage(startTime);\n this.stats.convergenceInfo.converged = true;\n this.stats.convergenceInfo.finalImprovement = bestScore;\n\n // Save final checkpoint\n await this.saveFinalCheckpoint(\n 'MiPRO',\n this.getConfiguration(),\n bestScore,\n bestConfig,\n {\n bootstrappedDemos: bootstrappedDemos.length,\n labeledExamples: labeledExamples.length,\n instructions: instructions.length,\n optimizedGen: !!optimizedGen,\n },\n options\n );\n\n return {\n demos: bootstrappedDemos,\n stats: this.stats,\n bestScore,\n optimizedGen,\n finalConfiguration: {\n instruction: bestConfig.instruction,\n bootstrappedDemos: bestConfig.bootstrappedDemos,\n labeledExamples: bestConfig.labeledExamples,\n numCandidates: this.numCandidates,\n numTrials: this.numTrials,\n sampleCount: this.sampleCount,\n },\n };\n }\n\n /**\n * Applies a configuration to an AxGen instance\n */\n private applyConfigToAxGen(\n axgen: Readonly<AxGen<IN, OUT>>,\n config: Readonly<ConfigType>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[]\n ): void {\n // Set instruction if the AxGen supports it\n if (\n 'setInstruction' in axgen &&\n typeof axgen.setInstruction === 'function'\n ) {\n axgen.setInstruction(config.instruction);\n }\n\n // Set demos if needed\n if (config.bootstrappedDemos > 0) {\n axgen.setDemos(bootstrappedDemos.slice(0, config.bootstrappedDemos));\n }\n\n // Set examples if needed\n if (config.labeledExamples > 0) {\n axgen.setExamples(\n labeledExamples.slice(\n 0,\n config.labeledExamples\n ) as unknown as readonly (OUT & IN)[]\n );\n }\n }\n\n /**\n * Get optimizer-specific configuration\n * @returns Current optimizer configuration\n */\n public getConfiguration(): Record<string, unknown> {\n return {\n numCandidates: this.numCandidates,\n initTemperature: this.initTemperature,\n maxBootstrappedDemos: this.maxBootstrappedDemos,\n maxLabeledDemos: this.maxLabeledDemos,\n numTrials: this.numTrials,\n minibatch: this.minibatch,\n minibatchSize: this.minibatchSize,\n minibatchFullEvalSteps: this.minibatchFullEvalSteps,\n programAwareProposer: this.programAwareProposer,\n dataAwareProposer: 
this.dataAwareProposer,\n tipAwareProposer: this.tipAwareProposer,\n fewshotAwareProposer: this.fewshotAwareProposer,\n earlyStoppingTrials: this.earlyStoppingTrials,\n minImprovementThreshold: this.minImprovementThreshold,\n bayesianOptimization: this.bayesianOptimization,\n acquisitionFunction: this.acquisitionFunction,\n explorationWeight: this.explorationWeight,\n sampleCount: this.sampleCount,\n };\n }\n\n /**\n * Update optimizer configuration\n * @param config New configuration to merge with existing\n */\n public updateConfiguration(config: Readonly<Record<string, unknown>>): void {\n if (config.numCandidates !== undefined) {\n this.numCandidates = config.numCandidates as number;\n }\n if (config.initTemperature !== undefined) {\n this.initTemperature = config.initTemperature as number;\n }\n if (config.maxBootstrappedDemos !== undefined) {\n this.maxBootstrappedDemos = config.maxBootstrappedDemos as number;\n }\n if (config.maxLabeledDemos !== undefined) {\n this.maxLabeledDemos = config.maxLabeledDemos as number;\n }\n if (config.numTrials !== undefined) {\n this.numTrials = config.numTrials as number;\n }\n if (config.minibatch !== undefined) {\n this.minibatch = config.minibatch as boolean;\n }\n if (config.minibatchSize !== undefined) {\n this.minibatchSize = config.minibatchSize as number;\n }\n if (config.earlyStoppingTrials !== undefined) {\n this.earlyStoppingTrials = config.earlyStoppingTrials as number;\n }\n if (config.minImprovementThreshold !== undefined) {\n this.minImprovementThreshold = config.minImprovementThreshold as number;\n }\n if (config.sampleCount !== undefined) {\n this.sampleCount = config.sampleCount as number;\n }\n // Note: verbose is now handled by the base class and cannot be updated here\n }\n\n /**\n * Reset optimizer state for reuse with different programs\n */\n public override reset(): void {\n super.reset();\n // Reset surrogate model state\n this.miproConfigHistory = [];\n this.surrogateModel.clear();\n // Update convergence threshold after reset\n this.stats.convergenceInfo.convergenceThreshold =\n this.minImprovementThreshold;\n }\n\n /**\n * Validate that the optimizer can handle the given program\n * @param program Program to validate\n * @returns Validation result with any issues found\n */\n public override validateProgram(program: Readonly<AxProgram<IN, OUT>>): {\n isValid: boolean;\n issues: string[];\n suggestions: string[];\n } {\n // Start with base validation\n const result = super.validateProgram(program);\n\n // Add MiPRO-specific validation\n if (\n this.examples.length <\n this.maxBootstrappedDemos + this.maxLabeledDemos\n ) {\n result.issues.push(\n `Not enough examples: need at least ${\n this.maxBootstrappedDemos + this.maxLabeledDemos\n }, got ${this.examples.length}`\n );\n result.suggestions.push(\n 'Reduce maxBootstrappedDemos or maxLabeledDemos, or provide more examples'\n );\n }\n\n // Check if validation set is reasonable for MiPRO\n const validationSetSize = this.getValidationSet().length;\n if (validationSetSize < 5) {\n result.issues.push(\n 'Validation set too small for reliable MiPRO optimization'\n );\n result.suggestions.push(\n 'Provide more examples or a larger validation set'\n );\n }\n\n return {\n isValid: result.issues.length === 0,\n issues: result.issues,\n suggestions: result.suggestions,\n };\n }\n\n /**\n * Encodes a configuration into a string key for surrogate model lookup\n */\n private encodeConfiguration(config: Readonly<ConfigType>): string {\n return 
`${config.instruction.length}_${config.bootstrappedDemos}_${config.labeledExamples}`;\n }\n\n /**\n * Updates the surrogate model with a new configuration-score pair\n */\n private updateSurrogateModel(\n config: Readonly<ConfigType>,\n score: number\n ): void {\n this.miproConfigHistory.push({ config: { ...config }, score });\n\n // Simple Gaussian Process approximation for the surrogate model\n const key = this.encodeConfiguration(config);\n\n // Find similar configurations (same instruction length and demo counts)\n const similarConfigs = this.miproConfigHistory.filter(\n (entry) => this.encodeConfiguration(entry.config) === key\n );\n\n if (similarConfigs.length > 0) {\n const scores = similarConfigs.map((entry) => entry.score);\n const mean = scores.reduce((sum, s) => sum + s, 0) / scores.length;\n const variance =\n scores.length > 1\n ? scores.reduce((sum, s) => sum + (s - mean) ** 2, 0) /\n (scores.length - 1)\n : 0.1; // Default variance for single observation\n\n this.surrogateModel.set(key, { mean, variance });\n }\n }\n\n /**\n * Predicts performance using the surrogate model\n */\n private predictPerformance(config: Readonly<ConfigType>): {\n mean: number;\n variance: number;\n } {\n const key = this.encodeConfiguration(config);\n\n if (this.surrogateModel.has(key)) {\n return this.surrogateModel.get(key)!;\n }\n\n // For unseen configurations, use prior knowledge\n if (this.miproConfigHistory.length > 0) {\n // Find most similar configurations based on demo counts\n const similarities = this.miproConfigHistory.map((entry) => {\n const diff =\n Math.abs(entry.config.bootstrappedDemos - config.bootstrappedDemos) +\n Math.abs(entry.config.labeledExamples - config.labeledExamples);\n return { score: entry.score, similarity: 1 / (1 + diff) };\n });\n\n // Weighted average based on similarity\n const totalWeight = similarities.reduce(\n (sum, s) => sum + s.similarity,\n 0\n );\n const weightedMean =\n similarities.reduce((sum, s) => sum + s.score * s.similarity, 0) /\n totalWeight;\n\n return { mean: weightedMean, variance: 0.2 }; // Higher variance for unseen configs\n }\n\n // Default prior for completely unknown configurations\n return { mean: 0.5, variance: 0.3 };\n }\n\n /**\n * Calculates acquisition function value for Bayesian optimization\n */\n private calculateAcquisitionValue(config: Readonly<ConfigType>): number {\n const prediction = this.predictPerformance(config);\n const { mean, variance } = prediction;\n const std = Math.sqrt(variance);\n\n // Current best score\n const bestScore =\n this.miproConfigHistory.length > 0\n ? Math.max(...this.miproConfigHistory.map((entry) => entry.score))\n : 0;\n\n switch (this.acquisitionFunction) {\n case 'expected_improvement': {\n const improvement = mean - bestScore;\n if (std === 0) return Math.max(0, improvement);\n\n const z = improvement / std;\n const phi = 0.5 * (1 + this.erf(z / Math.sqrt(2))); // CDF of standard normal\n const pdfValue = Math.exp(-0.5 * z * z) / Math.sqrt(2 * Math.PI); // PDF of standard normal\n\n return improvement * phi + std * pdfValue;\n }\n\n case 'upper_confidence_bound': {\n return mean + this.explorationWeight * std;\n }\n\n case 'probability_improvement': {\n const improvement = mean - bestScore;\n if (std === 0) return improvement > 0 ? 
1 : 0;\n\n const z = improvement / std;\n return 0.5 * (1 + this.erf(z / Math.sqrt(2)));\n }\n\n default:\n return mean;\n }\n }\n\n /**\n * Error function approximation for acquisition function calculations\n */\n private erf(x: number): number {\n // Abramowitz and Stegun approximation\n const a1 = 0.254829592;\n const a2 = -0.284496736;\n const a3 = 1.421413741;\n const a4 = -1.453152027;\n const a5 = 1.061405429;\n const p = 0.3275911;\n\n const sign = x >= 0 ? 1 : -1;\n const absX = Math.abs(x);\n\n const t = 1.0 / (1.0 + p * absX);\n const y =\n 1.0 -\n ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) *\n t *\n Math.exp(-absX * absX);\n\n return sign * y;\n }\n\n /**\n * Selects the next configuration to evaluate using Bayesian optimization\n */\n private async selectConfigurationViaBayesianOptimization(\n instructions: readonly string[],\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[]\n ): Promise<ConfigType> {\n const candidates: Array<{ config: ConfigType; acquisitionValue: number }> =\n [];\n\n // Generate candidate configurations\n const numCandidates = Math.min(20, instructions.length * 3); // Reasonable number of candidates\n\n for (let i = 0; i < numCandidates; i++) {\n const config: ConfigType = {\n instruction:\n instructions[i % instructions.length] || instructions[0] || '',\n bootstrappedDemos: Math.min(\n Math.floor(Math.random() * (bootstrappedDemos.length + 1)),\n this.maxBootstrappedDemos\n ),\n labeledExamples: Math.min(\n Math.floor(Math.random() * (labeledExamples.length + 1)),\n this.maxLabeledDemos\n ),\n };\n\n const acquisitionValue = this.calculateAcquisitionValue(config);\n candidates.push({ config, acquisitionValue });\n }\n\n // Sort by acquisition value (higher is better)\n candidates.sort((a, b) => b.acquisitionValue - a.acquisitionValue);\n\n // Return the most promising configuration\n return candidates[0]!.config;\n }\n}\n\n// ---------------------------------------\n// Helper: Majority-vote result picker for self-consistency\n// ---------------------------------------\nconst axMajorityVotePicker = <\n OUT extends AxGenOut,\n>(): AxResultPickerFunction<OUT> => {\n // Return a picker function capturing no external state\n return async (data) => {\n // If we have field results, do majority vote on stringified payload\n if (data.type === 'fields') {\n const counts: Record<string, { count: number; index: number }> = {};\n for (const { index, sample } of data.results) {\n const key = JSON.stringify(sample);\n if (!counts[key]) {\n counts[key] = { count: 0, index };\n }\n counts[key]!.count += 1;\n }\n\n // Select the sample with highest count (ties -> first seen)\n let bestKey: string | undefined;\n let bestCount = -1;\n for (const [k, v] of Object.entries(counts)) {\n if (v.count > bestCount) {\n bestCount = v.count;\n bestKey = k;\n }\n }\n return counts[bestKey!]?.index ?? 0;\n }\n\n // For function results, fall back to first sample (could be improved)\n return data.results[0]?.index ?? 
0;\n };\n};\n","import type { AxMCPTransport } from './transport.js';\nimport type {\n JSONRPCNotification,\n JSONRPCRequest,\n JSONRPCResponse,\n} from './types.js';\n\nexport class AxMCPHTTPSSETransport implements AxMCPTransport {\n private endpoint: string | null = null;\n private sseUrl: string;\n private eventSource?: EventSource;\n\n constructor(sseUrl: string) {\n this.sseUrl = sseUrl;\n }\n\n async connect(): Promise<void> {\n return new Promise((resolve, reject) => {\n this.eventSource = new EventSource(this.sseUrl);\n\n this.eventSource.addEventListener('endpoint', (event: Event) => {\n try {\n const messageEvent = event as MessageEvent;\n const data = JSON.parse(messageEvent.data);\n if (!data.uri) {\n throw new Error('Endpoint URI missing in SSE event data');\n }\n this.endpoint = data.uri;\n resolve();\n } catch (error) {\n reject(error);\n }\n });\n\n this.eventSource.onerror = () => {\n reject(new Error('Failed to establish SSE connection'));\n };\n });\n }\n\n async send(\n message: JSONRPCRequest<unknown> | JSONRPCNotification\n ): Promise<JSONRPCResponse<unknown>> {\n if (!this.endpoint) {\n throw new Error(\n 'HTTPTransport endpoint is not initialized. Call connect() first.'\n );\n }\n\n const res = await fetch(this.endpoint, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(message),\n });\n\n if (!res.ok) {\n throw new Error(`HTTP error ${res.status}: ${res.statusText}`);\n }\n\n return res.json() as Promise<JSONRPCResponse<unknown>>;\n }\n\n async sendNotification(\n message: Readonly<JSONRPCNotification>\n ): Promise<void> {\n if (!this.endpoint) {\n throw new Error(\n 'HTTPTransport endpoint is not initialized. Call connect() first.'\n );\n }\n await fetch(this.endpoint, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(message),\n });\n }\n}\n\nexport interface AxMCPStreamableHTTPTransportOptions {\n /**\n * Custom headers to include with all HTTP requests\n * Note: Content-Type, Accept, and Mcp-Session-Id are managed automatically\n */\n headers?: Record<string, string>;\n\n /**\n * Authorization header value (convenience for common use case)\n * If provided, will be added to the headers as 'Authorization'\n */\n authorization?: string;\n}\n\n/**\n * AxMCPStreambleHTTPTransport implements the 2025-03-26 Streamable HTTP transport specification\n * This transport uses a single HTTP endpoint that supports both POST and GET methods\n */\nexport class AxMCPStreambleHTTPTransport implements AxMCPTransport {\n private mcpEndpoint: string;\n private sessionId?: string;\n private eventSource?: EventSource;\n private pendingRequests = new Map<\n string | number,\n {\n resolve: (value: JSONRPCResponse<unknown>) => void;\n reject: (reason: unknown) => void;\n }\n >();\n private messageHandler?: (\n message: JSONRPCRequest<unknown> | JSONRPCNotification\n ) => void;\n private customHeaders: Record<string, string>;\n\n constructor(\n mcpEndpoint: string,\n options?: AxMCPStreamableHTTPTransportOptions\n ) {\n this.mcpEndpoint = mcpEndpoint;\n this.customHeaders = { ...options?.headers };\n\n // Add authorization header if provided\n if (options?.authorization) {\n this.customHeaders.Authorization = options.authorization;\n }\n }\n\n /**\n * Update custom headers (useful for refreshing tokens)\n */\n setHeaders(headers: Record<string, string>): void {\n this.customHeaders = { ...headers };\n }\n\n /**\n * Update authorization header (convenience method)\n */\n setAuthorization(authorization: 
string): void {\n this.customHeaders.Authorization = authorization;\n }\n\n /**\n * Get a copy of the current custom headers\n */\n getHeaders(): Record<string, string> {\n return { ...this.customHeaders };\n }\n\n /**\n * Build headers for HTTP requests, merging custom headers with required ones\n */\n private buildHeaders(\n baseHeaders: Record<string, string>\n ): Record<string, string> {\n const headers = { ...this.customHeaders, ...baseHeaders };\n\n if (this.sessionId) {\n headers['Mcp-Session-Id'] = this.sessionId;\n }\n\n return headers;\n }\n\n /**\n * Set a handler for incoming server messages (requests/notifications)\n */\n setMessageHandler(\n handler: (message: JSONRPCRequest<unknown> | JSONRPCNotification) => void\n ): void {\n this.messageHandler = handler;\n }\n\n async connect(): Promise<void> {\n // For Streamable HTTP, connection is implicit when making requests\n // But we can optionally open a GET SSE stream for server-initiated messages\n return Promise.resolve();\n }\n\n /**\n * Opens an SSE stream to listen for server-initiated messages\n */\n async openListeningStream(): Promise<void> {\n return new Promise((resolve, reject) => {\n const headers = this.buildHeaders({\n Accept: 'text/event-stream',\n });\n\n // Note: EventSource doesn't support custom headers in standard browsers\n // For custom headers with SSE, you may need to use fetch with ReadableStream\n // or use a library that supports custom headers\n const url = new URL(this.mcpEndpoint);\n\n // If we have custom headers, we need to use fetch instead of EventSource\n if (Object.keys(this.customHeaders).length > 0) {\n this.openListeningStreamWithFetch(headers).then(resolve).catch(reject);\n return;\n }\n\n this.eventSource = new EventSource(url.toString());\n\n this.eventSource.onopen = () => {\n resolve();\n };\n\n this.eventSource.onmessage = (event) => {\n try {\n const message = JSON.parse(event.data);\n if (this.messageHandler) {\n this.messageHandler(message);\n }\n } catch (error) {\n console.error('Failed to parse SSE message:', error);\n }\n };\n\n this.eventSource.onerror = () => {\n reject(new Error('Failed to establish SSE connection'));\n };\n });\n }\n\n /**\n * Opens an SSE stream using fetch API to support custom headers\n */\n private async openListeningStreamWithFetch(\n headers: Record<string, string>\n ): Promise<void> {\n const response = await fetch(this.mcpEndpoint, {\n method: 'GET',\n headers,\n });\n\n if (!response.ok) {\n throw new Error(\n `Failed to open SSE stream: ${response.status} ${response.statusText}`\n );\n }\n\n if (!response.body) {\n throw new Error('No response body available for SSE stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n const processStream = async (): Promise<void> => {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n reader.releaseLock();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || ''; // Keep incomplete line in buffer\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const data = line.slice(6); // Remove 'data: ' prefix\n if (data === '[DONE]') {\n return;\n }\n\n try {\n const message = JSON.parse(data);\n if (this.messageHandler) {\n this.messageHandler(message);\n }\n } catch (error) {\n console.error('Failed to parse SSE data:', error);\n }\n }\n }\n\n // Continue reading\n await processStream();\n } catch (error) {\n reader.releaseLock();\n throw 
error;\n }\n };\n\n await processStream();\n }\n\n async send(\n message: Readonly<JSONRPCRequest<unknown>>\n ): Promise<JSONRPCResponse<unknown>> {\n const headers = this.buildHeaders({\n 'Content-Type': 'application/json',\n Accept: 'application/json, text/event-stream',\n });\n\n const response = await fetch(this.mcpEndpoint, {\n method: 'POST',\n headers,\n body: JSON.stringify(message),\n });\n\n if (!response.ok) {\n if (response.status === 404 && this.sessionId) {\n // Session expired, clear it\n this.sessionId = undefined;\n throw new Error('Session expired. Please reinitialize.');\n }\n throw new Error(`HTTP error ${response.status}: ${response.statusText}`);\n }\n\n // Check if this is the initialization response with session ID\n const sessionIdHeader = response.headers.get('Mcp-Session-Id');\n if (sessionIdHeader) {\n this.sessionId = sessionIdHeader;\n }\n\n const contentType = response.headers.get('Content-Type');\n\n if (contentType?.includes('text/event-stream')) {\n // Handle SSE response\n return this.handleSSEResponse(response, message.id);\n }\n if (contentType?.includes('application/json')) {\n // Handle JSON response\n return response.json() as Promise<JSONRPCResponse<unknown>>;\n }\n throw new Error(`Unexpected content type: ${contentType}`);\n }\n\n private async handleSSEResponse(\n response: Response,\n requestId: string | number\n ): Promise<JSONRPCResponse<unknown>> {\n return new Promise((resolve, reject) => {\n const reader = response.body?.getReader();\n if (!reader) {\n reject(new Error('No response body reader available'));\n return;\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n\n const processChunk = async (): Promise<void> => {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n reader.releaseLock();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || ''; // Keep incomplete line in buffer\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const data = line.slice(6); // Remove 'data: ' prefix\n if (data === '[DONE]') {\n return;\n }\n\n try {\n const message = JSON.parse(data);\n\n // Check if this is the response to our request\n if ('id' in message && message.id === requestId) {\n resolve(message as JSONRPCResponse<unknown>);\n return;\n }\n\n // Handle other messages (server requests/notifications)\n if (this.messageHandler) {\n this.messageHandler(message);\n }\n } catch (error) {\n console.error('Failed to parse SSE data:', error);\n }\n }\n }\n\n // Continue reading\n await processChunk();\n } catch (error) {\n reader.releaseLock();\n reject(error);\n }\n };\n\n processChunk().catch(reject);\n });\n }\n\n async sendNotification(\n message: Readonly<JSONRPCNotification>\n ): Promise<void> {\n const headers = this.buildHeaders({\n 'Content-Type': 'application/json',\n Accept: 'application/json, text/event-stream',\n });\n\n const response = await fetch(this.mcpEndpoint, {\n method: 'POST',\n headers,\n body: JSON.stringify(message),\n });\n\n if (!response.ok) {\n if (response.status === 404 && this.sessionId) {\n // Session expired, clear it\n this.sessionId = undefined;\n throw new Error('Session expired. 
Please reinitialize.');\n }\n throw new Error(`HTTP error ${response.status}: ${response.statusText}`);\n }\n\n // For notifications, we expect 202 Accepted with no body\n if (response.status !== 202) {\n console.warn(`Unexpected status for notification: ${response.status}`);\n }\n }\n\n /**\n * Explicitly terminate the session (if supported by server)\n */\n async terminateSession(): Promise<void> {\n if (!this.sessionId) {\n return;\n }\n\n try {\n const headers = this.buildHeaders({});\n\n const response = await fetch(this.mcpEndpoint, {\n method: 'DELETE',\n headers,\n });\n\n if (response.status === 405) {\n // Server doesn't support explicit session termination\n console.info('Server does not support explicit session termination');\n }\n } catch (error) {\n console.error('Failed to terminate session:', error);\n } finally {\n this.sessionId = undefined;\n }\n }\n\n /**\n * Close any open connections\n */\n close(): void {\n if (this.eventSource) {\n this.eventSource.close();\n this.eventSource = undefined;\n }\n }\n}\n","import type {\n AxAIModelList,\n AxAIService,\n AxFunction,\n AxFunctionHandler,\n AxFunctionJSONSchema,\n} from '../ai/types.js';\nimport type { AxInputFunctionType } from '../dsp/functions.js';\nimport { AxGen } from '../dsp/generate.js';\nimport type {\n AxGenStreamingOut,\n AxProgram,\n AxProgramDemos,\n AxProgramExamples,\n AxProgramForwardOptions,\n AxProgramStreamingForwardOptions,\n AxSetExamplesOptions,\n AxTunable,\n AxUsable,\n} from '../dsp/program.js';\nimport type { AxSignature } from '../dsp/sig.js';\nimport type { AxGenIn, AxGenOut, AxMessage } from '../dsp/types.js';\n\n/**\n * Interface for agents that can be used as child agents.\n * Provides methods to get the agent's function definition and features.\n */\nexport interface AxAgentic<IN extends AxGenIn, OUT extends AxGenOut>\n extends AxTunable<IN, OUT>,\n AxUsable {\n getFunction(): AxFunction;\n getFeatures(): AxAgentFeatures;\n}\n\nexport type AxAgentOptions = Omit<AxProgramForwardOptions, 'functions'> & {\n disableSmartModelRouting?: boolean;\n /** List of field names that should not be automatically passed from parent to child agents */\n excludeFieldsFromPassthrough?: string[];\n debug?: boolean;\n};\n\nexport interface AxAgentFeatures {\n /** Whether this agent can use smart model routing (requires an AI service) */\n canConfigureSmartModelRouting: boolean;\n /** List of fields that this agent excludes from parent->child value passing */\n excludeFieldsFromPassthrough: string[];\n}\n\n/**\n * Processes a child agent's function, applying model routing and input injection as needed.\n * Handles both the schema modifications and function wrapping.\n */\nfunction processChildAgentFunction<IN extends AxGenIn>(\n childFunction: Readonly<AxFunction>,\n parentValues: IN | AxMessage<IN>[],\n parentInputKeys: string[],\n modelList: AxAIModelList | undefined,\n options: Readonly<{\n debug: boolean;\n disableSmartModelRouting: boolean;\n excludeFieldsFromPassthrough: string[];\n canConfigureSmartModelRouting: boolean;\n }>\n): AxFunction {\n const processedFunction = { ...childFunction };\n\n // Process input field injection\n if (processedFunction.parameters) {\n const childKeys = processedFunction.parameters.properties\n ? 
Object.keys(processedFunction.parameters.properties)\n : [];\n\n // Find common keys between parent and child, excluding 'model' and specified exclusions\n const commonKeys = parentInputKeys\n .filter((key) => childKeys.includes(key))\n .filter((key) => key !== 'model');\n const injectionKeys = commonKeys.filter(\n (key) => !options.excludeFieldsFromPassthrough.includes(key)\n );\n\n if (injectionKeys.length > 0) {\n // Remove injected fields from child schema\n processedFunction.parameters = removePropertiesFromSchema(\n processedFunction.parameters,\n injectionKeys\n );\n\n // Wrap function to inject parent values\n const originalFunc = processedFunction.func;\n // add debug logging if enabled\n processedFunction.func = async (childArgs, funcOptions) => {\n // Extract values from parentValues - handle both IN and AxMessage<IN>[] cases\n let valuesToInject: Partial<IN> = {};\n if (Array.isArray(parentValues)) {\n // If parentValues is an array of messages, find the most recent user message\n const lastUserMessage = parentValues\n .filter((msg) => msg.role === 'user')\n .pop();\n if (lastUserMessage) {\n valuesToInject = pick(\n lastUserMessage.values,\n injectionKeys as (keyof IN)[]\n );\n }\n } else {\n // If parentValues is a single IN object\n valuesToInject = pick(parentValues, injectionKeys as (keyof IN)[]);\n }\n\n const updatedChildArgs = {\n ...childArgs,\n ...valuesToInject,\n };\n\n if (options.debug && injectionKeys.length > 0) {\n const ai = funcOptions?.ai;\n if (ai) {\n const logger = ai.getLogger();\n logger(\n `Function Params: ${JSON.stringify(updatedChildArgs, null, 2)}`,\n { tags: ['functionArg'] }\n );\n }\n }\n\n return await originalFunc(updatedChildArgs, funcOptions);\n };\n }\n\n return processedFunction;\n }\n\n // Apply smart model routing if enabled\n if (\n modelList &&\n !options.disableSmartModelRouting &&\n options.canConfigureSmartModelRouting\n ) {\n processedFunction.parameters = addModelParameter(\n processedFunction.parameters,\n modelList\n );\n }\n\n return processedFunction;\n}\n\nconst descriptionError = new Error(\n 'Agent description must be at least 20 characters (explain in detail what the agent does)'\n);\n\nconst definitionError = new Error(\n 'Agent definition is the prompt you give to the LLM for the agent. It must be detailed and at least 100 characters'\n);\n\n/**\n * An AI agent that can process inputs using an AI service and coordinate with child agents.\n * Supports features like smart model routing and automatic input field passing to child agents.\n */\nexport class AxAgent<IN extends AxGenIn, OUT extends AxGenOut>\n implements AxAgentic<IN, OUT>\n{\n private ai?: AxAIService;\n private program: AxProgram<IN, OUT>;\n private functions?: AxInputFunctionType;\n private agents?: AxAgentic<IN, OUT>[];\n private disableSmartModelRouting?: boolean;\n private excludeFieldsFromPassthrough: string[];\n private debug?: boolean;\n\n private name: string;\n // private subAgentList?: string\n private func: AxFunction;\n\n constructor(\n {\n ai,\n name,\n description,\n definition,\n signature,\n agents,\n functions,\n }: Readonly<{\n ai?: Readonly<AxAIService>;\n name: string;\n description: string;\n definition?: string;\n signature: NonNullable<ConstructorParameters<typeof AxSignature>[0]>;\n agents?: AxAgentic<IN, OUT>[];\n functions?: AxInputFunctionType;\n }>,\n options?: Readonly<AxAgentOptions>\n ) {\n const { disableSmartModelRouting, excludeFieldsFromPassthrough, debug } =\n options ?? 
{};\n\n this.ai = ai;\n this.agents = agents;\n this.functions = functions;\n this.disableSmartModelRouting = disableSmartModelRouting;\n this.excludeFieldsFromPassthrough = excludeFieldsFromPassthrough ?? [];\n this.debug = debug;\n\n if (!name || name.length < 5) {\n throw new Error(\n 'Agent name must be at least 10 characters (more descriptive)'\n );\n }\n\n if (!description || description.length < 20) {\n throw descriptionError;\n }\n\n if (definition && definition.length < 100) {\n throw definitionError;\n }\n\n this.program = new AxGen<IN, OUT>(signature, {\n ...options,\n description: definition ?? description,\n });\n\n for (const agent of agents ?? []) {\n this.program.register(\n agent as unknown as Readonly<AxTunable<IN, OUT> & AxUsable>\n );\n }\n\n this.name = name;\n // this.subAgentList = agents?.map((a) => a.getFunction().name).join(', ')\n\n this.func = {\n name: toCamelCase(this.name),\n description,\n parameters: this.program.getSignature().toJSONSchema(),\n func: () => this.forward,\n };\n\n const mm = ai?.getModelList();\n // Only add model parameter if smart routing is enabled and model list exists\n if (mm && !this.disableSmartModelRouting) {\n this.func.parameters = addModelParameter(this.func.parameters, mm);\n }\n }\n\n public setExamples(\n examples: Readonly<AxProgramExamples<IN, OUT>>,\n options?: Readonly<AxSetExamplesOptions>\n ) {\n this.program.setExamples(examples, options);\n }\n\n public setId(id: string) {\n this.program.setId(id);\n }\n\n public setParentId(parentId: string) {\n this.program.setParentId(parentId);\n }\n\n public getTraces() {\n return this.program.getTraces();\n }\n\n public setDemos(demos: readonly AxProgramDemos<IN, OUT>[]) {\n this.program.setDemos(demos);\n }\n\n public getUsage() {\n return this.program.getUsage();\n }\n\n public resetUsage() {\n this.program.resetUsage();\n }\n\n public getFunction(): AxFunction {\n const boundFunc = this.forward.bind(this);\n\n // Create a wrapper function that excludes the 'ai' parameter\n const wrappedFunc: AxFunctionHandler = async (\n valuesAndModel: IN & { model: string },\n options?\n ): Promise<string> => {\n const { model, ...values } = valuesAndModel;\n\n const ai = this.ai ?? options?.ai;\n if (!ai) {\n throw new Error('AI service is required to run the agent');\n }\n const debug = this.getDebug(ai, options);\n\n if (debug) {\n const logger = ai.getLogger();\n logger(`🤖 Agent ${this.name} starting...`, {\n tags: ['start'],\n });\n }\n\n const ret = await boundFunc(ai, values as unknown as IN, {\n ...options,\n model,\n });\n\n if (debug) {\n const logger = ai.getLogger();\n logger(`🤖 Agent ${this.name} completed.`, { tags: ['end'] });\n }\n\n const sig = this.program.getSignature();\n const outFields = sig.getOutputFields();\n const result = Object.keys(ret)\n .map((k) => {\n const field = outFields.find((f) => f.name === k);\n if (field) {\n return `${field.title}: ${ret[k]}`;\n }\n return `${k}: ${ret[k]}`;\n })\n .join('\\n');\n\n return result;\n };\n\n return {\n ...this.func,\n func: wrappedFunc,\n };\n }\n\n public getFeatures(): AxAgentFeatures {\n return {\n canConfigureSmartModelRouting: this.ai === undefined,\n excludeFieldsFromPassthrough: this.excludeFieldsFromPassthrough,\n };\n }\n\n /**\n * Initializes the agent's execution context, processing child agents and their functions.\n */\n private init(\n parentAi: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options: Readonly<AxProgramForwardOptions> | undefined\n ) {\n const ai = this.ai ?? 
parentAi;\n const mm = ai?.getModelList();\n\n // Get parent's input schema and keys\n const parentSchema = this.program.getSignature().getInputFields();\n const parentKeys = parentSchema.map((p) => p.name);\n const debug = this.getDebug(ai, options);\n\n // Process each child agent's function\n const agentFuncs = this.agents?.map((agent) => {\n const f = agent.getFeatures();\n\n const processOptions = {\n debug,\n disableSmartModelRouting: !!this.disableSmartModelRouting,\n excludeFieldsFromPassthrough: f.excludeFieldsFromPassthrough,\n canConfigureSmartModelRouting: f.canConfigureSmartModelRouting,\n };\n\n return processChildAgentFunction(\n agent.getFunction(),\n values,\n parentKeys,\n mm,\n processOptions\n );\n });\n\n // Combine all functions\n const functions: AxInputFunctionType = [\n ...(options?.functions ?? this.functions ?? []),\n ...(agentFuncs ?? []),\n ];\n\n return { ai, functions, debug };\n }\n\n public async forward(\n parentAi: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options?: Readonly<AxProgramForwardOptions>\n ): Promise<OUT> {\n const { ai, functions, debug } = this.init(parentAi, values, options);\n return await this.program.forward(ai, values, {\n ...options,\n debug,\n functions,\n });\n }\n\n public async *streamingForward(\n parentAi: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options?: Readonly<AxProgramStreamingForwardOptions>\n ): AxGenStreamingOut<OUT> {\n const { ai, functions, debug } = this.init(parentAi, values, options);\n return yield* this.program.streamingForward(ai, values, {\n ...options,\n debug,\n functions,\n });\n }\n\n /**\n * Updates the agent's description.\n * This updates both the stored description and the function's description.\n *\n * @param description - New description for the agent (must be at least 20 characters)\n * @throws Error if description is too short\n */\n public setDescription(description: string): void {\n if (!description || description.length < 20) {\n throw descriptionError;\n }\n\n this.program.getSignature().setDescription(description);\n this.func.description = description;\n }\n\n public setDefinition(definition: string): void {\n if (!definition || definition.length < 100) {\n throw definitionError;\n }\n\n this.program.getSignature().setDescription(definition);\n }\n\n private getDebug(\n ai: AxAIService,\n options?: Readonly<AxProgramForwardOptions>\n ): boolean {\n return options?.debug ?? this.debug ?? ai?.getOptions()?.debug ?? 
false;\n }\n}\n\nfunction toCamelCase(inputString: string): string {\n // Split the string by any non-alphanumeric character (including underscores, spaces, hyphens)\n const words = inputString.split(/[^a-zA-Z0-9]/);\n\n // Map through each word, capitalize the first letter of each word except the first word\n const camelCaseString = words\n .map((word, index) => {\n // Lowercase the word to handle cases like uppercase letters in input\n const lowerWord = word.toLowerCase();\n\n // Capitalize the first letter of each word except the first one\n if (index > 0 && lowerWord && lowerWord[0]) {\n return lowerWord[0].toUpperCase() + lowerWord.slice(1);\n }\n\n return lowerWord;\n })\n .join('');\n\n return camelCaseString;\n}\n\n/**\n * Adds a required model parameter to a JSON Schema definition based on provided model mappings.\n * The model parameter will be an enum with values from the model map keys.\n *\n * @param parameters - The original JSON Schema parameters definition (optional)\n * @param models - Array of model mappings containing keys, model names and descriptions\n * @returns Updated JSON Schema with added model parameter\n */\nexport function addModelParameter(\n parameters: AxFunctionJSONSchema | undefined,\n models: AxAIModelList\n): AxFunctionJSONSchema {\n // If parameters is undefined, create a base schema\n const baseSchema: AxFunctionJSONSchema = parameters\n ? structuredClone(parameters)\n : {\n type: 'object',\n properties: {},\n required: [],\n };\n\n // Check if model parameter already exists\n if (baseSchema.properties?.model) {\n return baseSchema;\n }\n\n // Create the model property schema\n const modelProperty: AxFunctionJSONSchema & {\n enum: string[];\n description: string;\n } = {\n type: 'string',\n enum: models.map((m) => m.key),\n description: `The AI model to use for this function call. Available options: ${models\n .map((m) => `\\`${m.key}\\` ${m.description}`)\n .join(', ')}`,\n };\n\n // Create new properties object with model parameter\n const newProperties = {\n ...(baseSchema.properties ?? {}),\n model: modelProperty,\n };\n\n // Add model to required fields\n const newRequired = [...(baseSchema.required ?? []), 'model'];\n\n // Return updated schema\n return {\n ...baseSchema,\n properties: newProperties,\n required: newRequired,\n };\n}\n\n// New helper: removePropertiesFromSchema\n// Clones a JSON schema and removes properties and required fields matching the provided keys.\nfunction removePropertiesFromSchema(\n schema: Readonly<AxFunctionJSONSchema>,\n keys: string[]\n): AxFunctionJSONSchema {\n const newSchema = structuredClone(schema);\n if (newSchema.properties) {\n for (const key of keys) {\n delete newSchema.properties[key];\n }\n }\n if (Array.isArray(newSchema.required)) {\n const filteredRequired = newSchema.required.filter(\n (r: string) => !keys.includes(r)\n );\n Object.defineProperty(newSchema, 'required', {\n value: filteredRequired,\n writable: true,\n configurable: true,\n });\n }\n return newSchema;\n}\n\n// New helper: pick\n// Returns an object composed of the picked object properties.\nfunction pick<T extends object, K extends keyof T>(\n obj: T,\n keys: K[]\n): Pick<T, K> {\n const result = {} as Pick<T, K>;\n for (const key of keys) {\n if (key in obj) {\n result[key] = obj[key];\n }\n }\n return result;\n}\n"]}
+ {"version":3,"sources":["../dsp/modelinfo.ts","../util/crypto.ts","../util/sse.ts","../util/stream.ts","../util/apicall.ts","../dsp/loggers.ts","../trace/trace.ts","../ai/base.ts","../ai/huggingface/api.ts","../ai/mistral/types.ts","../ai/openai/responses_api.ts","../ai/reka/info.ts","../db/weaviate.ts","../docs/manager.ts","../mem/memory.ts","../dsp/jsonschema.ts","../dsp/functions.ts","../dsp/util.ts","../dsp/extract.ts","../dsp/processResponse.ts","../dsp/parser.ts","../dsp/prompt.ts","../dsp/samples.ts","../dsp/classifier.ts","../dsp/stopwords.ts","../dsp/evaluate.ts","../dsp/optimizerLogging.ts","../dsp/optimizer.ts","../dsp/optimizers/miproV2.ts","../mcp/httpTransport.ts","../prompts/agent.ts"],"names":["getModelInfo","model","modelInfo","models","modelEntry","v","mappedModel","exactMatch","normalizedName","normalizedMatch","webCrypto","randomUUID","sha256","data","encoder","inputData","hashBuffer","b","Hash","chunk","encoding","hash","i","char","createHash","algorithm","SSEParser","options","controller","error","rawData","lines","line","colonIndex","field","value","retryValue","parsedData","e","TextDecodeTransformer","text","TextDecoderStreamPolyfill","defaultRetryConfig","defaultTimeoutMs","textDecoderStream","AxAIServiceError","message","url","requestBody","responseBody","context","resultItems"],"mappings":"AAQO,o5CAASA,CAAAA,CAAuD,CACrE,KAAA,CAAAC,CAAAA,CACA,SAAA,CAAAC,CAAAA,CACA,MAAA,CAAAC,CACF,CAAA,CAEiC,CAE/B,IAAMC,CAAAA,iBAAaD,CAAAA,6BAAQ,IAAA,mBAAME,CAAAA,EAAMA,CAAAA,CAAE,GAAA,GAAQJ,CAAK,GAAA,CAChDK,CAAAA,CACJF,CAAAA,EAAc,OAAA,GAAWA,CAAAA,CACpBA,CAAAA,CAAW,KAAA,CACXH,CAAAA,CAGDM,CAAAA,CAAaL,CAAAA,CAAU,IAAA,CAAMG,CAAAA,EAAMA,CAAAA,CAAE,IAAA,GAASJ,CAAK,CAAA,CACzD,EAAA,CAAIM,CAAAA,CAAY,OAAOA,CAAAA,CAGvB,IAAMC,CAAAA,CAAiBF,CAAAA,CAEpB,OAAA,CAAQ,yBAAA,CAA2B,EAAE,CAAA,CAErC,OAAA,CAAQ,UAAA,CAAY,EAAE,CAAA,CACtB,OAAA,CAAQ,SAAA,CAAW,EAAE,CAAA,CACrB,OAAA,CAAQ,YAAA,CAAc,EAAE,CAAA,CACxB,OAAA,CAAQ,SAAA,CAAW,EAAE,CAAA,CACrB,OAAA,CAAQ,2BAAA,CAA6B,EAAE,CAAA,CACvC,OAAA,CAAQ,cAAA,CAAgB,EAAE,CAAA,CAC1B,OAAA,CAAQ,QAAA,CAAU,EAAE,CAAA,CAGjBG,CAAAA,CAAkBP,CAAAA,CAAU,IAAA,CAAMG,CAAAA,EAAMA,CAAAA,CAAE,IAAA,GAASG,CAAc,CAAA,CACvE,OAAIC,CAAAA,EAGG,IACT,CCvCA,IAAMC,EAAAA,CAAAA,CAAa,CAAA,CAAA,EAAM,CACvB,EAAA,CAAI,UAAA,CAAW,MAAA,EAAU,OAAO,UAAA,CAAW,MAAA,CAAO,UAAA,EAAe,UAAA,CAC/D,OAAO,UAAA,CAAW,MAAA,CAGpB,MAAM,IAAI,KAAA,CACR,+FACF,CACF,CAAA,CAAA,CAAG,CAAA,CAMI,SAASC,CAAAA,CAAAA,CAAqB,CACnC,OAAOD,EAAAA,CAAU,UAAA,CAAW,CAC9B,CAOA,MAAA,SAAsBE,EAAAA,CAAOC,CAAAA,CAA6C,CACxE,IAAMC,CAAAA,CAAU,IAAI,WAAA,CACdC,CAAAA,CAAY,OAAOF,CAAAA,EAAS,QAAA,CAAWC,CAAAA,CAAQ,MAAA,CAAOD,CAAI,CAAA,CAAIA,CAAAA,CAE9DG,CAAAA,CAAa,MAAMN,EAAAA,CAAU,MAAA,CAAO,MAAA,CAAO,SAAA,CAAWK,CAAS,CAAA,CAMrE,OALkB,KAAA,CAAM,IAAA,CAAK,IAAI,UAAA,CAAWC,CAAU,CAAC,CAAA,CAEpD,GAAA,CAAKC,CAAAA,EAAMA,CAAAA,CAAE,QAAA,CAAS,EAAE,CAAA,CAAE,QAAA,CAAS,CAAA,CAAG,GAAG,CAAC,CAAA,CAC1C,IAAA,CAAK,EAAE,CAGZ,CAMO,IAAMC,EAAAA,WAAN,KAAW,qEACR,IAAA,CAAO,GAAA,MAEf,CAAOC,CAAAA,CAAqB,CAC1B,OAAA,IAAA,CAAK,IAAA,EAAQA,CAAAA,CACN,IACT,CAEA,MAAA,CAAOC,CAAAA,CAAyB,CAC9B,EAAA,CAAIA,CAAAA,GAAa,KAAA,CACf,MAAM,IAAI,KAAA,CAAM,gCAAgC,CAAA,CAMlD,IAAML,CAAAA,CADU,IAAI,WAAA,CAAY,CAAA,CACN,MAAA,CAAO,IAAA,CAAK,IAAI,CAAA,CAEtCM,CAAAA,CAAO,CAAA,CACX,GAAA,CAAA,IAASC,CAAAA,CAAI,CAAA,CAAGA,CAAAA,CAAIP,CAAAA,CAAU,MAAA,CAAQO,CAAAA,EAAAA,CAAK,CACzC,IAAMC,CAAAA,CAAOR,CAAAA,CAAUO,CAAC,CAAA,CACxBD,CAAAA,CAAAA,CAAQA,CAAAA,EAAQ,CAAA,CAAA,CAAKA,CAAAA,CAAOE,CAAAA,CAC5BF,CAAAA,CAAOA,CAAAA,CAAOA,CAChB,CAGA,OAAO,IAAA,CAAK,GAAA,CAAIA,CAAI,CAAA,CAAE,QAAA,CAAS,EAAE,CAAA,CAAE,QAAA,CAAS,CAAA,CAAG,GAAG,CACpD,CAEA,MAAM,
WAAA,CAAA,CAA+B,CACnC,OAAOT,EAAAA,CAAO,IAAA,CAAK,IAAI,CACzB,CACF,UAAA,CAOO,SAASY,EAAAA,CAAWC,CAAAA,CAAyB,CAClD,EAAA,CAAIA,CAAAA,GAAc,QAAA,CAChB,MAAM,IAAI,KAAA,CAAM,qCAAqC,CAAA,CAEvD,OAAO,IAAIP,EACb,CC5EO,IAAMQ,EAAAA,YAAN,MAAA,QAAqC,eAA2B,iBAC7D,MAAA,CAAS,GAAA,gBACT,YAAA,CAAkC,CAAE,OAAA,CAAS,EAAG,EAAA,WAIxD,CAAYC,CAAAA,CAA+B,CAAC,CAAA,CAAG,CAC7C,KAAA,CAAM,CACJ,SAAA,CAAW,CAACR,CAAAA,CAAOS,CAAAA,CAAAA,EAAe,IAAA,CAAK,WAAA,CAAYT,CAAAA,CAAOS,CAAU,CAAA,CACpE,KAAA,CAAQA,CAAAA,EAAe,IAAA,CAAK,WAAA,CAAYA,CAAU,CACpD,CAAC,4EAAA,CAED,IAAA,CAAK,UAAA,CAAaD,CAAAA,CAAQ,UAAA,EAAc,IAAA,CAAK,KAAA,CAC7C,IAAA,CAAK,OAAA,CACHA,CAAAA,CAAQ,OAAA,EAAA,CACP,CAACE,CAAAA,CAAOC,CAAAA,CAAAA,EAAY,CACnB,OAAA,CAAQ,IAAA,CAAK,6BAAA,CAA+BD,CAAK,CAAA,CACjD,OAAA,CAAQ,GAAA,CAAI,gCAAA,CAAkCC,CAAO,CACvD,CAAA,CACJ,CAEQ,WAAA,CACNX,CAAAA,CACAS,CAAAA,CACM,CACN,IAAA,CAAK,MAAA,EAAUT,CAAAA,CACf,IAAA,CAAK,aAAA,CAAcS,CAAU,CAC/B,CAEQ,WAAA,CAAYA,CAAAA,CAAuD,CACzE,IAAA,CAAK,aAAA,CAAcA,CAAU,CAAA,CACzB,IAAA,CAAK,YAAA,CAAa,OAAA,EACpB,IAAA,CAAK,YAAA,CAAaA,CAAU,CAEhC,CAEQ,aAAA,CAAcA,CAAAA,CAAuD,CAG3E,IAAMG,CAAAA,CADmB,IAAA,CAAK,MAAA,CAAO,OAAA,CAAQ,UAAA,CAAY,CAAA;AAAA,CAAI,CAAA,CAC9B,KAAA,CAAM,CAAA;AAAA,CAAI,CAAA,CACzC,IAAA,CAAK,MAAA,CAASA,CAAAA,CAAM,GAAA,CAAI,CAAA,EAAK,EAAA,CAE7B,GAAA,CAAA,IAAWC,EAAAA,GAAQD,CAAAA,CACbC,CAAAA,GAAS,EAAA,CACX,IAAA,CAAK,YAAA,CAAaJ,CAAU,CAAA,CAE5B,IAAA,CAAK,SAAA,CAAUI,CAAI,CAGzB,CAEQ,SAAA,CAAUA,CAAAA,CAAoB,CACpC,EAAA,CAAIA,CAAAA,CAAK,UAAA,CAAW,GAAG,CAAA,CACrB,MAAA,CAGF,IAAMC,CAAAA,CAAaD,CAAAA,CAAK,OAAA,CAAQ,GAAG,CAAA,CACnC,EAAA,CAAIC,CAAAA,GAAe,CAAA,CAAA,CAAI,CACrB,IAAA,CAAK,YAAA,CAAa,OAAA,EAAA,CACf,IAAA,CAAK,YAAA,CAAa,OAAA,EAAW,CAAC,IAAA,CAAK,YAAA,CAAa,OAAA,CAAQ,QAAA,CAAS,CAAA;AAAA,CAAI,CAAA,CAClE,CAAA;AAAA,CAAA,CACA,EAAA,CAAA,CAAMD,CAAAA,CAAK,IAAA,CAAK,CAAA,CACtB,MACF,CAEA,IAAME,CAAAA,CAAQF,CAAAA,CAAK,KAAA,CAAM,CAAA,CAAGC,CAAU,CAAA,CAAE,IAAA,CAAK,CAAA,CACvCE,CAAAA,CAAQH,CAAAA,CAAK,KAAA,CAAMC,CAAAA,CAAa,CAAC,CAAA,CAAE,IAAA,CAAK,CAAA,CAE9C,MAAA,CAAQC,CAAAA,CAAO,CACb,IAAK,OAAA,CACH,IAAA,CAAK,YAAA,CAAa,KAAA,CAAQC,CAAAA,CAC1B,KAAA,CACF,IAAK,MAAA,CACH,IAAA,CAAK,YAAA,CAAa,OAAA,EAAA,CACf,IAAA,CAAK,YAAA,CAAa,OAAA,EACnB,CAAC,IAAA,CAAK,YAAA,CAAa,OAAA,CAAQ,QAAA,CAAS,CAAA;AAAA,CAAI,CAAA,CACpC,CAAA;AAAA,CAAA,CACA,EAAA,CAAA,CAAMA,CAAAA,CACZ,KAAA,CACF,IAAK,IAAA,CACH,IAAA,CAAK,YAAA,CAAa,EAAA,CAAKA,CAAAA,CACvB,KAAA,CACF,IAAK,OAAA,CAAS,CACZ,IAAMC,CAAAA,CAAa,MAAA,CAAO,QAAA,CAASD,CAAAA,CAAO,EAAE,CAAA,CACvC,MAAA,CAAO,KAAA,CAAMC,CAAU,CAAA,EAAA,CAC1B,IAAA,CAAK,YAAA,CAAa,KAAA,CAAQA,CAAAA,CAAAA,CAE5B,KACF,CACF,CACF,CAEQ,YAAA,CAAaR,CAAAA,CAAuD,CAC1E,EAAA,CAAI,IAAA,CAAK,YAAA,CAAa,OAAA,CAAS,CAK7B,EAAA,CAJK,IAAA,CAAK,YAAA,CAAa,KAAA,EAAA,CACrB,IAAA,CAAK,YAAA,CAAa,KAAA,CAAQ,SAAA,CAAA,CAGxB,IAAA,CAAK,YAAA,CAAa,OAAA,CAAQ,IAAA,CAAK,CAAA,GAAM,QAAA,CAAU,CAIjD,IAAA,CAAK,YAAA,CAAe,CAAE,OAAA,CAAS,EAAG,CAAA,CAClC,MACF,CAEA,GAAI,CACF,IAAMS,CAAAA,CAAgB,IAAA,CAAK,UAAA,CAAW,IAAA,CAAK,YAAA,CAAa,OAAO,CAAA,CAC/DT,CAAAA,CAAW,OAAA,CAAQS,CAAU,CAC/B,CAAA,KAAA,CAASC,CAAAA,CAAG,CACV,IAAA,CAAK,OAAA,CAAQA,CAAAA,CAAY,IAAA,CAAK,YAAA,CAAa,OAAO,CACpD,CAEA,IAAA,CAAK,YAAA,CAAe,CAAE,OAAA,CAAS,EAAG,CACpC,CACF,CACF,WAAA,CC1HA,IAAMC,EAAAA,CAAN,KAEA,CACU,WAER,CAAA,CAAc,CACZ,IAAA,CAAK,OAAA,CAAU,IAAI,WACrB,CAEA,SAAA,CACEpB,CAAAA,CACAS,CAAAA,CACA,CACA,EAAA,CAAI,CAAA,CAAET,EAAAA,WAAiB,WAAA,EAAe,WAAA,CAAY,MAAA,CAAOA,CAAK,CAAA,CAAA,CAC5D,MAAM,IAAI,SAAA,CAAU,mCAAmC,CAAA,CAEzD,IAAMqB,CAAAA,CAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAOrB,CAAAA,CAAO,CAAE,MAAA,CAAQ,CAAA,CAAK,CAAC,CAAA,CACpDqB,CAAAA,CAAK,MAAA,GAAW,CAAA,EAClBZ,CAAAA,CAAW,OAAA,CAAQY,CAAI,CAE3B,CAEA,KAAA,CAAMZ,CAAAA,CAAsD,CAC1D,IAAMY,CAAAA,CAAO,
IAAA,CAAK,OAAA,CAAQ,MAAA,CAAO,CAAA,CAC7BA,CAAAA,CAAK,MAAA,GAAW,CAAA,EAClBZ,CAAAA,CAAW,OAAA,CAAQY,CAAI,CAE3B,CACF,CAAA,CAEaC,EAAAA,CAAN,MAAA,QAAwC,eAG7C,CACA,WAAA,CAAA,CAAc,CACZ,KAAA,CAAM,IAAIF,EAAuB,CACnC,CACF,CAAA,CCaO,IAAMG,EAAAA,CAAkC,CAC7C,UAAA,CAAY,CAAA,CACZ,cAAA,CAAgB,GAAA,CAChB,UAAA,CAAY,GAAA,CACZ,aAAA,CAAe,CAAA,CACf,oBAAA,CAAsB,CAAC,GAAA,CAAK,GAAA,CAAK,GAAA,CAAK,GAAA,CAAK,GAAA,CAAK,GAAG,CACrD,CAAA,CAEMC,EAAAA,CAAmB,GAAA,CACnBC,EAAAA,kBACH,UAAA,CAAmB,iBAAA,SAAqBH,IAAAA,CAG9BI,CAAAA,4BAAN,MAAA,QAA+B,KAAM,CAK1C,WAAA,CACEC,CAAAA,CACgBC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,CAChBC,CAAAA,CAAmC,CAAC,CAAA,CACpC,CACA,KAAA,CAAMJ,CAAO,CAAA,CALG,IAAA,CAAA,GAAA,CAAAC,CAAAA,CACA,IAAA,CAAA,WAAA,CAAAC,CAAAA,CACA,IAAA,CAAA,YAAA,CAAAC,CAAAA,CAIhB,IAAA,CAAK,IAAA,CAAO,IAAA,CAAK,WAAA,CAAY,IAAA,CAC7B,IAAA,CAAK,SAAA,CAAY,IAAI,IAAA,CAAK,CAAA,CAAE,WAAA,CAAY,CAAA,CACxC,IAAA,CAAK,OAAA,CAAUtC,CAAAA,CAAW,CAAA,CAC1B,IAAA,CAAK,OAAA,CAAUuC,CAAAA,CAEf,IAAA,CAAK,KAAA,CAAQ,IAAA,CAAK,QAAA,CAAS,CAC7B,CAlBgB,QAoBP,CAAA,CAAmB,CAC1B,MAAO,CACL,CAAA,EAAA;AA2JU;AAmJR;AC7WiD;AAEE;AAkB1B;AAQL;AAEsB;AAGW;AAWvC;AAGJ;AAWkC;AAgBD;AAAY;AAAoE;AAAY;AAI/G;AAAY;AAEhB;AAGD;AAAsC;AAAY;AAGnC;AAGV,QAAA;AAAY;AAIjB;AAAuC;AAAY;AAI9C;AAAY;AAahC;AAKe;AAA2D;AAAY;AAA2C;AAGlH;AAA6D;AAAY;AAA2C;AAGpH;AAA4D;AAAY;AAA2C;AAG7E;AAAY;AAGJ;AAAY;AAGD;AAAY;AAMhE;AAAY;AAI2D;AAAY;AAMnF;AAAY;AAwBO;AAAY;AAA8C;AAAc;AAI3F;AAAY;AAEhB;AAGD;AAAA;AAAkC;AAED;AAE5B,QAAA;AAAY;AAIjB;AAAA;AAA+B;AAI1B;AAAY;AAahC;AAKe;AAAsC,kBAAA;AAAc;AAAiC;AAGrF,OAAA;AAAwC,oBAAA;AAAc;AAAiC;AAGvF,OAAA;AAAuC,mBAAA;AAAc;AAAiC;AAGtF,OAAA;AAA6B;AAGC,gBAAA;AAAc;AAGL;AAAc;AAIhD;AAAY;AAI6B;AAA0B;AAInE;AAAY;ACpO1C;AC48C8C;AC95Ca;AAsBzC;AAE0B;AAAA;AAUlC;AC9GW;AC0nBW;AAsfxB;ACzmCgB;ACqIiB,qBAAA;AAG5B,SAAA;AACmB,wBAAA;AAenB,SAAA;AAAA;AAEQ,YAAA;AACe,qBAAA;AAChB,cAAA;AAAA;AAEc,gBAAA;AAAK;AAAA;AAAA;AClHrB,SAAA;AAAA;AA+GHC;AA4Ba;AAOA;AAAA;AAkBuB;AAAA;AAKtB;AAAA;ACxHhB;AClDS;AA0Jb;ACvLA;AAgDuF;AAAwB;AAW/G;AAgKJ;AChGmB;AA0NV;ACrJS;AC6IjB,SAAA;AC9RkB,SAAA;AAyPX;AChTkB;AAAA;AAAA;AAMT,yDAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA6CZ,yBAAA;AAGI;AAIF;AAGA;AAgBQ;AAAA;AAkBiB;AACS;AAmBP;AAAA;AAAA;AA6HvB;AAsD+B;AAAA;AAKrB;AA0CA;AAmBF;AAgIhB;AAWsC;AA2B7B;AAgCA;ACpjBG;ACyBN,4GAAA;AC2Gd;ACpFI,aAAA;AC/CJ;AAUyE;AAClD;AACiD,EAAA;AACuE,EAAA;AACU,EAAA;AAClI;AAK2E;AACsE,EAAA;AAKhK;AAAoD;AACrC;AACkD,EAAA;AACT,EAAA;AACmB,EAAA;AAC5D;AAKf;AAA6D;AAC9C;AACgE,EAAA;AAC4F,EAAA;AAChF,EAAA;AACmE,EAAA;AAC/I;AAK0F;AAUxG,EAAA;AAK2F;AACxB,EAAA;AAKN;AACQ,EAAA;AAC0E,EAAA;AAK/F;AACiD,EAAA;AAqBjD;AAClD;AAEsD,QAAA;AAE0B,UAAA;AAM1B;AAEyC,eAAA;AAE3C,QAAA;AAMN;AAEjB,QAAA;AAEoB,aAAA;AAMhD;AACD;AAE+C,YAAA;AAEc,aAAA;AAET,OAAA;AAMd;AAEX,OAAA;AAEyG,WAAA;AAMnI;AAE0B,OAAA;AAE6F,OAAA;AAMvH;AAEqC,OAAA;AAEO,QAAA;AAK3B;AACL;ACkLd;AC/MkB;AAAA;AAGI,mBAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA2CJ,QAAA;AAAA;AAAA;AAAA;AAFd;AAME;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA4CO,gBAAA;AAIA,iBAAA;AAIA,iBAAA;AAIS,yCAAA;AAAA;AAAA;AAGjB;AAAA;AAEa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA0IP,YAAA;ACxDkB;ACpR9B;AAgWP","file":"/home/runner/work/ax/ax/src/ax/dist/index.cjs","sourcesContent":["import type { AxAIInputModelList, AxModelInfo } from '../ai/types.js';\n\ninterface GetModelInfoParams<TModel = string, TEmbedModel = undefined> {\n model: TModel;\n modelInfo: readonly AxModelInfo[];\n models?: AxAIInputModelList<TModel, TEmbedModel>;\n}\n\nexport function getModelInfo<TModel = string, TEmbedModel = undefined>({\n model,\n modelInfo,\n models,\n}: Readonly<\n GetModelInfoParams<TModel, TEmbedModel>\n>): Readonly<AxModelInfo> | null {\n // First check if there's a mapping for this model\n const 
modelEntry = models?.find((v) => v.key === model);\n const mappedModel =\n modelEntry && 'model' in modelEntry\n ? (modelEntry.model as string)\n : (model as string);\n\n // Try exact match first\n const exactMatch = modelInfo.find((v) => v.name === model);\n if (exactMatch) return exactMatch;\n\n // Handle normalization if no exact match\n const normalizedName = mappedModel\n // Remove vendor prefixes\n .replace(/^(anthropic\\.|openai\\.)/, '')\n // Remove various postfixes one by one, stopping after first match\n .replace(/-latest$/, '')\n .replace(/-\\d{8}$/, '') // YYYYMMDD\n .replace(/-v\\d+:\\d+$/, '') // v2:0\n .replace(/@\\d{8}$/, '') // @YYYYMMDD\n .replace(/-\\d{2,}(-[a-zA-Z0-9-]+)?$/, '') // XX or XXXXX-something\n .replace(/-v\\d+@\\d{8}$/, '') // vX@YYYYMMDD\n .replace(/-v\\d+$/, ''); // Remove standalone version number\n\n // Try to find a match with the normalized name\n const normalizedMatch = modelInfo.find((v) => v.name === normalizedName);\n if (normalizedMatch) return normalizedMatch;\n\n // Return default if no match found\n return null;\n}\n","/**\n * Cross-platform crypto utilities that work in both Node.js and browser environments\n * using Web Crypto API standards\n */\n\n// Web Crypto API is available in both modern Node.js (16+) and browsers via globalThis.crypto\nconst webCrypto = (() => {\n if (globalThis.crypto && typeof globalThis.crypto.randomUUID === 'function') {\n return globalThis.crypto;\n }\n\n throw new Error(\n 'Web Crypto API with randomUUID support not available. Requires Node.js 16+ or modern browser.'\n );\n})();\n\n/**\n * Generate a random UUID using Web Crypto API\n * @returns A random UUID string\n */\nexport function randomUUID(): string {\n return webCrypto.randomUUID();\n}\n\n/**\n * Create a SHA-256 hash of the input data\n * @param data - The data to hash (string or ArrayBuffer)\n * @returns A promise that resolves to the hex-encoded hash\n */\nexport async function sha256(data: string | ArrayBuffer): Promise<string> {\n const encoder = new TextEncoder();\n const inputData = typeof data === 'string' ? 
encoder.encode(data) : data;\n\n const hashBuffer = await webCrypto.subtle.digest('SHA-256', inputData);\n const hashArray = Array.from(new Uint8Array(hashBuffer));\n const hashHex = hashArray\n .map((b) => b.toString(16).padStart(2, '0'))\n .join('');\n\n return hashHex;\n}\n\n/**\n * Create a hash instance that can be updated incrementally (similar to Node.js createHash)\n * Note: This is a synchronous wrapper around async Web Crypto API - uses simplified hash for compatibility\n */\nexport class Hash {\n private data = '';\n\n update(chunk: string): this {\n this.data += chunk;\n return this;\n }\n\n digest(encoding: 'hex'): string {\n if (encoding !== 'hex') {\n throw new Error('Only hex encoding is supported');\n }\n\n // For browser compatibility, we use a simple hash function\n // This maintains API compatibility but is not cryptographically secure\n const encoder = new TextEncoder();\n const inputData = encoder.encode(this.data);\n\n let hash = 0;\n for (let i = 0; i < inputData.length; i++) {\n const char = inputData[i]!;\n hash = (hash << 5) - hash + char;\n hash = hash & hash; // Convert to 32-bit integer\n }\n\n // Convert to hex string\n return Math.abs(hash).toString(16).padStart(8, '0');\n }\n\n async digestAsync(): Promise<string> {\n return sha256(this.data);\n }\n}\n\n/**\n * Create a hash instance (compatibility function)\n * @param algorithm - The hash algorithm (only 'sha256' supported)\n * @returns A Hash instance\n */\nexport function createHash(algorithm: string): Hash {\n if (algorithm !== 'sha256') {\n throw new Error('Only SHA-256 algorithm is supported');\n }\n return new Hash();\n}\n\n/**\n * Get the crypto object for use in JavaScript interpreter contexts\n * @returns The Web Crypto API object\n */\nexport function getCrypto() {\n return webCrypto;\n}\n","// Web Streams API types are now available globally via DOM types in tsconfig\n\ninterface CurrentEventState {\n event?: string;\n rawData: string;\n id?: string;\n retry?: number;\n}\n\ninterface SSEParserOptions<T> {\n dataParser?: (data: string) => T;\n onError?: (error: Error, rawData: string) => void;\n}\n\nexport class SSEParser<T = unknown> extends TransformStream<string, T> {\n private buffer = '';\n private currentEvent: CurrentEventState = { rawData: '' };\n private dataParser: (data: string) => T;\n private onError: (error: Error, rawData: string) => void;\n\n constructor(options: SSEParserOptions<T> = {}) {\n super({\n transform: (chunk, controller) => this.handleChunk(chunk, controller),\n flush: (controller) => this.handleFlush(controller),\n });\n\n this.dataParser = options.dataParser || JSON.parse;\n this.onError =\n options.onError ||\n ((error, rawData) => {\n console.warn('Failed to parse event data:', error);\n console.log('Raw data that failed to parse:', rawData);\n });\n }\n\n private handleChunk(\n chunk: string,\n controller: TransformStreamDefaultController<T>\n ): void {\n this.buffer += chunk;\n this.processBuffer(controller);\n }\n\n private handleFlush(controller: TransformStreamDefaultController<T>): void {\n this.processBuffer(controller);\n if (this.currentEvent.rawData) {\n this.processEvent(controller);\n }\n }\n\n private processBuffer(controller: TransformStreamDefaultController<T>): void {\n // Normalize newlines to \\n\n const normalizedBuffer = this.buffer.replace(/\\r\\n|\\r/g, '\\n');\n const lines = normalizedBuffer.split('\\n');\n this.buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line === '') {\n this.processEvent(controller);\n } else {\n 
this.parseLine(line);\n }\n }\n }\n\n private parseLine(line: string): void {\n if (line.startsWith(':')) {\n return; // Ignore comment lines\n }\n\n const colonIndex = line.indexOf(':');\n if (colonIndex === -1) {\n this.currentEvent.rawData +=\n (this.currentEvent.rawData && !this.currentEvent.rawData.endsWith('\\n')\n ? '\\n'\n : '') + line.trim();\n return;\n }\n\n const field = line.slice(0, colonIndex).trim();\n const value = line.slice(colonIndex + 1).trim();\n\n switch (field) {\n case 'event':\n this.currentEvent.event = value;\n break;\n case 'data':\n this.currentEvent.rawData +=\n (this.currentEvent.rawData &&\n !this.currentEvent.rawData.endsWith('\\n')\n ? '\\n'\n : '') + value;\n break;\n case 'id':\n this.currentEvent.id = value;\n break;\n case 'retry': {\n const retryValue = Number.parseInt(value, 10);\n if (!Number.isNaN(retryValue)) {\n this.currentEvent.retry = retryValue;\n }\n break;\n }\n }\n }\n\n private processEvent(controller: TransformStreamDefaultController<T>): void {\n if (this.currentEvent.rawData) {\n if (!this.currentEvent.event) {\n this.currentEvent.event = 'message';\n }\n\n if (this.currentEvent.rawData.trim() === '[DONE]') {\n // maybe we want to emit [DONE] to signal the end of the stream\n // controller.enqueue('[DONE]' as any)\n // Reset the current event\n this.currentEvent = { rawData: '' };\n return;\n }\n\n try {\n const parsedData: T = this.dataParser(this.currentEvent.rawData);\n controller.enqueue(parsedData);\n } catch (e) {\n this.onError(e as Error, this.currentEvent.rawData);\n }\n\n this.currentEvent = { rawData: '' };\n }\n }\n}\n","// Web Streams API types are now available globally via DOM types in tsconfig\n\nexport interface TextDecoderCommon {\n readonly encoding: string;\n readonly fatal: boolean;\n readonly ignoreBOM: boolean;\n}\n\nclass TextDecodeTransformer\n implements Transformer<ArrayBuffer | Uint8Array, string>\n{\n private decoder;\n\n constructor() {\n this.decoder = new TextDecoder();\n }\n\n transform(\n chunk: ArrayBuffer | Uint8Array,\n controller: TransformStreamDefaultController<string>\n ) {\n if (!(chunk instanceof ArrayBuffer || ArrayBuffer.isView(chunk))) {\n throw new TypeError('Input data must be a BufferSource');\n }\n const text = this.decoder.decode(chunk, { stream: true });\n if (text.length !== 0) {\n controller.enqueue(text);\n }\n }\n\n flush(controller: TransformStreamDefaultController<string>) {\n const text = this.decoder.decode();\n if (text.length !== 0) {\n controller.enqueue(text);\n }\n }\n}\n\nexport class TextDecoderStreamPolyfill extends TransformStream<\n ArrayBuffer | Uint8Array,\n string\n> {\n constructor() {\n super(new TextDecodeTransformer());\n }\n}\n","// Web Streams API types are now available globally via DOM types in tsconfig\nimport type { Span } from '@opentelemetry/api';\nimport { randomUUID } from './crypto.js';\n\nimport { SSEParser } from './sse.js';\nimport { TextDecoderStreamPolyfill } from './stream.js';\n\n// Configuration Types\nexport interface RetryConfig {\n maxRetries: number;\n initialDelayMs: number;\n maxDelayMs: number;\n backoffFactor: number;\n retryableStatusCodes: number[];\n}\n\nexport interface RequestMetrics {\n startTime: number;\n retryCount: number;\n lastRetryTime?: number;\n streamChunks?: number;\n lastChunkTime?: number;\n streamDuration?: number;\n errorTime?: number;\n}\n\n// Validation Interfaces\ninterface RequestValidation {\n validateRequest?: (request: unknown) => boolean | Promise<boolean>;\n}\n\ninterface ResponseValidation {\n 
validateResponse?: (response: unknown) => boolean | Promise<boolean>;\n}\n\n// API Base Types\nexport interface AxAPI {\n name?: string;\n headers?: Record<string, string>;\n put?: boolean;\n}\n\n// Enhanced API Configuration\nexport interface AxAPIConfig\n extends AxAPI,\n RequestValidation,\n ResponseValidation {\n url: string | URL;\n stream?: boolean;\n debug?: boolean;\n fetch?: typeof fetch;\n span?: Span;\n timeout?: number;\n retry?: Partial<RetryConfig>;\n abortSignal?: AbortSignal;\n}\n\n// Default Configurations\nexport const defaultRetryConfig: RetryConfig = {\n maxRetries: 3,\n initialDelayMs: 1000,\n maxDelayMs: 60000,\n backoffFactor: 2,\n retryableStatusCodes: [500, 408, 429, 502, 503, 504],\n};\n\nconst defaultTimeoutMs = 30000;\nconst textDecoderStream =\n (globalThis as any).TextDecoderStream ?? TextDecoderStreamPolyfill;\n\n// Error Classes\nexport class AxAIServiceError extends Error {\n public readonly timestamp: string;\n public readonly errorId: string;\n public readonly context: Record<string, unknown>;\n\n constructor(\n message: string,\n public readonly url: string,\n public readonly requestBody: unknown,\n public readonly responseBody: unknown,\n context: Record<string, unknown> = {}\n ) {\n super(message);\n this.name = this.constructor.name;\n this.timestamp = new Date().toISOString();\n this.errorId = randomUUID();\n this.context = context;\n\n this.stack = this.toString();\n }\n\n override toString(): string {\n return [\n `${this.name}: ${this.message}`,\n `URL: ${this.url}`,\n `Request Body: ${JSON.stringify(this.requestBody, null, 2)}`,\n `Response Body: ${JSON.stringify(this.responseBody, null, 2)}`,\n `Context: ${JSON.stringify(this.context, null, 2)}`,\n `Timestamp: ${this.timestamp}`,\n `Error ID: ${this.errorId}`,\n ].join('\\n');\n }\n\n // For Node.js, override the custom inspect method so console.log shows our custom string.\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\nexport class AxAIServiceStatusError extends AxAIServiceError {\n constructor(\n public readonly status: number,\n public readonly statusText: string,\n url: string,\n requestBody: unknown,\n responseBody: unknown,\n context?: Record<string, unknown>\n ) {\n super(`HTTP ${status} - ${statusText}`, url, requestBody, {\n httpStatus: status,\n httpStatusText: statusText,\n responseBody,\n ...context,\n });\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceNetworkError extends AxAIServiceError {\n constructor(\n public readonly originalError: Error,\n url: string,\n requestBody: unknown,\n responseBody: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n `Network Error: ${originalError.message}`,\n url,\n requestBody,\n responseBody,\n {\n originalErrorName: originalError.name,\n originalErrorStack: originalError.stack,\n ...context,\n }\n );\n this.name = this.constructor.name;\n this.stack = originalError.stack;\n }\n}\n\nexport class AxAIServiceResponseError extends AxAIServiceError {\n constructor(\n message: string,\n url: string,\n requestBody?: unknown,\n context?: Record<string, unknown>\n ) {\n super(message, url, requestBody, undefined, context);\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceStreamTerminatedError extends AxAIServiceError {\n constructor(\n url: string,\n requestBody?: unknown,\n 
public readonly lastChunk?: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n 'Stream terminated unexpectedly by remote host',\n url,\n requestBody,\n undefined,\n {\n lastChunk,\n ...context,\n }\n );\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceTimeoutError extends AxAIServiceError {\n constructor(\n url: string,\n timeoutMs: number,\n requestBody?: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n `Request timed out after ${timeoutMs}ms`,\n url,\n requestBody,\n undefined,\n { timeoutMs, ...context }\n );\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceAbortedError extends AxAIServiceError {\n constructor(\n url: string,\n reason?: string,\n requestBody?: unknown,\n context?: Record<string, unknown>\n ) {\n super(\n `Request aborted${reason ? `: ${reason}` : ''}`,\n url,\n requestBody,\n undefined,\n { abortReason: reason, ...context }\n );\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIServiceAuthenticationError extends AxAIServiceError {\n constructor(\n url: string,\n requestBody: unknown,\n responseBody: unknown,\n context?: Record<string, unknown>\n ) {\n super('Authentication failed', url, requestBody, responseBody, context);\n this.name = this.constructor.name;\n }\n}\n\nexport class AxAIRefusalError extends Error {\n public readonly timestamp: string;\n public readonly errorId: string;\n\n constructor(\n public readonly refusalMessage: string,\n public readonly model?: string,\n public readonly requestId?: string\n ) {\n super(`Model refused to fulfill request: ${refusalMessage}`);\n this.name = 'AxAIRefusalError';\n this.timestamp = new Date().toISOString();\n this.errorId = randomUUID();\n }\n\n override toString(): string {\n return [\n `${this.name}: ${this.message}`,\n `Refusal: ${this.refusalMessage}`,\n this.model ? `Model: ${this.model}` : '',\n this.requestId ? 
`Request ID: ${this.requestId}` : '',\n `Timestamp: ${this.timestamp}`,\n `Error ID: ${this.errorId}`,\n ]\n .filter(Boolean)\n .join('\\n');\n }\n\n // For Node.js, override the custom inspect method so console.log shows our custom string.\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\n// Utility Functions\nasync function safeReadResponseBody(response: Response): Promise<unknown> {\n try {\n if (response.headers.get('content-type')?.includes('application/json')) {\n return await response.json();\n }\n\n // Clone the response so we can read it without consuming the original\n const clonedResponse = response.clone();\n return await clonedResponse.text();\n } catch (e) {\n // If we can't read the body, return a descriptive message\n return `[ReadableStream - read failed: ${(e as Error).message}]`;\n }\n}\n\nfunction calculateRetryDelay(\n attempt: number,\n config: Readonly<RetryConfig>\n): number {\n const delay = Math.min(\n config.maxDelayMs,\n config.initialDelayMs * config.backoffFactor ** attempt\n );\n return delay * (0.75 + Math.random() * 0.5);\n}\n\nfunction createRequestMetrics(): RequestMetrics {\n return {\n startTime: Date.now(),\n retryCount: 0,\n };\n}\n\n// eslint-disable-next-line functional/prefer-immutable-types\nfunction updateRetryMetrics(metrics: RequestMetrics): void {\n metrics.retryCount++;\n metrics.lastRetryTime = Date.now();\n}\n\nfunction shouldRetry(\n error: Error,\n status: number | undefined,\n attempt: number,\n config: Readonly<RetryConfig>\n): boolean {\n if (attempt >= config.maxRetries) return false;\n if (status && config.retryableStatusCodes.includes(status)) return true;\n\n return (\n error instanceof AxAIServiceNetworkError &&\n !(error instanceof AxAIServiceAuthenticationError)\n );\n}\n\n// Enhanced API Call Function\nexport const apiCall = async <TRequest = unknown, TResponse = unknown>(\n api: Readonly<AxAPIConfig>,\n json: TRequest\n): Promise<TResponse | ReadableStream<TResponse>> => {\n const retryConfig: RetryConfig = { ...defaultRetryConfig, ...api.retry };\n const timeoutMs = api.timeout ?? defaultTimeoutMs;\n const metrics = createRequestMetrics();\n let timeoutId: NodeJS.Timeout;\n\n const baseUrl = new URL(process.env.PROXY ?? api.url);\n const apiPath = `${[baseUrl.pathname, api.name]\n .filter(Boolean)\n .join('/')\n .replace(/\\/+/g, '/')}${baseUrl.search}`;\n const apiUrl = new URL(apiPath, baseUrl);\n\n const requestId = randomUUID();\n\n // Validate request if validator is provided\n if (api.validateRequest) {\n const isValid = await api.validateRequest(json);\n if (!isValid) {\n throw new AxAIServiceResponseError(\n 'Invalid request data',\n apiUrl.href,\n json,\n { validation: 'request' }\n );\n }\n }\n\n // Set up telemetry\n api.span?.setAttributes({\n 'http.request.method': api.put ? 
'PUT' : 'POST',\n 'url.full': apiUrl.href,\n 'request.id': requestId,\n 'request.startTime': metrics.startTime,\n });\n\n let attempt = 0;\n\n while (true) {\n // Combine user abort signal with timeout signal\n const combinedAbortController = new AbortController();\n\n // Handle user abort signal\n if (api.abortSignal) {\n if (api.abortSignal.aborted) {\n throw new AxAIServiceAbortedError(\n apiUrl.href,\n api.abortSignal.reason,\n json,\n { metrics }\n );\n }\n\n const userAbortHandler = () => {\n combinedAbortController.abort(\n api.abortSignal!.reason || 'User aborted request'\n );\n };\n api.abortSignal.addEventListener('abort', userAbortHandler, {\n once: true,\n });\n\n // Clean up listener if we complete before abort\n const originalAbort = combinedAbortController.abort.bind(\n combinedAbortController\n );\n combinedAbortController.abort = (reason?: string) => {\n api.abortSignal!.removeEventListener('abort', userAbortHandler);\n originalAbort(reason);\n };\n }\n\n timeoutId = setTimeout(() => {\n combinedAbortController.abort('Request timeout');\n }, timeoutMs);\n\n try {\n // Set up timeout with proper cleanup\n\n const res = await (api.fetch ?? fetch)(apiUrl, {\n method: api.put ? 'PUT' : 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'X-Request-ID': requestId,\n 'X-Retry-Count': attempt.toString(),\n ...api.headers,\n },\n body: JSON.stringify(json),\n signal: combinedAbortController.signal,\n });\n\n clearTimeout(timeoutId);\n\n // Handle authentication errors\n if (res.status === 401 || res.status === 403) {\n const responseBody = await safeReadResponseBody(res);\n throw new AxAIServiceAuthenticationError(\n apiUrl.href,\n json,\n responseBody,\n {\n metrics,\n }\n );\n }\n\n // Handle retryable status codes\n if (\n res.status >= 400 &&\n shouldRetry(new Error(), res.status, attempt, retryConfig)\n ) {\n const delay = calculateRetryDelay(attempt, retryConfig);\n attempt++;\n updateRetryMetrics(metrics);\n\n api.span?.addEvent('retry', {\n attempt,\n delay,\n status: res.status,\n 'metrics.startTime': metrics.startTime,\n 'metrics.retryCount': metrics.retryCount,\n 'metrics.lastRetryTime': metrics.lastRetryTime,\n });\n\n await new Promise((resolve) => setTimeout(resolve, delay));\n continue;\n }\n\n if (res.status >= 400) {\n const responseBody = await safeReadResponseBody(res);\n throw new AxAIServiceStatusError(\n res.status,\n res.statusText,\n apiUrl.href,\n json,\n responseBody,\n { metrics }\n );\n }\n\n // Handle non-streaming response\n if (!api.stream) {\n const resJson = await res.json();\n\n // Validate response if validator is provided\n if (api.validateResponse) {\n const isValid = await api.validateResponse(resJson);\n if (!isValid) {\n throw new AxAIServiceResponseError(\n 'Invalid response data',\n apiUrl.href,\n json,\n { validation: 'response' }\n );\n }\n }\n\n api.span?.setAttributes({\n 'response.time': Date.now() - metrics.startTime,\n 'response.retries': metrics.retryCount,\n });\n\n return resJson as TResponse;\n }\n\n // Handle streaming response\n if (!res.body) {\n throw new AxAIServiceResponseError(\n 'Response body is null',\n apiUrl.href,\n json,\n { metrics }\n );\n }\n\n let lastChunk: TResponse | undefined;\n let chunkCount = 0;\n\n // Enhanced tracking stream\n const trackingStream = new TransformStream<TResponse, TResponse>({\n transform(chunk, controller) {\n lastChunk = chunk;\n chunkCount++;\n metrics.streamChunks = chunkCount;\n metrics.lastChunkTime = Date.now();\n controller.enqueue(chunk);\n\n 
api.span?.addEvent('stream.chunk', {\n 'stream.chunks': chunkCount,\n 'stream.duration': Date.now() - metrics.startTime,\n 'response.retries': metrics.retryCount,\n });\n },\n });\n\n // Flag to track if the controller is closed.\n let closed = false;\n\n // Enhanced wrapped stream\n return new ReadableStream<TResponse>({\n start(controller) {\n const reader = res\n .body!.pipeThrough(new textDecoderStream())\n .pipeThrough(new SSEParser<TResponse>())\n .pipeThrough(trackingStream)\n .getReader();\n\n async function read() {\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n if (!closed) {\n closed = true;\n controller.close();\n }\n break;\n }\n\n // Check if the controller is already closed before enqueuing.\n if (closed) break;\n controller.enqueue(value);\n }\n } catch (e) {\n const error = e as Error;\n const streamMetrics = {\n ...metrics,\n streamDuration: Date.now() - metrics.startTime,\n };\n\n if (\n error.name === 'AbortError' ||\n error.message?.includes('aborted')\n ) {\n controller.error(\n new AxAIServiceStreamTerminatedError(\n apiUrl.href,\n json,\n lastChunk,\n { streamMetrics }\n )\n );\n } else if (\n error instanceof TypeError &&\n error.message.includes('cancelled')\n ) {\n controller.error(\n new AxAIServiceStreamTerminatedError(\n apiUrl.href,\n json,\n lastChunk,\n {\n streamMetrics,\n cancelReason: 'Stream cancelled by client',\n }\n )\n );\n } else {\n controller.error(\n new AxAIServiceNetworkError(\n error,\n apiUrl.href,\n json,\n '[ReadableStream - consumed during streaming]',\n {\n streamMetrics,\n }\n )\n );\n }\n throw error;\n } finally {\n clearTimeout(timeoutId);\n reader.releaseLock();\n }\n }\n\n read();\n },\n // When the consumer cancels the stream, set our flag to stop processing further.\n cancel() {\n closed = true;\n },\n });\n } catch (error) {\n if (error instanceof Error && error.name === 'AbortError') {\n // Check if this was a user abort or timeout\n if (api.abortSignal?.aborted) {\n throw new AxAIServiceAbortedError(\n apiUrl.href,\n api.abortSignal.reason,\n json,\n { metrics }\n );\n }\n throw new AxAIServiceTimeoutError(apiUrl.href, timeoutMs, json, {\n metrics,\n });\n }\n\n if (api.span?.isRecording()) {\n api.span.recordException(error as Error);\n api.span.setAttributes({\n 'error.time': Date.now() - metrics.startTime,\n 'error.retries': metrics.retryCount,\n });\n }\n\n // Handle retryable network errors\n if (\n error instanceof AxAIServiceNetworkError &&\n shouldRetry(error, undefined, attempt, retryConfig)\n ) {\n const delay = calculateRetryDelay(attempt, retryConfig);\n attempt++;\n updateRetryMetrics(metrics);\n\n api.span?.addEvent('retry', {\n attempt,\n delay,\n error: error.message,\n 'metrics.startTime': metrics.startTime,\n 'metrics.retryCount': metrics.retryCount,\n 'metrics.lastRetryTime': metrics.lastRetryTime,\n });\n\n await new Promise((resolve) => setTimeout(resolve, delay));\n continue;\n }\n\n if (error instanceof AxAIServiceError) {\n error.context.metrics = metrics;\n }\n\n throw error;\n } finally {\n if (timeoutId !== undefined) {\n clearTimeout(timeoutId);\n }\n }\n }\n};\n\nexport function createApiConfig(\n config: Readonly<Partial<AxAPIConfig>>\n): AxAPIConfig {\n return {\n timeout: defaultTimeoutMs,\n retry: defaultRetryConfig,\n ...config,\n url: config.url!, // URL is required\n };\n}\n","import type {\n AxChatRequest,\n AxLoggerData,\n AxLoggerFunction,\n} from '../ai/types.js';\nimport { ColorLog } from '../util/log.js';\n\nconst _colorLog = new 
ColorLog();\n\n// Default output function that writes to stdout\nconst defaultOutput = (message: string): void => {\n process.stdout.write(message);\n};\n\n// Helper function to format chat message for display\nconst formatChatMessage = (\n msg: AxChatRequest['chatPrompt'][number],\n hideContent?: boolean,\n cl?: ColorLog\n) => {\n const colorize = (text: string, colorMethod?: keyof ColorLog) => {\n if (cl && colorMethod && colorMethod in cl) {\n return (cl[colorMethod] as (t: string) => string)(text);\n }\n return text;\n };\n\n switch (msg.role) {\n case 'system':\n return `${colorize('[ SYSTEM ]', 'magentaBright')}\\n${colorize(msg.content, 'magenta')}`;\n case 'function':\n return `${colorize('[ FUNCTION RESULT ]', 'yellow')}\\n${colorize(msg.result ?? '[No result]', 'yellowDim')}`;\n case 'user': {\n const header = `${colorize('[ USER ]', 'greenBright')}\\n`;\n if (typeof msg.content === 'string') {\n return header + colorize(msg.content, 'green');\n }\n const items = msg.content.map((item) => {\n if (item.type === 'text') {\n return colorize(item.text, 'green');\n }\n if (item.type === 'image') {\n const content = hideContent ? '[Image]' : `[Image: ${item.image}]`;\n return colorize(content, 'green');\n }\n if (item.type === 'audio') {\n const content = hideContent ? '[Audio]' : `[Audio: ${item.data}]`;\n return colorize(content, 'green');\n }\n return colorize('[Unknown content type]', 'gray');\n });\n return header + items.join('\\n');\n }\n case 'assistant': {\n let header = colorize('[ ASSISTANT', 'cyanBright');\n if (msg.name) {\n header += ` ${msg.name}`;\n }\n header += ' ]';\n let result = `${header}\\n`;\n if (msg.content) {\n result += `${colorize(msg.content, 'cyan')}\\n`;\n }\n if (msg.functionCalls && msg.functionCalls.length > 0) {\n result += `${colorize('[ FUNCTION CALLS ]', 'yellow')}\\n`;\n msg.functionCalls.forEach((call, i) => {\n const params =\n typeof call.function.params === 'string'\n ? call.function.params\n : JSON.stringify(call.function.params, null, 2);\n result += colorize(\n `${i + 1}. ${call.function.name}(${params}) [id: ${call.id}]`,\n 'yellowDim'\n );\n if (i < msg.functionCalls!.length - 1) {\n result += '\\n';\n }\n });\n result += '\\n';\n }\n if (\n !msg.content &&\n (!msg.functionCalls || msg.functionCalls.length === 0)\n ) {\n result += colorize('[No content]', 'gray');\n }\n return result;\n }\n default:\n return `${colorize('[ UNKNOWN ]', 'redBright')}\\n${colorize(JSON.stringify(msg), 'gray')}`;\n }\n};\n\n// Factory function to create a default logger with customizable output\nexport const axCreateDefaultColorLogger = (\n output: (message: string) => void = defaultOutput\n): AxLoggerFunction => {\n const cl = new ColorLog();\n const divider = cl.gray('─'.repeat(60));\n return (message: AxLoggerData) => {\n const typedData = message;\n let formattedMessage = '';\n\n switch (typedData.name) {\n case 'ChatRequestChatPrompt':\n formattedMessage = `${typedData.step > 0 ? 
`\\n${divider}\\n` : ''}${cl.blueBright(`[ CHAT REQUEST Step ${typedData.step} ]`)}\\n${divider}\\n`;\n typedData.value.forEach((msg, i) => {\n formattedMessage += formatChatMessage(msg, undefined, cl);\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n formattedMessage += `\\n${divider}`; // Keep closing for steps\n break;\n case 'FunctionResults':\n formattedMessage = `\\n${cl.yellow('[ FUNCTION RESULTS ]')}\\n${divider}\\n`;\n typedData.value.forEach((result, i) => {\n formattedMessage += cl.yellowDim(\n `Function: ${result.functionId}\\nResult: ${result.result}`\n );\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n case 'ChatResponseResults':\n formattedMessage = `\\n${cl.cyanBright('[ CHAT RESPONSE ]')}\\n${divider}\\n`;\n typedData.value.forEach((result, i) => {\n formattedMessage += cl.cyan(result.content || '[No content]');\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n case 'ChatResponseStreamingResult': {\n const streamingContent =\n typedData.value.delta || typedData.value.content || '';\n // Add newline prefix if this is actual content (not just a delta)\n const needsNewline =\n streamingContent.trim().length > 0 &&\n (streamingContent.includes('Reply:') ||\n streamingContent.includes('🤖') ||\n streamingContent.length > 50);\n formattedMessage = needsNewline\n ? `\\n${cl.cyanBright(streamingContent)}`\n : cl.cyanBright(streamingContent);\n break;\n }\n case 'FunctionError':\n formattedMessage = `\\n${cl.redBright(`[ FUNCTION ERROR #${typedData.index} ]`)}\\n${divider}\\n${cl.white(typedData.fixingInstructions)}\\n${cl.red(`Error: ${typedData.error}`)}`;\n break;\n case 'ValidationError':\n formattedMessage = `\\n${cl.redBright(`[ VALIDATION ERROR #${typedData.index} ]`)}\\n${divider}\\n${cl.white(typedData.fixingInstructions)}\\n${cl.red(`Error: ${typedData.error}`)}`;\n break;\n case 'AssertionError':\n formattedMessage = `\\n${cl.redBright(`[ ASSERTION ERROR #${typedData.index} ]`)}\\n${divider}\\n${cl.white(typedData.fixingInstructions)}\\n${cl.red(`Error: ${typedData.error}`)}`;\n break;\n case 'ResultPickerUsed':\n formattedMessage = `${cl.greenBright('[ RESULT PICKER ]')}\\n${divider}\\n${cl.green(`Selected sample ${typedData.selectedIndex + 1} of ${typedData.sampleCount} (${typedData.latency.toFixed(2)}ms)`)}`;\n break;\n case 'Notification':\n formattedMessage = `${cl.gray(`[ NOTIFICATION ${typedData.id} ]`)}\\n${divider}\\n${cl.white(typedData.value)}`;\n break;\n case 'EmbedRequest':\n formattedMessage = `${cl.orange(`[ EMBED REQUEST ${typedData.embedModel} ]`)}\\n${divider}\\n`;\n typedData.value.forEach((text, i) => {\n formattedMessage += cl.white(\n `Text ${i + 1}: ${text.substring(0, 100)}${text.length > 100 ? '...' : ''}`\n );\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n case 'EmbedResponse':\n formattedMessage = `${cl.orange(`[ EMBED RESPONSE (${typedData.totalEmbeddings} embeddings) ]`)}\\n${divider}\\n`;\n typedData.value.forEach((embedding, i) => {\n formattedMessage += cl.white(\n `Embedding ${i + 1}: [${embedding.sample.join(', ')}${embedding.truncated ? ', ...' 
: ''}] (length: ${embedding.length})`\n );\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n default:\n formattedMessage = cl.gray(JSON.stringify(typedData, null, 2));\n }\n\n output(formattedMessage);\n };\n};\n\nexport const defaultLogger: AxLoggerFunction = axCreateDefaultColorLogger();\n\n// Factory function to create a text-only logger (no colors) with customizable output\nexport const axCreateDefaultTextLogger = (\n output: (message: string) => void = defaultOutput\n): AxLoggerFunction => {\n const divider = '─'.repeat(60);\n return (message: AxLoggerData) => {\n const typedData = message;\n let formattedMessage = '';\n\n switch (typedData.name) {\n case 'ChatRequestChatPrompt':\n formattedMessage = `${typedData.step > 0 ? `\\n${divider}\\n` : ''}[ CHAT REQUEST Step ${typedData.step} ]\\n${divider}\\n`;\n typedData.value.forEach((msg, i) => {\n formattedMessage += formatChatMessage(msg);\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n formattedMessage += `\\n${divider}`; // Keep closing for steps\n break;\n case 'FunctionResults':\n formattedMessage = `\\n[ FUNCTION RESULTS ]\\n${divider}\\n`;\n typedData.value.forEach((result, i) => {\n formattedMessage += `Function: ${result.functionId}\\nResult: ${result.result}`;\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n case 'ChatResponseResults':\n formattedMessage = `\\n[ CHAT RESPONSE ]\\n${divider}\\n`;\n typedData.value.forEach((result, i) => {\n formattedMessage += result.content || '[No content]';\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n case 'ChatResponseStreamingResult': {\n const streamingContent =\n typedData.value.delta || typedData.value.content || '';\n // Add newline prefix if this is actual content (not just a delta)\n const needsNewline =\n streamingContent.trim().length > 0 &&\n (streamingContent.includes('Reply:') ||\n streamingContent.includes('🤖') ||\n streamingContent.length > 50);\n formattedMessage = needsNewline\n ? `\\n${streamingContent}`\n : streamingContent;\n break;\n }\n case 'FunctionError':\n formattedMessage = `\\n[ FUNCTION ERROR #${typedData.index} ]\\n${divider}\\n${typedData.fixingInstructions}\\nError: ${typedData.error}`;\n break;\n case 'ValidationError':\n formattedMessage = `\\n[ VALIDATION ERROR #${typedData.index} ]\\n${divider}\\n${typedData.fixingInstructions}\\nError: ${typedData.error}`;\n break;\n case 'AssertionError':\n formattedMessage = `\\n[ ASSERTION ERROR #${typedData.index} ]\\n${divider}\\n${typedData.fixingInstructions}\\nError: ${typedData.error}`;\n break;\n case 'ResultPickerUsed':\n formattedMessage = `[ RESULT PICKER ]\\n${divider}\\nSelected sample ${typedData.selectedIndex + 1} of ${typedData.sampleCount} (${typedData.latency.toFixed(2)}ms)`;\n break;\n case 'Notification':\n formattedMessage = `[ NOTIFICATION ${typedData.id} ]\\n${divider}\\n${typedData.value}`;\n break;\n case 'EmbedRequest':\n formattedMessage = `[ EMBED REQUEST ${typedData.embedModel} ]\\n${divider}\\n`;\n typedData.value.forEach((text, i) => {\n formattedMessage += `Text ${i + 1}: ${text.substring(0, 100)}${text.length > 100 ? '...' 
: ''}`;\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n case 'EmbedResponse':\n formattedMessage = `[ EMBED RESPONSE (${typedData.totalEmbeddings} embeddings) ]\\n${divider}\\n`;\n typedData.value.forEach((embedding, i) => {\n formattedMessage += `Embedding ${i + 1}: [${embedding.sample.join(', ')}${embedding.truncated ? ', ...' : ''}] (length: ${embedding.length})`;\n if (i < typedData.value.length - 1)\n formattedMessage += `\\n${divider}\\n`;\n });\n break;\n default:\n formattedMessage = JSON.stringify(typedData, null, 2);\n }\n\n output(formattedMessage);\n };\n};\n","export const axSpanAttributes = {\n // LLM\n LLM_SYSTEM: 'gen_ai.system',\n LLM_OPERATION_NAME: 'gen_ai.operation.name',\n LLM_REQUEST_MODEL: 'gen_ai.request.model',\n LLM_REQUEST_MAX_TOKENS: 'gen_ai.request.max_tokens',\n LLM_REQUEST_TEMPERATURE: 'gen_ai.request.temperature',\n LLM_REQUEST_TOP_K: 'gen_ai.request.top_k',\n LLM_REQUEST_FREQUENCY_PENALTY: 'gen_ai.request.frequency_penalty',\n LLM_REQUEST_PRESENCE_PENALTY: 'gen_ai.request.presence_penalty',\n LLM_REQUEST_STOP_SEQUENCES: 'gen_ai.request.stop_sequences',\n LLM_REQUEST_LLM_IS_STREAMING: 'gen_ai.request.llm_is_streaming',\n LLM_REQUEST_TOP_P: 'gen_ai.request.top_p',\n\n LLM_USAGE_INPUT_TOKENS: 'gen_ai.usage.input_tokens',\n LLM_USAGE_OUTPUT_TOKENS: 'gen_ai.usage.output_tokens',\n LLM_USAGE_TOTAL_TOKENS: 'gen_ai.usage.total_tokens',\n LLM_USAGE_THOUGHTS_TOKENS: 'gen_ai.usage.thoughts_tokens',\n\n // Vector DB\n DB_SYSTEM: 'db.system',\n DB_TABLE: 'db.table',\n DB_NAMESPACE: 'db.namespace',\n DB_ID: 'db.id',\n DB_QUERY_TEXT: 'db.query.text',\n DB_VECTOR: 'db.vector',\n DB_OPERATION_NAME: 'db.operation.name',\n DB_VECTOR_QUERY_TOP_K: 'db.vector.query.top_k',\n\n DB_QUERY_EMBEDDINGS: 'db.query.embeddings',\n DB_QUERY_RESULT: 'db.query.result',\n\n // Query Embeddings\n DB_QUERY_EMBEDDINGS_VECTOR: 'db.query.embeddings.vector',\n\n // Query Result (canonical format)\n DB_QUERY_RESULT_ID: 'db.query.result.id',\n DB_QUERY_RESULT_SCORE: 'db.query.result.score',\n DB_QUERY_RESULT_DISTANCE: 'db.query.result.distance',\n DB_QUERY_RESULT_METADATA: 'db.query.result.metadata',\n DB_QUERY_RESULT_VECTOR: 'db.query.result.vector',\n DB_QUERY_RESULT_DOCUMENT: 'db.query.result.document',\n};\n\nexport const axSpanEvents = {\n GEN_AI_USER_MESSAGE: 'gen_ai.user.message',\n GEN_AI_SYSTEM_MESSAGE: 'gen_ai.system.message',\n GEN_AI_ASSISTANT_MESSAGE: 'gen_ai.assistant.message',\n GEN_AI_TOOL_MESSAGE: 'gen_ai.tool.message', // For tool messages in request & response tool calls\n GEN_AI_CHOICE: 'gen_ai.choice',\n GEN_AI_USAGE: 'gen_ai.usage',\n};\n\nexport enum AxLLMRequestTypeValues {\n COMPLETION = 'completion',\n CHAT = 'chat',\n RERANK = 'rerank',\n UNKNOWN = 'unknown',\n}\n\nexport enum AxSpanKindValues {\n WORKFLOW = 'workflow',\n TASK = 'task',\n AGENT = 'agent',\n TOOL = 'tool',\n UNKNOWN = 'unknown',\n}\n","// ReadableStream is available globally in modern browsers and Node.js 16+ via DOM types\nimport { context, type Span, SpanKind } from '@opentelemetry/api';\nimport { axGlobals } from '../dsp/globals.js';\nimport { defaultLogger } from '../dsp/loggers.js';\nimport { axSpanAttributes, axSpanEvents } from '../trace/trace.js';\nimport { apiCall } from '../util/apicall.js';\nimport { randomUUID } from '../util/crypto.js';\nimport { RespTransformStream } from '../util/transform.js';\nimport {\n logChatRequest,\n logEmbedRequest,\n logEmbedResponse,\n logResponse,\n logResponseStreamingResult,\n} from './debug.js';\nimport {\n type 
AxAIMetricsInstruments,\n getOrCreateAIMetricsInstruments,\n recordAbortMetric,\n recordContextWindowUsageMetric,\n recordErrorMetric,\n recordErrorRateMetric,\n recordEstimatedCostMetric,\n recordFunctionCallMetric,\n recordLatencyMetric,\n recordLatencyStatsMetrics,\n recordModelConfigMetrics,\n recordMultimodalRequestMetric,\n recordPromptLengthMetric,\n recordRequestMetric,\n recordRequestSizeMetric,\n recordResponseSizeMetric,\n recordStreamingRequestMetric,\n recordThinkingBudgetUsageMetric,\n recordTimeoutMetric,\n recordTokenMetric,\n} from './metrics.js';\nimport type {\n AxAIInputModelList,\n AxAIModelList,\n AxAIPromptConfig,\n AxAIService,\n AxAIServiceActionOptions,\n AxAIServiceImpl,\n AxAIServiceMetrics,\n AxAIServiceOptions,\n AxChatRequest,\n AxChatResponse,\n AxEmbedRequest,\n AxEmbedResponse,\n AxLoggerFunction,\n AxModelConfig,\n AxModelInfo,\n AxModelUsage,\n} from './types.js';\n\nexport interface AxAIFeatures {\n functions: boolean;\n streaming: boolean;\n functionCot?: boolean;\n hasThinkingBudget?: boolean;\n hasShowThoughts?: boolean;\n}\n\nexport interface AxBaseAIArgs<TModel, TEmbedModel> {\n name: string;\n apiURL: string;\n headers: () => Promise<Record<string, string>>;\n modelInfo: Readonly<AxModelInfo[]>;\n defaults: Readonly<{ model: TModel; embedModel?: TEmbedModel }>;\n options?: Readonly<AxAIServiceOptions>;\n supportFor: AxAIFeatures | ((model: TModel) => AxAIFeatures);\n models?: AxAIInputModelList<TModel, TEmbedModel>;\n}\n\nexport const axBaseAIDefaultConfig = (): AxModelConfig =>\n structuredClone({\n temperature: 0,\n topK: 40,\n topP: 0.9,\n });\n\nexport const axBaseAIDefaultCreativeConfig = (): AxModelConfig =>\n structuredClone({\n temperature: 0.4,\n topP: 0.7,\n frequencyPenalty: 0.2,\n });\n\nexport class AxBaseAI<\n TModel,\n TEmbedModel,\n TChatRequest,\n TEmbedRequest,\n TChatResponse,\n TChatResponseDelta,\n TEmbedResponse,\n> implements AxAIService<TModel, TEmbedModel>\n{\n private debug = false;\n\n private rt?: AxAIServiceOptions['rateLimiter'];\n private fetch?: AxAIServiceOptions['fetch'];\n private tracer?: AxAIServiceOptions['tracer'];\n private meter?: AxAIServiceOptions['meter'];\n private timeout?: AxAIServiceOptions['timeout'];\n private excludeContentFromTrace?: boolean;\n private models?: AxAIInputModelList<TModel, TEmbedModel>;\n private abortSignal?: AbortSignal;\n private logger: AxLoggerFunction = defaultLogger;\n\n private modelInfo: readonly AxModelInfo[];\n private modelUsage?: AxModelUsage;\n private embedModelUsage?: AxModelUsage;\n private defaults: AxBaseAIArgs<TModel, TEmbedModel>['defaults'];\n private lastUsedModelConfig?: AxModelConfig;\n private lastUsedChatModel?: TModel;\n private lastUsedEmbedModel?: TEmbedModel;\n\n protected apiURL: string;\n protected name: string;\n protected id: string;\n protected headers: () => Promise<Record<string, string>>;\n protected supportFor: AxAIFeatures | ((model: TModel) => AxAIFeatures);\n\n // Add private metrics tracking properties\n private metrics: AxAIServiceMetrics = {\n latency: {\n chat: {\n mean: 0,\n p95: 0,\n p99: 0,\n samples: [],\n },\n embed: {\n mean: 0,\n p95: 0,\n p99: 0,\n samples: [],\n },\n },\n errors: {\n chat: {\n count: 0,\n rate: 0,\n total: 0,\n },\n embed: {\n count: 0,\n rate: 0,\n total: 0,\n },\n },\n };\n\n constructor(\n private readonly aiImpl: Readonly<\n AxAIServiceImpl<\n TModel,\n TEmbedModel,\n TChatRequest,\n TEmbedRequest,\n TChatResponse,\n TChatResponseDelta,\n TEmbedResponse\n >\n >,\n {\n name,\n apiURL,\n headers,\n 
modelInfo,\n defaults,\n options = {},\n supportFor,\n models,\n }: Readonly<AxBaseAIArgs<TModel, TEmbedModel>>\n ) {\n this.name = name;\n this.apiURL = apiURL;\n this.headers = headers;\n this.supportFor = supportFor;\n this.tracer = options.tracer ?? axGlobals.tracer;\n this.meter = options.meter ?? axGlobals.meter;\n this.modelInfo = modelInfo;\n this.models = models;\n this.id = randomUUID();\n\n const model = this.getModel(defaults.model) ?? defaults.model;\n const embedModel =\n this.getEmbedModel(defaults.embedModel) ?? defaults.embedModel;\n\n this.defaults = { model, embedModel };\n\n if (\n !defaults.model ||\n typeof defaults.model !== 'string' ||\n defaults.model === ''\n ) {\n throw new Error('No model defined');\n }\n\n this.setOptions(options);\n\n if (models) {\n validateModels(models);\n }\n }\n\n private getMetricsInstruments(): AxAIMetricsInstruments | undefined {\n return getOrCreateAIMetricsInstruments(this.meter);\n }\n\n public setName(name: string): void {\n this.name = name;\n }\n\n public getId(): string {\n return this.id;\n }\n\n public setAPIURL(apiURL: string): void {\n this.apiURL = apiURL;\n }\n\n public setHeaders(headers: () => Promise<Record<string, string>>): void {\n this.headers = headers;\n }\n\n setOptions(options: Readonly<AxAIServiceOptions>): void {\n this.debug = options.debug ?? false;\n this.rt = options.rateLimiter;\n this.fetch = options.fetch;\n this.timeout = options.timeout;\n this.tracer = options.tracer ?? axGlobals.tracer;\n this.meter = options.meter ?? axGlobals.meter;\n this.excludeContentFromTrace = options.excludeContentFromTrace;\n this.abortSignal = options.abortSignal;\n this.logger = options.logger ?? defaultLogger;\n }\n\n getOptions(): Readonly<AxAIServiceOptions> {\n return {\n debug: this.debug,\n rateLimiter: this.rt,\n fetch: this.fetch,\n tracer: this.tracer,\n meter: this.meter,\n timeout: this.timeout,\n excludeContentFromTrace: this.excludeContentFromTrace,\n abortSignal: this.abortSignal,\n logger: this.logger,\n };\n }\n\n getLogger(): AxLoggerFunction {\n return this.logger;\n }\n\n getModelList(): AxAIModelList | undefined {\n const models: AxAIModelList = [];\n for (const model of this.models ?? []) {\n if (model.isInternal) {\n continue;\n }\n\n if ('model' in model && model.model) {\n models.push({\n key: model.key,\n description: model.description,\n model: model.model as string,\n });\n }\n\n if ('embedModel' in model && model.embedModel) {\n models.push({\n key: model.key,\n description: model.description,\n embedModel: model.embedModel as string,\n });\n }\n }\n\n return models;\n }\n\n getName(): string {\n return this.name;\n }\n\n getFeatures(model?: TModel): AxAIFeatures {\n return typeof this.supportFor === 'function'\n ? this.supportFor(model ?? this.defaults.model)\n : this.supportFor;\n }\n\n getLastUsedChatModel(): TModel | undefined {\n return this.lastUsedChatModel;\n }\n\n getLastUsedEmbedModel(): TEmbedModel | undefined {\n return this.lastUsedEmbedModel;\n }\n\n getLastUsedModelConfig(): AxModelConfig | undefined {\n return this.lastUsedModelConfig;\n }\n\n // Method to calculate percentiles\n private calculatePercentile(\n samples: readonly number[],\n percentile: number\n ): number {\n if (samples.length === 0) return 0;\n const sorted = [...samples].sort((a, b) => a - b);\n const index = Math.ceil((percentile / 100) * sorted.length) - 1;\n return sorted[index] ?? 
0;\n }\n\n // Method to update latency metrics\n private updateLatencyMetrics(type: 'chat' | 'embed', duration: number): void {\n const metrics = this.metrics.latency[type];\n metrics.samples.push(duration);\n\n // Keep only last 1000 samples to prevent memory issues\n if (metrics.samples.length > 1000) {\n metrics.samples.shift();\n }\n\n // Update statistics\n metrics.mean =\n metrics.samples.reduce((a, b) => a + b, 0) / metrics.samples.length;\n metrics.p95 = this.calculatePercentile(metrics.samples, 95);\n metrics.p99 = this.calculatePercentile(metrics.samples, 99);\n\n // Export to OpenTelemetry metrics\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? (this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n\n // Record individual latency measurement\n recordLatencyMetric(metricsInstruments, type, duration, this.name, model);\n\n // Record latency statistics as gauges\n recordLatencyStatsMetrics(\n metricsInstruments,\n type,\n metrics.mean,\n metrics.p95,\n metrics.p99,\n this.name,\n model\n );\n }\n }\n\n // Method to update error metrics\n private updateErrorMetrics(type: 'chat' | 'embed', isError: boolean): void {\n const metrics = this.metrics.errors[type];\n metrics.total++;\n if (isError) {\n metrics.count++;\n }\n metrics.rate = metrics.count / metrics.total;\n\n // Export to OpenTelemetry metrics\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? (this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n\n // Always record request count\n recordRequestMetric(metricsInstruments, type, this.name, model);\n\n // Record error count if there was an error\n if (isError) {\n recordErrorMetric(metricsInstruments, type, this.name, model);\n }\n\n // Record current error rate as a gauge\n recordErrorRateMetric(\n metricsInstruments,\n type,\n metrics.rate,\n this.name,\n model\n );\n }\n }\n\n // Method to record token usage metrics\n private recordTokenUsage(modelUsage?: AxModelUsage): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments && modelUsage?.tokens) {\n const { promptTokens, completionTokens, totalTokens, thoughtsTokens } =\n modelUsage.tokens;\n\n if (promptTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'input',\n promptTokens,\n this.name,\n modelUsage.model\n );\n }\n\n if (completionTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'output',\n completionTokens,\n this.name,\n modelUsage.model\n );\n }\n\n if (totalTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'total',\n totalTokens,\n this.name,\n modelUsage.model\n );\n }\n\n if (thoughtsTokens) {\n recordTokenMetric(\n metricsInstruments,\n 'thoughts',\n thoughtsTokens,\n this.name,\n modelUsage.model\n );\n }\n }\n }\n\n // Helper method to calculate request size in bytes\n private calculateRequestSize(req: unknown): number {\n try {\n return new TextEncoder().encode(JSON.stringify(req)).length;\n } catch {\n return 0;\n }\n }\n\n // Helper method to calculate response size in bytes\n private calculateResponseSize(response: unknown): number {\n try {\n return new TextEncoder().encode(JSON.stringify(response)).length;\n } catch {\n return 0;\n }\n }\n\n // Helper method to detect multimodal content\n private detectMultimodalContent(req: Readonly<AxChatRequest<TModel>>): {\n hasImages: boolean;\n hasAudio: boolean;\n } {\n let hasImages = false;\n let hasAudio = false;\n\n 
if (req.chatPrompt && Array.isArray(req.chatPrompt)) {\n for (const message of req.chatPrompt) {\n if (message.role === 'user' && Array.isArray(message.content)) {\n for (const part of message.content) {\n if (part.type === 'image') {\n hasImages = true;\n } else if (part.type === 'audio') {\n hasAudio = true;\n }\n }\n }\n }\n }\n\n return { hasImages, hasAudio };\n }\n\n // Helper method to calculate prompt length\n private calculatePromptLength(req: Readonly<AxChatRequest<TModel>>): number {\n let totalLength = 0;\n\n if (req.chatPrompt && Array.isArray(req.chatPrompt)) {\n for (const message of req.chatPrompt) {\n if (message.role === 'system' || message.role === 'assistant') {\n if (message.content) {\n totalLength += message.content.length;\n }\n } else if (message.role === 'user') {\n if (typeof message.content === 'string') {\n totalLength += message.content.length;\n } else if (Array.isArray(message.content)) {\n for (const part of message.content) {\n if (part.type === 'text') {\n totalLength += part.text.length;\n }\n }\n }\n } else if (message.role === 'function') {\n if (message.result) {\n totalLength += message.result.length;\n }\n }\n }\n }\n\n return totalLength;\n }\n\n // Helper method to calculate context window usage\n private calculateContextWindowUsage(\n model: TModel,\n modelUsage?: AxModelUsage\n ): number {\n if (!modelUsage?.tokens?.promptTokens) return 0;\n\n // Get model info to find context window size\n const modelInfo = this.modelInfo.find(\n (info) => info.name === (model as string)\n );\n if (!modelInfo?.contextWindow) return 0;\n\n return modelUsage.tokens.promptTokens / modelInfo.contextWindow;\n }\n\n // Helper method to estimate cost\n private estimateCost(model: TModel, modelUsage?: AxModelUsage): number {\n if (!modelUsage?.tokens) return 0;\n\n // Get model info to find pricing\n const modelInfo = this.modelInfo.find(\n (info) => info.name === (model as string)\n );\n if (\n !modelInfo ||\n (!modelInfo.promptTokenCostPer1M && !modelInfo.completionTokenCostPer1M)\n )\n return 0;\n\n const { promptTokens = 0, completionTokens = 0 } = modelUsage.tokens;\n const promptCostPer1M = modelInfo.promptTokenCostPer1M || 0;\n const completionCostPer1M = modelInfo.completionTokenCostPer1M || 0;\n\n return (\n (promptTokens * promptCostPer1M) / 1000000 +\n (completionTokens * completionCostPer1M) / 1000000\n );\n }\n\n // Helper method to estimate cost by model name\n private estimateCostByName(\n modelName: string,\n modelUsage?: AxModelUsage\n ): number {\n if (!modelUsage?.tokens) return 0;\n\n // Get model info to find pricing\n const modelInfo = this.modelInfo.find((info) => info.name === modelName);\n if (\n !modelInfo ||\n (!modelInfo.promptTokenCostPer1M && !modelInfo.completionTokenCostPer1M)\n )\n return 0;\n\n const { promptTokens = 0, completionTokens = 0 } = modelUsage.tokens;\n const promptCostPer1M = modelInfo.promptTokenCostPer1M || 0;\n const completionCostPer1M = modelInfo.completionTokenCostPer1M || 0;\n\n return (\n (promptTokens * promptCostPer1M) / 1000000 +\n (completionTokens * completionCostPer1M) / 1000000\n );\n }\n\n // Helper method to record function call metrics\n private recordFunctionCallMetrics(\n functionCalls?: readonly unknown[],\n model?: TModel\n ): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (!metricsInstruments || !functionCalls) return;\n\n for (const call of functionCalls) {\n if (\n call &&\n typeof call === 'object' &&\n 'function' in call &&\n call.function &&\n typeof call.function === 
'object' &&\n 'name' in call.function\n ) {\n recordFunctionCallMetric(\n metricsInstruments,\n (call.function as { name: string }).name,\n undefined, // latency would need to be tracked separately\n this.name,\n model as string\n );\n }\n }\n }\n\n // Helper method to record timeout metrics\n private recordTimeoutMetric(type: 'chat' | 'embed'): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? (this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n recordTimeoutMetric(metricsInstruments, type, this.name, model);\n }\n }\n\n // Helper method to record abort metrics\n private recordAbortMetric(type: 'chat' | 'embed'): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (metricsInstruments) {\n const model =\n type === 'chat'\n ? (this.lastUsedChatModel as string)\n : (this.lastUsedEmbedModel as string);\n recordAbortMetric(metricsInstruments, type, this.name, model);\n }\n }\n\n // Comprehensive method to record all chat-related metrics\n private recordChatMetrics(\n req: Readonly<AxChatRequest<TModel>>,\n options?: Readonly<\n AxAIPromptConfig & AxAIServiceActionOptions<TModel, TEmbedModel>\n >,\n result?: AxChatResponse | ReadableStream<AxChatResponse>\n ): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (!metricsInstruments) return;\n\n const model = this.lastUsedChatModel as string;\n const modelConfig = this.lastUsedModelConfig;\n\n // Record streaming request metric\n const isStreaming = modelConfig?.stream ?? false;\n recordStreamingRequestMetric(\n metricsInstruments,\n 'chat',\n isStreaming,\n this.name,\n model\n );\n\n // Record multimodal request metric\n const { hasImages, hasAudio } = this.detectMultimodalContent(req);\n recordMultimodalRequestMetric(\n metricsInstruments,\n hasImages,\n hasAudio,\n this.name,\n model\n );\n\n // Record prompt length metric\n const promptLength = this.calculatePromptLength(req);\n recordPromptLengthMetric(\n metricsInstruments,\n promptLength,\n this.name,\n model\n );\n\n // Record model configuration metrics\n recordModelConfigMetrics(\n metricsInstruments,\n modelConfig?.temperature,\n modelConfig?.maxTokens,\n this.name,\n model\n );\n\n // Record thinking budget usage if applicable\n if (\n options?.thinkingTokenBudget &&\n this.modelUsage?.tokens?.thoughtsTokens\n ) {\n recordThinkingBudgetUsageMetric(\n metricsInstruments,\n this.modelUsage.tokens.thoughtsTokens,\n this.name,\n model\n );\n }\n\n // Record request size\n const requestSize = this.calculateRequestSize(req);\n recordRequestSizeMetric(\n metricsInstruments,\n 'chat',\n requestSize,\n this.name,\n model\n );\n\n // Record response size and function calls for non-streaming responses\n if (result && !isStreaming) {\n const chatResponse = result as AxChatResponse;\n const responseSize = this.calculateResponseSize(chatResponse);\n recordResponseSizeMetric(\n metricsInstruments,\n 'chat',\n responseSize,\n this.name,\n model\n );\n\n // Record function call metrics\n if (chatResponse.results) {\n for (const chatResult of chatResponse.results) {\n if (chatResult.functionCalls) {\n this.recordFunctionCallMetrics(\n chatResult.functionCalls,\n this.lastUsedChatModel\n );\n }\n }\n }\n\n // Record context window usage\n const contextUsage = this.calculateContextWindowUsage(\n this.lastUsedChatModel!,\n chatResponse.modelUsage\n );\n if (contextUsage > 0) {\n recordContextWindowUsageMetric(\n metricsInstruments,\n contextUsage,\n 
this.name,\n model\n );\n }\n\n // Record estimated cost\n const estimatedCost = this.estimateCost(\n this.lastUsedChatModel!,\n chatResponse.modelUsage\n );\n if (estimatedCost > 0) {\n recordEstimatedCostMetric(\n metricsInstruments,\n 'chat',\n estimatedCost,\n this.name,\n model\n );\n }\n }\n }\n\n // Comprehensive method to record all embed-related metrics\n private recordEmbedMetrics(\n req: Readonly<AxEmbedRequest<TEmbedModel>>,\n result: Readonly<AxEmbedResponse>\n ): void {\n const metricsInstruments = this.getMetricsInstruments();\n if (!metricsInstruments) return;\n\n const model = this.lastUsedEmbedModel as string;\n\n // Record request size\n const requestSize = this.calculateRequestSize(req);\n recordRequestSizeMetric(\n metricsInstruments,\n 'embed',\n requestSize,\n this.name,\n model\n );\n\n // Record response size\n const responseSize = this.calculateResponseSize(result);\n recordResponseSizeMetric(\n metricsInstruments,\n 'embed',\n responseSize,\n this.name,\n model\n );\n\n // Record estimated cost\n const estimatedCost = this.estimateCostByName(model, result.modelUsage);\n if (estimatedCost > 0) {\n recordEstimatedCostMetric(\n metricsInstruments,\n 'embed',\n estimatedCost,\n this.name,\n model\n );\n }\n }\n\n // Public method to get metrics\n public getMetrics(): AxAIServiceMetrics {\n return structuredClone(this.metrics);\n }\n\n async chat(\n req: Readonly<AxChatRequest<TModel>>,\n options?: Readonly<\n AxAIPromptConfig & AxAIServiceActionOptions<TModel, TEmbedModel>\n >\n ): Promise<AxChatResponse | ReadableStream<AxChatResponse>> {\n const startTime = performance.now();\n let isError = false;\n let result: AxChatResponse | ReadableStream<AxChatResponse>;\n\n try {\n result = await this._chat1(req, options);\n return result;\n } catch (error) {\n isError = true;\n // Check for specific error types\n if (error instanceof Error) {\n if (\n error.message.includes('timeout') ||\n error.name === 'TimeoutError'\n ) {\n this.recordTimeoutMetric('chat');\n } else if (\n error.message.includes('abort') ||\n error.name === 'AbortError'\n ) {\n this.recordAbortMetric('chat');\n }\n }\n throw error;\n } finally {\n const duration = performance.now() - startTime;\n this.updateLatencyMetrics('chat', duration);\n this.updateErrorMetrics('chat', isError);\n\n // Record additional metrics if successful\n if (!isError) {\n this.recordChatMetrics(req, options, result!);\n }\n }\n }\n\n private async _chat1(\n req: Readonly<AxChatRequest<TModel>>,\n options?: Readonly<\n AxAIPromptConfig & AxAIServiceActionOptions<TModel, TEmbedModel>\n >\n ): Promise<AxChatResponse | ReadableStream<AxChatResponse>> {\n const model = this.getModel(req.model) ?? req.model ?? 
this.defaults.model;\n\n // Validate chat prompt messages for empty content\n if (req.chatPrompt && Array.isArray(req.chatPrompt)) {\n validateAxMessageArray(req.chatPrompt);\n }\n\n const modelConfig = {\n ...this.aiImpl.getModelConfig(),\n ...req.modelConfig,\n };\n\n // Check for thinkingTokenBudget support\n if (\n options?.thinkingTokenBudget &&\n !this.getFeatures(model).hasThinkingBudget\n ) {\n throw new Error(\n `Model ${model as string} does not support thinkingTokenBudget.`\n );\n }\n\n // Check for showThoughts support\n if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {\n throw new Error(\n `Model ${model as string} does not support showThoughts.`\n );\n }\n\n // Check for expensive model usage\n const modelInfo = this.modelInfo.find(\n (info) => info.name === (model as string)\n );\n if (modelInfo?.isExpensive && options?.useExpensiveModel !== 'yes') {\n throw new Error(\n `Model ${model as string} is marked as expensive and requires explicit confirmation. Set useExpensiveModel: \"yes\" to proceed.`\n );\n }\n\n // stream is true by default unless explicitly set to false\n modelConfig.stream =\n (options?.stream !== undefined ? options.stream : modelConfig.stream) ??\n true;\n\n const canStream = this.getFeatures(model).streaming;\n if (!canStream) {\n modelConfig.stream = false;\n }\n\n if (this.tracer) {\n return await this.tracer.startActiveSpan(\n 'AI Chat Request',\n {\n kind: SpanKind.SERVER,\n attributes: {\n [axSpanAttributes.LLM_SYSTEM]: this.name,\n [axSpanAttributes.LLM_OPERATION_NAME]: 'chat',\n [axSpanAttributes.LLM_REQUEST_MODEL]: model as string,\n [axSpanAttributes.LLM_REQUEST_MAX_TOKENS]:\n modelConfig.maxTokens ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_TEMPERATURE]: modelConfig.temperature,\n [axSpanAttributes.LLM_REQUEST_TOP_P]: modelConfig.topP ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_TOP_K]: modelConfig.topK ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY]:\n modelConfig.frequencyPenalty ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_PRESENCE_PENALTY]:\n modelConfig.presencePenalty ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_STOP_SEQUENCES]:\n modelConfig.stopSequences?.join(', ') ?? 'Not set',\n [axSpanAttributes.LLM_REQUEST_LLM_IS_STREAMING]:\n modelConfig.stream ?? 'Not set',\n },\n },\n options?.traceContext ?? 
context.active(),\n async (span) => {\n return await this._chat2(model, modelConfig, req, options, span);\n }\n );\n }\n return await this._chat2(model, modelConfig, req, options);\n }\n\n private cleanupFunctionSchema(\n fn: Readonly<NonNullable<AxChatRequest['functions']>[number]>\n ): NonNullable<AxChatRequest['functions']>[number] {\n const cleanFn = { ...fn };\n if (cleanFn.parameters) {\n const cleanParams = { ...cleanFn.parameters };\n\n // Remove empty required array\n if (\n Array.isArray(cleanParams.required) &&\n cleanParams.required.length === 0\n ) {\n delete cleanParams.required;\n }\n\n // Remove empty properties object\n if (\n cleanParams.properties &&\n Object.keys(cleanParams.properties).length === 0\n ) {\n delete cleanParams.properties;\n }\n\n // After cleaning, remove the entire parameters object if it's effectively empty\n // i.e., either no keys left or just { type: 'object' } remaining.\n if (\n Object.keys(cleanParams).length === 0 ||\n (Object.keys(cleanParams).length === 1 && cleanParams.type === 'object')\n ) {\n delete cleanFn.parameters;\n } else {\n cleanFn.parameters = cleanParams;\n }\n }\n return cleanFn;\n }\n\n private async _chat2(\n model: TModel,\n modelConfig: Readonly<AxModelConfig>,\n chatReq: Readonly<Omit<AxChatRequest<TModel>, 'modelConfig'>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>,\n span?: Span\n ): Promise<AxChatResponse | ReadableStream<AxChatResponse>> {\n if (!this.aiImpl.createChatReq) {\n throw new Error('generateChatReq not implemented');\n }\n\n const debug = options?.debug ?? this.debug;\n\n let functions: NonNullable<AxChatRequest['functions']> | undefined;\n\n if (chatReq.functions && chatReq.functions.length > 0) {\n functions = chatReq.functions.map((fn) => this.cleanupFunctionSchema(fn));\n }\n\n const req = {\n ...chatReq,\n model,\n functions,\n modelConfig,\n };\n\n // Store the last used model and config\n this.lastUsedChatModel = model;\n this.lastUsedModelConfig = modelConfig;\n\n const fn = async () => {\n const [apiConfig, reqValue] = await this.aiImpl.createChatReq(\n req,\n options as AxAIPromptConfig\n );\n\n if (span?.isRecording()) {\n setChatRequestEvents(chatReq, span, this.excludeContentFromTrace);\n }\n\n const res = await apiCall(\n {\n name: apiConfig.name,\n url: this.apiURL,\n headers: await this.buildHeaders(apiConfig.headers),\n stream: modelConfig.stream,\n timeout: this.timeout,\n debug,\n fetch: this.fetch,\n span,\n abortSignal: options?.abortSignal ?? this.abortSignal,\n },\n reqValue\n );\n return res;\n };\n\n if (debug) {\n logChatRequest(\n req.chatPrompt,\n options?.stepIndex ?? 0,\n options?.debugHideSystemPrompt,\n options?.logger ?? this.logger\n );\n }\n\n const rt = options?.rateLimiter ?? this.rt;\n const rv = rt ? 
await rt(fn, { modelUsage: this.modelUsage }) : await fn();\n\n if (modelConfig.stream) {\n if (!this.aiImpl.createChatStreamResp) {\n throw new Error('generateChatResp not implemented');\n }\n\n const respFn = this.aiImpl.createChatStreamResp.bind(this);\n const wrappedRespFn =\n (state: object) => (resp: Readonly<TChatResponseDelta>) => {\n const res = respFn(resp, state);\n res.sessionId = options?.sessionId;\n\n // Only call getTokenUsage if modelUsage is not already provided by the service\n if (!res.modelUsage) {\n const tokenUsage = this.aiImpl.getTokenUsage();\n if (tokenUsage) {\n res.modelUsage = {\n ai: this.name,\n model: model as string,\n tokens: tokenUsage,\n };\n }\n }\n this.modelUsage = res.modelUsage;\n this.recordTokenUsage(res.modelUsage);\n\n if (span?.isRecording()) {\n setChatResponseEvents(res, span, this.excludeContentFromTrace);\n }\n\n if (debug) {\n // Log individual streaming results\n for (const result of res.results) {\n logResponseStreamingResult(\n result,\n result.index,\n options?.logger ?? this.logger\n );\n }\n }\n return res;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n const doneCb = async (_values: readonly AxChatResponse[]) => {\n if (span?.isRecording()) {\n span.end();\n }\n };\n\n const st = (rv as ReadableStream<TChatResponseDelta>).pipeThrough(\n new RespTransformStream<TChatResponseDelta, AxChatResponse>(\n wrappedRespFn({}),\n doneCb\n )\n );\n return st;\n }\n\n if (!this.aiImpl.createChatResp) {\n throw new Error('generateChatResp not implemented');\n }\n\n const res = this.aiImpl.createChatResp(rv as TChatResponse);\n res.sessionId = options?.sessionId;\n\n // Only call getTokenUsage if modelUsage is not already provided by the service\n if (!res.modelUsage) {\n const tokenUsage = this.aiImpl.getTokenUsage();\n if (tokenUsage) {\n res.modelUsage = {\n ai: this.name,\n model: model as string,\n tokens: tokenUsage,\n };\n }\n }\n\n if (res.modelUsage) {\n this.modelUsage = res.modelUsage;\n this.recordTokenUsage(res.modelUsage);\n }\n\n if (span?.isRecording()) {\n setChatResponseEvents(res, span, this.excludeContentFromTrace);\n span.end();\n }\n\n if (debug) {\n logResponse(res, options?.logger ?? 
this.logger);\n }\n\n return res;\n }\n\n async embed(\n req: Readonly<AxEmbedRequest<TEmbedModel>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>\n ): Promise<AxEmbedResponse> {\n const startTime = performance.now();\n let isError = false;\n let result: AxEmbedResponse;\n\n try {\n result = await this._embed1(req, options);\n return result;\n } catch (error) {\n isError = true;\n // Check for specific error types\n if (error instanceof Error) {\n if (\n error.message.includes('timeout') ||\n error.name === 'TimeoutError'\n ) {\n this.recordTimeoutMetric('embed');\n } else if (\n error.message.includes('abort') ||\n error.name === 'AbortError'\n ) {\n this.recordAbortMetric('embed');\n }\n }\n throw error;\n } finally {\n const duration = performance.now() - startTime;\n this.updateLatencyMetrics('embed', duration);\n this.updateErrorMetrics('embed', isError);\n\n // Record additional metrics if successful\n if (!isError) {\n this.recordEmbedMetrics(req, result!);\n }\n }\n }\n\n private async _embed1(\n req: Readonly<AxEmbedRequest<TEmbedModel>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>\n ): Promise<AxEmbedResponse> {\n const embedModel =\n this.getEmbedModel(req.embedModel) ??\n req.embedModel ??\n this.defaults.embedModel;\n\n if (!embedModel) {\n throw new Error('No embed model defined');\n }\n\n if (this.tracer) {\n await this.tracer?.startActiveSpan(\n 'AI Embed Request',\n {\n kind: SpanKind.SERVER,\n attributes: {\n [axSpanAttributes.LLM_SYSTEM]: this.name,\n [axSpanAttributes.LLM_OPERATION_NAME]: 'embeddings',\n [axSpanAttributes.LLM_REQUEST_MODEL]: embedModel as string,\n },\n },\n options?.traceContext ?? context.active(),\n async (span) => {\n try {\n return await this._embed2(embedModel, req, options, span);\n } finally {\n span.end();\n }\n }\n );\n }\n return this._embed2(embedModel, req, options);\n }\n\n private async _embed2(\n embedModel: TEmbedModel,\n embedReq: Readonly<AxEmbedRequest<TEmbedModel>>,\n options?: Readonly<AxAIServiceActionOptions<TModel, TEmbedModel>>,\n span?: Span\n ): Promise<AxEmbedResponse> {\n if (!this.aiImpl.createEmbedReq) {\n throw new Error('generateEmbedReq not implemented');\n }\n if (!this.aiImpl.createEmbedResp) {\n throw new Error('generateEmbedResp not implemented');\n }\n\n const debug = options?.debug ?? this.debug;\n\n const req = {\n ...embedReq,\n embedModel,\n };\n\n // Store the last used embed model\n this.lastUsedEmbedModel = embedModel;\n\n if (debug) {\n logEmbedRequest(\n req.texts ?? [],\n embedModel as string,\n options?.logger ?? this.logger\n );\n }\n\n const fn = async () => {\n const [apiConfig, reqValue] = await this.aiImpl.createEmbedReq!(req);\n\n const res = await apiCall(\n {\n name: apiConfig.name,\n url: this.apiURL,\n headers: await this.buildHeaders(apiConfig.headers),\n debug,\n fetch: this.fetch,\n timeout: this.timeout,\n span,\n abortSignal: options?.abortSignal ?? this.abortSignal,\n },\n reqValue\n );\n return res;\n };\n\n const resValue = this.rt\n ? 
await this.rt(fn, { modelUsage: this.embedModelUsage })\n : await fn();\n const res = this.aiImpl.createEmbedResp!(resValue as TEmbedResponse);\n\n res.sessionId = options?.sessionId;\n\n // Only call getTokenUsage if modelUsage is not already provided by the service\n if (!res.modelUsage) {\n const tokenUsage = this.aiImpl.getTokenUsage();\n if (tokenUsage) {\n res.modelUsage = {\n ai: this.name,\n model: embedModel as string,\n tokens: tokenUsage,\n };\n }\n }\n this.embedModelUsage = res.modelUsage;\n this.recordTokenUsage(res.modelUsage);\n\n if (span?.isRecording() && res.modelUsage?.tokens) {\n span.addEvent(axSpanEvents.GEN_AI_USAGE, {\n [axSpanAttributes.LLM_USAGE_INPUT_TOKENS]:\n res.modelUsage.tokens.promptTokens,\n [axSpanAttributes.LLM_USAGE_OUTPUT_TOKENS]:\n res.modelUsage.tokens.completionTokens ?? 0,\n [axSpanAttributes.LLM_USAGE_TOTAL_TOKENS]:\n res.modelUsage.tokens.totalTokens,\n });\n }\n\n if (debug) {\n logEmbedResponse(res.embeddings, options?.logger ?? this.logger);\n }\n\n span?.end();\n return res;\n }\n\n private async buildHeaders(\n headers: Record<string, string> = {}\n ): Promise<Record<string, string>> {\n return { ...headers, ...(await this.headers()) };\n }\n\n private getModelByKey(\n modelName?: TModel | TEmbedModel\n ): AxAIInputModelList<TModel, TEmbedModel>[number] | undefined {\n if (!modelName) {\n return undefined;\n }\n const item = this.models?.find((v) => v.key === modelName);\n return item;\n }\n\n private getModel(modelName?: TModel): TModel | undefined {\n const item = this.getModelByKey(modelName);\n return item && 'model' in item ? item.model : undefined;\n }\n\n private getEmbedModel(modelName?: TEmbedModel): TEmbedModel | undefined {\n const item = this.getModelByKey(modelName);\n return item && 'embedModel' in item ? 
item.embedModel : undefined;\n }\n}\n\nexport function setChatRequestEvents(\n req: Readonly<AxChatRequest<unknown>>,\n span: Span,\n excludeContentFromTrace?: boolean\n): void {\n const userMessages: string[] = [];\n\n if (\n req.chatPrompt &&\n Array.isArray(req.chatPrompt) &&\n req.chatPrompt.length > 0\n ) {\n for (const prompt of req.chatPrompt) {\n switch (prompt.role) {\n case 'system':\n if (prompt.content) {\n const eventData: { content?: string } = {};\n if (!excludeContentFromTrace) {\n eventData.content = prompt.content;\n }\n span.addEvent(axSpanEvents.GEN_AI_SYSTEM_MESSAGE, eventData);\n }\n break;\n case 'user':\n if (typeof prompt.content === 'string') {\n userMessages.push(prompt.content);\n } else if (Array.isArray(prompt.content)) {\n for (const part of prompt.content) {\n if (part.type === 'text') {\n userMessages.push(part.text);\n }\n }\n }\n break;\n case 'assistant': {\n const functionCalls = prompt.functionCalls?.map((call) => {\n return {\n id: call.id,\n type: call.type,\n function: call.function.name,\n arguments: call.function.params,\n };\n });\n\n if (functionCalls && functionCalls.length > 0) {\n const eventData: { content?: string; function_calls: string } = {\n function_calls: JSON.stringify(functionCalls, null, 2),\n };\n if (!excludeContentFromTrace && prompt.content) {\n eventData.content = prompt.content;\n }\n span.addEvent(axSpanEvents.GEN_AI_ASSISTANT_MESSAGE, eventData);\n } else if (prompt.content) {\n const eventData: { content?: string } = {};\n if (!excludeContentFromTrace) {\n eventData.content = prompt.content;\n }\n span.addEvent(axSpanEvents.GEN_AI_ASSISTANT_MESSAGE, eventData);\n }\n break;\n }\n\n case 'function': {\n const eventData: { content?: string; id: string } = {\n id: prompt.functionId,\n };\n if (!excludeContentFromTrace) {\n eventData.content = prompt.result;\n }\n span.addEvent(axSpanEvents.GEN_AI_TOOL_MESSAGE, eventData);\n break;\n }\n }\n }\n }\n\n // Always add user message event, even if empty\n const userEventData: { content?: string } = {};\n if (!excludeContentFromTrace) {\n userEventData.content = userMessages.join('\\n');\n }\n span.addEvent(axSpanEvents.GEN_AI_USER_MESSAGE, userEventData);\n}\n\nexport function setChatResponseEvents(\n res: Readonly<AxChatResponse>,\n span: Span,\n excludeContentFromTrace?: boolean\n) {\n if (res.modelUsage?.tokens) {\n const thoughTokens = res.modelUsage.tokens.thoughtsTokens\n ? {\n [axSpanAttributes.LLM_USAGE_THOUGHTS_TOKENS]:\n res.modelUsage.tokens.thoughtsTokens,\n }\n : {};\n span.addEvent(axSpanEvents.GEN_AI_USAGE, {\n [axSpanAttributes.LLM_USAGE_INPUT_TOKENS]:\n res.modelUsage.tokens.promptTokens,\n [axSpanAttributes.LLM_USAGE_OUTPUT_TOKENS]:\n res.modelUsage.tokens.completionTokens ?? 
0,\n [axSpanAttributes.LLM_USAGE_TOTAL_TOKENS]:\n res.modelUsage.tokens.totalTokens,\n ...thoughTokens,\n });\n }\n\n if (!res.results) {\n return;\n }\n\n for (let index = 0; index < res.results.length; index++) {\n const result = res.results[index];\n if (!result) {\n continue;\n }\n\n // Skip empty results that have no meaningful content to avoid empty GEN_AI_CHOICE events\n if (\n !result.content &&\n !result.thought &&\n !result.functionCalls?.length &&\n !result.finishReason\n ) {\n continue;\n }\n\n const toolCalls = result.functionCalls?.map((call) => {\n return {\n id: call.id,\n type: call.type,\n function: call.function.name,\n arguments: call.function.params,\n };\n });\n\n const message: { content?: string; tool_calls?: unknown[] } = {};\n\n if (toolCalls && toolCalls.length > 0) {\n if (!excludeContentFromTrace) {\n message.content = result.content;\n }\n message.tool_calls = toolCalls;\n } else {\n if (!excludeContentFromTrace) {\n message.content = result.content ?? '';\n }\n }\n\n span.addEvent(axSpanEvents.GEN_AI_CHOICE, {\n finish_reason: result.finishReason,\n index,\n message: JSON.stringify(message, null, 2),\n });\n }\n}\n\nexport function validateAxMessageArray<T>(values: T[]): void {\n // Validate AxMessage array items\n for (let i = 0; i < values.length; i++) {\n const message = values[i];\n if (!message || typeof message !== 'object') {\n throw new Error(\n `AxMessage array validation failed: Item at index ${i} is not a valid message object`\n );\n }\n if (\n 'content' in message &&\n typeof message.content === 'string' &&\n message.content.trim() === ''\n ) {\n throw new Error(\n `AxMessage array validation failed: Item at index ${i} has empty content`\n );\n }\n }\n}\n\nfunction validateModels<TModel, TEmbedModel>(\n models: Readonly<AxAIInputModelList<TModel, TEmbedModel>>\n): void {\n // Validate duplicate keys in models.\n const keys = new Set<string>();\n for (const model of models) {\n if (keys.has(model.key)) {\n throw new Error(\n `Duplicate model key detected: \"${model.key}\". 
Each model key must be unique.`\n );\n }\n keys.add(model.key);\n }\n}\n","import type { AxAPI } from '../../util/apicall.js';\nimport {\n AxBaseAI,\n axBaseAIDefaultConfig,\n axBaseAIDefaultCreativeConfig,\n} from '../base.js';\nimport type {\n AxAIInputModelList,\n AxAIPromptConfig,\n AxAIServiceImpl,\n AxAIServiceOptions,\n AxChatResponse,\n AxInternalChatRequest,\n AxModelConfig,\n AxTokenUsage,\n} from '../types.js';\n\nimport { axModelInfoHuggingFace } from './info.js';\nimport {\n type AxAIHuggingFaceConfig,\n AxAIHuggingFaceModel,\n type AxAIHuggingFaceRequest,\n type AxAIHuggingFaceResponse,\n} from './types.js';\n\nexport const axAIHuggingFaceDefaultConfig = (): AxAIHuggingFaceConfig =>\n structuredClone({\n model: AxAIHuggingFaceModel.MetaLlama270BChatHF,\n ...axBaseAIDefaultConfig(),\n });\n\nexport const axAIHuggingFaceCreativeConfig = (): AxAIHuggingFaceConfig =>\n structuredClone({\n model: AxAIHuggingFaceModel.MetaLlama270BChatHF,\n ...axBaseAIDefaultCreativeConfig(),\n });\n\nexport interface AxAIHuggingFaceArgs {\n name: 'huggingface';\n apiKey: string;\n config?: Readonly<Partial<AxAIHuggingFaceConfig>>;\n options?: Readonly<AxAIServiceOptions>;\n models?: AxAIInputModelList<AxAIHuggingFaceModel, undefined>;\n}\n\nclass AxAIHuggingFaceImpl\n implements\n AxAIServiceImpl<\n AxAIHuggingFaceModel,\n unknown,\n AxAIHuggingFaceRequest,\n unknown,\n AxAIHuggingFaceResponse,\n unknown,\n unknown\n >\n{\n private tokensUsed: AxTokenUsage | undefined;\n\n constructor(private config: AxAIHuggingFaceConfig) {}\n\n getTokenUsage(): AxTokenUsage | undefined {\n return this.tokensUsed;\n }\n\n getModelConfig(): AxModelConfig {\n const { config } = this;\n return {\n maxTokens: config.maxTokens,\n temperature: config.temperature,\n topP: config.topP,\n topK: config.topK,\n n: config.n,\n presencePenalty: config.presencePenalty,\n } as AxModelConfig;\n }\n\n createChatReq = (\n req: Readonly<AxInternalChatRequest<AxAIHuggingFaceModel>>,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _config: Readonly<AxAIPromptConfig>\n ): [AxAPI, AxAIHuggingFaceRequest] => {\n const model = req.model;\n\n const functionsList = req.functions\n ? `Functions:\\n${JSON.stringify(req.functions, null, 2)}\\n`\n : '';\n\n const prompt = req.chatPrompt\n ?.map((msg) => {\n switch (msg.role) {\n case 'user':\n return `User: ${msg.content}`;\n case 'system':\n return `System: ${msg.content}`;\n case 'function':\n return `Function Result: ${msg.result}`;\n case 'assistant': {\n const fc = msg.functionCalls\n ?.map((fc) => {\n const args =\n typeof fc.function.params === 'string'\n ? fc.function.params\n : JSON.stringify(fc.function.params);\n\n return `${fc.function.name}(${args})`;\n })\n .join('\\n');\n if (fc) {\n return `Assistant: ${msg.content}\\n Functions:\\n${fc}`;\n }\n return `Assistant: ${msg.content}`;\n }\n default:\n throw new Error('Unknown role');\n }\n\n //return `${msg.role}: ${msg.content}`;\n })\n .join('\\n');\n\n const inputs = `${functionsList} ${prompt}`.trim();\n\n const apiConfig = {\n name: '/models',\n };\n\n const reqValue: AxAIHuggingFaceRequest = {\n model,\n inputs,\n parameters: {\n max_new_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,\n repetition_penalty:\n req.modelConfig?.presencePenalty ?? this.config.presencePenalty,\n temperature: req.modelConfig?.temperature ?? this.config.temperature,\n top_p: req.modelConfig?.topP ?? this.config.topP,\n top_k: req.modelConfig?.topK ?? 
this.config.topK,\n return_full_text: this.config.returnFullText,\n num_return_sequences: this.config.n,\n do_sample: this.config.doSample,\n max_time: this.config.maxTime,\n },\n options: {\n use_cache: this.config.useCache,\n wait_for_model: this.config.waitForModel,\n },\n };\n\n return [apiConfig, reqValue];\n };\n\n createChatResp = (\n resp: Readonly<AxAIHuggingFaceResponse>\n ): AxChatResponse => {\n return {\n results: [\n {\n index: 0,\n content: resp.generated_text,\n },\n ],\n };\n };\n}\n\nexport class AxAIHuggingFace extends AxBaseAI<\n AxAIHuggingFaceModel,\n unknown,\n AxAIHuggingFaceRequest,\n unknown,\n AxAIHuggingFaceResponse,\n unknown,\n unknown\n> {\n constructor({\n apiKey,\n config,\n options,\n models,\n }: Readonly<Omit<AxAIHuggingFaceArgs, 'name'>>) {\n if (!apiKey || apiKey === '') {\n throw new Error('HuggingFace API key not set');\n }\n const Config = {\n ...axAIHuggingFaceDefaultConfig(),\n ...config,\n };\n\n const aiImpl = new AxAIHuggingFaceImpl(Config);\n\n super(aiImpl, {\n name: 'HuggingFace',\n apiURL: 'https://api-inference.huggingface.co',\n headers: async () => ({ Authorization: `Bearer ${apiKey}` }),\n modelInfo: axModelInfoHuggingFace,\n defaults: { model: Config.model },\n options,\n supportFor: { functions: false, streaming: false },\n models,\n });\n }\n}\n","// cspell:ignore mistral, mixtral, codestral, nemo\n\nexport enum AxAIMistralModel {\n Mistral7B = 'open-mistral-7b',\n Mistral8x7B = 'open-mixtral-8x7b',\n MistralSmall = 'mistral-small-latest',\n MistralNemo = 'mistral-nemo-latest',\n MistralLarge = 'mistral-large-latest',\n Codestral = 'codestral-latest',\n OpenCodestralMamba = 'open-codestral-mamba',\n OpenMistralNemo = 'open-mistral-nemo-latest',\n}\n\nexport enum AxAIMistralEmbedModels {\n MistralEmbed = 'mistral-embed',\n}\n","import type {\n AxAIOpenAIEmbedRequest,\n AxAIOpenAIEmbedResponse,\n AxAPI,\n} from '@ax-llm/ax/index.js';\nimport { AxAIRefusalError } from '../../util/apicall.js';\nimport type {\n AxAIPromptConfig,\n AxAIServiceImpl,\n AxChatRequest,\n AxChatResponse,\n AxChatResponseResult,\n AxInternalChatRequest,\n AxInternalEmbedRequest,\n AxModelConfig,\n AxTokenUsage,\n} from '../types.js';\nimport type {\n AxAIOpenAIResponsesCodeInterpreterToolCall,\n AxAIOpenAIResponsesComputerToolCall,\n AxAIOpenAIResponsesConfig,\n AxAIOpenAIResponsesDefineFunctionTool,\n AxAIOpenAIResponsesFileSearchToolCall,\n AxAIOpenAIResponsesImageGenerationToolCall,\n AxAIOpenAIResponsesInputContentPart,\n AxAIOpenAIResponsesInputItem,\n AxAIOpenAIResponsesInputMessageItem,\n AxAIOpenAIResponsesLocalShellToolCall,\n AxAIOpenAIResponsesMCPToolCall,\n AxAIOpenAIResponsesOutputRefusalContentPart,\n AxAIOpenAIResponsesOutputTextContentPart,\n AxAIOpenAIResponsesRequest,\n AxAIOpenAIResponsesResponse,\n AxAIOpenAIResponsesResponseDelta,\n AxAIOpenAIResponsesStreamEvent,\n AxAIOpenAIResponsesToolDefinition,\n AxAIOpenAIResponsesWebSearchToolCall,\n Mutable,\n RequestFunctionDefinition,\n ResponsesReqUpdater,\n UserMessageContentItem,\n} from './responses_types.js';\nimport { AxAIOpenAIResponsesModel } from './responses_types.js';\n\n/**\n * Checks if the given OpenAI Responses model is a thinking/reasoning model.\n * Thinking models (o1, o3, o4 series) have different parameter restrictions.\n */\nexport const isOpenAIResponsesThinkingModel = (model: string): boolean => {\n const thinkingModels = [\n AxAIOpenAIResponsesModel.O1,\n AxAIOpenAIResponsesModel.O1Mini,\n AxAIOpenAIResponsesModel.O1Pro,\n AxAIOpenAIResponsesModel.O3,\n 
AxAIOpenAIResponsesModel.O3Mini,\n AxAIOpenAIResponsesModel.O3Pro,\n AxAIOpenAIResponsesModel.O4Mini,\n ];\n return thinkingModels.includes(model as AxAIOpenAIResponsesModel);\n};\n\nexport class AxAIOpenAIResponsesImpl<\n TModel,\n TEmbedModel, // Kept for interface compatibility, but not used by this impl.\n TResponsesReq extends AxAIOpenAIResponsesRequest<TModel>,\n> implements\n AxAIServiceImpl<\n TModel,\n TEmbedModel,\n Readonly<AxAIOpenAIResponsesRequest<TModel>>, // ChatReq (now ResponsesReq)\n Readonly<AxAIOpenAIEmbedRequest<TEmbedModel>>, // EmbedReq\n Readonly<AxAIOpenAIResponsesResponse>, // ChatResp (now ResponsesResp)\n Readonly<AxAIOpenAIResponsesResponseDelta>, // ChatRespDelta (now ResponsesRespDelta)\n Readonly<AxAIOpenAIEmbedResponse> // EmbedResp\n >\n{\n private tokensUsed: AxTokenUsage | undefined;\n\n constructor(\n private readonly config: Readonly<\n AxAIOpenAIResponsesConfig<TModel, TEmbedModel>\n >,\n private readonly streamingUsage: boolean, // If /v1/responses supports include_usage for streams\n private readonly responsesReqUpdater?: ResponsesReqUpdater<\n TModel,\n TResponsesReq\n >\n ) {}\n\n getTokenUsage(): Readonly<AxTokenUsage> | undefined {\n return this.tokensUsed;\n }\n\n getModelConfig(): Readonly<AxModelConfig> {\n const { config } = this;\n return {\n maxTokens: config.maxTokens, // maps to max_output_tokens\n temperature: config.temperature,\n // presencePenalty, frequencyPenalty are not direct params in /v1/responses\n stopSequences: config.stopSequences, // /v1/responses uses 'truncation' or relies on item structure\n topP: config.topP,\n // n: config.n, // Not a direct parameter in /v1/responses\n stream: config.stream,\n };\n }\n\n private mapInternalContentToResponsesInput(\n content: ReadonlyArray<UserMessageContentItem> // Expects an array of content items, string case handled by caller\n ): ReadonlyArray<AxAIOpenAIResponsesInputContentPart> {\n const mappedParts: Mutable<AxAIOpenAIResponsesInputContentPart>[] =\n content.map((part: UserMessageContentItem) => {\n // AxUserMessageContentItem ensures part is one of {type: text}, {type: image}, {type: audio}\n if (part.type === 'text') {\n return { type: 'text', text: part.text };\n }\n if (part.type === 'image') {\n const url = `data:${part.mimeType};base64,${part.image}`;\n return {\n type: 'image_url',\n image_url: { url, details: part.details ?? 'auto' },\n };\n }\n if (part.type === 'audio') {\n return {\n type: 'input_audio',\n input_audio: { data: part.data, format: part.format ?? 
'wav' },\n };\n }\n // This should be exhaustive given AxUserMessageContentItem's definition\n const ExhaustiveCheck: never = part;\n throw new Error(\n `Unsupported content part: ${JSON.stringify(ExhaustiveCheck)}`\n );\n });\n return mappedParts as ReadonlyArray<AxAIOpenAIResponsesInputContentPart>;\n }\n\n private createResponsesReqInternalInput(\n chatPrompt: ReadonlyArray<AxChatRequest<TModel>['chatPrompt'][number]>,\n excludeSystemMessages = false // New parameter\n ): ReadonlyArray<AxAIOpenAIResponsesInputItem> {\n // Map from AxChatPromptItemType roles to AxAIOpenAI /v1/responses API roles:\n // - 'system' -> 'system' (may be skipped if excludeSystemMessages is true)\n // - 'user' -> 'user'\n // - 'assistant' -> 'assistant'\n // - 'function' -> Special handling for function call outputs (different structure)\n //\n // Note: AxAIOpenAI's /v1/responses API also supports a 'developer' role that isn't\n // currently mapped from our AxChatPromptItemType structure.\n\n const items: Mutable<AxAIOpenAIResponsesInputItem>[] = [];\n for (const msg of chatPrompt) {\n if (excludeSystemMessages && msg.role === 'system') {\n continue; // Skip system messages if they are handled by top-level 'instructions'\n }\n\n let mappedContent:\n | string\n | ReadonlyArray<AxAIOpenAIResponsesInputContentPart>;\n // Type guard for content based on role\n if (\n msg.role === 'system' ||\n msg.role === 'user' ||\n (msg.role === 'assistant' && msg.content)\n ) {\n if (typeof msg.content === 'string') {\n mappedContent = msg.content;\n } else if (Array.isArray(msg.content)) {\n // Only for user role typically\n mappedContent = this.mapInternalContentToResponsesInput(\n msg.content as ReadonlyArray<UserMessageContentItem>\n );\n } else {\n // Handle cases where content might be undefined for assistant, or unexpected type\n if (msg.role === 'assistant' && !msg.content && msg.functionCalls) {\n // This is fine, assistant message can be just functionCalls\n } else {\n throw new Error(`Invalid content type for role ${msg.role}`);\n }\n mappedContent = ''; // Default or skip\n }\n } else if (msg.role === 'function') {\n // Function role does not have 'content' in the same way, it has 'result'\n mappedContent = ''; // Placeholder, not directly used for content field in function_call_output\n } else {\n mappedContent = ''; // Default for roles that might not have content or are handled differently\n }\n\n switch (msg.role) {\n case 'system': // Will be skipped if excludeSystemMessages is true\n items.push({\n type: 'message',\n role: 'system',\n content: mappedContent as string,\n });\n break;\n case 'user':\n items.push({\n type: 'message',\n role: 'user',\n content: mappedContent,\n name: msg.name,\n });\n break;\n case 'assistant':\n if (msg.content || msg.functionCalls) {\n // Assistant can have content, functionCalls, or both\n const assistantMessage: Mutable<AxAIOpenAIResponsesInputMessageItem> =\n {\n type: 'message',\n role: 'assistant',\n content: '',\n }; // Start with empty content\n if (msg.content) {\n assistantMessage.content = mappedContent;\n }\n if (msg.name) {\n assistantMessage.name = msg.name;\n }\n // If only function calls, content might remain empty or not be applicable in the same way for AxAIOpenAI item\n // AxAIOpenAI /v1/responses expects assistant messages with tool calls to be structured carefully.\n // For now, pushing the textual content if present. 
Tool calls are separate items.\n if (msg.content)\n items.push(\n assistantMessage as AxAIOpenAIResponsesInputMessageItem\n );\n\n if (msg.functionCalls) {\n for (const call of msg.functionCalls) {\n items.push({\n type: 'function_call',\n call_id: call.id,\n name: call.function.name,\n arguments:\n typeof call.function.params === 'object'\n ? JSON.stringify(call.function.params)\n : call.function.params || '',\n });\n }\n }\n }\n break;\n case 'function': // This is a tool result\n items.push({\n type: 'function_call_output',\n call_id: msg.functionId!,\n output: msg.result!,\n });\n break;\n default: {\n // Fix for any type\n const invalidRole = (msg as { role: string }).role;\n throw new Error(`Invalid role in chat prompt: ${invalidRole}`);\n }\n }\n }\n return items as ReadonlyArray<AxAIOpenAIResponsesInputItem>;\n }\n\n createChatReq(\n req: Readonly<AxInternalChatRequest<TModel>>,\n config: Readonly<AxAIPromptConfig>\n ): [Readonly<AxAPI>, Readonly<AxAIOpenAIResponsesRequest<TModel>>] {\n const model = req.model;\n const apiConfig: Readonly<AxAPI> = { name: '/responses' };\n\n let instructionsFromPrompt: string | null = null;\n let systemMessageFoundAndUsed = false;\n if (req.chatPrompt) {\n for (const item of req.chatPrompt) {\n if (item.role === 'system' && typeof item.content === 'string') {\n instructionsFromPrompt = item.content;\n systemMessageFoundAndUsed = true;\n break;\n }\n }\n }\n\n const finalInstructions =\n instructionsFromPrompt ?? this.config.systemPrompt ?? null;\n\n const tools: ReadonlyArray<AxAIOpenAIResponsesToolDefinition> | undefined =\n req.functions?.map(\n (\n v: Readonly<RequestFunctionDefinition>\n ): AxAIOpenAIResponsesDefineFunctionTool => ({\n type: 'function' as const,\n name: v.name,\n description: v.description,\n parameters: v.parameters ?? {},\n })\n );\n\n // Set include field based on showThoughts option, but override if thinkingTokenBudget is 'none'\n const includeFields: // | 'file_search_call.results'\n 'message.input_image.image_url'[] =\n // | 'computer_call_output.output.image_url'\n // | 'reasoning.encrypted_content'\n // | 'code_interpreter_call.outputs'\n [];\n\n const isThinkingModel = isOpenAIResponsesThinkingModel(model as string);\n\n let reasoningSummary = this.config.reasoningSummary;\n\n if (!config?.showThoughts) {\n reasoningSummary = undefined;\n } else if (!reasoningSummary) {\n reasoningSummary = 'auto';\n }\n\n let reasoningEffort = this.config.reasoningEffort;\n\n // Handle thinkingTokenBudget config parameter\n if (config?.thinkingTokenBudget) {\n switch (config.thinkingTokenBudget) {\n case 'none':\n reasoningEffort = undefined;\n break;\n case 'minimal':\n reasoningEffort = 'low';\n break;\n case 'low':\n reasoningEffort = 'medium';\n break;\n case 'medium':\n case 'high':\n case 'highest':\n reasoningEffort = 'high';\n break;\n }\n }\n\n const mutableReq: Mutable<AxAIOpenAIResponsesRequest<TModel>> = {\n model,\n input: '', // Will be set below\n instructions: finalInstructions,\n tools: tools?.length ? tools : undefined,\n tool_choice:\n req.functionCall === 'none' ||\n req.functionCall === 'auto' ||\n req.functionCall === 'required'\n ? req.functionCall\n : typeof req.functionCall === 'object' && req.functionCall.function\n ? { type: 'function', name: req.functionCall.function.name }\n : undefined,\n // For thinking models, don't set these parameters as they're not supported\n ...(isThinkingModel\n ? {\n max_output_tokens:\n req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 
undefined,\n }\n : {\n temperature:\n req.modelConfig?.temperature ??\n this.config.temperature ??\n undefined,\n top_p: req.modelConfig?.topP ?? this.config.topP ?? undefined,\n presence_penalty:\n req.modelConfig?.presencePenalty ??\n this.config.presencePenalty ??\n undefined,\n frequency_penalty:\n req.modelConfig?.frequencyPenalty ??\n this.config.frequencyPenalty ??\n undefined,\n }),\n stream: req.modelConfig?.stream ?? this.config.stream ?? false, // Sourced from modelConfig or global config\n // Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization\n background: undefined,\n include: includeFields.length > 0 ? includeFields : undefined,\n metadata: undefined,\n parallel_tool_calls: this.config.parallelToolCalls,\n previous_response_id: undefined,\n ...(reasoningEffort\n ? {\n reasoning: {\n effort: reasoningEffort,\n summary: reasoningSummary,\n },\n }\n : {}),\n service_tier: this.config.serviceTier,\n store: this.config.store,\n text: undefined,\n truncation: undefined,\n user: this.config.user,\n seed: this.config.seed,\n };\n\n // Populate from this.config if properties exist on AxAIOpenAIConfig\n if (this.config.user) mutableReq.user = this.config.user;\n if (this.config.parallelToolCalls !== undefined)\n mutableReq.parallel_tool_calls = this.config.parallelToolCalls;\n if (this.config.responseFormat)\n mutableReq.text = {\n format: {\n type: this.config.responseFormat as\n | 'text'\n | 'json_object'\n | 'json_schema',\n },\n };\n if (this.config.seed) mutableReq.seed = this.config.seed;\n // TODO: Check AxAIOpenAIConfig for other fields like store, background, include, metadata, service_tier, truncation\n\n const inputItems = req.chatPrompt\n ? this.createResponsesReqInternalInput(\n req.chatPrompt,\n systemMessageFoundAndUsed\n )\n : [];\n\n if (inputItems.length > 0) {\n mutableReq.input = inputItems;\n } else if (\n req.chatPrompt &&\n req.chatPrompt.length === 1 &&\n req.chatPrompt[0]?.role === 'user' &&\n req.chatPrompt[0]?.content &&\n typeof req.chatPrompt[0].content === 'string' &&\n !finalInstructions\n ) {\n // Fallback to simple string input if only one user message and no instructions\n mutableReq.input = req.chatPrompt[0].content;\n } else if (inputItems.length === 0 && !finalInstructions) {\n throw new Error('Responses API request must have input or instructions.');\n }\n\n let currentReasoning = mutableReq.reasoning ?? 
{};\n if (this.config.reasoningEffort) {\n currentReasoning = {\n ...currentReasoning,\n effort: this.config.reasoningEffort,\n };\n }\n\n // Handle thinkingTokenBudget config parameter\n if (config?.thinkingTokenBudget) {\n switch (config.thinkingTokenBudget) {\n case 'none':\n // When thinkingTokenBudget is 'none', remove reasoning entirely\n currentReasoning = {};\n break;\n case 'minimal':\n currentReasoning = {\n ...currentReasoning,\n effort: 'low',\n };\n break;\n case 'low':\n currentReasoning = {\n ...currentReasoning,\n effort: 'medium',\n };\n break;\n case 'medium':\n case 'high':\n case 'highest':\n currentReasoning = {\n ...currentReasoning,\n effort: 'high',\n };\n break;\n }\n }\n\n if (Object.keys(currentReasoning).length > 0 && currentReasoning.effort) {\n mutableReq.reasoning = currentReasoning;\n } else {\n mutableReq.reasoning = undefined; // Ensure reasoning is not sent if empty or only has non-effort keys by mistake\n }\n\n let finalReqToProcess: Readonly<AxAIOpenAIResponsesRequest<TModel>> =\n mutableReq as Readonly<AxAIOpenAIResponsesRequest<TModel>>;\n\n if (this.responsesReqUpdater) {\n finalReqToProcess = this.responsesReqUpdater(\n finalReqToProcess as Readonly<TResponsesReq>\n );\n }\n\n return [apiConfig, finalReqToProcess];\n }\n\n // Create Chat Response from /v1/responses (non-streaming)\n createChatResp(\n resp: Readonly<AxAIOpenAIResponsesResponse>\n ): Readonly<AxChatResponse> {\n const { id, output, usage } = resp;\n\n if (usage) {\n this.tokensUsed = {\n promptTokens: usage.prompt_tokens,\n completionTokens: usage.completion_tokens,\n totalTokens: usage.total_tokens,\n };\n }\n\n const currentResult: Partial<AxChatResponseResult> = {};\n\n for (const item of output ?? []) {\n switch (item.type) {\n case 'message':\n currentResult.id = item.id;\n currentResult.content = contentToText(item.content, id);\n currentResult.finishReason =\n item.status === 'completed' ? 'stop' : 'content_filter';\n break;\n\n case 'reasoning':\n currentResult.id = item.id;\n // Use encrypted_content if available (when showThoughts is enabled), otherwise use summary\n if (item.encrypted_content) {\n currentResult.thought = item.encrypted_content;\n } else {\n currentResult.thought = item.summary\n .map((s: string | object) =>\n typeof s === 'object' ? 
JSON.stringify(s) : s\n )\n .join('\\n');\n }\n break;\n\n case 'file_search_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'file_search',\n params: {\n queries: item.queries,\n results: item.results,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'web_search_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'web_search',\n params: {\n queries: item.queries,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'computer_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'computer_use',\n params: {\n action: item.action,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'code_interpreter_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'code_interpreter',\n params: {\n code: item.code,\n results: item.results,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'image_generation_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'image_generation',\n params: {\n result: item.result,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'local_shell_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'local_shell',\n params: {\n action: item.action,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'mcp_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: 'mcp',\n params: {\n name: item.name,\n args: item.args,\n serverLabel: item.server_label,\n output: item.output,\n error: item.error,\n },\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n case 'function_call':\n currentResult.id = item.id;\n currentResult.functionCalls = [\n {\n id: item.id,\n type: 'function' as const,\n function: {\n name: item.name,\n params: item.arguments,\n },\n },\n ];\n currentResult.finishReason = 'function_call';\n break;\n }\n }\n\n return {\n results: [{ ...currentResult, index: 0 }],\n remoteId: id,\n };\n }\n\n // Create Chat Stream Response from /v1/responses stream events\n createChatStreamResp(\n streamEvent: Readonly<AxAIOpenAIResponsesResponseDelta>\n ): Readonly<AxChatResponse> {\n // Handle new streaming event format\n const event = streamEvent as AxAIOpenAIResponsesStreamEvent;\n\n // Create a basic result structure\n const baseResult: AxChatResponseResult = {\n index: 0,\n id: '',\n content: '',\n finishReason: 'stop',\n };\n\n let remoteId: string | undefined;\n\n switch (event.type) {\n case 'response.created':\n case 'response.in_progress':\n case 'response.queued':\n // Response lifecycle events - return empty content with metadata\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_res_0`;\n break;\n\n case 'response.output_item.added':\n // New output item added\n switch (event.item.type) {\n case 'message':\n baseResult.id = event.item.id;\n baseResult.content = contentToText(\n event.item.content,\n event.item.id\n );\n break;\n 
case 'function_call':\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: event.item.id,\n type: 'function' as const,\n function: {\n name: event.item.name,\n params: event.item.arguments,\n },\n },\n ];\n break;\n case 'file_search_call':\n {\n const fileSearchItem =\n event.item as AxAIOpenAIResponsesFileSearchToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: fileSearchItem.id,\n type: 'function' as const,\n function: {\n name: 'file_search',\n params: {\n queries: fileSearchItem.queries || [],\n results: fileSearchItem.results?.map((r) => ({\n fileId: r.file_id,\n filename: r.filename,\n score: r.score,\n text: r.text,\n attributes: r.attributes,\n })),\n },\n },\n },\n ];\n }\n break;\n case 'web_search_call':\n {\n const webSearchItem =\n event.item as AxAIOpenAIResponsesWebSearchToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: webSearchItem.id,\n type: 'function' as const,\n function: {\n name: 'web_search',\n params: {\n queries: webSearchItem.queries || [],\n },\n },\n },\n ];\n }\n break;\n case 'computer_call':\n {\n const computerItem =\n event.item as AxAIOpenAIResponsesComputerToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: computerItem.id,\n type: 'function' as const,\n function: {\n name: 'computer_use',\n params: {\n action: computerItem.action || {},\n },\n },\n },\n ];\n }\n break;\n case 'code_interpreter_call':\n {\n const codeItem =\n event.item as AxAIOpenAIResponsesCodeInterpreterToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: codeItem.id,\n type: 'function' as const,\n function: {\n name: 'code_interpreter',\n params: {\n code: codeItem.code || '',\n results: codeItem.results,\n },\n },\n },\n ];\n }\n break;\n case 'image_generation_call':\n {\n const imageItem =\n event.item as AxAIOpenAIResponsesImageGenerationToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: imageItem.id,\n type: 'function' as const,\n function: {\n name: 'image_generation',\n params: {\n result: imageItem.result,\n },\n },\n },\n ];\n }\n break;\n case 'local_shell_call':\n {\n const shellItem =\n event.item as AxAIOpenAIResponsesLocalShellToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: shellItem.id,\n type: 'function' as const,\n function: {\n name: 'local_shell',\n params: {\n action: shellItem.action || {},\n },\n },\n },\n ];\n }\n break;\n case 'mcp_call':\n {\n const mcpItem = event.item as AxAIOpenAIResponsesMCPToolCall;\n baseResult.id = event.item.id;\n baseResult.functionCalls = [\n {\n id: mcpItem.id,\n type: 'function' as const,\n function: {\n name: 'mcp',\n params: {\n name: mcpItem.name || '',\n args: mcpItem.args || '',\n serverLabel: mcpItem.server_label || '',\n output: mcpItem.output,\n error: mcpItem.error,\n },\n },\n },\n ];\n }\n break;\n // case 'reasoning':\n // {\n // const reasoningItem =\n // event.item as AxAIOpenAIResponsesReasoningItem\n // baseResult.id = event.item.id\n // // Use encrypted_content if available (when showThoughts is enabled), otherwise use summary\n // if (reasoningItem.encrypted_content) {\n // baseResult.thought = reasoningItem.encrypted_content\n // } else if (reasoningItem.summary) {\n // baseResult.thought = reasoningItem.summary\n // .map((s: string | object) =>\n // typeof s === 'object' ? 
JSON.stringify(s) : s\n // )\n // .join('\\n')\n // }\n // }\n // break\n }\n break;\n\n case 'response.content_part.added':\n // Content part added - return the initial text if any\n baseResult.id = event.item_id;\n baseResult.content = contentToText([event.part], event.item_id);\n break;\n\n case 'response.output_text.delta':\n // Text delta - return just the delta content\n baseResult.id = event.item_id;\n baseResult.content = event.delta;\n break;\n\n case 'response.output_text.done':\n break;\n\n case 'response.function_call_arguments.delta':\n // Function call arguments delta - return delta with empty name\n baseResult.id = event.item_id;\n baseResult.functionCalls = [\n {\n id: event.item_id,\n type: 'function' as const,\n function: {\n name: '',\n params: event.delta,\n },\n },\n ];\n break;\n\n // case 'response.function_call_arguments.done':\n // // Function call arguments done - don't return function calls here\n // // The mergeFunctionCalls will handle combining name and arguments\n // baseResult.id = event.item_id\n // baseResult.finishReason = 'function_call'\n // break\n\n case 'response.reasoning_summary_text.delta':\n // Reasoning summary delta\n baseResult.id = event.item_id;\n baseResult.thought = event.delta;\n break;\n\n // case 'response.reasoning_summary_text.done':\n // // Reasoning summary done\n // baseResult.id = event.item_id\n // baseResult.thought = event.text\n // break\n\n // File search tool events\n case 'response.file_search_call.in_progress':\n case 'response.file_search_call.searching':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.file_search_call.completed':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n // Web search tool events\n case 'response.web_search_call.in_progress':\n case 'response.web_search_call.searching':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.web_search_call.completed':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n // Image generation tool events\n case 'response.image_generation_call.in_progress':\n case 'response.image_generation_call.generating':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.image_generation_call.completed':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.image_generation_call.partial_image':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n // Could potentially add partial image data to content or a special field\n break;\n\n // MCP tool events\n case 'response.mcp_call.in_progress':\n baseResult.id = event.item_id;\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.mcp_call.arguments.delta':\n baseResult.id = event.item_id;\n baseResult.functionCalls = [\n {\n id: event.item_id,\n type: 'function' as const,\n function: {\n name: '',\n params: event.delta,\n },\n },\n ];\n break;\n\n case 'response.mcp_call.arguments.done':\n baseResult.id = event.item_id;\n baseResult.functionCalls = [\n {\n id: event.item_id,\n type: 'function' as const,\n function: {\n name: '',\n params: event.arguments,\n },\n },\n ];\n break;\n\n case 'response.mcp_call.completed':\n case 'response.mcp_call.failed':\n // These events don't have item_id, use a generic ID\n baseResult.id = 'mcp_call_event';\n baseResult.finishReason = 'function_call';\n break;\n\n 
case 'response.mcp_list_tools.in_progress':\n case 'response.mcp_list_tools.completed':\n case 'response.mcp_list_tools.failed':\n // MCP list tools events don't have item_id\n baseResult.id = 'mcp_list_tools_event';\n baseResult.finishReason = 'function_call';\n break;\n\n case 'response.output_item.done':\n // Item completion\n\n switch (event.item.type) {\n case 'message':\n baseResult.id = event.item.id;\n baseResult.finishReason =\n event.item.status === 'completed' ? 'stop' : 'error';\n break;\n case 'function_call':\n case 'file_search_call':\n case 'web_search_call':\n case 'computer_call':\n case 'code_interpreter_call':\n case 'image_generation_call':\n case 'local_shell_call':\n case 'mcp_call':\n // Tool calls completed - finishReason indicates function execution needed\n baseResult.id = event.item.id;\n baseResult.finishReason = 'function_call';\n break;\n // case 'reasoning':\n // // Reasoning completed\n // baseResult.id = event.item.id\n // break\n }\n break;\n\n case 'response.completed':\n // Response completion - handle usage\n if (event.response.usage) {\n this.tokensUsed = {\n promptTokens: event.response.usage.prompt_tokens,\n completionTokens: event.response.usage.completion_tokens,\n totalTokens: event.response.usage.total_tokens,\n };\n }\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_completed`;\n baseResult.finishReason = 'stop';\n break;\n\n case 'response.failed':\n // Response failure\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_failed`;\n baseResult.finishReason = 'error';\n break;\n\n case 'response.incomplete':\n // Response incomplete\n remoteId = event.response.id;\n baseResult.id = `${event.response.id}_incomplete`;\n baseResult.finishReason = 'length';\n break;\n\n case 'error':\n // Error event\n baseResult.id = 'error';\n baseResult.content = `Error: ${event.message}`;\n baseResult.finishReason = 'error';\n break;\n\n default:\n // For unhandled events, return empty result\n baseResult.id = 'unknown';\n break;\n }\n\n return {\n results: [baseResult],\n remoteId,\n } as Readonly<AxChatResponse>;\n }\n\n createEmbedReq(\n req: Readonly<AxInternalEmbedRequest<TEmbedModel>>\n ): [AxAPI, AxAIOpenAIEmbedRequest<TEmbedModel>] {\n const model = req.embedModel;\n\n if (!model) {\n throw new Error('Embed model not set');\n }\n\n if (!req.texts || req.texts.length === 0) {\n throw new Error('Embed texts is empty');\n }\n\n const apiConfig = {\n name: '/embeddings',\n };\n\n const reqValue = {\n model: model,\n input: req.texts,\n dimensions: this.config.dimensions,\n };\n\n return [apiConfig, reqValue];\n }\n}\n\n// const getThought = (item: AxAIOpenAIResponsesReasoningItem): string => {\n// if (item.encrypted_content) {\n// return item.encrypted_content\n// }\n// return item.summary.map((s) => s.text).join('\\n')\n// }\n\nconst contentToText = (\n content: ReadonlyArray<\n | AxAIOpenAIResponsesOutputTextContentPart\n | AxAIOpenAIResponsesOutputRefusalContentPart\n >,\n responseId?: string\n): string => {\n // Check for refusal content and throw exception\n const refusalContent = content.filter((c) => c.type === 'refusal');\n if (refusalContent.length > 0) {\n const refusalMessage = refusalContent.map((c) => c.refusal).join('\\n');\n throw new AxAIRefusalError(refusalMessage, undefined, responseId);\n }\n\n // Return only text content\n return content\n .filter((c) => c.type === 'output_text')\n .map((c) => c.text)\n .join('\\n');\n};\n","import type { AxModelInfo } from '../types.js';\n\nimport { 
AxAIRekaModel } from './types.js';\n/**\n * OpenAI: Model information\n */\nexport const axModelInfoReka: AxModelInfo[] = [\n {\n name: AxAIRekaModel.RekaCore,\n currency: 'usd',\n promptTokenCostPer1M: 3,\n completionTokenCostPer1M: 15,\n },\n {\n name: AxAIRekaModel.RekaFlash,\n currency: 'usd',\n promptTokenCostPer1M: 0.8,\n completionTokenCostPer1M: 2,\n },\n {\n name: AxAIRekaModel.RekaEdge,\n currency: 'usd',\n promptTokenCostPer1M: 0.4,\n completionTokenCostPer1M: 1,\n },\n];\n","import { apiCall } from '../util/apicall.js';\n\nimport { AxDBBase, type AxDBBaseArgs, type AxDBBaseOpOptions } from './base.js';\nimport type {\n AxDBQueryRequest,\n AxDBQueryResponse,\n AxDBUpsertRequest,\n AxDBUpsertResponse,\n} from './types.js';\n\nexport type AxDBWeaviateOpOptions = AxDBBaseOpOptions;\n\ntype AxWeaviateUpsertResponse = {\n id: string;\n result?: { errors?: { error: { message: string }[] } };\n};\n\ntype AxWeaviateQueryResponse = {\n errors?: { location: string; message: string; path: string }[];\n data: {\n Get: {\n [key: string]: {\n [key: string]: unknown;\n }[];\n };\n };\n};\n\nexport interface AxDBWeaviateArgs extends AxDBBaseArgs {\n name: 'weaviate';\n apiKey: string;\n host: string;\n fetch?: typeof fetch;\n}\n\n/**\n * Weaviate: DB Service\n */\nexport class AxDBWeaviate extends AxDBBase {\n private apiKey: string;\n private apiURL: string;\n\n constructor({\n apiKey,\n host,\n fetch,\n tracer,\n }: Readonly<Omit<AxDBWeaviateArgs, 'name'>>) {\n if (!apiKey || apiKey === '') {\n throw new Error('Weaviate API key not set');\n }\n super({ name: 'Weaviate', fetch, tracer });\n this.apiKey = apiKey;\n this.apiURL = host;\n }\n\n override _upsert = async (\n req: Readonly<AxDBUpsertRequest>,\n update?: boolean,\n options?: Readonly<AxDBWeaviateOpOptions>\n ): Promise<AxDBUpsertResponse> => {\n const res = (await apiCall(\n {\n url: this.apiURL,\n headers: { Authorization: `Bearer ${this.apiKey}` },\n name: `/v1/objects/${req.table}/${req.id}`,\n put: !!update,\n fetch: this.fetch,\n span: options?.span,\n },\n {\n id: req.id,\n class: req.table,\n tenant: req.namespace,\n vector: req.values,\n properties: req.metadata ?? {},\n }\n )) as AxWeaviateUpsertResponse;\n\n if (res?.result?.errors) {\n throw new Error(\n `Weaviate upsert failed: ${res.result.errors.error\n .map(({ message }) => message)\n .join(', ')}`\n );\n }\n\n return {\n ids: [res.id],\n };\n };\n\n override _batchUpsert = async (\n batchReq: Readonly<AxDBUpsertRequest[]>,\n update?: boolean,\n options?: Readonly<AxDBWeaviateOpOptions>\n ): Promise<AxDBUpsertResponse> => {\n if (update) {\n throw new Error('Weaviate does not support batch update');\n }\n if (batchReq.length === 0) {\n throw new Error('Batch request is empty');\n }\n const objects = batchReq.map((req) => ({\n id: req.id,\n class: req.table,\n tenant: req.namespace,\n vector: req.values,\n properties: req.metadata ?? 
{},\n }));\n\n const res = (await apiCall(\n {\n url: this.apiURL,\n headers: { Authorization: `Bearer ${this.apiKey}` },\n name: '/v1/batch/objects',\n fetch: this.fetch,\n span: options?.span,\n },\n { objects }\n )) as AxWeaviateUpsertResponse[];\n\n if (res?.some(({ result }) => result?.errors)) {\n throw new Error(\n `Weaviate batch upsert failed: ${res\n .map(({ result }) =>\n result?.errors?.error.map(({ message }) => message).join(', ')\n )\n .join(', ')}`\n );\n }\n\n return {\n ids: res.map(({ id }) => id),\n };\n };\n\n override _query = async (\n req: Readonly<AxDBQueryRequest>,\n options?: Readonly<AxDBWeaviateOpOptions>\n ): Promise<AxDBQueryResponse> => {\n let filter = '';\n\n if (req.columns && req.columns.length === 0) {\n throw new Error('Weaviate requires at least one column');\n }\n\n if (req.values) {\n filter = `nearVector: {\n vector: [${req.values.join(',')}],\n }`;\n } else if (req.text) {\n filter = `nearText: {\n concepts: ['${req.text}'],\n }`;\n } else {\n throw new Error('Weaviate requires either text or values');\n }\n\n const res = (await apiCall(\n {\n url: this.apiURL,\n headers: { Authorization: `Bearer ${this.apiKey}` },\n name: '/v1/graphql',\n fetch: this.fetch,\n span: options?.span,\n },\n {\n query: `{\n Get {\n ${req.table} (\n limit: ${req.limit || 10},\n ${filter}\n ) {\n ${req.columns?.join('\\n')}\n }\n }\n }`,\n }\n )) as AxWeaviateQueryResponse;\n\n if (res.errors) {\n throw new Error(\n `Weaviate query failed: ${res.errors\n .map(({ message }) => message)\n .join(', ')}`\n );\n }\n\n const resMatches = res.data.Get[req.table];\n\n if (!resMatches) {\n return { matches: [] };\n }\n\n const matches = resMatches.map((match) => {\n return {\n id: match.id as string,\n score: 1,\n metadata: match,\n };\n });\n return { matches } as AxDBQueryResponse;\n };\n}\n","import type { AxAIService } from '../ai/types.js';\nimport type { AxDBQueryResponse, AxDBService } from '../db/types.js';\nimport type { AxGen } from '../dsp/generate.js';\n\nexport type AxRewriteIn = { query: string };\nexport type AxRewriteOut = { rewrittenQuery: string };\n\nexport type AxRerankerIn = { query: string; items: string[] };\nexport type AxRerankerOut = { rankedItems: string[] };\n\nexport interface AxDBLoaderOptions {\n chunker?: (text: string) => string[];\n rewriter?: AxGen<AxRewriteIn, AxRewriteOut>;\n reranker?: AxGen<AxRerankerIn, AxRerankerOut>;\n}\n\nexport interface AxDBManagerArgs {\n ai: AxAIService;\n db: AxDBService;\n config?: AxDBLoaderOptions;\n}\n\nexport interface AxDBMatch {\n score: number;\n text: string;\n}\n\nconst table = '_internal';\n\nexport class AxDBManager {\n private ai: AxAIService;\n private db: AxDBService;\n private chunker: (text: string) => string[];\n private rewriter?: AxGen<AxRewriteIn, AxRewriteOut>;\n private reranker?: AxGen<AxRerankerIn, AxRerankerOut>;\n\n constructor({ ai, db, config }: Readonly<AxDBManagerArgs>) {\n this.ai = ai;\n this.db = db;\n this.chunker = config?.chunker ?? this.defaultChunker;\n this.reranker = config?.reranker;\n this.rewriter = config?.rewriter;\n }\n\n private defaultChunker = (text: string): string[] => {\n // Default chunking by paragraphs\n return text.split(/\\n\\n+/);\n };\n\n insert = async (\n text: Readonly<string | string[]>,\n options?: Readonly<{\n batchSize?: number;\n maxWordsPerChunk?: number;\n minWordsPerChunk?: number;\n abortSignal?: AbortSignal;\n }>\n ): Promise<void> => {\n try {\n const chunkerInput = Array.isArray(text)\n ? 
text.join('\\n\\n')\n : (text as string);\n\n // Chunk the text using the specified or default chunking function\n const initialChunks = this.chunker(chunkerInput).filter(\n (chunk) => chunk.length > 0\n );\n\n const maxWordsPerChunk = options?.maxWordsPerChunk;\n const minWordsPerChunk = options?.minWordsPerChunk;\n\n const chunks = processChunks({\n initialChunks,\n minWordsPerChunk,\n maxWordsPerChunk,\n });\n\n const bs = options?.batchSize ?? 10;\n\n // Process chunks in batches of 10\n for (let i = 0; i < chunks.length; i += bs) {\n const batch = chunks.slice(i, i + bs);\n\n // Get embeddings for the whole batch from the AI service in one call\n const ret = await this.ai.embed(\n { texts: batch },\n {\n abortSignal: options?.abortSignal,\n }\n );\n\n // Prepare batch for bulk upsert\n const embeddings = ret.embeddings\n .map((embedding, index) => ({\n id: `chunk_${Date.now() + index}`, // Unique ID for each chunk, adjusted by index\n table,\n values: embedding,\n metadata: { text: batch[index] ?? '' },\n }))\n .filter((v) => v.metadata?.text && v.metadata?.text.length > 0);\n\n // Batch upsert embeddings\n await this.db.batchUpsert(embeddings);\n }\n } catch (error) {\n throw new Error(`Error processing text: ${error}`);\n }\n };\n\n query = async (\n query: Readonly<string | string[] | number | number[]>,\n {\n topPercent,\n abortSignal,\n }:\n | Readonly<{ topPercent?: number; abortSignal?: AbortSignal }>\n | undefined = {}\n ): Promise<AxDBMatch[][]> => {\n const texts = Array.isArray(query) ? query : [query];\n\n if (typeof texts[0] === 'string' && this.rewriter) {\n for (const [i, text] of texts.entries()) {\n const { rewrittenQuery } = await this.rewriter.forward(this.ai, {\n query: text,\n });\n texts[i] = rewrittenQuery;\n }\n }\n\n let queries: Promise<AxDBQueryResponse>[];\n\n if (typeof texts[0] === 'string') {\n const embedResults = await this.ai.embed(\n { texts },\n {\n abortSignal,\n }\n );\n queries = embedResults.embeddings.map((values) =>\n this.db.query({ table, values })\n );\n } else {\n queries = texts.map((values) => this.db.query({ table, values }));\n }\n\n const queryResults = await Promise.all(queries);\n const res: AxDBMatch[][] = [];\n\n for (const { matches } of queryResults) {\n const m = matches\n .filter((v) => v.metadata?.text && v.metadata?.text.length > 0)\n .map(({ score, metadata }) => ({\n score,\n text: metadata?.text ?? '',\n }));\n\n const tp = topPercent && topPercent > 1 ? topPercent / 100 : topPercent;\n const resultItems = tp ? 
getTopInPercent(m, tp) : m;\n\n if (this.reranker) {\n const { rankedItems } = await this.reranker.forward(this.ai, {\n query: texts[0] as string,\n items: resultItems.map((item) => item.text),\n });\n\n const items = rankedItems\n .map((item) => resultItems.find((r) => r.text === item))\n .filter((v) => v !== undefined) as AxDBMatch[];\n\n res.push(items);\n } else {\n res.push(resultItems);\n }\n }\n\n return res;\n };\n}\n\nconst processChunks = ({\n initialChunks,\n maxWordsPerChunk = 350,\n minWordsPerChunk = 250,\n}: Readonly<{\n initialChunks: readonly string[];\n maxWordsPerChunk?: number;\n minWordsPerChunk?: number;\n}>): string[] => {\n const chunks: string[] = [];\n\n let currentChunk = '';\n let currentWordCount = 0;\n\n initialChunks.forEach((chunk) => {\n const words = chunk.split(/\\s+/); // Split the chunk into words\n const wordCount = words.length; // Count words in the current chunk\n\n if (currentWordCount + wordCount <= maxWordsPerChunk) {\n // Add to the current chunk if within the max size limit\n currentChunk += `${chunk}\\n\\n`;\n currentWordCount += wordCount;\n } else if (\n currentWordCount > 0 &&\n currentWordCount + wordCount <= maxWordsPerChunk * 1.5\n ) {\n // If the total word count exceeds the limit but is less than 150% of the maxWordsPerChunk\n currentChunk += `${chunk}\\n\\n`;\n currentWordCount += wordCount;\n } else {\n // If the current chunk is not empty and adding the new chunk exceeds the adjusted limit\n if (currentWordCount > minWordsPerChunk) {\n chunks.push(currentChunk.trim());\n currentChunk = '';\n currentWordCount = 0;\n }\n // Handle the case where the chunk itself is larger than the limit\n if (wordCount > maxWordsPerChunk) {\n const remainingWords = words;\n while (remainingWords.length > maxWordsPerChunk * 1.5) {\n const slice = remainingWords.splice(0, maxWordsPerChunk);\n chunks.push(slice.join(' '));\n }\n // Add the last portion if it fits the condition of being within 150% of maxWordsPerChunk\n if (remainingWords.length > 0) {\n currentChunk += `${remainingWords.join(' ')}\\n\\n`;\n currentWordCount += remainingWords.length;\n }\n } else {\n // If the new chunk is smaller than the maximum words per chunk\n currentChunk = `${chunk}\\n\\n`;\n currentWordCount = wordCount;\n }\n }\n });\n\n // Push the last chunk if it exists and meets the minimum words condition\n if (currentWordCount > minWordsPerChunk || chunks.length === 0) {\n chunks.push(currentChunk.trim());\n }\n return chunks;\n};\n\nconst getTopInPercent = (\n entries: readonly AxDBMatch[],\n percent = 0.1\n): AxDBMatch[] => {\n // Sort entries by score in ascending order\n const sortedEntries = [...entries].sort((a, b) => a.score - b.score);\n\n // Calculate the number of entries to take (top 10%)\n const topTenPercentCount = Math.ceil(sortedEntries.length * percent);\n\n // Return the top 10% of entries\n return sortedEntries.slice(0, topTenPercentCount);\n};\n","// Removed debug imports - logging now handled in base.ts\nimport type {\n AxChatRequest,\n AxChatResponseResult,\n AxFunctionResult,\n} from '../ai/types.js';\nimport {\n axValidateChatRequestMessage,\n axValidateChatResponseResult,\n} from '../ai/validate.js';\n\nimport type { AxAIMemory, AxMemoryData } from './types.js';\n\nexport class MemoryImpl {\n private data: AxMemoryData = [];\n\n addRequest(items: AxChatRequest['chatPrompt'], index: number): void {\n this.data.push(\n ...items.map((item) => {\n const value = structuredClone(item);\n return {\n role: item.role,\n chat: [{ index, value }],\n };\n })\n 
);\n }\n\n addFunctionResults(results: Readonly<AxFunctionResult[]>): void {\n const chat = results.map(({ index, ...value }) => ({\n index,\n value: structuredClone(value),\n }));\n\n const lastItem = this.getLast();\n if (lastItem?.role === 'function') {\n lastItem.chat.push(...chat);\n } else {\n this.data.push({ role: 'function', chat });\n }\n }\n\n addResponse(results: Readonly<AxChatResponseResult[]>): void {\n const chat = results.map(({ index, ...value }) => ({\n index,\n value: structuredClone(value),\n }));\n\n this.data.push({ role: 'assistant', chat });\n }\n\n updateResult({\n content,\n name,\n functionCalls,\n index,\n }: Readonly<AxChatResponseResult & { index: number }>): void {\n const lastItem = this.data.at(-1);\n\n if (\n !lastItem ||\n lastItem.role !== 'assistant' ||\n (lastItem.role === 'assistant' && !lastItem.updatable)\n ) {\n this.data.push({\n role: 'assistant',\n updatable: true,\n chat: [\n { index, value: structuredClone({ content, name, functionCalls }) },\n ],\n });\n return;\n }\n\n const chat = lastItem.chat.find((v) => v.index === index);\n\n if (!chat) {\n lastItem.chat.push({\n index,\n value: structuredClone({ content, name, functionCalls }),\n });\n return;\n }\n\n if (typeof content === 'string' && content.trim() !== '') {\n (chat.value as { content: string }).content = content;\n }\n\n if (typeof name === 'string' && name.trim() !== '') {\n (chat.value as { name: string }).name = name;\n }\n\n if (Array.isArray(functionCalls) && functionCalls.length > 0) {\n (chat.value as { functionCalls: typeof functionCalls }).functionCalls =\n functionCalls;\n }\n }\n\n addTag(name: string): void {\n const lastItem = this.data.at(-1);\n if (!lastItem) {\n return;\n }\n\n if (!lastItem.tags) {\n lastItem.tags = [];\n }\n\n if (!lastItem.tags.includes(name)) {\n lastItem.tags.push(name);\n }\n }\n\n rewindToTag(name: string): AxMemoryData {\n const tagIndex = this.data.findIndex((item) => item.tags?.includes(name));\n if (tagIndex === -1) {\n throw new Error(`Tag \"${name}\" not found`);\n }\n\n // Remove and return the tagged item and everything after it\n return this.data.splice(tagIndex);\n }\n\n removeByTag(name: string): AxMemoryData {\n const indices = this.data.reduce<number[]>((acc, item, index) => {\n if (item.tags?.includes(name)) {\n acc.push(index);\n }\n return acc;\n }, []);\n\n if (indices.length === 0) {\n throw new Error(`No items found with tag \"${name}\"`);\n }\n\n return indices\n .reverse()\n .map((index) => this.data.splice(index, 1).at(0))\n .filter((item) => item !== undefined)\n .reverse();\n }\n\n history(index: number): AxChatRequest['chatPrompt'] {\n const result: AxChatRequest['chatPrompt'] = [];\n\n for (const { role, chat } of this.data) {\n let values: unknown;\n\n if (role === 'function') {\n values = chat.filter((v) => v.index === index).map((v) => v.value);\n } else {\n values = chat.find((v) => v.index === index)?.value;\n }\n\n if (Array.isArray(values) && values.length > 0) {\n result.push(\n ...values.map(\n (v) => ({ ...v, role }) as AxChatRequest['chatPrompt'][number]\n )\n );\n } else if (typeof values === 'object' && values !== null) {\n result.push({ ...values, role } as AxChatRequest['chatPrompt'][number]);\n }\n // Skip when values is undefined (no matching index found)\n }\n return result;\n }\n\n getLast(): AxMemoryData[number] | undefined {\n return this.data.at(-1);\n }\n\n reset(): void {\n this.data = [];\n }\n}\n\nexport class AxMemory implements AxAIMemory {\n private memories = new Map<string, 
MemoryImpl>();\n private defaultMemory: MemoryImpl;\n\n constructor() {\n this.defaultMemory = new MemoryImpl();\n }\n\n private getMemory(sessionId?: string): MemoryImpl {\n if (!sessionId) {\n return this.defaultMemory;\n }\n\n if (!this.memories.has(sessionId)) {\n this.memories.set(sessionId, new MemoryImpl());\n }\n\n return this.memories.get(sessionId) as MemoryImpl;\n }\n\n addRequest(value: AxChatRequest['chatPrompt'], sessionId?: string): void {\n for (const item of value) {\n axValidateChatRequestMessage(item);\n }\n this.getMemory(sessionId).addRequest(value, 0);\n }\n\n addResponse(\n results: Readonly<AxChatResponseResult[]>,\n sessionId?: string\n ): void {\n axValidateChatResponseResult(results);\n this.getMemory(sessionId).addResponse(results);\n }\n\n addFunctionResults(\n results: Readonly<AxFunctionResult[]>,\n sessionId?: string\n ): void {\n this.getMemory(sessionId).addFunctionResults(results);\n }\n\n updateResult(\n result: Readonly<AxChatResponseResult & { delta?: string }>,\n sessionId?: string\n ): void {\n this.getMemory(sessionId).updateResult(result);\n }\n\n addTag(name: string, sessionId?: string) {\n this.getMemory(sessionId).addTag(name);\n }\n\n rewindToTag(name: string, sessionId?: string) {\n return this.getMemory(sessionId).rewindToTag(name);\n }\n\n history(index: number, sessionId?: string) {\n return this.getMemory(sessionId).history(index);\n }\n\n getLast(sessionId?: string) {\n return this.getMemory(sessionId).getLast();\n }\n\n reset(sessionId?: string): void {\n if (!sessionId) {\n this.defaultMemory.reset();\n } else {\n this.memories.set(sessionId, new MemoryImpl());\n }\n }\n}\n\n// Debug functions removed - logging now handled in base.ts\n","import type { AxFunctionJSONSchema } from '../ai/types.js';\n\n// Extended type to handle flexible JSON schemas with union types\ntype FlexibleJSONSchema = AxFunctionJSONSchema & {\n anyOf?: FlexibleJSONSchema[];\n oneOf?: FlexibleJSONSchema[];\n allOf?: FlexibleJSONSchema[];\n properties?: Record<string, FlexibleJSONSchema | undefined>;\n};\n\ninterface ValidationError {\n path: string;\n issue: string;\n fix: string;\n example?: string;\n}\n\nexport const validateJSONSchema = (\n schema: Readonly<AxFunctionJSONSchema>\n): void => {\n const errors: ValidationError[] = [];\n\n const validateSchemaObject = (\n schema: Readonly<FlexibleJSONSchema | undefined>,\n path = ''\n ): void => {\n // Skip validation if schema is undefined or null\n if (!schema || typeof schema !== 'object') {\n return;\n }\n\n const validTypes = [\n 'array',\n 'integer',\n 'number',\n 'string',\n 'boolean',\n 'null',\n 'object',\n ];\n\n // Handle schemas with anyOf (union types)\n if (schema.anyOf && Array.isArray(schema.anyOf)) {\n if (schema.anyOf.length === 0) {\n errors.push({\n path: path || 'root',\n issue: 'anyOf array is empty',\n fix: 'Add at least one schema to the anyOf array',\n example: 'anyOf: [{ type: \"string\" }, { type: \"null\" }]',\n });\n }\n // Validate each schema in anyOf\n schema.anyOf.forEach((subSchema: FlexibleJSONSchema, index: number) => {\n validateSchemaObject(subSchema, `${path}anyOf[${index}].`);\n });\n return;\n }\n\n // Handle schemas with oneOf\n if (schema.oneOf && Array.isArray(schema.oneOf)) {\n if (schema.oneOf.length === 0) {\n errors.push({\n path: path || 'root',\n issue: 'oneOf array is empty',\n fix: 'Add at least one schema to the oneOf array',\n example: 'oneOf: [{ type: \"string\" }, { type: \"number\" }]',\n });\n }\n schema.oneOf.forEach((subSchema: FlexibleJSONSchema, index: 
number) => {\n validateSchemaObject(subSchema, `${path}oneOf[${index}].`);\n });\n return;\n }\n\n // Handle schemas with allOf\n if (schema.allOf && Array.isArray(schema.allOf)) {\n if (schema.allOf.length === 0) {\n errors.push({\n path: path || 'root',\n issue: 'allOf array is empty',\n fix: 'Add at least one schema to the allOf array',\n example:\n 'allOf: [{ type: \"object\" }, { properties: { name: { type: \"string\" } } }]',\n });\n }\n schema.allOf.forEach((subSchema: FlexibleJSONSchema, index: number) => {\n validateSchemaObject(subSchema, `${path}allOf[${index}].`);\n });\n return;\n }\n\n // Skip validation if no type is specified (might be a reference or other valid schema)\n if (!schema.type) {\n return;\n }\n\n if (!validTypes.includes(schema.type)) {\n errors.push({\n path: path || 'root',\n issue: `Invalid type '${schema.type}'`,\n fix: `Change type to one of: ${validTypes.join(', ')}`,\n example: `{ type: \"string\" } or { type: \"object\" }`,\n });\n return;\n }\n\n if (schema.type === 'object') {\n if (schema.properties) {\n if (\n typeof schema.properties !== 'object' ||\n Array.isArray(schema.properties)\n ) {\n errors.push({\n path: path || 'root',\n issue: 'properties must be an object, not an array or primitive',\n fix: 'Change properties to be an object with property names as keys',\n example:\n 'properties: { name: { type: \"string\" }, age: { type: \"number\" } }',\n });\n } else {\n for (const key in schema.properties) {\n const value = schema.properties[key];\n // Skip undefined or null properties\n if (value === undefined || value === null) {\n continue;\n }\n if (typeof value !== 'object') {\n errors.push({\n path: `${path}${key}`,\n issue: `Property schema must be an object, got ${typeof value}`,\n fix: 'Define the property as a proper schema object',\n example: `${key}: { type: \"string\", description: \"...\" }`,\n });\n continue;\n }\n validateSchemaObject(value, `${path}${key}.`);\n }\n }\n }\n\n if (schema.required) {\n if (!Array.isArray(schema.required)) {\n errors.push({\n path: path || 'root',\n issue: `'required' must be an array, got ${typeof schema.required}`,\n fix: 'Change required to be an array of property names',\n example:\n 'required: [\"name\", \"email\"] instead of required: \"name,email\"',\n });\n } else if (schema.required.length === 0) {\n // This is valid but might be worth noting\n } else {\n // Validate that required properties exist in properties\n if (schema.properties) {\n for (const requiredProp of schema.required) {\n if (typeof requiredProp !== 'string') {\n errors.push({\n path: `${path}required`,\n issue: `Required property names must be strings, got ${typeof requiredProp}`,\n fix: 'Ensure all items in required array are strings',\n example:\n 'required: [\"name\", \"email\"] not required: [123, \"email\"]',\n });\n } else if (!(requiredProp in schema.properties)) {\n errors.push({\n path: `${path}required`,\n issue: `Required property '${requiredProp}' is not defined in properties`,\n fix: `Either add '${requiredProp}' to properties or remove it from required`,\n example: `properties: { ${requiredProp}: { type: \"string\" } }`,\n });\n }\n }\n }\n }\n }\n }\n\n if (schema.type === 'array') {\n if (schema.items) {\n if (typeof schema.items !== 'object') {\n errors.push({\n path: `${path}items`,\n issue: `Array items schema must be an object, got ${typeof schema.items}`,\n fix: 'Define items as a proper schema object',\n example:\n 'items: { type: \"string\" } or items: { type: \"object\", properties: {...} }',\n });\n } 
else {\n validateSchemaObject(schema.items, `${path}items.`);\n }\n }\n }\n };\n\n validateSchemaObject(schema);\n\n if (errors.length > 0) {\n const errorMessage = [\n 'JSON Schema validation failed:',\n '',\n ...errors.map((error, index) => {\n const parts = [\n `${index + 1}. Path: ${error.path}`,\n ` Issue: ${error.issue}`,\n ` Fix: ${error.fix}`,\n ];\n if (error.example) {\n parts.push(` Example: ${error.example}`);\n }\n return parts.join('\\n');\n }),\n '',\n 'Please fix these issues and try again.',\n ].join('\\n');\n\n throw new Error(errorMessage);\n }\n};\n\n// Example Usage:\n\n/*\nconst validSchema: AxFunctionJSONSchema = {\n type: 'object',\n properties: {\n id: { type: 'integer' },\n name: { type: 'string' },\n email: { type: 'string' },\n isActive: { type: 'boolean' },\n tags: {\n type: 'array',\n items: { type: 'string' }\n },\n optionalField: {\n anyOf: [\n { type: 'string' },\n { type: 'null' }\n ]\n }\n },\n required: ['id', 'name', 'email']\n};\n\nconst invalidSchema: any = {\n type: 'object',\n properties: {\n id: { type: 'integer' },\n name: { type: 'string' },\n email: { type: 'unknownType' }, // Invalid type\n isActive: { type: 'boolean' },\n tags: {\n type: 'array',\n items: { type: 'string' }\n }\n },\n required: 'id,name,email' // Invalid 'required' field\n};\n\ntry {\n validateJSONSchema(validSchema);\n} catch (error) {\n console.error('Schema validation failed:', error.message);\n}\n\ntry {\n validateJSONSchema(invalidSchema);\n} catch (error) {\n console.error('Schema validation failed:', error.message);\n}\n*/\n","import { logFunctionError, logFunctionResults } from '../ai/debug.js';\nimport type {\n AxAIService,\n AxAIServiceActionOptions,\n AxChatRequest,\n AxChatResponseResult,\n AxFunction,\n AxFunctionResult,\n} from '../ai/types.js';\nimport type { AxMemory } from '../mem/memory.js';\nimport { axGlobals } from './globals.js';\nimport { validateJSONSchema } from './jsonschema.js';\n\nexport class AxFunctionError extends Error {\n constructor(\n private fields: {\n field: string;\n message: string;\n }[]\n ) {\n super();\n this.name = this.constructor.name;\n }\n\n getFields = () => this.fields;\n\n override toString(): string {\n return [\n `${this.name}: Function validation error`,\n ...this.fields.map((field) => ` - ${field.field}: ${field.message}`),\n ].join('\\n');\n }\n\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\ntype FunctionFieldErrors = ConstructorParameters<typeof AxFunctionError>[0];\n\nexport class FunctionError extends Error {\n constructor(\n private readonly fields: FunctionFieldErrors,\n private readonly func: Readonly<AxFunction>,\n private readonly funcId?: string\n ) {\n super();\n }\n\n getFunctionId = () => this.funcId;\n\n private getFieldDescription(fieldName: string): string {\n if (!this.func.parameters?.properties?.[fieldName]) {\n return '';\n }\n\n const fieldSchema = this.func.parameters.properties[fieldName];\n let description = fieldSchema.description;\n\n if (fieldSchema.enum?.length) {\n description += ` Allowed values are: ${fieldSchema.enum.join(', ')}`;\n }\n\n return description;\n }\n\n public getFixingInstructions = () => {\n const bulletPoints = this.fields.map((fieldError) => {\n const schemaDescription =\n this.getFieldDescription(fieldError.field) || '';\n return `- 
\\`${fieldError.field}\\` - ${fieldError.message} (${schemaDescription}).`;\n });\n\n return `Errors In Function Arguments: Fix the following invalid arguments to '${this.func.name}'\\n${bulletPoints.join('\\n')}`;\n };\n\n override toString(): string {\n return [\n `${this.name}: Function execution error in '${this.func.name}'`,\n ...this.fields.map((field) => {\n const description = this.getFieldDescription(field.field);\n return ` - ${field.field}: ${field.message}${description ? ` (${description})` : ''}`;\n }),\n this.funcId ? ` Function ID: ${this.funcId}` : '',\n ].join('\\n');\n }\n\n [Symbol.for('nodejs.util.inspect.custom')](\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _depth: number,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: Record<string, unknown>\n ) {\n return this.toString();\n }\n}\n\nexport type AxChatResponseFunctionCall = {\n id: string;\n name: string;\n args: string;\n};\n\nexport class AxFunctionProcessor {\n private funcList: Readonly<AxFunction[]> = [];\n\n constructor(funcList: Readonly<AxFunction[]>) {\n this.funcList = funcList;\n }\n\n private executeFunction = async (\n fnSpec: Readonly<AxFunction>,\n func: Readonly<AxChatResponseFunctionCall>,\n options?: Readonly<AxAIServiceActionOptions>\n ) => {\n let args: unknown;\n\n if (typeof func.args === 'string' && func.args.length > 0) {\n args = JSON.parse(func.args);\n } else {\n args = func.args;\n }\n\n const opt = options\n ? {\n sessionId: options.sessionId,\n traceId: options.traceId,\n ai: options.ai,\n }\n : undefined;\n\n let res: unknown;\n if (!fnSpec.parameters) {\n res =\n fnSpec.func.length === 1 ? await fnSpec.func(opt) : await fnSpec.func();\n } else {\n res =\n fnSpec.func.length === 2\n ? await fnSpec.func(args, opt)\n : await fnSpec.func(args);\n }\n\n // Use the formatter from options or fall back to globals\n const formatter =\n options?.functionResultFormatter ?? axGlobals.functionResultFormatter;\n return formatter(res);\n };\n\n public execute = async (\n func: Readonly<AxChatResponseFunctionCall>,\n options?: Readonly<AxAIServiceActionOptions>\n ) => {\n const fnSpec = this.funcList.find(\n (v) => v.name.localeCompare(func.name) === 0\n );\n if (!fnSpec) {\n throw new Error(`Function not found: ${func.name}`);\n }\n if (!fnSpec.func) {\n throw new Error(`No handler for function: ${func.name}`);\n }\n\n // execute value function calls\n try {\n return await this.executeFunction(fnSpec, func, options);\n } catch (e) {\n if (e instanceof AxFunctionError) {\n throw new FunctionError(e.getFields(), fnSpec, func.id);\n }\n throw e;\n }\n };\n}\n\nexport type AxInputFunctionType = (\n | AxFunction\n | {\n toFunction: () => AxFunction | AxFunction[];\n }\n)[];\n\nexport const parseFunctions = (\n newFuncs: Readonly<AxInputFunctionType>,\n existingFuncs?: readonly AxFunction[]\n): AxFunction[] => {\n if (newFuncs.length === 0) {\n return [...(existingFuncs ?? [])];\n }\n\n // biome-ignore lint/complexity/useFlatMap: cannot use flatMap here\n const functions = newFuncs\n .map((f) => {\n if ('toFunction' in f) {\n return f.toFunction();\n }\n return f;\n })\n .flat();\n\n for (const fn of functions.filter((v) => v.parameters)) {\n if (fn.parameters) {\n validateJSONSchema(fn.parameters);\n }\n }\n\n return [...(existingFuncs ?? 
[]), ...functions];\n};\n\ntype ProcessFunctionsArgs = {\n ai: Readonly<AxAIService>;\n functionList: Readonly<AxFunction[]>;\n functionCalls: readonly AxChatResponseFunctionCall[];\n mem: Readonly<AxMemory>;\n sessionId?: string;\n traceId?: string;\n span?: import('@opentelemetry/api').Span;\n excludeContentFromTrace?: boolean;\n index: number;\n functionResultFormatter?: (result: unknown) => string;\n};\n\nexport const processFunctions = async ({\n ai,\n functionList,\n functionCalls,\n mem,\n sessionId,\n traceId,\n span,\n excludeContentFromTrace,\n index,\n functionResultFormatter,\n}: Readonly<ProcessFunctionsArgs>) => {\n const funcProc = new AxFunctionProcessor(functionList);\n const functionsExecuted = new Set<string>();\n\n // Map each function call to a promise that resolves to the function result or null\n const promises = functionCalls.map((func) => {\n if (!func.id) {\n throw new Error(`Function ${func.name} did not return an ID`);\n }\n\n const promise: Promise<AxFunctionResult | undefined> = funcProc\n .execute(func, { sessionId, traceId, ai, functionResultFormatter })\n .then((functionResult) => {\n functionsExecuted.add(func.name.toLowerCase());\n\n // Add telemetry event for successful function call\n if (span) {\n const eventData: { name: string; args?: string; result?: string } = {\n name: func.name,\n };\n if (!excludeContentFromTrace) {\n eventData.args = func.args;\n eventData.result = functionResult ?? '';\n }\n span.addEvent('function.call', eventData);\n }\n\n return {\n result: functionResult ?? '',\n role: 'function' as const,\n functionId: func.id,\n index,\n };\n })\n .catch((e) => {\n if (!(e instanceof FunctionError)) {\n throw e;\n }\n const result = e.getFixingInstructions();\n\n // Add telemetry event for function error\n if (span) {\n const errorEventData: {\n name: string;\n args?: string;\n message: string;\n fixing_instructions?: string;\n } = {\n name: func.name,\n message: e.toString(),\n };\n if (!excludeContentFromTrace) {\n errorEventData.args = func.args;\n errorEventData.fixing_instructions = result;\n }\n span.addEvent('function.error', errorEventData);\n }\n\n if (ai.getOptions().debug) {\n const logger = ai.getLogger();\n logFunctionError(e, index, result, logger);\n }\n\n return {\n functionId: func.id,\n isError: true,\n index,\n result,\n role: 'function' as const,\n };\n });\n\n return promise;\n });\n\n // Wait for all promises to resolve\n const results = await Promise.all(promises);\n const functionResults = results.filter((result) => result !== undefined);\n\n mem.addFunctionResults(functionResults, sessionId);\n\n // Log successful function results if debug is enabled\n if (ai.getOptions().debug) {\n const successfulResults = functionResults.filter(\n (result) => !result.isError\n );\n if (successfulResults.length > 0) {\n const logger = ai.getLogger();\n logFunctionResults(successfulResults, logger);\n }\n }\n\n if (functionResults.some((result) => result.isError)) {\n mem.addTag('error', sessionId);\n }\n\n return functionsExecuted;\n};\n\nexport function parseFunctionCalls(\n ai: Readonly<AxAIService>,\n functionCalls: Readonly<AxChatResponseResult['functionCalls']>,\n _values: Record<string, unknown>,\n model?: string\n): AxChatResponseFunctionCall[] | undefined {\n if (!functionCalls || functionCalls.length === 0) {\n return;\n }\n if (!ai.getFeatures(model).functions) {\n throw new Error('Functions are not supported by the AI service');\n }\n\n const funcs: AxChatResponseFunctionCall[] = functionCalls.map((f) => ({\n id: 
f.id,\n name: f.function.name,\n args: f.function.params as string,\n }));\n\n // for (const [i, f] of funcs.entries()) {\n // values['functionName' + i] = f.name;\n // values['functionArguments' + i] =\n // typeof f.args === 'object' ? JSON.stringify(f.args) : f.args;\n // }\n return funcs;\n}\n\ntype FunctionCall = AxChatRequest['functionCall'] | undefined;\n\n/**\n * Utility function to parse a list of functions into AxFunction array\n */\nexport function createFunctionConfig(\n functionList?: AxInputFunctionType,\n definedFunctionCall?: FunctionCall,\n firstStep?: boolean\n): { functions: AxFunction[]; functionCall: FunctionCall } {\n const functionCall = definedFunctionCall;\n\n if (\n !firstStep &&\n (functionCall === 'required' || typeof functionCall === 'function')\n ) {\n return { functions: [], functionCall: undefined };\n }\n\n if (!functionList) {\n return { functions: [], functionCall: functionCall };\n }\n\n // biome-ignore lint/complexity/useFlatMap: you cannot use flatMap here\n const functions = functionList\n .map((f) => {\n if ('toFunction' in f) {\n return f.toFunction();\n }\n return f;\n })\n .flat();\n\n return { functions, functionCall };\n}\n","/* eslint-disable functional/prefer-immutable-types */\nimport { ColorLog } from '../util/log.js';\n\nimport type { AxExample, AxOptimizationStats } from './optimizer.js';\nimport type { AxField } from './sig.js';\nimport type {\n AxFieldValue,\n AxGenDeltaOut,\n AxGenOut,\n AxProgramUsage,\n} from './types.js';\n\nconst colorLog = new ColorLog();\n\nexport const updateProgressBar = (\n current: number,\n total: number,\n success: number,\n _elapsedTime: number, // in seconds\n msg: string,\n progressBarWidth = 20 // Default width of the progress bar\n): void => {\n const percentage = ((current / total) * 100).toFixed(1);\n const filledBarLength = Math.round((progressBarWidth * current) / total);\n const emptyBarLength = progressBarWidth - filledBarLength;\n const filledBar = colorLog.blueBright('█'.repeat(filledBarLength));\n const emptyBar = ' '.repeat(emptyBarLength);\n const successRate = total > 0 ? ((success / total) * 100).toFixed(1) : '0.0';\n\n // More user-friendly message\n const friendlyMsg = msg.includes('Running MIPROv2 optimization')\n ? 'Testing prompt variations'\n : msg.includes('Tuning Prompt')\n ? 'Generating training examples'\n : msg;\n\n // Use newline instead of carriage return to avoid overwriting structured logs\n process.stdout.write(\n `│ ${friendlyMsg}: ${current}/${total} (${colorLog.yellow(percentage)}%) |${filledBar}${emptyBar}| Success rate: ${colorLog.greenBright(successRate)}%\\n`\n );\n};\n\nexport const validateValue = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n): void => {\n const ft = field.type ?? 
{ name: 'string', isArray: false };\n\n const validateSingleValue = (\n expectedType: string,\n val: Readonly<AxFieldValue>\n ): boolean => {\n switch (expectedType) {\n case 'class':\n return typeof val === 'string';\n case 'code':\n return typeof val === 'string';\n case 'string':\n return typeof val === 'string';\n case 'number':\n return typeof val === 'number';\n case 'boolean':\n return typeof val === 'boolean';\n case 'date':\n return val instanceof Date || typeof val === 'string';\n case 'datetime':\n return val instanceof Date || typeof val === 'string';\n case 'json':\n return typeof val === 'object' || typeof val === 'string';\n default:\n return false; // Unknown or unsupported type\n }\n };\n\n const validImage = (val: Readonly<AxFieldValue>): boolean => {\n if (\n !val ||\n typeof val !== 'object' ||\n !('mimeType' in val) ||\n !('data' in val)\n ) {\n return false;\n }\n return true;\n };\n\n if (field.type?.name === 'image') {\n let msg: string | undefined;\n if (Array.isArray(value)) {\n for (const item of value) {\n if (!validImage(item)) {\n msg = 'object ({ mimeType: string; data: string })';\n break;\n }\n }\n } else if (!validImage(value)) {\n msg = 'object ({ mimeType: string; data: string })';\n }\n\n if (msg) {\n throw new Error(\n `Validation failed: Expected '${field.name}' to be type '${msg}' instead got '${value}'`\n );\n }\n return;\n }\n\n const validAudio = (val: Readonly<AxFieldValue>): boolean => {\n if (!val || typeof val !== 'object' || !('data' in val)) {\n return false;\n }\n return true;\n };\n\n if (field.type?.name === 'audio') {\n let msg: string | undefined;\n if (Array.isArray(value)) {\n for (const item of value) {\n if (!validAudio(item)) {\n msg = 'object ({ data: string; format?: string })';\n break;\n }\n }\n } else if (!validAudio(value)) {\n msg = 'object ({ data: string; format?: string })';\n }\n\n if (msg) {\n throw new Error(\n `Validation failed: Expected '${field.name}' to be type '${msg}' instead got '${value}'`\n );\n }\n return;\n }\n\n let isValid = true;\n\n if (ft.isArray) {\n if (!Array.isArray(value)) {\n isValid = false;\n } else {\n for (const item of value) {\n if (!validateSingleValue(ft.name, item)) {\n isValid = false;\n break;\n }\n }\n }\n } else {\n isValid = validateSingleValue(ft.name, value);\n }\n\n if (!isValid) {\n const gotType = Array.isArray(value) ? 'array' : typeof value;\n throw new Error(\n `Validation failed: Expected '${field.name}' to be a ${field.type?.isArray ? 'an array of ' : ''}${ft.name} instead got '${gotType}' (${JSON.stringify(value)})`\n );\n }\n};\n\nexport function mergeProgramUsage(\n usages: readonly AxProgramUsage[]\n): AxProgramUsage[] {\n const usageMap: { [key: string]: AxProgramUsage } = {};\n\n for (const usage of usages) {\n const key = `${usage.ai}:${usage.model}`;\n\n if (!usageMap[key]) {\n usageMap[key] = { ...usage };\n continue;\n }\n\n const currentUsage = usageMap[key];\n if (currentUsage) {\n const tokens = currentUsage.tokens ?? {\n promptTokens: 0,\n completionTokens: 0,\n totalTokens: 0,\n };\n tokens.promptTokens += usage?.tokens?.promptTokens ?? 0;\n tokens.completionTokens += usage?.tokens?.completionTokens ?? 0;\n tokens.totalTokens += usage?.tokens?.totalTokens ?? 0;\n currentUsage.tokens = tokens;\n }\n }\n\n return Object.values(usageMap);\n}\n\n/**\n * Parses a markdown list from a string. 
This is a very forgiving parser that\n * will try to handle anything that looks vaguely like a markdown list.\n */\nexport const parseMarkdownList = (input: string): string[] => {\n // Handle empty input\n if (!input.trim()) {\n return [];\n }\n\n const listBullets = new Set(['-', '*', '+']);\n const numberedListRegex = /^\\d+[\\s]*[.)\\]]\\s*/;\n\n const lines = input.split('\\n');\n const list = [];\n\n for (const line of lines) {\n const trimmedLine = line.trim();\n // Skip empty lines\n if (!trimmedLine) {\n continue;\n }\n\n // Check for bullet points\n if (trimmedLine[0] && listBullets.has(trimmedLine[0])) {\n list.push(trimmedLine.slice(1).trim());\n }\n // Check for numbered lists (e.g., \"1.\", \"2.\", etc.)\n else if (numberedListRegex.test(trimmedLine)) {\n list.push(trimmedLine.replace(numberedListRegex, '').trim());\n }\n // If it's not a list item and we haven't collected any items yet, do nothing\n else if (list.length === 0) {\n // Skip non-list lines at the beginning\n }\n // If we've already started collecting list items, then this non-list line\n //is an error\n else {\n throw new Error('Could not parse markdown list: mixed content detected');\n }\n }\n\n // If we didn't find any list items, throw error\n if (list.length === 0) {\n throw new Error('Could not parse markdown list: no valid list items found');\n }\n\n return list;\n};\n\nexport function mergeDeltas<OUT extends AxGenOut>(\n base: AxGenDeltaOut<OUT>[],\n currentDelta: AxGenDeltaOut<OUT>\n) {\n type ValueTypeOfAxGenOut = AxGenOut[keyof AxGenOut];\n\n const { index, delta, version } = currentDelta;\n\n // Cast once for mutation – safe because we'll only assign validated keys\n const target = base.find((b) => b.index === index)?.delta as Record<\n string,\n ValueTypeOfAxGenOut\n >;\n\n if (!target) {\n base.push({ index, delta, version });\n return base;\n }\n\n for (const key of Object.keys(delta)) {\n const baseValue = target[key];\n const deltaValue = (delta as Record<string, unknown>)[key];\n\n if (baseValue === undefined && Array.isArray(deltaValue)) {\n target[key] = [...deltaValue];\n } else if (Array.isArray(baseValue) && Array.isArray(deltaValue)) {\n // Concatenate arrays\n target[key] = [...(baseValue as unknown[]), ...deltaValue];\n } else if (\n (baseValue === undefined || typeof baseValue === 'string') &&\n typeof deltaValue === 'string'\n ) {\n // Concatenate strings\n target[key] = `${baseValue ?? 
''}${deltaValue}`;\n } else {\n // For all other types, overwrite with the new value\n target[key] = deltaValue as ValueTypeOfAxGenOut;\n }\n }\n return base;\n}\n\nexport class LRUCache<K, V> {\n private cache = new Map<K, V>();\n private readonly maxSize: number;\n\n constructor(maxSize: number) {\n this.maxSize = maxSize;\n }\n\n get(key: K): V | undefined {\n const value = this.cache.get(key);\n if (value) {\n // Refresh position by deleting and re-adding\n this.cache.delete(key);\n this.cache.set(key, value);\n }\n return value;\n }\n\n set(key: K, value: V): void {\n if (this.cache.has(key)) {\n this.cache.delete(key);\n } else if (this.cache.size >= this.maxSize) {\n // Remove oldest entry (first item in map)\n const firstKey = this.cache.keys().next().value;\n if (firstKey) {\n this.cache.delete(firstKey);\n }\n }\n this.cache.set(key, value);\n }\n}\n\nconst globalPrefixCache = new LRUCache<string, string[]>(500);\n\n/**\n * Checks if a streaming string matches a prefix, either fully or partially from the end.\n * For streaming content, partial matches are checked from shortest to longest since\n * the content grows at the end and we want to detect partial prefixes as they form.\n * @param content The string to check (potentially streaming)\n * @param prefix The prefix to look for\n * @param startIndex Optional starting index for the search\n * @returns\n * - index >= 0: Position of full match\n * - -1: No match found\n * - -2: Partial match from the end\n * - -3: String is only whitespace\n */\nexport function matchesContent(\n content: string,\n prefix: string,\n startIndex = 0,\n prefixCache: LRUCache<string, string[]> = globalPrefixCache\n): number {\n // Check if string starts with a markdown block with optional language\n if (/^```[a-zA-Z]*\\s*$/.test(content)) {\n return -4;\n }\n\n // Check if string is only whitespace\n if (/^[\\s`]*$/.test(content)) {\n return -3;\n }\n\n // First check if the complete prefix exists anywhere after startIndex\n const exactMatchIndex = content.indexOf(prefix, startIndex);\n\n if (exactMatchIndex !== -1) {\n return exactMatchIndex;\n }\n\n // Get or create cached prefixes\n const prefixes =\n prefixCache.get(prefix) ??\n Array.from({ length: prefix.length }, (_, i) => prefix.slice(0, i + 1));\n\n // Set in cache if it wasn't there\n if (!prefixCache.get(prefix)) {\n prefixCache.set(prefix, prefixes);\n }\n\n // Check for partial matches at the end (for streaming content)\n // We want to find the longest partial prefix that the content ends with\n let longestPartialMatch = -1;\n\n // Start from the longest prefix and work backwards to find the longest match\n for (let i = prefixes.length - 1; i >= 0; i--) {\n const partialPrefix = prefixes[i] as string;\n\n // Check if content ends with this partial prefix\n if (content.endsWith(partialPrefix)) {\n longestPartialMatch = i;\n break; // Found the longest match, no need to continue\n }\n }\n\n // Return -2 for partial match, -1 for no match\n return longestPartialMatch >= 0 ? 
-2 : -1;\n}\n\nexport const formatTime = (ms: number): string => {\n const seconds = Math.floor(ms / 1000);\n if (seconds < 60) return `${seconds}s`;\n\n const minutes = Math.floor(seconds / 60);\n const remainingSeconds = seconds % 60;\n if (minutes < 60) return `${minutes}m ${remainingSeconds}s`;\n\n const hours = Math.floor(minutes / 60);\n const remainingMinutes = minutes % 60;\n return `${hours}h ${remainingMinutes}m ${remainingSeconds}s`;\n};\n\nexport const calculateETA = (\n current: number,\n total: number,\n elapsedMs: number\n): string => {\n if (current === 0) return 'calculating...';\n\n const msPerItem = elapsedMs / current;\n const remainingItems = total - current;\n const etaMs = msPerItem * remainingItems;\n\n return formatTime(etaMs);\n};\n\ninterface ProgressConfigInfo {\n maxRounds: number;\n batchSize: number;\n earlyStoppingPatience: number;\n costMonitoring: boolean;\n verboseMode: boolean;\n debugMode: boolean;\n}\n\nexport const updateDetailedProgress = <T extends AxGenOut = AxGenOut>(\n roundIndex: number,\n current: number,\n total: number,\n elapsedTime: number,\n example: Readonly<AxExample>,\n stats: Readonly<AxOptimizationStats>,\n configInfo: Readonly<ProgressConfigInfo>,\n result?: T,\n error?: Error\n): void => {\n // Clear line and create a formatted output\n process.stdout.write('\\r\\x1b[K');\n\n const percentage = ((current / total) * 100).toFixed(1);\n const formattedTime = formatTime(elapsedTime);\n const eta = calculateETA(current, total, elapsedTime);\n\n // Basic progress info (always shown) - more user-friendly\n let output = `Training round ${roundIndex + 1}/${configInfo.maxRounds}: ${current}/${total} (${percentage}%) [${formattedTime}, ETA: ${eta}]`;\n\n // Add success stats in a cleaner format\n const successRate =\n stats.totalCalls > 0 ? (stats.successfulDemos / stats.totalCalls) * 100 : 0;\n output += ` | Success rate: ${successRate.toFixed(1)}% (${stats.successfulDemos}/${stats.totalCalls})`;\n\n // Additional info for verbose mode\n if (configInfo.verboseMode || configInfo.debugMode) {\n if (configInfo.costMonitoring) {\n output += `\\n Tokens: ~${stats.estimatedTokenUsage.toLocaleString()} total`;\n }\n\n output += `\\n Batch: ${Math.floor(current / configInfo.batchSize) + 1}/${Math.ceil(total / configInfo.batchSize)}`;\n\n if (configInfo.earlyStoppingPatience > 0 && stats.earlyStopping) {\n output += `\\n Best round: ${stats.earlyStopping.bestScoreRound + 1}, Patience: ${configInfo.earlyStoppingPatience}`;\n }\n }\n\n // Debug mode gets even more info\n if (configInfo.debugMode) {\n // Truncate example keys for display\n const exampleKeys = Object.keys(example)\n .map((k) => {\n const valueStr = JSON.stringify(example[k]);\n const truncated =\n valueStr.length > 30 ? `${valueStr.substring(0, 30)}...` : valueStr;\n return `${k}: ${truncated}`;\n })\n .join(', ');\n\n output += `\\n Example: {${exampleKeys}}`;\n\n if (error) {\n output += `\\n ERROR: ${error.message}`;\n } else if (result) {\n // Truncate result for display\n const resultStr = JSON.stringify(result);\n const truncatedResult =\n resultStr.length > 50 ? 
`${resultStr.substring(0, 50)}...` : resultStr;\n output += `\\n Result: ${truncatedResult}`;\n }\n\n // Add temperature info\n output += `\\n Temperature: ${(0.7 + 0.001 * current).toFixed(3)}`;\n }\n\n console.log(output);\n};\n","/* eslint-disable @typescript-eslint/naming-convention */\n\nimport { parseLLMFriendlyDate, parseLLMFriendlyDateTime } from './datetime.js';\nimport { ValidationError } from './errors.js';\nimport type { AxField, AxSignature } from './sig.js';\nimport type { AxGenOut, GenDeltaOut } from './types.js';\nimport { matchesContent, parseMarkdownList } from './util.js';\n\nexport const extractValues = (\n sig: Readonly<AxSignature>,\n values: Record<string, unknown>,\n content: string,\n strictMode = false\n) => {\n const xstate = { extractedFields: [], streamedIndex: {}, s: -1 };\n streamingExtractValues(sig, values, xstate, content, { strictMode });\n streamingExtractFinalValue(sig, values, xstate, content);\n\n // Filter out internal fields\n for (const field of sig.getOutputFields()) {\n if (field.isInternal) {\n delete values[field.name];\n }\n }\n};\n\nexport interface extractionState {\n prevFields?: { field: AxField; s: number; e: number }[];\n currField?: AxField;\n currFieldIndex?: number;\n inAssumedField?: boolean;\n extractedFields: AxField[];\n streamedIndex: Record<string, number>;\n s: number;\n inBlock?: boolean;\n}\n\n// Helper function to check for missing required fields\nconst checkMissingRequiredFields = (\n _xstate: Readonly<extractionState>,\n values: Record<string, unknown>,\n outputFields: Readonly<AxField[]>\n) => {\n const missingFields: AxField[] = [];\n\n for (const field of outputFields) {\n if (field && !field.isOptional && values[field.name] === undefined) {\n missingFields.push(field);\n }\n }\n\n if (missingFields.length > 0) {\n throw new ValidationError({\n message: `Required ${missingFields.length === 1 ? 'field' : 'fields'} not found`,\n fields: missingFields,\n });\n }\n};\n\nexport interface StreamingExtractValuesOptions {\n strictMode?: boolean;\n skipEarlyFail?: boolean;\n}\n\nexport const streamingExtractValues = (\n sig: Readonly<AxSignature>,\n values: Record<string, unknown>,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n content: string,\n { strictMode, skipEarlyFail }: StreamingExtractValuesOptions = {}\n) => {\n const fields = sig.getOutputFields();\n let expectedField: AxField | undefined;\n\n for (const [index, field] of fields.entries()) {\n // If the field is the current field and it's not assumed, skip it\n if (index === xstate.currFieldIndex && !xstate.inAssumedField) {\n continue;\n }\n\n // If field is already in values and it's not the current field and it's not assumed, skip it\n if (\n field.name in values &&\n !(index === xstate.currFieldIndex && xstate.inAssumedField)\n ) {\n continue;\n }\n\n const isFirst = xstate.extractedFields.length === 0;\n const prefix = `${(isFirst ? 
'' : '\\n') + field.title}:`;\n\n let e = matchesContent(content, prefix, xstate.s);\n let prefixLen = prefix.length;\n\n switch (e) {\n case -1:\n if (skipEarlyFail) {\n continue;\n }\n\n // If there is only one field then we assume the content is streaming to the first field\n // Note: optimization for single field responses\n if (\n !strictMode &&\n fields.length === 1 &&\n xstate.currField === undefined\n ) {\n xstate.inAssumedField = true;\n expectedField = field;\n prefixLen = 0;\n e = 0;\n break;\n }\n\n // if multiple fields, we need to validate the field name of the first required field\n if (xstate.currField === undefined && !field.isOptional) {\n throw new ValidationError({\n message: 'Expected (Required) field not found',\n fields: [field],\n });\n }\n\n expectedField = field.isOptional ? undefined : field;\n continue; // Field is not found, continue to the next field\n case -2:\n return true; // Partial match at end, skip and gather more content\n case -3:\n return true; // String is only whitespace, skip and gather more content\n case -4:\n xstate.inBlock = true;\n return true; // String is only backticks, skip and gather more content\n }\n // We found a field!!!\n\n // If the field we found is not the expected field, throw an error\n if (expectedField && expectedField.name !== field.name) {\n throw new ValidationError({\n message: 'Expected (Required) field not found',\n fields: [expectedField],\n });\n }\n\n if (xstate.currField !== undefined && xstate.inAssumedField) {\n xstate.inAssumedField = false;\n xstate.streamedIndex[xstate.currField.name] = 0;\n xstate.currField = undefined;\n }\n\n // Lets wrap up the last field which is still the current field\n if (xstate.currField) {\n const val = content.substring(xstate.s, e).trim();\n const parsedValue = validateAndParseFieldValue(xstate.currField, val);\n if (parsedValue !== undefined) {\n values[xstate.currField.name] = parsedValue;\n }\n if (xstate.prevFields) {\n xstate.prevFields?.push({ field: xstate.currField, s: xstate.s, e });\n } else {\n xstate.prevFields = [{ field: xstate.currField, s: xstate.s, e }];\n }\n }\n\n // Lets update the state for the new current field\n\n xstate.s = e + prefixLen;\n xstate.currField = field;\n xstate.currFieldIndex = index;\n\n if (!xstate.extractedFields.includes(field)) {\n xstate.extractedFields.push(field);\n }\n\n if (xstate.streamedIndex[field.name] === undefined) {\n xstate.streamedIndex[field.name] = 0;\n }\n }\n};\n\nexport const streamingExtractFinalValue = (\n sig: Readonly<AxSignature>,\n values: Record<string, unknown>,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n content: string\n) => {\n if (xstate.currField) {\n const val = content.substring(xstate.s).trim();\n\n const parsedValue = validateAndParseFieldValue(xstate.currField, val);\n if (parsedValue !== undefined) {\n values[xstate.currField.name] = parsedValue;\n }\n }\n // Check all previous required fields before processing current field\n checkMissingRequiredFields(xstate, values, sig.getOutputFields());\n};\n\nconst convertValueToType = (\n field: Readonly<AxField>,\n val: string,\n required = false\n) => {\n switch (field.type?.name) {\n case 'code':\n return extractBlock(val);\n\n case 'string':\n return val;\n\n case 'number': {\n const v = Number(val);\n if (Number.isNaN(v)) {\n if (field.isOptional && !required) {\n return;\n }\n throw new Error('Invalid number');\n }\n return v;\n }\n\n case 'boolean': {\n if (typeof val === 'boolean') {\n return val;\n }\n 
const v = val.toLowerCase();\n if (v === 'true') {\n return true;\n }\n if (v === 'false') {\n return false;\n }\n if (field.isOptional && !required) {\n return;\n }\n throw new Error('Invalid boolean');\n }\n case 'date':\n return parseLLMFriendlyDate(field, val, required);\n\n case 'datetime':\n return parseLLMFriendlyDateTime(field, val, required);\n\n case 'class': {\n const className = val;\n if (field.type.options && !field.type.options.includes(className)) {\n if (field.isOptional) {\n return;\n }\n throw new Error(\n `Invalid class '${val}', expected one of the following: ${field.type.options.join(', ')}`\n );\n }\n return className as string;\n }\n\n default:\n return val as string; // Unknown type\n }\n};\n\nexport function* yieldDelta<OUT extends AxGenOut>(\n content: string,\n field: Readonly<AxField>,\n s: number,\n e: number,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n index: number\n): GenDeltaOut<OUT> {\n const { name: fieldName, isInternal } = field;\n const { isArray: fieldIsArray, name: fieldTypeName } = field.type ?? {};\n\n if (\n isInternal ||\n fieldIsArray ||\n (fieldTypeName && fieldTypeName !== 'string' && fieldTypeName !== 'code')\n ) {\n return;\n }\n\n const pos = xstate.streamedIndex[fieldName] ?? 0;\n const isFirstChunk = pos === 0;\n\n const d1 = content.substring(s + pos, e);\n if (d1.length === 0) {\n return;\n }\n\n // Remove trailing whitespace, tabs, and newlines\n let d2 = d1.replace(/\\s+$/, '');\n\n // If this field is a \"code\" type, remove trailing backticks\n if (xstate.currField?.type?.name === 'code') {\n d2 = d2.replace(/\\s*```\\s*$/, '');\n }\n\n // Only trim start for the first chunk\n let d3 = isFirstChunk ? d2.trimStart() : d2;\n\n if (xstate.currField?.type?.name === 'code') {\n // Remove any leading triple-backtick fences (with optional language specifier)\n d3 = d3.replace(/^[ ]*```[a-zA-Z0-9]*\\n\\s*/, '');\n }\n\n if (d3.length > 0) {\n yield { index, delta: { [fieldName]: d3 } as unknown as Partial<OUT> };\n xstate.streamedIndex[fieldName] = pos + d2.length;\n }\n}\n\nexport function* streamValues<OUT extends AxGenOut>(\n sig: Readonly<AxSignature>,\n content: string,\n values: Readonly<Record<string, OUT>>,\n // eslint-disable-next-line functional/prefer-immutable-types\n xstate: extractionState,\n index: number\n): GenDeltaOut<OUT> {\n for (const prevField of xstate.prevFields ?? []) {\n const { field, s, e } = prevField;\n yield* yieldDelta<OUT>(content, field, s, e, xstate, index);\n }\n xstate.prevFields = undefined;\n\n if (!xstate.currField || xstate.currField.isInternal) {\n return;\n }\n\n yield* yieldDelta<OUT>(\n content,\n xstate.currField,\n xstate.s,\n content.length,\n xstate,\n index\n );\n\n const outputFields = sig.getOutputFields();\n\n for (const key of Object.keys(values)) {\n const field = outputFields.find((f) => f.name === key);\n if (!field || field.isInternal) {\n continue;\n }\n\n const value = values[key];\n\n if (Array.isArray(value)) {\n const s = xstate.streamedIndex?.[key] ?? 
0;\n const v = value.slice(s);\n if (v && v.length > 0) {\n yield { index, delta: { [key]: v } as unknown as Partial<OUT> };\n xstate.streamedIndex[key] = s + v.length;\n }\n continue;\n }\n\n if (!xstate.streamedIndex[key]) {\n yield { index, delta: { [key]: value } as unknown as Partial<OUT> };\n xstate.streamedIndex[key] = 1;\n }\n }\n}\n\nfunction validateAndParseFieldValue(\n field: Readonly<AxField>,\n fieldValue: string | undefined\n): unknown {\n if (\n !fieldValue ||\n fieldValue === '' ||\n /^(null|undefined)\\s*$/i.test(fieldValue)\n ) {\n if (field.isOptional) {\n return;\n }\n throw new ValidationError({\n message: 'Required field is missing',\n fields: [field],\n value: fieldValue,\n });\n }\n\n let value: unknown | undefined;\n\n if (field.type?.name === 'json') {\n try {\n const text = extractBlock(fieldValue);\n value = JSON.parse(text);\n return value;\n } catch (e) {\n throw new ValidationError({\n message: `Invalid JSON: ${(e as Error).message}`,\n fields: [field],\n value: fieldValue,\n });\n }\n }\n\n if (field.type?.isArray) {\n try {\n try {\n value = JSON.parse(fieldValue);\n } catch {\n // If JSON parsing fails, try markdown parsing\n value = parseMarkdownList(fieldValue);\n }\n if (!Array.isArray(value)) {\n throw new Error('Expected an array');\n }\n } catch (e) {\n throw new ValidationError({\n message: `Invalid Array: ${(e as Error).message}`,\n fields: [field],\n value: fieldValue,\n });\n }\n }\n\n try {\n if (Array.isArray(value)) {\n for (const [index, item] of value.entries()) {\n if (item !== undefined) {\n const v = typeof item === 'string' ? item.trim() : item;\n value[index] = convertValueToType(field, v, true);\n }\n }\n } else {\n value = convertValueToType(field, fieldValue);\n }\n } catch (e) {\n throw new ValidationError({\n message: (e as Error).message,\n fields: [field],\n value: fieldValue,\n });\n }\n\n if (typeof value === 'string' && value === '') {\n return undefined;\n }\n\n return value;\n}\n\nexport const extractBlock = (input: string): string => {\n const markdownBlockPattern = /```([A-Za-z]*)\\n([\\s\\S]*?)\\n```/g;\n const match = markdownBlockPattern.exec(input);\n if (!match) {\n return input;\n }\n if (match.length === 3) {\n return match[2] as string;\n }\n if (match.length === 2) {\n return match[1] as string;\n }\n return input;\n};\n","// ReadableStream is available globally in modern browsers and Node.js 16+\n\nimport type { AxChatResponse, AxModelUsage } from '../ai/types.js';\nimport { mergeFunctionCalls } from '../ai/util.js';\nimport type { AxAIMemory } from '../mem/types.js';\n\nimport {\n type AxAssertion,\n type AxStreamingAssertion,\n assertAssertions,\n assertStreamingAssertions,\n} from './asserts.js';\nimport {\n extractValues,\n streamingExtractFinalValue,\n streamingExtractValues,\n streamValues,\n} from './extract.js';\nimport {\n type AxFieldProcessor,\n processFieldProcessors,\n processStreamingFieldProcessors,\n} from './fieldProcessor.js';\nimport { parseFunctionCalls, processFunctions } from './functions.js';\nimport type { AxResponseHandlerArgs, InternalAxGenState } from './generate.js';\nimport type { AxSignature } from './sig.js';\nimport type { AsyncGenDeltaOut, AxGenOut, DeltaOut } from './types.js';\n\ntype ProcessStreamingResponseArgs = Readonly<\n AxResponseHandlerArgs<ReadableStream<AxChatResponse>>\n> & {\n states: InternalAxGenState[];\n usage: AxModelUsage[];\n asserts: AxAssertion[];\n streamingAsserts: AxStreamingAssertion[];\n fieldProcessors: AxFieldProcessor[];\n streamingFieldProcessors: 
AxFieldProcessor[];\n thoughtFieldName: string;\n signature: AxSignature;\n excludeContentFromTrace: boolean;\n functionResultFormatter?: (result: unknown) => string;\n};\n\nexport async function* processStreamingResponse<OUT extends AxGenOut>({\n res,\n usage,\n states,\n ...args\n}: ProcessStreamingResponseArgs): AsyncGenDeltaOut<OUT> {\n const skipEarlyFail =\n (args.ai.getFeatures().functionCot ?? false) &&\n args.functions !== undefined &&\n args.functions.length > 0;\n\n // Handle ReadableStream async iteration for browser compatibility\n const reader = res.getReader();\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n const v = value;\n if (v.modelUsage) {\n usage.push(v.modelUsage);\n }\n\n for (const result of v.results) {\n if (\n (!result.content || result.content === '') &&\n (!result.thought || result.thought === '') &&\n (!result.functionCalls || result.functionCalls.length === 0)\n ) {\n continue;\n }\n\n const state = states.find((s) => s.index === result.index);\n if (!state) {\n throw new Error(`No state found for result (index: ${result.index})`);\n }\n\n yield* ProcessStreamingResponse<OUT>({\n ...args,\n result,\n skipEarlyFail,\n state,\n });\n }\n }\n } finally {\n reader.releaseLock();\n }\n\n // Finalize the streams\n for (const state of states) {\n yield* finalizeStreamingResponse<OUT>({\n ...args,\n state,\n });\n }\n}\n\ntype ProcessStreamingResponseArgs2 = Readonly<\n Omit<\n ProcessStreamingResponseArgs,\n | 'res'\n | 'states'\n | 'usage'\n | 'excludeContentFromTrace'\n | 'ai'\n | 'model'\n | 'traceId'\n | 'functions'\n | 'span'\n | 'fieldProcessors'\n > & {\n result: AxChatResponse['results'][number];\n skipEarlyFail: boolean;\n state: InternalAxGenState;\n }\n>;\n\nasync function* ProcessStreamingResponse<OUT extends AxGenOut>({\n result,\n mem,\n sessionId,\n strictMode,\n skipEarlyFail,\n state,\n signature,\n streamingFieldProcessors,\n thoughtFieldName,\n streamingAsserts,\n asserts,\n}: ProcessStreamingResponseArgs2): AsyncGenDeltaOut<OUT> {\n if (result.functionCalls && result.functionCalls.length > 0) {\n mergeFunctionCalls(state.functionCalls, result.functionCalls);\n mem.updateResult(\n {\n name: result.name,\n content: result.content,\n functionCalls: state.functionCalls,\n delta: result.functionCalls?.[0]?.function?.params as string,\n index: result.index,\n },\n sessionId\n );\n } else if (result.content && result.content.length > 0) {\n if (result.thought && result.thought.length > 0) {\n yield {\n index: result.index,\n delta: { [thoughtFieldName]: result.thought } as Partial<OUT>,\n };\n }\n\n state.content += result.content;\n mem.updateResult(\n {\n name: result.name,\n content: state.content,\n delta: result.content,\n index: result.index,\n },\n sessionId\n );\n\n const skip = streamingExtractValues(\n signature,\n state.values,\n state.xstate,\n state.content,\n { strictMode, skipEarlyFail }\n );\n\n if (skip) {\n return;\n }\n\n if (streamingAsserts.length !== 0) {\n await assertStreamingAssertions(\n streamingAsserts,\n state.xstate,\n state.content\n );\n }\n\n if (streamingFieldProcessors.length !== 0) {\n await processStreamingFieldProcessors(\n streamingFieldProcessors,\n state.content,\n state.xstate,\n mem,\n state.values,\n sessionId\n );\n }\n\n yield* streamValues<OUT>(\n signature,\n state.content,\n state.values as Record<string, OUT>,\n state.xstate,\n result.index\n );\n\n await assertAssertions(asserts, state.values);\n } else if (result.thought && result.thought.length > 0) {\n 
state.values[thoughtFieldName] =\n (state.values[thoughtFieldName] ?? '') + result.thought;\n\n yield {\n index: result.index,\n delta: { [thoughtFieldName]: result.thought } as Partial<OUT>,\n };\n }\n\n if (result.finishReason === 'length') {\n throw new Error(\n `Max tokens reached before completion\\nContent: ${state.content}`\n );\n }\n}\n\ntype FinalizeStreamingResponseArgs = Readonly<\n Omit<ProcessStreamingResponseArgs, 'res' | 'states' | 'usage'> & {\n state: InternalAxGenState;\n }\n>;\n\nexport async function* finalizeStreamingResponse<OUT extends AxGenOut>({\n state,\n signature,\n ai,\n model,\n functions,\n mem,\n sessionId,\n traceId,\n span,\n excludeContentFromTrace,\n streamingAsserts,\n asserts,\n fieldProcessors,\n streamingFieldProcessors,\n functionResultFormatter,\n}: FinalizeStreamingResponseArgs) {\n const funcs = parseFunctionCalls(\n ai,\n state.functionCalls,\n state.values,\n model\n );\n if (funcs) {\n if (!functions) {\n throw new Error('Functions are not defined');\n }\n const fx = await processFunctions({\n ai,\n functionList: functions,\n functionCalls: funcs,\n mem,\n sessionId,\n traceId,\n span,\n index: state.index,\n excludeContentFromTrace,\n functionResultFormatter,\n });\n state.functionsExecuted = new Set([...state.functionsExecuted, ...fx]);\n } else {\n streamingExtractFinalValue(\n signature,\n state.values,\n state.xstate,\n state.content\n );\n\n await assertStreamingAssertions(\n streamingAsserts,\n state.xstate,\n state.content,\n true\n );\n await assertAssertions(asserts, state.values);\n\n if (fieldProcessors.length) {\n await processFieldProcessors(\n fieldProcessors,\n state.values,\n mem,\n sessionId\n );\n }\n\n if (streamingFieldProcessors.length !== 0) {\n await processStreamingFieldProcessors(\n streamingFieldProcessors,\n state.content,\n state.xstate,\n mem,\n state.values,\n sessionId,\n true\n );\n }\n\n yield* streamValues<OUT>(\n signature,\n state.content,\n state.values as Record<string, OUT>,\n state.xstate,\n state.index\n );\n }\n}\n\nexport async function* processResponse<OUT extends AxGenOut>({\n ai,\n res,\n mem,\n sessionId,\n traceId,\n functions,\n span,\n strictMode,\n states,\n usage,\n excludeContentFromTrace,\n asserts,\n fieldProcessors,\n thoughtFieldName,\n signature,\n functionResultFormatter,\n}: Readonly<AxResponseHandlerArgs<AxChatResponse>> & {\n states: InternalAxGenState[];\n usage: AxModelUsage[];\n excludeContentFromTrace: boolean;\n asserts: AxAssertion[];\n fieldProcessors: AxFieldProcessor[];\n thoughtFieldName: string;\n signature: AxSignature;\n functionResultFormatter?: (result: unknown) => string;\n}): AsyncGenDeltaOut<OUT> {\n const results = res.results ?? 
[];\n\n mem.addResponse(results, sessionId);\n\n for (const result of results) {\n const state = states[result.index];\n\n if (!state) {\n throw new Error(`No state found for result (index: ${result.index})`);\n }\n\n if (res.modelUsage) {\n usage.push(res.modelUsage);\n }\n\n if (result.functionCalls?.length) {\n const funcs = parseFunctionCalls(ai, result.functionCalls, state.values);\n if (funcs) {\n if (!functions) {\n throw new Error('Functions are not defined');\n }\n\n const fx = await processFunctions({\n ai,\n functionList: functions,\n functionCalls: funcs,\n mem,\n sessionId,\n traceId,\n span,\n excludeContentFromTrace,\n index: result.index,\n functionResultFormatter,\n });\n\n state.functionsExecuted = new Set([...state.functionsExecuted, ...fx]);\n }\n } else if (result.content) {\n if (result.thought && result.thought.length > 0) {\n state.values[thoughtFieldName] = result.thought;\n }\n\n extractValues(signature, state.values, result.content, strictMode);\n await assertAssertions(asserts, state.values);\n\n if (fieldProcessors.length) {\n await processFieldProcessors(\n fieldProcessors,\n state.values,\n mem,\n sessionId\n );\n }\n }\n\n if (result.finishReason === 'length') {\n throw new Error(\n `Max tokens reached before completion\\nContent: ${result.content}`\n );\n }\n }\n\n const values = states.map((s) => s.values);\n\n // Strip out values whose signature fields have isInternal: true\n for (const v of values) {\n for (const field of signature.getOutputFields()) {\n if (field.isInternal) {\n delete v[field.name];\n }\n }\n }\n\n const outputFields = signature.getOutputFields();\n const deltas: DeltaOut<OUT>[] = values.map((v, index) => {\n const delta: Record<string, unknown> = {};\n for (const field of outputFields) {\n if (field.isInternal) {\n continue;\n }\n delta[field.name] = v[field.name];\n }\n // Include thought field if it exists in the values\n if (v[thoughtFieldName] !== undefined) {\n delta[thoughtFieldName] = v[thoughtFieldName];\n }\n return { index, delta: delta as Partial<OUT> };\n });\n\n for (const delta of deltas) {\n yield delta;\n }\n}\n\nexport function shouldContinueSteps(\n mem: AxAIMemory,\n stopFunction: string | undefined,\n states: InternalAxGenState[],\n sessionId?: string\n) {\n const lastMemItem = mem.getLast(sessionId);\n\n if (!lastMemItem) {\n return true;\n }\n\n for (const [index, state] of states.entries()) {\n const stopFunctionExecuted =\n stopFunction && state.functionsExecuted.has(stopFunction);\n\n const chat = lastMemItem.chat[index];\n\n if (!chat) {\n throw new Error(`No chat message found for result (index: ${index})`);\n }\n\n const isFunction = lastMemItem.role === 'function';\n const isProcessor = lastMemItem.tags\n ? 
lastMemItem.tags.some((tag) => tag === 'processor')\n : false;\n\n // If any state has stop function executed, return false immediately\n if (isFunction && stopFunction && stopFunctionExecuted) {\n return false;\n }\n\n // If this state doesn't meet continuation criteria, return false\n if (!(isFunction || isProcessor)) {\n return false;\n }\n }\n\n // All states meet continuation criteria\n return true;\n}\n","// Updated type definitions\n\nexport type TypeNotClass =\n | 'string'\n | 'number'\n | 'boolean'\n | 'json'\n | 'image'\n | 'audio'\n | 'datetime'\n | 'date'\n | 'code';\nexport type Type = TypeNotClass | 'class';\nexport type ParsedIdentifier = string;\nexport type ParsedString = string;\n\nexport type ParsedSignature = {\n desc?: string;\n inputs: InputParsedField[];\n outputs: OutputParsedField[];\n};\n\nexport type InputParsedField = {\n name: ParsedIdentifier;\n desc?: string;\n type?: { name: TypeNotClass; isArray: boolean };\n isOptional?: boolean;\n};\n\nexport type OutputParsedField = {\n name: ParsedIdentifier;\n desc?: string;\n type?:\n | { name: TypeNotClass; isArray: boolean; options?: string[] }\n | { name: 'class'; isArray: boolean; options: string[] };\n isOptional?: boolean;\n isInternal?: boolean;\n};\n\nimport { axGlobals } from './globals.js';\n\nclass SignatureValidationError extends Error {\n constructor(\n message: string,\n public readonly position: number,\n public readonly context: string,\n public readonly suggestion?: string\n ) {\n super(message);\n this.name = 'SignatureValidationError';\n }\n}\n\nclass SignatureParser {\n private input: string;\n private position: number;\n private currentFieldName: string | null = null;\n private currentSection: 'description' | 'inputs' | 'outputs' = 'description';\n\n constructor(input: string) {\n this.input = input.trim();\n this.position = 0;\n\n if (!this.input) {\n throw new SignatureValidationError(\n 'Empty signature provided',\n 0,\n '',\n 'A signature must contain at least input and output fields separated by \"->\". Example: \"userQuery:string -> aiResponse:string\"'\n );\n }\n }\n\n parse(): ParsedSignature {\n try {\n this.skipWhitespace();\n const optionalDesc = this.parseParsedString();\n this.skipWhitespace();\n\n this.currentSection = 'inputs';\n // Use the specialized input field parser\n const inputs = this.parseFieldList(\n this.parseInputField.bind(this),\n 'input'\n );\n this.skipWhitespace();\n\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n 'Incomplete signature: Missing output section',\n this.position,\n this.getErrorContext(),\n 'Add \"->\" followed by output fields. Example: \"-> responseText:string\"'\n );\n }\n\n this.expectArrow();\n this.skipWhitespace();\n\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n 'Incomplete signature: No output fields specified after \"->\"',\n this.position,\n this.getErrorContext(),\n 'Add at least one output field. 
Example: \"-> responseText:string\"'\n );\n }\n\n this.currentSection = 'outputs';\n // Use the specialized output field parser\n const outputs = this.parseFieldList(\n this.parseOutputField.bind(this),\n 'output'\n );\n\n // Check for any remaining content that shouldn't be there\n this.skipWhitespace();\n if (this.position < this.input.length) {\n const remaining = this.input.slice(this.position);\n throw new SignatureValidationError(\n `Unexpected content after signature: \"${remaining}\"`,\n this.position,\n this.getErrorContext(),\n 'Remove any extra content after the output fields'\n );\n }\n\n // Validate the parsed signature\n this.validateParsedSignature({\n desc: optionalDesc?.trim(),\n inputs,\n outputs,\n });\n\n return {\n desc: optionalDesc?.trim(),\n inputs,\n outputs,\n };\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n\n // Wrap other errors with better context\n const errorMessage =\n error instanceof Error ? error.message : 'Unknown error';\n throw new SignatureValidationError(\n errorMessage,\n this.position,\n this.getErrorContext()\n );\n }\n }\n\n private validateParsedSignature(signature: Readonly<ParsedSignature>): void {\n // Check for duplicate field names within inputs\n const inputNames = new Set<string>();\n for (const field of signature.inputs) {\n if (inputNames.has(field.name)) {\n throw new SignatureValidationError(\n `Duplicate input field name: \"${field.name}\"`,\n 0,\n '',\n 'Each field name must be unique within the signature'\n );\n }\n inputNames.add(field.name);\n }\n\n // Check for duplicate field names within outputs\n const outputNames = new Set<string>();\n for (const field of signature.outputs) {\n if (outputNames.has(field.name)) {\n throw new SignatureValidationError(\n `Duplicate output field name: \"${field.name}\"`,\n 0,\n '',\n 'Each field name must be unique within the signature'\n );\n }\n outputNames.add(field.name);\n }\n\n // Check for field names that appear in both inputs and outputs\n for (const outputField of signature.outputs) {\n if (inputNames.has(outputField.name)) {\n throw new SignatureValidationError(\n `Field name \"${outputField.name}\" appears in both inputs and outputs`,\n 0,\n '',\n 'Use different names for input and output fields to avoid confusion'\n );\n }\n }\n\n // Validate that we have at least one input and one output\n if (signature.inputs.length === 0) {\n throw new SignatureValidationError(\n 'Signature must have at least one input field',\n 0,\n '',\n 'Add an input field before \"->\". Example: \"userInput:string -> ...\"'\n );\n }\n\n if (signature.outputs.length === 0) {\n throw new SignatureValidationError(\n 'Signature must have at least one output field',\n 0,\n '',\n 'Add an output field after \"->\". Example: \"... 
-> responseText:string\"'\n );\n }\n }\n\n private getErrorContext(): string {\n const start = Math.max(0, this.position - 25);\n const end = Math.min(this.input.length, this.position + 25);\n const before = this.input.slice(start, this.position);\n const after = this.input.slice(this.position, end);\n const pointer = `${' '.repeat(before.length)}^`;\n\n const lines = [\n `Position ${this.position} in signature:`,\n `\"${before}${after}\"`,\n ` ${pointer}`,\n ];\n\n return lines.join('\\n');\n }\n\n private parseFieldList<T extends InputParsedField | OutputParsedField>(\n parseFieldFn: () => T,\n section: 'input' | 'output'\n ): T[] {\n const fields: T[] = [];\n this.skipWhitespace();\n\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n `Empty ${section} section: Expected at least one field`,\n this.position,\n this.getErrorContext(),\n `Add a ${section} field. Example: ${section === 'input' ? 'userInput:string' : 'responseText:string'}`\n );\n }\n\n // Parse first field\n try {\n fields.push(parseFieldFn());\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Invalid first ${section} field: ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n\n this.skipWhitespace();\n\n // Parse remaining fields\n while (this.position < this.input.length) {\n if (\n this.input[this.position] === '-' &&\n this.position + 1 < this.input.length &&\n this.input[this.position + 1] === '>'\n ) {\n break;\n }\n\n if (this.match(',')) {\n this.skipWhitespace();\n if (this.position >= this.input.length) {\n throw new SignatureValidationError(\n `Unexpected end of input after comma in ${section} section`,\n this.position,\n this.getErrorContext(),\n `Add another ${section} field after the comma`\n );\n }\n try {\n fields.push(parseFieldFn());\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Invalid ${section} field after comma: ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n this.skipWhitespace();\n } else {\n break;\n }\n }\n\n return fields;\n }\n\n // -------------------------------\n // Parse input fields (no \"class\" type and no internal flag)\n // -------------------------------\n private parseInputField(): InputParsedField {\n this.skipWhitespace();\n const name = this.parseParsedIdentifier();\n this.currentFieldName = name;\n\n // Validate field name for inputs\n this.validateFieldName(name, 'input');\n\n // Only the optional marker is allowed\n let isOptional: boolean | undefined;\n while (true) {\n if (this.match('?')) {\n isOptional = true;\n continue;\n }\n if (this.match('!')) {\n throw new SignatureValidationError(\n `Input field \"${name}\" cannot use the internal marker \"!\"`,\n this.position - 1,\n this.getErrorContext(),\n 'Internal markers (!) are only allowed on output fields'\n );\n }\n break;\n }\n\n let type: { name: TypeNotClass; isArray: boolean } | undefined;\n this.skipWhitespace();\n if (this.match(':')) {\n this.skipWhitespace();\n // Disallow the \"class\" type in input fields\n if (/^class\\b/.test(this.input.slice(this.position))) {\n throw new SignatureValidationError(\n `Input field \"${name}\" cannot use the \"class\" type`,\n this.position,\n this.getErrorContext(),\n 'Class types are only allowed on output fields. 
Use \"string\" type for input classifications'\n );\n }\n try {\n const typeName = this.parseTypeNotClass();\n const isArray = this.match('[]');\n type = { name: typeName, isArray };\n\n // Validate specific type constraints for input fields\n if ((typeName === 'image' || typeName === 'audio') && isArray) {\n throw new SignatureValidationError(\n `Input field \"${name}\": Arrays of ${typeName} are not supported`,\n this.position,\n this.getErrorContext(),\n `Use a single ${typeName} type instead: \"${typeName}\"`\n );\n }\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Input field \"${name}\": ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n }\n\n this.skipWhitespace();\n const desc = this.parseParsedString();\n\n return {\n name,\n desc: desc?.trim(),\n type,\n isOptional,\n };\n }\n\n // -------------------------------\n // Parse output fields (supports both \"class\" type and the internal marker)\n // -------------------------------\n private parseOutputField(): OutputParsedField {\n this.skipWhitespace();\n const name = this.parseParsedIdentifier();\n this.currentFieldName = name;\n\n // Validate field name for outputs\n this.validateFieldName(name, 'output');\n\n let isOptional = false;\n let isInternal = false;\n while (true) {\n if (this.match('?')) {\n isOptional = true;\n continue;\n }\n if (this.match('!')) {\n isInternal = true;\n continue;\n }\n break;\n }\n\n let type:\n | { name: TypeNotClass; isArray: boolean; options?: string[] }\n | { name: 'class'; isArray: boolean; options: string[] }\n | undefined;\n this.skipWhitespace();\n if (this.match(':')) {\n this.skipWhitespace();\n if (this.match('class')) {\n const isArray = this.match('[]');\n this.skipWhitespace();\n const classNamesString = this.parseParsedString();\n if (!classNamesString) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Missing class options after \"class\" type`,\n this.position,\n this.getErrorContext(),\n 'Add class names in quotes. Example: class \"positive, negative, neutral\"'\n );\n }\n const options = classNamesString\n .split(/[,|]/)\n .map((s) => s.trim())\n .filter((s) => s.length > 0);\n\n if (options.length === 0) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Empty class list provided`,\n this.position,\n this.getErrorContext(),\n 'Provide at least one class option. 
Example: \"positive, negative\"'\n );\n }\n\n type = { name: 'class', isArray, options };\n } else {\n try {\n const typeName = this.parseTypeNotClass();\n const isArray = this.match('[]');\n type = { name: typeName, isArray };\n\n // Validate specific type constraints\n if (typeName === 'image' && isArray) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Arrays of images are not supported`,\n this.position,\n this.getErrorContext(),\n 'Use a single image type instead: \"image\"'\n );\n }\n\n if (typeName === 'audio' && isArray) {\n throw new SignatureValidationError(\n `Output field \"${name}\": Arrays of audio are not supported`,\n this.position,\n this.getErrorContext(),\n 'Use a single audio type instead: \"audio\"'\n );\n }\n\n if (typeName === 'image') {\n throw new SignatureValidationError(\n `Output field \"${name}\": Image type is not supported in output fields`,\n this.position,\n this.getErrorContext(),\n 'Image types can only be used in input fields'\n );\n }\n\n if (typeName === 'audio') {\n throw new SignatureValidationError(\n `Output field \"${name}\": Audio type is not supported in output fields`,\n this.position,\n this.getErrorContext(),\n 'Audio types can only be used in input fields'\n );\n }\n } catch (error) {\n if (error instanceof SignatureValidationError) {\n throw error;\n }\n throw new SignatureValidationError(\n `Output field \"${name}\": ${error instanceof Error ? error.message : 'Unknown error'}`,\n this.position,\n this.getErrorContext()\n );\n }\n }\n }\n\n this.skipWhitespace();\n const desc = this.parseParsedString();\n\n return {\n name,\n desc: desc?.trim(),\n type,\n isOptional,\n isInternal,\n };\n }\n\n private validateFieldName(name: string, fieldType: 'input' | 'output'): void {\n // Check for reserved/generic names that should be more descriptive\n if (axGlobals.signatureStrict) {\n const reservedNames = [\n 'text',\n 'object',\n 'image',\n 'string',\n 'number',\n 'boolean',\n 'json',\n 'array',\n 'datetime',\n 'date',\n 'time',\n 'type',\n 'class',\n 'input',\n 'output',\n 'data',\n 'value',\n 'result',\n 'response',\n 'request',\n 'item',\n 'element',\n ];\n\n if (reservedNames.includes(name.toLowerCase())) {\n const suggestions =\n fieldType === 'input'\n ? ['userInput', 'questionText', 'documentContent', 'messageText']\n : ['responseText', 'analysisResult', 'categoryType', 'summaryText'];\n\n throw new SignatureValidationError(\n `Field name \"${name}\" is too generic`,\n this.position,\n this.getErrorContext(),\n `Use a more descriptive name. 
Examples: ${suggestions.join(', ')}`\n );\n }\n }\n\n // Check naming convention\n const camelCaseRegex = /^[a-z][a-zA-Z0-9]*$/;\n const snakeCaseRegex = /^[a-z]+(_[a-z0-9]+)*$/;\n\n if (!camelCaseRegex.test(name) && !snakeCaseRegex.test(name)) {\n throw new SignatureValidationError(\n `Invalid field name \"${name}\"`,\n this.position,\n this.getErrorContext(),\n 'Field names must be in camelCase (e.g., \"userInput\") or snake_case (e.g., \"user_input\")'\n );\n }\n\n // Check for minimum length\n if (name.length < 2) {\n throw new SignatureValidationError(\n `Field name \"${name}\" is too short`,\n this.position,\n this.getErrorContext(),\n 'Field names must be at least 2 characters long'\n );\n }\n\n // Check for maximum length\n if (name.length > 50) {\n throw new SignatureValidationError(\n `Field name \"${name}\" is too long (${name.length} characters)`,\n this.position,\n this.getErrorContext(),\n 'Field names should be 50 characters or less'\n );\n }\n }\n\n private parseTypeNotClass(): TypeNotClass {\n const types: TypeNotClass[] = [\n 'string',\n 'number',\n 'boolean',\n 'json',\n 'image',\n 'audio',\n 'datetime',\n 'date',\n 'code',\n ];\n\n const foundType = types.find((type) => this.match(type));\n if (!foundType) {\n const currentWord =\n this.input.slice(this.position).match(/^\\w+/)?.[0] || '';\n const suggestion = this.suggestType(currentWord);\n\n const baseMessage = `Invalid type \"${currentWord || 'empty'}\"`;\n const suggestionPart = suggestion\n ? `. Did you mean \"${suggestion}\"?`\n : '';\n const fullMessage = `${baseMessage}${suggestionPart}`;\n\n throw new SignatureValidationError(\n fullMessage,\n this.position,\n this.getErrorContext(),\n `Expected one of: ${types.join(', ')}`\n );\n }\n return foundType;\n }\n\n private suggestType(input: string): string | null {\n const suggestions: Record<string, string> = {\n str: 'string',\n text: 'string',\n int: 'number',\n integer: 'number',\n float: 'number',\n double: 'number',\n bool: 'boolean',\n object: 'json',\n dict: 'json',\n timestamp: 'datetime',\n time: 'datetime',\n img: 'image',\n picture: 'image',\n sound: 'audio',\n voice: 'audio',\n classification: 'class',\n category: 'class',\n };\n\n return suggestions[input.toLowerCase()] || null;\n }\n\n private parseParsedIdentifier(): ParsedIdentifier {\n this.skipWhitespace();\n const match = /^[a-zA-Z_][a-zA-Z_0-9]*/.exec(\n this.input.slice(this.position)\n );\n if (match) {\n this.position += match[0].length;\n return match[0];\n }\n\n const invalidMatch = /^\\S+/.exec(this.input.slice(this.position));\n const invalidId = invalidMatch ? invalidMatch[0] : '';\n\n if (invalidId === '') {\n throw new SignatureValidationError(\n 'Expected field name but found end of input',\n this.position,\n this.getErrorContext(),\n 'Add a field name. Field names must start with a letter or underscore'\n );\n }\n\n if (/^\\d/.test(invalidId)) {\n throw new SignatureValidationError(\n `Invalid field name \"${invalidId}\" - cannot start with a number`,\n this.position,\n this.getErrorContext(),\n 'Field names must start with a letter or underscore. 
Example: \"userInput\" or \"_internal\"'\n );\n }\n\n throw new SignatureValidationError(\n `Invalid field name \"${invalidId}\"`,\n this.position,\n this.getErrorContext(),\n 'Field names must start with a letter or underscore and contain only letters, numbers, or underscores'\n );\n }\n\n private parseParsedString(): string | undefined {\n const quoteChars = [\"'\", '\"'];\n for (const quoteChar of quoteChars) {\n if (this.match(quoteChar)) {\n let content = '';\n let escaped = false;\n const startPos = this.position - 1;\n\n while (this.position < this.input.length) {\n const char = this.input[this.position];\n this.position++;\n if (escaped) {\n content += char;\n escaped = false;\n } else if (char === '\\\\') {\n escaped = true;\n } else if (char === quoteChar) {\n return content;\n } else {\n content += char;\n }\n }\n\n const partialString = this.input.slice(\n startPos,\n Math.min(this.position, startPos + 20)\n );\n throw new SignatureValidationError(\n `Unterminated string starting at position ${startPos}`,\n startPos,\n this.getErrorContext(),\n `Add closing ${quoteChar} to complete the string: ${partialString}${quoteChar}`\n );\n }\n }\n return undefined;\n }\n\n private skipWhitespace() {\n const match = /^[\\s\\t\\r\\n]+/.exec(this.input.slice(this.position));\n if (match) {\n this.position += match[0].length;\n }\n }\n\n private match(strOrRegex: string | RegExp): boolean {\n let match: RegExpExecArray | null;\n if (typeof strOrRegex === 'string') {\n if (this.input.startsWith(strOrRegex, this.position)) {\n this.position += strOrRegex.length;\n return true;\n }\n } else {\n match = strOrRegex.exec(this.input.slice(this.position));\n if (match) {\n this.position += match[0].length;\n return true;\n }\n }\n return false;\n }\n\n private expectArrow() {\n if (!this.match('->')) {\n const found = this.input.slice(this.position, this.position + 10);\n const suggestion = found.includes('>')\n ? 'Use \"->\" (dash followed by greater-than)'\n : found.includes('-')\n ? 'Add \">\" after the dash'\n : 'Add \"->\" to separate input and output fields';\n\n throw new SignatureValidationError(\n `Expected \"->\" but found \"${found}...\"`,\n this.position,\n this.getErrorContext(),\n suggestion\n );\n }\n }\n}\n\nexport function parseSignature(input: string): ParsedSignature {\n const parser = new SignatureParser(input);\n return parser.parse();\n}\n","import type { AxChatRequest } from '../ai/types.js';\n\nimport { formatDateWithTimezone } from './datetime.js';\nimport type { AxInputFunctionType } from './functions.js';\nimport type { AxField, AxIField, AxSignature } from './sig.js';\nimport type { AxFieldValue, AxGenIn, AxMessage } from './types.js';\nimport { validateValue } from './util.js';\n\ntype Writeable<T> = { -readonly [P in keyof T]: T[P] };\n\n// Define options type for AxPromptTemplate constructor\nexport interface AxPromptTemplateOptions {\n functions?: Readonly<AxInputFunctionType>;\n thoughtFieldName?: string;\n}\ntype AxChatRequestChatPrompt = Writeable<AxChatRequest['chatPrompt'][0]>;\n\ntype ChatRequestUserMessage = Exclude<\n Extract<AxChatRequestChatPrompt, { role: 'user' }>['content'],\n string\n>;\n\nconst functionCallInstructions = `\n## Function Call Instructions\n- Complete the task, using the functions defined earlier in this prompt. 
\n- Output fields should only be generated after all functions have been called.\n- Use the function results to generate the output fields.`;\n\nconst formattingRules = `\n## Strict Output Formatting Rules\n- Output must strictly follow the defined plain-text \\`field name: value\\` field format.\n- Output field, values must strictly adhere to the specified output field formatting rules.\n- No formatting rules should override these **Strict Output Formatting Rules**\n- Do not add any text before or after the output fields, just the field name and value.\n- Do not use code blocks.`;\n\nexport type AxFieldTemplateFn = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n) => ChatRequestUserMessage;\n\nexport class AxPromptTemplate {\n private sig: Readonly<AxSignature>;\n private fieldTemplates?: Record<string, AxFieldTemplateFn>;\n private task: { type: 'text'; text: string };\n private readonly thoughtFieldName: string;\n private readonly functions?: Readonly<AxInputFunctionType>;\n\n constructor(\n sig: Readonly<AxSignature>,\n options?: Readonly<AxPromptTemplateOptions>,\n fieldTemplates?: Record<string, AxFieldTemplateFn>\n ) {\n this.sig = sig;\n this.fieldTemplates = fieldTemplates;\n this.thoughtFieldName = options?.thoughtFieldName ?? 'thought';\n this.functions = options?.functions;\n\n const task = [];\n\n const inArgs = renderDescFields(this.sig.getInputFields());\n const outArgs = renderDescFields(this.sig.getOutputFields());\n task.push(\n `You will be provided with the following fields: ${inArgs}. Your task is to generate new fields: ${outArgs}.`\n );\n\n // biome-ignore lint/complexity/useFlatMap: you cannot use flatMap here\n const funcs = this.functions\n ?.map((f) => ('toFunction' in f ? f.toFunction() : f))\n ?.flat();\n\n const funcList = funcs\n ?.map((fn) => `- \\`${fn.name}\\`: ${formatDescription(fn.description)}`)\n .join('\\n');\n\n if (funcList && funcList.length > 0) {\n task.push(`## Available Functions\\n${funcList}`);\n }\n\n const inputFields = renderInputFields(this.sig.getInputFields());\n task.push(`## Input Fields\\n${inputFields}`);\n\n const outputFields = renderOutputFields(this.sig.getOutputFields());\n task.push(`## Output Fields\\n${outputFields}`);\n\n if (funcList && funcList.length > 0) {\n task.push(functionCallInstructions.trim());\n }\n\n task.push(formattingRules.trim());\n\n const desc = this.sig.getDescription();\n if (desc) {\n const text = formatDescription(desc);\n task.push(text);\n }\n\n this.task = {\n type: 'text' as const,\n text: task.join('\\n\\n'),\n };\n }\n\n private renderSingleValueUserContent = <T extends AxGenIn>(\n values: T,\n renderedExamples: ChatRequestUserMessage,\n renderedDemos: ChatRequestUserMessage,\n examplesInSystemPrompt: boolean\n ): string | ChatRequestUserMessage => {\n const completion = this.renderInputFields(values);\n const promptList: ChatRequestUserMessage = examplesInSystemPrompt\n ? completion\n : [...renderedExamples, ...renderedDemos, ...completion];\n\n const prompt = promptList.filter((v) => v !== undefined);\n\n return prompt.every((v) => v.type === 'text')\n ? 
prompt.map((v) => v.text).join('\\n')\n : prompt.reduce(combineConsecutiveStrings('\\n'), []);\n };\n\n public render = <T extends AxGenIn>(\n values: T | ReadonlyArray<AxMessage<T>>, // Allow T (AxGenIn) or array of AxMessages\n {\n examples,\n demos,\n }: Readonly<{\n skipSystemPrompt?: boolean;\n examples?: Record<string, AxFieldValue>[]; // Keep as is, examples are specific structures\n demos?: Record<string, AxFieldValue>[]; // Keep as is\n }>\n ): Extract<\n AxChatRequest['chatPrompt'][number],\n { role: 'user' | 'system' | 'assistant' }\n >[] => {\n const renderedExamples = examples\n ? [\n { type: 'text' as const, text: '\\n\\n## Examples\\n' },\n ...this.renderExamples(examples),\n ]\n : [];\n\n const renderedDemos = demos ? this.renderDemos(demos) : [];\n\n // Check if demos and examples are all text type\n const allTextExamples = renderedExamples.every((v) => v.type === 'text');\n const allTextDemos = renderedDemos.every((v) => v.type === 'text');\n const examplesInSystemPrompt = allTextExamples && allTextDemos;\n\n let systemContent = this.task.text;\n\n if (examplesInSystemPrompt) {\n const combinedItems = [\n { type: 'text' as const, text: systemContent },\n ...renderedExamples,\n ...renderedDemos,\n ];\n combinedItems.reduce(combineConsecutiveStrings(''), []);\n\n if (combinedItems?.[0]) {\n systemContent = combinedItems[0].text;\n }\n }\n\n const systemPrompt = {\n role: 'system' as const,\n content: systemContent,\n };\n\n if (Array.isArray(values)) {\n const messages: Extract<\n AxChatRequest['chatPrompt'][number],\n { role: 'user' } | { role: 'assistant' }\n >[] = [];\n\n const history = values as ReadonlyArray<AxMessage<T>>;\n\n let firstItem = true;\n for (const message of history) {\n let content: string | ChatRequestUserMessage;\n\n if (firstItem) {\n content = this.renderSingleValueUserContent(\n message.values,\n renderedExamples,\n renderedDemos,\n examplesInSystemPrompt\n );\n firstItem = false;\n } else {\n content = this.renderSingleValueUserContent(\n message.values,\n [],\n [],\n false\n );\n }\n\n if (message.role === 'user') {\n messages.push({ role: 'user', content });\n continue;\n }\n\n if (message.role !== 'assistant') {\n throw new Error('Invalid message role');\n }\n\n if (typeof content !== 'string') {\n throw new Error(\n 'Assistant message cannot contain non-text content like images, files,etc'\n );\n }\n\n messages.push({ role: 'assistant', content });\n }\n\n return [systemPrompt, ...messages];\n }\n\n // values is T (AxGenIn) - existing logic path\n const userContent = this.renderSingleValueUserContent(\n values as T,\n renderedExamples,\n renderedDemos,\n examplesInSystemPrompt\n );\n\n return [systemPrompt, { role: 'user' as const, content: userContent }];\n };\n\n public renderExtraFields = (extraFields: readonly AxIField[]) => {\n const prompt: ChatRequestUserMessage = [];\n\n if (!extraFields || extraFields.length === 0) {\n return prompt;\n }\n\n const groupedFields = extraFields.reduce(\n (acc, field) => {\n const title = field.title;\n if (!acc[title]) {\n acc[title] = [];\n }\n acc[title].push(field);\n return acc;\n },\n {} as Record<string, AxIField[]>\n );\n\n const formattedGroupedFields = Object.entries(groupedFields)\n .map(([title, fields]) => {\n if (fields.length === 1) {\n const field = fields[0]!;\n return {\n title,\n name: field.name,\n description: field.description,\n };\n }\n if (fields.length > 1) {\n const valuesList = fields\n .map((field) => `- ${field.description}`)\n .join('\\n');\n return {\n title,\n name: 
fields[0]!.name,\n description: valuesList,\n };\n }\n })\n .filter(Boolean) as AxIField[];\n\n formattedGroupedFields.forEach((field) => {\n const fn = this.fieldTemplates?.[field.name] ?? this.defaultRenderInField;\n prompt.push(...fn(field, field.description));\n });\n\n return prompt;\n };\n\n private renderExamples = (data: Readonly<Record<string, AxFieldValue>[]>) => {\n const list: ChatRequestUserMessage = [];\n const exampleContext = {\n isExample: true,\n };\n\n for (const [index, item] of data.entries()) {\n const renderedInputItem = this.sig\n .getInputFields()\n .map((field) =>\n this.renderInField(field, item, {\n ...exampleContext,\n isInputField: true,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const renderedOutputItem = this.sig\n .getOutputFields()\n .map((field) =>\n this.renderInField(field, item, {\n ...exampleContext,\n isInputField: false,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const renderedItem = [...renderedInputItem, ...renderedOutputItem];\n\n if (\n index > 0 &&\n renderedItem.length > 0 &&\n renderedItem[0]?.type === 'text'\n ) {\n list.push({ type: 'text' as const, text: '---\\n\\n' });\n }\n\n renderedItem.forEach((v) => {\n if ('text' in v) {\n v.text = `${v.text}\\n`;\n }\n list.push(v);\n });\n }\n\n return list;\n };\n\n private renderDemos = (data: Readonly<Record<string, AxFieldValue>[]>) => {\n const list: ChatRequestUserMessage = [];\n const inputFields = this.sig.getInputFields();\n const outputFields = this.sig.getOutputFields();\n const demoContext = {\n isExample: true,\n };\n\n for (const item of data) {\n const inputRenderedItems = inputFields\n .map((field) =>\n this.renderInField(field, item, {\n ...demoContext,\n isInputField: true,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const outputRenderedItems = outputFields\n .map((field) =>\n this.renderInField(field, item, {\n ...demoContext,\n isInputField: false,\n })\n )\n .filter((v) => v !== undefined)\n .flat();\n\n const renderedItem = [...inputRenderedItems, ...outputRenderedItems];\n\n renderedItem.slice(0, -1).forEach((v) => {\n if ('text' in v) {\n v.text = `${v.text}\\n`;\n }\n list.push(v);\n });\n }\n\n return list;\n };\n\n private renderInputFields = <T extends AxGenIn>(values: T) => {\n const renderedItems = this.sig\n .getInputFields()\n .map((field) => this.renderInField(field, values, undefined))\n .filter((v) => v !== undefined)\n .flat();\n\n renderedItems\n .filter((v) => v.type === 'text')\n .forEach((v) => {\n v.text = `${v.text}\\n`;\n });\n\n return renderedItems;\n };\n\n private renderInField = (\n field: Readonly<AxField>,\n values: Readonly<Record<string, AxFieldValue>>,\n context?: {\n isExample?: boolean;\n strictExamples?: boolean;\n optionalOutputFields?: string[];\n isInputField?: boolean;\n }\n ) => {\n const value = values[field.name];\n\n if (isEmptyValue(field, value, context)) {\n return;\n }\n\n if (field.type) {\n validateValue(field, value!);\n }\n\n const processedValue = processValue(field, value!);\n\n const textFieldFn: AxFieldTemplateFn =\n this.fieldTemplates?.[field.name] ?? 
this.defaultRenderInField;\n\n return textFieldFn(field, processedValue);\n };\n\n private defaultRenderInField = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n ): ChatRequestUserMessage => {\n if (field.type?.name === 'image') {\n const validateImage = (\n value: Readonly<AxFieldValue>\n ): { mimeType: string; data: string } => {\n if (!value) {\n throw new Error('Image field value is required.');\n }\n\n if (typeof value !== 'object') {\n throw new Error('Image field value must be an object.');\n }\n if (!('mimeType' in value)) {\n throw new Error('Image field must have mimeType');\n }\n if (!('data' in value)) {\n throw new Error('Image field must have data');\n }\n return value as { mimeType: string; data: string };\n };\n\n let result: ChatRequestUserMessage = [\n { type: 'text', text: `${field.title}: ` as string },\n ];\n\n if (field.type.isArray) {\n if (!Array.isArray(value)) {\n throw new Error('Image field value must be an array.');\n }\n result = result.concat(\n (value as unknown[]).map((v) => {\n // Cast to unknown[] before map\n const validated = validateImage(v as AxFieldValue);\n return {\n type: 'image',\n mimeType: validated.mimeType,\n image: validated.data,\n };\n })\n );\n } else {\n const validated = validateImage(value);\n result.push({\n type: 'image',\n mimeType: validated.mimeType,\n image: validated.data,\n });\n }\n return result;\n }\n\n if (field.type?.name === 'audio') {\n const validateAudio = (\n value: Readonly<AxFieldValue>\n ): { format?: 'wav'; data: string } => {\n if (!value) {\n throw new Error('Audio field value is required.');\n }\n\n if (typeof value !== 'object') {\n throw new Error('Audio field value must be an object.');\n }\n if (!('data' in value)) {\n throw new Error('Audio field must have data');\n }\n return value as { format?: 'wav'; data: string };\n };\n\n let result: ChatRequestUserMessage = [\n { type: 'text', text: `${field.title}: ` as string },\n ];\n\n if (field.type.isArray) {\n if (!Array.isArray(value)) {\n throw new Error('Audio field value must be an array.');\n }\n result = result.concat(\n (value as unknown[]).map((v) => {\n // Cast to unknown[] before map\n const validated = validateAudio(v as AxFieldValue);\n return {\n type: 'audio',\n format: validated.format ?? 'wav',\n data: validated.data,\n };\n })\n );\n } else {\n const validated = validateAudio(value);\n result.push({\n type: 'audio',\n format: validated.format ?? 'wav',\n data: validated.data,\n });\n }\n return result;\n }\n\n const text = [field.title, ': '];\n\n if (Array.isArray(value)) {\n text.push('\\n');\n text.push(value.map((v) => `- ${v}`).join('\\n'));\n } else {\n text.push(value as string);\n }\n return [{ type: 'text', text: text.join('') }];\n };\n}\n\nconst renderDescFields = (list: readonly AxField[]) =>\n list.map((v) => `\\`${v.title}\\``).join(', ');\n\nconst renderInputFields = (fields: readonly AxField[]) => {\n const rows = fields.map((field) => {\n const name = field.title;\n const type = field.type?.name ? toFieldType(field.type) : 'string';\n\n const requiredMsg = field.isOptional\n ? `This optional ${type} field may be omitted`\n : `A ${type} field`;\n\n const description = field.description\n ? ` ${formatDescription(field.description)}`\n : '';\n\n return `${name}: (${requiredMsg})${description}`.trim();\n });\n\n return rows.join('\\n');\n};\n\nconst renderOutputFields = (fields: readonly AxField[]) => {\n const rows = fields.map((field) => {\n const name = field.title;\n const type = field.type?.name ? 
toFieldType(field.type) : 'string';\n\n const requiredMsg = field.isOptional\n ? `Only include this ${type} field if its value is available`\n : `This ${type} field must be included`;\n\n let description = '';\n\n if (field.description && field.description.length > 0) {\n const value =\n field.type?.name === 'class'\n ? field.description\n : formatDescription(field.description);\n description = ` ${value}`;\n }\n\n if (field.type?.options && field.type.options.length > 0) {\n if (description.length > 0) {\n description += '. ';\n }\n description += `Allowed values: ${field.type.options.join(', ')}`;\n }\n\n return `${name}: (${requiredMsg})${description}`.trim();\n });\n\n return rows.join('\\n');\n};\n\nconst processValue = (\n field: Readonly<AxField>,\n value: Readonly<AxFieldValue>\n): AxFieldValue => {\n if (field.type?.name === 'date' && value instanceof Date) {\n const v = value.toISOString();\n return v.slice(0, v.indexOf('T'));\n }\n if (field.type?.name === 'datetime' && value instanceof Date) {\n return formatDateWithTimezone(value);\n }\n if (field.type?.name === 'image' && typeof value === 'object') {\n return value;\n }\n if (field.type?.name === 'audio' && typeof value === 'object') {\n return value;\n }\n if (typeof value === 'string') {\n return value;\n }\n return JSON.stringify(value, null, 2);\n};\n\nexport const toFieldType = (type: Readonly<AxField['type']>) => {\n const baseType = (() => {\n switch (type?.name) {\n case 'string':\n return 'string';\n case 'number':\n return 'number';\n case 'boolean':\n return 'boolean (true or false)';\n case 'date':\n return 'date (\"YYYY-MM-DD\" format)';\n case 'datetime':\n return 'date time (\"YYYY-MM-DD HH:mm Timezone\" format)';\n case 'json':\n return 'JSON object';\n case 'class':\n return 'classification class';\n case 'code':\n return 'code';\n default:\n return 'string';\n }\n })();\n\n return type?.isArray ? `json array of ${baseType} items` : baseType;\n};\n\nfunction combineConsecutiveStrings(separator: string) {\n return (acc: ChatRequestUserMessage, current: ChatRequestUserMessage[0]) => {\n if (current.type === 'text') {\n const previous = acc.length > 0 ? acc[acc.length - 1] : null;\n if (previous && previous.type === 'text') {\n previous.text += separator + current.text;\n } else {\n acc.push(current);\n }\n } else {\n acc.push(current);\n }\n return acc;\n };\n}\n\nconst isEmptyValue = (\n field: Readonly<AxField>,\n value?: Readonly<AxFieldValue>,\n context?: {\n isExample?: boolean;\n isInputField?: boolean;\n }\n) => {\n if (typeof value === 'boolean') {\n return false;\n }\n\n if (\n !value ||\n ((Array.isArray(value) || typeof value === 'string') && value.length === 0)\n ) {\n // Handle examples case - all fields can be missing in examples\n if (context?.isExample) {\n return true;\n }\n\n // Handle non-examples case (regular field validation)\n if (field.isOptional || field.isInternal) {\n return true;\n }\n\n const fieldType = context?.isInputField !== false ? 'input' : 'output';\n throw new Error(\n `Value for ${fieldType} field '${field.name}' is required.`\n );\n }\n return false;\n};\n\nfunction formatDescription(str: string) {\n const value = str.trim();\n return value.length > 0\n ? `${value.charAt(0).toUpperCase()}${value.slice(1)}${value.endsWith('.') ? 
'' : '.'}`\n : '';\n}\n","import type { AxAIMemory } from '../mem/types.js';\nimport type {\n AxGenDeltaOut,\n AxGenOut,\n AxResultPickerFunction,\n AxResultPickerFunctionFunctionResults,\n} from './types.js';\n\nexport interface AxSamplePickerOptions<OUT extends AxGenOut> {\n resultPicker?: AxResultPickerFunction<OUT>;\n}\n\n/**\n * Checks if there are function calls in memory\n */\nfunction checkForFunctionCalls(mem: AxAIMemory, sessionId?: string): boolean {\n const history = mem.history(0, sessionId);\n\n // Check for both function calls and function results\n const hasFunctionResults = history.some((msg) => msg.role === 'function');\n const hasFunctionCalls = history.some(\n (msg) =>\n msg.role === 'assistant' &&\n 'functionCalls' in msg &&\n Array.isArray(msg.functionCalls) &&\n msg.functionCalls.length > 0\n );\n\n return hasFunctionCalls && hasFunctionResults;\n}\n\n/**\n * Extracts function execution results from memory\n */\nfunction extractFunctionResults(\n mem: AxAIMemory,\n sessionId?: string\n): AxResultPickerFunctionFunctionResults['results'] {\n const history = mem.history(0, sessionId);\n const results: {\n index: number;\n functionName: string;\n functionId: string;\n args: string | object;\n result: string;\n isError?: boolean;\n }[] = [];\n\n // Find assistant messages with function calls\n const assistantMessages = history.filter(\n (msg) =>\n msg.role === 'assistant' &&\n 'functionCalls' in msg &&\n Array.isArray(msg.functionCalls) &&\n msg.functionCalls.length > 0\n );\n\n // Find function result messages\n const functionMessages = history.filter((msg) => msg.role === 'function');\n\n // Match function calls with their results\n for (const assistantMsg of assistantMessages) {\n if ('functionCalls' in assistantMsg && assistantMsg.functionCalls) {\n for (const funcCall of assistantMsg.functionCalls) {\n // Find the corresponding function result\n const funcResult = functionMessages.find(\n (msg) => 'functionId' in msg && msg.functionId === funcCall.id\n );\n\n if (\n funcResult &&\n 'result' in funcResult &&\n 'functionId' in funcResult\n ) {\n results.push({\n index: results.length, // Use sequential index for function results\n functionName: funcCall.function.name,\n functionId: funcCall.id,\n args: funcCall.function.params || '',\n result: String(funcResult.result),\n isError:\n 'isError' in funcResult ? Boolean(funcResult.isError) : false,\n });\n }\n }\n }\n }\n return results;\n}\n\n/**\n * Selects a result from multiple samples using the provided result picker function.\n * If no result picker is provided or only one result exists, returns the first result.\n */\nexport async function selectFromSamples<OUT extends AxGenOut>(\n buffer: AxGenDeltaOut<OUT>[],\n options?: AxSamplePickerOptions<OUT>,\n mem?: AxAIMemory,\n sessionId?: string\n): Promise<number> {\n // If no result picker or only one result, use index 0\n if (!options?.resultPicker || buffer.length <= 1) {\n return 0;\n }\n\n const resultPicker = options.resultPicker;\n\n // Check if there are function calls in memory to determine data type\n const hasFunctionCalls = mem ? 
checkForFunctionCalls(mem, sessionId) : false;\n\n if (hasFunctionCalls && mem) {\n // Extract function execution data from memory\n const functionResults = extractFunctionResults(mem, sessionId);\n const selectedIndex = await resultPicker({\n type: 'function',\n results: functionResults,\n });\n\n // Validate the selected index\n if (selectedIndex < 0 || selectedIndex >= functionResults.length) {\n throw new Error(\n `Result picker returned invalid index: ${selectedIndex}. Must be between 0 and ${functionResults.length - 1}`\n );\n }\n\n return selectedIndex;\n }\n // Use field results\n const fieldResults = buffer.map((b, index) => ({\n index,\n sample: b.delta,\n }));\n\n const selectedIndex = await resultPicker({\n type: 'fields',\n results: fieldResults,\n });\n\n // Validate the selected index\n if (selectedIndex < 0 || selectedIndex >= buffer.length) {\n throw new Error(\n `Result picker returned invalid index: ${selectedIndex}. Must be between 0 and ${buffer.length - 1}`\n );\n }\n\n return selectedIndex;\n}\n\n/**\n * Selects a result index from memory using the provided result picker function.\n * If no result picker is provided or only one result exists, returns 0.\n * If the last memory is not from an assistant role, returns 0.\n */\nexport async function selectFromSamplesInMemory<OUT extends AxGenOut>(\n mem: AxAIMemory,\n sessionId?: string,\n options?: AxSamplePickerOptions<OUT>\n): Promise<number> {\n const lastMemory = mem?.getLast(sessionId);\n\n // If no memory or not from assistant role, return 0\n if (!lastMemory || lastMemory.role !== 'assistant') {\n return 0;\n }\n\n // If only one chat sample, return 0\n if (lastMemory.chat.length <= 1) {\n return 0;\n }\n\n // Convert memory chat to buffer format for selectFromSamples\n const buffer = lastMemory.chat.map((chat) => ({\n version: 0,\n index: chat.index,\n delta: chat.value as OUT,\n }));\n\n const selectedIndex = await selectFromSamples(\n buffer,\n options,\n mem,\n sessionId\n );\n return selectedIndex;\n}\n","import type { AxAIService } from '../ai/types.js';\nimport { AxDBMemory, type AxDBState } from '../db/memory.js';\nimport { ColorLog } from '../util/log.js';\n\nconst colorLog = new ColorLog();\n\nexport interface AxSimpleClassifierForwardOptions {\n cutoff?: number;\n abortSignal?: AbortSignal;\n}\n\nexport class AxSimpleClassifierClass {\n private readonly name: string;\n private readonly context: readonly string[];\n\n constructor(name: string, context: readonly string[]) {\n this.name = name;\n this.context = context;\n }\n\n public getName(): string {\n return this.name;\n }\n\n public getContext(): readonly string[] {\n return this.context;\n }\n}\n\nexport class AxSimpleClassifier {\n private readonly ai: AxAIService;\n\n private db: AxDBMemory;\n private debug?: boolean;\n\n public constructor(ai: AxAIService) {\n this.db = new AxDBMemory();\n this.ai = ai;\n }\n\n public getState(): AxDBState | undefined {\n return this.db.getDB();\n }\n\n public setState(state: AxDBState) {\n this.db.setDB(state);\n }\n\n public setClasses = async (\n classes: readonly AxSimpleClassifierClass[],\n options?: Readonly<{ abortSignal?: AbortSignal }>\n ): Promise<void> => {\n for (const c of classes) {\n const ret = await this.ai.embed(\n { texts: c.getContext() },\n {\n abortSignal: options?.abortSignal,\n }\n );\n await this.db.upsert({\n id: c.getName(),\n table: 'classes',\n values: ret.embeddings[0],\n });\n }\n };\n\n public async forward(\n text: string,\n options?: Readonly<AxSimpleClassifierForwardOptions>\n 
): Promise<string> {\n const { embeddings } = await this.ai.embed(\n { texts: [text] },\n {\n abortSignal: options?.abortSignal,\n }\n );\n\n const matches = await this.db.query({\n table: 'classes',\n values: embeddings[0],\n });\n\n let m = matches.matches;\n if (typeof options?.cutoff === 'number') {\n const { cutoff } = options;\n m = m.filter((m) => m.score <= cutoff);\n }\n\n if (this.debug) {\n console.log(\n `${colorLog.whiteBright(`query: ${text}`)}\\n${colorLog.greenBright(\n JSON.stringify(m.map((m) => `${m.id}, ${m.score}`))\n )}`\n );\n }\n\n const matchedClass = m.at(0);\n if (!matchedClass) {\n return '';\n }\n\n return matchedClass.id;\n }\n\n public setOptions(options: Readonly<{ debug?: boolean }>): void {\n if (typeof options.debug === 'boolean') {\n this.debug = options.debug;\n }\n }\n}\n","export const stopwords = new Set([\n '0o',\n '0s',\n '3a',\n '3b',\n '3d',\n '6b',\n '6o',\n 'a',\n 'a1',\n 'a2',\n 'a3',\n 'a4',\n 'ab',\n 'able',\n 'about',\n 'above',\n 'abst',\n 'ac',\n 'accordance',\n 'according',\n 'accordingly',\n 'across',\n 'act',\n 'actually',\n 'ad',\n 'added',\n 'adj',\n 'ae',\n 'af',\n 'affected',\n 'affecting',\n 'affects',\n 'after',\n 'afterwards',\n 'ag',\n 'again',\n 'against',\n 'ah',\n 'ain',\n \"ain't\",\n 'aj',\n 'al',\n 'all',\n 'allow',\n 'allows',\n 'almost',\n 'alone',\n 'along',\n 'already',\n 'also',\n 'although',\n 'always',\n 'am',\n 'among',\n 'amongst',\n 'amoungst',\n 'amount',\n 'an',\n 'and',\n 'announce',\n 'another',\n 'any',\n 'anybody',\n 'anyhow',\n 'anymore',\n 'anyone',\n 'anything',\n 'anyway',\n 'anyways',\n 'anywhere',\n 'ao',\n 'ap',\n 'apart',\n 'apparently',\n 'appear',\n 'appreciate',\n 'appropriate',\n 'approximately',\n 'ar',\n 'are',\n 'aren',\n 'arent',\n \"aren't\",\n 'arise',\n 'around',\n 'as',\n \"a's\",\n 'aside',\n 'ask',\n 'asking',\n 'associated',\n 'at',\n 'au',\n 'auth',\n 'av',\n 'available',\n 'aw',\n 'away',\n 'awfully',\n 'ax',\n 'ay',\n 'az',\n 'b',\n 'b1',\n 'b2',\n 'b3',\n 'ba',\n 'back',\n 'bc',\n 'bd',\n 'be',\n 'became',\n 'because',\n 'become',\n 'becomes',\n 'becoming',\n 'been',\n 'before',\n 'beforehand',\n 'begin',\n 'beginning',\n 'beginnings',\n 'begins',\n 'behind',\n 'being',\n 'believe',\n 'below',\n 'beside',\n 'besides',\n 'best',\n 'better',\n 'between',\n 'beyond',\n 'bi',\n 'bill',\n 'biol',\n 'bj',\n 'bk',\n 'bl',\n 'bn',\n 'both',\n 'bottom',\n 'bp',\n 'br',\n 'brief',\n 'briefly',\n 'bs',\n 'bt',\n 'bu',\n 'but',\n 'bx',\n 'by',\n 'c',\n 'c1',\n 'c2',\n 'c3',\n 'ca',\n 'call',\n 'came',\n 'can',\n 'cannot',\n 'cant',\n \"can't\",\n 'cause',\n 'causes',\n 'cc',\n 'cd',\n 'ce',\n 'certain',\n 'certainly',\n 'cf',\n 'cg',\n 'ch',\n 'changes',\n 'ci',\n 'cit',\n 'cj',\n 'cl',\n 'clearly',\n 'cm',\n \"c'mon\",\n 'cn',\n 'co',\n 'com',\n 'come',\n 'comes',\n 'con',\n 'concerning',\n 'consequently',\n 'consider',\n 'considering',\n 'contain',\n 'containing',\n 'contains',\n 'corresponding',\n 'could',\n 'couldn',\n 'couldnt',\n \"couldn't\",\n 'course',\n 'cp',\n 'cq',\n 'cr',\n 'cry',\n 'cs',\n \"c's\",\n 'ct',\n 'cu',\n 'currently',\n 'cv',\n 'cx',\n 'cy',\n 'cz',\n 'd',\n 'd2',\n 'da',\n 'date',\n 'dc',\n 'dd',\n 'de',\n 'definitely',\n 'describe',\n 'described',\n 'despite',\n 'detail',\n 'df',\n 'di',\n 'did',\n 'didn',\n \"didn't\",\n 'different',\n 'dj',\n 'dk',\n 'dl',\n 'do',\n 'does',\n 'doesn',\n \"doesn't\",\n 'doing',\n 'don',\n 'done',\n \"don't\",\n 'down',\n 'downwards',\n 'dp',\n 'dr',\n 'ds',\n 'dt',\n 'du',\n 'due',\n 'during',\n 'dx',\n 'dy',\n 'e',\n 'e2',\n 
'e3',\n 'ea',\n 'each',\n 'ec',\n 'ed',\n 'edu',\n 'ee',\n 'ef',\n 'effect',\n 'eg',\n 'ei',\n 'eight',\n 'eighty',\n 'either',\n 'ej',\n 'el',\n 'eleven',\n 'else',\n 'elsewhere',\n 'em',\n 'empty',\n 'en',\n 'end',\n 'ending',\n 'enough',\n 'entirely',\n 'eo',\n 'ep',\n 'eq',\n 'er',\n 'es',\n 'especially',\n 'est',\n 'et',\n 'et-al',\n 'etc',\n 'eu',\n 'ev',\n 'even',\n 'ever',\n 'every',\n 'everybody',\n 'everyone',\n 'everything',\n 'everywhere',\n 'ex',\n 'exactly',\n 'example',\n 'except',\n 'ey',\n 'f',\n 'f2',\n 'fa',\n 'far',\n 'fc',\n 'few',\n 'ff',\n 'fi',\n 'fifteen',\n 'fifth',\n 'fify',\n 'fill',\n 'find',\n 'fire',\n 'first',\n 'five',\n 'fix',\n 'fj',\n 'fl',\n 'fn',\n 'fo',\n 'followed',\n 'following',\n 'follows',\n 'for',\n 'former',\n 'formerly',\n 'forth',\n 'forty',\n 'found',\n 'four',\n 'fr',\n 'from',\n 'front',\n 'ft',\n 'fu',\n 'full',\n 'further',\n 'furthermore',\n 'fy',\n 'g',\n 'ga',\n 'gave',\n 'ge',\n 'get',\n 'gets',\n 'getting',\n 'gi',\n 'give',\n 'given',\n 'gives',\n 'giving',\n 'gj',\n 'gl',\n 'go',\n 'goes',\n 'going',\n 'gone',\n 'got',\n 'gotten',\n 'gr',\n 'greetings',\n 'gs',\n 'gy',\n 'h',\n 'h2',\n 'h3',\n 'had',\n 'hadn',\n \"hadn't\",\n 'happens',\n 'hardly',\n 'has',\n 'hasn',\n 'hasnt',\n \"hasn't\",\n 'have',\n 'haven',\n \"haven't\",\n 'having',\n 'he',\n 'hed',\n \"he'd\",\n \"he'll\",\n 'hello',\n 'help',\n 'hence',\n 'her',\n 'here',\n 'hereafter',\n 'hereby',\n 'herein',\n 'heres',\n \"here's\",\n 'hereupon',\n 'hers',\n 'herself',\n 'hes',\n \"he's\",\n 'hh',\n 'hi',\n 'hid',\n 'him',\n 'himself',\n 'his',\n 'hither',\n 'hj',\n 'ho',\n 'home',\n 'hopefully',\n 'how',\n 'howbeit',\n 'however',\n \"how's\",\n 'hr',\n 'hs',\n 'http',\n 'hu',\n 'hundred',\n 'hy',\n 'i',\n 'i2',\n 'i3',\n 'i4',\n 'i6',\n 'i7',\n 'i8',\n 'ia',\n 'ib',\n 'ibid',\n 'ic',\n 'id',\n \"i'd\",\n 'ie',\n 'if',\n 'ig',\n 'ignored',\n 'ih',\n 'ii',\n 'ij',\n 'il',\n \"i'll\",\n 'im',\n \"i'm\",\n 'immediate',\n 'immediately',\n 'importance',\n 'important',\n 'in',\n 'inasmuch',\n 'inc',\n 'indeed',\n 'index',\n 'indicate',\n 'indicated',\n 'indicates',\n 'information',\n 'inner',\n 'insofar',\n 'instead',\n 'interest',\n 'into',\n 'invention',\n 'inward',\n 'io',\n 'ip',\n 'iq',\n 'ir',\n 'is',\n 'isn',\n \"isn't\",\n 'it',\n 'itd',\n \"it'd\",\n \"it'll\",\n 'its',\n \"it's\",\n 'itself',\n 'iv',\n \"i've\",\n 'ix',\n 'iy',\n 'iz',\n 'j',\n 'jj',\n 'jr',\n 'js',\n 'jt',\n 'ju',\n 'just',\n 'k',\n 'ke',\n 'keep',\n 'keeps',\n 'kept',\n 'kg',\n 'kj',\n 'km',\n 'know',\n 'known',\n 'knows',\n 'ko',\n 'l',\n 'l2',\n 'la',\n 'largely',\n 'last',\n 'lately',\n 'later',\n 'latter',\n 'latterly',\n 'lb',\n 'lc',\n 'le',\n 'least',\n 'les',\n 'less',\n 'lest',\n 'let',\n 'lets',\n \"let's\",\n 'lf',\n 'like',\n 'liked',\n 'likely',\n 'line',\n 'little',\n 'lj',\n 'll',\n 'll',\n 'ln',\n 'lo',\n 'look',\n 'looking',\n 'looks',\n 'los',\n 'lr',\n 'ls',\n 'lt',\n 'ltd',\n 'm',\n 'm2',\n 'ma',\n 'made',\n 'mainly',\n 'make',\n 'makes',\n 'many',\n 'may',\n 'maybe',\n 'me',\n 'mean',\n 'means',\n 'meantime',\n 'meanwhile',\n 'merely',\n 'mg',\n 'might',\n 'mightn',\n \"mightn't\",\n 'mill',\n 'million',\n 'mine',\n 'miss',\n 'ml',\n 'mn',\n 'mo',\n 'more',\n 'moreover',\n 'most',\n 'mostly',\n 'move',\n 'mr',\n 'mrs',\n 'ms',\n 'mt',\n 'mu',\n 'much',\n 'mug',\n 'must',\n 'mustn',\n \"mustn't\",\n 'my',\n 'myself',\n 'model',\n 'n',\n 'n2',\n 'na',\n 'name',\n 'namely',\n 'nay',\n 'nc',\n 'nd',\n 'ne',\n 'near',\n 'nearly',\n 'necessarily',\n 'necessary',\n 'need',\n 
'needn',\n \"needn't\",\n 'needs',\n 'neither',\n 'never',\n 'nevertheless',\n 'new',\n 'next',\n 'ng',\n 'ni',\n 'nine',\n 'ninety',\n 'nj',\n 'nl',\n 'nn',\n 'no',\n 'nobody',\n 'non',\n 'none',\n 'nonetheless',\n 'noone',\n 'nor',\n 'normally',\n 'nos',\n 'not',\n 'noted',\n 'nothing',\n 'novel',\n 'now',\n 'nowhere',\n 'nr',\n 'ns',\n 'nt',\n 'ny',\n 'o',\n 'oa',\n 'ob',\n 'obtain',\n 'obtained',\n 'obviously',\n 'oc',\n 'od',\n 'of',\n 'off',\n 'often',\n 'og',\n 'oh',\n 'oi',\n 'oj',\n 'ok',\n 'okay',\n 'ol',\n 'old',\n 'om',\n 'omitted',\n 'on',\n 'once',\n 'one',\n 'ones',\n 'only',\n 'onto',\n 'oo',\n 'op',\n 'oq',\n 'or',\n 'ord',\n 'os',\n 'ot',\n 'other',\n 'others',\n 'otherwise',\n 'ou',\n 'ought',\n 'our',\n 'ours',\n 'ourselves',\n 'out',\n 'outside',\n 'over',\n 'overall',\n 'ow',\n 'owing',\n 'own',\n 'ox',\n 'oz',\n 'p',\n 'p1',\n 'p2',\n 'p3',\n 'page',\n 'pagecount',\n 'pages',\n 'par',\n 'part',\n 'particular',\n 'particularly',\n 'pas',\n 'past',\n 'pc',\n 'pd',\n 'pe',\n 'per',\n 'perhaps',\n 'pf',\n 'ph',\n 'pi',\n 'pj',\n 'pk',\n 'pl',\n 'placed',\n 'please',\n 'plus',\n 'pm',\n 'pn',\n 'po',\n 'poorly',\n 'possible',\n 'possibly',\n 'potentially',\n 'pp',\n 'pq',\n 'pr',\n 'predominantly',\n 'present',\n 'presumably',\n 'previously',\n 'primarily',\n 'probably',\n 'promptly',\n 'proud',\n 'provides',\n 'ps',\n 'pt',\n 'pu',\n 'put',\n 'py',\n 'q',\n 'qj',\n 'qu',\n 'que',\n 'quickly',\n 'quite',\n 'qv',\n 'r',\n 'r2',\n 'ra',\n 'ran',\n 'rather',\n 'rc',\n 'rd',\n 're',\n 'readily',\n 'really',\n 'reasonably',\n 'recent',\n 'recently',\n 'ref',\n 'refs',\n 'regarding',\n 'regardless',\n 'regards',\n 'related',\n 'relatively',\n 'research',\n 'research-articl',\n 'respectively',\n 'resulted',\n 'resulting',\n 'results',\n 'rf',\n 'rh',\n 'ri',\n 'right',\n 'rj',\n 'rl',\n 'rm',\n 'rn',\n 'ro',\n 'rq',\n 'rr',\n 'rs',\n 'rt',\n 'ru',\n 'run',\n 'rv',\n 'ry',\n 's',\n 's2',\n 'sa',\n 'said',\n 'same',\n 'saw',\n 'say',\n 'saying',\n 'says',\n 'sc',\n 'sd',\n 'se',\n 'sec',\n 'second',\n 'secondly',\n 'section',\n 'see',\n 'seeing',\n 'seem',\n 'seemed',\n 'seeming',\n 'seems',\n 'seen',\n 'self',\n 'selves',\n 'sensible',\n 'sent',\n 'serious',\n 'seriously',\n 'seven',\n 'several',\n 'sf',\n 'shall',\n 'shan',\n \"shan't\",\n 'she',\n 'shed',\n \"she'd\",\n \"she'll\",\n 'shes',\n \"she's\",\n 'should',\n 'shouldn',\n \"shouldn't\",\n \"should've\",\n 'show',\n 'showed',\n 'shown',\n 'showns',\n 'shows',\n 'si',\n 'side',\n 'significant',\n 'significantly',\n 'similar',\n 'similarly',\n 'since',\n 'sincere',\n 'six',\n 'sixty',\n 'sj',\n 'sl',\n 'slightly',\n 'sm',\n 'sn',\n 'so',\n 'some',\n 'somebody',\n 'somehow',\n 'someone',\n 'somethan',\n 'something',\n 'sometime',\n 'sometimes',\n 'somewhat',\n 'somewhere',\n 'soon',\n 'sorry',\n 'sp',\n 'specifically',\n 'specified',\n 'specify',\n 'specifying',\n 'sq',\n 'sr',\n 'ss',\n 'st',\n 'still',\n 'stop',\n 'strongly',\n 'sub',\n 'substantially',\n 'successfully',\n 'such',\n 'sufficiently',\n 'suggest',\n 'sup',\n 'sure',\n 'sy',\n 'system',\n 'sz',\n 't',\n 't1',\n 't2',\n 't3',\n 'take',\n 'taken',\n 'taking',\n 'tb',\n 'tc',\n 'td',\n 'te',\n 'tell',\n 'ten',\n 'tends',\n 'tf',\n 'th',\n 'than',\n 'thank',\n 'thanks',\n 'thanx',\n 'that',\n \"that'll\",\n 'thats',\n \"that's\",\n \"that've\",\n 'the',\n 'their',\n 'theirs',\n 'them',\n 'themselves',\n 'then',\n 'thence',\n 'there',\n 'thereafter',\n 'thereby',\n 'thered',\n 'therefore',\n 'therein',\n \"there'll\",\n 'thereof',\n 'therere',\n 'theres',\n 
\"there's\",\n 'thereto',\n 'thereupon',\n \"there've\",\n 'these',\n 'they',\n 'theyd',\n \"they'd\",\n \"they'll\",\n 'theyre',\n \"they're\",\n \"they've\",\n 'thickv',\n 'thin',\n 'think',\n 'third',\n 'this',\n 'thorough',\n 'thoroughly',\n 'those',\n 'thou',\n 'though',\n 'thoughh',\n 'thousand',\n 'three',\n 'throug',\n 'through',\n 'throughout',\n 'thru',\n 'thus',\n 'ti',\n 'til',\n 'tip',\n 'tj',\n 'tl',\n 'tm',\n 'tn',\n 'to',\n 'together',\n 'too',\n 'took',\n 'top',\n 'toward',\n 'towards',\n 'tp',\n 'tq',\n 'tr',\n 'tried',\n 'tries',\n 'truly',\n 'try',\n 'trying',\n 'ts',\n \"t's\",\n 'tt',\n 'tv',\n 'twelve',\n 'twenty',\n 'twice',\n 'two',\n 'tx',\n 'u',\n 'u201d',\n 'ue',\n 'ui',\n 'uj',\n 'uk',\n 'um',\n 'un',\n 'under',\n 'unfortunately',\n 'unless',\n 'unlike',\n 'unlikely',\n 'until',\n 'unto',\n 'uo',\n 'up',\n 'upon',\n 'ups',\n 'ur',\n 'us',\n 'use',\n 'used',\n 'useful',\n 'usefully',\n 'usefulness',\n 'uses',\n 'using',\n 'usually',\n 'ut',\n 'v',\n 'va',\n 'value',\n 'various',\n 'vd',\n 've',\n 've',\n 'very',\n 'via',\n 'viz',\n 'vj',\n 'vo',\n 'vol',\n 'vols',\n 'volumtype',\n 'vq',\n 'vs',\n 'vt',\n 'vu',\n 'w',\n 'wa',\n 'want',\n 'wants',\n 'was',\n 'wasn',\n 'wasnt',\n \"wasn't\",\n 'way',\n 'we',\n 'wed',\n \"we'd\",\n 'welcome',\n 'well',\n \"we'll\",\n 'well-b',\n 'went',\n 'were',\n \"we're\",\n 'weren',\n 'werent',\n \"weren't\",\n \"we've\",\n 'what',\n 'whatever',\n \"what'll\",\n 'whats',\n \"what's\",\n 'when',\n 'whence',\n 'whenever',\n \"when's\",\n 'where',\n 'whereafter',\n 'whereas',\n 'whereby',\n 'wherein',\n 'wheres',\n \"where's\",\n 'whereupon',\n 'wherever',\n 'whether',\n 'which',\n 'while',\n 'whim',\n 'whither',\n 'who',\n 'whod',\n 'whoever',\n 'whole',\n \"who'll\",\n 'whom',\n 'whomever',\n 'whos',\n \"who's\",\n 'whose',\n 'why',\n \"why's\",\n 'wi',\n 'widely',\n 'will',\n 'willing',\n 'wish',\n 'with',\n 'within',\n 'without',\n 'wo',\n 'won',\n 'wonder',\n 'wont',\n \"won't\",\n 'words',\n 'world',\n 'would',\n 'wouldn',\n 'wouldnt',\n \"wouldn't\",\n 'www',\n 'x',\n 'x1',\n 'x2',\n 'x3',\n 'xf',\n 'xi',\n 'xj',\n 'xk',\n 'xl',\n 'xn',\n 'xo',\n 'xs',\n 'xt',\n 'xv',\n 'xx',\n 'y',\n 'y2',\n 'yes',\n 'yet',\n 'yj',\n 'yl',\n 'you',\n 'youd',\n \"you'd\",\n \"you'll\",\n 'your',\n 'youre',\n \"you're\",\n 'yours',\n 'yourself',\n 'yourselves',\n \"you've\",\n 'yr',\n 'ys',\n 'yt',\n 'z',\n 'zero',\n 'zi',\n 'zz',\n 'task',\n]);\n","import type { AxAIService } from '../ai/types.js';\nimport type { AxGen } from './generate.js';\nimport type { AxExample, AxMetricFn } from './optimizer.js';\nimport type { AxGenIn, AxGenOut } from './types.js';\nimport { updateProgressBar } from './util.js';\n\nexport type AxEvaluateArgs<IN extends AxGenIn, OUT extends AxGenOut> = {\n ai: AxAIService;\n program: Readonly<AxGen<IN, OUT>>;\n examples: Readonly<AxExample[]>;\n};\n\nexport class AxTestPrompt<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> {\n private ai: AxAIService;\n private program: Readonly<AxGen<IN, OUT>>;\n private examples: Readonly<AxExample[]>;\n\n constructor({\n ai,\n program,\n examples = [],\n }: Readonly<AxEvaluateArgs<IN, OUT>>) {\n if (examples.length === 0) {\n throw new Error('No examples found');\n }\n this.ai = ai;\n this.program = program;\n this.examples = examples;\n }\n\n public async run(metricFn: AxMetricFn) {\n const st = Date.now();\n const total = this.examples.length;\n let sumOfScores = 0;\n\n for (let i = 0; i < total; i++) {\n const ex = this.examples[i];\n if (!ex) {\n throw 
new Error('Invalid example');\n }\n\n const res = await this.program.forward(this.ai, ex as IN);\n const score = await metricFn({ prediction: res, example: ex });\n sumOfScores += score;\n\n const et = Date.now() - st;\n // Assuming updateProgressBar's 3rd argument is a count/value that represents progress.\n // If it specifically needs a 'success count', this might need adjustment.\n // For now, using sumOfScores, but it might represent total score, not #successes.\n // If AxMetricFn is always 0 or 1, sumOfScores is equivalent to successCount.\n updateProgressBar(i, total, sumOfScores, et, 'Testing Prompt', 30);\n }\n\n const averageScore = total > 0 ? sumOfScores / total : 0;\n console.log(\n '\\nPerformance: ',\n sumOfScores,\n '/',\n total,\n 'Average Score: ',\n averageScore,\n '\\n'\n );\n }\n}\n","import { ColorLog } from '../util/log.js';\nimport type {\n AxOptimizerLoggerData,\n AxOptimizerLoggerFunction,\n} from './optimizerTypes.js';\n\n// Default output function that writes to stdout\nconst defaultOutput = (message: string): void => {\n process.stdout.write(message);\n};\n\n/**\n * Factory function to create a default optimizer logger with color formatting\n */\nexport const axCreateDefaultOptimizerColorLogger = (\n output: (message: string) => void = defaultOutput\n): AxOptimizerLoggerFunction => {\n const cl = new ColorLog();\n const lightDivider = cl.gray('─'.repeat(50));\n const heavyDivider = cl.gray('━'.repeat(50));\n\n return (data: AxOptimizerLoggerData) => {\n let formattedMessage = '';\n\n switch (data.name) {\n case 'OptimizationStart':\n formattedMessage =\n `\\n${cl.blueBright('● ')}${cl.whiteBright('Optimization Started')}\\n` +\n `${lightDivider}\\n` +\n ` ${cl.white('Optimizer:')} ${cl.cyan(data.value.optimizerType)}\\n` +\n ` ${cl.white('Examples:')} ${cl.green(data.value.exampleCount.toString())} training, ${cl.green(data.value.validationCount.toString())} validation\\n` +\n ` ${cl.white('Config:')} ${cl.white(JSON.stringify(data.value.config).slice(0, 80))}${JSON.stringify(data.value.config).length > 80 ? '...' : ''}\\n` +\n `${heavyDivider}\\n`;\n break;\n\n case 'RoundProgress':\n formattedMessage =\n `${cl.yellow('● ')}${cl.whiteBright(`Round ${data.value.round}/${data.value.totalRounds}`)}\\n` +\n ` ${cl.white('Score:')} ${cl.green(data.value.currentScore.toFixed(3))} ${cl.white('(best:')} ${cl.greenBright(data.value.bestScore.toFixed(3))}${cl.white(')')}\\n`;\n break;\n\n case 'EarlyStopping':\n formattedMessage =\n `\\n${cl.red('● ')}${cl.whiteBright('Early Stopping')}\\n` +\n `${lightDivider}\\n` +\n ` ${cl.white('Round:')} ${cl.yellow(data.value.round.toString())}\\n` +\n ` ${cl.white('Reason:')} ${cl.yellow(data.value.reason)}\\n` +\n ` ${cl.white('Final Score:')} ${cl.green(data.value.finalScore.toFixed(3))}\\n` +\n `${heavyDivider}\\n`;\n break;\n\n case 'OptimizationComplete':\n formattedMessage =\n `\\n${cl.green('● ')}${cl.whiteBright('Optimization Complete')}\\n` +\n `${lightDivider}\\n` +\n ` ${cl.white('Best Score:')} ${cl.greenBright(data.value.bestScore.toFixed(3))}\\n` +\n ` ${cl.white('Best Config:')} ${cl.cyan(JSON.stringify(data.value.bestConfiguration).slice(0, 80))}${JSON.stringify(data.value.bestConfiguration).length > 80 ? '...' 
: ''}\\n` +\n ` ${cl.white('Total Calls:')} ${cl.white(data.value.stats.totalCalls?.toString() || 'N/A')}\\n` +\n ` ${cl.white('Success Rate:')} ${cl.green(`${(((data.value.stats.successfulDemos || 0) / Math.max(data.value.stats.totalCalls || 1, 1)) * 100).toFixed(1)}%`)}\\n` +\n `${heavyDivider}\\n`;\n break;\n\n case 'ConfigurationProposal':\n formattedMessage =\n `${cl.magenta('● ')}${cl.whiteBright(`${data.value.type} Proposals`)} ${cl.white(`(${data.value.count})`)}\\n` +\n ` ${cl.white('Candidates:')} ${cl.white(\n data.value.proposals\n .slice(0, 2)\n .map((p) =>\n typeof p === 'string'\n ? `\"${p.slice(0, 40)}...\"`\n : `${JSON.stringify(p).slice(0, 40)}...`\n )\n .join(', ')\n )}\\n`;\n break;\n\n case 'BootstrappedDemos':\n formattedMessage =\n `${cl.cyan('● ')}${cl.whiteBright('Bootstrapped Demos')} ${cl.white(`(${data.value.count})`)}\\n` +\n ` ${cl.white('Generated:')} ${cl.green(data.value.count.toString())} demonstration examples\\n`;\n break;\n\n case 'BestConfigFound':\n formattedMessage =\n `${cl.green('● ')}${cl.whiteBright('Best Configuration Found')}\\n` +\n ` ${cl.white('Score:')} ${cl.greenBright(data.value.score.toFixed(3))}\\n` +\n ` ${cl.white('Config:')} ${cl.cyan(JSON.stringify(data.value.config).slice(0, 80))}${JSON.stringify(data.value.config).length > 80 ? '...' : ''}\\n`;\n break;\n\n default:\n formattedMessage =\n `${cl.red('● ')}${cl.whiteBright('Unknown Event')}\\n` +\n ` ${cl.white(JSON.stringify(data).slice(0, 100))}${JSON.stringify(data).length > 100 ? '...' : ''}\\n`;\n }\n\n output(formattedMessage);\n };\n};\n\n/**\n * Factory function to create a text-only optimizer logger (no colors)\n */\nexport const axCreateDefaultOptimizerTextLogger = (\n output: (message: string) => void = defaultOutput\n): AxOptimizerLoggerFunction => {\n const divider = '─'.repeat(60);\n\n return (data: AxOptimizerLoggerData) => {\n let formattedMessage = '';\n\n switch (data.name) {\n case 'OptimizationStart':\n formattedMessage =\n `[ OPTIMIZATION START: ${data.value.optimizerType} ]\n${divider}\n` +\n `Config: ${JSON.stringify(data.value.config, null, 2)}\n` +\n `Examples: ${data.value.exampleCount}, Validation: ${data.value.validationCount}\n` +\n `${divider}`;\n break;\n case 'RoundProgress':\n formattedMessage =\n `[ ROUND ${data.value.round}/${data.value.totalRounds} ]\n` +\n `Current Score: ${data.value.currentScore.toFixed(3)}, Best: ${data.value.bestScore.toFixed(3)}\n` +\n `Config: ${JSON.stringify(data.value.configuration)}\n` +\n `${divider}`;\n break;\n case 'EarlyStopping':\n formattedMessage =\n `[ EARLY STOPPING at Round ${data.value.round} ]\n` +\n `Reason: ${data.value.reason}\n` +\n `Final Score: ${data.value.finalScore.toFixed(3)}\n` +\n `${divider}`;\n break;\n case 'OptimizationComplete':\n formattedMessage =\n `[ OPTIMIZATION COMPLETE ]\n${divider}\n` +\n `Best Score: ${data.value.bestScore.toFixed(3)}\n` +\n `Best Config: ${JSON.stringify(data.value.bestConfiguration)}\n` +\n `Stats: ${JSON.stringify(data.value.stats, null, 2)}\n` +\n `${divider}`;\n break;\n case 'ConfigurationProposal':\n formattedMessage =\n `[ CONFIG PROPOSAL: ${data.value.type} ]\n` +\n `Count: ${data.value.count}\n` +\n `Proposals: ${JSON.stringify(data.value.proposals.slice(0, 3), null, 2)} ${data.value.proposals.length > 3 ? '... 
(truncated)' : ''}\n` +\n `${divider}`;\n break;\n case 'BootstrappedDemos':\n formattedMessage =\n `[ BOOTSTRAPPED DEMOS ]\n` +\n `Count: ${data.value.count}\n` +\n `Demos: ${JSON.stringify(data.value.demos.slice(0, 2), null, 2)} ${data.value.demos.length > 2 ? '... (truncated)' : ''}\n` +\n `${divider}`;\n break;\n case 'BestConfigFound':\n formattedMessage =\n `[ BEST CONFIG FOUND ]\n` +\n `Score: ${data.value.score.toFixed(3)}\n` +\n `Config: ${JSON.stringify(data.value.config)}\n` +\n `${divider}`;\n break;\n default:\n formattedMessage = `[ UNKNOWN OPTIMIZER EVENT ]\n${JSON.stringify(data)}\n${divider}`;\n }\n\n output(formattedMessage);\n };\n};\n\n/**\n * Default optimizer logger instance with color formatting\n */\nexport const axDefaultOptimizerLogger = axCreateDefaultOptimizerColorLogger();\n","import type { Counter, Gauge, Histogram, Meter } from '@opentelemetry/api';\n\nimport type { AxAIService, AxLoggerFunction } from '../ai/types.js';\n\nimport { AxGen } from './generate.js';\nimport { axGlobals } from './globals.js';\nimport { axDefaultOptimizerLogger } from './optimizerLogging.js';\nimport type { AxOptimizerLoggerFunction } from './optimizerTypes.js';\nimport type {\n AxFieldValue,\n AxGenIn,\n AxGenOut,\n AxProgramDemos,\n} from './types.js';\n\n// Logger utilities are now exported from ./loggers.js\n\n// Common types used by optimizers\nexport type AxExample = Record<string, AxFieldValue>;\n\nexport type AxMetricFn = <T extends AxGenOut = AxGenOut>(\n arg0: Readonly<{ prediction: T; example: AxExample }>\n) => number | Promise<number>;\n\nexport type AxMetricFnArgs = Parameters<AxMetricFn>[0];\n\n// Multi-objective metric function for Pareto optimization\nexport type AxMultiMetricFn = <T extends AxGenOut = AxGenOut>(\n arg0: Readonly<{ prediction: T; example: AxExample }>\n) => Record<string, number>;\n\n// Progress tracking interface for real-time updates\nexport interface AxOptimizationProgress {\n round: number;\n totalRounds: number;\n currentScore: number;\n bestScore: number;\n tokensUsed: number;\n timeElapsed: number;\n successfulExamples: number;\n totalExamples: number;\n currentConfiguration?: Record<string, unknown>;\n bestConfiguration?: Record<string, unknown>;\n convergenceInfo?: {\n improvement: number;\n stagnationRounds: number;\n isConverging: boolean;\n };\n}\n\n// Cost tracking interface for monitoring resource usage\nexport interface AxCostTracker {\n trackTokens(count: number, model: string): void;\n getCurrentCost(): number;\n getTokenUsage(): Record<string, number>;\n getTotalTokens(): number;\n isLimitReached(): boolean;\n reset(): void;\n}\n\n// Checkpoint interface for saving/loading optimization state\nexport interface AxOptimizationCheckpoint {\n version: string;\n timestamp: number;\n optimizerType: string;\n optimizerConfig: Record<string, unknown>;\n\n // Current optimization state\n currentRound: number;\n totalRounds: number;\n bestScore: number;\n bestConfiguration?: Record<string, unknown>;\n\n // Historical data\n scoreHistory: number[];\n configurationHistory: Record<string, unknown>[];\n\n // Resource usage\n stats: AxOptimizationStats;\n\n // Optimizer-specific state\n optimizerState: Record<string, unknown>;\n\n // Examples and validation data\n examples: readonly AxExample[];\n validationSet?: readonly AxExample[];\n}\n\n// Simple checkpoint functions - users implement these as needed\nexport type AxCheckpointSaveFn = (\n checkpoint: Readonly<AxOptimizationCheckpoint>\n) => Promise<string>;\nexport type AxCheckpointLoadFn = (\n 
checkpointId: string\n) => Promise<AxOptimizationCheckpoint | null>;\n\n// Cost tracker configuration options\nexport interface AxCostTrackerOptions {\n // Cost-based limits\n costPerModel?: Record<string, number>;\n maxCost?: number;\n\n // Token-based limits\n maxTokens?: number;\n}\n\n// Enhanced optimizer arguments - no longer includes program\nexport type AxOptimizerArgs = {\n studentAI: AxAIService;\n teacherAI?: AxAIService; // For generating high-quality examples/corrections\n examples: readonly AxExample[];\n\n // Evaluation strategy\n validationSet?: readonly AxExample[];\n\n // Quality thresholds\n minSuccessRate?: number;\n targetScore?: number;\n\n // Monitoring & callbacks\n onProgress?: (progress: Readonly<AxOptimizationProgress>) => void;\n onEarlyStop?: (reason: string, stats: Readonly<AxOptimizationStats>) => void;\n costTracker?: AxCostTracker;\n\n // Checkpointing\n checkpointSave?: AxCheckpointSaveFn;\n checkpointLoad?: AxCheckpointLoadFn;\n checkpointInterval?: number; // Save checkpoint every N rounds\n resumeFromCheckpoint?: string; // Checkpoint ID to resume from\n\n // Logging\n logger?: AxLoggerFunction;\n verbose?: boolean;\n\n // Reproducibility\n seed?: number;\n\n // Optimizer logging\n debugOptimizer?: boolean;\n optimizerLogger?: AxOptimizerLoggerFunction;\n};\n\n// Enhanced optimization statistics\nexport interface AxOptimizationStats {\n totalCalls: number;\n successfulDemos: number;\n estimatedTokenUsage: number;\n earlyStopped: boolean;\n earlyStopping?: {\n bestScoreRound: number;\n patienceExhausted: boolean;\n reason: string;\n };\n bestScore: number;\n bestConfiguration?: Record<string, unknown>;\n\n // Resource usage tracking\n resourceUsage: {\n totalTokens: number;\n totalTime: number;\n avgLatencyPerEval: number;\n peakMemoryUsage?: number;\n costByModel: Record<string, number>;\n };\n\n // Quality metrics\n convergenceInfo: {\n converged: boolean;\n finalImprovement: number;\n stagnationRounds: number;\n convergenceThreshold: number;\n };\n\n // Evaluation breakdown\n evaluationBreakdown?: {\n trainingScore: number;\n validationScore: number;\n crossValidationScores?: number[];\n standardDeviation?: number;\n };\n}\n\n// Optimizer metrics configuration interface\nexport interface AxOptimizerMetricsConfig {\n enabled: boolean;\n enabledCategories: (\n | 'optimization'\n | 'convergence'\n | 'resource_usage'\n | 'teacher_student'\n | 'checkpointing'\n | 'pareto'\n )[];\n maxLabelLength: number;\n samplingRate: number;\n}\n\n// Default optimizer metrics configuration\nexport const axDefaultOptimizerMetricsConfig: AxOptimizerMetricsConfig = {\n enabled: true,\n enabledCategories: [\n 'optimization',\n 'convergence',\n 'resource_usage',\n 'teacher_student',\n 'checkpointing',\n 'pareto',\n ],\n maxLabelLength: 100,\n samplingRate: 1.0,\n};\n\n// Optimizer metrics instruments interface\nexport interface AxOptimizerMetricsInstruments {\n // Optimization flow metrics\n optimizationLatencyHistogram?: Histogram;\n optimizationRequestsCounter?: Counter;\n optimizationErrorsCounter?: Counter;\n\n // Convergence metrics\n convergenceRoundsHistogram?: Histogram;\n convergenceScoreGauge?: Gauge;\n convergenceImprovementGauge?: Gauge;\n stagnationRoundsGauge?: Gauge;\n earlyStoppingCounter?: Counter;\n\n // Resource usage metrics\n tokenUsageCounter?: Counter;\n costUsageCounter?: Counter;\n memoryUsageGauge?: Gauge;\n optimizationDurationHistogram?: Histogram;\n\n // Teacher-student metrics\n teacherStudentUsageCounter?: Counter;\n 
teacherStudentLatencyHistogram?: Histogram;\n teacherStudentScoreImprovementGauge?: Gauge;\n\n // Checkpointing metrics\n checkpointSaveCounter?: Counter;\n checkpointLoadCounter?: Counter;\n checkpointSaveLatencyHistogram?: Histogram;\n checkpointLoadLatencyHistogram?: Histogram;\n\n // Pareto optimization metrics\n paretoOptimizationsCounter?: Counter;\n paretoFrontSizeHistogram?: Histogram;\n paretoHypervolumeGauge?: Gauge;\n paretoSolutionsGeneratedHistogram?: Histogram;\n\n // Program complexity metrics\n programInputFieldsGauge?: Gauge;\n programOutputFieldsGauge?: Gauge;\n examplesCountGauge?: Gauge;\n validationSetSizeGauge?: Gauge;\n\n // Performance metrics\n evaluationLatencyHistogram?: Histogram;\n demoGenerationLatencyHistogram?: Histogram;\n metricComputationLatencyHistogram?: Histogram;\n\n // Configuration metrics\n optimizerTypeGauge?: Gauge;\n targetScoreGauge?: Gauge;\n maxRoundsGauge?: Gauge;\n}\n\n// Singleton instance for optimizer metrics instruments\nlet globalOptimizerMetricsInstruments:\n | AxOptimizerMetricsInstruments\n | undefined;\n\n// Function to get or create optimizer metrics instruments (singleton pattern)\nexport const getOrCreateOptimizerMetricsInstruments = (\n meter?: Meter\n): AxOptimizerMetricsInstruments | undefined => {\n // Return existing instance if available\n if (globalOptimizerMetricsInstruments) {\n return globalOptimizerMetricsInstruments;\n }\n\n if (meter) {\n globalOptimizerMetricsInstruments =\n createOptimizerMetricsInstruments(meter);\n return globalOptimizerMetricsInstruments;\n }\n\n return undefined;\n};\n\n// Function to reset the optimizer metrics singleton (useful for testing)\nexport const resetOptimizerMetricsInstruments = (): void => {\n globalOptimizerMetricsInstruments = undefined;\n};\n\n// Global optimizer metrics configuration\nlet currentOptimizerMetricsConfig: AxOptimizerMetricsConfig =\n axDefaultOptimizerMetricsConfig;\n\n// Function to update optimizer metrics configuration\nexport const axUpdateOptimizerMetricsConfig = (\n config: Readonly<Partial<AxOptimizerMetricsConfig>>\n): void => {\n currentOptimizerMetricsConfig = {\n ...currentOptimizerMetricsConfig,\n ...config,\n };\n};\n\n// Function to get current optimizer metrics configuration\nexport const axGetOptimizerMetricsConfig = (): AxOptimizerMetricsConfig => {\n return { ...currentOptimizerMetricsConfig };\n};\n\nexport const createOptimizerMetricsInstruments = (\n meter: Meter\n): AxOptimizerMetricsInstruments => {\n return {\n // Optimization flow metrics\n optimizationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_optimization_duration_ms',\n {\n description: 'End-to-end duration of optimization runs',\n unit: 'ms',\n }\n ),\n\n optimizationRequestsCounter: meter.createCounter(\n 'ax_optimizer_optimization_requests_total',\n {\n description: 'Total number of optimization requests',\n }\n ),\n\n optimizationErrorsCounter: meter.createCounter(\n 'ax_optimizer_optimization_errors_total',\n {\n description: 'Total number of failed optimizations',\n }\n ),\n\n // Convergence metrics\n convergenceRoundsHistogram: meter.createHistogram(\n 'ax_optimizer_convergence_rounds',\n {\n description: 'Number of rounds until convergence',\n }\n ),\n\n convergenceScoreGauge: meter.createGauge('ax_optimizer_convergence_score', {\n description: 'Current best score during optimization',\n }),\n\n convergenceImprovementGauge: meter.createGauge(\n 'ax_optimizer_convergence_improvement',\n {\n description: 'Improvement in score from baseline',\n }\n ),\n\n 
stagnationRoundsGauge: meter.createGauge('ax_optimizer_stagnation_rounds', {\n description: 'Number of rounds without improvement',\n }),\n\n earlyStoppingCounter: meter.createCounter(\n 'ax_optimizer_early_stopping_total',\n {\n description: 'Total number of early stopping events',\n }\n ),\n\n // Resource usage metrics\n tokenUsageCounter: meter.createCounter('ax_optimizer_token_usage_total', {\n description: 'Total tokens used during optimization',\n }),\n\n costUsageCounter: meter.createCounter('ax_optimizer_cost_usage_total', {\n description: 'Total cost incurred during optimization',\n unit: '$',\n }),\n\n memoryUsageGauge: meter.createGauge('ax_optimizer_memory_usage_bytes', {\n description: 'Peak memory usage during optimization',\n unit: 'By',\n }),\n\n optimizationDurationHistogram: meter.createHistogram(\n 'ax_optimizer_duration_ms',\n {\n description: 'Duration of optimization runs',\n unit: 'ms',\n }\n ),\n\n // Teacher-student metrics\n teacherStudentUsageCounter: meter.createCounter(\n 'ax_optimizer_teacher_student_usage_total',\n {\n description: 'Total number of teacher-student interactions',\n }\n ),\n\n teacherStudentLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_teacher_student_latency_ms',\n {\n description: 'Latency of teacher-student interactions',\n unit: 'ms',\n }\n ),\n\n teacherStudentScoreImprovementGauge: meter.createGauge(\n 'ax_optimizer_teacher_student_score_improvement',\n {\n description: 'Score improvement from teacher-student interactions',\n }\n ),\n\n // Checkpointing metrics\n checkpointSaveCounter: meter.createCounter(\n 'ax_optimizer_checkpoint_save_total',\n {\n description: 'Total number of checkpoint saves',\n }\n ),\n\n checkpointLoadCounter: meter.createCounter(\n 'ax_optimizer_checkpoint_load_total',\n {\n description: 'Total number of checkpoint loads',\n }\n ),\n\n checkpointSaveLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_checkpoint_save_latency_ms',\n {\n description: 'Latency of checkpoint save operations',\n unit: 'ms',\n }\n ),\n\n checkpointLoadLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_checkpoint_load_latency_ms',\n {\n description: 'Latency of checkpoint load operations',\n unit: 'ms',\n }\n ),\n\n // Pareto optimization metrics\n paretoOptimizationsCounter: meter.createCounter(\n 'ax_optimizer_pareto_optimizations_total',\n {\n description: 'Total number of Pareto optimizations',\n }\n ),\n\n paretoFrontSizeHistogram: meter.createHistogram(\n 'ax_optimizer_pareto_front_size',\n {\n description: 'Size of Pareto frontier',\n }\n ),\n\n paretoHypervolumeGauge: meter.createGauge(\n 'ax_optimizer_pareto_hypervolume',\n {\n description: 'Hypervolume of Pareto frontier',\n }\n ),\n\n paretoSolutionsGeneratedHistogram: meter.createHistogram(\n 'ax_optimizer_pareto_solutions_generated',\n {\n description: 'Number of solutions generated for Pareto optimization',\n }\n ),\n\n // Program complexity metrics\n programInputFieldsGauge: meter.createGauge(\n 'ax_optimizer_program_input_fields',\n {\n description: 'Number of input fields in optimized program',\n }\n ),\n\n programOutputFieldsGauge: meter.createGauge(\n 'ax_optimizer_program_output_fields',\n {\n description: 'Number of output fields in optimized program',\n }\n ),\n\n examplesCountGauge: meter.createGauge('ax_optimizer_examples_count', {\n description: 'Number of training examples used',\n }),\n\n validationSetSizeGauge: meter.createGauge(\n 'ax_optimizer_validation_set_size',\n {\n description: 'Size of validation set used',\n }\n ),\n\n // 
Performance metrics\n evaluationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_evaluation_latency_ms',\n {\n description: 'Latency of program evaluations',\n unit: 'ms',\n }\n ),\n\n demoGenerationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_demo_generation_latency_ms',\n {\n description: 'Latency of demo generation',\n unit: 'ms',\n }\n ),\n\n metricComputationLatencyHistogram: meter.createHistogram(\n 'ax_optimizer_metric_computation_latency_ms',\n {\n description: 'Latency of metric computation',\n unit: 'ms',\n }\n ),\n\n // Configuration metrics\n optimizerTypeGauge: meter.createGauge('ax_optimizer_type', {\n description: 'Type of optimizer being used',\n }),\n\n targetScoreGauge: meter.createGauge('ax_optimizer_target_score', {\n description: 'Target score for optimization',\n }),\n\n maxRoundsGauge: meter.createGauge('ax_optimizer_max_rounds', {\n description: 'Maximum rounds for optimization',\n }),\n };\n};\n\n// Utility function to sanitize optimizer metric labels\nconst sanitizeOptimizerLabels = (\n labels: Record<string, unknown>\n): Record<string, string> => {\n const sanitized: Record<string, string> = {};\n for (const [key, value] of Object.entries(labels)) {\n if (value !== undefined && value !== null) {\n const stringValue = String(value);\n // Limit label length based on configuration\n const maxLength = currentOptimizerMetricsConfig.maxLabelLength;\n sanitized[key] =\n stringValue.length > maxLength\n ? stringValue.substring(0, maxLength)\n : stringValue;\n }\n }\n return sanitized;\n};\n\n// Recording functions for optimization flow metrics\nexport const recordOptimizationMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n duration: number,\n success: boolean,\n optimizerType: string,\n programSignature?: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n success: success.toString(),\n optimizer_type: optimizerType,\n ...(programSignature ? 
{ program_signature: programSignature } : {}),\n });\n\n if (instruments.optimizationLatencyHistogram) {\n instruments.optimizationLatencyHistogram.record(duration, labels);\n }\n\n if (instruments.optimizationRequestsCounter) {\n instruments.optimizationRequestsCounter.add(1, labels);\n }\n\n if (!success && instruments.optimizationErrorsCounter) {\n instruments.optimizationErrorsCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record optimization metric:', error);\n }\n};\n\n// Recording functions for convergence metrics\nexport const recordConvergenceMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n rounds: number,\n currentScore: number,\n improvement: number,\n stagnationRounds: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.convergenceRoundsHistogram) {\n instruments.convergenceRoundsHistogram.record(rounds, labels);\n }\n\n if (instruments.convergenceScoreGauge) {\n instruments.convergenceScoreGauge.record(currentScore, labels);\n }\n\n if (instruments.convergenceImprovementGauge) {\n instruments.convergenceImprovementGauge.record(improvement, labels);\n }\n\n if (instruments.stagnationRoundsGauge) {\n instruments.stagnationRoundsGauge.record(stagnationRounds, labels);\n }\n } catch (error) {\n console.warn('Failed to record convergence metric:', error);\n }\n};\n\nexport const recordEarlyStoppingMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n reason: string,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n reason,\n optimizer_type: optimizerType,\n });\n\n if (instruments.earlyStoppingCounter) {\n instruments.earlyStoppingCounter.add(1, labels);\n }\n } catch (error) {\n console.warn('Failed to record early stopping metric:', error);\n }\n};\n\n// Recording functions for resource usage metrics\nexport const recordResourceUsageMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n tokensUsed: number,\n costIncurred: number,\n optimizerType: string,\n memoryUsage?: number\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.tokenUsageCounter) {\n instruments.tokenUsageCounter.add(tokensUsed, labels);\n }\n\n if (instruments.costUsageCounter) {\n instruments.costUsageCounter.add(costIncurred, labels);\n }\n\n if (memoryUsage !== undefined && instruments.memoryUsageGauge) {\n instruments.memoryUsageGauge.record(memoryUsage, labels);\n }\n } catch (error) {\n console.warn('Failed to record resource usage metric:', error);\n }\n};\n\nexport const recordOptimizationDurationMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n duration: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.optimizationDurationHistogram) {\n instruments.optimizationDurationHistogram.record(duration, labels);\n }\n } catch (error) {\n console.warn('Failed to record optimization duration metric:', error);\n }\n};\n\n// Recording functions for teacher-student metrics\nexport const recordTeacherStudentMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n latency: number,\n scoreImprovement: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.teacherStudentUsageCounter) {\n 
instruments.teacherStudentUsageCounter.add(1, labels);\n }\n\n if (instruments.teacherStudentLatencyHistogram) {\n instruments.teacherStudentLatencyHistogram.record(latency, labels);\n }\n\n if (instruments.teacherStudentScoreImprovementGauge) {\n instruments.teacherStudentScoreImprovementGauge.record(\n scoreImprovement,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record teacher-student metric:', error);\n }\n};\n\n// Recording functions for checkpointing metrics\nexport const recordCheckpointMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n operation: 'save' | 'load',\n latency: number,\n success: boolean,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n operation,\n success: success.toString(),\n optimizer_type: optimizerType,\n });\n\n if (operation === 'save') {\n if (instruments.checkpointSaveCounter) {\n instruments.checkpointSaveCounter.add(1, labels);\n }\n if (instruments.checkpointSaveLatencyHistogram) {\n instruments.checkpointSaveLatencyHistogram.record(latency, labels);\n }\n } else {\n if (instruments.checkpointLoadCounter) {\n instruments.checkpointLoadCounter.add(1, labels);\n }\n if (instruments.checkpointLoadLatencyHistogram) {\n instruments.checkpointLoadLatencyHistogram.record(latency, labels);\n }\n }\n } catch (error) {\n console.warn('Failed to record checkpoint metric:', error);\n }\n};\n\n// Recording functions for Pareto optimization metrics\nexport const recordParetoMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n frontSize: number,\n solutionsGenerated: number,\n optimizerType: string,\n hypervolume?: number\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.paretoOptimizationsCounter) {\n instruments.paretoOptimizationsCounter.add(1, labels);\n }\n\n if (instruments.paretoFrontSizeHistogram) {\n instruments.paretoFrontSizeHistogram.record(frontSize, labels);\n }\n\n if (hypervolume !== undefined && instruments.paretoHypervolumeGauge) {\n instruments.paretoHypervolumeGauge.record(hypervolume, labels);\n }\n\n if (instruments.paretoSolutionsGeneratedHistogram) {\n instruments.paretoSolutionsGeneratedHistogram.record(\n solutionsGenerated,\n labels\n );\n }\n } catch (error) {\n console.warn('Failed to record Pareto metric:', error);\n }\n};\n\n// Recording functions for program complexity metrics\nexport const recordProgramComplexityMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n inputFields: number,\n outputFields: number,\n examplesCount: number,\n validationSetSize: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.programInputFieldsGauge) {\n instruments.programInputFieldsGauge.record(inputFields, labels);\n }\n\n if (instruments.programOutputFieldsGauge) {\n instruments.programOutputFieldsGauge.record(outputFields, labels);\n }\n\n if (instruments.examplesCountGauge) {\n instruments.examplesCountGauge.record(examplesCount, labels);\n }\n\n if (instruments.validationSetSizeGauge) {\n instruments.validationSetSizeGauge.record(validationSetSize, labels);\n }\n } catch (error) {\n console.warn('Failed to record program complexity metric:', error);\n }\n};\n\n// Recording functions for performance metrics\nexport const recordOptimizerPerformanceMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n metricType: 'evaluation' | 'demo_generation' | 
'metric_computation',\n duration: number,\n optimizerType: string\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n metric_type: metricType,\n optimizer_type: optimizerType,\n });\n\n switch (metricType) {\n case 'evaluation':\n if (instruments.evaluationLatencyHistogram) {\n instruments.evaluationLatencyHistogram.record(duration, labels);\n }\n break;\n case 'demo_generation':\n if (instruments.demoGenerationLatencyHistogram) {\n instruments.demoGenerationLatencyHistogram.record(duration, labels);\n }\n break;\n case 'metric_computation':\n if (instruments.metricComputationLatencyHistogram) {\n instruments.metricComputationLatencyHistogram.record(\n duration,\n labels\n );\n }\n break;\n }\n } catch (error) {\n console.warn('Failed to record optimizer performance metric:', error);\n }\n};\n\n// Recording functions for configuration metrics\nexport const recordOptimizerConfigurationMetric = (\n instruments: Readonly<AxOptimizerMetricsInstruments>,\n optimizerType: string,\n targetScore?: number,\n maxRounds?: number\n): void => {\n try {\n const labels = sanitizeOptimizerLabels({\n optimizer_type: optimizerType,\n });\n\n if (instruments.optimizerTypeGauge) {\n instruments.optimizerTypeGauge.record(1, labels);\n }\n\n if (targetScore !== undefined && instruments.targetScoreGauge) {\n instruments.targetScoreGauge.record(targetScore, labels);\n }\n\n if (maxRounds !== undefined && instruments.maxRoundsGauge) {\n instruments.maxRoundsGauge.record(maxRounds, labels);\n }\n } catch (error) {\n console.warn('Failed to record optimizer configuration metric:', error);\n }\n};\n\n// Simplified result - no program since it's passed to compile\nexport interface AxOptimizerResult<OUT extends AxGenOut> {\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n stats: AxOptimizationStats;\n bestScore: number;\n finalConfiguration?: Record<string, unknown>;\n\n // Optimization history for analysis\n scoreHistory?: number[];\n configurationHistory?: Record<string, unknown>[];\n}\n\n// Pareto optimization result for multi-objective optimization\nexport interface AxParetoResult<OUT extends AxGenOut = AxGenOut>\n extends AxOptimizerResult<OUT> {\n paretoFront: ReadonlyArray<{\n demos: readonly AxProgramDemos<AxGenIn, OUT>[];\n scores: Readonly<Record<string, number>>;\n configuration: Readonly<Record<string, unknown>>;\n dominatedSolutions: number;\n }>;\n\n // Multi-objective specific stats\n hypervolume?: number;\n paretoFrontSize: number;\n convergenceMetrics?: Record<string, number>;\n}\n\n// Compile options that can override constructor arguments\nexport interface AxCompileOptions {\n // Method-specific options\n maxIterations?: number;\n earlyStoppingPatience?: number;\n verbose?: boolean;\n\n // Override args for this specific run\n overrideValidationSet?: readonly AxExample[];\n overrideTargetScore?: number;\n overrideCostTracker?: AxCostTracker;\n overrideTeacherAI?: AxAIService;\n\n // Progress monitoring overrides\n overrideOnProgress?: (progress: Readonly<AxOptimizationProgress>) => void;\n overrideOnEarlyStop?: (\n reason: string,\n stats: Readonly<AxOptimizationStats>\n ) => void;\n\n // Checkpointing overrides\n overrideCheckpointSave?: AxCheckpointSaveFn;\n overrideCheckpointLoad?: AxCheckpointLoadFn;\n overrideCheckpointInterval?: number;\n saveCheckpointOnComplete?: boolean;\n}\n\n// Enhanced base optimizer interface\nexport interface AxOptimizer<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> {\n /**\n * Optimize a program using the provided metric function\n * 
@param program The program to optimize (moved from constructor)\n * @param metricFn Evaluation metric function to assess program performance\n * @param options Optional configuration options that can override constructor settings\n * @returns Optimization result containing demos, stats, and configuration\n */\n compile(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<AxOptimizerResult<OUT>>;\n\n /**\n * Optimize a program with real-time streaming updates\n * @param program The program to optimize\n * @param metricFn Evaluation metric function\n * @param options Optional configuration options\n * @returns Async iterator yielding optimization progress\n */\n compileStream?(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): AsyncIterableIterator<AxOptimizationProgress>;\n\n /**\n * Multi-objective optimization using Pareto frontier\n * @param program The program to optimize\n * @param metricFn Multi-objective metric function\n * @param options Optional configuration options\n * @returns Pareto optimization result\n */\n compilePareto?(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<AxParetoResult<OUT>>;\n\n /**\n * Get current optimization statistics\n * @returns Current optimization statistics\n */\n getStats(): AxOptimizationStats;\n\n /**\n * Cancel ongoing optimization gracefully\n * @returns Promise that resolves when cancellation is complete\n */\n cancel?(): Promise<void>;\n\n /**\n * Reset optimizer state for reuse with different programs\n */\n reset?(): void;\n\n /**\n * Get optimizer-specific configuration\n * @returns Current optimizer configuration\n */\n getConfiguration?(): Record<string, unknown>;\n\n /**\n * Update optimizer configuration\n * @param config New configuration to merge with existing\n */\n updateConfiguration?(config: Readonly<Record<string, unknown>>): void;\n\n /**\n * Validate that the optimizer can handle the given program\n * @param program Program to validate\n * @returns Validation result with any issues found\n */\n validateProgram?(program: Readonly<AxGen<IN, OUT>>): {\n isValid: boolean;\n issues: string[];\n suggestions: string[];\n };\n}\n\n// Specific optimizer options interfaces\n\nexport interface AxBootstrapOptimizerOptions {\n maxRounds?: number;\n maxExamples?: number;\n maxDemos?: number;\n batchSize?: number;\n earlyStoppingPatience?: number;\n teacherAI?: AxAIService;\n costMonitoring?: boolean;\n maxTokensPerGeneration?: number;\n verboseMode?: boolean;\n debugMode?: boolean;\n\n // Enhanced options\n adaptiveBatching?: boolean;\n dynamicTemperature?: boolean;\n qualityThreshold?: number;\n diversityWeight?: number;\n}\n\nexport interface AxMiPROOptimizerOptions {\n numCandidates?: number;\n initTemperature?: number;\n maxBootstrappedDemos?: number;\n maxLabeledDemos?: number;\n numTrials?: number;\n minibatch?: boolean;\n minibatchSize?: number;\n minibatchFullEvalSteps?: number;\n programAwareProposer?: boolean;\n dataAwareProposer?: boolean;\n viewDataBatchSize?: number;\n tipAwareProposer?: boolean;\n fewshotAwareProposer?: boolean;\n verbose?: boolean;\n earlyStoppingTrials?: number;\n minImprovementThreshold?: number;\n\n // Enhanced options\n bayesianOptimization?: boolean;\n acquisitionFunction?:\n | 'expected_improvement'\n | 'upper_confidence_bound'\n | 'probability_improvement';\n explorationWeight?: number;\n\n // New option: number of samples to generate per forward 
call for self-consistency\n sampleCount?: number;\n}\n\n// Legacy compile options (for backward compatibility)\nexport interface AxBootstrapCompileOptions extends AxCompileOptions {\n validationExamples?: readonly AxExample[];\n maxDemos?: number;\n teacherProgram?: Readonly<AxGen<AxGenIn, AxGenOut>>;\n}\n\nexport interface AxMiPROCompileOptions extends AxCompileOptions {\n validationExamples?: readonly AxExample[];\n teacher?: Readonly<AxGen<AxGenIn, AxGenOut>>;\n auto?: 'light' | 'medium' | 'heavy';\n\n // Enhanced MiPRO options\n instructionCandidates?: string[];\n customProposer?: (\n context: Readonly<{\n programSummary: string;\n dataSummary: string;\n previousInstructions: string[];\n }>\n ) => Promise<string[]>;\n}\n\n// Default cost tracker implementation\nexport class AxDefaultCostTracker implements AxCostTracker {\n private tokenUsage: Record<string, number> = {};\n private totalTokens = 0;\n\n // Configuration options\n private readonly costPerModel: Record<string, number>;\n private readonly maxCost?: number;\n private readonly maxTokens?: number;\n\n constructor(options?: AxCostTrackerOptions) {\n this.costPerModel = options?.costPerModel ?? {};\n this.maxCost = options?.maxCost;\n this.maxTokens = options?.maxTokens;\n }\n\n trackTokens(count: number, model: string): void {\n this.tokenUsage[model] = (this.tokenUsage[model] || 0) + count;\n this.totalTokens += count;\n }\n\n getCurrentCost(): number {\n // Calculate cost on-demand\n let totalCost = 0;\n for (const [model, tokens] of Object.entries(this.tokenUsage)) {\n const costPer1K = this.costPerModel[model] || 0.001; // Default fallback\n totalCost += (tokens / 1000) * costPer1K;\n }\n return totalCost;\n }\n\n getTokenUsage(): Record<string, number> {\n return { ...this.tokenUsage };\n }\n\n getTotalTokens(): number {\n return this.totalTokens;\n }\n\n isLimitReached(): boolean {\n // Check token limit if configured\n if (this.maxTokens !== undefined && this.totalTokens >= this.maxTokens) {\n return true;\n }\n\n // Check cost limit if configured (calculate cost on-demand)\n if (this.maxCost !== undefined) {\n const currentCost = this.getCurrentCost();\n if (currentCost >= this.maxCost) {\n return true;\n }\n }\n\n return false;\n }\n\n reset(): void {\n this.tokenUsage = {};\n this.totalTokens = 0;\n }\n}\n\n/**\n * Abstract base class for optimizers that provides common functionality\n * and standardized handling of AxOptimizerArgs\n */\nexport abstract class AxBaseOptimizer<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> implements AxOptimizer<IN, OUT>\n{\n // Common AxOptimizerArgs fields\n protected readonly studentAI: AxAIService;\n protected readonly teacherAI?: AxAIService;\n protected readonly examples: readonly AxExample[];\n protected readonly validationSet?: readonly AxExample[];\n protected readonly targetScore?: number;\n protected readonly minSuccessRate?: number;\n protected readonly onProgress?: (\n progress: Readonly<AxOptimizationProgress>\n ) => void;\n protected readonly onEarlyStop?: (\n reason: string,\n stats: Readonly<AxOptimizationStats>\n ) => void;\n protected readonly costTracker?: AxCostTracker;\n protected readonly seed?: number;\n\n // Checkpointing fields\n protected readonly checkpointSave?: AxCheckpointSaveFn;\n protected readonly checkpointLoad?: AxCheckpointLoadFn;\n protected readonly checkpointInterval?: number;\n protected readonly resumeFromCheckpoint?: string;\n\n // Logging fields\n protected readonly logger?: AxLoggerFunction;\n protected readonly 
verbose?: boolean;\n\n // Optimizer logging\n protected readonly debugOptimizer: boolean;\n protected readonly optimizerLogger?: AxOptimizerLoggerFunction;\n\n // Checkpoint state\n private currentRound = 0;\n private scoreHistory: number[] = [];\n private configurationHistory: Record<string, unknown>[] = [];\n\n // Common optimization statistics\n protected stats: AxOptimizationStats;\n\n // Metrics instruments\n protected readonly metricsInstruments?: AxOptimizerMetricsInstruments;\n\n constructor(args: Readonly<AxOptimizerArgs>) {\n if (args.examples.length === 0) {\n throw new Error('No examples found');\n }\n\n // Set common fields from AxOptimizerArgs\n this.studentAI = args.studentAI;\n this.teacherAI = args.teacherAI;\n this.examples = args.examples;\n this.validationSet = args.validationSet;\n this.targetScore = args.targetScore;\n this.minSuccessRate = args.minSuccessRate;\n this.onProgress = args.onProgress;\n this.onEarlyStop = args.onEarlyStop;\n this.seed = args.seed;\n\n // Set up checkpointing\n this.checkpointSave = args.checkpointSave;\n this.checkpointLoad = args.checkpointLoad;\n this.checkpointInterval = args.checkpointInterval ?? 10; // Default: checkpoint every 10 rounds\n this.resumeFromCheckpoint = args.resumeFromCheckpoint;\n\n // Set up logging\n this.logger = args.logger;\n this.verbose = args.verbose;\n\n // Set up cost tracker with default if not provided\n const costTracker = new AxDefaultCostTracker({\n maxTokens: 1000000,\n });\n this.costTracker = args.costTracker ?? costTracker;\n\n // Initialize metrics instruments\n this.metricsInstruments = getOrCreateOptimizerMetricsInstruments(\n axGlobals.meter\n );\n\n // Initialize common stats structure\n this.stats = this.initializeStats();\n\n // Set up optimizer logging\n this.debugOptimizer = args.debugOptimizer ?? false;\n this.optimizerLogger = args.optimizerLogger;\n }\n\n /**\n * Initialize the optimization statistics structure\n */\n protected initializeStats(): AxOptimizationStats {\n return {\n totalCalls: 0,\n successfulDemos: 0,\n estimatedTokenUsage: 0,\n earlyStopped: false,\n resourceUsage: {\n totalTokens: 0,\n totalTime: 0,\n avgLatencyPerEval: 0,\n costByModel: {},\n },\n convergenceInfo: {\n converged: false,\n finalImprovement: 0,\n stagnationRounds: 0,\n convergenceThreshold: 0.01,\n },\n bestScore: 0,\n bestConfiguration: {},\n };\n }\n\n /**\n * Set up reproducible random seed if provided\n */\n protected setupRandomSeed(): void {\n if (this.seed !== undefined) {\n // Note: For full reproducibility, we'd need a proper PRNG\n Math.random = (() => {\n let seed = this.seed!;\n return () => {\n seed = (seed * 9301 + 49297) % 233280;\n return seed / 233280;\n };\n })();\n }\n }\n\n /**\n * Check if optimization should stop early due to cost limits\n */\n protected checkCostLimits(): boolean {\n return this.costTracker?.isLimitReached() ?? 
false;\n }\n\n /**\n * Check if target score has been reached\n */\n protected checkTargetScore(currentScore: number): boolean {\n return this.targetScore !== undefined && currentScore >= this.targetScore;\n }\n\n /**\n * Update resource usage statistics\n */\n protected updateResourceUsage(startTime: number, tokensUsed = 0): void {\n this.stats.resourceUsage.totalTime = Date.now() - startTime;\n this.stats.resourceUsage.totalTokens += tokensUsed;\n\n if (this.stats.totalCalls > 0) {\n this.stats.resourceUsage.avgLatencyPerEval =\n this.stats.resourceUsage.totalTime / this.stats.totalCalls;\n }\n }\n\n /**\n * Trigger early stopping with appropriate callbacks\n */\n protected triggerEarlyStopping(reason: string, bestScoreRound: number): void {\n this.stats.earlyStopped = true;\n this.stats.earlyStopping = {\n bestScoreRound,\n patienceExhausted: reason.includes('improvement'),\n reason,\n };\n\n // Record early stopping metrics (use a default optimizer type)\n this.recordEarlyStoppingMetrics(reason, 'unknown');\n\n if (this.onEarlyStop) {\n this.onEarlyStop(reason, this.stats);\n }\n const optLogger = this.getOptimizerLogger();\n optLogger?.({\n name: 'EarlyStopping',\n value: {\n reason,\n finalScore: this.stats.bestScore ?? 0,\n round: bestScoreRound,\n },\n });\n }\n\n /**\n * Get the validation set, with fallback to a split of examples\n */\n protected getValidationSet(options?: AxCompileOptions): readonly AxExample[] {\n return (\n options?.overrideValidationSet ||\n this.validationSet ||\n this.examples.slice(0, Math.floor(this.examples.length * 0.2))\n );\n }\n\n /**\n * Get the AI service to use for a specific task, preferring teacher when available\n * @param preferTeacher Whether to prefer teacher AI over student AI\n * @param options Optional compile options that may override teacher AI\n * @returns The appropriate AI service to use\n */\n protected getAIService(\n preferTeacher = false,\n options?: AxCompileOptions\n ): AxAIService {\n // Check for override teacher AI first\n if (preferTeacher && options?.overrideTeacherAI) {\n return options.overrideTeacherAI;\n }\n\n // Then check for configured teacher AI\n if (preferTeacher && this.teacherAI) {\n return this.teacherAI;\n }\n\n return this.studentAI;\n }\n\n /**\n * Check if teacher AI is available (including overrides)\n * @param options Optional compile options that may override teacher AI\n * @returns True if teacher AI is configured or overridden\n */\n protected hasTeacherAI(options?: AxCompileOptions): boolean {\n return (\n options?.overrideTeacherAI !== undefined || this.teacherAI !== undefined\n );\n }\n\n /**\n * Get teacher AI if available, otherwise return student AI\n * @param options Optional compile options that may override teacher AI\n * @returns Teacher AI if available, otherwise student AI\n */\n protected getTeacherOrStudentAI(options?: AxCompileOptions): AxAIService {\n return options?.overrideTeacherAI || this.teacherAI || this.studentAI;\n }\n\n /**\n * Execute a task with teacher AI if available, otherwise use student AI\n * @param task Function that takes an AI service and returns a promise\n * @param preferTeacher Whether to prefer teacher AI (default: true)\n * @param options Optional compile options that may override teacher AI\n * @returns Result of the task execution\n */\n protected async executeWithTeacher<T>(\n task: (ai: AxAIService) => Promise<T>,\n preferTeacher = true,\n options?: AxCompileOptions\n ): Promise<T> {\n const ai = this.getAIService(preferTeacher, options);\n return await 
task(ai);\n }\n\n /**\n * Abstract method that must be implemented by concrete optimizers\n */\n public abstract compile(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<AxOptimizerResult<OUT>>;\n\n /**\n * Optimize a program with real-time streaming updates\n * @param program The program to optimize\n * @param metricFn Evaluation metric function\n * @param options Optional configuration options\n * @returns Async iterator yielding optimization progress\n */\n public async *compileStream(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): AsyncIterableIterator<AxOptimizationProgress> {\n const startTime = Date.now();\n const optimizerType = this.constructor.name;\n const programSignature = program.getSignature().toString();\n\n this.recordOptimizationStart(optimizerType, programSignature);\n\n let earlyStopReason: string | undefined;\n\n const updateProgress = (\n round: number,\n score: number,\n configuration: Record<string, unknown>,\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration: Record<string, unknown> | undefined,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ) => {\n const optLogger = this.getOptimizerLogger(options);\n optLogger?.({\n name: 'RoundProgress',\n value: {\n round,\n totalRounds: options?.maxIterations ?? 0,\n currentScore: score,\n bestScore,\n configuration,\n },\n });\n this.updateOptimizationProgress(\n round,\n score,\n configuration,\n optimizerType,\n optimizerConfig,\n bestScore,\n bestConfiguration,\n optimizerState,\n options\n );\n };\n\n const onEarlyStop = (\n reason: string,\n _stats: Readonly<AxOptimizationStats>\n ) => {\n earlyStopReason = reason;\n this.triggerEarlyStopping(reason, this.currentRound);\n };\n\n const onProgress = (progress: Readonly<AxOptimizationProgress>) => {\n this.onProgress?.(progress);\n updateProgress(\n progress.round,\n progress.currentScore,\n progress.currentConfiguration || {},\n optimizerType,\n {}, // No optimizerConfig here, it's part of the progress object\n progress.bestScore,\n progress.bestConfiguration,\n progress.convergenceInfo,\n options\n );\n };\n\n const compileResult = await this.compile(program, metricFn, {\n ...options,\n overrideOnProgress: onProgress,\n overrideOnEarlyStop: onEarlyStop,\n });\n\n const duration = Date.now() - startTime;\n this.recordOptimizationComplete(\n duration,\n true,\n optimizerType,\n programSignature\n );\n\n if (earlyStopReason) {\n this.getLogger(options)?.({\n name: 'Notification',\n id: 'optimization_early_stop',\n value: `Optimization stopped early due to ${earlyStopReason}`,\n });\n }\n\n return {\n demos: compileResult.demos,\n stats: compileResult.stats,\n bestScore: compileResult.bestScore,\n finalConfiguration: compileResult.finalConfiguration,\n scoreHistory: compileResult.scoreHistory,\n configurationHistory: compileResult.configurationHistory,\n };\n }\n\n /**\n * Multi-objective optimization using Pareto frontier\n * Default implementation that leverages the single-objective compile method\n * @param program The program to optimize\n * @param metricFn Multi-objective metric function that returns multiple scores\n * @param options Optional configuration options\n * @returns Pareto optimization result with frontier of non-dominated solutions\n */\n public async compilePareto(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): 
Promise<AxParetoResult<OUT>> {\n const _optimizerType = this.constructor.name;\n const startTime = Date.now();\n\n // Strategy 1: Generate different weighted combinations of objectives\n const solutions = await this.generateWeightedSolutions(\n program,\n metricFn,\n options\n );\n\n // Strategy 2: Generate constraint-based solutions (optimize one objective while constraining others)\n const constraintSolutions = await this.generateConstraintSolutions(\n program,\n metricFn,\n options\n );\n\n // Combine all solutions\n const allSolutions = [...solutions, ...constraintSolutions];\n\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Generated ${allSolutions.length} candidate solutions`,\n // { tags: ['discovery'] }\n // );\n // }\n\n // Find Pareto frontier\n const paretoFront = this.findParetoFrontier(allSolutions);\n\n // Calculate hypervolume if possible\n const hypervolume = this.calculateHypervolume(paretoFront);\n\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Found ${paretoFront.length} non-dominated solutions`,\n // { tags: ['discovery'] }\n // );\n // this.getLogger(options)?.(\n // `Hypervolume: ${hypervolume?.toFixed(4) || 'N/A'}`,\n // { tags: ['discovery'] }\n // );\n // }\n\n // Update stats\n this.updateResourceUsage(startTime);\n this.stats.convergenceInfo.converged = true;\n\n // Record Pareto optimization metrics\n this.recordParetoMetrics(\n paretoFront.length,\n allSolutions.length,\n 'base_optimizer',\n hypervolume\n );\n\n // Calculate best score as the maximum across all objectives and solutions\n const bestScore =\n paretoFront.length > 0\n ? Math.max(\n ...paretoFront.map((sol) => Math.max(...Object.values(sol.scores)))\n )\n : 0;\n\n return {\n demos: paretoFront.length > 0 ? [...paretoFront[0]!.demos] : undefined,\n stats: this.stats,\n bestScore,\n paretoFront,\n hypervolume,\n paretoFrontSize: paretoFront.length,\n finalConfiguration: {\n paretoFrontSize: paretoFront.length,\n hypervolume,\n strategy: 'weighted_combinations_and_constraints',\n numSolutions: allSolutions.length,\n },\n };\n }\n\n /**\n * Generate solutions using different weighted combinations of objectives\n */\n private async generateWeightedSolutions(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<\n Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }>\n > {\n const solutions: Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }> = [];\n\n // First, determine the objectives by running the metric on a sample\n const sampleExample = this.examples[0]!;\n const samplePrediction = await program.forward(\n this.getAIService(false, options),\n sampleExample as IN\n );\n const sampleScores = await metricFn({\n prediction: samplePrediction,\n example: sampleExample,\n });\n const objectives = Object.keys(sampleScores);\n\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Detected objectives: ${objectives.join(', ')}`,\n // { tags: ['discovery'] }\n // );\n // }\n\n // Generate different weight combinations\n const weightCombinations = this.generateWeightCombinations(objectives);\n\n for (let i = 0; i < weightCombinations.length; i++) {\n const weights = weightCombinations[i]!;\n\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Optimizing with weights: ${JSON.stringify(weights)}`,\n // { tags: ['discovery'] }\n // );\n // 
}\n\n // Create a weighted single-objective metric\n const weightedMetric: AxMetricFn = async ({ prediction, example }) => {\n const scores = await metricFn({ prediction, example });\n let weightedScore = 0;\n for (const [objective, score] of Object.entries(scores)) {\n weightedScore += score * (weights[objective] || 0);\n }\n return weightedScore;\n };\n\n try {\n // Use the concrete optimizer's compile method\n const result = await this.compile(program, weightedMetric, {\n ...options,\n verbose: false, // Suppress inner optimization logs\n });\n\n // Evaluate the result with the multi-objective metric\n const scores = await this.evaluateWithMultiObjective(\n program,\n result,\n metricFn\n );\n\n solutions.push({\n scores,\n demos: result.demos,\n configuration: {\n ...result.finalConfiguration,\n weights,\n strategy: 'weighted_combination',\n },\n });\n } catch (_error) {\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Failed optimization with weights ${JSON.stringify(weights)}: ${error}`,\n // { tags: ['warning'] }\n // );\n // }\n }\n }\n\n return solutions;\n }\n\n /**\n * Generate solutions using constraint-based optimization\n */\n private async generateConstraintSolutions(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMultiMetricFn,\n options?: AxCompileOptions\n ): Promise<\n Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }>\n > {\n const solutions: Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }> = [];\n\n // Get objectives from a sample evaluation\n const sampleExample = this.examples[0]!;\n const samplePrediction = await program.forward(\n this.getAIService(false, options),\n sampleExample as IN\n );\n const sampleScores = await metricFn({\n prediction: samplePrediction,\n example: sampleExample,\n });\n const objectives = Object.keys(sampleScores);\n\n // For each objective, optimize it while constraining others\n for (const primaryObjective of objectives) {\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Optimizing ${primaryObjective} with constraints on other objectives`,\n // { tags: ['discovery'] }\n // );\n // }\n\n // Create a constraint-based metric\n const constraintMetric: AxMetricFn = async ({ prediction, example }) => {\n const scores = await metricFn({ prediction, example });\n\n // Primary objective score\n const primaryScore = scores[primaryObjective] || 0;\n\n // Penalty for violating constraints on other objectives\n let penalty = 0;\n for (const [objective, score] of Object.entries(scores)) {\n if (objective !== primaryObjective) {\n // Simple constraint: other objectives should be at least 0.3\n // This is a heuristic - in practice you'd set domain-specific thresholds\n if (score < 0.3) {\n penalty += (0.3 - score) * 2; // Penalty factor\n }\n }\n }\n\n return primaryScore - penalty;\n };\n\n try {\n const result = await this.compile(program, constraintMetric, {\n ...options,\n verbose: false,\n });\n\n const scores = await this.evaluateWithMultiObjective(\n program,\n result,\n metricFn\n );\n\n solutions.push({\n scores,\n demos: result.demos,\n configuration: {\n ...result.finalConfiguration,\n primaryObjective,\n strategy: 'constraint_based',\n },\n });\n } catch (_error) {\n // if (options?.verbose) {\n // this.getLogger(options)?.(\n // `Failed constraint optimization for ${primaryObjective}: ${error}`,\n // { tags: ['warning'] }\n // );\n // }\n }\n 
}\n\n return solutions;\n }\n\n /**\n * Generate different weight combinations for objectives\n */\n private generateWeightCombinations(\n objectives: string[]\n ): Record<string, number>[] {\n const combinations: Record<string, number>[] = [];\n\n // Single-objective focus (one objective gets weight 1, others get 0)\n for (const objective of objectives) {\n const weights: Record<string, number> = {};\n for (const obj of objectives) {\n weights[obj] = obj === objective ? 1 : 0;\n }\n combinations.push(weights);\n }\n\n // Equal weights\n const equalWeights: Record<string, number> = {};\n for (const objective of objectives) {\n equalWeights[objective] = 1 / objectives.length;\n }\n combinations.push(equalWeights);\n\n // If we have 2 objectives, generate more granular combinations\n if (objectives.length === 2) {\n const [obj1, obj2] = objectives;\n for (let w1 = 0.1; w1 <= 0.9; w1 += 0.2) {\n const w2 = 1 - w1;\n combinations.push({ [obj1!]: w1, [obj2!]: w2 });\n }\n }\n\n // If we have 3 objectives, generate some key combinations\n if (objectives.length === 3) {\n const [obj1, obj2, obj3] = objectives;\n combinations.push(\n { [obj1!]: 0.5, [obj2!]: 0.3, [obj3!]: 0.2 },\n { [obj1!]: 0.3, [obj2!]: 0.5, [obj3!]: 0.2 },\n { [obj1!]: 0.2, [obj2!]: 0.3, [obj3!]: 0.5 }\n );\n }\n\n return combinations;\n }\n\n /**\n * Evaluate a single-objective result with multi-objective metrics\n */\n private async evaluateWithMultiObjective(\n program: Readonly<AxGen<IN, OUT>>,\n result: Readonly<AxOptimizerResult<OUT>>,\n metricFn: AxMultiMetricFn\n ): Promise<Record<string, number>> {\n const testProgram = new AxGen(program.getSignature());\n if (result.demos) {\n testProgram.setDemos(result.demos);\n }\n\n const predictions = [];\n for (const ex of this.examples) {\n const prediction = await testProgram.forward(this.studentAI, ex as IN);\n predictions.push({ prediction, example: ex });\n }\n\n const valSet = this.getValidationSet();\n const allScores: Record<string, number[]> = {};\n\n // Evaluate on validation set\n const evalSet = valSet.slice(0, Math.min(5, valSet.length));\n\n for (const example of evalSet) {\n try {\n const prediction = await testProgram.forward(\n this.studentAI,\n example as IN\n );\n const scores = await metricFn({ prediction, example });\n\n // Collect scores for each objective\n for (const [objective, score] of Object.entries(scores)) {\n if (!allScores[objective]) {\n allScores[objective] = [];\n }\n allScores[objective]!.push(score);\n }\n } catch {}\n }\n\n // Calculate average scores for each objective\n const avgScores: Record<string, number> = {};\n for (const [objective, scores] of Object.entries(allScores)) {\n avgScores[objective] =\n scores.length > 0\n ? 
scores.reduce((sum, score) => sum + score, 0) / scores.length\n : 0;\n }\n\n return avgScores;\n }\n\n /**\n * Find the Pareto frontier from a set of solutions\n */\n private findParetoFrontier(\n solutions: Array<{\n scores: Record<string, number>;\n demos?: AxProgramDemos<AxGenIn, OUT>[];\n configuration: Record<string, unknown>;\n }>\n ): Array<{\n demos: readonly AxProgramDemos<AxGenIn, OUT>[];\n scores: Readonly<Record<string, number>>;\n configuration: Readonly<Record<string, unknown>>;\n dominatedSolutions: number;\n }> {\n const paretoFront: Array<{\n demos: readonly AxProgramDemos<AxGenIn, OUT>[];\n scores: Readonly<Record<string, number>>;\n configuration: Readonly<Record<string, unknown>>;\n dominatedSolutions: number;\n }> = [];\n\n // For each solution, check if it's dominated by any other solution\n for (let i = 0; i < solutions.length; i++) {\n const solutionA = solutions[i]!;\n let isDominated = false;\n let dominatedCount = 0;\n\n for (let j = 0; j < solutions.length; j++) {\n if (i === j) continue;\n\n const solutionB = solutions[j]!;\n\n // Check if B dominates A\n if (this.dominates(solutionB.scores, solutionA.scores)) {\n isDominated = true;\n break;\n }\n\n // Count how many solutions A dominates\n if (this.dominates(solutionA.scores, solutionB.scores)) {\n dominatedCount++;\n }\n }\n\n // If A is not dominated by any solution, it's on the Pareto frontier\n if (!isDominated) {\n paretoFront.push({\n demos: solutionA.demos || [],\n scores: solutionA.scores,\n configuration: solutionA.configuration,\n dominatedSolutions: dominatedCount,\n });\n }\n }\n\n return paretoFront;\n }\n\n /**\n * Check if solution A dominates solution B\n * A dominates B if A is better or equal in all objectives and strictly better in at least one\n */\n private dominates(\n scoresA: Record<string, number>,\n scoresB: Record<string, number>\n ): boolean {\n const objectives = Object.keys(scoresA);\n\n // Check if A is at least as good as B in all objectives\n let atLeastAsGood = true;\n let strictlyBetter = false;\n\n for (const objective of objectives) {\n const scoreA = scoresA[objective] || 0;\n const scoreB = scoresB[objective] || 0;\n\n if (scoreA < scoreB) {\n atLeastAsGood = false;\n break;\n }\n\n if (scoreA > scoreB) {\n strictlyBetter = true;\n }\n }\n\n return atLeastAsGood && strictlyBetter;\n }\n\n /**\n * Calculate hypervolume of the Pareto frontier\n * Simplified implementation using reference point at origin\n */\n private calculateHypervolume(\n paretoFront: Array<{\n scores: Readonly<Record<string, number>>;\n }>\n ): number | undefined {\n if (paretoFront.length === 0) return undefined;\n\n // For simplicity, calculate 2D hypervolume if we have exactly 2 objectives\n const firstSolution = paretoFront[0]!;\n const objectives = Object.keys(firstSolution.scores);\n\n if (objectives.length === 2) {\n const [obj1, obj2] = objectives;\n let hypervolume = 0;\n\n // Sort solutions by first objective (descending)\n const sortedSolutions = [...paretoFront].sort(\n (a, b) => (b.scores[obj1!] || 0) - (a.scores[obj1!] || 0)\n );\n\n let prevScore2 = 0;\n for (const solution of sortedSolutions) {\n const score1 = solution.scores[obj1!] || 0;\n const score2 = solution.scores[obj2!] 
|| 0;\n\n // Calculate area contribution\n hypervolume += score1 * (score2 - prevScore2);\n prevScore2 = Math.max(prevScore2, score2);\n }\n\n return hypervolume;\n }\n\n // For higher dimensions, return undefined (would need more complex algorithm)\n return undefined;\n }\n\n /**\n * Save current optimization state to checkpoint\n */\n protected async saveCheckpoint(\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration?: Record<string, unknown>,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ): Promise<string | undefined> {\n const saveFn = options?.overrideCheckpointSave || this.checkpointSave;\n if (!saveFn) return undefined;\n\n const startTime = Date.now();\n let success = false;\n let checkpointId: string | undefined;\n\n try {\n const checkpoint: AxOptimizationCheckpoint = {\n version: '1.0.0',\n timestamp: Date.now(),\n optimizerType,\n optimizerConfig,\n currentRound: this.currentRound,\n totalRounds:\n this.stats.resourceUsage.totalTime > 0 ? this.currentRound : 0,\n bestScore,\n bestConfiguration,\n scoreHistory: [...this.scoreHistory],\n configurationHistory: [...this.configurationHistory],\n stats: { ...this.stats },\n optimizerState,\n examples: this.examples,\n validationSet: this.validationSet,\n };\n\n checkpointId = await saveFn(checkpoint);\n success = true;\n } catch (error) {\n success = false;\n throw error;\n } finally {\n const latency = Date.now() - startTime;\n this.recordCheckpointMetrics('save', latency, success, optimizerType);\n }\n\n return checkpointId;\n }\n\n /**\n * Load optimization state from checkpoint\n */\n protected async loadCheckpoint(\n checkpointId: string,\n options?: AxCompileOptions\n ): Promise<AxOptimizationCheckpoint | null> {\n const loadFn = options?.overrideCheckpointLoad || this.checkpointLoad;\n if (!loadFn) return null;\n\n const startTime = Date.now();\n let success = false;\n let checkpoint: AxOptimizationCheckpoint | null = null;\n\n try {\n checkpoint = await loadFn(checkpointId);\n success = checkpoint !== null;\n } catch (error) {\n success = false;\n throw error;\n } finally {\n const latency = Date.now() - startTime;\n // Use a default optimizer type since we don't know it at load time\n this.recordCheckpointMetrics('load', latency, success, 'unknown');\n }\n\n return checkpoint;\n }\n\n /**\n * Restore optimizer state from checkpoint\n */\n protected restoreFromCheckpoint(\n checkpoint: Readonly<AxOptimizationCheckpoint>\n ): void {\n this.currentRound = checkpoint.currentRound;\n this.scoreHistory = [...checkpoint.scoreHistory];\n this.configurationHistory = [...checkpoint.configurationHistory];\n this.stats = { ...checkpoint.stats };\n }\n\n /**\n * Check if checkpoint should be saved\n */\n protected shouldSaveCheckpoint(\n round: number,\n options?: AxCompileOptions\n ): boolean {\n const interval =\n options?.overrideCheckpointInterval || this.checkpointInterval;\n return interval !== undefined && round % interval === 0;\n }\n\n /**\n * Update optimization progress and handle checkpointing\n */\n protected async updateOptimizationProgress(\n round: number,\n score: number,\n configuration: Record<string, unknown>,\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration?: Record<string, unknown>,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ): Promise<void> {\n this.currentRound = round;\n this.scoreHistory.push(score);\n 
this.configurationHistory.push(configuration);\n\n // Save checkpoint if needed\n if (this.shouldSaveCheckpoint(round, options)) {\n await this.saveCheckpoint(\n optimizerType,\n optimizerConfig,\n bestScore,\n bestConfiguration,\n optimizerState,\n options\n );\n }\n const optLogger = this.getOptimizerLogger(options);\n optLogger?.({\n name: 'RoundProgress',\n value: {\n round,\n totalRounds: options?.maxIterations ?? 0,\n currentScore: score,\n bestScore,\n configuration,\n },\n });\n }\n\n /**\n * Save final checkpoint on completion\n */\n protected async saveFinalCheckpoint(\n optimizerType: string,\n optimizerConfig: Record<string, unknown>,\n bestScore: number,\n bestConfiguration?: Record<string, unknown>,\n optimizerState: Record<string, unknown> = {},\n options?: AxCompileOptions\n ): Promise<void> {\n if (options?.saveCheckpointOnComplete !== false) {\n await this.saveCheckpoint(\n optimizerType,\n optimizerConfig,\n bestScore,\n bestConfiguration,\n { ...optimizerState, final: true },\n options\n );\n }\n }\n\n /**\n * Get the logger function with fallback hierarchy:\n * 1. Explicit logger passed to optimizer\n * 2. Logger from student AI service\n * 3. undefined if verbose is false\n */\n protected getLogger(\n options?: AxCompileOptions\n ): AxLoggerFunction | undefined {\n // Check if logging should be disabled\n const isVerbose = this.isLoggingEnabled(options);\n if (!isVerbose) {\n return undefined;\n }\n\n // Use explicit logger if provided\n if (this.logger) {\n return this.logger;\n }\n\n // Fall back to student AI logger\n return this.studentAI.getLogger();\n }\n\n /**\n * Check if logging is enabled based on verbose settings\n */\n protected isLoggingEnabled(options?: AxCompileOptions): boolean {\n // Explicit verbose setting in options takes precedence\n if (options?.verbose !== undefined) {\n return options.verbose;\n }\n\n // Use optimizer's verbose setting\n return this.verbose ?? true; // Default to true if not specified\n }\n\n /**\n * Record optimization start metrics\n */\n protected recordOptimizationStart(\n optimizerType: string,\n programSignature?: string\n ): void {\n if (!this.metricsInstruments) return;\n\n // Record program complexity metrics\n if (programSignature) {\n // Extract field counts from signature (simplified)\n const inputFields = (programSignature.match(/input:/g) || []).length;\n const outputFields = (programSignature.match(/output:/g) || []).length;\n\n recordProgramComplexityMetric(\n this.metricsInstruments,\n inputFields,\n outputFields,\n this.examples.length,\n this.getValidationSet().length,\n optimizerType\n );\n }\n\n // Record configuration metrics\n recordOptimizerConfigurationMetric(\n this.metricsInstruments,\n optimizerType,\n this.targetScore,\n undefined // maxRounds would be set by concrete optimizers\n );\n }\n\n /**\n * Record optimization completion metrics\n */\n protected recordOptimizationComplete(\n duration: number,\n success: boolean,\n optimizerType: string,\n programSignature?: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordOptimizationMetric(\n this.metricsInstruments,\n duration,\n success,\n optimizerType,\n programSignature\n );\n\n recordOptimizationDurationMetric(\n this.metricsInstruments,\n duration,\n optimizerType\n );\n\n // Record resource usage\n const currentCost = this.costTracker?.getCurrentCost() ?? 0;\n const totalTokens = this.costTracker?.getTotalTokens() ?? 
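The checkpoint hooks used above only need a save function that accepts an AxOptimizationCheckpoint and returns an id, and a load function that resolves an id back to a checkpoint (or null), the shapes implied by saveFn(checkpoint) and loadFn(checkpointId). A minimal in-memory sketch under that assumption; such functions could then be supplied as overrideCheckpointSave / overrideCheckpointLoad in AxCompileOptions, which take precedence over the instance-level hooks:

// In-memory checkpoint store; a sketch only, not part of the package.
const checkpointStore = new Map<string, AxOptimizationCheckpoint>();

const checkpointSave = async (
  checkpoint: Readonly<AxOptimizationCheckpoint>
): Promise<string> => {
  const id = `ckpt-${checkpoint.optimizerType}-${checkpoint.timestamp}`;
  checkpointStore.set(id, { ...checkpoint });
  return id;
};

const checkpointLoad = async (
  checkpointId: string
): Promise<AxOptimizationCheckpoint | null> =>
  checkpointStore.get(checkpointId) ?? null;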
0;\n recordResourceUsageMetric(\n this.metricsInstruments,\n totalTokens,\n currentCost,\n optimizerType\n );\n }\n\n /**\n * Record convergence metrics\n */\n protected recordConvergenceMetrics(\n rounds: number,\n currentScore: number,\n improvement: number,\n stagnationRounds: number,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordConvergenceMetric(\n this.metricsInstruments,\n rounds,\n currentScore,\n improvement,\n stagnationRounds,\n optimizerType\n );\n }\n\n /**\n * Record early stopping metrics\n */\n protected recordEarlyStoppingMetrics(\n reason: string,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordEarlyStoppingMetric(this.metricsInstruments, reason, optimizerType);\n }\n\n /**\n * Record teacher-student interaction metrics\n */\n protected recordTeacherStudentMetrics(\n latency: number,\n scoreImprovement: number,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordTeacherStudentMetric(\n this.metricsInstruments,\n latency,\n scoreImprovement,\n optimizerType\n );\n }\n\n /**\n * Record checkpoint metrics\n */\n protected recordCheckpointMetrics(\n operation: 'save' | 'load',\n latency: number,\n success: boolean,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordCheckpointMetric(\n this.metricsInstruments,\n operation,\n latency,\n success,\n optimizerType\n );\n }\n\n /**\n * Record Pareto optimization metrics\n */\n protected recordParetoMetrics(\n frontSize: number,\n solutionsGenerated: number,\n optimizerType: string,\n hypervolume?: number\n ): void {\n if (!this.metricsInstruments) return;\n\n recordParetoMetric(\n this.metricsInstruments,\n frontSize,\n solutionsGenerated,\n optimizerType,\n hypervolume\n );\n }\n\n /**\n * Record performance metrics\n */\n protected recordPerformanceMetrics(\n metricType: 'evaluation' | 'demo_generation' | 'metric_computation',\n duration: number,\n optimizerType: string\n ): void {\n if (!this.metricsInstruments) return;\n\n recordOptimizerPerformanceMetric(\n this.metricsInstruments,\n metricType,\n duration,\n optimizerType\n );\n }\n\n // Optimizer logging methods\n protected isOptimizerLoggingEnabled(options?: AxCompileOptions): boolean {\n return this.debugOptimizer || (options?.verbose ?? this.verbose ?? false);\n }\n\n protected getOptimizerLogger(\n options?: AxCompileOptions\n ): AxOptimizerLoggerFunction | undefined {\n if (!this.isOptimizerLoggingEnabled(options)) return undefined;\n return this.optimizerLogger ?? 
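getOptimizerLogger falls back to axDefaultOptimizerLogger, so a custom logger only has to accept the single entry object emitted at the call sites above (for example the 'RoundProgress' entry from updateOptimizationProgress). A sketch, with the entry shape assumed from those call sites rather than from the published type:

// Sketch of a custom optimizer logger; the exact AxOptimizerLoggerFunction
// argument type is assumed from the optLogger?.({ name, value }) calls above.
const consoleOptimizerLogger: AxOptimizerLoggerFunction = (entry) => {
  if (entry.name === 'RoundProgress') {
    const { round, totalRounds, currentScore, bestScore } = entry.value as {
      round: number;
      totalRounds: number;
      currentScore: number;
      bestScore: number;
    };
    console.log(
      `round ${round}/${totalRounds}: score=${currentScore.toFixed(3)} best=${bestScore.toFixed(3)}`
    );
  }
};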
axDefaultOptimizerLogger;\n }\n\n public getStats(): AxOptimizationStats {\n return { ...this.stats };\n }\n\n public reset(): void {\n this.stats = this.initializeStats();\n this.costTracker?.reset();\n this.currentRound = 0;\n this.scoreHistory = [];\n this.configurationHistory = [];\n }\n}\n","import type { AxAIService } from '../../ai/types.js';\nimport { AxGen } from '../generate.js';\nimport {\n AxBaseOptimizer,\n type AxCompileOptions,\n type AxExample,\n type AxMetricFn,\n type AxMiPROCompileOptions,\n type AxMiPROOptimizerOptions,\n type AxOptimizerArgs,\n type AxOptimizerResult,\n} from '../optimizer.js';\nimport type {\n AxGenIn,\n AxGenOut,\n AxProgramDemos,\n AxResultPickerFunction,\n} from '../types.js';\n\nimport { AxBootstrapFewShot } from './bootstrapFewshot.js';\n\ninterface ConfigType extends Record<string, unknown> {\n instruction: string;\n bootstrappedDemos: number;\n labeledExamples: number;\n}\n\n// Extended result interface to include the optimized AxGen\nexport interface AxMiPROResult<IN extends AxGenIn, OUT extends AxGenOut>\n extends AxOptimizerResult<OUT> {\n optimizedGen?: AxGen<IN, OUT>;\n}\n\nexport class AxMiPRO<\n IN extends AxGenIn = AxGenIn,\n OUT extends AxGenOut = AxGenOut,\n> extends AxBaseOptimizer<IN, OUT> {\n // MiPRO-specific options\n private maxBootstrappedDemos: number;\n private maxLabeledDemos: number;\n private numCandidates: number;\n private initTemperature: number;\n private numTrials: number;\n private minibatch: boolean;\n private minibatchSize: number;\n private minibatchFullEvalSteps: number;\n private programAwareProposer: boolean;\n private dataAwareProposer: boolean;\n private viewDataBatchSize: number;\n private tipAwareProposer: boolean;\n private fewshotAwareProposer: boolean;\n private earlyStoppingTrials: number;\n private minImprovementThreshold: number;\n private bayesianOptimization: boolean;\n private acquisitionFunction:\n | 'expected_improvement'\n | 'upper_confidence_bound'\n | 'probability_improvement';\n private explorationWeight: number;\n\n // Self-consistency / multiple sampling\n private sampleCount: number;\n\n // Surrogate model state for Bayesian optimization\n private miproConfigHistory: { config: ConfigType; score: number }[] = [];\n private surrogateModel: Map<string, { mean: number; variance: number }> =\n new Map();\n\n constructor(\n args: Readonly<AxOptimizerArgs & { options?: AxMiPROOptimizerOptions }>\n ) {\n // Call parent constructor with base args\n super(args);\n\n const options = args.options || {};\n\n // MiPRO-specific options with proper defaults\n this.numCandidates = options.numCandidates ?? 5;\n this.initTemperature = options.initTemperature ?? 0.7;\n this.maxBootstrappedDemos = options.maxBootstrappedDemos ?? 3;\n this.maxLabeledDemos = options.maxLabeledDemos ?? 4;\n this.numTrials = options.numTrials ?? 30;\n this.minibatch = options.minibatch ?? true;\n this.minibatchSize = options.minibatchSize ?? 25;\n this.minibatchFullEvalSteps = options.minibatchFullEvalSteps ?? 10;\n this.programAwareProposer = options.programAwareProposer ?? true;\n this.dataAwareProposer = options.dataAwareProposer ?? true;\n this.viewDataBatchSize = options.viewDataBatchSize ?? 10;\n this.tipAwareProposer = options.tipAwareProposer ?? true;\n this.fewshotAwareProposer = options.fewshotAwareProposer ?? true;\n this.earlyStoppingTrials = options.earlyStoppingTrials ?? 5;\n this.minImprovementThreshold = options.minImprovementThreshold ?? 0.01;\n this.bayesianOptimization = options.bayesianOptimization ?? 
false;\n this.acquisitionFunction =\n options.acquisitionFunction ?? 'expected_improvement';\n this.explorationWeight = options.explorationWeight ?? 0.1;\n\n // Self-consistency options\n this.sampleCount = options.sampleCount ?? 1;\n\n // Update convergence threshold in stats\n this.stats.convergenceInfo.convergenceThreshold =\n this.minImprovementThreshold;\n }\n\n /**\n * Configures the optimizer for light, medium, or heavy optimization\n * @param level The optimization level: \"light\", \"medium\", or \"heavy\"\n */\n public configureAuto(level: 'light' | 'medium' | 'heavy'): void {\n switch (level) {\n case 'light':\n this.numCandidates = 3;\n this.numTrials = 10;\n this.minibatch = true;\n this.minibatchSize = 20;\n break;\n case 'medium':\n this.numCandidates = 5;\n this.numTrials = 20;\n this.minibatch = true;\n this.minibatchSize = 25;\n break;\n case 'heavy':\n this.numCandidates = 7;\n this.numTrials = 30;\n this.minibatch = true;\n this.minibatchSize = 30;\n break;\n }\n }\n\n /**\n * Generates creative tips for instruction generation\n */\n private generateTips(): string[] {\n return [\n 'Be very specific and detailed in your instructions.',\n 'Focus on step-by-step reasoning in your instructions.',\n 'Provide clear constraints and guidelines in your instructions.',\n 'Keep your instructions concise and to the point.',\n 'Emphasize accuracy and precision in your instructions.',\n 'Include examples of good outputs in your instructions.',\n 'Focus on handling edge cases in your instructions.',\n 'Explicitly outline the reasoning process in your instructions.',\n ];\n }\n\n /**\n * Generates program summary for context-aware instruction generation\n */\n private async generateProgramSummary(\n program: Readonly<AxGen<IN, OUT>>,\n ai: Readonly<AxAIService>\n ): Promise<string> {\n // Extract program structure information\n const signature = program.getSignature();\n\n // Create program summary prompt based on paper's Appendix C.5\n const summaryPrompt = `\nAnalyze this language model program and provide a concise summary of its purpose and structure.\n\nProgram Signature: ${signature}\n\nProvide a 2-3 sentence summary focusing on:\n1. The main task or purpose of this program\n2. The input-output relationship\n3. Any special constraints or requirements\n\nSummary:`;\n\n try {\n const response = await ai.chat({\n chatPrompt: [{ role: 'user', content: summaryPrompt }],\n });\n if ('results' in response) {\n return (\n response.results[0]?.content?.trim() ||\n 'General language model program'\n );\n }\n return 'General language model program';\n } catch {\n return 'General language model program';\n }\n }\n\n /**\n * Generates dataset summary for context-aware instruction generation\n */\n private async generateDatasetSummary(\n examples: readonly AxExample[],\n ai: Readonly<AxAIService>\n ): Promise<string> {\n if (examples.length === 0) return 'No examples available';\n\n // Sample a few examples for analysis (based on paper's approach)\n const sampleSize = Math.min(this.viewDataBatchSize, examples.length);\n const sampledExamples = examples.slice(0, sampleSize);\n\n // Create dataset summary prompt based on paper's Appendix C.3\n const exampleTexts = sampledExamples\n .map((ex, i) => `Example ${i + 1}: ${JSON.stringify(ex)}`)\n .join('\\n');\n\n const summaryPrompt = `\nAnalyze this dataset and provide a concise summary of its characteristics.\n\nSample Examples:\n${exampleTexts}\n\nProvide a 2-3 sentence summary focusing on:\n1. The type of data and domain\n2. 
Common patterns or structures in the examples\n3. Key challenges or requirements for processing this data\n\nDataset Summary:`;\n\n try {\n const response = await ai.chat({\n chatPrompt: [{ role: 'user', content: summaryPrompt }],\n });\n if ('results' in response) {\n return response.results[0]?.content?.trim() || 'General dataset';\n }\n return 'General dataset';\n } catch {\n return 'General dataset';\n }\n }\n\n /**\n * Enhanced instruction generation using AI with program and data awareness\n */\n private async generateInstruction({\n tip,\n candidateIndex,\n ai,\n programSummary,\n datasetSummary,\n previousInstructions = [],\n }: Readonly<{\n tip: string | undefined;\n candidateIndex: number;\n ai: Readonly<AxAIService>;\n programSummary?: string;\n datasetSummary?: string;\n previousInstructions?: string[];\n }>): Promise<string> {\n // Build context-aware instruction generation prompt based on paper\n let contextInfo = '';\n\n if (this.programAwareProposer && programSummary) {\n contextInfo += `\\nProgram Context: ${programSummary}`;\n }\n\n if (this.dataAwareProposer && datasetSummary) {\n contextInfo += `\\nDataset Context: ${datasetSummary}`;\n }\n\n if (this.fewshotAwareProposer && previousInstructions.length > 0) {\n contextInfo += `\\nPrevious Instructions (avoid repeating): ${previousInstructions.slice(-3).join('; ')}`;\n }\n\n // Core instruction generation prompt inspired by paper's Appendix C.1\n const instructionPrompt = `\nGenerate a high-quality instruction for a language model program.\n\n${contextInfo}\n\n${tip ? `Tip: ${tip}` : ''}\n\nRequirements:\n1. Be specific and actionable\n2. Focus on accuracy and clarity\n3. Consider the program's purpose and data characteristics\n4. Make the instruction distinct from previous ones\n5. 
Keep it concise but comprehensive\n\nGenerate a single, well-crafted instruction:\nInstruction:`;\n\n try {\n const response = await ai.chat({\n chatPrompt: [\n {\n role: 'user',\n content: instructionPrompt,\n },\n ],\n });\n\n if ('results' in response) {\n const instruction = response.results[0]?.content?.trim();\n if (instruction && instruction.length > 10) {\n return instruction;\n }\n }\n } catch (_error) {\n // if (this.isLoggingEnabled()) {\n // this.getLogger()?.(`Failed to generate AI instruction: ${error}`, {\n // tags: ['optimizer', 'warning'],\n // });\n // }\n }\n\n // Fallback to enhanced templates if AI generation fails\n const enhancedTemplates = [\n 'Analyze the input systematically and provide a precise, well-reasoned response.',\n 'Think through this step-by-step, considering all relevant factors before responding.',\n 'Examine the input carefully and generate an accurate, detailed answer.',\n 'Process the information methodically and deliver a clear, comprehensive response.',\n 'Consider the context thoroughly and provide a thoughtful, accurate answer.',\n ];\n\n let instruction =\n enhancedTemplates[candidateIndex % enhancedTemplates.length] ||\n enhancedTemplates[0]!;\n\n if (tip) {\n instruction = `${instruction} ${tip}`;\n }\n\n return instruction;\n }\n\n /**\n * Generates instruction candidates using enhanced AI-powered generation\n * @param options Optional compile options that may override teacher AI\n * @returns Array of generated instruction candidates\n */\n private async proposeInstructionCandidates(\n _program: Readonly<AxGen<IN, OUT>>,\n options?: AxCompileOptions\n ): Promise<string[]> {\n const instructions: string[] = [];\n const aiToUse = this.getTeacherOrStudentAI(options);\n\n // Generate contextual information if enabled\n let programSummary: string | undefined;\n let datasetSummary: string | undefined;\n\n // if (this.programAwareProposer) {\n // programSummary = await this.generateProgramSummary(program, aiToUse);\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(`Program summary: ${programSummary}`, {\n // tags: ['optimizer', 'config'],\n // });\n // }\n // }\n\n if (this.dataAwareProposer) {\n datasetSummary = await this.generateDatasetSummary(\n this.examples,\n aiToUse\n );\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(`Dataset summary: ${datasetSummary}`, {\n // tags: ['optimizer', 'config'],\n // });\n // }\n }\n\n // Generate creative tips for tip-aware proposing\n const tips = this.tipAwareProposer ? this.generateTips() : [];\n\n // Generate instructions for each candidate\n for (let i = 0; i < this.numCandidates; i++) {\n const tipIndex = tips.length > 0 ? i % tips.length : -1;\n const tipToUse = tipIndex >= 0 ? 
tips[tipIndex] : undefined;\n\n const instruction = await this.generateInstruction({\n tip: tipToUse,\n candidateIndex: i,\n ai: aiToUse,\n programSummary,\n datasetSummary,\n previousInstructions: instructions, // Pass previous instructions for diversity\n });\n\n instructions.push(instruction);\n }\n\n return instructions;\n }\n\n /**\n * Bootstraps few-shot examples for the program\n */\n private async bootstrapFewShotExamples(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMetricFn\n ): Promise<AxProgramDemos<IN, OUT>[]> {\n // Initialize the bootstrapper for this program\n const bootstrapper = new AxBootstrapFewShot<IN, OUT>({\n studentAI: this.studentAI,\n examples: this.examples,\n options: {\n maxDemos: this.maxBootstrappedDemos,\n maxRounds: 3,\n verboseMode: this.isLoggingEnabled(),\n },\n });\n\n const result = await bootstrapper.compile(program, metricFn, {\n maxDemos: this.maxBootstrappedDemos,\n });\n\n return (result.demos || []) as AxProgramDemos<IN, OUT>[];\n }\n\n /**\n * Selects labeled examples directly from the training set\n */\n private selectLabeledExamples(): AxExample[] {\n const selectedExamples: AxExample[] = [];\n\n // Random sampling from the training set\n const indices = new Set<number>();\n while (\n indices.size < this.maxLabeledDemos &&\n indices.size < this.examples.length\n ) {\n const idx = Math.floor(Math.random() * this.examples.length);\n if (!indices.has(idx)) {\n indices.add(idx);\n const example = this.examples[idx];\n if (example) {\n selectedExamples.push(example);\n }\n }\n }\n\n return selectedExamples;\n }\n\n /**\n * Runs optimization to find the best combination of few-shot examples and instructions\n */\n private async runOptimization(\n program: Readonly<AxGen<IN, OUT>>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[],\n instructions: readonly string[],\n validationExamples: readonly AxExample[],\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<{ bestConfig: ConfigType; bestScore: number }> {\n let bestConfig: ConfigType = {\n instruction: instructions[0] || '',\n bootstrappedDemos: Math.min(1, bootstrappedDemos.length),\n labeledExamples: Math.min(1, labeledExamples.length),\n };\n let bestScore = 0;\n let stagnationRounds = 0;\n const scoreHistory: number[] = [];\n\n // Check for checkpoint resume\n let startRound = 0;\n if (this.resumeFromCheckpoint) {\n const checkpoint = await this.loadCheckpoint(\n this.resumeFromCheckpoint,\n options\n );\n if (checkpoint && checkpoint.optimizerType === 'MiPRO') {\n this.restoreFromCheckpoint(checkpoint);\n startRound = checkpoint.currentRound;\n bestScore = checkpoint.bestScore;\n bestConfig = (checkpoint.bestConfiguration as ConfigType) || bestConfig;\n stagnationRounds =\n checkpoint.stats.convergenceInfo?.stagnationRounds || 0;\n }\n }\n\n // Optimization loop with early stopping and checkpointing\n\n for (let i = startRound; i < this.numTrials; i++) {\n let config: ConfigType;\n\n if (this.bayesianOptimization && this.miproConfigHistory.length > 2) {\n // Use Bayesian optimization with acquisition function\n config = await this.selectConfigurationViaBayesianOptimization(\n instructions,\n bootstrappedDemos,\n labeledExamples\n );\n } else {\n // Random or round-robin selection (exploration phase)\n config = {\n instruction:\n instructions[i % instructions.length] || instructions[0] || '',\n bootstrappedDemos: Math.min(\n Math.floor(Math.random() * (bootstrappedDemos.length + 1)),\n this.maxBootstrappedDemos\n ),\n 
labeledExamples: Math.min(\n Math.floor(Math.random() * (labeledExamples.length + 1)),\n this.maxLabeledDemos\n ),\n };\n }\n\n const score = await this.evaluateConfig(\n program,\n config,\n bootstrappedDemos,\n labeledExamples,\n validationExamples,\n metricFn,\n i + 1 // Pass current trial number for adaptive evaluation\n );\n\n // Update surrogate model with observed score\n this.updateSurrogateModel(config, score);\n\n scoreHistory.push(score);\n\n // Check for improvement\n const improvement = score - bestScore;\n if (improvement > this.minImprovementThreshold) {\n bestScore = score;\n bestConfig = config;\n stagnationRounds = 0;\n\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(\n // `Trial ${i + 1}/${this.numTrials}: New best score ${bestScore.toFixed(3)}`,\n // { tags: ['optimizer', 'progress'] }\n // );\n // }\n } else {\n stagnationRounds++;\n }\n\n // Update optimization progress with checkpointing\n await this.updateOptimizationProgress(\n i + 1,\n score,\n config,\n 'MiPRO',\n this.getConfiguration(),\n bestScore,\n bestConfig,\n {\n stagnationRounds,\n bootstrappedDemos: bootstrappedDemos.length,\n labeledExamples: labeledExamples.length,\n instructions: instructions.length,\n },\n options\n );\n\n // Progress callback\n if (this.onProgress) {\n this.onProgress({\n round: i + 1,\n totalRounds: this.numTrials,\n currentScore: score,\n bestScore,\n tokensUsed: this.stats.resourceUsage.totalTokens,\n timeElapsed: Date.now(),\n successfulExamples: this.stats.successfulDemos,\n totalExamples: this.examples.length,\n currentConfiguration: config,\n convergenceInfo: {\n improvement,\n stagnationRounds,\n isConverging: stagnationRounds < this.earlyStoppingTrials,\n },\n });\n }\n\n // Cost tracking check (handles token/time/cost budgets)\n if (this.checkCostLimits()) {\n this.triggerEarlyStopping('Cost limit reached', i + 1);\n break;\n }\n\n // Early stopping check\n if (stagnationRounds >= this.earlyStoppingTrials) {\n this.triggerEarlyStopping(\n `No improvement for ${this.earlyStoppingTrials} trials`,\n i - stagnationRounds + 1\n );\n break;\n }\n\n // Target score check\n if (this.checkTargetScore(bestScore)) {\n this.triggerEarlyStopping(\n `Target score ${this.targetScore} reached`,\n i + 1\n );\n break;\n }\n }\n\n // Update convergence info\n this.stats.convergenceInfo.stagnationRounds = stagnationRounds;\n this.stats.convergenceInfo.finalImprovement =\n scoreHistory.length > 1 ? bestScore - scoreHistory[0]! 
: 0;\n this.stats.convergenceInfo.converged =\n stagnationRounds < this.earlyStoppingTrials;\n\n return { bestConfig, bestScore };\n }\n\n private async evaluateConfig(\n program: Readonly<AxGen<IN, OUT>>,\n config: Readonly<ConfigType>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[],\n validationExamples: readonly AxExample[],\n metricFn: AxMetricFn,\n currentTrial = 0\n ): Promise<number> {\n const testProgram = new AxGen(program.getSignature());\n this.applyConfigToProgram(\n testProgram,\n config,\n bootstrappedDemos,\n labeledExamples\n );\n\n let totalScore = 0;\n let count = 0;\n\n // Adaptive minibatch size based on paper's approach\n let evalSize: number;\n if (this.minibatch) {\n // Start with smaller batches and increase for more promising configurations\n const baseSize = Math.min(this.minibatchSize, validationExamples.length);\n\n // Use full evaluation for top configurations in later trials\n const isFullEvalTrial = currentTrial % this.minibatchFullEvalSteps === 0;\n if (isFullEvalTrial || currentTrial > this.numTrials * 0.8) {\n evalSize = Math.min(validationExamples.length, baseSize * 2);\n } else {\n // Stochastic minibatch evaluation\n evalSize = Math.max(3, Math.min(baseSize, validationExamples.length));\n }\n } else {\n evalSize = validationExamples.length;\n }\n\n // Randomly sample evaluation examples for stochastic evaluation\n const evalIndices = this.shuffleArray([\n ...Array(validationExamples.length).keys(),\n ]).slice(0, evalSize);\n const evalSet = evalIndices.map((i) => validationExamples[i]!);\n\n for (const example of evalSet) {\n try {\n const prediction = await testProgram.forward(\n this.studentAI,\n example as IN,\n this.sampleCount > 1\n ? {\n sampleCount: this.sampleCount,\n resultPicker:\n axMajorityVotePicker<OUT>() as AxResultPickerFunction<AxGenOut>,\n }\n : undefined\n );\n const score = await metricFn({ prediction, example });\n totalScore += score;\n count++;\n this.stats.totalCalls++;\n } catch {}\n }\n\n return count > 0 ? 
totalScore / count : 0;\n }\n\n /**\n * Fisher-Yates shuffle for stochastic evaluation\n */\n private shuffleArray<T>(array: T[]): T[] {\n const shuffled = [...array];\n for (let i = shuffled.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [shuffled[i], shuffled[j]] = [shuffled[j]!, shuffled[i]!];\n }\n return shuffled;\n }\n\n private applyConfigToProgram(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n program: any,\n config: Readonly<ConfigType>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[]\n ): void {\n // Set instruction if the program supports it\n if (program.setInstruction) {\n program.setInstruction(config.instruction);\n }\n\n // Set demos if needed\n if (config.bootstrappedDemos > 0 && program.setDemos) {\n program.setDemos(bootstrappedDemos.slice(0, config.bootstrappedDemos));\n }\n\n // Set examples if needed\n if (config.labeledExamples > 0 && program.setExamples) {\n program.setExamples(labeledExamples.slice(0, config.labeledExamples));\n }\n }\n\n /**\n * The main compile method to run MIPROv2 optimization\n */\n public async compile(\n program: Readonly<AxGen<IN, OUT>>,\n metricFn: AxMetricFn,\n options?: AxCompileOptions\n ): Promise<AxMiPROResult<IN, OUT>> {\n const startTime = Date.now();\n\n // Initialize random seed if provided\n this.setupRandomSeed();\n\n // Configure auto settings if provided (cast to access MiPRO-specific options)\n const miproOptions = options as AxMiPROCompileOptions;\n if (miproOptions?.auto) {\n this.configureAuto(miproOptions.auto);\n }\n\n // Use validation set from parent class method\n const validationExamples =\n this.getValidationSet(options) ||\n (miproOptions?.validationExamples ??\n this.examples.slice(0, Math.floor(this.examples.length * 0.2)));\n\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(\n // `Starting MIPROv2 optimization with ${this.numTrials} trials`,\n // { tags: ['optimizer', 'start'] }\n // );\n // this.getLogger(options)?.(\n // `Using ${this.examples.length} examples for training and ${validationExamples.length} for validation`,\n // { tags: ['optimizer', 'config'] }\n // );\n // if (this.teacherAI) {\n // this.getLogger(options)?.(\n // 'Using separate teacher model for instruction generation',\n // { tags: ['optimizer', 'config'] }\n // );\n // }\n // }\n\n // Step 1: Bootstrap few-shot examples\n let bootstrappedDemos: AxProgramDemos<IN, OUT>[] = [];\n if (this.maxBootstrappedDemos > 0) {\n bootstrappedDemos = await this.bootstrapFewShotExamples(\n program,\n metricFn\n );\n\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(\n // `Generated ${bootstrappedDemos.length} bootstrapped demonstrations`,\n // { tags: ['optimizer', 'result'] }\n // );\n // }\n }\n\n // Step 2: Select labeled examples from training set\n let labeledExamples: AxExample[] = [];\n if (this.maxLabeledDemos > 0) {\n labeledExamples = this.selectLabeledExamples();\n\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(\n // `Selected ${labeledExamples.length} labeled examples from training set`,\n // { tags: ['optimizer', 'result'] }\n // );\n // }\n }\n\n // Step 3: Generate instruction candidates\n const instructions = await this.proposeInstructionCandidates(\n program,\n options\n );\n\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(\n // `Generated ${instructions.length} instruction candidates`,\n // { tags: ['optimizer', 'result'] }\n // );\n // if 
(this.hasTeacherAI(options)) {\n // this.getLogger(options)?.(\n // 'Using teacher AI for instruction generation',\n // { tags: ['optimizer', 'config'] }\n // );\n // }\n // }\n\n // Step 4: Run optimization to find the best configuration\n const { bestConfig, bestScore } = await this.runOptimization(\n program,\n bootstrappedDemos,\n labeledExamples,\n instructions,\n validationExamples,\n metricFn,\n options\n );\n\n // if (this.isLoggingEnabled(options)) {\n // this.getLogger(options)?.(\n // `Optimization complete. Best score: ${bestScore}`,\n // { tags: ['optimizer', 'complete'] }\n // );\n // this.getLogger(options)?.(\n // `Best configuration: ${JSON.stringify(bestConfig)}`,\n // { tags: ['optimizer', 'result'] }\n // );\n // }\n\n // Check if target score was reached\n if (this.checkTargetScore(bestScore)) {\n this.triggerEarlyStopping(\n `Target score ${this.targetScore} reached with score ${bestScore}`,\n this.numTrials\n );\n }\n\n // Create a new AxGen instance with the optimized configuration\n let signature: any;\n if (\n 'getSignature' in program &&\n typeof program.getSignature === 'function'\n ) {\n signature = program.getSignature();\n } else {\n // Fallback: create a basic signature\n signature = 'input -> output';\n }\n\n const optimizedGen = new AxGen<IN, OUT>(signature);\n\n // Apply the best configuration to the new AxGen\n this.applyConfigToAxGen(\n optimizedGen,\n bestConfig,\n bootstrappedDemos,\n labeledExamples\n );\n\n // Update stats using parent class method\n this.updateResourceUsage(startTime);\n this.stats.convergenceInfo.converged = true;\n this.stats.convergenceInfo.finalImprovement = bestScore;\n\n // Save final checkpoint\n await this.saveFinalCheckpoint(\n 'MiPRO',\n this.getConfiguration(),\n bestScore,\n bestConfig,\n {\n bootstrappedDemos: bootstrappedDemos.length,\n labeledExamples: labeledExamples.length,\n instructions: instructions.length,\n optimizedGen: !!optimizedGen,\n },\n options\n );\n\n return {\n demos: bootstrappedDemos,\n stats: this.stats,\n bestScore,\n optimizedGen,\n finalConfiguration: {\n instruction: bestConfig.instruction,\n bootstrappedDemos: bestConfig.bootstrappedDemos,\n labeledExamples: bestConfig.labeledExamples,\n numCandidates: this.numCandidates,\n numTrials: this.numTrials,\n sampleCount: this.sampleCount,\n },\n };\n }\n\n /**\n * Applies a configuration to an AxGen instance\n */\n private applyConfigToAxGen(\n axgen: Readonly<AxGen<IN, OUT>>,\n config: Readonly<ConfigType>,\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[]\n ): void {\n // Set instruction if the AxGen supports it\n if (\n 'setInstruction' in axgen &&\n typeof axgen.setInstruction === 'function'\n ) {\n axgen.setInstruction(config.instruction);\n }\n\n // Set demos if needed\n if (config.bootstrappedDemos > 0) {\n axgen.setDemos(bootstrappedDemos.slice(0, config.bootstrappedDemos));\n }\n\n // Set examples if needed\n if (config.labeledExamples > 0) {\n axgen.setExamples(\n labeledExamples.slice(\n 0,\n config.labeledExamples\n ) as unknown as readonly (OUT & IN)[]\n );\n }\n }\n\n /**\n * Get optimizer-specific configuration\n * @returns Current optimizer configuration\n */\n public getConfiguration(): Record<string, unknown> {\n return {\n numCandidates: this.numCandidates,\n initTemperature: this.initTemperature,\n maxBootstrappedDemos: this.maxBootstrappedDemos,\n maxLabeledDemos: this.maxLabeledDemos,\n numTrials: this.numTrials,\n minibatch: this.minibatch,\n minibatchSize: this.minibatchSize,\n 
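A usage sketch of the compile flow above. It is hedged: it assumes AxOptimizerArgs carries the studentAI and examples fields referenced throughout this class, that a string signature in the 'input -> output' form used as the fallback above is accepted, and that the metric returns a score in [0, 1]; llm and trainingExamples are placeholder variables.

// Sketch only; llm (an AxAIService) and trainingExamples (AxExample[])
// are hypothetical, and the AxOptimizerArgs shape is assumed as noted above.
const optimizer = new AxMiPRO({
  studentAI: llm,
  examples: trainingExamples,
  options: { numTrials: 10, maxBootstrappedDemos: 2 },
});

optimizer.configureAuto('light'); // 3 candidates, 10 trials, minibatches of 20

const gen = new AxGen<{ question: string }, { answer: string }>(
  'question -> answer'
);

const metricFn: AxMetricFn = async ({ prediction, example }) =>
  (prediction as { answer?: string }).answer ===
  (example as { answer?: string }).answer
    ? 1
    : 0;

const result = await optimizer.compile(gen, metricFn);
console.log(result.bestScore, result.finalConfiguration);
// result.optimizedGen carries the best instruction and demos already applied.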
minibatchFullEvalSteps: this.minibatchFullEvalSteps,\n programAwareProposer: this.programAwareProposer,\n dataAwareProposer: this.dataAwareProposer,\n tipAwareProposer: this.tipAwareProposer,\n fewshotAwareProposer: this.fewshotAwareProposer,\n earlyStoppingTrials: this.earlyStoppingTrials,\n minImprovementThreshold: this.minImprovementThreshold,\n bayesianOptimization: this.bayesianOptimization,\n acquisitionFunction: this.acquisitionFunction,\n explorationWeight: this.explorationWeight,\n sampleCount: this.sampleCount,\n };\n }\n\n /**\n * Update optimizer configuration\n * @param config New configuration to merge with existing\n */\n public updateConfiguration(config: Readonly<Record<string, unknown>>): void {\n if (config.numCandidates !== undefined) {\n this.numCandidates = config.numCandidates as number;\n }\n if (config.initTemperature !== undefined) {\n this.initTemperature = config.initTemperature as number;\n }\n if (config.maxBootstrappedDemos !== undefined) {\n this.maxBootstrappedDemos = config.maxBootstrappedDemos as number;\n }\n if (config.maxLabeledDemos !== undefined) {\n this.maxLabeledDemos = config.maxLabeledDemos as number;\n }\n if (config.numTrials !== undefined) {\n this.numTrials = config.numTrials as number;\n }\n if (config.minibatch !== undefined) {\n this.minibatch = config.minibatch as boolean;\n }\n if (config.minibatchSize !== undefined) {\n this.minibatchSize = config.minibatchSize as number;\n }\n if (config.earlyStoppingTrials !== undefined) {\n this.earlyStoppingTrials = config.earlyStoppingTrials as number;\n }\n if (config.minImprovementThreshold !== undefined) {\n this.minImprovementThreshold = config.minImprovementThreshold as number;\n }\n if (config.sampleCount !== undefined) {\n this.sampleCount = config.sampleCount as number;\n }\n // Note: verbose is now handled by the base class and cannot be updated here\n }\n\n /**\n * Reset optimizer state for reuse with different programs\n */\n public override reset(): void {\n super.reset();\n // Reset surrogate model state\n this.miproConfigHistory = [];\n this.surrogateModel.clear();\n // Update convergence threshold after reset\n this.stats.convergenceInfo.convergenceThreshold =\n this.minImprovementThreshold;\n }\n\n /**\n * Validate that the optimizer can handle the given program\n * @param program Program to validate\n * @returns Validation result with any issues found\n */\n public validateProgram(_program: Readonly<AxGen<IN, OUT>>): {\n isValid: boolean;\n issues: string[];\n suggestions: string[];\n } {\n // Start with empty validation result\n const issues: string[] = [];\n const suggestions: string[] = [];\n\n // Add MiPRO-specific validation\n if (\n this.examples.length <\n this.maxBootstrappedDemos + this.maxLabeledDemos\n ) {\n issues.push(\n `Not enough examples: need at least ${\n this.maxBootstrappedDemos + this.maxLabeledDemos\n }, got ${this.examples.length}`\n );\n suggestions.push(\n 'Reduce maxBootstrappedDemos or maxLabeledDemos, or provide more examples'\n );\n }\n\n // Check if validation set is reasonable for MiPRO\n const validationSetSize = this.getValidationSet().length;\n if (validationSetSize < 5) {\n issues.push('Validation set too small for reliable MiPRO optimization');\n suggestions.push('Provide more examples or a larger validation set');\n }\n\n return {\n isValid: issues.length === 0,\n issues,\n suggestions,\n };\n }\n\n /**\n * Encodes a configuration into a string key for surrogate model lookup\n */\n private encodeConfiguration(config: Readonly<ConfigType>): 
string {\n return `${config.instruction.length}_${config.bootstrappedDemos}_${config.labeledExamples}`;\n }\n\n /**\n * Updates the surrogate model with a new configuration-score pair\n */\n private updateSurrogateModel(\n config: Readonly<ConfigType>,\n score: number\n ): void {\n this.miproConfigHistory.push({ config: { ...config }, score });\n\n // Simple Gaussian Process approximation for the surrogate model\n const key = this.encodeConfiguration(config);\n\n // Find similar configurations (same instruction length and demo counts)\n const similarConfigs = this.miproConfigHistory.filter(\n (entry) => this.encodeConfiguration(entry.config) === key\n );\n\n if (similarConfigs.length > 0) {\n const scores = similarConfigs.map((entry) => entry.score);\n const mean = scores.reduce((sum, s) => sum + s, 0) / scores.length;\n const variance =\n scores.length > 1\n ? scores.reduce((sum, s) => sum + (s - mean) ** 2, 0) /\n (scores.length - 1)\n : 0.1; // Default variance for single observation\n\n this.surrogateModel.set(key, { mean, variance });\n }\n }\n\n /**\n * Predicts performance using the surrogate model\n */\n private predictPerformance(config: Readonly<ConfigType>): {\n mean: number;\n variance: number;\n } {\n const key = this.encodeConfiguration(config);\n\n if (this.surrogateModel.has(key)) {\n return this.surrogateModel.get(key)!;\n }\n\n // For unseen configurations, use prior knowledge\n if (this.miproConfigHistory.length > 0) {\n // Find most similar configurations based on demo counts\n const similarities = this.miproConfigHistory.map((entry) => {\n const diff =\n Math.abs(entry.config.bootstrappedDemos - config.bootstrappedDemos) +\n Math.abs(entry.config.labeledExamples - config.labeledExamples);\n return { score: entry.score, similarity: 1 / (1 + diff) };\n });\n\n // Weighted average based on similarity\n const totalWeight = similarities.reduce(\n (sum, s) => sum + s.similarity,\n 0\n );\n const weightedMean =\n similarities.reduce((sum, s) => sum + s.score * s.similarity, 0) /\n totalWeight;\n\n return { mean: weightedMean, variance: 0.2 }; // Higher variance for unseen configs\n }\n\n // Default prior for completely unknown configurations\n return { mean: 0.5, variance: 0.3 };\n }\n\n /**\n * Calculates acquisition function value for Bayesian optimization\n */\n private calculateAcquisitionValue(config: Readonly<ConfigType>): number {\n const prediction = this.predictPerformance(config);\n const { mean, variance } = prediction;\n const std = Math.sqrt(variance);\n\n // Current best score\n const bestScore =\n this.miproConfigHistory.length > 0\n ? Math.max(...this.miproConfigHistory.map((entry) => entry.score))\n : 0;\n\n switch (this.acquisitionFunction) {\n case 'expected_improvement': {\n const improvement = mean - bestScore;\n if (std === 0) return Math.max(0, improvement);\n\n const z = improvement / std;\n const phi = 0.5 * (1 + this.erf(z / Math.sqrt(2))); // CDF of standard normal\n const pdfValue = Math.exp(-0.5 * z * z) / Math.sqrt(2 * Math.PI); // PDF of standard normal\n\n return improvement * phi + std * pdfValue;\n }\n\n case 'upper_confidence_bound': {\n return mean + this.explorationWeight * std;\n }\n\n case 'probability_improvement': {\n const improvement = mean - bestScore;\n if (std === 0) return improvement > 0 ? 
1 : 0;\n\n const z = improvement / std;\n return 0.5 * (1 + this.erf(z / Math.sqrt(2)));\n }\n\n default:\n return mean;\n }\n }\n\n /**\n * Error function approximation for acquisition function calculations\n */\n private erf(x: number): number {\n // Abramowitz and Stegun approximation\n const a1 = 0.254829592;\n const a2 = -0.284496736;\n const a3 = 1.421413741;\n const a4 = -1.453152027;\n const a5 = 1.061405429;\n const p = 0.3275911;\n\n const sign = x >= 0 ? 1 : -1;\n const absX = Math.abs(x);\n\n const t = 1.0 / (1.0 + p * absX);\n const y =\n 1.0 -\n ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) *\n t *\n Math.exp(-absX * absX);\n\n return sign * y;\n }\n\n /**\n * Selects the next configuration to evaluate using Bayesian optimization\n */\n private async selectConfigurationViaBayesianOptimization(\n instructions: readonly string[],\n bootstrappedDemos: readonly AxProgramDemos<IN, OUT>[],\n labeledExamples: readonly AxExample[]\n ): Promise<ConfigType> {\n const candidates: Array<{ config: ConfigType; acquisitionValue: number }> =\n [];\n\n // Generate candidate configurations\n const numCandidates = Math.min(20, instructions.length * 3); // Reasonable number of candidates\n\n for (let i = 0; i < numCandidates; i++) {\n const config: ConfigType = {\n instruction:\n instructions[i % instructions.length] || instructions[0] || '',\n bootstrappedDemos: Math.min(\n Math.floor(Math.random() * (bootstrappedDemos.length + 1)),\n this.maxBootstrappedDemos\n ),\n labeledExamples: Math.min(\n Math.floor(Math.random() * (labeledExamples.length + 1)),\n this.maxLabeledDemos\n ),\n };\n\n const acquisitionValue = this.calculateAcquisitionValue(config);\n candidates.push({ config, acquisitionValue });\n }\n\n // Sort by acquisition value (higher is better)\n candidates.sort((a, b) => b.acquisitionValue - a.acquisitionValue);\n\n // Return the most promising configuration\n return candidates[0]!.config;\n }\n}\n\n// ---------------------------------------\n// Helper: Majority-vote result picker for self-consistency\n// ---------------------------------------\nconst axMajorityVotePicker = <\n OUT extends AxGenOut,\n>(): AxResultPickerFunction<OUT> => {\n // Return a picker function capturing no external state\n return async (data) => {\n // If we have field results, do majority vote on stringified payload\n if (data.type === 'fields') {\n const counts: Record<string, { count: number; index: number }> = {};\n for (const { index, sample } of data.results) {\n const key = JSON.stringify(sample);\n if (!counts[key]) {\n counts[key] = { count: 0, index };\n }\n counts[key]!.count += 1;\n }\n\n // Select the sample with highest count (ties -> first seen)\n let bestKey: string | undefined;\n let bestCount = -1;\n for (const [k, v] of Object.entries(counts)) {\n if (v.count > bestCount) {\n bestCount = v.count;\n bestKey = k;\n }\n }\n return counts[bestKey!]?.index ?? 0;\n }\n\n // For function results, fall back to first sample (could be improved)\n return data.results[0]?.index ?? 
0;\n };\n};\n","import type { AxMCPTransport } from './transport.js';\nimport type {\n JSONRPCNotification,\n JSONRPCRequest,\n JSONRPCResponse,\n} from './types.js';\n\nexport class AxMCPHTTPSSETransport implements AxMCPTransport {\n private endpoint: string | null = null;\n private sseUrl: string;\n private eventSource?: EventSource;\n\n constructor(sseUrl: string) {\n this.sseUrl = sseUrl;\n }\n\n async connect(): Promise<void> {\n return new Promise((resolve, reject) => {\n this.eventSource = new EventSource(this.sseUrl);\n\n this.eventSource.addEventListener('endpoint', (event: Event) => {\n try {\n const messageEvent = event as MessageEvent;\n const data = JSON.parse(messageEvent.data);\n if (!data.uri) {\n throw new Error('Endpoint URI missing in SSE event data');\n }\n this.endpoint = data.uri;\n resolve();\n } catch (error) {\n reject(error);\n }\n });\n\n this.eventSource.onerror = () => {\n reject(new Error('Failed to establish SSE connection'));\n };\n });\n }\n\n async send(\n message: JSONRPCRequest<unknown> | JSONRPCNotification\n ): Promise<JSONRPCResponse<unknown>> {\n if (!this.endpoint) {\n throw new Error(\n 'HTTPTransport endpoint is not initialized. Call connect() first.'\n );\n }\n\n const res = await fetch(this.endpoint, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(message),\n });\n\n if (!res.ok) {\n throw new Error(`HTTP error ${res.status}: ${res.statusText}`);\n }\n\n return res.json() as Promise<JSONRPCResponse<unknown>>;\n }\n\n async sendNotification(\n message: Readonly<JSONRPCNotification>\n ): Promise<void> {\n if (!this.endpoint) {\n throw new Error(\n 'HTTPTransport endpoint is not initialized. Call connect() first.'\n );\n }\n await fetch(this.endpoint, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(message),\n });\n }\n}\n\nexport interface AxMCPStreamableHTTPTransportOptions {\n /**\n * Custom headers to include with all HTTP requests\n * Note: Content-Type, Accept, and Mcp-Session-Id are managed automatically\n */\n headers?: Record<string, string>;\n\n /**\n * Authorization header value (convenience for common use case)\n * If provided, will be added to the headers as 'Authorization'\n */\n authorization?: string;\n}\n\n/**\n * AxMCPStreambleHTTPTransport implements the 2025-03-26 Streamable HTTP transport specification\n * This transport uses a single HTTP endpoint that supports both POST and GET methods\n */\nexport class AxMCPStreambleHTTPTransport implements AxMCPTransport {\n private mcpEndpoint: string;\n private sessionId?: string;\n private eventSource?: EventSource;\n private pendingRequests = new Map<\n string | number,\n {\n resolve: (value: JSONRPCResponse<unknown>) => void;\n reject: (reason: unknown) => void;\n }\n >();\n private messageHandler?: (\n message: JSONRPCRequest<unknown> | JSONRPCNotification\n ) => void;\n private customHeaders: Record<string, string>;\n\n constructor(\n mcpEndpoint: string,\n options?: AxMCPStreamableHTTPTransportOptions\n ) {\n this.mcpEndpoint = mcpEndpoint;\n this.customHeaders = { ...options?.headers };\n\n // Add authorization header if provided\n if (options?.authorization) {\n this.customHeaders.Authorization = options.authorization;\n }\n }\n\n /**\n * Update custom headers (useful for refreshing tokens)\n */\n setHeaders(headers: Record<string, string>): void {\n this.customHeaders = { ...headers };\n }\n\n /**\n * Update authorization header (convenience method)\n */\n setAuthorization(authorization: 
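A usage sketch for the streamable HTTP transport defined here. The endpoint URL, bearer token, and method name are placeholders, and the request body simply follows JSON-RPC 2.0, which is what the id-matching logic in send() expects; none of those literal values come from this package.

// Sketch only; endpoint, token, and method name are placeholders.
const transport = new AxMCPStreambleHTTPTransport('https://example.com/mcp', {
  authorization: 'Bearer <token>',
});

await transport.connect(); // connection is implicit for streamable HTTP

const response = await transport.send({
  jsonrpc: '2.0',
  id: 1,
  method: 'tools/list',
  params: {},
});
console.log(response);

// If the access token is refreshed later:
transport.setAuthorization('Bearer <new-token>');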
string): void {\n this.customHeaders.Authorization = authorization;\n }\n\n /**\n * Get a copy of the current custom headers\n */\n getHeaders(): Record<string, string> {\n return { ...this.customHeaders };\n }\n\n /**\n * Build headers for HTTP requests, merging custom headers with required ones\n */\n private buildHeaders(\n baseHeaders: Record<string, string>\n ): Record<string, string> {\n const headers = { ...this.customHeaders, ...baseHeaders };\n\n if (this.sessionId) {\n headers['Mcp-Session-Id'] = this.sessionId;\n }\n\n return headers;\n }\n\n /**\n * Set a handler for incoming server messages (requests/notifications)\n */\n setMessageHandler(\n handler: (message: JSONRPCRequest<unknown> | JSONRPCNotification) => void\n ): void {\n this.messageHandler = handler;\n }\n\n async connect(): Promise<void> {\n // For Streamable HTTP, connection is implicit when making requests\n // But we can optionally open a GET SSE stream for server-initiated messages\n return Promise.resolve();\n }\n\n /**\n * Opens an SSE stream to listen for server-initiated messages\n */\n async openListeningStream(): Promise<void> {\n return new Promise((resolve, reject) => {\n const headers = this.buildHeaders({\n Accept: 'text/event-stream',\n });\n\n // Note: EventSource doesn't support custom headers in standard browsers\n // For custom headers with SSE, you may need to use fetch with ReadableStream\n // or use a library that supports custom headers\n const url = new URL(this.mcpEndpoint);\n\n // If we have custom headers, we need to use fetch instead of EventSource\n if (Object.keys(this.customHeaders).length > 0) {\n this.openListeningStreamWithFetch(headers).then(resolve).catch(reject);\n return;\n }\n\n this.eventSource = new EventSource(url.toString());\n\n this.eventSource.onopen = () => {\n resolve();\n };\n\n this.eventSource.onmessage = (event) => {\n try {\n const message = JSON.parse(event.data);\n if (this.messageHandler) {\n this.messageHandler(message);\n }\n } catch (error) {\n console.error('Failed to parse SSE message:', error);\n }\n };\n\n this.eventSource.onerror = () => {\n reject(new Error('Failed to establish SSE connection'));\n };\n });\n }\n\n /**\n * Opens an SSE stream using fetch API to support custom headers\n */\n private async openListeningStreamWithFetch(\n headers: Record<string, string>\n ): Promise<void> {\n const response = await fetch(this.mcpEndpoint, {\n method: 'GET',\n headers,\n });\n\n if (!response.ok) {\n throw new Error(\n `Failed to open SSE stream: ${response.status} ${response.statusText}`\n );\n }\n\n if (!response.body) {\n throw new Error('No response body available for SSE stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n const processStream = async (): Promise<void> => {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n reader.releaseLock();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || ''; // Keep incomplete line in buffer\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const data = line.slice(6); // Remove 'data: ' prefix\n if (data === '[DONE]') {\n return;\n }\n\n try {\n const message = JSON.parse(data);\n if (this.messageHandler) {\n this.messageHandler(message);\n }\n } catch (error) {\n console.error('Failed to parse SSE data:', error);\n }\n }\n }\n\n // Continue reading\n await processStream();\n } catch (error) {\n reader.releaseLock();\n throw 
error;\n }\n };\n\n await processStream();\n }\n\n async send(\n message: Readonly<JSONRPCRequest<unknown>>\n ): Promise<JSONRPCResponse<unknown>> {\n const headers = this.buildHeaders({\n 'Content-Type': 'application/json',\n Accept: 'application/json, text/event-stream',\n });\n\n const response = await fetch(this.mcpEndpoint, {\n method: 'POST',\n headers,\n body: JSON.stringify(message),\n });\n\n if (!response.ok) {\n if (response.status === 404 && this.sessionId) {\n // Session expired, clear it\n this.sessionId = undefined;\n throw new Error('Session expired. Please reinitialize.');\n }\n throw new Error(`HTTP error ${response.status}: ${response.statusText}`);\n }\n\n // Check if this is the initialization response with session ID\n const sessionIdHeader = response.headers.get('Mcp-Session-Id');\n if (sessionIdHeader) {\n this.sessionId = sessionIdHeader;\n }\n\n const contentType = response.headers.get('Content-Type');\n\n if (contentType?.includes('text/event-stream')) {\n // Handle SSE response\n return this.handleSSEResponse(response, message.id);\n }\n if (contentType?.includes('application/json')) {\n // Handle JSON response\n return response.json() as Promise<JSONRPCResponse<unknown>>;\n }\n throw new Error(`Unexpected content type: ${contentType}`);\n }\n\n private async handleSSEResponse(\n response: Response,\n requestId: string | number\n ): Promise<JSONRPCResponse<unknown>> {\n return new Promise((resolve, reject) => {\n const reader = response.body?.getReader();\n if (!reader) {\n reject(new Error('No response body reader available'));\n return;\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n\n const processChunk = async (): Promise<void> => {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n reader.releaseLock();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || ''; // Keep incomplete line in buffer\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const data = line.slice(6); // Remove 'data: ' prefix\n if (data === '[DONE]') {\n return;\n }\n\n try {\n const message = JSON.parse(data);\n\n // Check if this is the response to our request\n if ('id' in message && message.id === requestId) {\n resolve(message as JSONRPCResponse<unknown>);\n return;\n }\n\n // Handle other messages (server requests/notifications)\n if (this.messageHandler) {\n this.messageHandler(message);\n }\n } catch (error) {\n console.error('Failed to parse SSE data:', error);\n }\n }\n }\n\n // Continue reading\n await processChunk();\n } catch (error) {\n reader.releaseLock();\n reject(error);\n }\n };\n\n processChunk().catch(reject);\n });\n }\n\n async sendNotification(\n message: Readonly<JSONRPCNotification>\n ): Promise<void> {\n const headers = this.buildHeaders({\n 'Content-Type': 'application/json',\n Accept: 'application/json, text/event-stream',\n });\n\n const response = await fetch(this.mcpEndpoint, {\n method: 'POST',\n headers,\n body: JSON.stringify(message),\n });\n\n if (!response.ok) {\n if (response.status === 404 && this.sessionId) {\n // Session expired, clear it\n this.sessionId = undefined;\n throw new Error('Session expired. 
Please reinitialize.');\n }\n throw new Error(`HTTP error ${response.status}: ${response.statusText}`);\n }\n\n // For notifications, we expect 202 Accepted with no body\n if (response.status !== 202) {\n console.warn(`Unexpected status for notification: ${response.status}`);\n }\n }\n\n /**\n * Explicitly terminate the session (if supported by server)\n */\n async terminateSession(): Promise<void> {\n if (!this.sessionId) {\n return;\n }\n\n try {\n const headers = this.buildHeaders({});\n\n const response = await fetch(this.mcpEndpoint, {\n method: 'DELETE',\n headers,\n });\n\n if (response.status === 405) {\n // Server doesn't support explicit session termination\n console.info('Server does not support explicit session termination');\n }\n } catch (error) {\n console.error('Failed to terminate session:', error);\n } finally {\n this.sessionId = undefined;\n }\n }\n\n /**\n * Close any open connections\n */\n close(): void {\n if (this.eventSource) {\n this.eventSource.close();\n this.eventSource = undefined;\n }\n }\n}\n","import type {\n AxAIModelList,\n AxAIService,\n AxFunction,\n AxFunctionHandler,\n AxFunctionJSONSchema,\n} from '../ai/types.js';\nimport type { AxInputFunctionType } from '../dsp/functions.js';\nimport { AxGen } from '../dsp/generate.js';\nimport type { AxSignature } from '../dsp/sig.js';\nimport type {\n AxGenIn,\n AxGenOut,\n AxGenStreamingOut,\n AxMessage,\n AxProgramDemos,\n AxProgramExamples,\n AxProgramForwardOptions,\n AxProgrammable,\n AxProgramStreamingForwardOptions,\n AxSetExamplesOptions,\n AxTunable,\n AxUsable,\n} from '../dsp/types.js';\n\n/**\n * Interface for agents that can be used as child agents.\n * Provides methods to get the agent's function definition and features.\n */\nexport interface AxAgentic<IN extends AxGenIn, OUT extends AxGenOut>\n extends AxProgrammable<IN, OUT> {\n getFunction(): AxFunction;\n getFeatures(): AxAgentFeatures;\n}\n\nexport type AxAgentOptions = Omit<AxProgramForwardOptions, 'functions'> & {\n disableSmartModelRouting?: boolean;\n /** List of field names that should not be automatically passed from parent to child agents */\n excludeFieldsFromPassthrough?: string[];\n debug?: boolean;\n};\n\nexport interface AxAgentFeatures {\n /** Whether this agent can use smart model routing (requires an AI service) */\n canConfigureSmartModelRouting: boolean;\n /** List of fields that this agent excludes from parent->child value passing */\n excludeFieldsFromPassthrough: string[];\n}\n\n/**\n * Processes a child agent's function, applying model routing and input injection as needed.\n * Handles both the schema modifications and function wrapping.\n */\nfunction processChildAgentFunction<IN extends AxGenIn>(\n childFunction: Readonly<AxFunction>,\n parentValues: IN | AxMessage<IN>[],\n parentInputKeys: string[],\n modelList: AxAIModelList | undefined,\n options: Readonly<{\n debug: boolean;\n disableSmartModelRouting: boolean;\n excludeFieldsFromPassthrough: string[];\n canConfigureSmartModelRouting: boolean;\n }>\n): AxFunction {\n const processedFunction = { ...childFunction };\n\n // Process input field injection\n if (processedFunction.parameters) {\n const childKeys = processedFunction.parameters.properties\n ? 
Object.keys(processedFunction.parameters.properties)\n : [];\n\n // Find common keys between parent and child, excluding 'model' and specified exclusions\n const commonKeys = parentInputKeys\n .filter((key) => childKeys.includes(key))\n .filter((key) => key !== 'model');\n const injectionKeys = commonKeys.filter(\n (key) => !options.excludeFieldsFromPassthrough.includes(key)\n );\n\n if (injectionKeys.length > 0) {\n // Remove injected fields from child schema\n processedFunction.parameters = removePropertiesFromSchema(\n processedFunction.parameters,\n injectionKeys\n );\n\n // Wrap function to inject parent values\n const originalFunc = processedFunction.func;\n // add debug logging if enabled\n processedFunction.func = async (childArgs, funcOptions) => {\n // Extract values from parentValues - handle both IN and AxMessage<IN>[] cases\n let valuesToInject: Partial<IN> = {};\n if (Array.isArray(parentValues)) {\n // If parentValues is an array of messages, find the most recent user message\n const lastUserMessage = parentValues\n .filter((msg) => msg.role === 'user')\n .pop();\n if (lastUserMessage) {\n valuesToInject = pick(\n lastUserMessage.values,\n injectionKeys as (keyof IN)[]\n );\n }\n } else {\n // If parentValues is a single IN object\n valuesToInject = pick(parentValues, injectionKeys as (keyof IN)[]);\n }\n\n const updatedChildArgs = {\n ...childArgs,\n ...valuesToInject,\n };\n\n return await originalFunc(updatedChildArgs, funcOptions);\n };\n }\n\n return processedFunction;\n }\n\n // Apply smart model routing if enabled\n if (\n modelList &&\n !options.disableSmartModelRouting &&\n options.canConfigureSmartModelRouting\n ) {\n processedFunction.parameters = addModelParameter(\n processedFunction.parameters,\n modelList\n );\n }\n\n return processedFunction;\n}\n\nconst descriptionError = new Error(\n 'Agent description must be at least 20 characters (explain in detail what the agent does)'\n);\n\nconst definitionError = new Error(\n 'Agent definition is the prompt you give to the LLM for the agent. It must be detailed and at least 100 characters'\n);\n\n/**\n * An AI agent that can process inputs using an AI service and coordinate with child agents.\n * Supports features like smart model routing and automatic input field passing to child agents.\n */\nexport class AxAgent<IN extends AxGenIn, OUT extends AxGenOut>\n implements AxAgentic<IN, OUT>\n{\n private ai?: AxAIService;\n private program: AxGen<IN, OUT>;\n private functions?: AxInputFunctionType;\n private agents?: AxAgentic<IN, OUT>[];\n private disableSmartModelRouting?: boolean;\n private excludeFieldsFromPassthrough: string[];\n private debug?: boolean;\n\n private name: string;\n // private subAgentList?: string\n private func: AxFunction;\n\n constructor(\n {\n ai,\n name,\n description,\n definition,\n signature,\n agents,\n functions,\n }: Readonly<{\n ai?: Readonly<AxAIService>;\n name: string;\n description: string;\n definition?: string;\n signature: NonNullable<ConstructorParameters<typeof AxSignature>[0]>;\n agents?: AxAgentic<IN, OUT>[];\n functions?: AxInputFunctionType;\n }>,\n options?: Readonly<AxAgentOptions>\n ) {\n const { disableSmartModelRouting, excludeFieldsFromPassthrough, debug } =\n options ?? {};\n\n this.ai = ai;\n this.agents = agents;\n this.functions = functions;\n this.disableSmartModelRouting = disableSmartModelRouting;\n this.excludeFieldsFromPassthrough = excludeFieldsFromPassthrough ?? 
[];\n this.debug = debug;\n\n if (!name || name.length < 5) {\n throw new Error(\n 'Agent name must be at least 10 characters (more descriptive)'\n );\n }\n\n if (!description || description.length < 20) {\n throw descriptionError;\n }\n\n if (definition && definition.length < 100) {\n throw definitionError;\n }\n\n this.program = new AxGen<IN, OUT>(signature, {\n ...options,\n description: definition ?? description,\n });\n\n for (const agent of agents ?? []) {\n this.program.register(\n agent as unknown as Readonly<AxTunable<IN, OUT> & AxUsable>\n );\n }\n\n this.name = name;\n // this.subAgentList = agents?.map((a) => a.getFunction().name).join(', ')\n\n this.func = {\n name: toCamelCase(this.name),\n description,\n parameters: this.program.getSignature().toJSONSchema(),\n func: () => this.forward,\n };\n\n const mm = ai?.getModelList();\n // Only add model parameter if smart routing is enabled and model list exists\n if (mm && !this.disableSmartModelRouting) {\n this.func.parameters = addModelParameter(this.func.parameters, mm);\n }\n }\n\n public setExamples(\n examples: Readonly<AxProgramExamples<IN, OUT>>,\n options?: Readonly<AxSetExamplesOptions>\n ) {\n this.program.setExamples(examples, options);\n }\n\n public setId(id: string) {\n this.program.setId(id);\n }\n\n public setParentId(parentId: string) {\n this.program.setParentId(parentId);\n }\n\n public getTraces() {\n return this.program.getTraces();\n }\n\n public setDemos(demos: readonly AxProgramDemos<IN, OUT>[]) {\n this.program.setDemos(demos);\n }\n\n public getUsage() {\n return this.program.getUsage();\n }\n\n public resetUsage() {\n this.program.resetUsage();\n }\n\n public getFunction(): AxFunction {\n const boundFunc = this.forward.bind(this);\n\n // Create a wrapper function that excludes the 'ai' parameter\n const wrappedFunc: AxFunctionHandler = async (\n valuesAndModel: IN & { model: string },\n options?\n ): Promise<string> => {\n const { model, ...values } = valuesAndModel;\n\n const ai = this.ai ?? options?.ai;\n if (!ai) {\n throw new Error('AI service is required to run the agent');\n }\n const ret = await boundFunc(ai, values as unknown as IN, {\n ...options,\n model,\n });\n\n const sig = this.program.getSignature();\n const outFields = sig.getOutputFields();\n const result = Object.keys(ret)\n .map((k) => {\n const field = outFields.find((f) => f.name === k);\n if (field) {\n return `${field.title}: ${ret[k]}`;\n }\n return `${k}: ${ret[k]}`;\n })\n .join('\\n');\n\n return result;\n };\n\n return {\n ...this.func,\n func: wrappedFunc,\n };\n }\n\n public getFeatures(): AxAgentFeatures {\n return {\n canConfigureSmartModelRouting: this.ai === undefined,\n excludeFieldsFromPassthrough: this.excludeFieldsFromPassthrough,\n };\n }\n\n /**\n * Initializes the agent's execution context, processing child agents and their functions.\n */\n private init(\n parentAi: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options: Readonly<AxProgramForwardOptions> | undefined\n ) {\n const ai = this.ai ?? 
parentAi;\n const mm = ai?.getModelList();\n\n // Get parent's input schema and keys\n const parentSchema = this.program.getSignature().getInputFields();\n const parentKeys = parentSchema.map((p) => p.name);\n const debug = this.getDebug(ai, options);\n\n // Process each child agent's function\n const agentFuncs = this.agents?.map((agent) => {\n const f = agent.getFeatures();\n\n const processOptions = {\n debug,\n disableSmartModelRouting: !!this.disableSmartModelRouting,\n excludeFieldsFromPassthrough: f.excludeFieldsFromPassthrough,\n canConfigureSmartModelRouting: f.canConfigureSmartModelRouting,\n };\n\n return processChildAgentFunction(\n agent.getFunction(),\n values,\n parentKeys,\n mm,\n processOptions\n );\n });\n\n // Combine all functions\n const functions: AxInputFunctionType = [\n ...(options?.functions ?? this.functions ?? []),\n ...(agentFuncs ?? []),\n ];\n\n return { ai, functions, debug };\n }\n\n public async forward(\n parentAi: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options?: Readonly<AxProgramForwardOptions>\n ): Promise<OUT> {\n const { ai, functions, debug } = this.init(parentAi, values, options);\n return await this.program.forward(ai, values, {\n ...options,\n debug,\n functions,\n });\n }\n\n public async *streamingForward(\n parentAi: Readonly<AxAIService>,\n values: IN | AxMessage<IN>[],\n options?: Readonly<AxProgramStreamingForwardOptions>\n ): AxGenStreamingOut<OUT> {\n const { ai, functions, debug } = this.init(parentAi, values, options);\n return yield* this.program.streamingForward(ai, values, {\n ...options,\n debug,\n functions,\n });\n }\n\n /**\n * Updates the agent's description.\n * This updates both the stored description and the function's description.\n *\n * @param description - New description for the agent (must be at least 20 characters)\n * @throws Error if description is too short\n */\n public setDescription(description: string): void {\n if (!description || description.length < 20) {\n throw descriptionError;\n }\n\n this.program.getSignature().setDescription(description);\n this.func.description = description;\n }\n\n public setDefinition(definition: string): void {\n if (!definition || definition.length < 100) {\n throw definitionError;\n }\n\n this.program.setDescription(definition);\n this.func.description = definition;\n }\n\n public getSignature(): AxSignature {\n return this.program.getSignature();\n }\n\n public setSignature(\n signature: NonNullable<ConstructorParameters<typeof AxSignature>[0]>\n ) {\n this.program.setSignature(signature);\n }\n\n private getDebug(\n ai: AxAIService,\n options?: Readonly<AxProgramForwardOptions>\n ): boolean {\n return options?.debug ?? this.debug ?? ai?.getOptions()?.debug ?? 
false;\n }\n}\n\nfunction toCamelCase(inputString: string): string {\n // Split the string by any non-alphanumeric character (including underscores, spaces, hyphens)\n const words = inputString.split(/[^a-zA-Z0-9]/);\n\n // Map through each word, capitalize the first letter of each word except the first word\n const camelCaseString = words\n .map((word, index) => {\n // Lowercase the word to handle cases like uppercase letters in input\n const lowerWord = word.toLowerCase();\n\n // Capitalize the first letter of each word except the first one\n if (index > 0 && lowerWord && lowerWord[0]) {\n return lowerWord[0].toUpperCase() + lowerWord.slice(1);\n }\n\n return lowerWord;\n })\n .join('');\n\n return camelCaseString;\n}\n\n/**\n * Adds a required model parameter to a JSON Schema definition based on provided model mappings.\n * The model parameter will be an enum with values from the model map keys.\n *\n * @param parameters - The original JSON Schema parameters definition (optional)\n * @param models - Array of model mappings containing keys, model names and descriptions\n * @returns Updated JSON Schema with added model parameter\n */\nexport function addModelParameter(\n parameters: AxFunctionJSONSchema | undefined,\n models: AxAIModelList\n): AxFunctionJSONSchema {\n // If parameters is undefined, create a base schema\n const baseSchema: AxFunctionJSONSchema = parameters\n ? structuredClone(parameters)\n : {\n type: 'object',\n properties: {},\n required: [],\n };\n\n // Check if model parameter already exists\n if (baseSchema.properties?.model) {\n return baseSchema;\n }\n\n // Create the model property schema\n const modelProperty: AxFunctionJSONSchema & {\n enum: string[];\n description: string;\n } = {\n type: 'string',\n enum: models.map((m) => m.key),\n description: `The AI model to use for this function call. Available options: ${models\n .map((m) => `\\`${m.key}\\` ${m.description}`)\n .join(', ')}`,\n };\n\n // Create new properties object with model parameter\n const newProperties = {\n ...(baseSchema.properties ?? {}),\n model: modelProperty,\n };\n\n // Add model to required fields\n const newRequired = [...(baseSchema.required ?? []), 'model'];\n\n // Return updated schema\n return {\n ...baseSchema,\n properties: newProperties,\n required: newRequired,\n };\n}\n\n// New helper: removePropertiesFromSchema\n// Clones a JSON schema and removes properties and required fields matching the provided keys.\nfunction removePropertiesFromSchema(\n schema: Readonly<AxFunctionJSONSchema>,\n keys: string[]\n): AxFunctionJSONSchema {\n const newSchema = structuredClone(schema);\n if (newSchema.properties) {\n for (const key of keys) {\n delete newSchema.properties[key];\n }\n }\n if (Array.isArray(newSchema.required)) {\n const filteredRequired = newSchema.required.filter(\n (r: string) => !keys.includes(r)\n );\n Object.defineProperty(newSchema, 'required', {\n value: filteredRequired,\n writable: true,\n configurable: true,\n });\n }\n return newSchema;\n}\n\n// New helper: pick\n// Returns an object composed of the picked object properties.\nfunction pick<T extends object, K extends keyof T>(\n obj: T,\n keys: K[]\n): Pick<T, K> {\n const result = {} as Pick<T, K>;\n for (const key of keys) {\n if (key in obj) {\n result[key] = obj[key];\n }\n }\n return result;\n}\n"]}
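The embedded prompts/agent.ts source above exports an addModelParameter helper that clones a function's JSON schema and injects a required `model` enum built from the service's model list, so the LLM can select a model per function call. A minimal usage sketch follows; the package-root import path and the exact shape of the AxAIModelList entries (key/model/description) are assumptions, not confirmed by this diff.

// Hypothetical sketch only; not part of the published package contents shown above.
// Assumes addModelParameter is re-exported from the package root.
import { addModelParameter } from '@ax-llm/ax';

// A plain function schema with one required input field.
const schema = {
  type: 'object',
  properties: {
    question: { type: 'string', description: 'User question' },
  },
  required: ['question'],
};

// Assumed model-list shape: entries with key, model, and description fields.
const withModel = addModelParameter(schema, [
  { key: 'fast', model: 'small-model', description: 'cheap, low latency' },
  { key: 'smart', model: 'large-model', description: 'higher quality' },
]);

// Per the source above, withModel now contains a required `model` property:
//   { type: 'string', enum: ['fast', 'smart'], description: 'The AI model to use ...' }
// and 'model' is appended to withModel.required, while the original schema is
// left untouched because the helper works on a structuredClone of its input.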