@mariozechner/pi-ai 0.18.0 → 0.18.2

This diff shows the changes between publicly released versions of the package, as published to their public registries. It is provided for informational purposes only.
@@ -1 +1 @@
- {"version":3,"file":"openai-completions.js","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAS5B,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AAe7C,OAAO,EAAE,2BAA2B,EAAE,MAAM,0BAA0B,CAAC;AACvE,OAAO,EAAE,kBAAkB,EAAE,MAAM,wBAAwB,CAAC;AAC5D,OAAO,EAAE,kBAAkB,EAAE,MAAM,8BAA8B,CAAC;AAClE,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAE3D;;;;GAIG;AACH,SAAS,cAAc,CAAC,QAAmB,EAAW;IACrD,KAAK,MAAM,GAAG,IAAI,QAAQ,EAAE,CAAC;QAC5B,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YAC/B,OAAO,IAAI,CAAC;QACb,CAAC;QACD,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YAC9B,IAAI,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,KAAK,UAAU,CAAC,EAAE,CAAC;gBAC5D,OAAO,IAAI,CAAC;YACb,CAAC;QACF,CAAC;IACF,CAAC;IACD,OAAO,KAAK,CAAC;AAAA,CACb;AAOD,MAAM,CAAC,MAAM,uBAAuB,GAAyC,CAC5E,KAAkC,EAClC,OAAgB,EAChB,OAAkC,EACJ,EAAE,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,2BAA2B,EAAE,CAAC;IAEjD,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,MAAM,GAAqB;YAChC,IAAI,EAAE,WAAW;YACjB,OAAO,EAAE,EAAE;YACX,GAAG,EAAE,KAAK,CAAC,GAAG;YACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;YACxB,KAAK,EAAE,KAAK,CAAC,EAAE;YACf,KAAK,EAAE;gBACN,KAAK,EAAE,CAAC;gBACR,MAAM,EAAE,CAAC;gBACT,SAAS,EAAE,CAAC;gBACZ,UAAU,EAAE,CAAC;gBACb,WAAW,EAAE,CAAC;gBACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;aACpE;YACD,UAAU,EAAE,MAAM;YAClB,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;SACrB,CAAC;QAEF,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;YACpD,MAAM,MAAM,GAAG,WAAW,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YACpD,MAAM,YAAY,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAC/F,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAEhD,IAAI,YAAY,GAAiF,IAAI,CAAC;YACtG,MAAM,MAAM,GAAG,MAAM,CAAC,OAAO,CAAC;YAC9B,MAAM,UAAU,GAAG,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC;YAC3C,MAAM,kBAAkB,GAAG,CAAC,KAA2B,EAAE,EAAE,CAAC;gBAC3D,IAAI,KAAK,EAAE,CAAC;oBACX,IAAI,KAAK,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC3B,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,UAAU;4BAChB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,IAAI;4BACnB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,QAAQ;4BACvB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,KAAK,CAAC,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,WAAW,IAAI,IAAI,CAAC,CAAC;wBACxD,OAAO,KAAK,CAAC,WAAW,CAAC;wBACzB,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,QAAQ,EAAE,KAAK;4BACf,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;YAAA,CACD,CAAC;YAEF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,YAAY,EAAE,CAAC;gBACxC,IAAI,KAAK,CAAC,KAAK,EAAE,CAAC;oBACjB,MAAM,YAAY,GAAG,KAAK,CAAC,KAAK,CAAC,qBAAqB,EAAE,aAAa,IAAI,CAAC,CAAC;oBAC3E,MAAM,eAAe,GAAG,KAAK,CAAC,KAAK,CAAC,yBAAyB,EAAE,gBAAgB,IAAI,CAAC,CAAC;oBACrF,MAAM,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,aAAa,IAAI,CAAC,CAAC,GAAG,YAAY,CAAC;oBAC9D,MAAM,YAAY,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,iBAAiB,IAAI,CAAC,CAAC,GAAG,eAAe,CAAC;oBAC5E,MAAM,CAAC,KAAK,GAAG;wBACd,sFAAsF;wBACtF,KAAK;wBACL,MAAM,EAAE,YAAY;wBACpB,SAAS,EAAE,YAAY;wBACvB,UAAU,EAAE,CAAC;wBACb,wEAAwE;wBACxE,qEAAqE;wBACrE,WAAW,EAAE,KAAK,GAAG,YAAY,GAAG,YAAY;wBAChD,IAAI,EAAE;4BACL,KAAK,EAAE,CAAC;4BACR,MAAM,EAAE,CAAC;4BACT,SAAS,EAAE,CAAC;4BACZ,UAAU,EAAE,CAAC;4BACb,KAAK,EAAE,CAAC;yBACR;qBACD,CAAC;oBACF,aAAa,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC;gBACpC,CAAC;gBAED,MAAM,MAAM,GAAG,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,C
AAC;gBAChC,IAAI,CAAC,MAAM;oBAAE,SAAS;gBAEtB,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;oBAC1B,MAAM,CAAC,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;gBACzD,CAAC;gBAED,IAAI,MAAM,CAAC,KAAK,EAAE,CAAC;oBAClB,IACC,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,IAAI;wBAC7B,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,SAAS;wBAClC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAC9B,CAAC;wBACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BACnD,kBAAkB,CAAC,YAAY,CAAC,CAAC;4BACjC,YAAY,GAAG,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,EAAE,EAAE,CAAC;4BAC1C,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;4BAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;wBAClF,CAAC;wBAED,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BAClC,YAAY,CAAC,IAAI,IAAI,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC;4BAC1C,MAAM,CAAC,IAAI,CAAC;gCACX,IAAI,EAAE,YAAY;gCAClB,YAAY,EAAE,UAAU,EAAE;gCAC1B,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,OAAO;gCAC3B,OAAO,EAAE,MAAM;6BACf,CAAC,CAAC;wBACJ,CAAC;oBACF,CAAC;oBAED,oEAAoE;oBACpE,mDAAmD;oBACnD,MAAM,eAAe,GAAG,CAAC,mBAAmB,EAAE,WAAW,CAAC,CAAC;oBAC3D,KAAK,MAAM,KAAK,IAAI,eAAe,EAAE,CAAC;wBACrC,IACE,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,IAAI;4BACpC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,SAAS;4BACzC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC,MAAM,GAAG,CAAC,EACtC,CAAC;4BACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACvD,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,QAAQ,EAAE,EAAE;oCACZ,iBAAiB,EAAE,KAAK;iCACxB,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,MAAM,KAAK,GAAI,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC;gCAC3C,YAAY,CAAC,QAAQ,IAAI,KAAK,CAAC;gCAC/B,MAAM,CAAC,IAAI,CAAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;oBAED,IAAI,MAAM,EAAE,KAAK,EAAE,UAAU,EAAE,CAAC;wBAC/B,KAAK,MAAM,QAAQ,IAAI,MAAM,CAAC,KAAK,CAAC,UAAU,EAAE,CAAC;4BAChD,IACC,CAAC,YAAY;gCACb,YAAY,CAAC,IAAI,KAAK,UAAU;gCAChC,CAAC,QAAQ,CAAC,EAAE,IAAI,YAAY,CAAC,EAAE,KAAK,QAAQ,CAAC,EAAE,CAAC,EAC/C,CAAC;gCACF,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,EAAE,EAAE,QAAQ,CAAC,EAAE,IAAI,EAAE;oCACrB,IAAI,EAAE,QAAQ,CAAC,QAAQ,EAAE,IAAI,IAAI,EAAE;oCACnC,SAAS,EAAE,EAAE;oCACb,WAAW,EAAE,EAAE;iCACf,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,IAAI,QAAQ,CAAC,EAAE;oCAAE,YAAY,CAAC,EAAE,GAAG,QAAQ,CAAC,EAAE,CAAC;gCAC/C,IAAI,QAAQ,CAAC,QAAQ,EAAE,IAAI;oCAAE,YAAY,CAAC,IAAI,GAAG,QAAQ,CAAC,QAAQ,CAAC,IAAI,CAAC;gCACxE,IAAI,KAAK,GAAG,EAAE,CAAC;gCACf,IAAI,QAAQ,CAAC,QAAQ,EAAE,SAAS,EAAE,CAAC;oCAClC,KAAK,GAAG,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACpC,YAAY,CAAC,WAAW,IAAI,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACxD,YAAY,CAAC,SAAS,GAAG,kBAAkB,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;gCACvE,CAAC;gCACD,MAAM,CAAC,IAAI,CAAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;gBACF,CAAC;YACF,CAAC;YAED,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAEjC,IAAI,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC;gBAC9B,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;YACxC,CAAC;YAED,IAAI,MAAM,CAAC,UAAU,KAAK,SAAS,IAAI,MAAM,CAAC,UAAU,KAAK,OAAO,EAAE,CAAC;gBACtE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;YAC5C,CAAC;YAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAA
C;YAC1E,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YAChB,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,OAAO;gBAAE,OAAQ,KAAa,CAAC,KAAK,CAAC;YAChE,MAAM,CAAC,UAAU,GAAG,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC;YACnE,MAAM,CAAC,YAAY,GAAG,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC;YACrF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,KAAK,EAAE,MAAM,EAAE,CAAC,CAAC;YACzE,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;IAAA,CACD,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd,CAAC;AAEF,SAAS,YAAY,CAAC,KAAkC,EAAE,MAAe,EAAE;IAC1E,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC;YACjC,MAAM,IAAI,KAAK,CACd,gGAAgG,CAChG,CAAC;QACH,CAAC;QACD,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC;IACrC,CAAC;IACD,OAAO,IAAI,MAAM,CAAC;QACjB,MAAM;QACN,OAAO,EAAE,KAAK,CAAC,OAAO;QACtB,uBAAuB,EAAE,IAAI;QAC7B,cAAc,EAAE,KAAK,CAAC,OAAO;KAC7B,CAAC,CAAC;AAAA,CACH;AAED,SAAS,WAAW,CAAC,KAAkC,EAAE,OAAgB,EAAE,OAAkC,EAAE;IAC9G,MAAM,MAAM,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC;IAChC,MAAM,QAAQ,GAAG,eAAe,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IAEzD,MAAM,MAAM,GAAgE;QAC3E,KAAK,EAAE,KAAK,CAAC,EAAE;QACf,QAAQ;QACR,MAAM,EAAE,IAAI;QACZ,cAAc,EAAE,EAAE,aAAa,EAAE,IAAI,EAAE;KACvC,CAAC;IAEF,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;QAC1B,MAAM,CAAC,KAAK,GAAG,KAAK,CAAC;IACtB,CAAC;IAED,IAAI,OAAO,EAAE,SAAS,EAAE,CAAC;QACxB,IAAI,MAAM,CAAC,cAAc,KAAK,YAAY,EAAE,CAAC;YAC3C,MAAc,CAAC,UAAU,GAAG,OAAO,CAAC,SAAS,CAAC;QAChD,CAAC;aAAM,CAAC;YACP,MAAM,CAAC,qBAAqB,GAAG,OAAO,CAAC,SAAS,CAAC;QAClD,CAAC;IACF,CAAC;IAED,IAAI,OAAO,EAAE,WAAW,KAAK,SAAS,EAAE,CAAC;QACxC,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,CAAC;IAC1C,CAAC;IAED,IAAI,OAAO,CAAC,KAAK,EAAE,CAAC;QACnB,MAAM,CAAC,KAAK,GAAG,YAAY,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;IAC5C,CAAC;SAAM,IAAI,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC7C,mGAAmG;QACnG,MAAM,CAAC,KAAK,GAAG,EAAE,CAAC;IACnB,CAAC;IAED,IAAI,OAAO,EAAE,UAAU,EAAE,CAAC;QACzB,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,UAAU,CAAC;IACzC,CAAC;IAED,IAAI,OAAO,EAAE,eAAe,IAAI,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,uBAAuB,EAAE,CAAC;QACnF,MAAM,CAAC,gBAAgB,GAAG,OAAO,CAAC,eAAe,CAAC;IACnD,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,eAAe,CACvB,KAAkC,EAClC,OAAgB,EAChB,MAA8B,EACC;IAC/B,MAAM,MAAM,GAAiC,EAAE,CAAC;IAEhD,MAAM,mBAAmB,GAAG,iBAAiB,CAAC,OAAO,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;IAEvE,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC1B,MAAM,gBAAgB,GAAG,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,qBAAqB,CAAC;QACzE,MAAM,IAAI,GAAG,gBAAgB,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC;QACvD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC,CAAC;IAChF,CAAC;IAED,KAAK,MAAM,GAAG,IAAI,mBAAmB,EAAE,CAAC;QACvC,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;YACzB,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;gBACrC,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,kBAAkB,CAAC,GAAG,CAAC,OAAO,CAAC;iBACxC,CAAC,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACP,MAAM,OAAO,GAAgC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,IAAI,EAA6B,EAAE,CAAC;oBACjG,IAAI,IAAI,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC1B,OAAO;4BACN,IAAI,EAAE,MAAM;4BACZ,IAAI,EAAE,kBAAkB,CAAC,IAAI,CAAC,IAAI,CAAC;yBACK,CAAC;oBAC3C,CAAC;yBAAM,CAAC;wBACP,OAAO;4BACN,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAQ,IAAI,CAAC,QAAQ,WAAW,IAAI,CAAC,IAAI,EAAE;6BAChD;yBACwC,CAAC;oBAC5C,CAAC;gBAAA,CACD,CAAC,CAAC;gBACH,MAAM,eAAe,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC;oBACrD,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,CAAC;oBAC/C,CAAC,CAAC,OAAO,CAAC;gBACX,IAAI,eAAe,CAAC,MAAM,KAAK,CAAC;oBAAE,SAAS;gBAC3C,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,eAAe;iBACxB,
CAAC,CAAC;YACJ,CAAC;QACF,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACrC,MAAM,YAAY,GAAwC;gBACzD,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,IAAI;aACb,CAAC;YAEF,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAkB,CAAC;YACjF,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC3B,YAAY,CAAC,OAAO,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;oBAC5C,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,kBAAkB,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;gBAAA,CAC1D,CAAC,CAAC;YACJ,CAAC;YAED,wDAAwD;YACxD,MAAM,cAAc,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAsB,CAAC;YAC7F,IAAI,cAAc,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC/B,+DAA+D;gBAC/D,MAAM,SAAS,GAAG,cAAc,CAAC,CAAC,CAAC,CAAC,iBAAiB,CAAC;gBACtD,IAAI,SAAS,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBACtC,YAAoB,CAAC,SAAS,CAAC,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;gBACrF,CAAC;YACF,CAAC;YAED,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAe,CAAC;YACjF,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC1B,YAAY,CAAC,UAAU,GAAG,SAAS,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC;oBAChD,EAAE,EAAE,EAAE,CAAC,EAAE;oBACT,IAAI,EAAE,UAAmB;oBACzB,QAAQ,EAAE;wBACT,IAAI,EAAE,EAAE,CAAC,IAAI;wBACb,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,SAAS,CAAC;qBACvC;iBACD,CAAC,CAAC,CAAC;YACL,CAAC;YACD,IAAI,YAAY,CAAC,OAAO,KAAK,IAAI,IAAI,CAAC,YAAY,CAAC,UAAU,EAAE,CAAC;gBAC/D,SAAS;YACV,CAAC;YACD,MAAM,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAC3B,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YACtC,iCAAiC;YACjC,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO;iBAC5B,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC;iBAChC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAE,CAAS,CAAC,IAAI,CAAC;iBAC3B,IAAI,CAAC,IAAI,CAAC,CAAC;YACb,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC;YAE9D,oEAAoE;YACpE,MAAM,OAAO,GAAG,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC;YACtC,MAAM,CAAC,IAAI,CAAC;gBACX,IAAI,EAAE,MAAM;gBACZ,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,sBAAsB,CAAC;gBAC1E,YAAY,EAAE,GAAG,CAAC,UAAU;aAC5B,CAAC,CAAC;YAEH,yFAAyF;YACzF,IAAI,SAAS,IAAI,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE,CAAC;gBAChD,MAAM,aAAa,GAEf,EAAE,CAAC;gBAEP,kBAAkB;gBAClB,aAAa,CAAC,IAAI,CAAC;oBAClB,IAAI,EAAE,MAAM;oBACZ,IAAI,EAAE,qCAAqC;iBAC3C,CAAC,CAAC;gBAEH,aAAa;gBACb,KAAK,MAAM,KAAK,IAAI,GAAG,CAAC,OAAO,EAAE,CAAC;oBACjC,IAAI,KAAK,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;wBAC5B,aAAa,CAAC,IAAI,CAAC;4BAClB,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAS,KAAa,CAAC,QAAQ,WAAY,KAAa,CAAC,IAAI,EAAE;6BACpE;yBACD,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;gBAED,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,aAAa;iBACtB,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,YAAY,CAAC,KAAa,EAAgD;IAClF,OAAO,KAAK,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC;QAC3B,IAAI,EAAE,UAAU;QAChB,QAAQ,EAAE;YACT,IAAI,EAAE,IAAI,CAAC,IAAI;YACf,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,UAAU,EAAE,IAAI,CAAC,UAAiB,EAAE,wCAAwC;SAC5E;KACD,CAAC,CAAC,CAAC;AAAA,CACJ;AAED,SAAS,aAAa,CAAC,MAAmD,EAAc;IACvF,IAAI,MAAM,KAAK,IAAI;QAAE,OAAO,MAAM,CAAC;IACnC,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,MAAM;YACV,OAAO,MAAM,CAAC;QACf,KAAK,QAAQ;YACZ,OAAO,QAAQ,CAAC;QACjB,KAAK,eAAe,CAAC;QACrB,KAAK,YAAY;YAChB,OAAO,SAAS,CAAC;QAClB,KAAK,gBAAgB;YACpB,OAAO,OAAO,CAAC;QAChB,SAAS,CAAC;YACT,MAAM,WAAW,GAAU,MAAM,CAAC;YAClC,MAAM,IAAI,KAAK,CAAC,0BAA0B,WAAW,EAAE,CAAC,CAAC;QAC1D,CAAC;IACF,CAAC;AAAA,CACD;AAED;;;GAGG;AACH,SAAS,mBAAmB,CAAC,OAAe,EAA0B;IACrE,MAAM,aAAa,GAClB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAC;QAC/B,OAAO,CA
AC,QAAQ,CAAC,UAAU,CAAC;QAC5B,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC;QAC9B,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAE/B,MAAM,YAAY,GAAG,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,IAAI,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAErF,MAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAE5C,OAAO;QACN,aAAa,EAAE,CAAC,aAAa;QAC7B,qBAAqB,EAAE,CAAC,aAAa;QACrC,uBAAuB,EAAE,CAAC,MAAM;QAChC,cAAc,EAAE,YAAY,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,uBAAuB;KACrE,CAAC;AAAA,CACF;AAED;;;GAGG;AACH,SAAS,SAAS,CAAC,KAAkC,EAA0B;IAC9E,MAAM,QAAQ,GAAG,mBAAmB,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;IACpD,IAAI,CAAC,KAAK,CAAC,MAAM;QAAE,OAAO,QAAQ,CAAC;IAEnC,OAAO;QACN,aAAa,EAAE,KAAK,CAAC,MAAM,CAAC,aAAa,IAAI,QAAQ,CAAC,aAAa;QACnE,qBAAqB,EAAE,KAAK,CAAC,MAAM,CAAC,qBAAqB,IAAI,QAAQ,CAAC,qBAAqB;QAC3F,uBAAuB,EAAE,KAAK,CAAC,MAAM,CAAC,uBAAuB,IAAI,QAAQ,CAAC,uBAAuB;QACjG,cAAc,EAAE,KAAK,CAAC,MAAM,CAAC,cAAc,IAAI,QAAQ,CAAC,cAAc;KACtE,CAAC;AAAA,CACF","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tChatCompletionAssistantMessageParam,\n\tChatCompletionChunk,\n\tChatCompletionContentPart,\n\tChatCompletionContentPartImage,\n\tChatCompletionContentPartText,\n\tChatCompletionMessageParam,\n} from \"openai/resources/chat/completions.js\";\nimport { calculateCost } from \"../models.js\";\nimport type {\n\tAssistantMessage,\n\tContext,\n\tMessage,\n\tModel,\n\tOpenAICompat,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { transformMessages } from \"./transorm-messages.js\";\n\n/**\n * Check if conversation messages contain tool calls or tool results.\n * This is needed because Anthropic (via proxy) requires the tools param\n * to be present when messages include tool_calls or tool role messages.\n */\nfunction hasToolHistory(messages: Message[]): boolean {\n\tfor (const msg of messages) {\n\t\tif (msg.role === \"toolResult\") {\n\t\t\treturn true;\n\t\t}\n\t\tif (msg.role === \"assistant\") {\n\t\t\tif (msg.content.some((block) => block.type === \"toolCall\")) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\nexport interface OpenAICompletionsOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"required\" | { type: \"function\"; function: { name: string } };\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\nexport const streamOpenAICompletions: StreamFunction<\"openai-completions\"> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: OpenAICompletionsOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst client = createClient(model, options?.apiKey);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\tconst openaiStream = await client.chat.completions.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", 
partial: output });\n\n\t\t\tlet currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tconst finishCurrentBlock = (block?: typeof currentBlock) => {\n\t\t\t\tif (block) {\n\t\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\t\tblock.arguments = JSON.parse(block.partialArgs || \"{}\");\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null &&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai 
compatible endpoints)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\"];\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\t\tthinkingSignature: field,\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\t\tconst delta = (choice.delta as any)[field];\n\t\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif (toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unkown error ocurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? 
error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nfunction createClient(model: Model<\"openai-completions\">, apiKey?: string) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: model.headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst compat = getCompat(model);\n\tconst messages = convertMessages(model, context, compat);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t\tstream_options: { include_usage: true },\n\t};\n\n\tif (compat.supportsStore) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\tif (compat.maxTokensField === \"max_tokens\") {\n\t\t\t(params as any).max_tokens = options.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t} else if (hasToolHistory(context.messages)) {\n\t\t// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results\n\t\tparams.tools = [];\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\tif (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\treturn params;\n}\n\nfunction convertMessages(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tcompat: Required<OpenAICompat>,\n): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst transformedMessages = transformMessages(context.messages, model);\n\n\tif (context.systemPrompt) {\n\t\tconst useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;\n\t\tconst role = useDeveloperRole ? \"developer\" : \"system\";\n\t\tparams.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });\n\t}\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: sanitizeSurrogates(msg.content),\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? 
content.filter((c) => c.type !== \"image_url\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\tconst assistantMsg: ChatCompletionAssistantMessageParam = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: null,\n\t\t\t};\n\n\t\t\tconst textBlocks = msg.content.filter((b) => b.type === \"text\") as TextContent[];\n\t\t\tif (textBlocks.length > 0) {\n\t\t\t\tassistantMsg.content = textBlocks.map((b) => {\n\t\t\t\t\treturn { type: \"text\", text: sanitizeSurrogates(b.text) };\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Handle thinking blocks for llama.cpp server + gpt-oss\n\t\t\tconst thinkingBlocks = msg.content.filter((b) => b.type === \"thinking\") as ThinkingContent[];\n\t\t\tif (thinkingBlocks.length > 0) {\n\t\t\t\t// Use the signature from the first thinking block if available\n\t\t\t\tconst signature = thinkingBlocks[0].thinkingSignature;\n\t\t\t\tif (signature && signature.length > 0) {\n\t\t\t\t\t(assistantMsg as any)[signature] = thinkingBlocks.map((b) => b.thinking).join(\"\\n\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst toolCalls = msg.content.filter((b) => b.type === \"toolCall\") as ToolCall[];\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tassistantMsg.tool_calls = toolCalls.map((tc) => ({\n\t\t\t\t\tid: tc.id,\n\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\tfunction: {\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targuments: JSON.stringify(tc.arguments),\n\t\t\t\t\t},\n\t\t\t\t}));\n\t\t\t}\n\t\t\tif (assistantMsg.content === null && !assistantMsg.tool_calls) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tparams.push(assistantMsg);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textResult = msg.content\n\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t.join(\"\\n\");\n\t\t\tconst hasImages = msg.content.some((c) => c.type === \"image\");\n\n\t\t\t// Always send tool result with text (or placeholder if only images)\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\tparams.push({\n\t\t\t\trole: \"tool\",\n\t\t\t\tcontent: sanitizeSurrogates(hasText ? 
textResult : \"(see attached image)\"),\n\t\t\t\ttool_call_id: msg.toolCallId,\n\t\t\t});\n\n\t\t\t// If there are images and model supports them, send a follow-up user message with images\n\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\tconst contentBlocks: Array<\n\t\t\t\t\t{ type: \"text\"; text: string } | { type: \"image_url\"; image_url: { url: string } }\n\t\t\t\t> = [];\n\n\t\t\t\t// Add text prefix\n\t\t\t\tcontentBlocks.push({\n\t\t\t\t\ttype: \"text\",\n\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t});\n\n\t\t\t\t// Add images\n\t\t\t\tfor (const block of msg.content) {\n\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\tcontentBlocks.push({\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: contentBlocks,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\t},\n\t}));\n}\n\nfunction mapStopReason(reason: ChatCompletionChunk.Choice[\"finish_reason\"]): StopReason {\n\tif (reason === null) return \"stop\";\n\tswitch (reason) {\n\t\tcase \"stop\":\n\t\t\treturn \"stop\";\n\t\tcase \"length\":\n\t\t\treturn \"length\";\n\t\tcase \"function_call\":\n\t\tcase \"tool_calls\":\n\t\t\treturn \"toolUse\";\n\t\tcase \"content_filter\":\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\n/**\n * Detect compatibility settings from baseUrl for known providers.\n * Returns a fully resolved OpenAICompat object with all fields set.\n */\nfunction detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {\n\tconst isNonStandard =\n\t\tbaseUrl.includes(\"cerebras.ai\") ||\n\t\tbaseUrl.includes(\"api.x.ai\") ||\n\t\tbaseUrl.includes(\"mistral.ai\") ||\n\t\tbaseUrl.includes(\"chutes.ai\");\n\n\tconst useMaxTokens = baseUrl.includes(\"mistral.ai\") || baseUrl.includes(\"chutes.ai\");\n\n\tconst isGrok = baseUrl.includes(\"api.x.ai\");\n\n\treturn {\n\t\tsupportsStore: !isNonStandard,\n\t\tsupportsDeveloperRole: !isNonStandard,\n\t\tsupportsReasoningEffort: !isGrok,\n\t\tmaxTokensField: useMaxTokens ? \"max_tokens\" : \"max_completion_tokens\",\n\t};\n}\n\n/**\n * Get resolved compatibility settings for a model.\n * Uses explicit model.compat if provided, otherwise auto-detects from URL.\n */\nfunction getCompat(model: Model<\"openai-completions\">): Required<OpenAICompat> {\n\tconst detected = detectCompatFromUrl(model.baseUrl);\n\tif (!model.compat) return detected;\n\n\treturn {\n\t\tsupportsStore: model.compat.supportsStore ?? detected.supportsStore,\n\t\tsupportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,\n\t\tsupportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,\n\t\tmaxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,\n\t};\n}\n"]}
+ {"version":3,"file":"openai-completions.js","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAU5B,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AAe7C,OAAO,EAAE,2BAA2B,EAAE,MAAM,0BAA0B,CAAC;AACvE,OAAO,EAAE,kBAAkB,EAAE,MAAM,wBAAwB,CAAC;AAC5D,OAAO,EAAE,kBAAkB,EAAE,MAAM,8BAA8B,CAAC;AAClE,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAE3D;;;GAGG;AACH,SAAS,sBAAsB,CAAC,EAAU,EAAE,SAAkB,EAAU;IACvE,IAAI,CAAC,SAAS;QAAE,OAAO,EAAE,CAAC;IAC1B,qCAAqC;IACrC,IAAI,UAAU,GAAG,EAAE,CAAC,OAAO,CAAC,eAAe,EAAE,EAAE,CAAC,CAAC;IACjD,wCAAwC;IACxC,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAC3B,4EAA4E;QAC5E,MAAM,OAAO,GAAG,WAAW,CAAC;QAC5B,UAAU,GAAG,UAAU,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC;IACnE,CAAC;SAAM,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAClC,UAAU,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;IACrC,CAAC;IACD,OAAO,UAAU,CAAC;AAAA,CAClB;AAED;;;;GAIG;AACH,SAAS,cAAc,CAAC,QAAmB,EAAW;IACrD,KAAK,MAAM,GAAG,IAAI,QAAQ,EAAE,CAAC;QAC5B,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YAC/B,OAAO,IAAI,CAAC;QACb,CAAC;QACD,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YAC9B,IAAI,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,KAAK,UAAU,CAAC,EAAE,CAAC;gBAC5D,OAAO,IAAI,CAAC;YACb,CAAC;QACF,CAAC;IACF,CAAC;IACD,OAAO,KAAK,CAAC;AAAA,CACb;AAOD,MAAM,CAAC,MAAM,uBAAuB,GAAyC,CAC5E,KAAkC,EAClC,OAAgB,EAChB,OAAkC,EACJ,EAAE,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,2BAA2B,EAAE,CAAC;IAEjD,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,MAAM,GAAqB;YAChC,IAAI,EAAE,WAAW;YACjB,OAAO,EAAE,EAAE;YACX,GAAG,EAAE,KAAK,CAAC,GAAG;YACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;YACxB,KAAK,EAAE,KAAK,CAAC,EAAE;YACf,KAAK,EAAE;gBACN,KAAK,EAAE,CAAC;gBACR,MAAM,EAAE,CAAC;gBACT,SAAS,EAAE,CAAC;gBACZ,UAAU,EAAE,CAAC;gBACb,WAAW,EAAE,CAAC;gBACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;aACpE;YACD,UAAU,EAAE,MAAM;YAClB,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;SACrB,CAAC;QAEF,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;YACpD,MAAM,MAAM,GAAG,WAAW,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YACpD,MAAM,YAAY,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAC/F,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAEhD,IAAI,YAAY,GAAiF,IAAI,CAAC;YACtG,MAAM,MAAM,GAAG,MAAM,CAAC,OAAO,CAAC;YAC9B,MAAM,UAAU,GAAG,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC;YAC3C,MAAM,kBAAkB,GAAG,CAAC,KAA2B,EAAE,EAAE,CAAC;gBAC3D,IAAI,KAAK,EAAE,CAAC;oBACX,IAAI,KAAK,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC3B,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,UAAU;4BAChB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,IAAI;4BACnB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,QAAQ;4BACvB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,KAAK,CAAC,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,WAAW,IAAI,IAAI,CAAC,CAAC;wBACxD,OAAO,KAAK,CAAC,WAAW,CAAC;wBACzB,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,QAAQ,EAAE,KAAK;4BACf,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;YAAA,CACD,CAAC;YAEF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,YAAY,EAAE,CAAC;gBACxC,IAAI,KAAK,CAAC,KAAK,EAAE,CAAC;oBACjB,MAAM,YAAY,GAAG,KAAK,CAAC,KAAK,CAAC,qBAAqB,EAAE,aAAa,IAAI,CAAC,CAAC;oBAC3E,MAAM,eAAe,GAAG,KAAK,CAAC,KAAK,CAAC,yBAAyB,EAAE,gBAAgB,IAAI,CAAC,CAAC;oBACrF,MAAM,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,aAAa,IAAI,CAAC,CAAC,GAAG,YAAY,C
AAC;oBAC9D,MAAM,YAAY,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,iBAAiB,IAAI,CAAC,CAAC,GAAG,eAAe,CAAC;oBAC5E,MAAM,CAAC,KAAK,GAAG;wBACd,sFAAsF;wBACtF,KAAK;wBACL,MAAM,EAAE,YAAY;wBACpB,SAAS,EAAE,YAAY;wBACvB,UAAU,EAAE,CAAC;wBACb,wEAAwE;wBACxE,qEAAqE;wBACrE,WAAW,EAAE,KAAK,GAAG,YAAY,GAAG,YAAY;wBAChD,IAAI,EAAE;4BACL,KAAK,EAAE,CAAC;4BACR,MAAM,EAAE,CAAC;4BACT,SAAS,EAAE,CAAC;4BACZ,UAAU,EAAE,CAAC;4BACb,KAAK,EAAE,CAAC;yBACR;qBACD,CAAC;oBACF,aAAa,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC;gBACpC,CAAC;gBAED,MAAM,MAAM,GAAG,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;gBAChC,IAAI,CAAC,MAAM;oBAAE,SAAS;gBAEtB,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;oBAC1B,MAAM,CAAC,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;gBACzD,CAAC;gBAED,IAAI,MAAM,CAAC,KAAK,EAAE,CAAC;oBAClB,IACC,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,IAAI;wBAC7B,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,SAAS;wBAClC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAC9B,CAAC;wBACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BACnD,kBAAkB,CAAC,YAAY,CAAC,CAAC;4BACjC,YAAY,GAAG,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,EAAE,EAAE,CAAC;4BAC1C,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;4BAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;wBAClF,CAAC;wBAED,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BAClC,YAAY,CAAC,IAAI,IAAI,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC;4BAC1C,MAAM,CAAC,IAAI,CAAC;gCACX,IAAI,EAAE,YAAY;gCAClB,YAAY,EAAE,UAAU,EAAE;gCAC1B,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,OAAO;gCAC3B,OAAO,EAAE,MAAM;6BACf,CAAC,CAAC;wBACJ,CAAC;oBACF,CAAC;oBAED,oEAAoE;oBACpE,mDAAmD;oBACnD,MAAM,eAAe,GAAG,CAAC,mBAAmB,EAAE,WAAW,CAAC,CAAC;oBAC3D,KAAK,MAAM,KAAK,IAAI,eAAe,EAAE,CAAC;wBACrC,IACE,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,IAAI;4BACpC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,SAAS;4BACzC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC,MAAM,GAAG,CAAC,EACtC,CAAC;4BACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACvD,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,QAAQ,EAAE,EAAE;oCACZ,iBAAiB,EAAE,KAAK;iCACxB,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,MAAM,KAAK,GAAI,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC;gCAC3C,YAAY,CAAC,QAAQ,IAAI,KAAK,CAAC;gCAC/B,MAAM,CAAC,IAAI,CAAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;oBAED,IAAI,MAAM,EAAE,KAAK,EAAE,UAAU,EAAE,CAAC;wBAC/B,KAAK,MAAM,QAAQ,IAAI,MAAM,CAAC,KAAK,CAAC,UAAU,EAAE,CAAC;4BAChD,IACC,CAAC,YAAY;gCACb,YAAY,CAAC,IAAI,KAAK,UAAU;gCAChC,CAAC,QAAQ,CAAC,EAAE,IAAI,YAAY,CAAC,EAAE,KAAK,QAAQ,CAAC,EAAE,CAAC,EAC/C,CAAC;gCACF,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,EAAE,EAAE,QAAQ,CAAC,EAAE,IAAI,EAAE;oCACrB,IAAI,EAAE,QAAQ,CAAC,QAAQ,EAAE,IAAI,IAAI,EAAE;oCACnC,SAAS,EAAE,EAAE;oCACb,WAAW,EAAE,EAAE;iCACf,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,IAAI,QAAQ,CAAC,EAAE;oCAAE,YAAY,CAAC,EAAE,GAAG,QAAQ,CAAC,EAAE,CAAC;gCAC/C,IAAI,QAAQ,CAAC,QAAQ,EAAE,IAAI;oCAAE,YAAY,CAAC,IAAI,GAAG,QAAQ,CAAC,QAAQ,CAAC,IAAI,CAAC;gCACxE,IAAI,KAAK,GAAG,EAAE,CAAC;gCACf,IAAI,QAAQ,CAAC,QAAQ,EAAE,SAAS,EAAE,CAAC;oCAClC,KAAK,GAAG,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACpC,YAAY,CAAC,WAAW,IAAI,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACxD,YAAY,CAAC,SAAS,GAAG,kBAAkB,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;gCACvE,CAAC;gCACD,MAAM,CAAC,IAAI,C
AAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;gBACF,CAAC;YACF,CAAC;YAED,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAEjC,IAAI,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC;gBAC9B,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;YACxC,CAAC;YAED,IAAI,MAAM,CAAC,UAAU,KAAK,SAAS,IAAI,MAAM,CAAC,UAAU,KAAK,OAAO,EAAE,CAAC;gBACtE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;YAC5C,CAAC;YAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAC1E,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YAChB,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,OAAO;gBAAE,OAAQ,KAAa,CAAC,KAAK,CAAC;YAChE,MAAM,CAAC,UAAU,GAAG,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC;YACnE,MAAM,CAAC,YAAY,GAAG,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC;YACrF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,KAAK,EAAE,MAAM,EAAE,CAAC,CAAC;YACzE,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;IAAA,CACD,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd,CAAC;AAEF,SAAS,YAAY,CAAC,KAAkC,EAAE,MAAe,EAAE;IAC1E,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC;YACjC,MAAM,IAAI,KAAK,CACd,gGAAgG,CAChG,CAAC;QACH,CAAC;QACD,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC;IACrC,CAAC;IACD,OAAO,IAAI,MAAM,CAAC;QACjB,MAAM;QACN,OAAO,EAAE,KAAK,CAAC,OAAO;QACtB,uBAAuB,EAAE,IAAI;QAC7B,cAAc,EAAE,KAAK,CAAC,OAAO;KAC7B,CAAC,CAAC;AAAA,CACH;AAED,SAAS,WAAW,CAAC,KAAkC,EAAE,OAAgB,EAAE,OAAkC,EAAE;IAC9G,MAAM,MAAM,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC;IAChC,MAAM,QAAQ,GAAG,eAAe,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IAEzD,MAAM,MAAM,GAAgE;QAC3E,KAAK,EAAE,KAAK,CAAC,EAAE;QACf,QAAQ;QACR,MAAM,EAAE,IAAI;QACZ,cAAc,EAAE,EAAE,aAAa,EAAE,IAAI,EAAE;KACvC,CAAC;IAEF,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;QAC1B,MAAM,CAAC,KAAK,GAAG,KAAK,CAAC;IACtB,CAAC;IAED,IAAI,OAAO,EAAE,SAAS,EAAE,CAAC;QACxB,IAAI,MAAM,CAAC,cAAc,KAAK,YAAY,EAAE,CAAC;YAC3C,MAAc,CAAC,UAAU,GAAG,OAAO,CAAC,SAAS,CAAC;QAChD,CAAC;aAAM,CAAC;YACP,MAAM,CAAC,qBAAqB,GAAG,OAAO,CAAC,SAAS,CAAC;QAClD,CAAC;IACF,CAAC;IAED,IAAI,OAAO,EAAE,WAAW,KAAK,SAAS,EAAE,CAAC;QACxC,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,CAAC;IAC1C,CAAC;IAED,IAAI,OAAO,CAAC,KAAK,EAAE,CAAC;QACnB,MAAM,CAAC,KAAK,GAAG,YAAY,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;IAC5C,CAAC;SAAM,IAAI,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC7C,mGAAmG;QACnG,MAAM,CAAC,KAAK,GAAG,EAAE,CAAC;IACnB,CAAC;IAED,IAAI,OAAO,EAAE,UAAU,EAAE,CAAC;QACzB,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,UAAU,CAAC;IACzC,CAAC;IAED,IAAI,OAAO,EAAE,eAAe,IAAI,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,uBAAuB,EAAE,CAAC;QACnF,MAAM,CAAC,gBAAgB,GAAG,OAAO,CAAC,eAAe,CAAC;IACnD,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,eAAe,CACvB,KAAkC,EAClC,OAAgB,EAChB,MAA8B,EACC;IAC/B,MAAM,MAAM,GAAiC,EAAE,CAAC;IAEhD,MAAM,mBAAmB,GAAG,iBAAiB,CAAC,OAAO,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;IAEvE,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC1B,MAAM,gBAAgB,GAAG,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,qBAAqB,CAAC;QACzE,MAAM,IAAI,GAAG,gBAAgB,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC;QACvD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC,CAAC;IAChF,CAAC;IAED,IAAI,QAAQ,GAAkB,IAAI,CAAC;IAEnC,KAAK,MAAM,GAAG,IAAI,mBAAmB,EAAE,CAAC;QACvC,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;YACzB,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;gBACrC,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,kBAAkB,CAAC,GAAG,CAAC,OAAO,CAAC;iBACxC,CAAC,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACP,MAAM,OAAO,GAAgC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,IAAI,EAA6B,EAAE,CAAC;oBACjG,IAAI,IAAI,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC1B,OAAO;4BACN,IAAI,EA
AE,MAAM;4BACZ,IAAI,EAAE,kBAAkB,CAAC,IAAI,CAAC,IAAI,CAAC;yBACK,CAAC;oBAC3C,CAAC;yBAAM,CAAC;wBACP,OAAO;4BACN,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAQ,IAAI,CAAC,QAAQ,WAAW,IAAI,CAAC,IAAI,EAAE;6BAChD;yBACwC,CAAC;oBAC5C,CAAC;gBAAA,CACD,CAAC,CAAC;gBACH,MAAM,eAAe,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC;oBACrD,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,CAAC;oBAC/C,CAAC,CAAC,OAAO,CAAC;gBACX,IAAI,eAAe,CAAC,MAAM,KAAK,CAAC;oBAAE,SAAS;gBAC3C,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,eAAe;iBACxB,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACrC,oFAAoF;YACpF,MAAM,YAAY,GAAwC;gBACzD,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,MAAM,CAAC,gCAAgC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;aAC5D,CAAC;YAEF,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAkB,CAAC;YACjF,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC3B,YAAY,CAAC,OAAO,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;oBAC5C,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,kBAAkB,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;gBAAA,CAC1D,CAAC,CAAC;YACJ,CAAC;YAED,yBAAyB;YACzB,MAAM,cAAc,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAsB,CAAC;YAC7F,IAAI,cAAc,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC/B,IAAI,MAAM,CAAC,sBAAsB,EAAE,CAAC;oBACnC,6DAA6D;oBAC7D,MAAM,YAAY,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,eAAe,CAAC,CAAC,QAAQ,eAAe,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;oBACpG,MAAM,WAAW,GAAG,YAAY,CAAC,OAAuD,CAAC;oBACzF,IAAI,WAAW,EAAE,CAAC;wBACjB,WAAW,CAAC,OAAO,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;oBAC3D,CAAC;yBAAM,CAAC;wBACP,YAAY,CAAC,OAAO,GAAG,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;oBAC/D,CAAC;gBACF,CAAC;qBAAM,CAAC;oBACP,gGAAgG;oBAChG,MAAM,SAAS,GAAG,cAAc,CAAC,CAAC,CAAC,CAAC,iBAAiB,CAAC;oBACtD,IAAI,SAAS,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;wBACtC,YAAoB,CAAC,SAAS,CAAC,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;oBACrF,CAAC;gBACF,CAAC;YACF,CAAC;YAED,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAe,CAAC;YACjF,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC1B,YAAY,CAAC,UAAU,GAAG,SAAS,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC;oBAChD,EAAE,EAAE,sBAAsB,CAAC,EAAE,CAAC,EAAE,EAAE,MAAM,CAAC,sBAAsB,CAAC;oBAChE,IAAI,EAAE,UAAmB;oBACzB,QAAQ,EAAE;wBACT,IAAI,EAAE,EAAE,CAAC,IAAI;wBACb,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,SAAS,CAAC;qBACvC;iBACD,CAAC,CAAC,CAAC;YACL,CAAC;YACD,kEAAkE;YAClE,4EAA4E;YAC5E,8DAA8D;YAC9D,gEAAgE;YAChE,MAAM,OAAO,GAAG,YAAY,CAAC,OAAO,CAAC;YACrC,MAAM,UAAU,GACf,OAAO,KAAK,IAAI;gBAChB,OAAO,KAAK,SAAS;gBACrB,CAAC,OAAO,OAAO,KAAK,QAAQ,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;YACzE,IAAI,CAAC,UAAU,IAAI,CAAC,YAAY,CAAC,UAAU,EAAE,CAAC;gBAC7C,SAAS;YACV,CAAC;YACD,MAAM,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAC3B,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YACtC,iCAAiC;YACjC,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO;iBAC5B,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC;iBAChC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAE,CAAS,CAAC,IAAI,CAAC;iBAC3B,IAAI,CAAC,IAAI,CAAC,CAAC;YACb,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC;YAE9D,oEAAoE;YACpE,MAAM,OAAO,GAAG,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC;YACtC,yEAAyE;YACzE,MAAM,aAAa,GAAmC;gBACrD,IAAI,EAAE,MAAM;gBACZ,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,sBAAsB,CAAC;gBAC1E,YAAY,EAAE,sBAAsB,CAAC,GAAG,CAAC,UAAU,EAAE,MAA
M,CAAC,sBAAsB,CAAC;aACnF,CAAC;YACF,IAAI,MAAM,CAAC,sBAAsB,IAAI,GAAG,CAAC,QAAQ,EAAE,CAAC;gBAClD,aAAqB,CAAC,IAAI,GAAG,GAAG,CAAC,QAAQ,CAAC;YAC5C,CAAC;YACD,MAAM,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;YAE3B,yFAAyF;YACzF,IAAI,SAAS,IAAI,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE,CAAC;gBAChD,MAAM,aAAa,GAEf,EAAE,CAAC;gBAEP,kBAAkB;gBAClB,aAAa,CAAC,IAAI,CAAC;oBAClB,IAAI,EAAE,MAAM;oBACZ,IAAI,EAAE,qCAAqC;iBAC3C,CAAC,CAAC;gBAEH,aAAa;gBACb,KAAK,MAAM,KAAK,IAAI,GAAG,CAAC,OAAO,EAAE,CAAC;oBACjC,IAAI,KAAK,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;wBAC5B,aAAa,CAAC,IAAI,CAAC;4BAClB,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAS,KAAa,CAAC,QAAQ,WAAY,KAAa,CAAC,IAAI,EAAE;6BACpE;yBACD,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;gBAED,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,aAAa;iBACtB,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;QAED,QAAQ,GAAG,GAAG,CAAC,IAAI,CAAC;IACrB,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,YAAY,CAAC,KAAa,EAAgD;IAClF,OAAO,KAAK,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC;QAC3B,IAAI,EAAE,UAAU;QAChB,QAAQ,EAAE;YACT,IAAI,EAAE,IAAI,CAAC,IAAI;YACf,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,UAAU,EAAE,IAAI,CAAC,UAAiB,EAAE,wCAAwC;SAC5E;KACD,CAAC,CAAC,CAAC;AAAA,CACJ;AAED,SAAS,aAAa,CAAC,MAAmD,EAAc;IACvF,IAAI,MAAM,KAAK,IAAI;QAAE,OAAO,MAAM,CAAC;IACnC,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,MAAM;YACV,OAAO,MAAM,CAAC;QACf,KAAK,QAAQ;YACZ,OAAO,QAAQ,CAAC;QACjB,KAAK,eAAe,CAAC;QACrB,KAAK,YAAY;YAChB,OAAO,SAAS,CAAC;QAClB,KAAK,gBAAgB;YACpB,OAAO,OAAO,CAAC;QAChB,SAAS,CAAC;YACT,MAAM,WAAW,GAAU,MAAM,CAAC;YAClC,MAAM,IAAI,KAAK,CAAC,0BAA0B,WAAW,EAAE,CAAC,CAAC;QAC1D,CAAC;IACF,CAAC;AAAA,CACD;AAED;;;GAGG;AACH,SAAS,mBAAmB,CAAC,OAAe,EAA0B;IACrE,MAAM,aAAa,GAClB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAC;QAC/B,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC;QAC5B,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC;QAC9B,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAE/B,MAAM,YAAY,GAAG,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,IAAI,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAErF,MAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAE5C,MAAM,SAAS,GAAG,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC;IAEjD,OAAO;QACN,aAAa,EAAE,CAAC,aAAa;QAC7B,qBAAqB,EAAE,CAAC,aAAa;QACrC,uBAAuB,EAAE,CAAC,MAAM;QAChC,cAAc,EAAE,YAAY,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,uBAAuB;QACrE,sBAAsB,EAAE,SAAS;QACjC,gCAAgC,EAAE,KAAK,EAAE,iDAAiD;QAC1F,sBAAsB,EAAE,SAAS;QACjC,sBAAsB,EAAE,SAAS;KACjC,CAAC;AAAA,CACF;AAED;;;GAGG;AACH,SAAS,SAAS,CAAC,KAAkC,EAA0B;IAC9E,MAAM,QAAQ,GAAG,mBAAmB,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;IACpD,IAAI,CAAC,KAAK,CAAC,MAAM;QAAE,OAAO,QAAQ,CAAC;IAEnC,OAAO;QACN,aAAa,EAAE,KAAK,CAAC,MAAM,CAAC,aAAa,IAAI,QAAQ,CAAC,aAAa;QACnE,qBAAqB,EAAE,KAAK,CAAC,MAAM,CAAC,qBAAqB,IAAI,QAAQ,CAAC,qBAAqB;QAC3F,uBAAuB,EAAE,KAAK,CAAC,MAAM,CAAC,uBAAuB,IAAI,QAAQ,CAAC,uBAAuB;QACjG,cAAc,EAAE,KAAK,CAAC,MAAM,CAAC,cAAc,IAAI,QAAQ,CAAC,cAAc;QACtE,sBAAsB,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,IAAI,QAAQ,CAAC,sBAAsB;QAC9F,gCAAgC,EAC/B,KAAK,CAAC,MAAM,CAAC,gCAAgC,IAAI,QAAQ,CAAC,gCAAgC;QAC3F,sBAAsB,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,IAAI,QAAQ,CAAC,sBAAsB;QAC9F,sBAAsB,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,IAAI,QAAQ,CAAC,sBAAsB;KAC9F,CAAC;AAAA,CACF","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tChatCompletionAssistantMessageParam,\n\tChatCompletionChunk,\n\tChatCompletionContentPart,\n\tChatCompletionContentPartImage,\n\tChatCompletionContentPartText,\n\tChatCompletionMessageParam,\n\tChatCompletionToolMessageParam,\n} from \"openai/resources/chat/completions.js\";\nimport { calculateCost } from \"../models.js\";\nimport type {\n\tAssistantMessage,\n\tContext,\n\tMessage,\n\tModel,\n\tOpenAICompat,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n} from 
\"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { transformMessages } from \"./transorm-messages.js\";\n\n/**\n * Normalize tool call ID for Mistral.\n * Mistral requires tool IDs to be exactly 9 alphanumeric characters (a-z, A-Z, 0-9).\n */\nfunction normalizeMistralToolId(id: string, isMistral: boolean): string {\n\tif (!isMistral) return id;\n\t// Remove non-alphanumeric characters\n\tlet normalized = id.replace(/[^a-zA-Z0-9]/g, \"\");\n\t// Mistral requires exactly 9 characters\n\tif (normalized.length < 9) {\n\t\t// Pad with deterministic characters based on original ID to ensure matching\n\t\tconst padding = \"ABCDEFGHI\";\n\t\tnormalized = normalized + padding.slice(0, 9 - normalized.length);\n\t} else if (normalized.length > 9) {\n\t\tnormalized = normalized.slice(0, 9);\n\t}\n\treturn normalized;\n}\n\n/**\n * Check if conversation messages contain tool calls or tool results.\n * This is needed because Anthropic (via proxy) requires the tools param\n * to be present when messages include tool_calls or tool role messages.\n */\nfunction hasToolHistory(messages: Message[]): boolean {\n\tfor (const msg of messages) {\n\t\tif (msg.role === \"toolResult\") {\n\t\t\treturn true;\n\t\t}\n\t\tif (msg.role === \"assistant\") {\n\t\t\tif (msg.content.some((block) => block.type === \"toolCall\")) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\nexport interface OpenAICompletionsOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"required\" | { type: \"function\"; function: { name: string } };\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\nexport const streamOpenAICompletions: StreamFunction<\"openai-completions\"> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: OpenAICompletionsOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst client = createClient(model, options?.apiKey);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\tconst openaiStream = await client.chat.completions.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tlet currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tconst finishCurrentBlock = (block?: typeof currentBlock) => {\n\t\t\t\tif (block) {\n\t\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: 
blockIndex(),\n\t\t\t\t\t\t\tcontent: block.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\t\tblock.arguments = JSON.parse(block.partialArgs || \"{}\");\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null &&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai compatible endpoints)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\"];\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\t\tthinkingSignature: 
field,\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\t\tconst delta = (choice.delta as any)[field];\n\t\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif (toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unkown error ocurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nfunction createClient(model: Model<\"openai-completions\">, apiKey?: string) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. 
Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: model.headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst compat = getCompat(model);\n\tconst messages = convertMessages(model, context, compat);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t\tstream_options: { include_usage: true },\n\t};\n\n\tif (compat.supportsStore) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\tif (compat.maxTokensField === \"max_tokens\") {\n\t\t\t(params as any).max_tokens = options.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t} else if (hasToolHistory(context.messages)) {\n\t\t// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results\n\t\tparams.tools = [];\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\tif (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\treturn params;\n}\n\nfunction convertMessages(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tcompat: Required<OpenAICompat>,\n): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst transformedMessages = transformMessages(context.messages, model);\n\n\tif (context.systemPrompt) {\n\t\tconst useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;\n\t\tconst role = useDeveloperRole ? \"developer\" : \"system\";\n\t\tparams.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });\n\t}\n\n\tlet lastRole: string | null = null;\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: sanitizeSurrogates(msg.content),\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? content.filter((c) => c.type !== \"image_url\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\t// Some providers (e.g. 
Mistral) don't accept null content, use empty string instead\n\t\t\tconst assistantMsg: ChatCompletionAssistantMessageParam = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: compat.requiresAssistantAfterToolResult ? \"\" : null,\n\t\t\t};\n\n\t\t\tconst textBlocks = msg.content.filter((b) => b.type === \"text\") as TextContent[];\n\t\t\tif (textBlocks.length > 0) {\n\t\t\t\tassistantMsg.content = textBlocks.map((b) => {\n\t\t\t\t\treturn { type: \"text\", text: sanitizeSurrogates(b.text) };\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Handle thinking blocks\n\t\t\tconst thinkingBlocks = msg.content.filter((b) => b.type === \"thinking\") as ThinkingContent[];\n\t\t\tif (thinkingBlocks.length > 0) {\n\t\t\t\tif (compat.requiresThinkingAsText) {\n\t\t\t\t\t// Convert thinking blocks to text with <thinking> delimiters\n\t\t\t\t\tconst thinkingText = thinkingBlocks.map((b) => `<thinking>\\n${b.thinking}\\n</thinking>`).join(\"\\n\");\n\t\t\t\t\tconst textContent = assistantMsg.content as Array<{ type: \"text\"; text: string }> | null;\n\t\t\t\t\tif (textContent) {\n\t\t\t\t\t\ttextContent.unshift({ type: \"text\", text: thinkingText });\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassistantMsg.content = [{ type: \"text\", text: thinkingText }];\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss)\n\t\t\t\t\tconst signature = thinkingBlocks[0].thinkingSignature;\n\t\t\t\t\tif (signature && signature.length > 0) {\n\t\t\t\t\t\t(assistantMsg as any)[signature] = thinkingBlocks.map((b) => b.thinking).join(\"\\n\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst toolCalls = msg.content.filter((b) => b.type === \"toolCall\") as ToolCall[];\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tassistantMsg.tool_calls = toolCalls.map((tc) => ({\n\t\t\t\t\tid: normalizeMistralToolId(tc.id, compat.requiresMistralToolIds),\n\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\tfunction: {\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targuments: JSON.stringify(tc.arguments),\n\t\t\t\t\t},\n\t\t\t\t}));\n\t\t\t}\n\t\t\t// Skip assistant messages that have no content and no tool calls.\n\t\t\t// Mistral explicitly requires \"either content or tool_calls, but not none\".\n\t\t\t// Other providers also don't accept empty assistant messages.\n\t\t\t// This handles aborted assistant responses that got no content.\n\t\t\tconst content = assistantMsg.content;\n\t\t\tconst hasContent =\n\t\t\t\tcontent !== null &&\n\t\t\t\tcontent !== undefined &&\n\t\t\t\t(typeof content === \"string\" ? content.length > 0 : content.length > 0);\n\t\t\tif (!hasContent && !assistantMsg.tool_calls) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tparams.push(assistantMsg);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textResult = msg.content\n\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t.join(\"\\n\");\n\t\t\tconst hasImages = msg.content.some((c) => c.type === \"image\");\n\n\t\t\t// Always send tool result with text (or placeholder if only images)\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\t// Some providers (e.g. Mistral) require the 'name' field in tool results\n\t\t\tconst toolResultMsg: ChatCompletionToolMessageParam = {\n\t\t\t\trole: \"tool\",\n\t\t\t\tcontent: sanitizeSurrogates(hasText ? 
textResult : \"(see attached image)\"),\n\t\t\t\ttool_call_id: normalizeMistralToolId(msg.toolCallId, compat.requiresMistralToolIds),\n\t\t\t};\n\t\t\tif (compat.requiresToolResultName && msg.toolName) {\n\t\t\t\t(toolResultMsg as any).name = msg.toolName;\n\t\t\t}\n\t\t\tparams.push(toolResultMsg);\n\n\t\t\t// If there are images and model supports them, send a follow-up user message with images\n\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\tconst contentBlocks: Array<\n\t\t\t\t\t{ type: \"text\"; text: string } | { type: \"image_url\"; image_url: { url: string } }\n\t\t\t\t> = [];\n\n\t\t\t\t// Add text prefix\n\t\t\t\tcontentBlocks.push({\n\t\t\t\t\ttype: \"text\",\n\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t});\n\n\t\t\t\t// Add images\n\t\t\t\tfor (const block of msg.content) {\n\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\tcontentBlocks.push({\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: contentBlocks,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\tlastRole = msg.role;\n\t}\n\n\treturn params;\n}\n\nfunction convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\t},\n\t}));\n}\n\nfunction mapStopReason(reason: ChatCompletionChunk.Choice[\"finish_reason\"]): StopReason {\n\tif (reason === null) return \"stop\";\n\tswitch (reason) {\n\t\tcase \"stop\":\n\t\t\treturn \"stop\";\n\t\tcase \"length\":\n\t\t\treturn \"length\";\n\t\tcase \"function_call\":\n\t\tcase \"tool_calls\":\n\t\t\treturn \"toolUse\";\n\t\tcase \"content_filter\":\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\n/**\n * Detect compatibility settings from baseUrl for known providers.\n * Returns a fully resolved OpenAICompat object with all fields set.\n */\nfunction detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {\n\tconst isNonStandard =\n\t\tbaseUrl.includes(\"cerebras.ai\") ||\n\t\tbaseUrl.includes(\"api.x.ai\") ||\n\t\tbaseUrl.includes(\"mistral.ai\") ||\n\t\tbaseUrl.includes(\"chutes.ai\");\n\n\tconst useMaxTokens = baseUrl.includes(\"mistral.ai\") || baseUrl.includes(\"chutes.ai\");\n\n\tconst isGrok = baseUrl.includes(\"api.x.ai\");\n\n\tconst isMistral = baseUrl.includes(\"mistral.ai\");\n\n\treturn {\n\t\tsupportsStore: !isNonStandard,\n\t\tsupportsDeveloperRole: !isNonStandard,\n\t\tsupportsReasoningEffort: !isGrok,\n\t\tmaxTokensField: useMaxTokens ? 
\"max_tokens\" : \"max_completion_tokens\",\n\t\trequiresToolResultName: isMistral,\n\t\trequiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024\n\t\trequiresThinkingAsText: isMistral,\n\t\trequiresMistralToolIds: isMistral,\n\t};\n}\n\n/**\n * Get resolved compatibility settings for a model.\n * Uses explicit model.compat if provided, otherwise auto-detects from URL.\n */\nfunction getCompat(model: Model<\"openai-completions\">): Required<OpenAICompat> {\n\tconst detected = detectCompatFromUrl(model.baseUrl);\n\tif (!model.compat) return detected;\n\n\treturn {\n\t\tsupportsStore: model.compat.supportsStore ?? detected.supportsStore,\n\t\tsupportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,\n\t\tsupportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,\n\t\tmaxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,\n\t\trequiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName,\n\t\trequiresAssistantAfterToolResult:\n\t\t\tmodel.compat.requiresAssistantAfterToolResult ?? detected.requiresAssistantAfterToolResult,\n\t\trequiresThinkingAsText: model.compat.requiresThinkingAsText ?? detected.requiresThinkingAsText,\n\t\trequiresMistralToolIds: model.compat.requiresMistralToolIds ?? detected.requiresMistralToolIds,\n\t};\n}\n"]}
@@ -1 +1 @@
1
- {"version":3,"file":"stream.d.ts","sourceRoot":"","sources":["../src/stream.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EACX,GAAG,EACH,gBAAgB,EAChB,2BAA2B,EAC3B,OAAO,EACP,aAAa,EACb,KAAK,EACL,aAAa,EAEb,mBAAmB,EACnB,MAAM,YAAY,CAAC;AAIpB,wBAAgB,SAAS,CAAC,QAAQ,EAAE,aAAa,EAAE,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;AACtE,wBAAgB,SAAS,CAAC,QAAQ,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;AAK/D,wBAAgB,SAAS,CAAC,QAAQ,EAAE,aAAa,GAAG,MAAM,GAAG,SAAS,CAAC;AACvE,wBAAgB,SAAS,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,CAAC;AAsBhE,wBAAgB,MAAM,CAAC,IAAI,SAAS,GAAG,EACtC,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,aAAa,CAAC,IAAI,CAAC,GAC3B,2BAA2B,CA2B7B;AAED,wBAAsB,QAAQ,CAAC,IAAI,SAAS,GAAG,EAC9C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,aAAa,CAAC,IAAI,CAAC,GAC3B,OAAO,CAAC,gBAAgB,CAAC,CAG3B;AAED,wBAAgB,YAAY,CAAC,IAAI,SAAS,GAAG,EAC5C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,mBAAmB,GAC3B,2BAA2B,CAQ7B;AAED,wBAAsB,cAAc,CAAC,IAAI,SAAS,GAAG,EACpD,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,mBAAmB,GAC3B,OAAO,CAAC,gBAAgB,CAAC,CAG3B","sourcesContent":["import { type AnthropicOptions, streamAnthropic } from \"./providers/anthropic.js\";\nimport { type GoogleOptions, streamGoogle } from \"./providers/google.js\";\nimport { type OpenAICompletionsOptions, streamOpenAICompletions } from \"./providers/openai-completions.js\";\nimport { type OpenAIResponsesOptions, streamOpenAIResponses } from \"./providers/openai-responses.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tAssistantMessageEventStream,\n\tContext,\n\tKnownProvider,\n\tModel,\n\tOptionsForApi,\n\tReasoningEffort,\n\tSimpleStreamOptions,\n} from \"./types.js\";\n\nconst apiKeys: Map<string, string> = new Map();\n\nexport function setApiKey(provider: KnownProvider, key: string): void;\nexport function setApiKey(provider: string, key: string): void;\nexport function setApiKey(provider: any, key: string): void {\n\tapiKeys.set(provider, key);\n}\n\nexport function getApiKey(provider: KnownProvider): string | undefined;\nexport function getApiKey(provider: string): string | undefined;\nexport function getApiKey(provider: any): string | undefined {\n\t// Check explicit keys first\n\tconst key = apiKeys.get(provider);\n\tif (key) return key;\n\n\t// Fall back to environment variables\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\tanthropic: \"ANTHROPIC_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? 
process.env[envVar] : undefined;\n}\n\nexport function stream<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\tconst providerOptions = { ...options, apiKey };\n\n\tconst api: Api = model.api;\n\tswitch (api) {\n\t\tcase \"anthropic-messages\":\n\t\t\treturn streamAnthropic(model as Model<\"anthropic-messages\">, context, providerOptions);\n\n\t\tcase \"openai-completions\":\n\t\t\treturn streamOpenAICompletions(model as Model<\"openai-completions\">, context, providerOptions as any);\n\n\t\tcase \"openai-responses\":\n\t\t\treturn streamOpenAIResponses(model as Model<\"openai-responses\">, context, providerOptions as any);\n\n\t\tcase \"google-generative-ai\":\n\t\t\treturn streamGoogle(model as Model<\"google-generative-ai\">, context, providerOptions);\n\n\t\tdefault: {\n\t\t\t// This should never be reached if all Api cases are handled\n\t\t\tconst _exhaustive: never = api;\n\t\t\tthrow new Error(`Unhandled API: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\nexport async function complete<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): Promise<AssistantMessage> {\n\tconst s = stream(model, context, options);\n\treturn s.result();\n}\n\nexport function streamSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst providerOptions = mapOptionsForApi(model, options, apiKey);\n\treturn stream(model, context, providerOptions);\n}\n\nexport async function completeSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): Promise<AssistantMessage> {\n\tconst s = streamSimple(model, context, options);\n\treturn s.result();\n}\n\nfunction mapOptionsForApi<TApi extends Api>(\n\tmodel: Model<TApi>,\n\toptions?: SimpleStreamOptions,\n\tapiKey?: string,\n): OptionsForApi<TApi> {\n\tconst base = {\n\t\ttemperature: options?.temperature,\n\t\tmaxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),\n\t\tsignal: options?.signal,\n\t\tapiKey: apiKey || options?.apiKey,\n\t};\n\n\t// Helper to clamp xhigh to high for providers that don't support it\n\tconst clampReasoning = (effort: ReasoningEffort | undefined) => (effort === \"xhigh\" ? 
\"high\" : effort);\n\n\tswitch (model.api) {\n\t\tcase \"anthropic-messages\": {\n\t\t\tif (!options?.reasoning) return base satisfies AnthropicOptions;\n\n\t\t\tconst anthropicBudgets = {\n\t\t\t\tminimal: 1024,\n\t\t\t\tlow: 2048,\n\t\t\t\tmedium: 8192,\n\t\t\t\thigh: 16384,\n\t\t\t};\n\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinkingEnabled: true,\n\t\t\t\tthinkingBudgetTokens: anthropicBudgets[clampReasoning(options.reasoning)!],\n\t\t\t} satisfies AnthropicOptions;\n\t\t}\n\n\t\tcase \"openai-completions\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAICompletionsOptions;\n\n\t\tcase \"openai-responses\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAIResponsesOptions;\n\n\t\tcase \"google-generative-ai\": {\n\t\t\tif (!options?.reasoning) return base as any;\n\n\t\t\tconst googleBudget = getGoogleBudget(\n\t\t\t\tmodel as Model<\"google-generative-ai\">,\n\t\t\t\tclampReasoning(options.reasoning)!,\n\t\t\t);\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinking: {\n\t\t\t\t\tenabled: true,\n\t\t\t\t\tbudgetTokens: googleBudget,\n\t\t\t\t},\n\t\t\t} satisfies GoogleOptions;\n\t\t}\n\n\t\tdefault: {\n\t\t\t// Exhaustiveness check\n\t\t\tconst _exhaustive: never = model.api;\n\t\t\tthrow new Error(`Unhandled API in mapOptionsForApi: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\ntype ClampedReasoningEffort = Exclude<ReasoningEffort, \"xhigh\">;\n\nfunction getGoogleBudget(model: Model<\"google-generative-ai\">, effort: ClampedReasoningEffort): number {\n\t// See https://ai.google.dev/gemini-api/docs/thinking#set-budget\n\tif (model.id.includes(\"2.5-pro\")) {\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 32768,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\tif (model.id.includes(\"2.5-flash\")) {\n\t\t// Covers 2.5-flash-lite as well\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 24576,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\t// Unknown model - use dynamic\n\treturn -1;\n}\n"]}
1
+ {"version":3,"file":"stream.d.ts","sourceRoot":"","sources":["../src/stream.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EACX,GAAG,EACH,gBAAgB,EAChB,2BAA2B,EAC3B,OAAO,EACP,aAAa,EACb,KAAK,EACL,aAAa,EAEb,mBAAmB,EACnB,MAAM,YAAY,CAAC;AAIpB,wBAAgB,SAAS,CAAC,QAAQ,EAAE,aAAa,EAAE,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;AACtE,wBAAgB,SAAS,CAAC,QAAQ,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;AAK/D,wBAAgB,SAAS,CAAC,QAAQ,EAAE,aAAa,GAAG,MAAM,GAAG,SAAS,CAAC;AACvE,wBAAgB,SAAS,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,CAAC;AAuBhE,wBAAgB,MAAM,CAAC,IAAI,SAAS,GAAG,EACtC,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,aAAa,CAAC,IAAI,CAAC,GAC3B,2BAA2B,CA2B7B;AAED,wBAAsB,QAAQ,CAAC,IAAI,SAAS,GAAG,EAC9C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,aAAa,CAAC,IAAI,CAAC,GAC3B,OAAO,CAAC,gBAAgB,CAAC,CAG3B;AAED,wBAAgB,YAAY,CAAC,IAAI,SAAS,GAAG,EAC5C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,mBAAmB,GAC3B,2BAA2B,CAQ7B;AAED,wBAAsB,cAAc,CAAC,IAAI,SAAS,GAAG,EACpD,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,mBAAmB,GAC3B,OAAO,CAAC,gBAAgB,CAAC,CAG3B","sourcesContent":["import { type AnthropicOptions, streamAnthropic } from \"./providers/anthropic.js\";\nimport { type GoogleOptions, streamGoogle } from \"./providers/google.js\";\nimport { type OpenAICompletionsOptions, streamOpenAICompletions } from \"./providers/openai-completions.js\";\nimport { type OpenAIResponsesOptions, streamOpenAIResponses } from \"./providers/openai-responses.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tAssistantMessageEventStream,\n\tContext,\n\tKnownProvider,\n\tModel,\n\tOptionsForApi,\n\tReasoningEffort,\n\tSimpleStreamOptions,\n} from \"./types.js\";\n\nconst apiKeys: Map<string, string> = new Map();\n\nexport function setApiKey(provider: KnownProvider, key: string): void;\nexport function setApiKey(provider: string, key: string): void;\nexport function setApiKey(provider: any, key: string): void {\n\tapiKeys.set(provider, key);\n}\n\nexport function getApiKey(provider: KnownProvider): string | undefined;\nexport function getApiKey(provider: string): string | undefined;\nexport function getApiKey(provider: any): string | undefined {\n\t// Check explicit keys first\n\tconst key = apiKeys.get(provider);\n\tif (key) return key;\n\n\t// Fall back to environment variables\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\tanthropic: \"ANTHROPIC_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t\tmistral: \"MISTRAL_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? 
process.env[envVar] : undefined;\n}\n\nexport function stream<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\tconst providerOptions = { ...options, apiKey };\n\n\tconst api: Api = model.api;\n\tswitch (api) {\n\t\tcase \"anthropic-messages\":\n\t\t\treturn streamAnthropic(model as Model<\"anthropic-messages\">, context, providerOptions);\n\n\t\tcase \"openai-completions\":\n\t\t\treturn streamOpenAICompletions(model as Model<\"openai-completions\">, context, providerOptions as any);\n\n\t\tcase \"openai-responses\":\n\t\t\treturn streamOpenAIResponses(model as Model<\"openai-responses\">, context, providerOptions as any);\n\n\t\tcase \"google-generative-ai\":\n\t\t\treturn streamGoogle(model as Model<\"google-generative-ai\">, context, providerOptions);\n\n\t\tdefault: {\n\t\t\t// This should never be reached if all Api cases are handled\n\t\t\tconst _exhaustive: never = api;\n\t\t\tthrow new Error(`Unhandled API: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\nexport async function complete<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): Promise<AssistantMessage> {\n\tconst s = stream(model, context, options);\n\treturn s.result();\n}\n\nexport function streamSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst providerOptions = mapOptionsForApi(model, options, apiKey);\n\treturn stream(model, context, providerOptions);\n}\n\nexport async function completeSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): Promise<AssistantMessage> {\n\tconst s = streamSimple(model, context, options);\n\treturn s.result();\n}\n\nfunction mapOptionsForApi<TApi extends Api>(\n\tmodel: Model<TApi>,\n\toptions?: SimpleStreamOptions,\n\tapiKey?: string,\n): OptionsForApi<TApi> {\n\tconst base = {\n\t\ttemperature: options?.temperature,\n\t\tmaxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),\n\t\tsignal: options?.signal,\n\t\tapiKey: apiKey || options?.apiKey,\n\t};\n\n\t// Helper to clamp xhigh to high for providers that don't support it\n\tconst clampReasoning = (effort: ReasoningEffort | undefined) => (effort === \"xhigh\" ? 
\"high\" : effort);\n\n\tswitch (model.api) {\n\t\tcase \"anthropic-messages\": {\n\t\t\tif (!options?.reasoning) return base satisfies AnthropicOptions;\n\n\t\t\tconst anthropicBudgets = {\n\t\t\t\tminimal: 1024,\n\t\t\t\tlow: 2048,\n\t\t\t\tmedium: 8192,\n\t\t\t\thigh: 16384,\n\t\t\t};\n\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinkingEnabled: true,\n\t\t\t\tthinkingBudgetTokens: anthropicBudgets[clampReasoning(options.reasoning)!],\n\t\t\t} satisfies AnthropicOptions;\n\t\t}\n\n\t\tcase \"openai-completions\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAICompletionsOptions;\n\n\t\tcase \"openai-responses\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAIResponsesOptions;\n\n\t\tcase \"google-generative-ai\": {\n\t\t\tif (!options?.reasoning) return base as any;\n\n\t\t\tconst googleBudget = getGoogleBudget(\n\t\t\t\tmodel as Model<\"google-generative-ai\">,\n\t\t\t\tclampReasoning(options.reasoning)!,\n\t\t\t);\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinking: {\n\t\t\t\t\tenabled: true,\n\t\t\t\t\tbudgetTokens: googleBudget,\n\t\t\t\t},\n\t\t\t} satisfies GoogleOptions;\n\t\t}\n\n\t\tdefault: {\n\t\t\t// Exhaustiveness check\n\t\t\tconst _exhaustive: never = model.api;\n\t\t\tthrow new Error(`Unhandled API in mapOptionsForApi: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\ntype ClampedReasoningEffort = Exclude<ReasoningEffort, \"xhigh\">;\n\nfunction getGoogleBudget(model: Model<\"google-generative-ai\">, effort: ClampedReasoningEffort): number {\n\t// See https://ai.google.dev/gemini-api/docs/thinking#set-budget\n\tif (model.id.includes(\"2.5-pro\")) {\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 32768,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\tif (model.id.includes(\"2.5-flash\")) {\n\t\t// Covers 2.5-flash-lite as well\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 24576,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\t// Unknown model - use dynamic\n\treturn -1;\n}\n"]}
package/dist/stream.js CHANGED
@@ -21,6 +21,7 @@ export function getApiKey(provider) {
21
21
  xai: "XAI_API_KEY",
22
22
  openrouter: "OPENROUTER_API_KEY",
23
23
  zai: "ZAI_API_KEY",
24
+ mistral: "MISTRAL_API_KEY",
24
25
  };
25
26
  const envVar = envMap[provider];
26
27
  return envVar ? process.env[envVar] : undefined;
@@ -1 +1 @@
1
- {"version":3,"file":"stream.js","sourceRoot":"","sources":["../src/stream.ts"],"names":[],"mappings":"AAAA,OAAO,EAAyB,eAAe,EAAE,MAAM,0BAA0B,CAAC;AAClF,OAAO,EAAsB,YAAY,EAAE,MAAM,uBAAuB,CAAC;AACzE,OAAO,EAAiC,uBAAuB,EAAE,MAAM,mCAAmC,CAAC;AAC3G,OAAO,EAA+B,qBAAqB,EAAE,MAAM,iCAAiC,CAAC;AAarG,MAAM,OAAO,GAAwB,IAAI,GAAG,EAAE,CAAC;AAI/C,MAAM,UAAU,SAAS,CAAC,QAAa,EAAE,GAAW,EAAQ;IAC3D,OAAO,CAAC,GAAG,CAAC,QAAQ,EAAE,GAAG,CAAC,CAAC;AAAA,CAC3B;AAID,MAAM,UAAU,SAAS,CAAC,QAAa,EAAsB;IAC5D,4BAA4B;IAC5B,MAAM,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC;IAClC,IAAI,GAAG;QAAE,OAAO,GAAG,CAAC;IAEpB,qCAAqC;IACrC,MAAM,MAAM,GAA2B;QACtC,MAAM,EAAE,gBAAgB;QACxB,SAAS,EAAE,mBAAmB;QAC9B,MAAM,EAAE,gBAAgB;QACxB,IAAI,EAAE,cAAc;QACpB,QAAQ,EAAE,kBAAkB;QAC5B,GAAG,EAAE,aAAa;QAClB,UAAU,EAAE,oBAAoB;QAChC,GAAG,EAAE,aAAa;KAClB,CAAC;IAEF,MAAM,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC;IAChC,OAAO,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA,CAChD;AAED,MAAM,UAAU,MAAM,CACrB,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACC;IAC9B,MAAM,MAAM,GAAG,OAAO,EAAE,MAAM,IAAI,SAAS,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;IAC5D,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,4BAA4B,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC;IAC/D,CAAC;IACD,MAAM,eAAe,GAAG,EAAE,GAAG,OAAO,EAAE,MAAM,EAAE,CAAC;IAE/C,MAAM,GAAG,GAAQ,KAAK,CAAC,GAAG,CAAC;IAC3B,QAAQ,GAAG,EAAE,CAAC;QACb,KAAK,oBAAoB;YACxB,OAAO,eAAe,CAAC,KAAoC,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;QAExF,KAAK,oBAAoB;YACxB,OAAO,uBAAuB,CAAC,KAAoC,EAAE,OAAO,EAAE,eAAsB,CAAC,CAAC;QAEvG,KAAK,kBAAkB;YACtB,OAAO,qBAAqB,CAAC,KAAkC,EAAE,OAAO,EAAE,eAAsB,CAAC,CAAC;QAEnG,KAAK,sBAAsB;YAC1B,OAAO,YAAY,CAAC,KAAsC,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;QAEvF,SAAS,CAAC;YACT,4DAA4D;YAC5D,MAAM,WAAW,GAAU,GAAG,CAAC;YAC/B,MAAM,IAAI,KAAK,CAAC,kBAAkB,WAAW,EAAE,CAAC,CAAC;QAClD,CAAC;IACF,CAAC;AAAA,CACD;AAED,MAAM,CAAC,KAAK,UAAU,QAAQ,CAC7B,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACD;IAC5B,MAAM,CAAC,GAAG,MAAM,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;IAC1C,OAAO,CAAC,CAAC,MAAM,EAAE,CAAC;AAAA,CAClB;AAED,MAAM,UAAU,YAAY,CAC3B,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACC;IAC9B,MAAM,MAAM,GAAG,OAAO,EAAE,MAAM,IAAI,SAAS,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;IAC5D,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,4BAA4B,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC;IAC/D,CAAC;IAED,MAAM,eAAe,GAAG,gBAAgB,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IACjE,OAAO,MAAM,CAAC,KAAK,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;AAAA,CAC/C;AAED,MAAM,CAAC,KAAK,UAAU,cAAc,CACnC,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACD;IAC5B,MAAM,CAAC,GAAG,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;IAChD,OAAO,CAAC,CAAC,MAAM,EAAE,CAAC;AAAA,CAClB;AAED,SAAS,gBAAgB,CACxB,KAAkB,EAClB,OAA6B,EAC7B,MAAe,EACO;IACtB,MAAM,IAAI,GAAG;QACZ,WAAW,EAAE,OAAO,EAAE,WAAW;QACjC,SAAS,EAAE,OAAO,EAAE,SAAS,IAAI,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS,EAAE,KAAK,CAAC;QACjE,MAAM,EAAE,OAAO,EAAE,MAAM;QACvB,MAAM,EAAE,MAAM,IAAI,OAAO,EAAE,MAAM;KACjC,CAAC;IAEF,oEAAoE;IACpE,MAAM,cAAc,GAAG,CAAC,MAAmC,EAAE,EAAE,CAAC,CAAC,MAAM,KAAK,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;IAEvG,QAAQ,KAAK,CAAC,GAAG,EAAE,CAAC;QACnB,KAAK,oBAAoB,EAAE,CAAC;YAC3B,IAAI,CAAC,OAAO,EAAE,SAAS;gBAAE,OAAO,IAA+B,CAAC;YAEhE,MAAM,gBAAgB,GAAG;gBACxB,OAAO,EAAE,IAAI;gBACb,GAAG,EAAE,IAAI;gBACT,MAAM,EAAE,IAAI;gBACZ,IAAI,EAAE,KAAK;aACX,CAAC;YAEF,OAAO;gBACN,GAAG,IAAI;gBACP,eAAe,EAAE,IAAI;gBACrB,oBAAoB,EAAE,gBAAgB,CAAC,cAAc,CAAC,OAAO,CAAC,SAAS,CAAE,CAAC;aAC/C,CAAC;QAC9B,CAAC;QAED,KAAK,oBAAoB;YACxB,OAAO;gBACN,GAAG,IAAI;gBACP,eAAe,EAAE,OAAO,EAAE,SAAS;aACA,CAAC;QAEtC,KAAK,kBAAkB;YACtB,OAAO;gBACN,GAAG,IAAI;gBACP,eAAe,EAAE,OAAO,EAAE,SAAS;aACF,CAAC;QAEpC,KAAK,sBAAsB,EAAE,CAAC;YAC7B,IAAI,CAAC,OAAO,EAAE,SAAS;gBAAE,OAAO,IAAW,CAAC;YAE5C,MAAM,YAAY,GAAG,eAAe,CACnC,KA
AsC,EACtC,cAAc,CAAC,OAAO,CAAC,SAAS,CAAE,CAClC,CAAC;YACF,OAAO;gBACN,GAAG,IAAI;gBACP,QAAQ,EAAE;oBACT,OAAO,EAAE,IAAI;oBACb,YAAY,EAAE,YAAY;iBAC1B;aACuB,CAAC;QAC3B,CAAC;QAED,SAAS,CAAC;YACT,uBAAuB;YACvB,MAAM,WAAW,GAAU,KAAK,CAAC,GAAG,CAAC;YACrC,MAAM,IAAI,KAAK,CAAC,sCAAsC,WAAW,EAAE,CAAC,CAAC;QACtE,CAAC;IACF,CAAC;AAAA,CACD;AAID,SAAS,eAAe,CAAC,KAAoC,EAAE,MAA8B,EAAU;IACtG,gEAAgE;IAChE,IAAI,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,EAAE,CAAC;QAClC,MAAM,OAAO,GAA2C;YACvD,OAAO,EAAE,GAAG;YACZ,GAAG,EAAE,IAAI;YACT,MAAM,EAAE,IAAI;YACZ,IAAI,EAAE,KAAK;SACX,CAAC;QACF,OAAO,OAAO,CAAC,MAAM,CAAC,CAAC;IACxB,CAAC;IAED,IAAI,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAE,CAAC;QACpC,gCAAgC;QAChC,MAAM,OAAO,GAA2C;YACvD,OAAO,EAAE,GAAG;YACZ,GAAG,EAAE,IAAI;YACT,MAAM,EAAE,IAAI;YACZ,IAAI,EAAE,KAAK;SACX,CAAC;QACF,OAAO,OAAO,CAAC,MAAM,CAAC,CAAC;IACxB,CAAC;IAED,8BAA8B;IAC9B,OAAO,CAAC,CAAC,CAAC;AAAA,CACV","sourcesContent":["import { type AnthropicOptions, streamAnthropic } from \"./providers/anthropic.js\";\nimport { type GoogleOptions, streamGoogle } from \"./providers/google.js\";\nimport { type OpenAICompletionsOptions, streamOpenAICompletions } from \"./providers/openai-completions.js\";\nimport { type OpenAIResponsesOptions, streamOpenAIResponses } from \"./providers/openai-responses.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tAssistantMessageEventStream,\n\tContext,\n\tKnownProvider,\n\tModel,\n\tOptionsForApi,\n\tReasoningEffort,\n\tSimpleStreamOptions,\n} from \"./types.js\";\n\nconst apiKeys: Map<string, string> = new Map();\n\nexport function setApiKey(provider: KnownProvider, key: string): void;\nexport function setApiKey(provider: string, key: string): void;\nexport function setApiKey(provider: any, key: string): void {\n\tapiKeys.set(provider, key);\n}\n\nexport function getApiKey(provider: KnownProvider): string | undefined;\nexport function getApiKey(provider: string): string | undefined;\nexport function getApiKey(provider: any): string | undefined {\n\t// Check explicit keys first\n\tconst key = apiKeys.get(provider);\n\tif (key) return key;\n\n\t// Fall back to environment variables\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\tanthropic: \"ANTHROPIC_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? 
process.env[envVar] : undefined;\n}\n\nexport function stream<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\tconst providerOptions = { ...options, apiKey };\n\n\tconst api: Api = model.api;\n\tswitch (api) {\n\t\tcase \"anthropic-messages\":\n\t\t\treturn streamAnthropic(model as Model<\"anthropic-messages\">, context, providerOptions);\n\n\t\tcase \"openai-completions\":\n\t\t\treturn streamOpenAICompletions(model as Model<\"openai-completions\">, context, providerOptions as any);\n\n\t\tcase \"openai-responses\":\n\t\t\treturn streamOpenAIResponses(model as Model<\"openai-responses\">, context, providerOptions as any);\n\n\t\tcase \"google-generative-ai\":\n\t\t\treturn streamGoogle(model as Model<\"google-generative-ai\">, context, providerOptions);\n\n\t\tdefault: {\n\t\t\t// This should never be reached if all Api cases are handled\n\t\t\tconst _exhaustive: never = api;\n\t\t\tthrow new Error(`Unhandled API: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\nexport async function complete<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): Promise<AssistantMessage> {\n\tconst s = stream(model, context, options);\n\treturn s.result();\n}\n\nexport function streamSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst providerOptions = mapOptionsForApi(model, options, apiKey);\n\treturn stream(model, context, providerOptions);\n}\n\nexport async function completeSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): Promise<AssistantMessage> {\n\tconst s = streamSimple(model, context, options);\n\treturn s.result();\n}\n\nfunction mapOptionsForApi<TApi extends Api>(\n\tmodel: Model<TApi>,\n\toptions?: SimpleStreamOptions,\n\tapiKey?: string,\n): OptionsForApi<TApi> {\n\tconst base = {\n\t\ttemperature: options?.temperature,\n\t\tmaxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),\n\t\tsignal: options?.signal,\n\t\tapiKey: apiKey || options?.apiKey,\n\t};\n\n\t// Helper to clamp xhigh to high for providers that don't support it\n\tconst clampReasoning = (effort: ReasoningEffort | undefined) => (effort === \"xhigh\" ? 
\"high\" : effort);\n\n\tswitch (model.api) {\n\t\tcase \"anthropic-messages\": {\n\t\t\tif (!options?.reasoning) return base satisfies AnthropicOptions;\n\n\t\t\tconst anthropicBudgets = {\n\t\t\t\tminimal: 1024,\n\t\t\t\tlow: 2048,\n\t\t\t\tmedium: 8192,\n\t\t\t\thigh: 16384,\n\t\t\t};\n\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinkingEnabled: true,\n\t\t\t\tthinkingBudgetTokens: anthropicBudgets[clampReasoning(options.reasoning)!],\n\t\t\t} satisfies AnthropicOptions;\n\t\t}\n\n\t\tcase \"openai-completions\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAICompletionsOptions;\n\n\t\tcase \"openai-responses\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAIResponsesOptions;\n\n\t\tcase \"google-generative-ai\": {\n\t\t\tif (!options?.reasoning) return base as any;\n\n\t\t\tconst googleBudget = getGoogleBudget(\n\t\t\t\tmodel as Model<\"google-generative-ai\">,\n\t\t\t\tclampReasoning(options.reasoning)!,\n\t\t\t);\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinking: {\n\t\t\t\t\tenabled: true,\n\t\t\t\t\tbudgetTokens: googleBudget,\n\t\t\t\t},\n\t\t\t} satisfies GoogleOptions;\n\t\t}\n\n\t\tdefault: {\n\t\t\t// Exhaustiveness check\n\t\t\tconst _exhaustive: never = model.api;\n\t\t\tthrow new Error(`Unhandled API in mapOptionsForApi: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\ntype ClampedReasoningEffort = Exclude<ReasoningEffort, \"xhigh\">;\n\nfunction getGoogleBudget(model: Model<\"google-generative-ai\">, effort: ClampedReasoningEffort): number {\n\t// See https://ai.google.dev/gemini-api/docs/thinking#set-budget\n\tif (model.id.includes(\"2.5-pro\")) {\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 32768,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\tif (model.id.includes(\"2.5-flash\")) {\n\t\t// Covers 2.5-flash-lite as well\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 24576,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\t// Unknown model - use dynamic\n\treturn -1;\n}\n"]}
1
+ {"version":3,"file":"stream.js","sourceRoot":"","sources":["../src/stream.ts"],"names":[],"mappings":"AAAA,OAAO,EAAyB,eAAe,EAAE,MAAM,0BAA0B,CAAC;AAClF,OAAO,EAAsB,YAAY,EAAE,MAAM,uBAAuB,CAAC;AACzE,OAAO,EAAiC,uBAAuB,EAAE,MAAM,mCAAmC,CAAC;AAC3G,OAAO,EAA+B,qBAAqB,EAAE,MAAM,iCAAiC,CAAC;AAarG,MAAM,OAAO,GAAwB,IAAI,GAAG,EAAE,CAAC;AAI/C,MAAM,UAAU,SAAS,CAAC,QAAa,EAAE,GAAW,EAAQ;IAC3D,OAAO,CAAC,GAAG,CAAC,QAAQ,EAAE,GAAG,CAAC,CAAC;AAAA,CAC3B;AAID,MAAM,UAAU,SAAS,CAAC,QAAa,EAAsB;IAC5D,4BAA4B;IAC5B,MAAM,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC;IAClC,IAAI,GAAG;QAAE,OAAO,GAAG,CAAC;IAEpB,qCAAqC;IACrC,MAAM,MAAM,GAA2B;QACtC,MAAM,EAAE,gBAAgB;QACxB,SAAS,EAAE,mBAAmB;QAC9B,MAAM,EAAE,gBAAgB;QACxB,IAAI,EAAE,cAAc;QACpB,QAAQ,EAAE,kBAAkB;QAC5B,GAAG,EAAE,aAAa;QAClB,UAAU,EAAE,oBAAoB;QAChC,GAAG,EAAE,aAAa;QAClB,OAAO,EAAE,iBAAiB;KAC1B,CAAC;IAEF,MAAM,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC;IAChC,OAAO,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA,CAChD;AAED,MAAM,UAAU,MAAM,CACrB,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACC;IAC9B,MAAM,MAAM,GAAG,OAAO,EAAE,MAAM,IAAI,SAAS,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;IAC5D,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,4BAA4B,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC;IAC/D,CAAC;IACD,MAAM,eAAe,GAAG,EAAE,GAAG,OAAO,EAAE,MAAM,EAAE,CAAC;IAE/C,MAAM,GAAG,GAAQ,KAAK,CAAC,GAAG,CAAC;IAC3B,QAAQ,GAAG,EAAE,CAAC;QACb,KAAK,oBAAoB;YACxB,OAAO,eAAe,CAAC,KAAoC,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;QAExF,KAAK,oBAAoB;YACxB,OAAO,uBAAuB,CAAC,KAAoC,EAAE,OAAO,EAAE,eAAsB,CAAC,CAAC;QAEvG,KAAK,kBAAkB;YACtB,OAAO,qBAAqB,CAAC,KAAkC,EAAE,OAAO,EAAE,eAAsB,CAAC,CAAC;QAEnG,KAAK,sBAAsB;YAC1B,OAAO,YAAY,CAAC,KAAsC,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;QAEvF,SAAS,CAAC;YACT,4DAA4D;YAC5D,MAAM,WAAW,GAAU,GAAG,CAAC;YAC/B,MAAM,IAAI,KAAK,CAAC,kBAAkB,WAAW,EAAE,CAAC,CAAC;QAClD,CAAC;IACF,CAAC;AAAA,CACD;AAED,MAAM,CAAC,KAAK,UAAU,QAAQ,CAC7B,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACD;IAC5B,MAAM,CAAC,GAAG,MAAM,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;IAC1C,OAAO,CAAC,CAAC,MAAM,EAAE,CAAC;AAAA,CAClB;AAED,MAAM,UAAU,YAAY,CAC3B,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACC;IAC9B,MAAM,MAAM,GAAG,OAAO,EAAE,MAAM,IAAI,SAAS,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;IAC5D,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,4BAA4B,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC;IAC/D,CAAC;IAED,MAAM,eAAe,GAAG,gBAAgB,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IACjE,OAAO,MAAM,CAAC,KAAK,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;AAAA,CAC/C;AAED,MAAM,CAAC,KAAK,UAAU,cAAc,CACnC,KAAkB,EAClB,OAAgB,EAChB,OAA6B,EACD;IAC5B,MAAM,CAAC,GAAG,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;IAChD,OAAO,CAAC,CAAC,MAAM,EAAE,CAAC;AAAA,CAClB;AAED,SAAS,gBAAgB,CACxB,KAAkB,EAClB,OAA6B,EAC7B,MAAe,EACO;IACtB,MAAM,IAAI,GAAG;QACZ,WAAW,EAAE,OAAO,EAAE,WAAW;QACjC,SAAS,EAAE,OAAO,EAAE,SAAS,IAAI,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,SAAS,EAAE,KAAK,CAAC;QACjE,MAAM,EAAE,OAAO,EAAE,MAAM;QACvB,MAAM,EAAE,MAAM,IAAI,OAAO,EAAE,MAAM;KACjC,CAAC;IAEF,oEAAoE;IACpE,MAAM,cAAc,GAAG,CAAC,MAAmC,EAAE,EAAE,CAAC,CAAC,MAAM,KAAK,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;IAEvG,QAAQ,KAAK,CAAC,GAAG,EAAE,CAAC;QACnB,KAAK,oBAAoB,EAAE,CAAC;YAC3B,IAAI,CAAC,OAAO,EAAE,SAAS;gBAAE,OAAO,IAA+B,CAAC;YAEhE,MAAM,gBAAgB,GAAG;gBACxB,OAAO,EAAE,IAAI;gBACb,GAAG,EAAE,IAAI;gBACT,MAAM,EAAE,IAAI;gBACZ,IAAI,EAAE,KAAK;aACX,CAAC;YAEF,OAAO;gBACN,GAAG,IAAI;gBACP,eAAe,EAAE,IAAI;gBACrB,oBAAoB,EAAE,gBAAgB,CAAC,cAAc,CAAC,OAAO,CAAC,SAAS,CAAE,CAAC;aAC/C,CAAC;QAC9B,CAAC;QAED,KAAK,oBAAoB;YACxB,OAAO;gBACN,GAAG,IAAI;gBACP,eAAe,EAAE,OAAO,EAAE,SAAS;aACA,CAAC;QAEtC,KAAK,kBAAkB;YACtB,OAAO;gBACN,GAAG,IAAI;gBACP,eAAe,EAAE,OAAO,EAAE,SAAS;aACF,CAAC;QAEpC,KAAK,sBAAsB,EAAE,CAAC;YAC7B,IAAI,CAAC,OAAO,EAAE,SAAS;gBAAE,OAAO,IAAW,CAAC;YAE5C,MAAM,
YAAY,GAAG,eAAe,CACnC,KAAsC,EACtC,cAAc,CAAC,OAAO,CAAC,SAAS,CAAE,CAClC,CAAC;YACF,OAAO;gBACN,GAAG,IAAI;gBACP,QAAQ,EAAE;oBACT,OAAO,EAAE,IAAI;oBACb,YAAY,EAAE,YAAY;iBAC1B;aACuB,CAAC;QAC3B,CAAC;QAED,SAAS,CAAC;YACT,uBAAuB;YACvB,MAAM,WAAW,GAAU,KAAK,CAAC,GAAG,CAAC;YACrC,MAAM,IAAI,KAAK,CAAC,sCAAsC,WAAW,EAAE,CAAC,CAAC;QACtE,CAAC;IACF,CAAC;AAAA,CACD;AAID,SAAS,eAAe,CAAC,KAAoC,EAAE,MAA8B,EAAU;IACtG,gEAAgE;IAChE,IAAI,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,EAAE,CAAC;QAClC,MAAM,OAAO,GAA2C;YACvD,OAAO,EAAE,GAAG;YACZ,GAAG,EAAE,IAAI;YACT,MAAM,EAAE,IAAI;YACZ,IAAI,EAAE,KAAK;SACX,CAAC;QACF,OAAO,OAAO,CAAC,MAAM,CAAC,CAAC;IACxB,CAAC;IAED,IAAI,KAAK,CAAC,EAAE,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAE,CAAC;QACpC,gCAAgC;QAChC,MAAM,OAAO,GAA2C;YACvD,OAAO,EAAE,GAAG;YACZ,GAAG,EAAE,IAAI;YACT,MAAM,EAAE,IAAI;YACZ,IAAI,EAAE,KAAK;SACX,CAAC;QACF,OAAO,OAAO,CAAC,MAAM,CAAC,CAAC;IACxB,CAAC;IAED,8BAA8B;IAC9B,OAAO,CAAC,CAAC,CAAC;AAAA,CACV","sourcesContent":["import { type AnthropicOptions, streamAnthropic } from \"./providers/anthropic.js\";\nimport { type GoogleOptions, streamGoogle } from \"./providers/google.js\";\nimport { type OpenAICompletionsOptions, streamOpenAICompletions } from \"./providers/openai-completions.js\";\nimport { type OpenAIResponsesOptions, streamOpenAIResponses } from \"./providers/openai-responses.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tAssistantMessageEventStream,\n\tContext,\n\tKnownProvider,\n\tModel,\n\tOptionsForApi,\n\tReasoningEffort,\n\tSimpleStreamOptions,\n} from \"./types.js\";\n\nconst apiKeys: Map<string, string> = new Map();\n\nexport function setApiKey(provider: KnownProvider, key: string): void;\nexport function setApiKey(provider: string, key: string): void;\nexport function setApiKey(provider: any, key: string): void {\n\tapiKeys.set(provider, key);\n}\n\nexport function getApiKey(provider: KnownProvider): string | undefined;\nexport function getApiKey(provider: string): string | undefined;\nexport function getApiKey(provider: any): string | undefined {\n\t// Check explicit keys first\n\tconst key = apiKeys.get(provider);\n\tif (key) return key;\n\n\t// Fall back to environment variables\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\tanthropic: \"ANTHROPIC_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t\tmistral: \"MISTRAL_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? 
process.env[envVar] : undefined;\n}\n\nexport function stream<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\tconst providerOptions = { ...options, apiKey };\n\n\tconst api: Api = model.api;\n\tswitch (api) {\n\t\tcase \"anthropic-messages\":\n\t\t\treturn streamAnthropic(model as Model<\"anthropic-messages\">, context, providerOptions);\n\n\t\tcase \"openai-completions\":\n\t\t\treturn streamOpenAICompletions(model as Model<\"openai-completions\">, context, providerOptions as any);\n\n\t\tcase \"openai-responses\":\n\t\t\treturn streamOpenAIResponses(model as Model<\"openai-responses\">, context, providerOptions as any);\n\n\t\tcase \"google-generative-ai\":\n\t\t\treturn streamGoogle(model as Model<\"google-generative-ai\">, context, providerOptions);\n\n\t\tdefault: {\n\t\t\t// This should never be reached if all Api cases are handled\n\t\t\tconst _exhaustive: never = api;\n\t\t\tthrow new Error(`Unhandled API: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\nexport async function complete<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: OptionsForApi<TApi>,\n): Promise<AssistantMessage> {\n\tconst s = stream(model, context, options);\n\treturn s.result();\n}\n\nexport function streamSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream {\n\tconst apiKey = options?.apiKey || getApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst providerOptions = mapOptionsForApi(model, options, apiKey);\n\treturn stream(model, context, providerOptions);\n}\n\nexport async function completeSimple<TApi extends Api>(\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): Promise<AssistantMessage> {\n\tconst s = streamSimple(model, context, options);\n\treturn s.result();\n}\n\nfunction mapOptionsForApi<TApi extends Api>(\n\tmodel: Model<TApi>,\n\toptions?: SimpleStreamOptions,\n\tapiKey?: string,\n): OptionsForApi<TApi> {\n\tconst base = {\n\t\ttemperature: options?.temperature,\n\t\tmaxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),\n\t\tsignal: options?.signal,\n\t\tapiKey: apiKey || options?.apiKey,\n\t};\n\n\t// Helper to clamp xhigh to high for providers that don't support it\n\tconst clampReasoning = (effort: ReasoningEffort | undefined) => (effort === \"xhigh\" ? 
\"high\" : effort);\n\n\tswitch (model.api) {\n\t\tcase \"anthropic-messages\": {\n\t\t\tif (!options?.reasoning) return base satisfies AnthropicOptions;\n\n\t\t\tconst anthropicBudgets = {\n\t\t\t\tminimal: 1024,\n\t\t\t\tlow: 2048,\n\t\t\t\tmedium: 8192,\n\t\t\t\thigh: 16384,\n\t\t\t};\n\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinkingEnabled: true,\n\t\t\t\tthinkingBudgetTokens: anthropicBudgets[clampReasoning(options.reasoning)!],\n\t\t\t} satisfies AnthropicOptions;\n\t\t}\n\n\t\tcase \"openai-completions\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAICompletionsOptions;\n\n\t\tcase \"openai-responses\":\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\treasoningEffort: options?.reasoning,\n\t\t\t} satisfies OpenAIResponsesOptions;\n\n\t\tcase \"google-generative-ai\": {\n\t\t\tif (!options?.reasoning) return base as any;\n\n\t\t\tconst googleBudget = getGoogleBudget(\n\t\t\t\tmodel as Model<\"google-generative-ai\">,\n\t\t\t\tclampReasoning(options.reasoning)!,\n\t\t\t);\n\t\t\treturn {\n\t\t\t\t...base,\n\t\t\t\tthinking: {\n\t\t\t\t\tenabled: true,\n\t\t\t\t\tbudgetTokens: googleBudget,\n\t\t\t\t},\n\t\t\t} satisfies GoogleOptions;\n\t\t}\n\n\t\tdefault: {\n\t\t\t// Exhaustiveness check\n\t\t\tconst _exhaustive: never = model.api;\n\t\t\tthrow new Error(`Unhandled API in mapOptionsForApi: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\ntype ClampedReasoningEffort = Exclude<ReasoningEffort, \"xhigh\">;\n\nfunction getGoogleBudget(model: Model<\"google-generative-ai\">, effort: ClampedReasoningEffort): number {\n\t// See https://ai.google.dev/gemini-api/docs/thinking#set-budget\n\tif (model.id.includes(\"2.5-pro\")) {\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 32768,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\tif (model.id.includes(\"2.5-flash\")) {\n\t\t// Covers 2.5-flash-lite as well\n\t\tconst budgets: Record<ClampedReasoningEffort, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 24576,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\t// Unknown model - use dynamic\n\treturn -1;\n}\n"]}
package/dist/types.d.ts CHANGED
@@ -12,7 +12,7 @@ export interface ApiOptionsMap {
12
12
  "google-generative-ai": GoogleOptions;
13
13
  }
14
14
  export type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];
15
- export type KnownProvider = "anthropic" | "google" | "openai" | "xai" | "groq" | "cerebras" | "openrouter" | "zai";
15
+ export type KnownProvider = "anthropic" | "google" | "openai" | "xai" | "groq" | "cerebras" | "openrouter" | "zai" | "mistral";
16
16
  export type Provider = KnownProvider | string;
17
17
  export type ReasoningEffort = "minimal" | "low" | "medium" | "high" | "xhigh";
18
18
  export interface StreamOptions {
@@ -166,6 +166,14 @@ export interface OpenAICompat {
166
166
  supportsReasoningEffort?: boolean;
167
167
  /** Which field to use for max tokens. Default: auto-detected from URL. */
168
168
  maxTokensField?: "max_completion_tokens" | "max_tokens";
169
+ /** Whether tool results require the `name` field. Default: auto-detected from URL. */
170
+ requiresToolResultName?: boolean;
171
+ /** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */
172
+ requiresAssistantAfterToolResult?: boolean;
173
+ /** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */
174
+ requiresThinkingAsText?: boolean;
175
+ /** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. */
176
+ requiresMistralToolIds?: boolean;
169
177
  }
170
178
  export interface Model<TApi extends Api> {
171
179
  id: string;
@@ -1 +1 @@
1
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,uBAAuB,CAAC;AAC3D,OAAO,KAAK,EAAE,wBAAwB,EAAE,MAAM,mCAAmC,CAAC;AAClF,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,iCAAiC,CAAC;AAC9E,OAAO,KAAK,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,YAAY,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,MAAM,MAAM,GAAG,GAAG,oBAAoB,GAAG,kBAAkB,GAAG,oBAAoB,GAAG,sBAAsB,CAAC;AAE5G,MAAM,WAAW,aAAa;IAC7B,oBAAoB,EAAE,gBAAgB,CAAC;IACvC,oBAAoB,EAAE,wBAAwB,CAAC;IAC/C,kBAAkB,EAAE,sBAAsB,CAAC;IAC3C,sBAAsB,EAAE,aAAa,CAAC;CACtC;AAWD,MAAM,MAAM,aAAa,CAAC,IAAI,SAAS,GAAG,IAAI,aAAa,CAAC,IAAI,CAAC,CAAC;AAElE,MAAM,MAAM,aAAa,GAAG,WAAW,GAAG,QAAQ,GAAG,QAAQ,GAAG,KAAK,GAAG,MAAM,GAAG,UAAU,GAAG,YAAY,GAAG,KAAK,CAAC;AACnH,MAAM,MAAM,QAAQ,GAAG,aAAa,GAAG,MAAM,CAAC;AAE9C,MAAM,MAAM,eAAe,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAG9E,MAAM,WAAW,aAAa;IAC7B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,WAAW,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;CAChB;AAGD,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACzD,SAAS,CAAC,EAAE,eAAe,CAAC;CAC5B;AAGD,MAAM,MAAM,cAAc,CAAC,IAAI,SAAS,GAAG,IAAI,CAC9C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,EAAE,aAAa,CAAC,IAAI,CAAC,KACxB,2BAA2B,CAAC;AAEjC,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,aAAa,CAAC,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,eAAe;IAC/B,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,iBAAiB,CAAC,EAAE,MAAM,CAAC;CAC3B;AAED,MAAM,WAAW,YAAY;IAC5B,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,QAAQ;IACxB,IAAI,EAAE,UAAU,CAAC;IACjB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC/B,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC1B;AAED,MAAM,WAAW,KAAK;IACrB,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC;CACF;AAED,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,GAAG,OAAO,GAAG,SAAS,CAAC;AAE7E,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,GAAG,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACjD,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,gBAAgB;IAChC,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,CAAC,WAAW,GAAG,eAAe,GAAG,QAAQ,CAAC,EAAE,CAAC;IACtD,GAAG,EAAE,GAAG,CAAC;IACT,QAAQ,EAAE,QAAQ,CAAC;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,KAAK,CAAC;IACb,UAAU,EAAE,UAAU,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,iBAAiB,CAAC,QAAQ,GAAG,GAAG;IAChD,IAAI,EAAE,YAAY,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACxC,OAAO,CAAC,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,OAAO,GAAG,WAAW,GAAG,gBAAgB,GAAG,iBAAiB,CAAC;AAEzE,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAEjD,MAAM,WAAW,IAAI,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO;IAC1D,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,WAAW,CAAC;CACxB;AAED,MAAM,WAAW,OAAO;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;CACf;AAED,MAAM,MAAM,qBAAqB,GAC9B;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC5C;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvE;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,UAAU,CAAC
;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,QAAQ,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC7F;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvG;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,SAAS,GAAG,OAAO,CAAC,CAAC;IAAC,KAAK,EAAE,gBAAgB,CAAA;CAAE,CAAC;AAEhG;;;GAGG;AACH,MAAM,WAAW,YAAY;IAC5B,wFAAwF;IACxF,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,yGAAyG;IACzG,qBAAqB,CAAC,EAAE,OAAO,CAAC;IAChC,yFAAyF;IACzF,uBAAuB,CAAC,EAAE,OAAO,CAAC;IAClC,0EAA0E;IAC1E,cAAc,CAAC,EAAE,uBAAuB,GAAG,YAAY,CAAC;CACxD;AAGD,MAAM,WAAW,KAAK,CAAC,IAAI,SAAS,GAAG;IACtC,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,GAAG,EAAE,IAAI,CAAC;IACV,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,EAAE,OAAO,CAAC;IACnB,KAAK,EAAE,CAAC,MAAM,GAAG,OAAO,CAAC,EAAE,CAAC;IAC5B,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,kGAAkG;IAClG,MAAM,CAAC,EAAE,IAAI,SAAS,oBAAoB,GAAG,YAAY,GAAG,KAAK,CAAC;CAClE","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api = \"openai-completions\" | \"openai-responses\" | \"anthropic-messages\" | \"google-generative-ai\";\n\nexport interface ApiOptionsMap {\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? 
true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider = \"anthropic\" | \"google\" | \"openai\" | \"xai\" | \"groq\" | \"cerebras\" | \"openrouter\" | \"zai\";\nexport type Provider = KnownProvider | string;\n\nexport type ReasoningEffort = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ReasoningEffort;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: 
\"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,uBAAuB,CAAC;AAC3D,OAAO,KAAK,EAAE,wBAAwB,EAAE,MAAM,mCAAmC,CAAC;AAClF,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,iCAAiC,CAAC;AAC9E,OAAO,KAAK,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,YAAY,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,MAAM,MAAM,GAAG,GAAG,oBAAoB,GAAG,kBAAkB,GAAG,oBAAoB,GAAG,sBAAsB,CAAC;AAE5G,MAAM,WAAW,aAAa;IAC7B,oBAAoB,EAAE,gBAAgB,CAAC;IACvC,oBAAoB,EAAE,wBAAwB,CAAC;IAC/C,kBAAkB,EAAE,sBAAsB,CAAC;IAC3C,sBAAsB,EAAE,aAAa,CAAC;CACtC;AAWD,MAAM,MAAM,aAAa,CAAC,IAAI,SAAS,GAAG,IAAI,aAAa,CAAC,IAAI,CAAC,CAAC;AAElE,MAAM,MAAM,aAAa,GACtB,WAAW,GACX,QAAQ,GACR,QAAQ,GACR,KAAK,GACL,MAAM,GACN,UAAU,GACV,YAAY,GACZ,KAAK,GACL,SAAS,CAAC;AACb,MAAM,MAAM,QAAQ,GAAG,aAAa,GAAG,MAAM,CAAC;AAE9C,MAAM,MAAM,eAAe,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAG9E,MAAM,WAAW,aAAa;IAC7B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,WAAW,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;CAChB;AAGD,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACzD,SAAS,CAAC,EAAE,eAAe,CAAC;CAC5B;AAGD,MAAM,MAAM,cAAc,CAAC,IAAI,SAAS,GAAG,IAAI,CAC9C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,EAAE,aAAa,CAAC,IAAI,CAAC,KACxB,2BAA2B,CAAC;AAEjC,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,aAAa,CAAC,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,eAAe;IAC/B,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,iBAAiB,CAAC,EAAE,MAAM,CAAC;CAC3B;AAED,MAAM,WAAW,YAAY;IAC5B,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,QAAQ;IACxB,IAAI,EAAE,UAAU,CAAC;IACjB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC/B,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC1B;AAED,MAAM,WAAW,KAAK;IACrB,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC;CACF;AAED,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,GAAG,OAAO,GAAG,SAAS,CAAC;AAE7E,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,GAAG,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACjD,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,gBAAgB;IAChC,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,CAAC,WAAW,GAAG,eAAe,GAAG,QAAQ,CAAC,EAAE,CAAC;IACtD,GAAG,EAAE,GAAG,CAAC;IACT,QAAQ,EAAE,QAAQ,CAAC;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,KAAK,CAAC;IACb,UAAU,EAAE,UAAU,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,iBAAiB,CAAC,QAAQ,GAAG,GAAG;IAChD,IAAI,EAAE,YAAY,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACxC,OAAO,CAAC,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,OAAO,GAAG,WAAW,GAAG,gBAAgB,GAAG,iBAAiB,CAAC;AAEzE,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAEjD,MAAM,WAAW,IAAI,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO;IAC1D,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,WAAW,CAAC;CACxB;AAED,MAAM,WAAW,OAAO;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;CACf;AAED,MAAM,MAAM,qBAAqB,GAC9B;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC5C;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvE;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE
,UAAU,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,QAAQ,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC7F;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvG;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,SAAS,GAAG,OAAO,CAAC,CAAC;IAAC,KAAK,EAAE,gBAAgB,CAAA;CAAE,CAAC;AAEhG;;;GAGG;AACH,MAAM,WAAW,YAAY;IAC5B,wFAAwF;IACxF,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,yGAAyG;IACzG,qBAAqB,CAAC,EAAE,OAAO,CAAC;IAChC,yFAAyF;IACzF,uBAAuB,CAAC,EAAE,OAAO,CAAC;IAClC,0EAA0E;IAC1E,cAAc,CAAC,EAAE,uBAAuB,GAAG,YAAY,CAAC;IACxD,sFAAsF;IACtF,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,2HAA2H;IAC3H,gCAAgC,CAAC,EAAE,OAAO,CAAC;IAC3C,4HAA4H;IAC5H,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,kIAAkI;IAClI,sBAAsB,CAAC,EAAE,OAAO,CAAC;CACjC;AAGD,MAAM,WAAW,KAAK,CAAC,IAAI,SAAS,GAAG;IACtC,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,GAAG,EAAE,IAAI,CAAC;IACV,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,EAAE,OAAO,CAAC;IACnB,KAAK,EAAE,CAAC,MAAM,GAAG,OAAO,CAAC,EAAE,CAAC;IAC5B,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,kGAAkG;IAClG,MAAM,CAAC,EAAE,IAAI,SAAS,oBAAoB,GAAG,YAAY,GAAG,KAAK,CAAC;CAClE","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api = \"openai-completions\" | \"openai-responses\" | \"anthropic-messages\" | \"google-generative-ai\";\n\nexport interface ApiOptionsMap {\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? 
true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider =\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"openai\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"zai\"\n\t| \"mistral\";\nexport type Provider = KnownProvider | string;\n\nexport type ReasoningEffort = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ReasoningEffort;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; 
partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. */\n\trequiresMistralToolIds?: boolean;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
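The updated types.d.ts.map above carries the two user-visible type additions in this release: "mistral" joins KnownProvider, and OpenAICompat gains four request-shaping flags (requiresToolResultName, requiresAssistantAfterToolResult, requiresThinkingAsText, requiresMistralToolIds). A minimal sketch of a custom model entry that sets them explicitly, assuming Model is re-exported from the package root; the id, baseUrl, pricing, and limits below are illustrative placeholders, not values shipped by the package:

```typescript
import type { Model } from "@mariozechner/pi-ai";

// Hypothetical custom model entry; id, baseUrl, cost, and limits are placeholders.
// The compat block overrides the URL-based auto-detection for providers that
// need Mistral-style request shaping on the openai-completions code path.
const mistralLarge: Model<"openai-completions"> = {
	id: "mistral-large-latest", // placeholder id
	name: "Mistral Large",
	api: "openai-completions",
	provider: "mistral",
	baseUrl: "https://api.mistral.ai/v1", // assumed endpoint
	reasoning: false,
	input: ["text"],
	cost: { input: 2, output: 6, cacheRead: 0, cacheWrite: 0 }, // $/M tokens, illustrative
	contextWindow: 128_000,
	maxTokens: 8_192,
	compat: {
		maxTokensField: "max_tokens",
		requiresToolResultName: true, // tool results must carry the `name` field
		requiresAssistantAfterToolResult: true, // no user message directly after tool results
		requiresThinkingAsText: true, // thinking blocks become <thinking>-delimited text
		requiresMistralToolIds: true, // normalize tool call IDs to 9 alphanumeric chars
	},
};
```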
package/dist/types.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAuBA,MAAM,WAAW,GAAqB,IAAI,CAAC","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api = \"openai-completions\" | \"openai-responses\" | \"anthropic-messages\" | \"google-generative-ai\";\n\nexport interface ApiOptionsMap {\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider = \"anthropic\" | \"google\" | \"openai\" | \"xai\" | \"groq\" | \"cerebras\" | \"openrouter\" | \"zai\";\nexport type Provider = KnownProvider | string;\n\nexport type ReasoningEffort = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ReasoningEffort;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; 
// Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? 
OpenAICompat : never;\n}\n"]}
+ {"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAuBA,MAAM,WAAW,GAAqB,IAAI,CAAC","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api = \"openai-completions\" | \"openai-responses\" | \"anthropic-messages\" | \"google-generative-ai\";\n\nexport interface ApiOptionsMap {\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider =\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"openai\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"zai\"\n\t| \"mistral\";\nexport type Provider = KnownProvider | string;\n\nexport type ReasoningEffort = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ReasoningEffort;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | 
(TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. 
*/\n\trequiresMistralToolIds?: boolean;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
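The same sourcesContent also carries the AssistantMessageEvent union that every provider stream emits. A sketch of a consumer that narrows on event.type; the handler and logging are illustrative, and the import again assumes a root re-export, but the payload fields are exactly those declared in the union:

```typescript
import type { AssistantMessageEvent } from "@mariozechner/pi-ai";

// Illustrative consumer: each case narrows to the payload declared in the union.
function handleEvent(event: AssistantMessageEvent): void {
	switch (event.type) {
		case "text_delta":
			process.stdout.write(event.delta); // stream assistant text as it arrives
			break;
		case "toolcall_end":
			console.log(`tool call ${event.toolCall.name}`, event.toolCall.arguments);
			break;
		case "done":
			// reason is narrowed to "stop" | "length" | "toolUse"
			console.log(`finished (${event.reason}), cost $${event.message.usage.cost.total}`);
			break;
		case "error":
			// reason is "aborted" | "error"; event.error is the failed AssistantMessage
			console.error(event.error.errorMessage ?? event.reason);
			break;
	}
}
```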
package/dist/utils/overflow.d.ts CHANGED
@@ -17,6 +17,7 @@ import type { AssistantMessage } from "../types.js";
  * - xAI (Grok): "maximum prompt length is X but request contains Y"
  * - Groq: "reduce the length of the messages"
  * - Cerebras: 400/413 status code (no body)
+ * - Mistral: 400/413 status code (no body)
  * - OpenRouter (all backends): "maximum context length is X tokens"
  * - llama.cpp: "exceeds the available context size"
  * - LM Studio: "greater than the context length"
package/dist/utils/overflow.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"overflow.d.ts","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AAoCpD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA0CG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,gBAAgB,EAAE,aAAa,CAAC,EAAE,MAAM,GAAG,OAAO,CAuB5F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,IAAI,MAAM,EAAE,CAE9C","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context length exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. 
Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras returns 400/413 with no body - check for status code pattern\n\t\tif (/^4(00|13)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
+ {"version":3,"file":"overflow.d.ts","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AAqCpD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,gBAAgB,EAAE,aAAa,CAAC,EAAE,MAAM,GAAG,OAAO,CAuB5F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,IAAI,MAAM,EAAE,CAE9C","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - Mistral: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context length exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. 
For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - Mistral: 400/413 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras and Mistral return 400/413 with no body - check for status code pattern\n\t\tif (/^4(00|13)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
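isContextOverflow, whose full source is embedded in the map above, handles both error-based overflow (pattern match on errorMessage) and silent overflow (usage.input + usage.cacheRead exceeding the window when contextWindow is passed). A usage sketch, assuming the function and types are re-exported from the package root; the compaction policy is a hypothetical stand-in for caller logic:

```typescript
import { isContextOverflow } from "@mariozechner/pi-ai";
import type { AssistantMessage, Context } from "@mariozechner/pi-ai";

// Hypothetical retry policy: on overflow, drop the oldest half of the
// messages and signal the caller to retry. contextWindow is optional but
// catches the z.ai case, where the provider accepts the overflow and the
// response "succeeds" anyway.
function compactOnOverflow(message: AssistantMessage, context: Context, contextWindow?: number): boolean {
	if (!isContextOverflow(message, contextWindow)) return false;
	context.messages = context.messages.slice(Math.floor(context.messages.length / 2));
	return true; // caller retries with the trimmed context
}
```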
package/dist/utils/overflow.js CHANGED
@@ -15,6 +15,7 @@
  * - llama.cpp: "the request exceeds the available context size, try increasing it"
  * - LM Studio: "tokens to keep from the initial prompt is greater than the context length"
  * - Cerebras: Returns "400 status code (no body)" - handled separately below
+ * - Mistral: Returns "400 status code (no body)" - handled separately below
  * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
  * - Ollama: Silently truncates input - not detectable via error message
  */
@@ -49,6 +50,7 @@ const OVERFLOW_PATTERNS = [
  * - xAI (Grok): "maximum prompt length is X but request contains Y"
  * - Groq: "reduce the length of the messages"
  * - Cerebras: 400/413 status code (no body)
+ * - Mistral: 400/413 status code (no body)
  * - OpenRouter (all backends): "maximum context length is X tokens"
  * - llama.cpp: "exceeds the available context size"
  * - LM Studio: "greater than the context length"
@@ -81,7 +83,7 @@ export function isContextOverflow(message, contextWindow) {
  if (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage))) {
  return true;
  }
- // Cerebras returns 400/413 with no body - check for status code pattern
+ // Cerebras and Mistral return 400/413 with no body - check for status code pattern
  if (/^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message.errorMessage)) {
  return true;
  }
  }
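The hunk above is the only behavioral touchpoint of this release: the bare status-code fallback now speaks for Mistral as well as Cerebras, using the unchanged regex. A quick check of what that pattern accepts; the test strings follow the shape quoted in the comments, not captured provider output:

```typescript
// Same pattern as in isContextOverflow: 400 or 413, then an optional
// "status code", then "(no body)".
const noBody = /^4(00|13)\s*(status code)?\s*\(no body\)/i;

console.log(noBody.test("400 status code (no body)")); // true
console.log(noBody.test("413 (no body)")); // true
console.log(noBody.test("404 status code (no body)")); // false - only 400 and 413 match
```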
package/dist/utils/overflow.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA;;;;;;;;;;;;;;;;;;;GAmBG;AACH,MAAM,iBAAiB,GAAG;IACzB,qBAAqB,EAAE,YAAY;IACnC,6BAA6B,EAAE,uCAAuC;IACtE,yCAAyC,EAAE,kBAAkB;IAC7D,+BAA+B,EAAE,aAAa;IAC9C,oCAAoC,EAAE,OAAO;IAC7C,uCAAuC,EAAE,4BAA4B;IACrE,qCAAqC,EAAE,mBAAmB;IAC1D,kCAAkC,EAAE,YAAY;IAChD,0BAA0B,EAAE,mBAAmB;IAC/C,kBAAkB,EAAE,mBAAmB;IACvC,uBAAuB,EAAE,mBAAmB;CAC5C,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA0CG;AACH,MAAM,UAAU,iBAAiB,CAAC,OAAyB,EAAE,aAAsB,EAAW;IAC7F,uCAAuC;IACvC,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC5D,uBAAuB;QACvB,IAAI,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,YAAa,CAAC,CAAC,EAAE,CAAC;YAClE,OAAO,IAAI,CAAC;QACb,CAAC;QAED,wEAAwE;QACxE,IAAI,2CAA2C,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC;YAC5E,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,8EAA8E;IAC9E,IAAI,aAAa,IAAI,OAAO,CAAC,UAAU,KAAK,MAAM,EAAE,CAAC;QACpD,MAAM,WAAW,GAAG,OAAO,CAAC,KAAK,CAAC,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC,SAAS,CAAC;QAClE,IAAI,WAAW,GAAG,aAAa,EAAE,CAAC;YACjC,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,OAAO,KAAK,CAAC;AAAA,CACb;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,GAAa;IAC/C,OAAO,CAAC,GAAG,iBAAiB,CAAC,CAAC;AAAA,CAC9B","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context length exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. 
For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras returns 400/413 with no body - check for status code pattern\n\t\tif (/^4(00|13)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
+ {"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,MAAM,iBAAiB,GAAG;IACzB,qBAAqB,EAAE,YAAY;IACnC,6BAA6B,EAAE,uCAAuC;IACtE,yCAAyC,EAAE,kBAAkB;IAC7D,+BAA+B,EAAE,aAAa;IAC9C,oCAAoC,EAAE,OAAO;IAC7C,uCAAuC,EAAE,4BAA4B;IACrE,qCAAqC,EAAE,mBAAmB;IAC1D,kCAAkC,EAAE,YAAY;IAChD,0BAA0B,EAAE,mBAAmB;IAC/C,kBAAkB,EAAE,mBAAmB;IACvC,uBAAuB,EAAE,mBAAmB;CAC5C,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,MAAM,UAAU,iBAAiB,CAAC,OAAyB,EAAE,aAAsB,EAAW;IAC7F,uCAAuC;IACvC,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC5D,uBAAuB;QACvB,IAAI,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,YAAa,CAAC,CAAC,EAAE,CAAC;YAClE,OAAO,IAAI,CAAC;QACb,CAAC;QAED,mFAAmF;QACnF,IAAI,2CAA2C,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC;YAC5E,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,8EAA8E;IAC9E,IAAI,aAAa,IAAI,OAAO,CAAC,UAAU,KAAK,MAAM,EAAE,CAAC;QACpD,MAAM,WAAW,GAAG,OAAO,CAAC,KAAK,CAAC,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC,SAAS,CAAC;QAClE,IAAI,WAAW,GAAG,aAAa,EAAE,CAAC;YACjC,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,OAAO,KAAK,CAAC;AAAA,CACb;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,GAAa;IAC/C,OAAO,CAAC,GAAG,iBAAiB,CAAC,CAAC;AAAA,CAC9B","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - Mistral: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context length exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. 
Silent overflow: Some providers accept overflow requests and return\n * successfully. For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - Mistral: 400/413 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras and Mistral return 400/413 with no body - check for status code pattern\n\t\tif (/^4(00|13)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
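For providers not covered by OVERFLOW_PATTERNS, the doc comment embedded above suggests checking errorMessage yourself before calling isContextOverflow. A sketch of that wrapper; the provider pattern is hypothetical and the import assumes a root re-export:

```typescript
import { isContextOverflow } from "@mariozechner/pi-ai";
import type { AssistantMessage } from "@mariozechner/pi-ai";

// Hypothetical overflow message for a custom provider added via settings.json.
const MY_PROVIDER_OVERFLOW = /request too large for this model/i;

function isOverflow(message: AssistantMessage, contextWindow?: number): boolean {
	if (message.stopReason === "error" && message.errorMessage && MY_PROVIDER_OVERFLOW.test(message.errorMessage)) {
		return true; // custom pattern matched first
	}
	return isContextOverflow(message, contextWindow); // built-in patterns + silent overflow
}
```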
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@mariozechner/pi-ai",
- "version": "0.18.0",
+ "version": "0.18.2",
  "description": "Unified LLM API with automatic model discovery and provider configuration",
  "type": "module",
  "main": "./dist/index.js",
@@ -22,6 +22,7 @@
  "dependencies": {
  "@anthropic-ai/sdk": "0.71.2",
  "@google/genai": "1.31.0",
+ "@mistralai/mistralai": "1.10.0",
  "@sinclair/typebox": "^0.34.41",
  "ajv": "^8.17.1",
  "ajv-formats": "^3.0.1",