@mariozechner/pi-ai 0.37.7 → 0.38.0
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- package/dist/models.generated.d.ts +17 -136
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +22 -141
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/openai-codex/prompts/codex.d.ts +2 -2
- package/dist/providers/openai-codex/prompts/codex.d.ts.map +1 -1
- package/dist/providers/openai-codex/prompts/codex.js +7 -7
- package/dist/providers/openai-codex/prompts/codex.js.map +1 -1
- package/dist/providers/openai-codex/request-transformer.d.ts +0 -1
- package/dist/providers/openai-codex/request-transformer.d.ts.map +1 -1
- package/dist/providers/openai-codex/request-transformer.js +12 -130
- package/dist/providers/openai-codex/request-transformer.js.map +1 -1
- package/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/dist/providers/openai-codex-responses.js +65 -8
- package/dist/providers/openai-codex-responses.js.map +1 -1
- package/dist/stream.d.ts.map +1 -1
- package/dist/stream.js +13 -6
- package/dist/stream.js.map +1 -1
- package/dist/types.d.ts +9 -0
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/overflow.d.ts.map +1 -1
- package/dist/utils/overflow.js +1 -1
- package/dist/utils/overflow.js.map +1 -1
- package/package.json +1 -1
package/dist/stream.js.map
CHANGED
@@ -1 +1 @@
(old and new source maps omitted: each is a single-line, machine-generated mappings blob. The embedded sourcesContent for src/stream.ts carries the substantive change: the type import list gains ThinkingBudgets; in mapOptionsForApi, the anthropic-messages and google-gemini-cli token-budget paths now merge their hard-coded default budgets with options?.thinkingBudgets; and getGoogleBudget gains an optional customBudgets parameter, passed as options?.thinkingBudgets from the google-generative-ai and google-vertex cases, which overrides the per-level defaults when set.)
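Excerpted from the new map's embedded sourcesContent for src/stream.ts (elisions marked with comments), the hard-coded per-level thinking budgets become overridable defaults:

    // In mapOptionsForApi, for the anthropic-messages and google-gemini-cli
    // token-budget paths, caller-supplied budgets shadow the built-in defaults:
    const defaultBudgets: ThinkingBudgets = {
        minimal: 1024,
        low: 2048,
        medium: 8192,
        high: 16384,
    };
    const budgets = { ...defaultBudgets, ...options?.thinkingBudgets };

    const minOutputTokens = 1024;
    const level = clampReasoning(options.reasoning)!;
    let thinkingBudget = budgets[level]!;

    // getGoogleBudget (called by the google-generative-ai and google-vertex
    // cases, which pass options?.thinkingBudgets) gains an optional parameter:
    function getGoogleBudget(
        model: Model<"google-generative-ai">,
        effort: ClampedThinkingLevel,
        customBudgets?: ThinkingBudgets,
    ): number {
        // Custom budgets take precedence if provided for this level
        if (customBudgets?.[effort] !== undefined) {
            return customBudgets[effort]!;
        }
        // ... per-model default budget tables unchanged from 0.37.7 ...
        return -1; // unknown model: dynamic budget, as before
    }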
package/dist/types.d.ts
CHANGED
@@ -21,6 +21,13 @@ export type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];
 export type KnownProvider = "anthropic" | "google" | "google-gemini-cli" | "google-antigravity" | "google-vertex" | "openai" | "openai-codex" | "github-copilot" | "xai" | "groq" | "cerebras" | "openrouter" | "zai" | "mistral";
 export type Provider = KnownProvider | string;
 export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh";
+/** Token budgets for each thinking level (token-based providers only) */
+export interface ThinkingBudgets {
+    minimal?: number;
+    low?: number;
+    medium?: number;
+    high?: number;
+}
 export interface StreamOptions {
     temperature?: number;
     maxTokens?: number;
@@ -35,6 +42,8 @@ export interface StreamOptions {
 }
 export interface SimpleStreamOptions extends StreamOptions {
     reasoning?: ThinkingLevel;
+    /** Custom token budgets for thinking levels (token-based providers only) */
+    thinkingBudgets?: ThinkingBudgets;
 }
 export type StreamFunction<TApi extends Api> = (model: Model<TApi>, context: Context, options: OptionsForApi<TApi>) => AssistantMessageEventStream;
 export interface TextContent {
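Putting the new option together with the stream.ts change above, a minimal usage sketch (assuming the package root re-exports completeSimple and these types as in dist/; the model lookup and the budget value are illustrative):

    import { completeSimple } from "@mariozechner/pi-ai";
    import type { Context, Model } from "@mariozechner/pi-ai";

    // Obtained elsewhere; how models are looked up is outside this diff.
    declare const model: Model<"google-generative-ai">;

    const context: Context = {
        messages: [{ role: "user", content: "Hello", timestamp: Date.now() }],
    };

    const reply = await completeSimple(model, context, {
        reasoning: "high",
        // New in 0.38.0: override the built-in per-level thinking budgets.
        // Levels left undefined fall back to the provider defaults.
        thinkingBudgets: { high: 20000 },
    });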
package/dist/types.d.ts.map
CHANGED
@@ -1 +1 @@
(old and new source maps omitted: each is a single-line, machine-generated mappings blob, truncated in this capture. The embedded sourcesContent for src/types.ts mirrors the ThinkingBudgets additions shown in the types.d.ts diff above.)
interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. 
*/\n\trequiresMistralToolIds?: boolean;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,uBAAuB,CAAC;AAC3D,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,kCAAkC,CAAC;AAC/E,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,8BAA8B,CAAC;AACxE,OAAO,KAAK,EAAE,2BAA2B,EAAE,MAAM,uCAAuC,CAAC;AACzF,OAAO,KAAK,EAAE,wBAAwB,EAAE,MAAM,mCAAmC,CAAC;AAClF,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,iCAAiC,CAAC;AAC9E,OAAO,KAAK,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,YAAY,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,MAAM,MAAM,GAAG,GACZ,oBAAoB,GACpB,kBAAkB,GAClB,wBAAwB,GACxB,oBAAoB,GACpB,sBAAsB,GACtB,mBAAmB,GACnB,eAAe,CAAC;AAEnB,MAAM,WAAW,aAAa;IAC7B,oBAAoB,EAAE,gBAAgB,CAAC;IACvC,oBAAoB,EAAE,wBAAwB,CAAC;IAC/C,kBAAkB,EAAE,sBAAsB,CAAC;IAC3C,wBAAwB,EAAE,2BAA2B,CAAC;IACtD,sBAAsB,EAAE,aAAa,CAAC;IACtC,mBAAmB,EAAE,sBAAsB,CAAC;IAC5C,eAAe,EAAE,mBAAmB,CAAC;CACrC;AAWD,MAAM,MAAM,aAAa,CAAC,IAAI,SAAS,GAAG,IAAI,aAAa,CAAC,IAAI,CAAC,CAAC;AAElE,MAAM,MAAM,aAAa,GACtB,WAAW,GACX,QAAQ,GACR,mBAAmB,GACnB,oBAAoB,GACpB,eAAe,GACf,QAAQ,GACR,cAAc,GACd,gBAAgB,GAChB,KAAK,GACL,MAAM,GACN,UAAU,GACV,YAAY,GACZ,KAAK,GACL,SAAS,CAAC;AACb,MAAM,MAAM,QAAQ,GAAG,aAAa,GAAG,MAAM,CAAC;AAE9C,MAAM,MAAM,aAAa,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAE5E,yEAAyE;AACzE,MAAM,WAAW,eAAe;IAC/B,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,IAAI,CAAC,EAAE,MAAM,CAAC;CACd;AAGD,MAAM,WAAW,aAAa;IAC7B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,WAAW,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;CACnB;AAGD,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACzD,SAAS,CAAC,EAAE,aAAa,CAAC;IAC1B,4EAA4E;IAC5E,eAAe,CAAC,EAAE,eAAe,CAAC;CAClC;AAGD,MAAM,MAAM,cAAc,CAAC,IAAI,SAAS,GAAG,IAAI,CAC9C,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,EAAE,aAAa,CAAC,IAAI,CAAC,KACxB,2BAA2B,CAAC;AAEjC,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,aAAa,CAAC,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,eAAe;IAC/B,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,iBAAiB,CAAC,EAAE,MAAM,CAAC;CAC3B;AAED,MAAM,WAAW,YAAY;IAC5B,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,QAAQ;IACxB,IAAI,EAAE,UAAU,CAAC;IACjB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC/B,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC1B;AAED,MAAM,WAAW,KAAK;IACrB,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC;CACF;AAED,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,GAAG,OAAO,GAAG,SAAS,CAAC;AAE7E,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,GAAG,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACjD,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,gBAAgB;IAChC,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,CAAC,WAAW,GAAG,eAAe,GAAG,QAAQ,CAAC,EAAE,CAAC;IACtD,GAAG,EAAE,GAAG,CAAC;IACT,QAAQ,EAAE,QAAQ,CAAC;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,KAAK,CAAC;IACb,UAAU,EAAE,UAAU,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,iBAAiB,CAAC,QAAQ,GAAG,GAAG;IAChD,IAAI,EAAE,YAAY,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACxC,OAAO,CAAC,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,OAAO,GAAG,WAAW,GAAG,gBAAgB,GAAG,iBAAiB,CAAC;AAEzE,OAAO,KAAK,EAAE,OAAO,E
AAE,MAAM,mBAAmB,CAAC;AAEjD,MAAM,WAAW,IAAI,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO;IAC1D,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,WAAW,CAAC;CACxB;AAED,MAAM,WAAW,OAAO;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;CACf;AAED,MAAM,MAAM,qBAAqB,GAC9B;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC5C;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvE;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,UAAU,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,QAAQ,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC7F;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvG;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,SAAS,GAAG,OAAO,CAAC,CAAC;IAAC,KAAK,EAAE,gBAAgB,CAAA;CAAE,CAAC;AAEhG;;;GAGG;AACH,MAAM,WAAW,YAAY;IAC5B,wFAAwF;IACxF,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,yGAAyG;IACzG,qBAAqB,CAAC,EAAE,OAAO,CAAC;IAChC,yFAAyF;IACzF,uBAAuB,CAAC,EAAE,OAAO,CAAC;IAClC,0EAA0E;IAC1E,cAAc,CAAC,EAAE,uBAAuB,GAAG,YAAY,CAAC;IACxD,sFAAsF;IACtF,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,2HAA2H;IAC3H,gCAAgC,CAAC,EAAE,OAAO,CAAC;IAC3C,4HAA4H;IAC5H,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,kIAAkI;IAClI,sBAAsB,CAAC,EAAE,OAAO,CAAC;CACjC;AAGD,MAAM,WAAW,KAAK,CAAC,IAAI,SAAS,GAAG;IACtC,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,GAAG,EAAE,IAAI,CAAC;IACV,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,EAAE,OAAO,CAAC;IACnB,KAAK,EAAE,CAAC,MAAM,GAAG,OAAO,CAAC,EAAE,CAAC;IAC5B,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,kGAAkG;IAClG,MAAM,CAAC,EAAE,IAAI,SAAS,oBAAoB,GAAG,YAAY,GAAG,KAAK,CAAC;CAClE","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { GoogleGeminiCliOptions } from \"./providers/google-gemini-cli.js\";\nimport type { GoogleVertexOptions } from \"./providers/google-vertex.js\";\nimport type { OpenAICodexResponsesOptions } from \"./providers/openai-codex-responses.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api =\n\t| \"openai-completions\"\n\t| \"openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"google-generative-ai\"\n\t| \"google-gemini-cli\"\n\t| \"google-vertex\";\n\nexport interface ApiOptionsMap 
{\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"openai-codex-responses\": OpenAICodexResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n\t\"google-gemini-cli\": GoogleGeminiCliOptions;\n\t\"google-vertex\": GoogleVertexOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider =\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-gemini-cli\"\n\t| \"google-antigravity\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"openai-codex\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"zai\"\n\t| \"mistral\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets {\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. 
Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; 
delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. */\n\trequiresMistralToolIds?: boolean;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
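Beyond the sourcemap churn, the substantive change in this file is the new `ThinkingBudgets` interface and the optional `thinkingBudgets` field on `SimpleStreamOptions`. A minimal sketch of how a caller might use it, assuming the types are re-exported from the package root (that re-export, the concrete budget numbers, and the options shape are taken from the doc comments embedded above, not verified against the public API):

```ts
import type { SimpleStreamOptions, ThinkingBudgets } from "@mariozechner/pi-ai"; // assumed root re-export

// Custom token budgets per thinking level; per the new interface these
// apply to token-based providers only, and "xhigh" has no budget slot.
const thinkingBudgets: ThinkingBudgets = {
	low: 2048,
	medium: 8192,
	high: 24576,
};

// SimpleStreamOptions is the options bag for streamSimple()/completeSimple()
// per the source comment: `reasoning` selects the level, `thinkingBudgets`
// overrides that level's default token budget where the provider supports it.
const options: SimpleStreamOptions = {
	reasoning: "medium",
	thinkingBudgets,
	maxTokens: 4096,
};
```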
package/dist/types.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAoCA,MAAM,WAAW,GAAqB,IAAI,CAAC","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { GoogleGeminiCliOptions } from \"./providers/google-gemini-cli.js\";\nimport type { GoogleVertexOptions } from \"./providers/google-vertex.js\";\nimport type { OpenAICodexResponsesOptions } from \"./providers/openai-codex-responses.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api =\n\t| \"openai-completions\"\n\t| \"openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"google-generative-ai\"\n\t| \"google-gemini-cli\"\n\t| \"google-vertex\";\n\nexport interface ApiOptionsMap {\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"openai-codex-responses\": OpenAICodexResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n\t\"google-gemini-cli\": GoogleGeminiCliOptions;\n\t\"google-vertex\": GoogleVertexOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider =\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-gemini-cli\"\n\t| \"google-antigravity\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"openai-codex\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"zai\"\n\t| \"mistral\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. 
Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; 
partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. */\n\trequiresMistralToolIds?: boolean;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
+
{"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAoCA,MAAM,WAAW,GAAqB,IAAI,CAAC","sourcesContent":["import type { AnthropicOptions } from \"./providers/anthropic.js\";\nimport type { GoogleOptions } from \"./providers/google.js\";\nimport type { GoogleGeminiCliOptions } from \"./providers/google-gemini-cli.js\";\nimport type { GoogleVertexOptions } from \"./providers/google-vertex.js\";\nimport type { OpenAICodexResponsesOptions } from \"./providers/openai-codex-responses.js\";\nimport type { OpenAICompletionsOptions } from \"./providers/openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./providers/openai-responses.js\";\nimport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type Api =\n\t| \"openai-completions\"\n\t| \"openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"google-generative-ai\"\n\t| \"google-gemini-cli\"\n\t| \"google-vertex\";\n\nexport interface ApiOptionsMap {\n\t\"anthropic-messages\": AnthropicOptions;\n\t\"openai-completions\": OpenAICompletionsOptions;\n\t\"openai-responses\": OpenAIResponsesOptions;\n\t\"openai-codex-responses\": OpenAICodexResponsesOptions;\n\t\"google-generative-ai\": GoogleOptions;\n\t\"google-gemini-cli\": GoogleGeminiCliOptions;\n\t\"google-vertex\": GoogleVertexOptions;\n}\n\n// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys\ntype _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>\n\t? Record<Api, StreamOptions> extends ApiOptionsMap\n\t\t? true\n\t\t: [\"ApiOptionsMap is missing some KnownApi values\", Exclude<Api, keyof ApiOptionsMap>]\n\t: [\"ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>\"];\nconst _exhaustive: _CheckExhaustive = true;\n\n// Helper type to get options for a specific API\nexport type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];\n\nexport type KnownProvider =\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-gemini-cli\"\n\t| \"google-antigravity\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"openai-codex\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"zai\"\n\t| \"mistral\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets {\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. 
Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n}\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions: OptionsForApi<TApi>,\n) => AssistantMessageEventStream;\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, the message ID\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; 
delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for openai-completions API.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. */\n\trequiresMistralToolIds?: boolean;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\" ? OpenAICompat : never;\n}\n"]}
package/dist/utils/overflow.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"overflow.d.ts","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AAuCpD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,gBAAgB,EAAE,aAAa,CAAC,EAAE,MAAM,GAAG,OAAO,CAwB5F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,IAAI,MAAM,EAAE,CAE9C","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - GitHub Copilot: \"prompt token count of X exceeds the limit of Y\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - Mistral: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the limit of \\d+/i, // GitHub Copilot\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context length exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. 
For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413/429 status code (no body)\n * - Mistral: 400/413/429 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras and Mistral return 400/413/429 with no body - check for status code pattern\n\t\t// 429 can indicate token-based rate limiting which correlates with context overflow\n\t\tif (/^4(00|13|29)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
+
{"version":3,"file":"overflow.d.ts","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AAuCpD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,gBAAgB,EAAE,aAAa,CAAC,EAAE,MAAM,GAAG,OAAO,CAwB5F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,IAAI,MAAM,EAAE,CAE9C","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - GitHub Copilot: \"prompt token count of X exceeds the limit of Y\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - Mistral: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the limit of \\d+/i, // GitHub Copilot\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context[_ ]length[_ ]exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. 
For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413/429 status code (no body)\n * - Mistral: 400/413/429 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras and Mistral return 400/413/429 with no body - check for status code pattern\n\t\t// 429 can indicate token-based rate limiting which correlates with context overflow\n\t\tif (/^4(00|13|29)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
package/dist/utils/overflow.js
CHANGED
@@ -30,7 +30,7 @@ const OVERFLOW_PATTERNS = [
 /exceeds the limit of \d+/i, // GitHub Copilot
 /exceeds the available context size/i, // llama.cpp server
 /greater than the context length/i, // LM Studio
-/context length exceeded/i, // Generic fallback
+/context[_ ]length[_ ]exceeded/i, // Generic fallback
 /too many tokens/i, // Generic fallback
 /token limit exceeded/i, // Generic fallback
 ];
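The only semantic change in this file: the generic fallback pattern gains a `[_ ]` character class, so it now matches snake_case error strings (such as the OpenAI-style `context_length_exceeded` code) in addition to the spaced phrase it matched before. A quick standalone sketch of what the widened regex does and does not accept:

```ts
// The character class [_ ] accepts either an underscore or a space
// between words; the /i flag keeps the match case-insensitive.
const pattern = /context[_ ]length[_ ]exceeded/i;

console.log(pattern.test("context length exceeded")); // true (matched before this change)
console.log(pattern.test("context_length_exceeded")); // true (new: snake_case error codes)
console.log(pattern.test("Context_length exceeded")); // true (mixed separators, any case)
console.log(pattern.test("context-length-exceeded")); // false (hyphens are not in the class)
```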
package/dist/utils/overflow.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,MAAM,iBAAiB,GAAG;IACzB,qBAAqB,EAAE,YAAY;IACnC,6BAA6B,EAAE,uCAAuC;IACtE,yCAAyC,EAAE,kBAAkB;IAC7D,+BAA+B,EAAE,aAAa;IAC9C,oCAAoC,EAAE,OAAO;IAC7C,uCAAuC,EAAE,4BAA4B;IACrE,2BAA2B,EAAE,iBAAiB;IAC9C,qCAAqC,EAAE,mBAAmB;IAC1D,kCAAkC,EAAE,YAAY;IAChD,
+
{"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,MAAM,iBAAiB,GAAG;IACzB,qBAAqB,EAAE,YAAY;IACnC,6BAA6B,EAAE,uCAAuC;IACtE,yCAAyC,EAAE,kBAAkB;IAC7D,+BAA+B,EAAE,aAAa;IAC9C,oCAAoC,EAAE,OAAO;IAC7C,uCAAuC,EAAE,4BAA4B;IACrE,2BAA2B,EAAE,iBAAiB;IAC9C,qCAAqC,EAAE,mBAAmB;IAC1D,kCAAkC,EAAE,YAAY;IAChD,gCAAgC,EAAE,mBAAmB;IACrD,kBAAkB,EAAE,mBAAmB;IACvC,uBAAuB,EAAE,mBAAmB;CAC5C,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,MAAM,UAAU,iBAAiB,CAAC,OAAyB,EAAE,aAAsB,EAAW;IAC7F,uCAAuC;IACvC,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC5D,uBAAuB;QACvB,IAAI,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,YAAa,CAAC,CAAC,EAAE,CAAC;YAClE,OAAO,IAAI,CAAC;QACb,CAAC;QAED,uFAAuF;QACvF,oFAAoF;QACpF,IAAI,8CAA8C,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC;YAC/E,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,8EAA8E;IAC9E,IAAI,aAAa,IAAI,OAAO,CAAC,UAAU,KAAK,MAAM,EAAE,CAAC;QACpD,MAAM,WAAW,GAAG,OAAO,CAAC,KAAK,CAAC,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC,SAAS,CAAC;QAClE,IAAI,WAAW,GAAG,aAAa,EAAE,CAAC;YACjC,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,OAAO,KAAK,CAAC;AAAA,CACb;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,GAAa;IAC/C,OAAO,CAAC,GAAG,iBAAiB,CAAC,CAAC;AAAA,CAC9B","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - GitHub Copilot: \"prompt token count of X exceeds the limit of Y\"\n * - Cerebras: Returns \"400 status code (no body)\" - handled separately below\n * - Mistral: Returns \"400 status code (no body)\" - handled separately below\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Silently truncates input - not detectable via error message\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the limit of \\d+/i, // GitHub Copilot\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context[_ ]length[_ ]exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. 
Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413/429 status code (no body)\n * - Mistral: 400/413/429 status code (no body)\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: Silently truncates input without error. Cannot be detected via this function.\n * The response will have usage.input < expected, but we don't know the expected value.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras and Mistral return 400/413/429 with no body - check for status code pattern\n\t\t// 429 can indicate token-based rate limiting which correlates with context overflow\n\t\tif (/^4(00|13|29)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
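For completeness, a hedged usage sketch of the `isContextOverflow` helper whose pattern table changed above. The deep import path is inferred from the `dist` layout shown in this diff, and the `AssistantMessage` root export is an assumption; both may differ from the package's public API:

```ts
import { isContextOverflow } from "@mariozechner/pi-ai/dist/utils/overflow.js"; // path assumed from this diff
import type { AssistantMessage } from "@mariozechner/pi-ai"; // assumed root re-export

// Per the doc comment, passing the model's context window also catches
// silent overflow (usage.input + usage.cacheRead > contextWindow, z.ai
// style) in addition to error-message pattern matches.
function shouldCompact(message: AssistantMessage, contextWindow: number): boolean {
	return isContextOverflow(message, contextWindow);
}
```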