@yourgpt/llm-sdk 0.1.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +209 -451
  11. package/dist/index.d.ts +209 -451
  12. package/dist/index.js +1905 -311
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1895 -309
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
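The headline change in 0.1.0 → 1.0.0 is the split of the single adapters bundle into per-provider entry points (anthropic, azure, google, ollama, openai, xai), each with its own dist build plus shared base/types chunks. As a rough sketch of the new surface, assuming package.json maps the new dist/providers/* folders to subpath exports (the package.json diff is not shown in this excerpt), the Azure provider added in this diff would be used roughly like the @example embedded in its source:

import { createAzure } from "@yourgpt/llm-sdk/providers/azure"; // subpath assumed from the dist layout

const azure = createAzure({
  apiKey: "...",                        // falls back to AZURE_OPENAI_API_KEY
  resourceName: "my-azure-resource",    // Azure resource name
  deploymentName: "gpt-4o-deployment",  // Azure deployment name
});

// languageModel() returns an AzureAdapter bound to a deployment;
// getCapabilities() infers vision/tool support from the deployment name.
const adapter = azure.languageModel("gpt-4o-deployment");
const caps = azure.getCapabilities("gpt-4o-deployment");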
package/dist/providers/azure/index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../src/adapters/base.ts","../../../src/adapters/azure.ts","../../../src/providers/azure/index.ts"],"names":["generateMessageId"],"mappings":";;;;;;;AAkGA,SAAS,sBAAsB,KAAA,EAMH;AAC1B,EAAA,MAAM,MAAA,GAAkC;AAAA,IACtC,MAAM,KAAA,CAAM;AAAA,GACd;AAEA,EAAA,IAAI,MAAM,WAAA,EAAa;AACrB,IAAA,MAAA,CAAO,cAAc,KAAA,CAAM,WAAA;AAAA,EAC7B;AAEA,EAAA,IAAI,MAAM,IAAA,EAAM;AACd,IAAA,MAAA,CAAO,OAAO,KAAA,CAAM,IAAA;AAAA,EACtB;AAGA,EAAA,IAAI,KAAA,CAAM,IAAA,KAAS,OAAA,IAAW,KAAA,CAAM,KAAA,EAAO;AACzC,IAAA,MAAA,CAAO,KAAA,GAAQ,qBAAA;AAAA,MACb,KAAA,CAAM;AAAA,KAOR;AAAA,EACF;AAGA,EAAA,IAAI,KAAA,CAAM,IAAA,KAAS,QAAA,IAAY,KAAA,CAAM,UAAA,EAAY;AAC/C,IAAA,MAAA,CAAO,aAAa,MAAA,CAAO,WAAA;AAAA,MACzB,MAAA,CAAO,OAAA,CAAQ,KAAA,CAAM,UAAU,CAAA,CAAE,IAAI,CAAC,CAAC,GAAA,EAAK,IAAI,CAAA,KAAM;AAAA,QACpD,GAAA;AAAA,QACA,qBAAA;AAAA,UACE;AAAA;AAOF,OACD;AAAA,KACH;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AAKO,SAAS,YAAY,OAAA,EAOzB;AACD,EAAA,OAAO,OAAA,CAAQ,GAAA,CAAI,CAAC,MAAA,MAAY;AAAA,IAC9B,IAAA,EAAM,UAAA;AAAA,IACN,QAAA,EAAU;AAAA,MACR,MAAM,MAAA,CAAO,IAAA;AAAA,MACb,aAAa,MAAA,CAAO,WAAA;AAAA,MACpB,UAAA,EAAY;AAAA,QACV,IAAA,EAAM,QAAA;AAAA,QACN,UAAA,EAAY,MAAA,CAAO,UAAA,GACf,MAAA,CAAO,WAAA;AAAA,UACL,MAAA,CAAO,OAAA,CAAQ,MAAA,CAAO,UAAU,CAAA,CAAE,IAAI,CAAC,CAAC,GAAA,EAAK,KAAK,CAAA,KAAM;AAAA,YACtD,GAAA;AAAA,YACA,sBAAsB,KAAK;AAAA,WAC5B;AAAA,YAEH,EAAC;AAAA,QACL,QAAA,EAAU,MAAA,CAAO,UAAA,GACb,MAAA,CAAO,OAAA,CAAQ,OAAO,UAAU,CAAA,CAC7B,MAAA,CAAO,CAAC,GAAG,KAAK,CAAA,KAAM,KAAA,CAAM,QAAQ,CAAA,CACpC,GAAA,CAAI,CAAC,CAAC,GAAG,CAAA,KAAM,GAAG,CAAA,GACrB;AAAC;AACP;AACF,GACF,CAAE,CAAA;AACJ;AAoDO,SAAS,oBAAoB,OAAA,EAA2B;AAC7D,EAAA,MAAM,WAAA,GAAc,QAAQ,QAAA,EAAU,WAAA;AACtC,EAAA,OAAO,aAAa,IAAA,CAAK,CAAC,MAAM,CAAA,CAAE,IAAA,KAAS,OAAO,CAAA,IAAK,KAAA;AACzD;AA8EO,SAAS,wBACd,UAAA,EAC2B;AAC3B,EAAA,IAAI,UAAA,CAAW,IAAA,KAAS,OAAA,EAAS,OAAO,IAAA;AAExC,EAAA,IAAI,QAAA;AAGJ,EAAA,IAAI,WAAW,GAAA,EAAK;AAClB,IAAA,QAAA,GAAW,UAAA,CAAW,GAAA;AAAA,EACxB,CAAA,MAAA,IAAW,WAAW,IAAA,EAAM;AAE1B,IAAA,QAAA,GAAW,UAAA,CAAW,IAAA,CAAK,UAAA,CAAW,OAAO,CAAA,GACzC,UAAA,CAAW,IAAA,GACX,CAAA,KAAA,EAAQ,UAAA,CAAW,QAAA,IAAY,WAAW,CAAA,QAAA,EAAW,WAAW,IAAI,CAAA,CAAA;AAAA,EAC1E,CAAA,MAAO;AACL,IAAA,OAAO,IAAA;AAAA,EACT;AAEA,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,WAAA;AAAA,IACN,SAAA,EAAW;AAAA,MACT,GAAA,EAAK,QAAA;AAAA,MACL,MAAA,EAAQ;AAAA;AACV,GACF;AACF;AAqGO,SAAS,uBACd,OAAA,EAC+B;AAC/B,EAAA,MAAM,WAAA,GAAc,QAAQ,QAAA,EAAU,WAAA;AACtC,EAAA,MAAM,OAAA,GAAU,QAAQ,OAAA,IAAW,EAAA;AAGnC,EAAA,IAAI,CAAC,mBAAA,CAAoB,OAAO,CAAA,EAAG;AACjC,IAAA,OAAO,OAAA;AAAA,EACT;AAGA,EAAA,MAAM,SAA+B,EAAC;AAGtC,EAAA,IAAI,OAAA,EAAS;AACX,IAAA,MAAA,CAAO,KAAK,EAAE,IAAA,EAAM,MAAA,EAAQ,IAAA,EAAM,SAAS,CAAA;AAAA,EAC7C;AAGA,EAAA,IAAI,WAAA,EAAa;AACf,IAAA,KAAA,MAAW,cAAc,WAAA,EAAa;AACpC,MAAA,MAAM,UAAA,GAAa,wBAAwB,UAAU,CAAA;AACrD,MAAA,IAAI,UAAA,EAAY;AACd,QAAA,MAAA,CAAO,KAAK,UAAU,CAAA;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AA+IO,SAAS,uBAAA,CACd,UACA,YAAA,EACiB;AACjB,EAAA,MAAM,YAA6B,EAAC;AAGpC,EAAA,IAAI,YAAA,EAAc;AAChB,IAAA,SAAA,CAAU,KAAK,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAAA,EAC1D;AAEA,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,IAAI,GAAA,CAAI,SAAS,QAAA,EAAU;AACzB,MAAA,SAAA,CAAU,IAAA,CAAK,EAAE,IAAA,EAAM,QAAA,EAAU,SAAS,GAAA,CAAI,OAAA,IAAW,IAAI,CAAA;AAAA,IAC/D,CAAA,MAAA,IAAW,GAAA,CAAI,IAAA,KAAS,MAAA,EAAQ;AAC9B,MAAA,SAAA,CAAU,IAAA,CAAK;AAAA,QACb,IAAA,EAAM,MAAA;AAAA,QACN,OAAA,EAAS,uBAAuB,GAAG;AAAA,OACpC,CAAA;AAAA,IACH,CAAA,MAAA,IAAW,GAAA,CAAI,IAAA,KAAS,WAAA,EAAa;AACnC,MAAA,MAAM,YAAA,GAA8B;AAAA,QAClC,IAAA,EAAM,WAAA;AAAA,QACN,SAAS,GAAA,CAAI;AAAA,OACf;AACA,MAAA,IAAI,GAAA,CAAI,UAAA,IAAc,GAAA,CAAI,UAAA,CAAW,SAAS,CAAA,EAAG;AAC/C,QAAC,YAAA,CAAuD,aACtD
,GAAA,CAAI,UAAA;AAAA,MACR;AACA,MAAA,SAAA,CAAU,KAAK,YAAY,CAAA;AAAA,IAC7B,CAAA,MAAA,IAAW,GAAA,CAAI,IAAA,KAAS,MAAA,IAAU,IAAI,YAAA,EAAc;AAClD,MAAA,SAAA,CAAU,IAAA,CAAK;AAAA,QACb,IAAA,EAAM,MAAA;AAAA,QACN,OAAA,EAAS,IAAI,OAAA,IAAW,EAAA;AAAA,QACxB,cAAc,GAAA,CAAI;AAAA,OACnB,CAAA;AAAA,IACH;AAAA,EACF;AAEA,EAAA,OAAO,SAAA;AACT;;;ACtmBA,IAAM,mBAAA,GAAsB,oBAAA;AAK5B,SAAS,kBAAA,CACP,YAAA,EACA,cAAA,EACA,UAAA,EACQ;AACR,EAAA,OAAO,CAAA,QAAA,EAAW,YAAY,CAAA,qCAAA,EAAwC,cAAc,CAAA,CAAA;AACtF;AAWO,IAAM,eAAN,MAAyC;AAAA,EAO9C,YAAY,MAAA,EAA4B;AANxC,IAAA,IAAA,CAAS,QAAA,GAAW,OAAA;AAOlB,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,QAAQ,MAAA,CAAO,cAAA;AAAA,EACtB;AAAA,EAEA,MAAc,SAAA,GAAY;AACxB,IAAA,IAAI,CAAC,KAAK,MAAA,EAAQ;AAEhB,MAAA,MAAM,EAAE,WAAA,EAAY,GAAI,MAAM,OAAO,QAAQ,CAAA;AAE7C,MAAA,MAAM,UAAA,GAAa,IAAA,CAAK,MAAA,CAAO,UAAA,IAAc,mBAAA;AAC7C,MAAA,MAAM,QAAA,GACJ,IAAA,CAAK,MAAA,CAAO,OAAA,IACZ,kBAAA;AAAA,QACE,KAAK,MAAA,CAAO,YAAA;AAAA,QACZ,KAAK,MAAA,CAAO,cAEd,CAAA;AAEF,MAAA,IAAA,CAAK,MAAA,GAAS,IAAI,WAAA,CAAY;AAAA,QAC5B,MAAA,EAAQ,KAAK,MAAA,CAAO,MAAA;AAAA,QACpB,QAAA;AAAA,QACA,UAAA;AAAA,QACA,UAAA,EAAY,KAAK,MAAA,CAAO;AAAA,OACzB,CAAA;AAAA,IACH;AACA,IAAA,OAAO,IAAA,CAAK,MAAA;AAAA,EACd;AAAA,EAEA,OAAO,OAAO,OAAA,EAA6D;AACzE,IAAA,MAAM,MAAA,GAAS,MAAM,IAAA,CAAK,SAAA,EAAU;AAGpC,IAAA,IAAI,QAAA;AACJ,IAAA,IAAI,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,CAAY,SAAS,CAAA,EAAG;AAEzD,MAAA,MAAM,iBAAA,GAAoB,OAAA,CAAQ,WAAA,CAAY,GAAA,CAAI,CAAC,GAAA,KAAQ;AAEzD,QAAA,MAAM,cAAA,GACJ,GAAA,CAAI,WAAA,IACJ,KAAA,CAAM,OAAA,CAAQ,IAAI,WAAW,CAAA,IAC7B,GAAA,CAAI,WAAA,CAAY,MAAA,GAAS,CAAA;AAE3B,QAAA,IAAI,cAAA,EAAgB;AAElB,UAAA,MAAM,UAA0C,EAAC;AAGjD,UAAA,IAAI,IAAI,OAAA,EAAS;AACf,YAAA,OAAA,CAAQ,KAAK,EAAE,IAAA,EAAM,QAAQ,IAAA,EAAM,GAAA,CAAI,SAAS,CAAA;AAAA,UAClD;AAGA,UAAA,KAAA,MAAW,UAAA,IAAc,IAAI,WAAA,EAIzB;AACF,YAAA,IAAI,UAAA,CAAW,SAAS,OAAA,EAAS;AAE/B,cAAA,IAAI,WAAW,UAAA,CAAW,IAAA;AAC1B,cAAA,IAAI,CAAC,QAAA,CAAS,UAAA,CAAW,OAAO,CAAA,EAAG;AACjC,gBAAA,QAAA,GAAW,QAAQ,UAAA,CAAW,QAAA,IAAY,WAAW,CAAA,QAAA,EAAW,WAAW,IAAI,CAAA,CAAA;AAAA,cACjF;AACA,cAAA,OAAA,CAAQ,IAAA,CAAK;AAAA,gBACX,IAAA,EAAM,WAAA;AAAA,gBACN,SAAA,EAAW,EAAE,GAAA,EAAK,QAAA,EAAU,QAAQ,MAAA;AAAO,eAC5C,CAAA;AAAA,YACH;AAAA,UACF;AAEA,UAAA,OAAO,EAAE,GAAG,GAAA,EAAK,OAAA,EAAS,aAAa,MAAA,EAAU;AAAA,QACnD;AACA,QAAA,OAAO,GAAA;AAAA,MACT,CAAC,CAAA;AAGD,MAAA,IAAI,QAAQ,YAAA,EAAc;AACxB,QAAA,MAAM,YAAY,iBAAA,CAAkB,IAAA,CAAK,CAAC,CAAA,KAAM,CAAA,CAAE,SAAS,QAAQ,CAAA;AACnE,QAAA,IAAI,CAAC,SAAA,EAAW;AACd,UAAA,QAAA,GAAW;AAAA,YACT,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,QAAQ,YAAA,EAAa;AAAA,YAChD,GAAG;AAAA,WACL;AAAA,QACF,CAAA,MAAO;AACL,UAAA,QAAA,GAAW,iBAAA;AAAA,QACb;AAAA,MACF,CAAA,MAAO;AACL,QAAA,QAAA,GAAW,iBAAA;AAAA,MACb;AAAA,IACF,CAAA,MAAO;AAEL,MAAA,QAAA,GAAW,uBAAA;AAAA,QACT,OAAA,CAAQ,QAAA;AAAA,QACR,OAAA,CAAQ;AAAA,OACV;AAAA,IACF;AAEA,IAAA,MAAM,QAAQ,OAAA,CAAQ,OAAA,EAAS,SAC3B,WAAA,CAAY,OAAA,CAAQ,OAAO,CAAA,GAC3B,MAAA;AAEJ,IAAA,MAAM,YAAYA,sBAAA,EAAkB;AAGpC,IAAA,MAAM,EAAE,IAAA,EAAM,eAAA,EAAiB,EAAA,EAAI,SAAA,EAAU;AAE7C,IAAA,IAAI;AACF,MAAA,MAAM,MAAA,GAAS,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA;AAAA,QAElD,KAAA,EAAO,KAAK,MAAA,CAAO,cAAA;AAAA,QACnB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,WAAA,EAAa,OAAA,CAAQ,MAAA,EAAQ,WAAA,IAAe,KAAK,MAAA,CAAO,WAAA;AAAA,QACxD,UAAA,EAAY,OAAA,CAAQ,MAAA,EAAQ,SAAA,IAAa,KAAK,MAAA,CAAO,SAAA;AAAA,QACrD,MAAA,EAAQ;AAAA,OACT,CAAA;AAED,MAAA,IAAI,eAAA,GAIO,IAAA;AAEX,MAAA,WAAA,MAAiB,SAAS,MAAA,EAAQ;AAEhC,QAAA,IAAI,OAAA,CAAQ,QAAQ,OAAA,EAAS;AAC3B,UAAA;AAAA,QACF;AAEA,QAAA,MAAM,KAAA,GAAQ,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,KAAA;AAGhC,QAAA,IAAI,OAAO,OAAA,EAAS;AAClB,UAAA,MAAM,EAAE,IAAA,EAAM,eAAA,EAAiB,OAAA,EAAS,MAAM,OAAA,EAAQ;AAAA,QACxD;AAGA,QAAA,IAAI,OAAO,UA
AA,EAAY;AACrB,UAAA,KAAA,MAAW,QAAA,IAAY,MAAM,UAAA,EAAY;AAEvC,YAAA,IAAI,SAAS,EAAA,EAAI;AAEf,cAAA,IAAI,eAAA,EAAiB;AACnB,gBAAA,MAAM;AAAA,kBACJ,IAAA,EAAM,aAAA;AAAA,kBACN,IAAI,eAAA,CAAgB,EAAA;AAAA,kBACpB,MAAM,eAAA,CAAgB;AAAA,iBACxB;AAAA,cACF;AAEA,cAAA,eAAA,GAAkB;AAAA,gBAChB,IAAI,QAAA,CAAS,EAAA;AAAA,gBACb,IAAA,EAAM,QAAA,CAAS,QAAA,EAAU,IAAA,IAAQ,EAAA;AAAA,gBACjC,SAAA,EAAW,QAAA,CAAS,QAAA,EAAU,SAAA,IAAa;AAAA,eAC7C;AAEA,cAAA,MAAM;AAAA,gBACJ,IAAA,EAAM,cAAA;AAAA,gBACN,IAAI,eAAA,CAAgB,EAAA;AAAA,gBACpB,MAAM,eAAA,CAAgB;AAAA,eACxB;AAAA,YACF,CAAA,MAAA,IAAW,eAAA,IAAmB,QAAA,CAAS,QAAA,EAAU,SAAA,EAAW;AAE1D,cAAA,eAAA,CAAgB,SAAA,IAAa,SAAS,QAAA,CAAS,SAAA;AAAA,YACjD;AAAA,UACF;AAAA,QACF;AAGA,QAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,aAAA,EAAe;AAEnC,UAAA,IAAI,eAAA,EAAiB;AACnB,YAAA,MAAM;AAAA,cACJ,IAAA,EAAM,aAAA;AAAA,cACN,IAAI,eAAA,CAAgB,EAAA;AAAA,cACpB,MAAM,eAAA,CAAgB;AAAA,aACxB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAGA,MAAA,MAAM,EAAE,MAAM,aAAA,EAAc;AAC5B,MAAA,MAAM,EAAE,MAAM,MAAA,EAAO;AAAA,IACvB,SAAS,KAAA,EAAO;AACd,MAAA,MAAM;AAAA,QACJ,IAAA,EAAM,OAAA;AAAA,QACN,OAAA,EAAS,KAAA,YAAiB,KAAA,GAAQ,KAAA,CAAM,OAAA,GAAU,eAAA;AAAA,QAClD,IAAA,EAAM;AAAA,OACR;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,SAAS,OAAA,EAA2D;AACxE,IAAA,MAAM,MAAA,GAAS,MAAM,IAAA,CAAK,SAAA,EAAU;AAEpC,IAAA,IAAI,QAAA;AACJ,IAAA,IAAI,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,CAAY,SAAS,CAAA,EAAG;AACzD,MAAA,QAAA,GAAW,OAAA,CAAQ,WAAA;AACnB,MAAA,IAAI,QAAQ,YAAA,EAAc;AACxB,QAAA,MAAM,YAAY,QAAA,CAAS,IAAA,CAAK,CAAC,CAAA,KAAM,CAAA,CAAE,SAAS,QAAQ,CAAA;AAC1D,QAAA,IAAI,CAAC,SAAA,EAAW;AACd,UAAA,QAAA,GAAW;AAAA,YACT,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,QAAQ,YAAA,EAAa;AAAA,YAChD,GAAG;AAAA,WACL;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAA,MAAO;AACL,MAAA,QAAA,GAAW,uBAAA;AAAA,QACT,OAAA,CAAQ,QAAA;AAAA,QACR,OAAA,CAAQ;AAAA,OACV;AAAA,IACF;AAEA,IAAA,MAAM,QAAQ,OAAA,CAAQ,OAAA,EAAS,SAC3B,WAAA,CAAY,OAAA,CAAQ,OAAO,CAAA,GAC3B,MAAA;AAEJ,IAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACpD,KAAA,EAAO,KAAK,MAAA,CAAO,cAAA;AAAA,MACnB,QAAA;AAAA,MACA,KAAA;AAAA,MACA,WAAA,EAAa,OAAA,CAAQ,MAAA,EAAQ,WAAA,IAAe,KAAK,MAAA,CAAO,WAAA;AAAA,MACxD,UAAA,EAAY,OAAA,CAAQ,MAAA,EAAQ,SAAA,IAAa,KAAK,MAAA,CAAO;AAAA,KACtD,CAAA;AAED,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,IAAA,MAAM,UAAU,MAAA,EAAQ,OAAA;AAExB,IAAA,MAAM,aAAa,OAAA,EAAS,UAAA,IAAc,EAAC,EAAG,GAAA,CAAI,CAAC,EAAA,MAAa;AAAA,MAC9D,IAAI,EAAA,CAAG,EAAA;AAAA,MACP,IAAA,EAAM,GAAG,QAAA,CAAS,IAAA;AAAA,MAClB,MAAM,IAAA,CAAK,KAAA,CAAM,EAAA,CAAG,QAAA,CAAS,aAAa,IAAI;AAAA,KAChD,CAAE,CAAA;AAEF,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,SAAS,OAAA,IAAW,EAAA;AAAA,MAC7B,SAAA;AAAA,MACA,WAAA,EAAa;AAAA,KACf;AAAA,EACF;AACF,CAAA;AAKO,SAAS,mBAAmB,MAAA,EAA0C;AAC3E,EAAA,OAAO,IAAI,aAAa,MAAM,CAAA;AAChC;;;ACtSA,SAAS,iCAAiC,cAAA,EAIxC;AACA,EAAA,MAAM,IAAA,GAAO,eAAe,WAAA,EAAY;AAGxC,EAAA,IAAI,KAAK,QAAA,CAAS,QAAQ,KAAK,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA,EAAG;AACrD,IAAA,OAAO,EAAE,MAAA,EAAQ,IAAA,EAAM,KAAA,EAAO,IAAA,EAAM,WAAW,KAAA,EAAO;AAAA,EACxD;AAGA,EAAA,IAAA,CACG,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA,IAAK,KAAK,QAAA,CAAS,MAAM,CAAA,MAC9C,IAAA,CAAK,SAAS,OAAO,CAAA,IAAK,IAAA,CAAK,QAAA,CAAS,QAAQ,CAAA,CAAA,EACjD;AACA,IAAA,OAAO,EAAE,MAAA,EAAQ,IAAA,EAAM,KAAA,EAAO,IAAA,EAAM,WAAW,KAAA,EAAO;AAAA,EACxD;AAGA,EAAA,IAAI,KAAK,QAAA,CAAS,OAAO,KAAK,IAAA,CAAK,QAAA,CAAS,MAAM,CAAA,EAAG;AACnD,IAAA,OAAO,EAAE,MAAA,EAAQ,KAAA,EAAO,KAAA,EAAO,IAAA,EAAM,WAAW,IAAA,EAAK;AAAA,EACvD;AAGA,EAAA,IACE,IAAA,CAAK,QAAA,CAAS,QAAQ,CAAA,IACtB,IAAA,CAAK,QAAA,CAAS,SAAS,CAAA,IACvB,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA,EACrB;AACA,IAAA,OAAO,EAAE,MAAA,EAAQ,KAAA,EAAO,KAAA,EAAO,IAAA,EAAM,WAAW,KAAA,EAAM;AAAA,EACxD;AAGA,EAAA,IAAI,IAAA,CAAK,QAAA,CAAS,IAAI,CAAA,EAAG;AACvB,IAAA,
OAAO,EAAE,MAAA,EAAQ,IAAA,EAAM,KAAA,EAAO,KAAA,EAAO,WAAW,KAAA,EAAO;AAAA,EACzD;AAGA,EAAA,OAAO,EAAE,MAAA,EAAQ,KAAA,EAAO,KAAA,EAAO,IAAA,EAAM,WAAW,IAAA,EAAK;AACvD;AAoBO,SAAS,YAAY,MAAA,EAAyC;AACnE,EAAA,MAAM,MAAA,GAAS,MAAA,CAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,oBAAA,IAAwB,EAAA;AACpE,EAAA,MAAM,YAAA,GACJ,MAAA,CAAO,YAAA,IAAgB,OAAA,CAAQ,IAAI,qBAAA,IAAyB,EAAA;AAC9D,EAAA,MAAM,iBAAA,GACJ,MAAA,CAAO,cAAA,IAAkB,OAAA,CAAQ,IAAI,uBAAA,IAA2B,EAAA;AAIlE,EAAA,MAAM,eAAA,GAAkB,iBAAA,GAAoB,CAAC,iBAAiB,IAAI,EAAC;AAEnE,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,OAAA;AAAA,IACN,eAAA;AAAA,IAEA,cAAc,cAAA,EAAwB;AACpC,MAAA,OAAO,kBAAA,CAAmB;AAAA,QACxB,MAAA;AAAA,QACA,YAAA;AAAA,QACA,gBAAgB,cAAA,IAAkB,iBAAA;AAAA,QAClC,YAAY,MAAA,CAAO,UAAA;AAAA,QACnB,SAAS,MAAA,CAAO;AAAA,OACjB,CAAA;AAAA,IACH,CAAA;AAAA,IAEA,gBAAgB,cAAA,EAA8C;AAC5D,MAAA,MAAM,QAAA,GAAW,gCAAA;AAAA,QACf,cAAA,IAAkB;AAAA,OACpB;AAEA,MAAA,OAAO;AAAA,QACL,gBAAgB,QAAA,CAAS,MAAA;AAAA,QACzB,eAAe,QAAA,CAAS,KAAA;AAAA,QACxB,gBAAA,EAAkB,KAAA;AAAA,QAClB,iBAAA,EAAmB,IAAA;AAAA,QACnB,WAAA,EAAa,KAAA;AAAA,QACb,aAAA,EAAe,KAAA;AAAA,QACf,aAAA,EAAe,KAAA;AAAA,QACf,WAAW,QAAA,CAAS,SAAA;AAAA,QACpB,mBAAA,EAAqB,SAAS,MAAA,GAC1B,CAAC,aAAa,YAAA,EAAc,WAAA,EAAa,YAAY,CAAA,GACrD,EAAC;AAAA,QACL,gBAAA,EAAkB,IAAA;AAAA,QAClB,sBAAA,EAAwB;AAAA,OAC1B;AAAA,IACF;AAAA,GACF;AACF;AAGO,IAAM,mBAAA,GAAsB","file":"index.js","sourcesContent":["import type {\n Message,\n MessageAttachment,\n ActionDefinition,\n StreamEvent,\n LLMConfig,\n} from \"@yourgpt/copilot-sdk/core\";\n\n/**\n * Chat completion request\n */\nexport interface ChatCompletionRequest {\n /** Conversation messages */\n messages: Message[];\n /**\n * Raw provider-formatted messages (for agent loop with tool calls)\n * When provided, these are used instead of converting from Message[]\n * This allows passing messages with tool_calls and tool role\n */\n rawMessages?: Array<Record<string, unknown>>;\n /** Available actions/tools */\n actions?: ActionDefinition[];\n /** System prompt */\n systemPrompt?: string;\n /** LLM configuration overrides */\n config?: Partial<LLMConfig>;\n /** Abort signal for cancellation */\n signal?: AbortSignal;\n}\n\n/**\n * Non-streaming completion result\n */\nexport interface CompletionResult {\n /** Text content */\n content: string;\n /** Tool calls */\n toolCalls: Array<{ id: string; name: string; args: Record<string, unknown> }>;\n /** Thinking content (if extended thinking enabled) */\n thinking?: string;\n /** Raw provider response for debugging */\n rawResponse: Record<string, unknown>;\n}\n\n/**\n * Base LLM adapter interface\n */\nexport interface LLMAdapter {\n /** Provider name */\n readonly provider: string;\n\n /** Model name */\n readonly model: string;\n\n /**\n * Stream a chat completion\n */\n stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;\n\n /**\n * Non-streaming chat completion (for debugging/comparison)\n */\n complete?(request: ChatCompletionRequest): Promise<CompletionResult>;\n}\n\n/**\n * Adapter factory function type\n */\nexport type AdapterFactory = (config: LLMConfig) => LLMAdapter;\n\n/**\n * Convert messages to provider format (simple text only)\n */\nexport function formatMessages(\n messages: Message[],\n systemPrompt?: string,\n): Array<{ role: string; content: string }> {\n const formatted: Array<{ role: string; content: string }> = [];\n\n // Add system prompt if provided\n if (systemPrompt) {\n formatted.push({ role: \"system\", content: systemPrompt });\n }\n\n // Add conversation messages\n for (const msg of messages) {\n formatted.push({\n role: msg.role,\n content: msg.content ?? 
\"\",\n });\n }\n\n return formatted;\n}\n\n/**\n * Convert ActionParameter to JSON Schema format recursively\n */\nfunction parameterToJsonSchema(param: {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n}): Record<string, unknown> {\n const schema: Record<string, unknown> = {\n type: param.type,\n };\n\n if (param.description) {\n schema.description = param.description;\n }\n\n if (param.enum) {\n schema.enum = param.enum;\n }\n\n // Handle array items\n if (param.type === \"array\" && param.items) {\n schema.items = parameterToJsonSchema(\n param.items as {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n },\n );\n }\n\n // Handle nested object properties\n if (param.type === \"object\" && param.properties) {\n schema.properties = Object.fromEntries(\n Object.entries(param.properties).map(([key, prop]) => [\n key,\n parameterToJsonSchema(\n prop as {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n },\n ),\n ]),\n );\n }\n\n return schema;\n}\n\n/**\n * Convert actions to OpenAI tool format\n */\nexport function formatTools(actions: ActionDefinition[]): Array<{\n type: \"function\";\n function: {\n name: string;\n description: string;\n parameters: object;\n };\n}> {\n return actions.map((action) => ({\n type: \"function\" as const,\n function: {\n name: action.name,\n description: action.description,\n parameters: {\n type: \"object\",\n properties: action.parameters\n ? Object.fromEntries(\n Object.entries(action.parameters).map(([key, param]) => [\n key,\n parameterToJsonSchema(param),\n ]),\n )\n : {},\n required: action.parameters\n ? Object.entries(action.parameters)\n .filter(([, param]) => param.required)\n .map(([key]) => key)\n : [],\n },\n },\n }));\n}\n\n// ============================================\n// Vision/Multimodal Support\n// ============================================\n\n/**\n * Content block types for multimodal messages\n */\nexport type AnthropicContentBlock =\n | { type: \"text\"; text: string }\n | {\n type: \"image\";\n source:\n | {\n type: \"base64\";\n media_type: string;\n data: string;\n }\n | {\n type: \"url\";\n url: string;\n };\n }\n | {\n type: \"document\";\n source:\n | {\n type: \"base64\";\n media_type: string;\n data: string;\n }\n | {\n type: \"url\";\n url: string;\n };\n };\n\nexport type OpenAIContentBlock =\n | { type: \"text\"; text: string }\n | {\n type: \"image_url\";\n image_url: {\n url: string;\n detail?: \"low\" | \"high\" | \"auto\";\n };\n };\n\n/**\n * Check if a message has image attachments\n * Supports both new format (metadata.attachments) and legacy (attachments)\n */\nexport function hasImageAttachments(message: Message): boolean {\n const attachments = message.metadata?.attachments;\n return attachments?.some((a) => a.type === \"image\") ?? false;\n}\n\n/**\n * Check if a message has media attachments (images or PDFs)\n */\nexport function hasMediaAttachments(message: Message): boolean {\n const attachments = message.metadata?.attachments;\n return (\n attachments?.some(\n (a) =>\n a.type === \"image\" ||\n (a.type === \"file\" && a.mimeType === \"application/pdf\"),\n ) ?? 
false\n );\n}\n\n/**\n * Convert MessageAttachment to Anthropic image content block\n *\n * Anthropic format:\n * {\n * type: \"image\",\n * source: {\n * type: \"base64\",\n * media_type: \"image/png\",\n * data: \"base64data...\"\n * }\n * }\n */\nexport function attachmentToAnthropicImage(\n attachment: MessageAttachment,\n): AnthropicContentBlock | null {\n if (attachment.type !== \"image\") return null;\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n return {\n type: \"image\",\n source: {\n type: \"url\",\n url: attachment.url,\n },\n };\n }\n\n // Fall back to base64 data\n if (!attachment.data) return null;\n\n // Extract base64 data (remove data URI prefix if present)\n let base64Data = attachment.data;\n if (base64Data.startsWith(\"data:\")) {\n const commaIndex = base64Data.indexOf(\",\");\n if (commaIndex !== -1) {\n base64Data = base64Data.slice(commaIndex + 1);\n }\n }\n\n return {\n type: \"image\",\n source: {\n type: \"base64\",\n media_type: attachment.mimeType || \"image/png\",\n data: base64Data,\n },\n };\n}\n\n/**\n * Convert MessageAttachment to OpenAI image_url content block\n *\n * OpenAI format:\n * {\n * type: \"image_url\",\n * image_url: {\n * url: \"data:image/png;base64,...\"\n * }\n * }\n */\nexport function attachmentToOpenAIImage(\n attachment: MessageAttachment,\n): OpenAIContentBlock | null {\n if (attachment.type !== \"image\") return null;\n\n let imageUrl: string;\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n imageUrl = attachment.url;\n } else if (attachment.data) {\n // Build data URI if not already one\n imageUrl = attachment.data.startsWith(\"data:\")\n ? attachment.data\n : `data:${attachment.mimeType || \"image/png\"};base64,${attachment.data}`;\n } else {\n return null;\n }\n\n return {\n type: \"image_url\",\n image_url: {\n url: imageUrl,\n detail: \"auto\",\n },\n };\n}\n\n/**\n * Convert MessageAttachment (PDF) to Anthropic document content block\n *\n * Anthropic format:\n * {\n * type: \"document\",\n * source: {\n * type: \"base64\",\n * media_type: \"application/pdf\",\n * data: \"base64data...\"\n * }\n * }\n */\nexport function attachmentToAnthropicDocument(\n attachment: MessageAttachment,\n): AnthropicContentBlock | null {\n // Only handle PDF files\n if (attachment.type !== \"file\" || attachment.mimeType !== \"application/pdf\") {\n return null;\n }\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n return {\n type: \"document\",\n source: {\n type: \"url\",\n url: attachment.url,\n },\n };\n }\n\n // Fall back to base64 data\n if (!attachment.data) return null;\n\n // Extract base64 data (remove data URI prefix if present)\n let base64Data = attachment.data;\n if (base64Data.startsWith(\"data:\")) {\n const commaIndex = base64Data.indexOf(\",\");\n if (commaIndex !== -1) {\n base64Data = base64Data.slice(commaIndex + 1);\n }\n }\n\n return {\n type: \"document\",\n source: {\n type: \"base64\",\n media_type: \"application/pdf\",\n data: base64Data,\n },\n };\n}\n\n/**\n * Convert a Message to Anthropic multimodal content blocks\n */\nexport function messageToAnthropicContent(\n message: Message,\n): string | AnthropicContentBlock[] {\n const attachments = message.metadata?.attachments;\n const content = message.content ?? 
\"\";\n\n // If no media attachments (images or PDFs), return simple string\n if (!hasMediaAttachments(message)) {\n return content;\n }\n\n // Build content blocks array\n const blocks: AnthropicContentBlock[] = [];\n\n // Add media attachments first (Claude recommends media before text)\n if (attachments) {\n for (const attachment of attachments) {\n // Try image first\n const imageBlock = attachmentToAnthropicImage(attachment);\n if (imageBlock) {\n blocks.push(imageBlock);\n continue;\n }\n // Try document (PDF)\n const docBlock = attachmentToAnthropicDocument(attachment);\n if (docBlock) {\n blocks.push(docBlock);\n }\n }\n }\n\n // Add text content\n if (content) {\n blocks.push({ type: \"text\", text: content });\n }\n\n return blocks;\n}\n\n/**\n * Convert a Message to OpenAI multimodal content blocks\n */\nexport function messageToOpenAIContent(\n message: Message,\n): string | OpenAIContentBlock[] {\n const attachments = message.metadata?.attachments;\n const content = message.content ?? \"\";\n\n // If no image attachments, return simple string\n if (!hasImageAttachments(message)) {\n return content;\n }\n\n // Build content blocks array\n const blocks: OpenAIContentBlock[] = [];\n\n // Add text content first\n if (content) {\n blocks.push({ type: \"text\", text: content });\n }\n\n // Add image attachments\n if (attachments) {\n for (const attachment of attachments) {\n const imageBlock = attachmentToOpenAIImage(attachment);\n if (imageBlock) {\n blocks.push(imageBlock);\n }\n }\n }\n\n return blocks;\n}\n\n/**\n * Anthropic content block types (extended for tools)\n */\nexport type AnthropicToolUseBlock = {\n type: \"tool_use\";\n id: string;\n name: string;\n input: Record<string, unknown>;\n};\n\nexport type AnthropicToolResultBlock = {\n type: \"tool_result\";\n tool_use_id: string;\n content: string;\n};\n\nexport type AnthropicMessageContent =\n | string\n | Array<\n AnthropicContentBlock | AnthropicToolUseBlock | AnthropicToolResultBlock\n >;\n\n/**\n * Format messages for Anthropic with full tool support\n * Handles: text, images, tool_use, and tool_result\n *\n * Key differences from OpenAI:\n * - tool_calls become tool_use blocks in assistant content\n * - tool results become tool_result blocks in user content\n */\nexport function formatMessagesForAnthropic(\n messages: Message[],\n systemPrompt?: string,\n): {\n system: string;\n messages: Array<{\n role: \"user\" | \"assistant\";\n content: AnthropicMessageContent;\n }>;\n} {\n const formatted: Array<{\n role: \"user\" | \"assistant\";\n content: AnthropicMessageContent;\n }> = [];\n\n for (let i = 0; i < messages.length; i++) {\n const msg = messages[i];\n\n if (msg.role === \"system\") continue; // System handled separately\n\n if (msg.role === \"assistant\") {\n // Build content array for assistant\n const content: Array<AnthropicContentBlock | AnthropicToolUseBlock> = [];\n\n // Add text content if present\n if (msg.content) {\n content.push({ type: \"text\", text: msg.content });\n }\n\n // Convert tool_calls to tool_use blocks\n if (msg.tool_calls && msg.tool_calls.length > 0) {\n for (const tc of msg.tool_calls) {\n content.push({\n type: \"tool_use\",\n id: tc.id,\n name: tc.function.name,\n input: JSON.parse(tc.function.arguments),\n });\n }\n }\n\n formatted.push({\n role: \"assistant\",\n content:\n content.length === 1 && content[0].type === \"text\"\n ? 
(content[0] as { type: \"text\"; text: string }).text\n : content,\n });\n } else if (msg.role === \"tool\" && msg.tool_call_id) {\n // Tool results go in user message as tool_result blocks\n // Group consecutive tool messages together\n const toolResults: AnthropicToolResultBlock[] = [\n {\n type: \"tool_result\",\n tool_use_id: msg.tool_call_id,\n content: msg.content ?? \"\",\n },\n ];\n\n // Look ahead for more consecutive tool messages\n while (i + 1 < messages.length && messages[i + 1].role === \"tool\") {\n i++;\n const nextTool = messages[i];\n if (nextTool.tool_call_id) {\n toolResults.push({\n type: \"tool_result\",\n tool_use_id: nextTool.tool_call_id,\n content: nextTool.content ?? \"\",\n });\n }\n }\n\n formatted.push({\n role: \"user\",\n content: toolResults,\n });\n } else if (msg.role === \"user\") {\n formatted.push({\n role: \"user\",\n content: messageToAnthropicContent(msg),\n });\n }\n }\n\n return {\n system: systemPrompt || \"\",\n messages: formatted,\n };\n}\n\n/**\n * OpenAI message format with tool support\n */\nexport type OpenAIMessage =\n | { role: \"system\"; content: string }\n | { role: \"user\"; content: string | OpenAIContentBlock[] }\n | {\n role: \"assistant\";\n content: string | null;\n tool_calls?: Array<{\n id: string;\n type: \"function\";\n function: { name: string; arguments: string };\n }>;\n }\n | { role: \"tool\"; content: string; tool_call_id: string };\n\n/**\n * Format messages for OpenAI with full tool support\n * Handles: text, images, tool_calls, and tool results\n */\nexport function formatMessagesForOpenAI(\n messages: Message[],\n systemPrompt?: string,\n): OpenAIMessage[] {\n const formatted: OpenAIMessage[] = [];\n\n // Add system prompt if provided\n if (systemPrompt) {\n formatted.push({ role: \"system\", content: systemPrompt });\n }\n\n for (const msg of messages) {\n if (msg.role === \"system\") {\n formatted.push({ role: \"system\", content: msg.content ?? \"\" });\n } else if (msg.role === \"user\") {\n formatted.push({\n role: \"user\",\n content: messageToOpenAIContent(msg),\n });\n } else if (msg.role === \"assistant\") {\n const assistantMsg: OpenAIMessage = {\n role: \"assistant\",\n content: msg.content,\n };\n if (msg.tool_calls && msg.tool_calls.length > 0) {\n (assistantMsg as { tool_calls: typeof msg.tool_calls }).tool_calls =\n msg.tool_calls;\n }\n formatted.push(assistantMsg);\n } else if (msg.role === \"tool\" && msg.tool_call_id) {\n formatted.push({\n role: \"tool\",\n content: msg.content ?? 
\"\",\n tool_call_id: msg.tool_call_id,\n });\n }\n }\n\n return formatted;\n}\n","/**\n * Azure OpenAI LLM Adapter\n *\n * Azure OpenAI uses Microsoft's cloud infrastructure with\n * different authentication and URL patterns than standard OpenAI.\n *\n * Supports: Any OpenAI model deployed on Azure (GPT-4, GPT-4o, etc.)\n * Features: Vision, Tools/Function Calling (depends on deployed model)\n */\n\nimport type { LLMConfig, StreamEvent } from \"@yourgpt/copilot-sdk/core\";\nimport {\n generateMessageId,\n generateToolCallId,\n} from \"@yourgpt/copilot-sdk/core\";\nimport type {\n LLMAdapter,\n ChatCompletionRequest,\n CompletionResult,\n} from \"./base\";\nimport { formatMessagesForOpenAI, formatTools } from \"./base\";\n\n// ============================================\n// Types\n// ============================================\n\n/**\n * Azure OpenAI adapter configuration\n */\nexport interface AzureAdapterConfig extends Partial<LLMConfig> {\n /** Azure OpenAI API key */\n apiKey: string;\n /** Azure resource name (e.g., 'my-resource') */\n resourceName: string;\n /** Azure deployment name (e.g., 'gpt-4o-deployment') */\n deploymentName: string;\n /** API version (default: 2024-08-01-preview) */\n apiVersion?: string;\n /** Custom endpoint URL (optional, overrides resourceName) */\n baseUrl?: string;\n}\n\n// Default Azure API version\nconst DEFAULT_API_VERSION = \"2024-08-01-preview\";\n\n/**\n * Build Azure OpenAI endpoint URL\n */\nfunction buildAzureEndpoint(\n resourceName: string,\n deploymentName: string,\n apiVersion: string,\n): string {\n return `https://${resourceName}.openai.azure.com/openai/deployments/${deploymentName}`;\n}\n\n// ============================================\n// Adapter Implementation\n// ============================================\n\n/**\n * Azure OpenAI LLM Adapter\n *\n * Uses Azure's OpenAI service with Azure-specific authentication\n */\nexport class AzureAdapter implements LLMAdapter {\n readonly provider = \"azure\";\n readonly model: string;\n\n private client: any; // OpenAI client (lazy loaded)\n private config: AzureAdapterConfig;\n\n constructor(config: AzureAdapterConfig) {\n this.config = config;\n this.model = config.deploymentName;\n }\n\n private async getClient() {\n if (!this.client) {\n // Use OpenAI SDK with Azure configuration\n const { AzureOpenAI } = await import(\"openai\");\n\n const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;\n const endpoint =\n this.config.baseUrl ||\n buildAzureEndpoint(\n this.config.resourceName,\n this.config.deploymentName,\n apiVersion,\n );\n\n this.client = new AzureOpenAI({\n apiKey: this.config.apiKey,\n endpoint,\n apiVersion,\n deployment: this.config.deploymentName,\n });\n }\n return this.client;\n }\n\n async *stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent> {\n const client = await this.getClient();\n\n // Use raw messages if provided (for agent loop with tool calls), otherwise format from Message[]\n let messages: Array<Record<string, unknown>>;\n if (request.rawMessages && request.rawMessages.length > 0) {\n // Process raw messages - convert any attachments to OpenAI vision format\n const processedMessages = request.rawMessages.map((msg) => {\n // Check if message has attachments (images)\n const hasAttachments =\n msg.attachments &&\n Array.isArray(msg.attachments) &&\n msg.attachments.length > 0;\n\n if (hasAttachments) {\n // Convert to OpenAI multimodal content format\n const content: Array<Record<string, unknown>> = [];\n\n // Add text content if 
present\n if (msg.content) {\n content.push({ type: \"text\", text: msg.content });\n }\n\n // Add image attachments\n for (const attachment of msg.attachments as Array<{\n type: string;\n data: string;\n mimeType?: string;\n }>) {\n if (attachment.type === \"image\") {\n // Convert to OpenAI image_url format\n let imageUrl = attachment.data;\n if (!imageUrl.startsWith(\"data:\")) {\n imageUrl = `data:${attachment.mimeType || \"image/png\"};base64,${attachment.data}`;\n }\n content.push({\n type: \"image_url\",\n image_url: { url: imageUrl, detail: \"auto\" },\n });\n }\n }\n\n return { ...msg, content, attachments: undefined };\n }\n return msg;\n });\n\n // Add system prompt at the start if provided and not already present\n if (request.systemPrompt) {\n const hasSystem = processedMessages.some((m) => m.role === \"system\");\n if (!hasSystem) {\n messages = [\n { role: \"system\", content: request.systemPrompt },\n ...processedMessages,\n ];\n } else {\n messages = processedMessages;\n }\n } else {\n messages = processedMessages;\n }\n } else {\n // Format from Message[] with multimodal support (images, attachments)\n messages = formatMessagesForOpenAI(\n request.messages,\n request.systemPrompt,\n ) as Array<Record<string, unknown>>;\n }\n\n const tools = request.actions?.length\n ? formatTools(request.actions)\n : undefined;\n\n const messageId = generateMessageId();\n\n // Emit message start\n yield { type: \"message:start\", id: messageId };\n\n try {\n const stream = await client.chat.completions.create({\n // Azure uses deployment name, not model name\n model: this.config.deploymentName,\n messages,\n tools,\n temperature: request.config?.temperature ?? this.config.temperature,\n max_tokens: request.config?.maxTokens ?? this.config.maxTokens,\n stream: true,\n });\n\n let currentToolCall: {\n id: string;\n name: string;\n arguments: string;\n } | null = null;\n\n for await (const chunk of stream) {\n // Check for abort\n if (request.signal?.aborted) {\n break;\n }\n\n const delta = chunk.choices[0]?.delta;\n\n // Handle content\n if (delta?.content) {\n yield { type: \"message:delta\", content: delta.content };\n }\n\n // Handle tool calls\n if (delta?.tool_calls) {\n for (const toolCall of delta.tool_calls) {\n // New tool call\n if (toolCall.id) {\n // End previous tool call if any\n if (currentToolCall) {\n yield {\n type: \"action:args\",\n id: currentToolCall.id,\n args: currentToolCall.arguments,\n };\n }\n\n currentToolCall = {\n id: toolCall.id,\n name: toolCall.function?.name || \"\",\n arguments: toolCall.function?.arguments || \"\",\n };\n\n yield {\n type: \"action:start\",\n id: currentToolCall.id,\n name: currentToolCall.name,\n };\n } else if (currentToolCall && toolCall.function?.arguments) {\n // Append to current tool call arguments\n currentToolCall.arguments += toolCall.function.arguments;\n }\n }\n }\n\n // Check for finish\n if (chunk.choices[0]?.finish_reason) {\n // Complete any pending tool call\n if (currentToolCall) {\n yield {\n type: \"action:args\",\n id: currentToolCall.id,\n args: currentToolCall.arguments,\n };\n }\n }\n }\n\n // Emit message end\n yield { type: \"message:end\" };\n yield { type: \"done\" };\n } catch (error) {\n yield {\n type: \"error\",\n message: error instanceof Error ? 
error.message : \"Unknown error\",\n code: \"AZURE_ERROR\",\n };\n }\n }\n\n /**\n * Non-streaming completion (optional, for debugging)\n */\n async complete(request: ChatCompletionRequest): Promise<CompletionResult> {\n const client = await this.getClient();\n\n let messages: Array<Record<string, unknown>>;\n if (request.rawMessages && request.rawMessages.length > 0) {\n messages = request.rawMessages as Array<Record<string, unknown>>;\n if (request.systemPrompt) {\n const hasSystem = messages.some((m) => m.role === \"system\");\n if (!hasSystem) {\n messages = [\n { role: \"system\", content: request.systemPrompt },\n ...messages,\n ];\n }\n }\n } else {\n messages = formatMessagesForOpenAI(\n request.messages,\n request.systemPrompt,\n ) as Array<Record<string, unknown>>;\n }\n\n const tools = request.actions?.length\n ? formatTools(request.actions)\n : undefined;\n\n const response = await client.chat.completions.create({\n model: this.config.deploymentName,\n messages,\n tools,\n temperature: request.config?.temperature ?? this.config.temperature,\n max_tokens: request.config?.maxTokens ?? this.config.maxTokens,\n });\n\n const choice = response.choices[0];\n const message = choice?.message;\n\n const toolCalls = (message?.tool_calls || []).map((tc: any) => ({\n id: tc.id,\n name: tc.function.name,\n args: JSON.parse(tc.function.arguments || \"{}\"),\n }));\n\n return {\n content: message?.content || \"\",\n toolCalls,\n rawResponse: response as Record<string, unknown>,\n };\n }\n}\n\n/**\n * Create Azure OpenAI adapter\n */\nexport function createAzureAdapter(config: AzureAdapterConfig): AzureAdapter {\n return new AzureAdapter(config);\n}\n","/**\n * Azure OpenAI Provider\n *\n * Wraps the AzureAdapter with provider interface.\n * Azure OpenAI provides enterprise-grade OpenAI models with Azure security.\n *\n * Features:\n * - Vision (for supported deployments)\n * - Tools/Function calling\n * - Enterprise security & compliance\n * - Private networking options\n *\n * Note: Capabilities depend on which model is deployed, not a model ID.\n * The provider attempts to detect capabilities from the deployment name.\n */\n\nimport { createAzureAdapter } from \"../../adapters/azure\";\nimport type {\n AIProvider,\n ProviderCapabilities,\n AzureProviderConfig,\n} from \"../types\";\n\n// ============================================\n// Model Capability Patterns\n// ============================================\n\n/**\n * Detect model capabilities from deployment name\n * Azure deployments are user-named, so we look for common patterns\n */\nfunction detectCapabilitiesFromDeployment(deploymentName: string): {\n vision: boolean;\n tools: boolean;\n maxTokens: number;\n} {\n const name = deploymentName.toLowerCase();\n\n // GPT-4o variants (vision, tools, 128k context)\n if (name.includes(\"gpt-4o\") || name.includes(\"gpt4o\")) {\n return { vision: true, tools: true, maxTokens: 128000 };\n }\n\n // GPT-4 Turbo with vision\n if (\n (name.includes(\"gpt-4\") || name.includes(\"gpt4\")) &&\n (name.includes(\"turbo\") || name.includes(\"vision\"))\n ) {\n return { vision: true, tools: true, maxTokens: 128000 };\n }\n\n // GPT-4 base\n if (name.includes(\"gpt-4\") || name.includes(\"gpt4\")) {\n return { vision: false, tools: true, maxTokens: 8192 };\n }\n\n // GPT-3.5 Turbo\n if (\n name.includes(\"gpt-35\") ||\n name.includes(\"gpt-3.5\") ||\n name.includes(\"gpt35\")\n ) {\n return { vision: false, tools: true, maxTokens: 16385 };\n }\n\n // o1 reasoning models\n if (name.includes(\"o1\")) {\n 
return { vision: true, tools: false, maxTokens: 128000 };\n }\n\n // Default fallback\n return { vision: false, tools: true, maxTokens: 8192 };\n}\n\n// ============================================\n// Provider Implementation\n// ============================================\n\n/**\n * Create an Azure OpenAI provider\n *\n * @example\n * ```typescript\n * const azure = createAzure({\n * apiKey: '...',\n * resourceName: 'my-azure-resource',\n * deploymentName: 'gpt-4o-deployment',\n * });\n * const adapter = azure.languageModel('gpt-4o-deployment');\n * const caps = azure.getCapabilities('gpt-4o-deployment');\n * ```\n */\nexport function createAzure(config: AzureProviderConfig): AIProvider {\n const apiKey = config.apiKey ?? process.env.AZURE_OPENAI_API_KEY ?? \"\";\n const resourceName =\n config.resourceName ?? process.env.AZURE_OPENAI_RESOURCE ?? \"\";\n const defaultDeployment =\n config.deploymentName ?? process.env.AZURE_OPENAI_DEPLOYMENT ?? \"\";\n\n // For Azure, the \"supported models\" are actually deployment names\n // We include the default deployment as the main \"model\"\n const supportedModels = defaultDeployment ? [defaultDeployment] : [];\n\n return {\n name: \"azure\",\n supportedModels,\n\n languageModel(deploymentName: string) {\n return createAzureAdapter({\n apiKey,\n resourceName,\n deploymentName: deploymentName || defaultDeployment,\n apiVersion: config.apiVersion,\n baseUrl: config.baseUrl,\n });\n },\n\n getCapabilities(deploymentName: string): ProviderCapabilities {\n const detected = detectCapabilitiesFromDeployment(\n deploymentName || defaultDeployment,\n );\n\n return {\n supportsVision: detected.vision,\n supportsTools: detected.tools,\n supportsThinking: false,\n supportsStreaming: true,\n supportsPDF: false,\n supportsAudio: false,\n supportsVideo: false,\n maxTokens: detected.maxTokens,\n supportedImageTypes: detected.vision\n ? [\"image/png\", \"image/jpeg\", \"image/gif\", \"image/webp\"]\n : [],\n supportsJsonMode: true,\n supportsSystemMessages: true,\n };\n },\n };\n}\n\n// Alias for consistency\nexport const createAzureProvider = createAzure;\n"]}
package/dist/providers/azure/index.mjs
@@ -0,0 +1,377 @@
1
+ import { generateMessageId } from '@yourgpt/copilot-sdk/core';
2
+
3
+ // src/adapters/azure.ts
4
+
5
+ // src/adapters/base.ts
6
+ function parameterToJsonSchema(param) {
7
+ const schema = {
8
+ type: param.type
9
+ };
10
+ if (param.description) {
11
+ schema.description = param.description;
12
+ }
13
+ if (param.enum) {
14
+ schema.enum = param.enum;
15
+ }
16
+ if (param.type === "array" && param.items) {
17
+ schema.items = parameterToJsonSchema(
18
+ param.items
19
+ );
20
+ }
21
+ if (param.type === "object" && param.properties) {
22
+ schema.properties = Object.fromEntries(
23
+ Object.entries(param.properties).map(([key, prop]) => [
24
+ key,
25
+ parameterToJsonSchema(
26
+ prop
27
+ )
28
+ ])
29
+ );
30
+ }
31
+ return schema;
32
+ }
33
+ function formatTools(actions) {
34
+ return actions.map((action) => ({
35
+ type: "function",
36
+ function: {
37
+ name: action.name,
38
+ description: action.description,
39
+ parameters: {
40
+ type: "object",
41
+ properties: action.parameters ? Object.fromEntries(
42
+ Object.entries(action.parameters).map(([key, param]) => [
43
+ key,
44
+ parameterToJsonSchema(param)
45
+ ])
46
+ ) : {},
47
+ required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
48
+ }
49
+ }
50
+ }));
51
+ }
52
+ function hasImageAttachments(message) {
53
+ const attachments = message.metadata?.attachments;
54
+ return attachments?.some((a) => a.type === "image") ?? false;
55
+ }
56
+ function attachmentToOpenAIImage(attachment) {
57
+ if (attachment.type !== "image") return null;
58
+ let imageUrl;
59
+ if (attachment.url) {
60
+ imageUrl = attachment.url;
61
+ } else if (attachment.data) {
62
+ imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
63
+ } else {
64
+ return null;
65
+ }
66
+ return {
67
+ type: "image_url",
68
+ image_url: {
69
+ url: imageUrl,
70
+ detail: "auto"
71
+ }
72
+ };
73
+ }
74
+ function messageToOpenAIContent(message) {
75
+ const attachments = message.metadata?.attachments;
76
+ const content = message.content ?? "";
77
+ if (!hasImageAttachments(message)) {
78
+ return content;
79
+ }
80
+ const blocks = [];
81
+ if (content) {
82
+ blocks.push({ type: "text", text: content });
83
+ }
84
+ if (attachments) {
85
+ for (const attachment of attachments) {
86
+ const imageBlock = attachmentToOpenAIImage(attachment);
87
+ if (imageBlock) {
88
+ blocks.push(imageBlock);
89
+ }
90
+ }
91
+ }
92
+ return blocks;
93
+ }
94
+ function formatMessagesForOpenAI(messages, systemPrompt) {
95
+ const formatted = [];
96
+ if (systemPrompt) {
97
+ formatted.push({ role: "system", content: systemPrompt });
98
+ }
99
+ for (const msg of messages) {
100
+ if (msg.role === "system") {
101
+ formatted.push({ role: "system", content: msg.content ?? "" });
102
+ } else if (msg.role === "user") {
103
+ formatted.push({
104
+ role: "user",
105
+ content: messageToOpenAIContent(msg)
106
+ });
107
+ } else if (msg.role === "assistant") {
108
+ const assistantMsg = {
109
+ role: "assistant",
110
+ content: msg.content
111
+ };
112
+ if (msg.tool_calls && msg.tool_calls.length > 0) {
113
+ assistantMsg.tool_calls = msg.tool_calls;
114
+ }
115
+ formatted.push(assistantMsg);
116
+ } else if (msg.role === "tool" && msg.tool_call_id) {
117
+ formatted.push({
118
+ role: "tool",
119
+ content: msg.content ?? "",
120
+ tool_call_id: msg.tool_call_id
121
+ });
122
+ }
123
+ }
124
+ return formatted;
125
+ }
126
+
127
+ // src/adapters/azure.ts
128
+ var DEFAULT_API_VERSION = "2024-08-01-preview";
129
+ function buildAzureEndpoint(resourceName, deploymentName, apiVersion) {
130
+ return `https://${resourceName}.openai.azure.com/openai/deployments/${deploymentName}`;
131
+ }
132
+ var AzureAdapter = class {
133
+ constructor(config) {
134
+ this.provider = "azure";
135
+ this.config = config;
136
+ this.model = config.deploymentName;
137
+ }
138
+ async getClient() {
139
+ if (!this.client) {
140
+ const { AzureOpenAI } = await import('openai');
141
+ const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;
142
+ const endpoint = this.config.baseUrl || buildAzureEndpoint(
143
+ this.config.resourceName,
144
+ this.config.deploymentName);
145
+ this.client = new AzureOpenAI({
146
+ apiKey: this.config.apiKey,
147
+ endpoint,
148
+ apiVersion,
149
+ deployment: this.config.deploymentName
150
+ });
151
+ }
152
+ return this.client;
153
+ }
154
+ async *stream(request) {
155
+ const client = await this.getClient();
156
+ let messages;
157
+ if (request.rawMessages && request.rawMessages.length > 0) {
158
+ const processedMessages = request.rawMessages.map((msg) => {
159
+ const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
160
+ if (hasAttachments) {
161
+ const content = [];
162
+ if (msg.content) {
163
+ content.push({ type: "text", text: msg.content });
164
+ }
165
+ for (const attachment of msg.attachments) {
166
+ if (attachment.type === "image") {
167
+ let imageUrl = attachment.data;
168
+ if (!imageUrl.startsWith("data:")) {
169
+ imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
170
+ }
171
+ content.push({
172
+ type: "image_url",
173
+ image_url: { url: imageUrl, detail: "auto" }
174
+ });
175
+ }
176
+ }
177
+ return { ...msg, content, attachments: void 0 };
178
+ }
179
+ return msg;
180
+ });
181
+ if (request.systemPrompt) {
182
+ const hasSystem = processedMessages.some((m) => m.role === "system");
183
+ if (!hasSystem) {
184
+ messages = [
185
+ { role: "system", content: request.systemPrompt },
186
+ ...processedMessages
187
+ ];
188
+ } else {
189
+ messages = processedMessages;
190
+ }
191
+ } else {
192
+ messages = processedMessages;
193
+ }
194
+ } else {
195
+ messages = formatMessagesForOpenAI(
196
+ request.messages,
197
+ request.systemPrompt
198
+ );
199
+ }
200
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
201
+ const messageId = generateMessageId();
202
+ yield { type: "message:start", id: messageId };
203
+ try {
204
+ const stream = await client.chat.completions.create({
205
+ // Azure uses deployment name, not model name
206
+ model: this.config.deploymentName,
207
+ messages,
208
+ tools,
209
+ temperature: request.config?.temperature ?? this.config.temperature,
210
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
211
+ stream: true
212
+ });
213
+ let currentToolCall = null;
214
+ for await (const chunk of stream) {
215
+ if (request.signal?.aborted) {
216
+ break;
217
+ }
218
+ const delta = chunk.choices[0]?.delta;
219
+ if (delta?.content) {
220
+ yield { type: "message:delta", content: delta.content };
221
+ }
222
+ if (delta?.tool_calls) {
223
+ for (const toolCall of delta.tool_calls) {
224
+ if (toolCall.id) {
225
+ if (currentToolCall) {
226
+ yield {
227
+ type: "action:args",
228
+ id: currentToolCall.id,
229
+ args: currentToolCall.arguments
230
+ };
231
+ }
232
+ currentToolCall = {
233
+ id: toolCall.id,
234
+ name: toolCall.function?.name || "",
235
+ arguments: toolCall.function?.arguments || ""
236
+ };
237
+ yield {
238
+ type: "action:start",
239
+ id: currentToolCall.id,
240
+ name: currentToolCall.name
241
+ };
242
+ } else if (currentToolCall && toolCall.function?.arguments) {
243
+ currentToolCall.arguments += toolCall.function.arguments;
244
+ }
245
+ }
246
+ }
247
+ if (chunk.choices[0]?.finish_reason) {
248
+ if (currentToolCall) {
249
+ yield {
250
+ type: "action:args",
251
+ id: currentToolCall.id,
252
+ args: currentToolCall.arguments
253
+ };
254
+ }
255
+ }
256
+ }
257
+ yield { type: "message:end" };
258
+ yield { type: "done" };
259
+ } catch (error) {
260
+ yield {
261
+ type: "error",
262
+ message: error instanceof Error ? error.message : "Unknown error",
263
+ code: "AZURE_ERROR"
264
+ };
265
+ }
266
+ }
267
+ /**
268
+ * Non-streaming completion (optional, for debugging)
269
+ */
270
+ async complete(request) {
271
+ const client = await this.getClient();
272
+ let messages;
273
+ if (request.rawMessages && request.rawMessages.length > 0) {
274
+ messages = request.rawMessages;
275
+ if (request.systemPrompt) {
276
+ const hasSystem = messages.some((m) => m.role === "system");
277
+ if (!hasSystem) {
278
+ messages = [
279
+ { role: "system", content: request.systemPrompt },
280
+ ...messages
281
+ ];
282
+ }
283
+ }
284
+ } else {
285
+ messages = formatMessagesForOpenAI(
286
+ request.messages,
287
+ request.systemPrompt
288
+ );
289
+ }
290
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
291
+ const response = await client.chat.completions.create({
292
+ model: this.config.deploymentName,
293
+ messages,
294
+ tools,
295
+ temperature: request.config?.temperature ?? this.config.temperature,
296
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens
297
+ });
298
+ const choice = response.choices[0];
299
+ const message = choice?.message;
300
+ const toolCalls = (message?.tool_calls || []).map((tc) => ({
301
+ id: tc.id,
302
+ name: tc.function.name,
303
+ args: JSON.parse(tc.function.arguments || "{}")
304
+ }));
305
+ return {
306
+ content: message?.content || "",
307
+ toolCalls,
308
+ rawResponse: response
309
+ };
310
+ }
311
+ };
312
+ function createAzureAdapter(config) {
313
+ return new AzureAdapter(config);
314
+ }
315
+
316
+ // src/providers/azure/index.ts
317
+ function detectCapabilitiesFromDeployment(deploymentName) {
318
+ const name = deploymentName.toLowerCase();
319
+ if (name.includes("gpt-4o") || name.includes("gpt4o")) {
320
+ return { vision: true, tools: true, maxTokens: 128e3 };
321
+ }
322
+ if ((name.includes("gpt-4") || name.includes("gpt4")) && (name.includes("turbo") || name.includes("vision"))) {
323
+ return { vision: true, tools: true, maxTokens: 128e3 };
324
+ }
325
+ if (name.includes("gpt-4") || name.includes("gpt4")) {
326
+ return { vision: false, tools: true, maxTokens: 8192 };
327
+ }
328
+ if (name.includes("gpt-35") || name.includes("gpt-3.5") || name.includes("gpt35")) {
329
+ return { vision: false, tools: true, maxTokens: 16385 };
330
+ }
331
+ if (name.includes("o1")) {
332
+ return { vision: true, tools: false, maxTokens: 128e3 };
333
+ }
334
+ return { vision: false, tools: true, maxTokens: 8192 };
335
+ }
336
+ function createAzure(config) {
337
+ const apiKey = config.apiKey ?? process.env.AZURE_OPENAI_API_KEY ?? "";
338
+ const resourceName = config.resourceName ?? process.env.AZURE_OPENAI_RESOURCE ?? "";
339
+ const defaultDeployment = config.deploymentName ?? process.env.AZURE_OPENAI_DEPLOYMENT ?? "";
340
+ const supportedModels = defaultDeployment ? [defaultDeployment] : [];
341
+ return {
342
+ name: "azure",
343
+ supportedModels,
344
+ languageModel(deploymentName) {
345
+ return createAzureAdapter({
346
+ apiKey,
347
+ resourceName,
348
+ deploymentName: deploymentName || defaultDeployment,
349
+ apiVersion: config.apiVersion,
350
+ baseUrl: config.baseUrl
351
+ });
352
+ },
353
+ getCapabilities(deploymentName) {
354
+ const detected = detectCapabilitiesFromDeployment(
355
+ deploymentName || defaultDeployment
356
+ );
357
+ return {
358
+ supportsVision: detected.vision,
359
+ supportsTools: detected.tools,
360
+ supportsThinking: false,
361
+ supportsStreaming: true,
362
+ supportsPDF: false,
363
+ supportsAudio: false,
364
+ supportsVideo: false,
365
+ maxTokens: detected.maxTokens,
366
+ supportedImageTypes: detected.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
367
+ supportsJsonMode: true,
368
+ supportsSystemMessages: true
369
+ };
370
+ }
371
+ };
372
+ }
373
+ var createAzureProvider = createAzure;
374
+
375
+ export { createAzure, createAzureProvider };
376
+ //# sourceMappingURL=index.mjs.map
377
+ //# sourceMappingURL=index.mjs.map
package/dist/providers/azure/index.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../src/adapters/base.ts","../../../src/adapters/azure.ts","../../../src/providers/azure/index.ts"],"names":[],"mappings":";;;;;AAkGA,SAAS,sBAAsB,KAAA,EAMH;AAC1B,EAAA,MAAM,MAAA,GAAkC;AAAA,IACtC,MAAM,KAAA,CAAM;AAAA,GACd;AAEA,EAAA,IAAI,MAAM,WAAA,EAAa;AACrB,IAAA,MAAA,CAAO,cAAc,KAAA,CAAM,WAAA;AAAA,EAC7B;AAEA,EAAA,IAAI,MAAM,IAAA,EAAM;AACd,IAAA,MAAA,CAAO,OAAO,KAAA,CAAM,IAAA;AAAA,EACtB;AAGA,EAAA,IAAI,KAAA,CAAM,IAAA,KAAS,OAAA,IAAW,KAAA,CAAM,KAAA,EAAO;AACzC,IAAA,MAAA,CAAO,KAAA,GAAQ,qBAAA;AAAA,MACb,KAAA,CAAM;AAAA,KAOR;AAAA,EACF;AAGA,EAAA,IAAI,KAAA,CAAM,IAAA,KAAS,QAAA,IAAY,KAAA,CAAM,UAAA,EAAY;AAC/C,IAAA,MAAA,CAAO,aAAa,MAAA,CAAO,WAAA;AAAA,MACzB,MAAA,CAAO,OAAA,CAAQ,KAAA,CAAM,UAAU,CAAA,CAAE,IAAI,CAAC,CAAC,GAAA,EAAK,IAAI,CAAA,KAAM;AAAA,QACpD,GAAA;AAAA,QACA,qBAAA;AAAA,UACE;AAAA;AAOF,OACD;AAAA,KACH;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AAKO,SAAS,YAAY,OAAA,EAOzB;AACD,EAAA,OAAO,OAAA,CAAQ,GAAA,CAAI,CAAC,MAAA,MAAY;AAAA,IAC9B,IAAA,EAAM,UAAA;AAAA,IACN,QAAA,EAAU;AAAA,MACR,MAAM,MAAA,CAAO,IAAA;AAAA,MACb,aAAa,MAAA,CAAO,WAAA;AAAA,MACpB,UAAA,EAAY;AAAA,QACV,IAAA,EAAM,QAAA;AAAA,QACN,UAAA,EAAY,MAAA,CAAO,UAAA,GACf,MAAA,CAAO,WAAA;AAAA,UACL,MAAA,CAAO,OAAA,CAAQ,MAAA,CAAO,UAAU,CAAA,CAAE,IAAI,CAAC,CAAC,GAAA,EAAK,KAAK,CAAA,KAAM;AAAA,YACtD,GAAA;AAAA,YACA,sBAAsB,KAAK;AAAA,WAC5B;AAAA,YAEH,EAAC;AAAA,QACL,QAAA,EAAU,MAAA,CAAO,UAAA,GACb,MAAA,CAAO,OAAA,CAAQ,OAAO,UAAU,CAAA,CAC7B,MAAA,CAAO,CAAC,GAAG,KAAK,CAAA,KAAM,KAAA,CAAM,QAAQ,CAAA,CACpC,GAAA,CAAI,CAAC,CAAC,GAAG,CAAA,KAAM,GAAG,CAAA,GACrB;AAAC;AACP;AACF,GACF,CAAE,CAAA;AACJ;AAoDO,SAAS,oBAAoB,OAAA,EAA2B;AAC7D,EAAA,MAAM,WAAA,GAAc,QAAQ,QAAA,EAAU,WAAA;AACtC,EAAA,OAAO,aAAa,IAAA,CAAK,CAAC,MAAM,CAAA,CAAE,IAAA,KAAS,OAAO,CAAA,IAAK,KAAA;AACzD;AA8EO,SAAS,wBACd,UAAA,EAC2B;AAC3B,EAAA,IAAI,UAAA,CAAW,IAAA,KAAS,OAAA,EAAS,OAAO,IAAA;AAExC,EAAA,IAAI,QAAA;AAGJ,EAAA,IAAI,WAAW,GAAA,EAAK;AAClB,IAAA,QAAA,GAAW,UAAA,CAAW,GAAA;AAAA,EACxB,CAAA,MAAA,IAAW,WAAW,IAAA,EAAM;AAE1B,IAAA,QAAA,GAAW,UAAA,CAAW,IAAA,CAAK,UAAA,CAAW,OAAO,CAAA,GACzC,UAAA,CAAW,IAAA,GACX,CAAA,KAAA,EAAQ,UAAA,CAAW,QAAA,IAAY,WAAW,CAAA,QAAA,EAAW,WAAW,IAAI,CAAA,CAAA;AAAA,EAC1E,CAAA,MAAO;AACL,IAAA,OAAO,IAAA;AAAA,EACT;AAEA,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,WAAA;AAAA,IACN,SAAA,EAAW;AAAA,MACT,GAAA,EAAK,QAAA;AAAA,MACL,MAAA,EAAQ;AAAA;AACV,GACF;AACF;AAqGO,SAAS,uBACd,OAAA,EAC+B;AAC/B,EAAA,MAAM,WAAA,GAAc,QAAQ,QAAA,EAAU,WAAA;AACtC,EAAA,MAAM,OAAA,GAAU,QAAQ,OAAA,IAAW,EAAA;AAGnC,EAAA,IAAI,CAAC,mBAAA,CAAoB,OAAO,CAAA,EAAG;AACjC,IAAA,OAAO,OAAA;AAAA,EACT;AAGA,EAAA,MAAM,SAA+B,EAAC;AAGtC,EAAA,IAAI,OAAA,EAAS;AACX,IAAA,MAAA,CAAO,KAAK,EAAE,IAAA,EAAM,MAAA,EAAQ,IAAA,EAAM,SAAS,CAAA;AAAA,EAC7C;AAGA,EAAA,IAAI,WAAA,EAAa;AACf,IAAA,KAAA,MAAW,cAAc,WAAA,EAAa;AACpC,MAAA,MAAM,UAAA,GAAa,wBAAwB,UAAU,CAAA;AACrD,MAAA,IAAI,UAAA,EAAY;AACd,QAAA,MAAA,CAAO,KAAK,UAAU,CAAA;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AA+IO,SAAS,uBAAA,CACd,UACA,YAAA,EACiB;AACjB,EAAA,MAAM,YAA6B,EAAC;AAGpC,EAAA,IAAI,YAAA,EAAc;AAChB,IAAA,SAAA,CAAU,KAAK,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAAA,EAC1D;AAEA,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,IAAI,GAAA,CAAI,SAAS,QAAA,EAAU;AACzB,MAAA,SAAA,CAAU,IAAA,CAAK,EAAE,IAAA,EAAM,QAAA,EAAU,SAAS,GAAA,CAAI,OAAA,IAAW,IAAI,CAAA;AAAA,IAC/D,CAAA,MAAA,IAAW,GAAA,CAAI,IAAA,KAAS,MAAA,EAAQ;AAC9B,MAAA,SAAA,CAAU,IAAA,CAAK;AAAA,QACb,IAAA,EAAM,MAAA;AAAA,QACN,OAAA,EAAS,uBAAuB,GAAG;AAAA,OACpC,CAAA;AAAA,IACH,CAAA,MAAA,IAAW,GAAA,CAAI,IAAA,KAAS,WAAA,EAAa;AACnC,MAAA,MAAM,YAAA,GAA8B;AAAA,QAClC,IAAA,EAAM,WAAA;AAAA,QACN,SAAS,GAAA,CAAI;AAAA,OACf;AACA,MAAA,IAAI,GAAA,CAAI,UAAA,IAAc,GAAA,CAAI,UAAA,CAAW,SAAS,CAAA,EAAG;AAC/C,QAAC,YAAA,CAAuD,aACtD,GAAA,CAAI,UAAA;AAAA,
MACR;AACA,MAAA,SAAA,CAAU,KAAK,YAAY,CAAA;AAAA,IAC7B,CAAA,MAAA,IAAW,GAAA,CAAI,IAAA,KAAS,MAAA,IAAU,IAAI,YAAA,EAAc;AAClD,MAAA,SAAA,CAAU,IAAA,CAAK;AAAA,QACb,IAAA,EAAM,MAAA;AAAA,QACN,OAAA,EAAS,IAAI,OAAA,IAAW,EAAA;AAAA,QACxB,cAAc,GAAA,CAAI;AAAA,OACnB,CAAA;AAAA,IACH;AAAA,EACF;AAEA,EAAA,OAAO,SAAA;AACT;;;ACtmBA,IAAM,mBAAA,GAAsB,oBAAA;AAK5B,SAAS,kBAAA,CACP,YAAA,EACA,cAAA,EACA,UAAA,EACQ;AACR,EAAA,OAAO,CAAA,QAAA,EAAW,YAAY,CAAA,qCAAA,EAAwC,cAAc,CAAA,CAAA;AACtF;AAWO,IAAM,eAAN,MAAyC;AAAA,EAO9C,YAAY,MAAA,EAA4B;AANxC,IAAA,IAAA,CAAS,QAAA,GAAW,OAAA;AAOlB,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,QAAQ,MAAA,CAAO,cAAA;AAAA,EACtB;AAAA,EAEA,MAAc,SAAA,GAAY;AACxB,IAAA,IAAI,CAAC,KAAK,MAAA,EAAQ;AAEhB,MAAA,MAAM,EAAE,WAAA,EAAY,GAAI,MAAM,OAAO,QAAQ,CAAA;AAE7C,MAAA,MAAM,UAAA,GAAa,IAAA,CAAK,MAAA,CAAO,UAAA,IAAc,mBAAA;AAC7C,MAAA,MAAM,QAAA,GACJ,IAAA,CAAK,MAAA,CAAO,OAAA,IACZ,kBAAA;AAAA,QACE,KAAK,MAAA,CAAO,YAAA;AAAA,QACZ,KAAK,MAAA,CAAO,cAEd,CAAA;AAEF,MAAA,IAAA,CAAK,MAAA,GAAS,IAAI,WAAA,CAAY;AAAA,QAC5B,MAAA,EAAQ,KAAK,MAAA,CAAO,MAAA;AAAA,QACpB,QAAA;AAAA,QACA,UAAA;AAAA,QACA,UAAA,EAAY,KAAK,MAAA,CAAO;AAAA,OACzB,CAAA;AAAA,IACH;AACA,IAAA,OAAO,IAAA,CAAK,MAAA;AAAA,EACd;AAAA,EAEA,OAAO,OAAO,OAAA,EAA6D;AACzE,IAAA,MAAM,MAAA,GAAS,MAAM,IAAA,CAAK,SAAA,EAAU;AAGpC,IAAA,IAAI,QAAA;AACJ,IAAA,IAAI,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,CAAY,SAAS,CAAA,EAAG;AAEzD,MAAA,MAAM,iBAAA,GAAoB,OAAA,CAAQ,WAAA,CAAY,GAAA,CAAI,CAAC,GAAA,KAAQ;AAEzD,QAAA,MAAM,cAAA,GACJ,GAAA,CAAI,WAAA,IACJ,KAAA,CAAM,OAAA,CAAQ,IAAI,WAAW,CAAA,IAC7B,GAAA,CAAI,WAAA,CAAY,MAAA,GAAS,CAAA;AAE3B,QAAA,IAAI,cAAA,EAAgB;AAElB,UAAA,MAAM,UAA0C,EAAC;AAGjD,UAAA,IAAI,IAAI,OAAA,EAAS;AACf,YAAA,OAAA,CAAQ,KAAK,EAAE,IAAA,EAAM,QAAQ,IAAA,EAAM,GAAA,CAAI,SAAS,CAAA;AAAA,UAClD;AAGA,UAAA,KAAA,MAAW,UAAA,IAAc,IAAI,WAAA,EAIzB;AACF,YAAA,IAAI,UAAA,CAAW,SAAS,OAAA,EAAS;AAE/B,cAAA,IAAI,WAAW,UAAA,CAAW,IAAA;AAC1B,cAAA,IAAI,CAAC,QAAA,CAAS,UAAA,CAAW,OAAO,CAAA,EAAG;AACjC,gBAAA,QAAA,GAAW,QAAQ,UAAA,CAAW,QAAA,IAAY,WAAW,CAAA,QAAA,EAAW,WAAW,IAAI,CAAA,CAAA;AAAA,cACjF;AACA,cAAA,OAAA,CAAQ,IAAA,CAAK;AAAA,gBACX,IAAA,EAAM,WAAA;AAAA,gBACN,SAAA,EAAW,EAAE,GAAA,EAAK,QAAA,EAAU,QAAQ,MAAA;AAAO,eAC5C,CAAA;AAAA,YACH;AAAA,UACF;AAEA,UAAA,OAAO,EAAE,GAAG,GAAA,EAAK,OAAA,EAAS,aAAa,MAAA,EAAU;AAAA,QACnD;AACA,QAAA,OAAO,GAAA;AAAA,MACT,CAAC,CAAA;AAGD,MAAA,IAAI,QAAQ,YAAA,EAAc;AACxB,QAAA,MAAM,YAAY,iBAAA,CAAkB,IAAA,CAAK,CAAC,CAAA,KAAM,CAAA,CAAE,SAAS,QAAQ,CAAA;AACnE,QAAA,IAAI,CAAC,SAAA,EAAW;AACd,UAAA,QAAA,GAAW;AAAA,YACT,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,QAAQ,YAAA,EAAa;AAAA,YAChD,GAAG;AAAA,WACL;AAAA,QACF,CAAA,MAAO;AACL,UAAA,QAAA,GAAW,iBAAA;AAAA,QACb;AAAA,MACF,CAAA,MAAO;AACL,QAAA,QAAA,GAAW,iBAAA;AAAA,MACb;AAAA,IACF,CAAA,MAAO;AAEL,MAAA,QAAA,GAAW,uBAAA;AAAA,QACT,OAAA,CAAQ,QAAA;AAAA,QACR,OAAA,CAAQ;AAAA,OACV;AAAA,IACF;AAEA,IAAA,MAAM,QAAQ,OAAA,CAAQ,OAAA,EAAS,SAC3B,WAAA,CAAY,OAAA,CAAQ,OAAO,CAAA,GAC3B,MAAA;AAEJ,IAAA,MAAM,YAAY,iBAAA,EAAkB;AAGpC,IAAA,MAAM,EAAE,IAAA,EAAM,eAAA,EAAiB,EAAA,EAAI,SAAA,EAAU;AAE7C,IAAA,IAAI;AACF,MAAA,MAAM,MAAA,GAAS,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA;AAAA,QAElD,KAAA,EAAO,KAAK,MAAA,CAAO,cAAA;AAAA,QACnB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,WAAA,EAAa,OAAA,CAAQ,MAAA,EAAQ,WAAA,IAAe,KAAK,MAAA,CAAO,WAAA;AAAA,QACxD,UAAA,EAAY,OAAA,CAAQ,MAAA,EAAQ,SAAA,IAAa,KAAK,MAAA,CAAO,SAAA;AAAA,QACrD,MAAA,EAAQ;AAAA,OACT,CAAA;AAED,MAAA,IAAI,eAAA,GAIO,IAAA;AAEX,MAAA,WAAA,MAAiB,SAAS,MAAA,EAAQ;AAEhC,QAAA,IAAI,OAAA,CAAQ,QAAQ,OAAA,EAAS;AAC3B,UAAA;AAAA,QACF;AAEA,QAAA,MAAM,KAAA,GAAQ,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,KAAA;AAGhC,QAAA,IAAI,OAAO,OAAA,EAAS;AAClB,UAAA,MAAM,EAAE,IAAA,EAAM,eAAA,EAAiB,OAAA,EAAS,MAAM,OAAA,EAAQ;AAAA,QACxD;AAGA,QAAA,IAAI,OAAO,UAAA,EAAY;AACrB,UAAA,KAA
A,MAAW,QAAA,IAAY,MAAM,UAAA,EAAY;AAEvC,YAAA,IAAI,SAAS,EAAA,EAAI;AAEf,cAAA,IAAI,eAAA,EAAiB;AACnB,gBAAA,MAAM;AAAA,kBACJ,IAAA,EAAM,aAAA;AAAA,kBACN,IAAI,eAAA,CAAgB,EAAA;AAAA,kBACpB,MAAM,eAAA,CAAgB;AAAA,iBACxB;AAAA,cACF;AAEA,cAAA,eAAA,GAAkB;AAAA,gBAChB,IAAI,QAAA,CAAS,EAAA;AAAA,gBACb,IAAA,EAAM,QAAA,CAAS,QAAA,EAAU,IAAA,IAAQ,EAAA;AAAA,gBACjC,SAAA,EAAW,QAAA,CAAS,QAAA,EAAU,SAAA,IAAa;AAAA,eAC7C;AAEA,cAAA,MAAM;AAAA,gBACJ,IAAA,EAAM,cAAA;AAAA,gBACN,IAAI,eAAA,CAAgB,EAAA;AAAA,gBACpB,MAAM,eAAA,CAAgB;AAAA,eACxB;AAAA,YACF,CAAA,MAAA,IAAW,eAAA,IAAmB,QAAA,CAAS,QAAA,EAAU,SAAA,EAAW;AAE1D,cAAA,eAAA,CAAgB,SAAA,IAAa,SAAS,QAAA,CAAS,SAAA;AAAA,YACjD;AAAA,UACF;AAAA,QACF;AAGA,QAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,aAAA,EAAe;AAEnC,UAAA,IAAI,eAAA,EAAiB;AACnB,YAAA,MAAM;AAAA,cACJ,IAAA,EAAM,aAAA;AAAA,cACN,IAAI,eAAA,CAAgB,EAAA;AAAA,cACpB,MAAM,eAAA,CAAgB;AAAA,aACxB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAGA,MAAA,MAAM,EAAE,MAAM,aAAA,EAAc;AAC5B,MAAA,MAAM,EAAE,MAAM,MAAA,EAAO;AAAA,IACvB,SAAS,KAAA,EAAO;AACd,MAAA,MAAM;AAAA,QACJ,IAAA,EAAM,OAAA;AAAA,QACN,OAAA,EAAS,KAAA,YAAiB,KAAA,GAAQ,KAAA,CAAM,OAAA,GAAU,eAAA;AAAA,QAClD,IAAA,EAAM;AAAA,OACR;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,SAAS,OAAA,EAA2D;AACxE,IAAA,MAAM,MAAA,GAAS,MAAM,IAAA,CAAK,SAAA,EAAU;AAEpC,IAAA,IAAI,QAAA;AACJ,IAAA,IAAI,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,CAAY,SAAS,CAAA,EAAG;AACzD,MAAA,QAAA,GAAW,OAAA,CAAQ,WAAA;AACnB,MAAA,IAAI,QAAQ,YAAA,EAAc;AACxB,QAAA,MAAM,YAAY,QAAA,CAAS,IAAA,CAAK,CAAC,CAAA,KAAM,CAAA,CAAE,SAAS,QAAQ,CAAA;AAC1D,QAAA,IAAI,CAAC,SAAA,EAAW;AACd,UAAA,QAAA,GAAW;AAAA,YACT,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,QAAQ,YAAA,EAAa;AAAA,YAChD,GAAG;AAAA,WACL;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAA,MAAO;AACL,MAAA,QAAA,GAAW,uBAAA;AAAA,QACT,OAAA,CAAQ,QAAA;AAAA,QACR,OAAA,CAAQ;AAAA,OACV;AAAA,IACF;AAEA,IAAA,MAAM,QAAQ,OAAA,CAAQ,OAAA,EAAS,SAC3B,WAAA,CAAY,OAAA,CAAQ,OAAO,CAAA,GAC3B,MAAA;AAEJ,IAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACpD,KAAA,EAAO,KAAK,MAAA,CAAO,cAAA;AAAA,MACnB,QAAA;AAAA,MACA,KAAA;AAAA,MACA,WAAA,EAAa,OAAA,CAAQ,MAAA,EAAQ,WAAA,IAAe,KAAK,MAAA,CAAO,WAAA;AAAA,MACxD,UAAA,EAAY,OAAA,CAAQ,MAAA,EAAQ,SAAA,IAAa,KAAK,MAAA,CAAO;AAAA,KACtD,CAAA;AAED,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,IAAA,MAAM,UAAU,MAAA,EAAQ,OAAA;AAExB,IAAA,MAAM,aAAa,OAAA,EAAS,UAAA,IAAc,EAAC,EAAG,GAAA,CAAI,CAAC,EAAA,MAAa;AAAA,MAC9D,IAAI,EAAA,CAAG,EAAA;AAAA,MACP,IAAA,EAAM,GAAG,QAAA,CAAS,IAAA;AAAA,MAClB,MAAM,IAAA,CAAK,KAAA,CAAM,EAAA,CAAG,QAAA,CAAS,aAAa,IAAI;AAAA,KAChD,CAAE,CAAA;AAEF,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,SAAS,OAAA,IAAW,EAAA;AAAA,MAC7B,SAAA;AAAA,MACA,WAAA,EAAa;AAAA,KACf;AAAA,EACF;AACF,CAAA;AAKO,SAAS,mBAAmB,MAAA,EAA0C;AAC3E,EAAA,OAAO,IAAI,aAAa,MAAM,CAAA;AAChC;;;ACtSA,SAAS,iCAAiC,cAAA,EAIxC;AACA,EAAA,MAAM,IAAA,GAAO,eAAe,WAAA,EAAY;AAGxC,EAAA,IAAI,KAAK,QAAA,CAAS,QAAQ,KAAK,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA,EAAG;AACrD,IAAA,OAAO,EAAE,MAAA,EAAQ,IAAA,EAAM,KAAA,EAAO,IAAA,EAAM,WAAW,KAAA,EAAO;AAAA,EACxD;AAGA,EAAA,IAAA,CACG,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA,IAAK,KAAK,QAAA,CAAS,MAAM,CAAA,MAC9C,IAAA,CAAK,SAAS,OAAO,CAAA,IAAK,IAAA,CAAK,QAAA,CAAS,QAAQ,CAAA,CAAA,EACjD;AACA,IAAA,OAAO,EAAE,MAAA,EAAQ,IAAA,EAAM,KAAA,EAAO,IAAA,EAAM,WAAW,KAAA,EAAO;AAAA,EACxD;AAGA,EAAA,IAAI,KAAK,QAAA,CAAS,OAAO,KAAK,IAAA,CAAK,QAAA,CAAS,MAAM,CAAA,EAAG;AACnD,IAAA,OAAO,EAAE,MAAA,EAAQ,KAAA,EAAO,KAAA,EAAO,IAAA,EAAM,WAAW,IAAA,EAAK;AAAA,EACvD;AAGA,EAAA,IACE,IAAA,CAAK,QAAA,CAAS,QAAQ,CAAA,IACtB,IAAA,CAAK,QAAA,CAAS,SAAS,CAAA,IACvB,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA,EACrB;AACA,IAAA,OAAO,EAAE,MAAA,EAAQ,KAAA,EAAO,KAAA,EAAO,IAAA,EAAM,WAAW,KAAA,EAAM;AAAA,EACxD;AAGA,EAAA,IAAI,IAAA,CAAK,QAAA,CAAS,IAAI,CAAA,EAAG;AACvB,IAAA,OAAO,EAAE,MAAA,EAAQ,IA
AA,EAAM,KAAA,EAAO,KAAA,EAAO,WAAW,KAAA,EAAO;AAAA,EACzD;AAGA,EAAA,OAAO,EAAE,MAAA,EAAQ,KAAA,EAAO,KAAA,EAAO,IAAA,EAAM,WAAW,IAAA,EAAK;AACvD;AAoBO,SAAS,YAAY,MAAA,EAAyC;AACnE,EAAA,MAAM,MAAA,GAAS,MAAA,CAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,oBAAA,IAAwB,EAAA;AACpE,EAAA,MAAM,YAAA,GACJ,MAAA,CAAO,YAAA,IAAgB,OAAA,CAAQ,IAAI,qBAAA,IAAyB,EAAA;AAC9D,EAAA,MAAM,iBAAA,GACJ,MAAA,CAAO,cAAA,IAAkB,OAAA,CAAQ,IAAI,uBAAA,IAA2B,EAAA;AAIlE,EAAA,MAAM,eAAA,GAAkB,iBAAA,GAAoB,CAAC,iBAAiB,IAAI,EAAC;AAEnE,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,OAAA;AAAA,IACN,eAAA;AAAA,IAEA,cAAc,cAAA,EAAwB;AACpC,MAAA,OAAO,kBAAA,CAAmB;AAAA,QACxB,MAAA;AAAA,QACA,YAAA;AAAA,QACA,gBAAgB,cAAA,IAAkB,iBAAA;AAAA,QAClC,YAAY,MAAA,CAAO,UAAA;AAAA,QACnB,SAAS,MAAA,CAAO;AAAA,OACjB,CAAA;AAAA,IACH,CAAA;AAAA,IAEA,gBAAgB,cAAA,EAA8C;AAC5D,MAAA,MAAM,QAAA,GAAW,gCAAA;AAAA,QACf,cAAA,IAAkB;AAAA,OACpB;AAEA,MAAA,OAAO;AAAA,QACL,gBAAgB,QAAA,CAAS,MAAA;AAAA,QACzB,eAAe,QAAA,CAAS,KAAA;AAAA,QACxB,gBAAA,EAAkB,KAAA;AAAA,QAClB,iBAAA,EAAmB,IAAA;AAAA,QACnB,WAAA,EAAa,KAAA;AAAA,QACb,aAAA,EAAe,KAAA;AAAA,QACf,aAAA,EAAe,KAAA;AAAA,QACf,WAAW,QAAA,CAAS,SAAA;AAAA,QACpB,mBAAA,EAAqB,SAAS,MAAA,GAC1B,CAAC,aAAa,YAAA,EAAc,WAAA,EAAa,YAAY,CAAA,GACrD,EAAC;AAAA,QACL,gBAAA,EAAkB,IAAA;AAAA,QAClB,sBAAA,EAAwB;AAAA,OAC1B;AAAA,IACF;AAAA,GACF;AACF;AAGO,IAAM,mBAAA,GAAsB","file":"index.mjs","sourcesContent":["import type {\n Message,\n MessageAttachment,\n ActionDefinition,\n StreamEvent,\n LLMConfig,\n} from \"@yourgpt/copilot-sdk/core\";\n\n/**\n * Chat completion request\n */\nexport interface ChatCompletionRequest {\n /** Conversation messages */\n messages: Message[];\n /**\n * Raw provider-formatted messages (for agent loop with tool calls)\n * When provided, these are used instead of converting from Message[]\n * This allows passing messages with tool_calls and tool role\n */\n rawMessages?: Array<Record<string, unknown>>;\n /** Available actions/tools */\n actions?: ActionDefinition[];\n /** System prompt */\n systemPrompt?: string;\n /** LLM configuration overrides */\n config?: Partial<LLMConfig>;\n /** Abort signal for cancellation */\n signal?: AbortSignal;\n}\n\n/**\n * Non-streaming completion result\n */\nexport interface CompletionResult {\n /** Text content */\n content: string;\n /** Tool calls */\n toolCalls: Array<{ id: string; name: string; args: Record<string, unknown> }>;\n /** Thinking content (if extended thinking enabled) */\n thinking?: string;\n /** Raw provider response for debugging */\n rawResponse: Record<string, unknown>;\n}\n\n/**\n * Base LLM adapter interface\n */\nexport interface LLMAdapter {\n /** Provider name */\n readonly provider: string;\n\n /** Model name */\n readonly model: string;\n\n /**\n * Stream a chat completion\n */\n stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;\n\n /**\n * Non-streaming chat completion (for debugging/comparison)\n */\n complete?(request: ChatCompletionRequest): Promise<CompletionResult>;\n}\n\n/**\n * Adapter factory function type\n */\nexport type AdapterFactory = (config: LLMConfig) => LLMAdapter;\n\n/**\n * Convert messages to provider format (simple text only)\n */\nexport function formatMessages(\n messages: Message[],\n systemPrompt?: string,\n): Array<{ role: string; content: string }> {\n const formatted: Array<{ role: string; content: string }> = [];\n\n // Add system prompt if provided\n if (systemPrompt) {\n formatted.push({ role: \"system\", content: systemPrompt });\n }\n\n // Add conversation messages\n for (const msg of messages) {\n formatted.push({\n role: msg.role,\n content: msg.content ?? 
\"\",\n });\n }\n\n return formatted;\n}\n\n/**\n * Convert ActionParameter to JSON Schema format recursively\n */\nfunction parameterToJsonSchema(param: {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n}): Record<string, unknown> {\n const schema: Record<string, unknown> = {\n type: param.type,\n };\n\n if (param.description) {\n schema.description = param.description;\n }\n\n if (param.enum) {\n schema.enum = param.enum;\n }\n\n // Handle array items\n if (param.type === \"array\" && param.items) {\n schema.items = parameterToJsonSchema(\n param.items as {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n },\n );\n }\n\n // Handle nested object properties\n if (param.type === \"object\" && param.properties) {\n schema.properties = Object.fromEntries(\n Object.entries(param.properties).map(([key, prop]) => [\n key,\n parameterToJsonSchema(\n prop as {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n },\n ),\n ]),\n );\n }\n\n return schema;\n}\n\n/**\n * Convert actions to OpenAI tool format\n */\nexport function formatTools(actions: ActionDefinition[]): Array<{\n type: \"function\";\n function: {\n name: string;\n description: string;\n parameters: object;\n };\n}> {\n return actions.map((action) => ({\n type: \"function\" as const,\n function: {\n name: action.name,\n description: action.description,\n parameters: {\n type: \"object\",\n properties: action.parameters\n ? Object.fromEntries(\n Object.entries(action.parameters).map(([key, param]) => [\n key,\n parameterToJsonSchema(param),\n ]),\n )\n : {},\n required: action.parameters\n ? Object.entries(action.parameters)\n .filter(([, param]) => param.required)\n .map(([key]) => key)\n : [],\n },\n },\n }));\n}\n\n// ============================================\n// Vision/Multimodal Support\n// ============================================\n\n/**\n * Content block types for multimodal messages\n */\nexport type AnthropicContentBlock =\n | { type: \"text\"; text: string }\n | {\n type: \"image\";\n source:\n | {\n type: \"base64\";\n media_type: string;\n data: string;\n }\n | {\n type: \"url\";\n url: string;\n };\n }\n | {\n type: \"document\";\n source:\n | {\n type: \"base64\";\n media_type: string;\n data: string;\n }\n | {\n type: \"url\";\n url: string;\n };\n };\n\nexport type OpenAIContentBlock =\n | { type: \"text\"; text: string }\n | {\n type: \"image_url\";\n image_url: {\n url: string;\n detail?: \"low\" | \"high\" | \"auto\";\n };\n };\n\n/**\n * Check if a message has image attachments\n * Supports both new format (metadata.attachments) and legacy (attachments)\n */\nexport function hasImageAttachments(message: Message): boolean {\n const attachments = message.metadata?.attachments;\n return attachments?.some((a) => a.type === \"image\") ?? false;\n}\n\n/**\n * Check if a message has media attachments (images or PDFs)\n */\nexport function hasMediaAttachments(message: Message): boolean {\n const attachments = message.metadata?.attachments;\n return (\n attachments?.some(\n (a) =>\n a.type === \"image\" ||\n (a.type === \"file\" && a.mimeType === \"application/pdf\"),\n ) ?? 
false\n );\n}\n\n/**\n * Convert MessageAttachment to Anthropic image content block\n *\n * Anthropic format:\n * {\n * type: \"image\",\n * source: {\n * type: \"base64\",\n * media_type: \"image/png\",\n * data: \"base64data...\"\n * }\n * }\n */\nexport function attachmentToAnthropicImage(\n attachment: MessageAttachment,\n): AnthropicContentBlock | null {\n if (attachment.type !== \"image\") return null;\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n return {\n type: \"image\",\n source: {\n type: \"url\",\n url: attachment.url,\n },\n };\n }\n\n // Fall back to base64 data\n if (!attachment.data) return null;\n\n // Extract base64 data (remove data URI prefix if present)\n let base64Data = attachment.data;\n if (base64Data.startsWith(\"data:\")) {\n const commaIndex = base64Data.indexOf(\",\");\n if (commaIndex !== -1) {\n base64Data = base64Data.slice(commaIndex + 1);\n }\n }\n\n return {\n type: \"image\",\n source: {\n type: \"base64\",\n media_type: attachment.mimeType || \"image/png\",\n data: base64Data,\n },\n };\n}\n\n/**\n * Convert MessageAttachment to OpenAI image_url content block\n *\n * OpenAI format:\n * {\n * type: \"image_url\",\n * image_url: {\n * url: \"data:image/png;base64,...\"\n * }\n * }\n */\nexport function attachmentToOpenAIImage(\n attachment: MessageAttachment,\n): OpenAIContentBlock | null {\n if (attachment.type !== \"image\") return null;\n\n let imageUrl: string;\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n imageUrl = attachment.url;\n } else if (attachment.data) {\n // Build data URI if not already one\n imageUrl = attachment.data.startsWith(\"data:\")\n ? attachment.data\n : `data:${attachment.mimeType || \"image/png\"};base64,${attachment.data}`;\n } else {\n return null;\n }\n\n return {\n type: \"image_url\",\n image_url: {\n url: imageUrl,\n detail: \"auto\",\n },\n };\n}\n\n/**\n * Convert MessageAttachment (PDF) to Anthropic document content block\n *\n * Anthropic format:\n * {\n * type: \"document\",\n * source: {\n * type: \"base64\",\n * media_type: \"application/pdf\",\n * data: \"base64data...\"\n * }\n * }\n */\nexport function attachmentToAnthropicDocument(\n attachment: MessageAttachment,\n): AnthropicContentBlock | null {\n // Only handle PDF files\n if (attachment.type !== \"file\" || attachment.mimeType !== \"application/pdf\") {\n return null;\n }\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n return {\n type: \"document\",\n source: {\n type: \"url\",\n url: attachment.url,\n },\n };\n }\n\n // Fall back to base64 data\n if (!attachment.data) return null;\n\n // Extract base64 data (remove data URI prefix if present)\n let base64Data = attachment.data;\n if (base64Data.startsWith(\"data:\")) {\n const commaIndex = base64Data.indexOf(\",\");\n if (commaIndex !== -1) {\n base64Data = base64Data.slice(commaIndex + 1);\n }\n }\n\n return {\n type: \"document\",\n source: {\n type: \"base64\",\n media_type: \"application/pdf\",\n data: base64Data,\n },\n };\n}\n\n/**\n * Convert a Message to Anthropic multimodal content blocks\n */\nexport function messageToAnthropicContent(\n message: Message,\n): string | AnthropicContentBlock[] {\n const attachments = message.metadata?.attachments;\n const content = message.content ?? 
\"\";\n\n // If no media attachments (images or PDFs), return simple string\n if (!hasMediaAttachments(message)) {\n return content;\n }\n\n // Build content blocks array\n const blocks: AnthropicContentBlock[] = [];\n\n // Add media attachments first (Claude recommends media before text)\n if (attachments) {\n for (const attachment of attachments) {\n // Try image first\n const imageBlock = attachmentToAnthropicImage(attachment);\n if (imageBlock) {\n blocks.push(imageBlock);\n continue;\n }\n // Try document (PDF)\n const docBlock = attachmentToAnthropicDocument(attachment);\n if (docBlock) {\n blocks.push(docBlock);\n }\n }\n }\n\n // Add text content\n if (content) {\n blocks.push({ type: \"text\", text: content });\n }\n\n return blocks;\n}\n\n/**\n * Convert a Message to OpenAI multimodal content blocks\n */\nexport function messageToOpenAIContent(\n message: Message,\n): string | OpenAIContentBlock[] {\n const attachments = message.metadata?.attachments;\n const content = message.content ?? \"\";\n\n // If no image attachments, return simple string\n if (!hasImageAttachments(message)) {\n return content;\n }\n\n // Build content blocks array\n const blocks: OpenAIContentBlock[] = [];\n\n // Add text content first\n if (content) {\n blocks.push({ type: \"text\", text: content });\n }\n\n // Add image attachments\n if (attachments) {\n for (const attachment of attachments) {\n const imageBlock = attachmentToOpenAIImage(attachment);\n if (imageBlock) {\n blocks.push(imageBlock);\n }\n }\n }\n\n return blocks;\n}\n\n/**\n * Anthropic content block types (extended for tools)\n */\nexport type AnthropicToolUseBlock = {\n type: \"tool_use\";\n id: string;\n name: string;\n input: Record<string, unknown>;\n};\n\nexport type AnthropicToolResultBlock = {\n type: \"tool_result\";\n tool_use_id: string;\n content: string;\n};\n\nexport type AnthropicMessageContent =\n | string\n | Array<\n AnthropicContentBlock | AnthropicToolUseBlock | AnthropicToolResultBlock\n >;\n\n/**\n * Format messages for Anthropic with full tool support\n * Handles: text, images, tool_use, and tool_result\n *\n * Key differences from OpenAI:\n * - tool_calls become tool_use blocks in assistant content\n * - tool results become tool_result blocks in user content\n */\nexport function formatMessagesForAnthropic(\n messages: Message[],\n systemPrompt?: string,\n): {\n system: string;\n messages: Array<{\n role: \"user\" | \"assistant\";\n content: AnthropicMessageContent;\n }>;\n} {\n const formatted: Array<{\n role: \"user\" | \"assistant\";\n content: AnthropicMessageContent;\n }> = [];\n\n for (let i = 0; i < messages.length; i++) {\n const msg = messages[i];\n\n if (msg.role === \"system\") continue; // System handled separately\n\n if (msg.role === \"assistant\") {\n // Build content array for assistant\n const content: Array<AnthropicContentBlock | AnthropicToolUseBlock> = [];\n\n // Add text content if present\n if (msg.content) {\n content.push({ type: \"text\", text: msg.content });\n }\n\n // Convert tool_calls to tool_use blocks\n if (msg.tool_calls && msg.tool_calls.length > 0) {\n for (const tc of msg.tool_calls) {\n content.push({\n type: \"tool_use\",\n id: tc.id,\n name: tc.function.name,\n input: JSON.parse(tc.function.arguments),\n });\n }\n }\n\n formatted.push({\n role: \"assistant\",\n content:\n content.length === 1 && content[0].type === \"text\"\n ? 
(content[0] as { type: \"text\"; text: string }).text\n : content,\n });\n } else if (msg.role === \"tool\" && msg.tool_call_id) {\n // Tool results go in user message as tool_result blocks\n // Group consecutive tool messages together\n const toolResults: AnthropicToolResultBlock[] = [\n {\n type: \"tool_result\",\n tool_use_id: msg.tool_call_id,\n content: msg.content ?? \"\",\n },\n ];\n\n // Look ahead for more consecutive tool messages\n while (i + 1 < messages.length && messages[i + 1].role === \"tool\") {\n i++;\n const nextTool = messages[i];\n if (nextTool.tool_call_id) {\n toolResults.push({\n type: \"tool_result\",\n tool_use_id: nextTool.tool_call_id,\n content: nextTool.content ?? \"\",\n });\n }\n }\n\n formatted.push({\n role: \"user\",\n content: toolResults,\n });\n } else if (msg.role === \"user\") {\n formatted.push({\n role: \"user\",\n content: messageToAnthropicContent(msg),\n });\n }\n }\n\n return {\n system: systemPrompt || \"\",\n messages: formatted,\n };\n}\n\n/**\n * OpenAI message format with tool support\n */\nexport type OpenAIMessage =\n | { role: \"system\"; content: string }\n | { role: \"user\"; content: string | OpenAIContentBlock[] }\n | {\n role: \"assistant\";\n content: string | null;\n tool_calls?: Array<{\n id: string;\n type: \"function\";\n function: { name: string; arguments: string };\n }>;\n }\n | { role: \"tool\"; content: string; tool_call_id: string };\n\n/**\n * Format messages for OpenAI with full tool support\n * Handles: text, images, tool_calls, and tool results\n */\nexport function formatMessagesForOpenAI(\n messages: Message[],\n systemPrompt?: string,\n): OpenAIMessage[] {\n const formatted: OpenAIMessage[] = [];\n\n // Add system prompt if provided\n if (systemPrompt) {\n formatted.push({ role: \"system\", content: systemPrompt });\n }\n\n for (const msg of messages) {\n if (msg.role === \"system\") {\n formatted.push({ role: \"system\", content: msg.content ?? \"\" });\n } else if (msg.role === \"user\") {\n formatted.push({\n role: \"user\",\n content: messageToOpenAIContent(msg),\n });\n } else if (msg.role === \"assistant\") {\n const assistantMsg: OpenAIMessage = {\n role: \"assistant\",\n content: msg.content,\n };\n if (msg.tool_calls && msg.tool_calls.length > 0) {\n (assistantMsg as { tool_calls: typeof msg.tool_calls }).tool_calls =\n msg.tool_calls;\n }\n formatted.push(assistantMsg);\n } else if (msg.role === \"tool\" && msg.tool_call_id) {\n formatted.push({\n role: \"tool\",\n content: msg.content ?? 
\"\",\n tool_call_id: msg.tool_call_id,\n });\n }\n }\n\n return formatted;\n}\n","/**\n * Azure OpenAI LLM Adapter\n *\n * Azure OpenAI uses Microsoft's cloud infrastructure with\n * different authentication and URL patterns than standard OpenAI.\n *\n * Supports: Any OpenAI model deployed on Azure (GPT-4, GPT-4o, etc.)\n * Features: Vision, Tools/Function Calling (depends on deployed model)\n */\n\nimport type { LLMConfig, StreamEvent } from \"@yourgpt/copilot-sdk/core\";\nimport {\n generateMessageId,\n generateToolCallId,\n} from \"@yourgpt/copilot-sdk/core\";\nimport type {\n LLMAdapter,\n ChatCompletionRequest,\n CompletionResult,\n} from \"./base\";\nimport { formatMessagesForOpenAI, formatTools } from \"./base\";\n\n// ============================================\n// Types\n// ============================================\n\n/**\n * Azure OpenAI adapter configuration\n */\nexport interface AzureAdapterConfig extends Partial<LLMConfig> {\n /** Azure OpenAI API key */\n apiKey: string;\n /** Azure resource name (e.g., 'my-resource') */\n resourceName: string;\n /** Azure deployment name (e.g., 'gpt-4o-deployment') */\n deploymentName: string;\n /** API version (default: 2024-08-01-preview) */\n apiVersion?: string;\n /** Custom endpoint URL (optional, overrides resourceName) */\n baseUrl?: string;\n}\n\n// Default Azure API version\nconst DEFAULT_API_VERSION = \"2024-08-01-preview\";\n\n/**\n * Build Azure OpenAI endpoint URL\n */\nfunction buildAzureEndpoint(\n resourceName: string,\n deploymentName: string,\n apiVersion: string,\n): string {\n return `https://${resourceName}.openai.azure.com/openai/deployments/${deploymentName}`;\n}\n\n// ============================================\n// Adapter Implementation\n// ============================================\n\n/**\n * Azure OpenAI LLM Adapter\n *\n * Uses Azure's OpenAI service with Azure-specific authentication\n */\nexport class AzureAdapter implements LLMAdapter {\n readonly provider = \"azure\";\n readonly model: string;\n\n private client: any; // OpenAI client (lazy loaded)\n private config: AzureAdapterConfig;\n\n constructor(config: AzureAdapterConfig) {\n this.config = config;\n this.model = config.deploymentName;\n }\n\n private async getClient() {\n if (!this.client) {\n // Use OpenAI SDK with Azure configuration\n const { AzureOpenAI } = await import(\"openai\");\n\n const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;\n const endpoint =\n this.config.baseUrl ||\n buildAzureEndpoint(\n this.config.resourceName,\n this.config.deploymentName,\n apiVersion,\n );\n\n this.client = new AzureOpenAI({\n apiKey: this.config.apiKey,\n endpoint,\n apiVersion,\n deployment: this.config.deploymentName,\n });\n }\n return this.client;\n }\n\n async *stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent> {\n const client = await this.getClient();\n\n // Use raw messages if provided (for agent loop with tool calls), otherwise format from Message[]\n let messages: Array<Record<string, unknown>>;\n if (request.rawMessages && request.rawMessages.length > 0) {\n // Process raw messages - convert any attachments to OpenAI vision format\n const processedMessages = request.rawMessages.map((msg) => {\n // Check if message has attachments (images)\n const hasAttachments =\n msg.attachments &&\n Array.isArray(msg.attachments) &&\n msg.attachments.length > 0;\n\n if (hasAttachments) {\n // Convert to OpenAI multimodal content format\n const content: Array<Record<string, unknown>> = [];\n\n // Add text content if 
present\n if (msg.content) {\n content.push({ type: \"text\", text: msg.content });\n }\n\n // Add image attachments\n for (const attachment of msg.attachments as Array<{\n type: string;\n data: string;\n mimeType?: string;\n }>) {\n if (attachment.type === \"image\") {\n // Convert to OpenAI image_url format\n let imageUrl = attachment.data;\n if (!imageUrl.startsWith(\"data:\")) {\n imageUrl = `data:${attachment.mimeType || \"image/png\"};base64,${attachment.data}`;\n }\n content.push({\n type: \"image_url\",\n image_url: { url: imageUrl, detail: \"auto\" },\n });\n }\n }\n\n return { ...msg, content, attachments: undefined };\n }\n return msg;\n });\n\n // Add system prompt at the start if provided and not already present\n if (request.systemPrompt) {\n const hasSystem = processedMessages.some((m) => m.role === \"system\");\n if (!hasSystem) {\n messages = [\n { role: \"system\", content: request.systemPrompt },\n ...processedMessages,\n ];\n } else {\n messages = processedMessages;\n }\n } else {\n messages = processedMessages;\n }\n } else {\n // Format from Message[] with multimodal support (images, attachments)\n messages = formatMessagesForOpenAI(\n request.messages,\n request.systemPrompt,\n ) as Array<Record<string, unknown>>;\n }\n\n const tools = request.actions?.length\n ? formatTools(request.actions)\n : undefined;\n\n const messageId = generateMessageId();\n\n // Emit message start\n yield { type: \"message:start\", id: messageId };\n\n try {\n const stream = await client.chat.completions.create({\n // Azure uses deployment name, not model name\n model: this.config.deploymentName,\n messages,\n tools,\n temperature: request.config?.temperature ?? this.config.temperature,\n max_tokens: request.config?.maxTokens ?? this.config.maxTokens,\n stream: true,\n });\n\n let currentToolCall: {\n id: string;\n name: string;\n arguments: string;\n } | null = null;\n\n for await (const chunk of stream) {\n // Check for abort\n if (request.signal?.aborted) {\n break;\n }\n\n const delta = chunk.choices[0]?.delta;\n\n // Handle content\n if (delta?.content) {\n yield { type: \"message:delta\", content: delta.content };\n }\n\n // Handle tool calls\n if (delta?.tool_calls) {\n for (const toolCall of delta.tool_calls) {\n // New tool call\n if (toolCall.id) {\n // End previous tool call if any\n if (currentToolCall) {\n yield {\n type: \"action:args\",\n id: currentToolCall.id,\n args: currentToolCall.arguments,\n };\n }\n\n currentToolCall = {\n id: toolCall.id,\n name: toolCall.function?.name || \"\",\n arguments: toolCall.function?.arguments || \"\",\n };\n\n yield {\n type: \"action:start\",\n id: currentToolCall.id,\n name: currentToolCall.name,\n };\n } else if (currentToolCall && toolCall.function?.arguments) {\n // Append to current tool call arguments\n currentToolCall.arguments += toolCall.function.arguments;\n }\n }\n }\n\n // Check for finish\n if (chunk.choices[0]?.finish_reason) {\n // Complete any pending tool call\n if (currentToolCall) {\n yield {\n type: \"action:args\",\n id: currentToolCall.id,\n args: currentToolCall.arguments,\n };\n }\n }\n }\n\n // Emit message end\n yield { type: \"message:end\" };\n yield { type: \"done\" };\n } catch (error) {\n yield {\n type: \"error\",\n message: error instanceof Error ? 
error.message : \"Unknown error\",\n code: \"AZURE_ERROR\",\n };\n }\n }\n\n /**\n * Non-streaming completion (optional, for debugging)\n */\n async complete(request: ChatCompletionRequest): Promise<CompletionResult> {\n const client = await this.getClient();\n\n let messages: Array<Record<string, unknown>>;\n if (request.rawMessages && request.rawMessages.length > 0) {\n messages = request.rawMessages as Array<Record<string, unknown>>;\n if (request.systemPrompt) {\n const hasSystem = messages.some((m) => m.role === \"system\");\n if (!hasSystem) {\n messages = [\n { role: \"system\", content: request.systemPrompt },\n ...messages,\n ];\n }\n }\n } else {\n messages = formatMessagesForOpenAI(\n request.messages,\n request.systemPrompt,\n ) as Array<Record<string, unknown>>;\n }\n\n const tools = request.actions?.length\n ? formatTools(request.actions)\n : undefined;\n\n const response = await client.chat.completions.create({\n model: this.config.deploymentName,\n messages,\n tools,\n temperature: request.config?.temperature ?? this.config.temperature,\n max_tokens: request.config?.maxTokens ?? this.config.maxTokens,\n });\n\n const choice = response.choices[0];\n const message = choice?.message;\n\n const toolCalls = (message?.tool_calls || []).map((tc: any) => ({\n id: tc.id,\n name: tc.function.name,\n args: JSON.parse(tc.function.arguments || \"{}\"),\n }));\n\n return {\n content: message?.content || \"\",\n toolCalls,\n rawResponse: response as Record<string, unknown>,\n };\n }\n}\n\n/**\n * Create Azure OpenAI adapter\n */\nexport function createAzureAdapter(config: AzureAdapterConfig): AzureAdapter {\n return new AzureAdapter(config);\n}\n","/**\n * Azure OpenAI Provider\n *\n * Wraps the AzureAdapter with provider interface.\n * Azure OpenAI provides enterprise-grade OpenAI models with Azure security.\n *\n * Features:\n * - Vision (for supported deployments)\n * - Tools/Function calling\n * - Enterprise security & compliance\n * - Private networking options\n *\n * Note: Capabilities depend on which model is deployed, not a model ID.\n * The provider attempts to detect capabilities from the deployment name.\n */\n\nimport { createAzureAdapter } from \"../../adapters/azure\";\nimport type {\n AIProvider,\n ProviderCapabilities,\n AzureProviderConfig,\n} from \"../types\";\n\n// ============================================\n// Model Capability Patterns\n// ============================================\n\n/**\n * Detect model capabilities from deployment name\n * Azure deployments are user-named, so we look for common patterns\n */\nfunction detectCapabilitiesFromDeployment(deploymentName: string): {\n vision: boolean;\n tools: boolean;\n maxTokens: number;\n} {\n const name = deploymentName.toLowerCase();\n\n // GPT-4o variants (vision, tools, 128k context)\n if (name.includes(\"gpt-4o\") || name.includes(\"gpt4o\")) {\n return { vision: true, tools: true, maxTokens: 128000 };\n }\n\n // GPT-4 Turbo with vision\n if (\n (name.includes(\"gpt-4\") || name.includes(\"gpt4\")) &&\n (name.includes(\"turbo\") || name.includes(\"vision\"))\n ) {\n return { vision: true, tools: true, maxTokens: 128000 };\n }\n\n // GPT-4 base\n if (name.includes(\"gpt-4\") || name.includes(\"gpt4\")) {\n return { vision: false, tools: true, maxTokens: 8192 };\n }\n\n // GPT-3.5 Turbo\n if (\n name.includes(\"gpt-35\") ||\n name.includes(\"gpt-3.5\") ||\n name.includes(\"gpt35\")\n ) {\n return { vision: false, tools: true, maxTokens: 16385 };\n }\n\n // o1 reasoning models\n if (name.includes(\"o1\")) {\n 
return { vision: true, tools: false, maxTokens: 128000 };\n }\n\n // Default fallback\n return { vision: false, tools: true, maxTokens: 8192 };\n}\n\n// ============================================\n// Provider Implementation\n// ============================================\n\n/**\n * Create an Azure OpenAI provider\n *\n * @example\n * ```typescript\n * const azure = createAzure({\n * apiKey: '...',\n * resourceName: 'my-azure-resource',\n * deploymentName: 'gpt-4o-deployment',\n * });\n * const adapter = azure.languageModel('gpt-4o-deployment');\n * const caps = azure.getCapabilities('gpt-4o-deployment');\n * ```\n */\nexport function createAzure(config: AzureProviderConfig): AIProvider {\n const apiKey = config.apiKey ?? process.env.AZURE_OPENAI_API_KEY ?? \"\";\n const resourceName =\n config.resourceName ?? process.env.AZURE_OPENAI_RESOURCE ?? \"\";\n const defaultDeployment =\n config.deploymentName ?? process.env.AZURE_OPENAI_DEPLOYMENT ?? \"\";\n\n // For Azure, the \"supported models\" are actually deployment names\n // We include the default deployment as the main \"model\"\n const supportedModels = defaultDeployment ? [defaultDeployment] : [];\n\n return {\n name: \"azure\",\n supportedModels,\n\n languageModel(deploymentName: string) {\n return createAzureAdapter({\n apiKey,\n resourceName,\n deploymentName: deploymentName || defaultDeployment,\n apiVersion: config.apiVersion,\n baseUrl: config.baseUrl,\n });\n },\n\n getCapabilities(deploymentName: string): ProviderCapabilities {\n const detected = detectCapabilitiesFromDeployment(\n deploymentName || defaultDeployment,\n );\n\n return {\n supportsVision: detected.vision,\n supportsTools: detected.tools,\n supportsThinking: false,\n supportsStreaming: true,\n supportsPDF: false,\n supportsAudio: false,\n supportsVideo: false,\n maxTokens: detected.maxTokens,\n supportedImageTypes: detected.vision\n ? [\"image/png\", \"image/jpeg\", \"image/gif\", \"image/webp\"]\n : [],\n supportsJsonMode: true,\n supportsSystemMessages: true,\n };\n },\n };\n}\n\n// Alias for consistency\nexport const createAzureProvider = createAzure;\n"]}
@@ -0,0 +1,72 @@
1
+ import { L as LanguageModel } from '../../types-CdORv1Yu.mjs';
2
+ import { G as GoogleProviderConfig, A as AIProvider } from '../../types-BBCZ3Fxy.mjs';
3
+ import 'zod';
4
+ import '@yourgpt/copilot-sdk/core';
5
+ import '../../base-D_FyHFKj.mjs';
6
+
7
+ /**
8
+ * Google Provider - Modern Pattern
9
+ *
10
+ * Google Gemini models.
11
+ *
12
+ * @example
13
+ * ```ts
14
+ * import { google } from '@yourgpt/llm-sdk/google';
15
+ * import { generateText } from '@yourgpt/llm-sdk';
16
+ *
17
+ * const result = await generateText({
18
+ * model: google('gemini-2.0-flash'),
19
+ * prompt: 'Hello!',
20
+ * });
21
+ * ```
22
+ */
23
+
24
+ interface GoogleProviderOptions {
25
+ /** API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var) */
26
+ apiKey?: string;
27
+ /** Safety settings */
28
+ safetySettings?: Array<{
29
+ category: string;
30
+ threshold: string;
31
+ }>;
32
+ }
33
+ /**
34
+ * Create a Google Gemini language model
35
+ */
36
+ declare function google(modelId: string, options?: GoogleProviderOptions): LanguageModel;
37
+
38
+ /**
39
+ * Google Provider
40
+ *
41
+ * Modern pattern: google('gemini-2.0-flash') returns a LanguageModel
42
+ * Legacy pattern: createGoogle({ apiKey }) returns an AIProvider
43
+ *
44
+ * Features:
45
+ * - Vision (images)
46
+ * - Audio input
47
+ * - Video input
48
+ * - PDF documents
49
+ * - Tools/Function calling
50
+ * - Massive context windows (up to 2M tokens)
51
+ */
52
+
53
+ /**
54
+ * Create a Google provider
55
+ *
56
+ * @example
57
+ * ```typescript
58
+ * const google = createGoogle({
59
+ * apiKey: '...',
60
+ * });
61
+ * const adapter = google.languageModel('gemini-2.0-flash');
62
+ * const caps = google.getCapabilities('gemini-2.0-flash');
63
+ *
64
+ * if (caps.supportsVideo) {
65
+ * // Show video upload button
66
+ * }
67
+ * ```
68
+ */
69
+ declare function createGoogle(config?: GoogleProviderConfig): AIProvider;
70
+ declare const createGoogleProvider: typeof createGoogle;
71
+
72
+ export { type GoogleProviderOptions, createGoogle, google as createGoogleModel, createGoogleProvider, google };
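
A hedged usage sketch for the Google provider declared above, pairing the modern `google()` pattern with `generateText` and the legacy `createGoogle()` pattern for capability checks. The `result.text` field and the `apiKey` property on `GoogleProviderConfig` are assumptions inferred from the surrounding JSDoc; neither is visible in this declaration file.

```typescript
// Sketch only: both calling patterns exported from providers/google.
import { generateText } from "@yourgpt/llm-sdk";
import { google, createGoogle } from "@yourgpt/llm-sdk/google";

// Modern pattern: google(modelId) returns a LanguageModel for generateText.
const result = await generateText({
  model: google("gemini-2.0-flash"),
  prompt: "Hello!",
});
console.log(result.text); // assumed result shape

// Legacy pattern: createGoogle() returns an AIProvider with capability introspection.
const provider = createGoogle({ apiKey: process.env.GOOGLE_API_KEY ?? "" }); // apiKey field assumed
const caps = provider.getCapabilities("gemini-2.0-flash");
if (caps.supportsVideo) {
  // e.g. enable a video upload affordance in the UI
}
```
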