@llumiverse/drivers 0.20.0 → 0.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/lib/cjs/azure/azure_foundry.js +388 -0
  2. package/lib/cjs/azure/azure_foundry.js.map +1 -0
  3. package/lib/cjs/bedrock/index.js +43 -27
  4. package/lib/cjs/bedrock/index.js.map +1 -1
  5. package/lib/cjs/groq/index.js +91 -10
  6. package/lib/cjs/groq/index.js.map +1 -1
  7. package/lib/cjs/index.js +2 -1
  8. package/lib/cjs/index.js.map +1 -1
  9. package/lib/cjs/mistral/index.js +2 -1
  10. package/lib/cjs/mistral/index.js.map +1 -1
  11. package/lib/cjs/openai/azure_openai.js +72 -0
  12. package/lib/cjs/openai/azure_openai.js.map +1 -0
  13. package/lib/cjs/openai/index.js +6 -9
  14. package/lib/cjs/openai/index.js.map +1 -1
  15. package/lib/cjs/openai/openai.js +2 -2
  16. package/lib/cjs/openai/openai.js.map +1 -1
  17. package/lib/cjs/openai/openai_format.js +138 -0
  18. package/lib/cjs/openai/openai_format.js.map +1 -0
  19. package/lib/cjs/vertexai/index.js +26 -18
  20. package/lib/cjs/vertexai/index.js.map +1 -1
  21. package/lib/cjs/vertexai/models/claude.js +5 -3
  22. package/lib/cjs/vertexai/models/claude.js.map +1 -1
  23. package/lib/cjs/vertexai/models/gemini.js +39 -12
  24. package/lib/cjs/vertexai/models/gemini.js.map +1 -1
  25. package/lib/cjs/watsonx/index.js +1 -1
  26. package/lib/cjs/watsonx/index.js.map +1 -1
  27. package/lib/cjs/xai/index.js +3 -3
  28. package/lib/cjs/xai/index.js.map +1 -1
  29. package/lib/esm/azure/azure_foundry.js +382 -0
  30. package/lib/esm/azure/azure_foundry.js.map +1 -0
  31. package/lib/esm/bedrock/index.js +43 -27
  32. package/lib/esm/bedrock/index.js.map +1 -1
  33. package/lib/esm/groq/index.js +91 -10
  34. package/lib/esm/groq/index.js.map +1 -1
  35. package/lib/esm/index.js +2 -1
  36. package/lib/esm/index.js.map +1 -1
  37. package/lib/esm/mistral/index.js +2 -1
  38. package/lib/esm/mistral/index.js.map +1 -1
  39. package/lib/esm/openai/azure_openai.js +68 -0
  40. package/lib/esm/openai/azure_openai.js.map +1 -0
  41. package/lib/esm/openai/index.js +5 -8
  42. package/lib/esm/openai/index.js.map +1 -1
  43. package/lib/esm/openai/openai.js +2 -2
  44. package/lib/esm/openai/openai.js.map +1 -1
  45. package/lib/esm/openai/openai_format.js +134 -0
  46. package/lib/esm/openai/openai_format.js.map +1 -0
  47. package/lib/esm/vertexai/index.js +26 -18
  48. package/lib/esm/vertexai/index.js.map +1 -1
  49. package/lib/esm/vertexai/models/claude.js +5 -3
  50. package/lib/esm/vertexai/models/claude.js.map +1 -1
  51. package/lib/esm/vertexai/models/gemini.js +39 -12
  52. package/lib/esm/vertexai/models/gemini.js.map +1 -1
  53. package/lib/esm/watsonx/index.js +1 -1
  54. package/lib/esm/watsonx/index.js.map +1 -1
  55. package/lib/esm/xai/index.js +2 -2
  56. package/lib/esm/xai/index.js.map +1 -1
  57. package/lib/types/azure/azure_foundry.d.ts +50 -0
  58. package/lib/types/azure/azure_foundry.d.ts.map +1 -0
  59. package/lib/types/bedrock/index.d.ts +2 -2
  60. package/lib/types/bedrock/index.d.ts.map +1 -1
  61. package/lib/types/groq/index.d.ts +5 -5
  62. package/lib/types/groq/index.d.ts.map +1 -1
  63. package/lib/types/index.d.ts +2 -1
  64. package/lib/types/index.d.ts.map +1 -1
  65. package/lib/types/mistral/index.d.ts +2 -2
  66. package/lib/types/mistral/index.d.ts.map +1 -1
  67. package/lib/types/openai/azure_openai.d.ts +25 -0
  68. package/lib/types/openai/azure_openai.d.ts.map +1 -0
  69. package/lib/types/openai/index.d.ts +6 -7
  70. package/lib/types/openai/index.d.ts.map +1 -1
  71. package/lib/types/openai/openai.d.ts +2 -2
  72. package/lib/types/openai/openai.d.ts.map +1 -1
  73. package/lib/types/openai/openai_format.d.ts +19 -0
  74. package/lib/types/openai/openai_format.d.ts.map +1 -0
  75. package/lib/types/vertexai/index.d.ts.map +1 -1
  76. package/lib/types/vertexai/models/claude.d.ts.map +1 -1
  77. package/lib/types/vertexai/models/gemini.d.ts +3 -2
  78. package/lib/types/vertexai/models/gemini.d.ts.map +1 -1
  79. package/lib/types/xai/index.d.ts.map +1 -1
  80. package/package.json +27 -23
  81. package/src/azure/azure_foundry.ts +458 -0
  82. package/src/bedrock/index.ts +44 -28
  83. package/src/groq/index.ts +107 -16
  84. package/src/index.ts +2 -1
  85. package/src/mistral/index.ts +3 -2
  86. package/src/openai/azure_openai.ts +92 -0
  87. package/src/openai/index.ts +19 -22
  88. package/src/openai/openai.ts +2 -5
  89. package/src/openai/openai_format.ts +165 -0
  90. package/src/vertexai/index.ts +29 -22
  91. package/src/vertexai/models/claude.ts +5 -3
  92. package/src/vertexai/models/gemini.ts +50 -12
  93. package/src/watsonx/index.ts +5 -5
  94. package/src/xai/index.ts +2 -3
  95. package/lib/cjs/openai/azure.js +0 -31
  96. package/lib/cjs/openai/azure.js.map +0 -1
  97. package/lib/esm/openai/azure.js +0 -27
  98. package/lib/esm/openai/azure.js.map +0 -1
  99. package/lib/types/openai/azure.d.ts +0 -20
  100. package/lib/types/openai/azure.d.ts.map +0 -1
  101. package/src/openai/azure.ts +0 -54
package/src/vertexai/models/claude.ts CHANGED
@@ -72,11 +72,13 @@ function maxToken(option: StatelessExecutionOptions): number {
     if (modelOptions && typeof modelOptions.max_tokens === "number") {
         return modelOptions.max_tokens;
     } else {
+        const thinking_budget = modelOptions?.thinking_budget_tokens ?? 0;
+        let maxSupportedTokens = getMaxTokensLimitVertexAi(option.model);
         // Fallback to the default max tokens limit for the model
-        if (option.model.includes('claude-3-7-sonnet') && (modelOptions?.thinking_budget_tokens ?? 0) < 64000) {
-            return 64000; // Claude 3.7 can go up to 128k with a beta header, but when no max tokens is specified, we default to 64k.
+        if (option.model.includes('claude-3-7-sonnet') && (modelOptions?.thinking_budget_tokens ?? 0) < 48000) {
+            maxSupportedTokens = 64000; // Claude 3.7 can go up to 128k with a beta header, but when no max tokens is specified, we default to 64k.
         }
-        return getMaxTokensLimitVertexAi(option.model);
+        return Math.min(16000 + thinking_budget, maxSupportedTokens); // Cap to 16k, to avoid taking up too much context window and quota.
     }
 }
 
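For reference, a minimal standalone sketch of the new fallback behaviour in maxToken. The helper getMaxTokensLimit and the sample numbers below are illustrative assumptions only, not values taken from the package:

// Sketch (TypeScript): default max_tokens when the caller does not set one.
function getMaxTokensLimit(model: string): number {
    return 8192; // hypothetical stand-in for getMaxTokensLimitVertexAi
}

function defaultMaxTokens(model: string, thinkingBudget: number = 0): number {
    let maxSupportedTokens = getMaxTokensLimit(model);
    if (model.includes("claude-3-7-sonnet") && thinkingBudget < 48000) {
        maxSupportedTokens = 64000; // larger output window for Claude 3.7 Sonnet
    }
    // Default to 16k plus any thinking budget, capped at the model limit,
    // so an unset max_tokens no longer claims the whole context window.
    return Math.min(16000 + thinkingBudget, maxSupportedTokens);
}

// defaultMaxTokens("claude-3-7-sonnet")        -> 16000
// defaultMaxTokens("claude-3-7-sonnet", 32000) -> 48000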
package/src/vertexai/models/gemini.ts CHANGED
@@ -1,5 +1,6 @@
 import {
     Content, FinishReason, FunctionCallingConfigMode, FunctionDeclaration, GenerateContentParameters,
+    GenerateContentResponseUsageMetadata,
     HarmBlockThreshold, HarmCategory, Part, SafetySetting, Schema, Tool, Type
 } from "@google/genai";
 import {
@@ -465,6 +466,12 @@ export function mergeConsecutiveRole(contents: Content[] | undefined): Content[]
     return result;
 }
 
+const supportedFinishReasons: FinishReason[] = [
+    FinishReason.MAX_TOKENS,
+    FinishReason.STOP,
+    FinishReason.FINISH_REASON_UNSPECIFIED
+]
+
 export class GeminiModelDefinition implements ModelDefinition<GenerateContentPrompt> {
 
     model: AIModel
@@ -608,6 +615,32 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
         return { contents, system };
     }
 
+    usageMetadataToTokenUsage(usageMetadata: GenerateContentResponseUsageMetadata | undefined): ExecutionTokenUsage {
+        if (!usageMetadata || !usageMetadata.totalTokenCount) {
+            return {};
+        }
+        const tokenUsage: ExecutionTokenUsage = { total: usageMetadata.totalTokenCount, prompt: usageMetadata.promptTokenCount };
+
+        //Output/Response side
+        tokenUsage.result = (usageMetadata.candidatesTokenCount ?? 0)
+            + (usageMetadata.thoughtsTokenCount ?? 0)
+            + (usageMetadata.toolUsePromptTokenCount ?? 0);
+
+        if ((tokenUsage.total ?? 0) != (tokenUsage.prompt ?? 0) + tokenUsage.result) {
+            console.warn("[VertexAI] Gemini token usage mismatch: total does not equal prompt + result", {
+                total: tokenUsage.total,
+                prompt: tokenUsage.prompt,
+                result: tokenUsage.result
+            });
+        }
+
+        if (!tokenUsage.result) {
+            tokenUsage.result = undefined; // If no result, mark as undefined
+        }
+
+        return tokenUsage;
+    }
+
     async requestTextCompletion(driver: VertexAIDriver, prompt: GenerateContentPrompt, options: ExecutionOptions): Promise<Completion> {
         const splits = options.model.split("/");
         const modelName = splits[splits.length - 1];
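To illustrate the new accounting, here is a minimal sketch of the same aggregation outside the driver; the *Like interfaces are simplified stand-ins for GenerateContentResponseUsageMetadata and ExecutionTokenUsage, and the example numbers are assumptions:

// Sketch (TypeScript): result tokens = candidates + thoughts + tool-use prompt tokens.
interface UsageMetadataLike {
    totalTokenCount?: number;
    promptTokenCount?: number;
    candidatesTokenCount?: number;
    thoughtsTokenCount?: number;
    toolUsePromptTokenCount?: number;
}
interface TokenUsageLike { prompt?: number; result?: number; total?: number }

function toTokenUsage(u?: UsageMetadataLike): TokenUsageLike {
    if (!u?.totalTokenCount) return {};
    const result = (u.candidatesTokenCount ?? 0)
        + (u.thoughtsTokenCount ?? 0)
        + (u.toolUsePromptTokenCount ?? 0);
    // result is left undefined when no output tokens were reported at all
    return { total: u.totalTokenCount, prompt: u.promptTokenCount, result: result || undefined };
}

// A thinking response with 100 prompt, 40 visible output and 60 reasoning tokens:
// toTokenUsage({ totalTokenCount: 200, promptTokenCount: 100, candidatesTokenCount: 40, thoughtsTokenCount: 60 })
//   -> { total: 200, prompt: 100, result: 100 }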
@@ -621,12 +654,7 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
         const payload = getGeminiPayload(options, prompt);
         const response = await client.models.generateContent(payload);
 
-        const usage = response.usageMetadata;
-        const token_usage: ExecutionTokenUsage = {
-            prompt: usage?.promptTokenCount,
-            result: usage?.candidatesTokenCount,
-            total: usage?.totalTokenCount,
-        }
+        const token_usage: ExecutionTokenUsage = this.usageMetadataToTokenUsage(response.usageMetadata);
 
         let tool_use: ToolUse[] | undefined;
         let finish_reason: string | undefined, result: any;
@@ -638,6 +666,13 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
             default: finish_reason = candidate.finishReason;
         }
         const content = candidate.content;
+
+        if (candidate.finishReason && !supportedFinishReasons.includes(candidate.finishReason)) {
+            throw new Error(`Unsupported finish reason: ${candidate.finishReason}, `
+                + `finish message: ${candidate.finishMessage}, `
+                + `content: ${JSON.stringify(content, null, 2)}, safety: ${JSON.stringify(candidate.safetyRatings, null, 2)}`);
+        }
+
         if (content) {
             tool_use = collectToolUseParts(content);
 
@@ -648,6 +683,8 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
             }
         }
 
+
+
         if (tool_use) {
             finish_reason = "tool_use";
         }
@@ -673,12 +710,7 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
         const response = await client.models.generateContentStream(payload);
 
         const stream = asyncMap(response, async (item) => {
-            const usage = item.usageMetadata;
-            const token_usage: ExecutionTokenUsage = {
-                prompt: usage?.promptTokenCount,
-                result: usage?.candidatesTokenCount,
-                total: usage?.totalTokenCount,
-            }
+            const token_usage: ExecutionTokenUsage = this.usageMetadataToTokenUsage(item.usageMetadata);
             if (item.candidates && item.candidates.length > 0) {
                 for (const candidate of item.candidates) {
                     let tool_use: ToolUse[] | undefined;
@@ -688,6 +720,11 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
                     case FinishReason.STOP: finish_reason = "stop"; break;
                     default: finish_reason = candidate.finishReason;
                 }
+                if (candidate.finishReason && !supportedFinishReasons.includes(candidate.finishReason)) {
+                    throw new Error(`Unsupported finish reason: ${candidate.finishReason}, `
+                        + `finish message: ${candidate.finishMessage}, `
+                        + `content: ${JSON.stringify(candidate.content, null, 2)}, safety: ${JSON.stringify(candidate.safetyRatings, null, 2)}`);
+                }
                 if (candidate.content?.role === 'model') {
                     const text = collectTextParts(candidate.content);
                     tool_use = collectToolUseParts(candidate.content);
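Both the blocking and streaming paths now check the finish reason against the supportedFinishReasons allow-list. A minimal sketch of that pattern, using a local enum as a stand-in for FinishReason from @google/genai (the enum values beyond those named in the diff are illustrative):

// Sketch (TypeScript): reject finish reasons outside the allow-list instead of
// silently returning a truncated or empty completion.
enum FinishReasonLike {
    STOP = "STOP",
    MAX_TOKENS = "MAX_TOKENS",
    FINISH_REASON_UNSPECIFIED = "FINISH_REASON_UNSPECIFIED",
    SAFETY = "SAFETY",
}

const supported = [
    FinishReasonLike.STOP,
    FinishReasonLike.MAX_TOKENS,
    FinishReasonLike.FINISH_REASON_UNSPECIFIED,
];

function assertSupportedFinishReason(reason: FinishReasonLike | undefined, detail: string): void {
    if (reason && !supported.includes(reason)) {
        throw new Error(`Unsupported finish reason: ${reason}, ${detail}`);
    }
}

// assertSupportedFinishReason(FinishReasonLike.SAFETY, "blocked by safety filters") // throws
// assertSupportedFinishReason(FinishReasonLike.STOP, "")                            // ok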
@@ -707,6 +744,7 @@ export class GeminiModelDefinition implements ModelDefinition<GenerateContentPro
             return {
                 result: item.promptFeedback?.blockReasonMessage ?? "",
                 finish_reason: item.promptFeedback?.blockReason ?? "",
+                token_usage: token_usage,
             };
         });
 
package/src/watsonx/index.ts CHANGED
@@ -31,10 +31,10 @@ export class WatsonxDriver extends AbstractDriver<WatsonxDriverOptions, string>
 
     async requestTextCompletion(prompt: string, options: ExecutionOptions): Promise<Completion<any>> {
         if (options.model_options?._option_id !== "text-fallback") {
-            this.logger.warn("Invalid model options", {options: options.model_options });
+            this.logger.warn("Invalid model options", { options: options.model_options });
         }
         options.model_options = options.model_options as TextFallbackOptions | undefined;
-
+
         const payload: WatsonxTextGenerationPayload = {
             model_id: options.model,
             input: prompt + "\n",
@@ -66,7 +66,7 @@
 
     async requestTextCompletionStream(prompt: string, options: ExecutionOptions): Promise<AsyncIterable<CompletionChunk>> {
         if (options.model_options?._option_id !== "text-fallback") {
-            this.logger.warn("Invalid model options", {options: options.model_options });
+            this.logger.warn("Invalid model options", { options: options.model_options });
         }
         options.model_options = options.model_options as TextFallbackOptions | undefined;
         const payload: WatsonxTextGenerationPayload = {
@@ -132,7 +132,7 @@
             if (now < this.authToken.expiration) {
                 return this.authToken.access_token;
             } else {
-                this.logger.debug("Token expired, refetching", this.authToken, now)
+                this.logger.debug("Token expired, refetching")
             }
         }
         const authToken = await fetch('https://iam.cloud.ibm.com/identity/token', {
@@ -163,7 +163,7 @@
         }
 
         if (!options.text) {
-            throw new Error ("No text provided");
+            throw new Error("No text provided");
         }
 
         const payload: GenerateEmbeddingPayload = {
package/src/xai/index.ts CHANGED
@@ -1,5 +1,5 @@
 import { AIModel, Completion, DriverOptions, ExecutionOptions, PromptOptions, PromptSegment } from "@llumiverse/core";
-import { formatOpenAILikeMultimodalPrompt, OpenAIPromptFormatterOptions } from "@llumiverse/core/formatters";
+import { formatOpenAILikeMultimodalPrompt, OpenAIPromptFormatterOptions } from "../openai/openai_format.js";
 import { FetchClient } from "@vertesia/api-fetch-client";
 import OpenAI from "openai";
 import { BaseOpenAIDriver } from "../openai/index.js";
@@ -14,7 +14,6 @@ export interface xAiDriverOptions extends DriverOptions {
 
 export class xAIDriver extends BaseOpenAIDriver {
 
-
     service: OpenAI;
     provider: "xai";
     xai_service: FetchClient;
@@ -33,7 +32,7 @@
         });
         this.xai_service = new FetchClient(opts.endpoint ?? this.DEFAULT_ENDPOINT ).withAuthCallback(async () => `Bearer ${opts.apiKey}`);
         this.provider = "xai";
-        this.formatPrompt = this._formatPrompt;
+        //this.formatPrompt = this._formatPrompt; //TODO: fix xai prompt formatting
     }
 
     async _formatPrompt(segments: PromptSegment[], opts: PromptOptions): Promise<OpenAI.Chat.Completions.ChatCompletionMessageParam[]> {
package/lib/cjs/openai/azure.js DELETED
@@ -1,31 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.AzureOpenAIDriver = void 0;
-const identity_1 = require("@azure/identity");
-const openai_1 = require("openai");
-const index_js_1 = require("./index.js");
-class AzureOpenAIDriver extends index_js_1.BaseOpenAIDriver {
-    service;
-    provider;
-    constructor(opts) {
-        super(opts);
-        if (!opts.azureADTokenProvider && !opts.apiKey) {
-            opts.azureADTokenProvider = this.getDefaultAuth();
-        }
-        this.service = new openai_1.AzureOpenAI({
-            apiKey: opts.apiKey,
-            azureADTokenProvider: opts.azureADTokenProvider,
-            endpoint: opts.endpoint,
-            apiVersion: opts.apiVersion ?? "2024-10-21",
-            deployment: opts.deployment
-        });
-        this.provider = "azure_openai";
-    }
-    getDefaultAuth() {
-        const scope = "https://cognitiveservices.azure.com/.default";
-        const azureADTokenProvider = (0, identity_1.getBearerTokenProvider)(new identity_1.DefaultAzureCredential(), scope);
-        return azureADTokenProvider;
-    }
-}
-exports.AzureOpenAIDriver = AzureOpenAIDriver;
-//# sourceMappingURL=azure.js.map
package/lib/cjs/openai/azure.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"azure.js","sourceRoot":"","sources":["../../../src/openai/azure.ts"],"names":[],"mappings":";;;AAAA,8CAAiF;AAEjF,mCAAqC;AACrC,yCAA8C;AAmB9C,MAAa,iBAAkB,SAAQ,2BAAgB;IAGnD,OAAO,CAAc;IACrB,QAAQ,CAAiB;IAEzB,YAAY,IAA8B;QACtC,KAAK,CAAC,IAAI,CAAC,CAAC;QAEZ,IAAI,CAAC,IAAI,CAAC,oBAAoB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YAC7C,IAAI,CAAC,oBAAoB,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC;QACtD,CAAC;QAED,IAAI,CAAC,OAAO,GAAG,IAAI,oBAAW,CAAC;YAC3B,MAAM,EAAE,IAAI,CAAC,MAAM;YACnB,oBAAoB,EAAE,IAAI,CAAC,oBAAoB;YAC/C,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,UAAU,EAAE,IAAI,CAAC,UAAU,IAAI,YAAY;YAC3C,UAAU,EAAE,IAAI,CAAC,UAAU;SAC9B,CAAC,CAAC;QACH,IAAI,CAAC,QAAQ,GAAG,cAAc,CAAC;IACnC,CAAC;IAGD,cAAc;QACV,MAAM,KAAK,GAAG,8CAA8C,CAAC;QAC7D,MAAM,oBAAoB,GAAG,IAAA,iCAAsB,EAAC,IAAI,iCAAsB,EAAE,EAAE,KAAK,CAAC,CAAC;QACzF,OAAO,oBAAoB,CAAC;IAChC,CAAC;CAGJ;AA/BD,8CA+BC"}
package/lib/esm/openai/azure.js DELETED
@@ -1,27 +0,0 @@
-import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
-import { AzureOpenAI } from "openai";
-import { BaseOpenAIDriver } from "./index.js";
-export class AzureOpenAIDriver extends BaseOpenAIDriver {
-    service;
-    provider;
-    constructor(opts) {
-        super(opts);
-        if (!opts.azureADTokenProvider && !opts.apiKey) {
-            opts.azureADTokenProvider = this.getDefaultAuth();
-        }
-        this.service = new AzureOpenAI({
-            apiKey: opts.apiKey,
-            azureADTokenProvider: opts.azureADTokenProvider,
-            endpoint: opts.endpoint,
-            apiVersion: opts.apiVersion ?? "2024-10-21",
-            deployment: opts.deployment
-        });
-        this.provider = "azure_openai";
-    }
-    getDefaultAuth() {
-        const scope = "https://cognitiveservices.azure.com/.default";
-        const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
-        return azureADTokenProvider;
-    }
-}
-//# sourceMappingURL=azure.js.map
package/lib/esm/openai/azure.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"azure.js","sourceRoot":"","sources":["../../../src/openai/azure.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,sBAAsB,EAAE,sBAAsB,EAAE,MAAM,iBAAiB,CAAC;AAEjF,OAAO,EAAE,WAAW,EAAE,MAAM,QAAQ,CAAC;AACrC,OAAO,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAmB9C,MAAM,OAAO,iBAAkB,SAAQ,gBAAgB;IAGnD,OAAO,CAAc;IACrB,QAAQ,CAAiB;IAEzB,YAAY,IAA8B;QACtC,KAAK,CAAC,IAAI,CAAC,CAAC;QAEZ,IAAI,CAAC,IAAI,CAAC,oBAAoB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YAC7C,IAAI,CAAC,oBAAoB,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC;QACtD,CAAC;QAED,IAAI,CAAC,OAAO,GAAG,IAAI,WAAW,CAAC;YAC3B,MAAM,EAAE,IAAI,CAAC,MAAM;YACnB,oBAAoB,EAAE,IAAI,CAAC,oBAAoB;YAC/C,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,UAAU,EAAE,IAAI,CAAC,UAAU,IAAI,YAAY;YAC3C,UAAU,EAAE,IAAI,CAAC,UAAU;SAC9B,CAAC,CAAC;QACH,IAAI,CAAC,QAAQ,GAAG,cAAc,CAAC;IACnC,CAAC;IAGD,cAAc;QACV,MAAM,KAAK,GAAG,8CAA8C,CAAC;QAC7D,MAAM,oBAAoB,GAAG,sBAAsB,CAAC,IAAI,sBAAsB,EAAE,EAAE,KAAK,CAAC,CAAC;QACzF,OAAO,oBAAoB,CAAC;IAChC,CAAC;CAGJ"}
package/lib/types/openai/azure.d.ts DELETED
@@ -1,20 +0,0 @@
-import { DriverOptions } from "@llumiverse/core";
-import { AzureOpenAI } from "openai";
-import { BaseOpenAIDriver } from "./index.js";
-export interface AzureOpenAIDriverOptions extends DriverOptions {
-    /**
-     * The credentials to use to access Azure OpenAI
-     */
-    azureADTokenProvider?: any;
-    apiKey?: string;
-    endpoint?: string;
-    apiVersion?: string;
-    deployment?: string;
-}
-export declare class AzureOpenAIDriver extends BaseOpenAIDriver {
-    service: AzureOpenAI;
-    provider: "azure_openai";
-    constructor(opts: AzureOpenAIDriverOptions);
-    getDefaultAuth(): () => Promise<string>;
-}
-//# sourceMappingURL=azure.d.ts.map
package/lib/types/openai/azure.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"azure.d.ts","sourceRoot":"","sources":["../../../src/openai/azure.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACjD,OAAO,EAAE,WAAW,EAAE,MAAM,QAAQ,CAAC;AACrC,OAAO,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAE9C,MAAM,WAAW,wBAAyB,SAAQ,aAAa;IAE3D;;OAEG;IACH,oBAAoB,CAAC,EAAE,GAAG,CAAC;IAE3B,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAElB,UAAU,CAAC,EAAE,MAAM,CAAA;IAEnB,UAAU,CAAC,EAAE,MAAM,CAAC;CAEvB;AAED,qBAAa,iBAAkB,SAAQ,gBAAgB;IAGnD,OAAO,EAAE,WAAW,CAAC;IACrB,QAAQ,EAAE,cAAc,CAAC;gBAEb,IAAI,EAAE,wBAAwB;IAkB1C,cAAc;CAOjB"}
package/src/openai/azure.ts DELETED
@@ -1,54 +0,0 @@
-import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
-import { DriverOptions } from "@llumiverse/core";
-import { AzureOpenAI } from "openai";
-import { BaseOpenAIDriver } from "./index.js";
-
-export interface AzureOpenAIDriverOptions extends DriverOptions {
-
-    /**
-     * The credentials to use to access Azure OpenAI
-     */
-    azureADTokenProvider?: any; //type with azure credentials
-
-    apiKey?: string;
-
-    endpoint?: string;
-
-    apiVersion?: string
-
-    deployment?: string;
-
-}
-
-export class AzureOpenAIDriver extends BaseOpenAIDriver {
-
-
-    service: AzureOpenAI;
-    provider: "azure_openai";
-
-    constructor(opts: AzureOpenAIDriverOptions) {
-        super(opts);
-
-        if (!opts.azureADTokenProvider && !opts.apiKey) {
-            opts.azureADTokenProvider = this.getDefaultAuth();
-        }
-
-        this.service = new AzureOpenAI({
-            apiKey: opts.apiKey,
-            azureADTokenProvider: opts.azureADTokenProvider,
-            endpoint: opts.endpoint,
-            apiVersion: opts.apiVersion ?? "2024-10-21",
-            deployment: opts.deployment
-        });
-        this.provider = "azure_openai";
-    }
-
-
-    getDefaultAuth() {
-        const scope = "https://cognitiveservices.azure.com/.default";
-        const azureADTokenProvider = getBearerTokenProvider(new DefaultAzureCredential(), scope);
-        return azureADTokenProvider;
-    }
-
-
-}