@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,5 @@
+ # @sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai
+
+ The model-provider-azure-ai backend module for the ai-assistant plugin.
+
+ _This plugin was created through the Backstage CLI_
package/config.d.ts ADDED
@@ -0,0 +1,16 @@
+ export interface Config {
+   aiAssistant: {
+     models: {
+       azureAiInference: {
+         /**
+          * @visibility secret
+          */
+         apiKey: string;
+         models: Array<{
+           modelName: string;
+           endpoint: string;
+         }>;
+       };
+     };
+   };
+ }
package/dist/azure-ai-inference-chat-model.cjs.js ADDED
@@ -0,0 +1,105 @@
+ 'use strict';
+
+ var chat_models = require('@langchain/core/language_models/chat_models');
+ var createClient = require('@azure-rest/ai-inference');
+ var coreAuth = require('@azure/core-auth');
+ var messages = require('@langchain/core/messages');
+ var stream = require('@langchain/core/utils/stream');
+ var outputs = require('@langchain/core/outputs');
+ var utils = require('./utils.cjs.js');
+ var coreSse = require('@azure/core-sse');
+
+ function _interopDefaultCompat (e) { return e && typeof e === 'object' && 'default' in e ? e : { default: e }; }
+
+ var createClient__default = /*#__PURE__*/_interopDefaultCompat(createClient);
+
+ class AzureAiInferenceChatModel extends chat_models.BaseChatModel {
+   modelName;
+   endpoint;
+   apiKey;
+   client;
+   constructor({
+     modelName,
+     endpoint,
+     apiKey,
+     ...rest
+   }) {
+     super(rest);
+     this.modelName = modelName;
+     this.endpoint = endpoint;
+     this.apiKey = apiKey;
+     this.client = createClient__default.default(endpoint, new coreAuth.AzureKeyCredential(apiKey));
+   }
+   _llmType() {
+     return "azure-ai-inference";
+   }
+   async *_streamResponseChunks(messages$1, _options, runManager) {
+     const aiInferenceMessages = utils.convertToAzureAiInferenceMessages(messages$1);
+     const response = await this.client.path("/chat/completions").post({
+       body: {
+         stream: true,
+         messages: aiInferenceMessages,
+         model: this.modelName
+       }
+     }).asNodeStream();
+     const stream = response.body;
+     if (!stream) {
+       throw new Error("Azure AI Inference response stream is undefined");
+     }
+     if (response.status !== "200") {
+       stream.destroy();
+       throw new Error(
+         `Failed to get chat completions. Operation failed with ${response.status} code.`
+       );
+     }
+     const sseStream = coreSse.createSseStream(stream);
+     for await (const event of sseStream) {
+       if (event.data === "[DONE]") {
+         return;
+       }
+       for (const choice of JSON.parse(event.data).choices) {
+         const token = choice.delta?.content ?? "";
+         const responseMessage = new messages.AIMessageChunk({
+           content: token
+         });
+         yield new outputs.ChatGenerationChunk({
+           text: token,
+           message: responseMessage
+         });
+         await runManager?.handleLLMNewToken(token);
+       }
+     }
+   }
+   async _generate(messages$1, options, runManager) {
+     let finalChunk;
+     for await (const chunk of this._streamResponseChunks(
+       messages$1,
+       options,
+       runManager
+     )) {
+       if (!finalChunk) {
+         finalChunk = chunk.message;
+       } else {
+         finalChunk = stream.concat(finalChunk, chunk.message);
+       }
+     }
+     const nonChunkMessage = new messages.AIMessage({
+       id: finalChunk?.id,
+       content: finalChunk?.content ?? "",
+       tool_calls: finalChunk?.tool_calls,
+       response_metadata: finalChunk?.response_metadata,
+       usage_metadata: finalChunk?.usage_metadata
+     });
+     return {
+       generations: [
+         {
+           text: typeof nonChunkMessage.content === "string" ? nonChunkMessage.content : "",
+           message: nonChunkMessage
+         }
+       ]
+     };
+   }
+ }
+
+ exports.AzureAiInferenceChatModel = AzureAiInferenceChatModel;
+ //# sourceMappingURL=azure-ai-inference-chat-model.cjs.js.map
package/dist/azure-ai-inference-chat-model.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"azure-ai-inference-chat-model.cjs.js","sources":["../src/azure-ai-inference-chat-model.ts"],"sourcesContent":["import {\n  BaseChatModel,\n  BaseChatModelParams,\n} from '@langchain/core/language_models/chat_models';\nimport createClient, { ModelClient } from '@azure-rest/ai-inference';\nimport { AzureKeyCredential } from '@azure/core-auth';\nimport {\n  AIMessage,\n  AIMessageChunk,\n  BaseMessage,\n} from '@langchain/core/messages';\nimport { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';\nimport { concat } from '@langchain/core/utils/stream';\nimport { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';\nimport { convertToAzureAiInferenceMessages } from './utils';\nimport { createSseStream } from '@azure/core-sse';\n\nexport interface ChatAzureAiInferenceInputs extends BaseChatModelParams {\n  modelName: string;\n  endpoint: string;\n  apiKey: string;\n}\n\nexport class AzureAiInferenceChatModel\n  extends BaseChatModel\n  implements ChatAzureAiInferenceInputs\n{\n  modelName: string;\n  endpoint: string;\n  apiKey: string;\n  private client: ModelClient;\n\n  constructor({\n    modelName,\n    endpoint,\n    apiKey,\n    ...rest\n  }: ChatAzureAiInferenceInputs) {\n    super(rest);\n    this.modelName = modelName;\n    this.endpoint = endpoint;\n    this.apiKey = apiKey;\n    this.client = createClient(endpoint, new AzureKeyCredential(apiKey));\n  }\n\n  _llmType(): string {\n    return 'azure-ai-inference';\n  }\n\n  async *_streamResponseChunks(\n    messages: BaseMessage[],\n    _options: this['ParsedCallOptions'],\n    runManager?: CallbackManagerForLLMRun,\n  ): AsyncGenerator<ChatGenerationChunk> {\n    const aiInferenceMessages = convertToAzureAiInferenceMessages(messages);\n\n    const response = await this.client\n      .path('/chat/completions')\n      .post({\n        body: {\n          stream: true,\n          messages: aiInferenceMessages,\n          model: this.modelName,\n        },\n      })\n      .asNodeStream();\n\n    const stream = response.body;\n\n    if (!stream) {\n      throw new Error('Azure AI Inference response stream is undefined');\n    }\n\n    if (response.status !== '200') {\n      stream.destroy();\n      throw new Error(\n        `Failed to get chat completions. Operation failed with ${response.status} code.`,\n      );\n    }\n\n    const sseStream = createSseStream(stream);\n\n    for await (const event of sseStream) {\n      if (event.data === '[DONE]') {\n        return;\n      }\n\n      for (const choice of JSON.parse(event.data).choices) {\n        const token = choice.delta?.content ?? '';\n\n        const responseMessage = new AIMessageChunk({\n          content: token,\n        });\n\n        yield new ChatGenerationChunk({\n          text: token,\n          message: responseMessage,\n        });\n        await runManager?.handleLLMNewToken(token);\n      }\n    }\n  }\n\n  async _generate(\n    messages: BaseMessage[],\n    options: this['ParsedCallOptions'],\n    runManager?: CallbackManagerForLLMRun,\n  ): Promise<ChatResult> {\n    let finalChunk: AIMessageChunk | undefined;\n    for await (const chunk of this._streamResponseChunks(\n      messages,\n      options,\n      runManager,\n    )) {\n      if (!finalChunk) {\n        finalChunk = chunk.message;\n      } else {\n        finalChunk = concat(finalChunk, chunk.message);\n      }\n    }\n\n    // Convert from AIMessageChunk to AIMessage since `generate` expects AIMessage.\n    const nonChunkMessage = new AIMessage({\n      id: finalChunk?.id,\n      content: finalChunk?.content ?? '',\n      tool_calls: finalChunk?.tool_calls,\n      response_metadata: finalChunk?.response_metadata,\n      usage_metadata: finalChunk?.usage_metadata,\n    });\n    return {\n      generations: [\n        {\n          text:\n            typeof nonChunkMessage.content === 'string'\n              ? nonChunkMessage.content\n              : '',\n          message: nonChunkMessage,\n        },\n      ],\n    };\n  }\n}\n"],"names":["BaseChatModel","createClient","AzureKeyCredential","messages","convertToAzureAiInferenceMessages","createSseStream","AIMessageChunk","ChatGenerationChunk","concat","AIMessage"],"mappings":";;;;;;;;;;;;;;;AAuBO,MAAM,kCACHA,yBAAA,CAEV;AAAA,EACE,SAAA;AAAA,EACA,QAAA;AAAA,EACA,MAAA;AAAA,EACQ,MAAA;AAAA,EAER,WAAA,CAAY;AAAA,IACV,SAAA;AAAA,IACA,QAAA;AAAA,IACA,MAAA;AAAA,IACA,GAAG;AAAA,GACL,EAA+B;AAC7B,IAAA,KAAA,CAAM,IAAI,CAAA;AACV,IAAA,IAAA,CAAK,SAAA,GAAY,SAAA;AACjB,IAAA,IAAA,CAAK,QAAA,GAAW,QAAA;AAChB,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,SAASC,6BAAA,CAAa,QAAA,EAAU,IAAIC,2BAAA,CAAmB,MAAM,CAAC,CAAA;AAAA,EACrE;AAAA,EAEA,QAAA,GAAmB;AACjB,IAAA,OAAO,oBAAA;AAAA,EACT;AAAA,EAEA,OAAO,qBAAA,CACLC,UAAA,EACA,QAAA,EACA,UAAA,EACqC;AACrC,IAAA,MAAM,mBAAA,GAAsBC,wCAAkCD,UAAQ,CAAA;AAEtE,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,OACzB,IAAA,CAAK,mBAAmB,EACxB,IAAA,CAAK;AAAA,MACJ,IAAA,EAAM;AAAA,QACJ,MAAA,EAAQ,IAAA;AAAA,QACR,QAAA,EAAU,mBAAA;AAAA,QACV,OAAO,IAAA,CAAK;AAAA;AACd,KACD,EACA,YAAA,EAAa;AAEhB,IAAA,MAAM,SAAS,QAAA,CAAS,IAAA;AAExB,IAAA,IAAI,CAAC,MAAA,EAAQ;AACX,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACnE;AAEA,IAAA,IAAI,QAAA,CAAS,WAAW,KAAA,EAAO;AAC7B,MAAA,MAAA,CAAO,OAAA,EAAQ;AACf,MAAA,MAAM,IAAI,KAAA;AAAA,QACR,CAAA,sDAAA,EAAyD,SAAS,MAAM,CAAA,MAAA;AAAA,OAC1E;AAAA,IACF;AAEA,IAAA,MAAM,SAAA,GAAYE,wBAAgB,MAAM,CAAA;AAExC,IAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AACnC,MAAA,IAAI,KAAA,CAAM,SAAS,QAAA,EAAU;AC3B,QAAA;AAAA,MACF;AAEA,MAAA,KAAA,MAAW,UAAU,IAAA,CAAK,KAAA,CAAM,KAAA,CAAM,IAAI,EAAE,OAAA,EAAS;AACnD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,EAAO,OAAA,IAAW,EAAA;AAEvC,QAAA,MAAM,eAAA,GAAkB,IAAIC,uBAAA,CAAe;AAAA,UACzC,OAAA,EAAS;AAAA,SACV,CAAA;AAED,QAAA,MAAM,IAAIC,2BAAA,CAAoB;AAAA,UAC5B,IAAA,EAAM,KAAA;AAAA,UACN,OAAA,EAAS;AAAA,SACV,CAAA;AACD,QAAA,MAAM,UAAA,EAAY,kBAAkB,KAAK,CAAA;AAAA,MAC3C;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,SAAA,CACJJ,UAAA,EACA,OAAA,EACA,UAAA,EACqB;AACrB,IAAA,IAAI,UAAA;AACJ,IAAA,WAAA,MAAiB,SAAS,IAAA,CAAK,qBAAA;AAAA,MAC7BA,UAAA;AAAA,MACA,OAAA;AAAA,MACA;AAAA,KACF,EAAG;AACD,MAAA,IAAI,CAAC,UAAA,EAAY;AACf,QAAA,UAAA,GAAa,KAAA,CAAM,OAAA;AAAA,MACrB,CAAA,MAAO;AACL,QAAA,UAAA,GAAaK,aAAA,CAAO,UAAA,EAAY,KAAA,CAAM,OAAO,CAAA;AAAA,MAC/C;AAAA,IACF;AAGA,IAAA,MAAM,eAAA,GAAkB,IAAIC,kBAAA,CAAU;AAAA,MACpC,IAAI,UAAA,EAAY,EAAA;AAAA,MAChB,OAAA,EAAS,YAAY,OAAA,IAAW,EAAA;AAAA,MAChC,YAAY,UAAA,EAAY,UAAA;AAAA,MACxB,mBAAmB,UAAA,EAAY,iBAAA;AAAA,MAC/B,gBAAgB,UAAA,EAAY;AAAA,KAC7B,CAAA;AACD,IAAA,OAAO;AAAA,MACL,WAAA,EAAa;AAAA,QACX;AAAA,UACE,MACE,OAAO,eAAA,CAAgB,OAAA,KAAY,QAAA,GAC/B,gBAAgB,OAAA,GAChB,EAAA;AAAA,UACN,OAAA,EAAS;AAAA;AACX;AACF,KACF;AAAA,EACF;AACF;;;;"}
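Because `AzureAiInferenceChatModel` extends LangChain's `BaseChatModel`, the standard `invoke`/`stream` API applies. A minimal usage sketch, importing the class from the package source the same way `module.ts` does; the env var, endpoint, and deployment name are placeholders:

```ts
import { AzureAiInferenceChatModel } from './azure-ai-inference-chat-model';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';

async function main() {
  const chatModel = new AzureAiInferenceChatModel({
    apiKey: process.env.AZURE_AI_API_KEY ?? '', // hypothetical env var
    endpoint: 'https://example.inference.azure.com', // placeholder endpoint
    modelName: 'my-deployment', // placeholder deployment name
  });

  // invoke() drives _generate(), which buffers the SSE chunks into one AIMessage.
  const reply = await chatModel.invoke([
    new SystemMessage('You are a helpful assistant.'),
    new HumanMessage('Say hello.'),
  ]);
  console.log(reply.content);

  // stream() surfaces each ChatGenerationChunk token as it arrives.
  for await (const chunk of await chatModel.stream([new HumanMessage('Count to three.')])) {
    process.stdout.write(String(chunk.content));
  }
}

main().catch(console.error);
```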
package/dist/index.cjs.js ADDED
@@ -0,0 +1,10 @@
+ 'use strict';
+
+ Object.defineProperty(exports, '__esModule', { value: true });
+
+ var module$1 = require('./module.cjs.js');
+
+
+
+ exports.default = module$1.aiAssistantModuleModelProviderAzureAi;
+ //# sourceMappingURL=index.cjs.js.map
package/dist/index.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.cjs.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;"}
package/dist/index.d.ts ADDED
@@ -0,0 +1,5 @@
+ import * as _backstage_backend_plugin_api from '@backstage/backend-plugin-api';
+
+ declare const aiAssistantModuleModelProviderAzureAi: _backstage_backend_plugin_api.BackendFeature;
+
+ export { aiAssistantModuleModelProviderAzureAi as default };
package/dist/module.cjs.js ADDED
@@ -0,0 +1,42 @@
+ 'use strict';
+
+ var backendPluginApi = require('@backstage/backend-plugin-api');
+ var backstagePluginAiAssistantNode = require('@sweetoburrito/backstage-plugin-ai-assistant-node');
+ var azureAiInferenceChatModel = require('./azure-ai-inference-chat-model.cjs.js');
+
+ const aiAssistantModuleModelProviderAzureAi = backendPluginApi.createBackendModule({
+   pluginId: "ai-assistant",
+   moduleId: "model-provider-azure-ai",
+   register(reg) {
+     reg.registerInit({
+       deps: {
+         config: backendPluginApi.coreServices.rootConfig,
+         modelProvider: backstagePluginAiAssistantNode.modelProviderExtensionPoint
+       },
+       async init({ config, modelProvider }) {
+         const azureConfig = config.getConfig(
+           "aiAssistant.models.azureAiInference"
+         );
+         const apiKey = azureConfig.getString("apiKey");
+         const modelConfigs = azureConfig.getOptionalConfigArray("models");
+         const models = modelConfigs?.map((modelConfig) => {
+           const endpoint = modelConfig.getString("endpoint");
+           const modelName = modelConfig.getString("modelName");
+           const chatModel = new azureAiInferenceChatModel.AzureAiInferenceChatModel({
+             apiKey,
+             endpoint,
+             modelName
+           });
+           return {
+             id: modelName,
+             chatModel
+           };
+         }) ?? [];
+         models.forEach((model) => modelProvider.register(model));
+       }
+     });
+   }
+ });
+
+ exports.aiAssistantModuleModelProviderAzureAi = aiAssistantModuleModelProviderAzureAi;
+ //# sourceMappingURL=module.cjs.js.map
package/dist/module.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"module.cjs.js","sources":["../src/module.ts"],"sourcesContent":["import {\n  coreServices,\n  createBackendModule,\n} from '@backstage/backend-plugin-api';\n\nimport {\n  Model,\n  modelProviderExtensionPoint,\n} from '@sweetoburrito/backstage-plugin-ai-assistant-node';\nimport { AzureAiInferenceChatModel } from './azure-ai-inference-chat-model';\n\nimport { BaseChatModel } from '@langchain/core/language_models/chat_models';\n\nexport const aiAssistantModuleModelProviderAzureAi = createBackendModule({\n  pluginId: 'ai-assistant',\n  moduleId: 'model-provider-azure-ai',\n  register(reg) {\n    reg.registerInit({\n      deps: {\n        config: coreServices.rootConfig,\n        modelProvider: modelProviderExtensionPoint,\n      },\n      async init({ config, modelProvider }) {\n        const azureConfig = config.getConfig(\n          'aiAssistant.models.azureAiInference',\n        );\n\n        const apiKey = azureConfig.getString('apiKey');\n        const modelConfigs = azureConfig.getOptionalConfigArray('models');\n\n        const models: Model[] =\n          modelConfigs?.map<Model>(modelConfig => {\n            const endpoint = modelConfig.getString('endpoint');\n            const modelName = modelConfig.getString('modelName');\n\n            const chatModel: BaseChatModel = new AzureAiInferenceChatModel({\n              apiKey,\n              endpoint,\n              modelName,\n            });\n\n            return {\n              id: modelName,\n              chatModel,\n            };\n          }) ?? [];\n\n        models.forEach(model => modelProvider.register(model));\n      },\n    });\n  },\n});\n"],"names":["createBackendModule","coreServices","modelProviderExtensionPoint","AzureAiInferenceChatModel"],"mappings":";;;;;;AAaO,MAAM,wCAAwCA,oCAAA,CAAoB;AAAA,EACvE,QAAA,EAAU,cAAA;AAAA,EACV,QAAA,EAAU,yBAAA;AAAA,EACV,SAAS,GAAA,EAAK;AACZ,IAAA,GAAA,CAAI,YAAA,CAAa;AAAA,MACf,IAAA,EAAM;AAAA,QACJ,QAAQC,6BAAA,CAAa,UAAA;AAAA,QACrB,aAAA,EAAeC;AAAA,OACjB;AAAA,MACA,MAAM,IAAA,CAAK,EAAE,MAAA,EAAQ,eAAc,EAAG;AACpC,QAAA,MAAM,cAAc,MAAA,CAAO,SAAA;AAAA,UACzB;AAAA,SACF;AAEA,QAAA,MAAM,MAAA,GAAS,WAAA,CAAY,SAAA,CAAU,QAAQ,CAAA;AAC7C,QAAA,MAAM,YAAA,GAAe,WAAA,CAAY,sBAAA,CAAuB,QAAQ,CAAA;AAEhE,QAAA,MAAM,MAAA,GACJ,YAAA,EAAc,GAAA,CAAW,CAAA,WAAA,KAAe;AACtC,UAAA,MAAM,QAAA,GAAW,WAAA,CAAY,SAAA,CAAU,UAAU,CAAA;AACjD,UAAA,MAAM,SAAA,GAAY,WAAA,CAAY,SAAA,CAAU,WAAW,CAAA;AAEnD,UAAA,MAAM,SAAA,GAA2B,IAAIC,mDAAA,CAA0B;AAAA,YAC7D,MAAA;AAAA,YACA,QAAA;AAAA,YACA;AAAA,WACD,CAAA;AAED,UAAA,OAAO;AAAA,YACL,EAAA,EAAI,SAAA;AAAA,YACJ;AAAA,WACF;AAAA,QACF,CAAC,KAAK,EAAC;AAET,QAAA,MAAA,CAAO,OAAA,CAAQ,CAAA,KAAA,KAAS,aAAA,CAAc,QAAA,CAAS,KAAK,CAAC,CAAA;AAAA,MACvD;AAAA,KACD,CAAA;AAAA,EACH;AACF,CAAC;;;;"}
package/dist/utils.cjs.js ADDED
@@ -0,0 +1,30 @@
+ 'use strict';
+
+ const getMessageType = (message) => {
+   const type = message.getType();
+   if (type === "human") {
+     return {
+       role: "user",
+       content: message.content
+     };
+   }
+   if (type === "ai") {
+     return {
+       role: "assistant",
+       content: message.content
+     };
+   }
+   if (type === "system") {
+     return {
+       role: "system",
+       content: message.content
+     };
+   }
+   throw new Error(`Unsupported message type: ${type}`);
+ };
+ const convertToAzureAiInferenceMessages = (messages) => {
+   return messages.map(getMessageType);
+ };
+
+ exports.convertToAzureAiInferenceMessages = convertToAzureAiInferenceMessages;
+ //# sourceMappingURL=utils.cjs.js.map
package/dist/utils.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"utils.cjs.js","sources":["../src/utils.ts"],"sourcesContent":["import { BaseMessage } from '@langchain/core/messages';\nimport {\n  ChatRequestMessage,\n  ChatRequestUserMessage,\n  ChatRequestAssistantMessage,\n  ChatRequestSystemMessage,\n} from '@azure-rest/ai-inference';\n\nconst getMessageType = (message: BaseMessage): ChatRequestMessage => {\n  const type = message.getType();\n\n  if (type === 'human') {\n    return {\n      role: 'user',\n      content: message.content,\n    } as ChatRequestUserMessage;\n  }\n\n  if (type === 'ai') {\n    return {\n      role: 'assistant',\n      content: message.content,\n    } as ChatRequestAssistantMessage;\n  }\n\n  if (type === 'system') {\n    return {\n      role: 'system',\n      content: message.content,\n    } as ChatRequestSystemMessage;\n  }\n\n  throw new Error(`Unsupported message type: ${type}`);\n};\n\nexport const convertToAzureAiInferenceMessages = (\n  messages: BaseMessage[],\n): ChatRequestMessage[] => {\n  return messages.map(getMessageType);\n};\n"],"names":[],"mappings":";;AAQA,MAAM,cAAA,GAAiB,CAAC,OAAA,KAA6C;AACnE,EAAA,MAAM,IAAA,GAAO,QAAQ,OAAA,EAAQ;AAE7B,EAAA,IAAI,SAAS,OAAA,EAAS;AACpB,IAAA,OAAO;AAAA,MACL,IAAA,EAAM,MAAA;AAAA,MACN,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,EACF;AAEA,EAAA,IAAI,SAAS,IAAA,EAAM;AACjB,IAAA,OAAO;AAAA,MACL,IAAA,EAAM,WAAA;AAAA,MACN,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,EACF;AAEA,EAAA,IAAI,SAAS,QAAA,EAAU;AACrB,IAAA,OAAO;AAAA,MACL,IAAA,EAAM,QAAA;AAAA,MACN,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,KAAA,CAAM,CAAA,0BAAA,EAA6B,IAAI,CAAA,CAAE,CAAA;AACrD,CAAA;AAEO,MAAM,iCAAA,GAAoC,CAC/C,QAAA,KACyB;AACzB,EAAA,OAAO,QAAA,CAAS,IAAI,cAAc,CAAA;AACpC;;;;"}
package/package.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "name": "@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai",
+   "version": "0.2.2",
+   "license": "Apache-2.0",
+   "description": "The model-provider-azure-ai backend module for the ai-assistant plugin.",
+   "main": "dist/index.cjs.js",
+   "types": "dist/index.d.ts",
+   "publishConfig": {
+     "access": "public",
+     "main": "dist/index.cjs.js",
+     "types": "dist/index.d.ts"
+   },
+   "backstage": {
+     "role": "backend-plugin-module",
+     "pluginId": "ai-assistant",
+     "pluginPackage": "@sweetoburrito/backstage-plugin-ai-assistant-backend",
+     "features": {
+       ".": "@backstage/BackendFeature"
+     }
+   },
+   "scripts": {
+     "start": "backstage-cli package start",
+     "build": "backstage-cli package build",
+     "lint": "backstage-cli package lint",
+     "test": "backstage-cli package test",
+     "clean": "backstage-cli package clean",
+     "prepack": "backstage-cli package prepack",
+     "postpack": "backstage-cli package postpack"
+   },
+   "dependencies": {
+     "@azure-rest/ai-inference": "^1.0.0-beta.6",
+     "@azure/core-auth": "^1.10.0",
+     "@azure/core-sse": "^2.3.0",
+     "@backstage/backend-plugin-api": "backstage:^",
+     "@langchain/core": "^0.3.72",
+     "@sweetoburrito/backstage-plugin-ai-assistant-node": "workspace:^"
+   },
+   "devDependencies": {
+     "@backstage/backend-test-utils": "backstage:^",
+     "@backstage/cli": "backstage:^"
+   },
+   "files": [
+     "dist",
+     "config.d.ts"
+   ],
+   "configSchema": "config.d.ts",
+   "typesVersions": {
+     "*": {
+       "package.json": [
+         "package.json"
+       ]
+     }
+   }
+ }