@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai 0.0.0-snapshot-20251029150521
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +57 -0
- package/config.d.ts +19 -0
- package/dist/index.cjs.js +10 -0
- package/dist/index.cjs.js.map +1 -0
- package/dist/index.d.ts +5 -0
- package/dist/module.cjs.js +41 -0
- package/dist/module.cjs.js.map +1 -0
- package/dist/services/chat-model/azure-ai-inference-chat-model.cjs.js +109 -0
- package/dist/services/chat-model/azure-ai-inference-chat-model.cjs.js.map +1 -0
- package/dist/services/chat-model/index.cjs.js +18 -0
- package/dist/services/chat-model/index.cjs.js.map +1 -0
- package/dist/services/chat-model/open-ai.cjs.js +16 -0
- package/dist/services/chat-model/open-ai.cjs.js.map +1 -0
- package/dist/utils.cjs.js +30 -0
- package/dist/utils.cjs.js.map +1 -0
- package/package.json +55 -0
package/README.md
ADDED
@@ -0,0 +1,57 @@
# @sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai

This module provides an Azure AI (Azure OpenAI / Azure AI Foundry) model provider implementation for the
[backstage-plugin-ai-assistant](https://github.com/SweetOBurritO/backstage-plugin-ai-assistant) backend. It lets the AI Assistant backend call Azure-hosted models (chat or completion)
using a configuration-driven provider, so the rest of the plugin can remain model-agnostic.

## Features

- Connects the Backstage AI Assistant to Azure-hosted LLMs (Azure OpenAI / Azure AI Foundry).
- Configuration via Backstage `app-config.yaml` and environment variables.

## When to use

Use this module when you want the AI Assistant backend to use models hosted in Azure (for example, GPT-family models
deployed in Azure OpenAI or Azure AI Foundry).

## Configuration

Add the provider configuration in your `app-config.local.yaml`:

```yaml
aiAssistant:
  models:
    azureAi:
      apiKey: ${AZURE_AI_API_KEY}
      models:
        - endpoint: https://eastus.api.cognitive.microsoft.com/openai/v1/ # Replace with your deployment endpoint
          modelName: 'gpt-5-mini'
        - endpoint: https://eastus.api.cognitive.microsoft.com/openai/v1/
          modelName: 'DeepSeek-R1'
```

## Install

Install the plugin into your Backstage backend with the following command:

```sh
yarn workspace backend add @sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai
```

Add it to your backend:

```diff
// packages/backend/src/index.ts

backend.add(import('@backstage/plugin-events-backend'));
backend.add(import('@backstage/plugin-signals-backend'));

backend.add(import('@sweetoburrito/backstage-plugin-ai-assistant-backend'));

+backend.add(
+  import(
+    '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai'
+  ),
+);

```
package/config.d.ts
ADDED
@@ -0,0 +1,19 @@
import { SdkType } from './src/types/chat-model';

export interface Config {
  aiAssistant: {
    models: {
      azureAi: {
        /**
         * @visibility secret
         */
        apiKey: string;
        models: {
          modelName: string;
          endpoint: string;
          sdk?: SdkType;
        }[];
      };
    };
  };
}
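The `SdkType` import above refers to TypeScript source that is not shipped in this package. Judging from the `chatModels` registry in `dist/services/chat-model/index.cjs.js` later in this diff, a plausible reconstruction is the sketch below; the union members are inferred from that registry's keys rather than confirmed by the diff:

```ts
// Hypothetical reconstruction of src/types/chat-model (not included in this diff).
// The two members mirror the keys of the chatModels registry in
// dist/services/chat-model/index.cjs.js.
export type SdkType = 'openai' | 'azureAiInference';
```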
package/dist/index.cjs.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.cjs.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;"}
package/dist/module.cjs.js
ADDED
@@ -0,0 +1,41 @@
'use strict';

var backendPluginApi = require('@backstage/backend-plugin-api');
var backstagePluginAiAssistantNode = require('@sweetoburrito/backstage-plugin-ai-assistant-node');
var index = require('./services/chat-model/index.cjs.js');

const aiAssistantModuleModelProviderAzureAi = backendPluginApi.createBackendModule({
  pluginId: "ai-assistant",
  moduleId: "model-provider-azure-ai",
  register(reg) {
    reg.registerInit({
      deps: {
        config: backendPluginApi.coreServices.rootConfig,
        modelProvider: backstagePluginAiAssistantNode.modelProviderExtensionPoint
      },
      async init({ config, modelProvider }) {
        const azureConfig = config.getConfig("aiAssistant.models.azureAi");
        const apiKey = azureConfig.getString("apiKey");
        const modelConfigs = azureConfig.getOptionalConfigArray("models");
        const models = modelConfigs?.map((modelConfig) => {
          const endpoint = modelConfig.getString("endpoint");
          const modelName = modelConfig.getString("modelName");
          const sdk = modelConfig.getOptionalString("sdk") ?? "openai";
          const chatModel = index.createChatModeForSdk(sdk, {
            apiKey,
            endpoint,
            modelName
          });
          return {
            id: modelName,
            chatModel
          };
        }) ?? [];
        models.forEach((model) => modelProvider.register(model));
      }
    });
  }
});

exports.aiAssistantModuleModelProviderAzureAi = aiAssistantModuleModelProviderAzureAi;
//# sourceMappingURL=module.cjs.js.map
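The source map below annotates the array handed to the extension point as `Model[]` from `@sweetoburrito/backstage-plugin-ai-assistant-node`. That type is not part of this diff; a minimal sketch of the shape implied by the `return { id: modelName, chatModel }` literal above, assuming the chat model is a LangChain `BaseChatModel`:

```ts
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

// Assumed shape; the real Model type lives in
// @sweetoburrito/backstage-plugin-ai-assistant-node.
interface Model {
  id: string; // the configured modelName, e.g. 'gpt-5-mini'
  chatModel: BaseChatModel;
}
```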
package/dist/module.cjs.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"module.cjs.js","sources":["../src/module.ts"],"sourcesContent":["import {\n coreServices,\n createBackendModule,\n} from '@backstage/backend-plugin-api';\n\nimport {\n Model,\n modelProviderExtensionPoint,\n} from '@sweetoburrito/backstage-plugin-ai-assistant-node';\nimport { createChatModeForSdk } from './services/chat-model';\n\nexport const aiAssistantModuleModelProviderAzureAi = createBackendModule({\n pluginId: 'ai-assistant',\n moduleId: 'model-provider-azure-ai',\n register(reg) {\n reg.registerInit({\n deps: {\n config: coreServices.rootConfig,\n modelProvider: modelProviderExtensionPoint,\n },\n async init({ config, modelProvider }) {\n const azureConfig = config.getConfig('aiAssistant.models.azureAi');\n\n const apiKey = azureConfig.getString('apiKey');\n const modelConfigs = azureConfig.getOptionalConfigArray('models');\n\n const models: Model[] =\n modelConfigs?.map<Model>(modelConfig => {\n const endpoint = modelConfig.getString('endpoint');\n const modelName = modelConfig.getString('modelName');\n const sdk = modelConfig.getOptionalString('sdk') ?? 'openai';\n\n const chatModel = createChatModeForSdk(sdk, {\n apiKey,\n endpoint,\n modelName,\n });\n\n return {\n id: modelName,\n chatModel,\n };\n }) ?? [];\n\n models.forEach(model => modelProvider.register(model));\n },\n });\n },\n});\n"],"names":["createBackendModule","coreServices","modelProviderExtensionPoint","createChatModeForSdk"],"mappings":";;;;;;AAWO,MAAM,wCAAwCA,oCAAA,CAAoB;AAAA,EACvE,QAAA,EAAU,cAAA;AAAA,EACV,QAAA,EAAU,yBAAA;AAAA,EACV,SAAS,GAAA,EAAK;AACZ,IAAA,GAAA,CAAI,YAAA,CAAa;AAAA,MACf,IAAA,EAAM;AAAA,QACJ,QAAQC,6BAAA,CAAa,UAAA;AAAA,QACrB,aAAA,EAAeC;AAAA,OACjB;AAAA,MACA,MAAM,IAAA,CAAK,EAAE,MAAA,EAAQ,eAAc,EAAG;AACpC,QAAA,MAAM,WAAA,GAAc,MAAA,CAAO,SAAA,CAAU,4BAA4B,CAAA;AAEjE,QAAA,MAAM,MAAA,GAAS,WAAA,CAAY,SAAA,CAAU,QAAQ,CAAA;AAC7C,QAAA,MAAM,YAAA,GAAe,WAAA,CAAY,sBAAA,CAAuB,QAAQ,CAAA;AAEhE,QAAA,MAAM,MAAA,GACJ,YAAA,EAAc,GAAA,CAAW,CAAA,WAAA,KAAe;AACtC,UAAA,MAAM,QAAA,GAAW,WAAA,CAAY,SAAA,CAAU,UAAU,CAAA;AACjD,UAAA,MAAM,SAAA,GAAY,WAAA,CAAY,SAAA,CAAU,WAAW,CAAA;AACnD,UAAA,MAAM,GAAA,GAAM,WAAA,CAAY,iBAAA,CAAkB,KAAK,CAAA,IAAK,QAAA;AAEpD,UAAA,MAAM,SAAA,GAAYC,2BAAqB,GAAA,EAAK;AAAA,YAC1C,MAAA;AAAA,YACA,QAAA;AAAA,YACA;AAAA,WACD,CAAA;AAED,UAAA,OAAO;AAAA,YACL,EAAA,EAAI,SAAA;AAAA,YACJ;AAAA,WACF;AAAA,QACF,CAAC,KAAK,EAAC;AAET,QAAA,MAAA,CAAO,OAAA,CAAQ,CAAA,KAAA,KAAS,aAAA,CAAc,QAAA,CAAS,KAAK,CAAC,CAAA;AAAA,MACvD;AAAA,KACD,CAAA;AAAA,EACH;AACF,CAAC;;;;"}
package/dist/services/chat-model/azure-ai-inference-chat-model.cjs.js
ADDED
@@ -0,0 +1,109 @@
'use strict';

var chat_models = require('@langchain/core/language_models/chat_models');
var createClient = require('@azure-rest/ai-inference');
var coreAuth = require('@azure/core-auth');
var messages = require('@langchain/core/messages');
var stream = require('@langchain/core/utils/stream');
var outputs = require('@langchain/core/outputs');
var utils = require('../../utils.cjs.js');
var coreSse = require('@azure/core-sse');

function _interopDefaultCompat (e) { return e && typeof e === 'object' && 'default' in e ? e : { default: e }; }

var createClient__default = /*#__PURE__*/_interopDefaultCompat(createClient);

class AzureAiInferenceChatModel extends chat_models.BaseChatModel {
  modelName;
  endpoint;
  apiKey;
  client;
  constructor({
    modelName,
    endpoint,
    apiKey,
    ...rest
  }) {
    super(rest);
    this.modelName = modelName;
    this.endpoint = endpoint;
    this.apiKey = apiKey;
    this.client = createClient__default.default(endpoint, new coreAuth.AzureKeyCredential(apiKey));
  }
  _llmType() {
    return "azure-ai-inference";
  }
  async *_streamResponseChunks(messages$1, _options, runManager) {
    const aiInferenceMessages = utils.convertToAzureAiInferenceMessages(messages$1);
    const response = await this.client.path("/chat/completions").post({
      body: {
        stream: true,
        messages: aiInferenceMessages,
        model: this.modelName
      }
    }).asNodeStream();
    const stream = response.body;
    if (!stream) {
      throw new Error("Azure AI Inference response stream is undefined");
    }
    if (response.status !== "200") {
      stream.destroy();
      throw new Error(
        `Failed to get chat completions. Operation failed with ${response.status} code.`
      );
    }
    const sseStream = coreSse.createSseStream(stream);
    for await (const event of sseStream) {
      if (event.data === "[DONE]") {
        return;
      }
      for (const choice of JSON.parse(event.data).choices) {
        const token = choice.delta?.content ?? "";
        const responseMessage = new messages.AIMessageChunk({
          content: token
        });
        yield new outputs.ChatGenerationChunk({
          text: token,
          message: responseMessage
        });
        await runManager?.handleLLMNewToken(token);
      }
    }
  }
  async _generate(messages$1, options, runManager) {
    let finalChunk;
    for await (const chunk of this._streamResponseChunks(
      messages$1,
      options,
      runManager
    )) {
      if (!finalChunk) {
        finalChunk = chunk.message;
      } else {
        finalChunk = stream.concat(finalChunk, chunk.message);
      }
    }
    const nonChunkMessage = new messages.AIMessage({
      id: finalChunk?.id,
      content: finalChunk?.content ?? "",
      tool_calls: finalChunk?.tool_calls,
      response_metadata: finalChunk?.response_metadata,
      usage_metadata: finalChunk?.usage_metadata
    });
    return {
      generations: [
        {
          text: typeof nonChunkMessage.content === "string" ? nonChunkMessage.content : "",
          message: nonChunkMessage
        }
      ]
    };
  }
}
const createAzureAiInferenceChatModel = (options) => {
  return new AzureAiInferenceChatModel(options);
};

exports.AzureAiInferenceChatModel = AzureAiInferenceChatModel;
exports.createAzureAiInferenceChatModel = createAzureAiInferenceChatModel;
//# sourceMappingURL=azure-ai-inference-chat-model.cjs.js.map
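Because the class extends LangChain's `BaseChatModel`, the inherited `invoke`/`stream` entry points drive `_generate` and `_streamResponseChunks`. A minimal usage sketch, assuming the deep import path below (the package's root entry point is not shown re-exporting this class) and a valid Azure AI endpoint and key:

```ts
import { HumanMessage } from '@langchain/core/messages';
// Hypothetical deep import; adjust to however your build resolves the dist file.
import { AzureAiInferenceChatModel } from '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai/dist/services/chat-model/azure-ai-inference-chat-model.cjs.js';

const model = new AzureAiInferenceChatModel({
  modelName: 'DeepSeek-R1',
  endpoint: 'https://eastus.api.cognitive.microsoft.com/openai/v1/', // assumption: the endpoint from the README example
  apiKey: process.env.AZURE_AI_API_KEY ?? '',
});

// stream() is inherited from BaseChatModel and delegates to _streamResponseChunks above.
for await (const chunk of await model.stream([new HumanMessage('Hello!')])) {
  process.stdout.write(String(chunk.content));
}
```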
package/dist/services/chat-model/azure-ai-inference-chat-model.cjs.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"azure-ai-inference-chat-model.cjs.js","sources":["../../../src/services/chat-model/azure-ai-inference-chat-model.ts"],"sourcesContent":["import {\n BaseChatModel,\n BaseChatModelParams,\n} from '@langchain/core/language_models/chat_models';\nimport createClient, { ModelClient } from '@azure-rest/ai-inference';\nimport { AzureKeyCredential } from '@azure/core-auth';\nimport {\n AIMessage,\n AIMessageChunk,\n BaseMessage,\n} from '@langchain/core/messages';\nimport { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';\nimport { concat } from '@langchain/core/utils/stream';\nimport { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';\nimport { convertToAzureAiInferenceMessages } from '../../utils';\nimport { createSseStream } from '@azure/core-sse';\nimport { CreateChatModelFunction } from '../../types/chat-model';\n\nexport interface ChatAzureAiInferenceInputs extends BaseChatModelParams {\n modelName: string;\n endpoint: string;\n apiKey: string;\n}\n\nexport class AzureAiInferenceChatModel\n extends BaseChatModel\n implements ChatAzureAiInferenceInputs\n{\n modelName: string;\n endpoint: string;\n apiKey: string;\n private client: ModelClient;\n\n constructor({\n modelName,\n endpoint,\n apiKey,\n ...rest\n }: ChatAzureAiInferenceInputs) {\n super(rest);\n this.modelName = modelName;\n this.endpoint = endpoint;\n this.apiKey = apiKey;\n this.client = createClient(endpoint, new AzureKeyCredential(apiKey));\n }\n\n _llmType(): string {\n return 'azure-ai-inference';\n }\n\n async *_streamResponseChunks(\n messages: BaseMessage[],\n _options: this['ParsedCallOptions'],\n runManager?: CallbackManagerForLLMRun,\n ): AsyncGenerator<ChatGenerationChunk> {\n const aiInferenceMessages = convertToAzureAiInferenceMessages(messages);\n\n const response = await this.client\n .path('/chat/completions')\n .post({\n body: {\n stream: true,\n messages: aiInferenceMessages,\n model: this.modelName,\n },\n })\n .asNodeStream();\n\n const stream = response.body;\n\n if (!stream) {\n throw new Error('Azure AI Inference response stream is undefined');\n }\n\n if (response.status !== '200') {\n stream.destroy();\n throw new Error(\n `Failed to get chat completions. Operation failed with ${response.status} code.`,\n );\n }\n\n const sseStream = createSseStream(stream);\n\n for await (const event of sseStream) {\n if (event.data === '[DONE]') {\n return;\n }\n\n for (const choice of JSON.parse(event.data).choices) {\n const token = choice.delta?.content ?? '';\n\n const responseMessage = new AIMessageChunk({\n content: token,\n });\n\n yield new ChatGenerationChunk({\n text: token,\n message: responseMessage,\n });\n await runManager?.handleLLMNewToken(token);\n }\n }\n }\n\n async _generate(\n messages: BaseMessage[],\n options: this['ParsedCallOptions'],\n runManager?: CallbackManagerForLLMRun,\n ): Promise<ChatResult> {\n let finalChunk: AIMessageChunk | undefined;\n for await (const chunk of this._streamResponseChunks(\n messages,\n options,\n runManager,\n )) {\n if (!finalChunk) {\n finalChunk = chunk.message;\n } else {\n finalChunk = concat(finalChunk, chunk.message);\n }\n }\n\n // Convert from AIMessageChunk to AIMessage since `generate` expects AIMessage.\n const nonChunkMessage = new AIMessage({\n id: finalChunk?.id,\n content: finalChunk?.content ?? 
'',\n tool_calls: finalChunk?.tool_calls,\n response_metadata: finalChunk?.response_metadata,\n usage_metadata: finalChunk?.usage_metadata,\n });\n return {\n generations: [\n {\n text:\n typeof nonChunkMessage.content === 'string'\n ? nonChunkMessage.content\n : '',\n message: nonChunkMessage,\n },\n ],\n };\n }\n}\n\nexport const createAzureAiInferenceChatModel: CreateChatModelFunction =\n options => {\n return new AzureAiInferenceChatModel(options);\n };\n"],"names":["BaseChatModel","createClient","AzureKeyCredential","messages","convertToAzureAiInferenceMessages","createSseStream","AIMessageChunk","ChatGenerationChunk","concat","AIMessage"],"mappings":";;;;;;;;;;;;;;;AAwBO,MAAM,kCACHA,yBAAA,CAEV;AAAA,EACE,SAAA;AAAA,EACA,QAAA;AAAA,EACA,MAAA;AAAA,EACQ,MAAA;AAAA,EAER,WAAA,CAAY;AAAA,IACV,SAAA;AAAA,IACA,QAAA;AAAA,IACA,MAAA;AAAA,IACA,GAAG;AAAA,GACL,EAA+B;AAC7B,IAAA,KAAA,CAAM,IAAI,CAAA;AACV,IAAA,IAAA,CAAK,SAAA,GAAY,SAAA;AACjB,IAAA,IAAA,CAAK,QAAA,GAAW,QAAA;AAChB,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,SAASC,6BAAA,CAAa,QAAA,EAAU,IAAIC,2BAAA,CAAmB,MAAM,CAAC,CAAA;AAAA,EACrE;AAAA,EAEA,QAAA,GAAmB;AACjB,IAAA,OAAO,oBAAA;AAAA,EACT;AAAA,EAEA,OAAO,qBAAA,CACLC,UAAA,EACA,QAAA,EACA,UAAA,EACqC;AACrC,IAAA,MAAM,mBAAA,GAAsBC,wCAAkCD,UAAQ,CAAA;AAEtE,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,OACzB,IAAA,CAAK,mBAAmB,EACxB,IAAA,CAAK;AAAA,MACJ,IAAA,EAAM;AAAA,QACJ,MAAA,EAAQ,IAAA;AAAA,QACR,QAAA,EAAU,mBAAA;AAAA,QACV,OAAO,IAAA,CAAK;AAAA;AACd,KACD,EACA,YAAA,EAAa;AAEhB,IAAA,MAAM,SAAS,QAAA,CAAS,IAAA;AAExB,IAAA,IAAI,CAAC,MAAA,EAAQ;AACX,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACnE;AAEA,IAAA,IAAI,QAAA,CAAS,WAAW,KAAA,EAAO;AAC7B,MAAA,MAAA,CAAO,OAAA,EAAQ;AACf,MAAA,MAAM,IAAI,KAAA;AAAA,QACR,CAAA,sDAAA,EAAyD,SAAS,MAAM,CAAA,MAAA;AAAA,OAC1E;AAAA,IACF;AAEA,IAAA,MAAM,SAAA,GAAYE,wBAAgB,MAAM,CAAA;AAExC,IAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AACnC,MAAA,IAAI,KAAA,CAAM,SAAS,QAAA,EAAU;AAC3B,QAAA;AAAA,MACF;AAEA,MAAA,KAAA,MAAW,UAAU,IAAA,CAAK,KAAA,CAAM,KAAA,CAAM,IAAI,EAAE,OAAA,EAAS;AACnD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,EAAO,OAAA,IAAW,EAAA;AAEvC,QAAA,MAAM,eAAA,GAAkB,IAAIC,uBAAA,CAAe;AAAA,UACzC,OAAA,EAAS;AAAA,SACV,CAAA;AAED,QAAA,MAAM,IAAIC,2BAAA,CAAoB;AAAA,UAC5B,IAAA,EAAM,KAAA;AAAA,UACN,OAAA,EAAS;AAAA,SACV,CAAA;AACD,QAAA,MAAM,UAAA,EAAY,kBAAkB,KAAK,CAAA;AAAA,MAC3C;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,SAAA,CACJJ,UAAA,EACA,OAAA,EACA,UAAA,EACqB;AACrB,IAAA,IAAI,UAAA;AACJ,IAAA,WAAA,MAAiB,SAAS,IAAA,CAAK,qBAAA;AAAA,MAC7BA,UAAA;AAAA,MACA,OAAA;AAAA,MACA;AAAA,KACF,EAAG;AACD,MAAA,IAAI,CAAC,UAAA,EAAY;AACf,QAAA,UAAA,GAAa,KAAA,CAAM,OAAA;AAAA,MACrB,CAAA,MAAO;AACL,QAAA,UAAA,GAAaK,aAAA,CAAO,UAAA,EAAY,KAAA,CAAM,OAAO,CAAA;AAAA,MAC/C;AAAA,IACF;AAGA,IAAA,MAAM,eAAA,GAAkB,IAAIC,kBAAA,CAAU;AAAA,MACpC,IAAI,UAAA,EAAY,EAAA;AAAA,MAChB,OAAA,EAAS,YAAY,OAAA,IAAW,EAAA;AAAA,MAChC,YAAY,UAAA,EAAY,UAAA;AAAA,MACxB,mBAAmB,UAAA,EAAY,iBAAA;AAAA,MAC/B,gBAAgB,UAAA,EAAY;AAAA,KAC7B,CAAA;AACD,IAAA,OAAO;AAAA,MACL,WAAA,EAAa;AAAA,QACX;AAAA,UACE,MACE,OAAO,eAAA,CAAgB,OAAA,KAAY,QAAA,GAC/B,gBAAgB,OAAA,GAChB,EAAA;AAAA,UACN,OAAA,EAAS;AAAA;AACX;AACF,KACF;AAAA,EACF;AACF;AAEO,MAAM,kCACX,CAAA,OAAA,KAAW;AACT,EAAA,OAAO,IAAI,0BAA0B,OAAO,CAAA;AAC9C;;;;;"}
package/dist/services/chat-model/index.cjs.js
ADDED
@@ -0,0 +1,18 @@
'use strict';

var azureAiInferenceChatModel = require('./azure-ai-inference-chat-model.cjs.js');
var openAi = require('./open-ai.cjs.js');

const chatModels = {
  openai: openAi.createOpenAiChatModel,
  azureAiInference: azureAiInferenceChatModel.createAzureAiInferenceChatModel
};
const createChatModeForSdk = (sdk, options) => {
  if (!(sdk in chatModels)) {
    throw new Error(`Unsupported SDK type: ${sdk}`);
  }
  return chatModels[sdk](options);
};

exports.createChatModeForSdk = createChatModeForSdk;
//# sourceMappingURL=index.cjs.js.map
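A short sketch of how the dispatcher resolves the two registered SDK names (note the function is spelled `createChatModeForSdk` in the source); the options mirror the fields the backend module reads from config:

```ts
// Hypothetical deep import of the dist file above.
import { createChatModeForSdk } from '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai/dist/services/chat-model/index.cjs.js';

// 'openai' wraps @langchain/openai's ChatOpenAI; 'azureAiInference' wraps the
// @azure-rest/ai-inference client shown above. Any other string throws.
const chatModel = createChatModeForSdk('azureAiInference', {
  apiKey: process.env.AZURE_AI_API_KEY ?? '',
  endpoint: 'https://eastus.api.cognitive.microsoft.com/openai/v1/', // assumption: the README example endpoint
  modelName: 'DeepSeek-R1',
});
```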
package/dist/services/chat-model/index.cjs.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.cjs.js","sources":["../../../src/services/chat-model/index.ts"],"sourcesContent":["import { createAzureAiInferenceChatModel } from './azure-ai-inference-chat-model';\nimport {\n CreateChatModelFunction,\n SdkType,\n CreateChatModelFunctionOptions,\n} from '../../types/chat-model';\nimport { createOpenAiChatModel } from './open-ai';\n\nconst chatModels: Record<SdkType, CreateChatModelFunction> = {\n openai: createOpenAiChatModel,\n azureAiInference: createAzureAiInferenceChatModel,\n};\n\nexport const createChatModeForSdk = (\n sdk: string,\n options: CreateChatModelFunctionOptions,\n) => {\n if (!(sdk in chatModels)) {\n throw new Error(`Unsupported SDK type: ${sdk}`);\n }\n\n return chatModels[sdk as SdkType](options);\n};\n"],"names":["createOpenAiChatModel","createAzureAiInferenceChatModel"],"mappings":";;;;;AAQA,MAAM,UAAA,GAAuD;AAAA,EAC3D,MAAA,EAAQA,4BAAA;AAAA,EACR,gBAAA,EAAkBC;AACpB,CAAA;AAEO,MAAM,oBAAA,GAAuB,CAClC,GAAA,EACA,OAAA,KACG;AACH,EAAA,IAAI,EAAE,OAAO,UAAA,CAAA,EAAa;AACxB,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,sBAAA,EAAyB,GAAG,CAAA,CAAE,CAAA;AAAA,EAChD;AAEA,EAAA,OAAO,UAAA,CAAW,GAAc,CAAA,CAAE,OAAO,CAAA;AAC3C;;;;"}
package/dist/services/chat-model/open-ai.cjs.js
ADDED
@@ -0,0 +1,16 @@
'use strict';

var openai = require('@langchain/openai');

const createOpenAiChatModel = (options) => {
  return new openai.ChatOpenAI({
    configuration: {
      apiKey: options.apiKey,
      baseURL: options.endpoint
    },
    modelName: options.modelName
  });
};

exports.createOpenAiChatModel = createOpenAiChatModel;
//# sourceMappingURL=open-ai.cjs.js.map
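The OpenAI path is a thin wrapper over `@langchain/openai`'s `ChatOpenAI`, pointing the underlying client's `baseURL` at the configured Azure endpoint. An equivalent direct construction plus a one-shot call, assuming the same option names as above:

```ts
import { ChatOpenAI } from '@langchain/openai';

const model = new ChatOpenAI({
  configuration: {
    apiKey: process.env.AZURE_AI_API_KEY ?? '',
    baseURL: 'https://eastus.api.cognitive.microsoft.com/openai/v1/', // assumption: the README example endpoint
  },
  modelName: 'gpt-5-mini',
});

const reply = await model.invoke('Say hello in one word.');
console.log(reply.content);
```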
package/dist/services/chat-model/open-ai.cjs.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"open-ai.cjs.js","sources":["../../../src/services/chat-model/open-ai.ts"],"sourcesContent":["import { CreateChatModelFunction } from '../../types/chat-model';\nimport { ChatOpenAI } from '@langchain/openai';\n\nexport const createOpenAiChatModel: CreateChatModelFunction = options => {\n return new ChatOpenAI({\n configuration: {\n apiKey: options.apiKey,\n baseURL: options.endpoint,\n },\n modelName: options.modelName,\n });\n};\n"],"names":["ChatOpenAI"],"mappings":";;;;AAGO,MAAM,wBAAiD,CAAA,OAAA,KAAW;AACvE,EAAA,OAAO,IAAIA,iBAAA,CAAW;AAAA,IACpB,aAAA,EAAe;AAAA,MACb,QAAQ,OAAA,CAAQ,MAAA;AAAA,MAChB,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,IACA,WAAW,OAAA,CAAQ;AAAA,GACpB,CAAA;AACH;;;;"}
package/dist/utils.cjs.js
ADDED
@@ -0,0 +1,30 @@
'use strict';

const getMessageType = (message) => {
  const type = message.getType();
  if (type === "human") {
    return {
      role: "user",
      content: message.content
    };
  }
  if (type === "ai") {
    return {
      role: "assistant",
      content: message.content
    };
  }
  if (type === "system") {
    return {
      role: "system",
      content: message.content
    };
  }
  throw new Error(`Unsupported message type: ${type}`);
};
const convertToAzureAiInferenceMessages = (messages) => {
  return messages.map(getMessageType);
};

exports.convertToAzureAiInferenceMessages = convertToAzureAiInferenceMessages;
//# sourceMappingURL=utils.cjs.js.map
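A quick sketch of what `convertToAzureAiInferenceMessages` produces for a typical prompt; note that tool and function messages are not mapped and would hit the `Unsupported message type` error:

```ts
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
// Hypothetical deep import of the dist file above.
import { convertToAzureAiInferenceMessages } from '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai/dist/utils.cjs.js';

const wire = convertToAzureAiInferenceMessages([
  new SystemMessage('You are a helpful assistant.'),
  new HumanMessage('What is Backstage?'),
]);
// => [
//   { role: 'system', content: 'You are a helpful assistant.' },
//   { role: 'user', content: 'What is Backstage?' },
// ]
```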
package/dist/utils.cjs.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"utils.cjs.js","sources":["../src/utils.ts"],"sourcesContent":["import { BaseMessage } from '@langchain/core/messages';\nimport {\n ChatRequestMessage,\n ChatRequestUserMessage,\n ChatRequestAssistantMessage,\n ChatRequestSystemMessage,\n} from '@azure-rest/ai-inference';\n\nconst getMessageType = (message: BaseMessage): ChatRequestMessage => {\n const type = message.getType();\n\n if (type === 'human') {\n return {\n role: 'user',\n content: message.content,\n } as ChatRequestUserMessage;\n }\n\n if (type === 'ai') {\n return {\n role: 'assistant',\n content: message.content,\n } as ChatRequestAssistantMessage;\n }\n\n if (type === 'system') {\n return {\n role: 'system',\n content: message.content,\n } as ChatRequestSystemMessage;\n }\n\n throw new Error(`Unsupported message type: ${type}`);\n};\n\nexport const convertToAzureAiInferenceMessages = (\n messages: BaseMessage[],\n): ChatRequestMessage[] => {\n return messages.map(getMessageType);\n};\n"],"names":[],"mappings":";;AAQA,MAAM,cAAA,GAAiB,CAAC,OAAA,KAA6C;AACnE,EAAA,MAAM,IAAA,GAAO,QAAQ,OAAA,EAAQ;AAE7B,EAAA,IAAI,SAAS,OAAA,EAAS;AACpB,IAAA,OAAO;AAAA,MACL,IAAA,EAAM,MAAA;AAAA,MACN,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,EACF;AAEA,EAAA,IAAI,SAAS,IAAA,EAAM;AACjB,IAAA,OAAO;AAAA,MACL,IAAA,EAAM,WAAA;AAAA,MACN,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,EACF;AAEA,EAAA,IAAI,SAAS,QAAA,EAAU;AACrB,IAAA,OAAO;AAAA,MACL,IAAA,EAAM,QAAA;AAAA,MACN,SAAS,OAAA,CAAQ;AAAA,KACnB;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,KAAA,CAAM,CAAA,0BAAA,EAA6B,IAAI,CAAA,CAAE,CAAA;AACrD,CAAA;AAEO,MAAM,iCAAA,GAAoC,CAC/C,QAAA,KACyB;AACzB,EAAA,OAAO,QAAA,CAAS,IAAI,cAAc,CAAA;AACpC;;;;"}
package/package.json
ADDED
@@ -0,0 +1,55 @@
{
  "name": "@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-azure-ai",
  "version": "0.0.0-snapshot-20251029150521",
  "license": "Apache-2.0",
  "description": "The model-provider-azure-ai backend module for the ai-assistant plugin.",
  "main": "dist/index.cjs.js",
  "types": "dist/index.d.ts",
  "publishConfig": {
    "access": "public",
    "main": "dist/index.cjs.js",
    "types": "dist/index.d.ts"
  },
  "backstage": {
    "role": "backend-plugin-module",
    "pluginId": "ai-assistant",
    "pluginPackage": "@sweetoburrito/backstage-plugin-ai-assistant-backend",
    "features": {
      ".": "@backstage/BackendFeature"
    }
  },
  "scripts": {
    "start": "backstage-cli package start",
    "build": "backstage-cli package build",
    "lint": "backstage-cli package lint",
    "test": "backstage-cli package test",
    "clean": "backstage-cli package clean",
    "prepack": "backstage-cli package prepack",
    "postpack": "backstage-cli package postpack"
  },
  "dependencies": {
    "@azure-rest/ai-inference": "^1.0.0-beta.6",
    "@azure/core-auth": "^1.10.0",
    "@azure/core-sse": "^2.3.0",
    "@backstage/backend-plugin-api": "backstage:^",
    "@langchain/core": "^0.3.72",
    "@langchain/openai": "^0.6.13",
    "@sweetoburrito/backstage-plugin-ai-assistant-node": "0.0.0-snapshot-20251029150521"
  },
  "devDependencies": {
    "@backstage/backend-test-utils": "backstage:^",
    "@backstage/cli": "backstage:^"
  },
  "files": [
    "dist",
    "config.d.ts"
  ],
  "configSchema": "config.d.ts",
  "typesVersions": {
    "*": {
      "package.json": [
        "package.json"
      ]
    }
  }
}