@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-ollama 0.0.0-snapshot-20251029150521

package/README.md ADDED
@@ -0,0 +1,65 @@
+ # @sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-ollama
+
+ Ollama model-provider backend module for the Backstage AI Assistant plugin.
+
+ This module lets the AI Assistant backend call local or remote Ollama-hosted models through a configuration-driven provider, so the rest of the plugin stays model-agnostic.
+
+ ## Key features
+
+ - Connect the Backstage AI Assistant to Ollama models (a local Ollama server or remote Ollama Cloud endpoints).
+ - Config-driven: supply the base URL, an API key, and the list of models in your Backstage config.
+ - Transparent to the rest of the ai-assistant plugin: swap providers via configuration.
+
+ ## When to use
+
+ Use this module when you host models with Ollama (for example, running LLMs locally with the Ollama server, or calling Ollama Cloud) and want to surface them to the Backstage AI Assistant plugin.
+
+ ## Configuration
+
+ Add the provider configuration to your Backstage `app-config.yaml` (or `app-config.local.yaml`). The module expects an `aiAssistant.models.ollama` section with the server base URL, an API key, and an array of the model names you want to make available.
+
+ Example configuration:
+
+ ```yaml
+ aiAssistant:
+   models:
+     ollama:
+       baseUrl: http://localhost:11434
+       apiKey: ${OLLAMA_API_KEY} # marked secret; required by the module's config schema
+       models:
+         - llama2
+         - ggml-vicuna
+ ```
+
+ ## Notes on fields
+
+ - `baseUrl`: the HTTP endpoint of your Ollama server. Use `http://localhost:11434` for a local Ollama server, or the URL provided for Ollama Cloud. If you reach Ollama through Open WebUI, the base URL is `http://<your-open-webui-host>:<port>/ollama`.
+ - `apiKey`: marked `@visibility secret` and declared required in the module's `config.d.ts`; the shipped module reads it unconditionally and sends it as a bearer token, so set it whenever your Ollama deployment uses API keys (see the sketch below).
+ - `models`: a list of model identifiers that the provider will expose to the ai-assistant backend.
+
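For orientation, here is a condensed TypeScript sketch of how the provider consumes these fields, based on the shipped `module.cjs.js` later in this diff. It is simplified, not the verbatim source; `readOllamaModels` is a hypothetical helper name used only for illustration (the real module does this inside its `init` hook):

```typescript
import { Config } from '@backstage/config';
import { ChatOllama } from '@langchain/ollama';

// Condensed from the shipped module.cjs.js: read the config block above and
// build one ChatOllama instance per configured model id.
function readOllamaModels(rootConfig: Config) {
  const ollamaConfig = rootConfig.getConfig('aiAssistant.models.ollama');

  const baseUrl = ollamaConfig.getString('baseUrl'); // required
  const apiKey = ollamaConfig.getString('apiKey'); // required; read unconditionally
  const modelIds = ollamaConfig.getStringArray('models');

  return modelIds.map(modelId => ({
    id: modelId,
    chatModel: new ChatOllama({
      baseUrl,
      model: modelId,
      // The API key is always sent as a bearer token.
      headers: { Authorization: `Bearer ${apiKey}` },
    }),
  }));
}
```
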
+ ## Install
+
+ Install the module into your Backstage backend workspace with the following command:
+
+ ```sh
+ yarn workspace backend add @sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-ollama
+ ```
+
+ ## Wiring into the backend
+
+ Add the module to your Backstage backend in `packages/backend/src/index.ts` (or equivalent) so the AI Assistant backend can discover and use it:
+
+ ```diff
+ // packages/backend/src/index.ts
+
+ backend.add(import('@backstage/plugin-events-backend'));
+ backend.add(import('@backstage/plugin-signals-backend'));
+
+ backend.add(import('@sweetoburrito/backstage-plugin-ai-assistant-backend'));
+
+ +backend.add(
+ +  import(
+ +    '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-ollama'
+ +  ),
+ +);
+ ```
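For context, in a Backstage app on the new backend system the entry point is small; a minimal sketch of the complete file, assuming the usual `@backstage/backend-defaults` setup and trimmed to the lines relevant here:

```typescript
// packages/backend/src/index.ts (minimal sketch, not a full production backend)
import { createBackend } from '@backstage/backend-defaults';

const backend = createBackend();

// The AI Assistant backend plugin plus this Ollama model-provider module.
backend.add(import('@sweetoburrito/backstage-plugin-ai-assistant-backend'));
backend.add(
  import(
    '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-ollama'
  ),
);

backend.start();
```
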
package/config.d.ts ADDED
@@ -0,0 +1,14 @@
+ export interface Config {
+   aiAssistant: {
+     models: {
+       ollama: {
+         baseUrl: string;
+         /**
+          * @visibility secret
+          */
+         apiKey: string;
+         models: string[];
+       };
+     };
+   };
+ }
package/dist/index.cjs.js ADDED
@@ -0,0 +1,10 @@
+ 'use strict';
+
+ Object.defineProperty(exports, '__esModule', { value: true });
+
+ var module$1 = require('./module.cjs.js');
+
+
+
+ exports.default = module$1.aiAssistantModuleModelProviderOllama;
+ //# sourceMappingURL=index.cjs.js.map
package/dist/index.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.cjs.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;"}
package/dist/index.d.ts ADDED
@@ -0,0 +1,5 @@
+ import * as _backstage_backend_plugin_api from '@backstage/backend-plugin-api';
+
+ declare const aiAssistantModuleModelProviderOllama: _backstage_backend_plugin_api.BackendFeature;
+
+ export { aiAssistantModuleModelProviderOllama as default };
package/dist/module.cjs.js ADDED
@@ -0,0 +1,40 @@
+ 'use strict';
+
+ var backendPluginApi = require('@backstage/backend-plugin-api');
+ var ollama = require('@langchain/ollama');
+ var backstagePluginAiAssistantNode = require('@sweetoburrito/backstage-plugin-ai-assistant-node');
+
+ const aiAssistantModuleModelProviderOllama = backendPluginApi.createBackendModule({
+   pluginId: "ai-assistant",
+   moduleId: "model-provider-ollama",
+   register(reg) {
+     reg.registerInit({
+       deps: {
+         config: backendPluginApi.coreServices.rootConfig,
+         modelProvider: backstagePluginAiAssistantNode.modelProviderExtensionPoint
+       },
+       async init({ config, modelProvider }) {
+         const ollamaConfig = config.getConfig("aiAssistant.models.ollama");
+         const baseUrl = ollamaConfig.getString("baseUrl");
+         const apiKey = ollamaConfig.getString("apiKey");
+         const modelIds = ollamaConfig.getStringArray("models");
+         const models = modelIds.map((modelId) => {
+           return {
+             id: modelId,
+             chatModel: new ollama.ChatOllama({
+               baseUrl,
+               model: modelId,
+               headers: {
+                 Authorization: `Bearer ${apiKey}`
+               }
+             })
+           };
+         });
+         models.forEach((model) => modelProvider.register(model));
+       }
+     });
+   }
+ });
+
+ exports.aiAssistantModuleModelProviderOllama = aiAssistantModuleModelProviderOllama;
+ //# sourceMappingURL=module.cjs.js.map
package/dist/module.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"module.cjs.js","sources":["../src/module.ts"],"sourcesContent":["import {\n coreServices,\n createBackendModule,\n} from '@backstage/backend-plugin-api';\nimport { ChatOllama } from '@langchain/ollama';\nimport {\n Model,\n modelProviderExtensionPoint,\n} from '@sweetoburrito/backstage-plugin-ai-assistant-node';\n\nexport const aiAssistantModuleModelProviderOllama = createBackendModule({\n pluginId: 'ai-assistant',\n moduleId: 'model-provider-ollama',\n register(reg) {\n reg.registerInit({\n deps: {\n config: coreServices.rootConfig,\n modelProvider: modelProviderExtensionPoint,\n },\n async init({ config, modelProvider }) {\n const ollamaConfig = config.getConfig('aiAssistant.models.ollama');\n\n const baseUrl = ollamaConfig.getString('baseUrl');\n const apiKey = ollamaConfig.getString('apiKey');\n const modelIds = ollamaConfig.getStringArray('models');\n\n const models: Model[] = modelIds.map(modelId => {\n return {\n id: modelId,\n chatModel: new ChatOllama({\n baseUrl,\n model: modelId,\n headers: {\n Authorization: `Bearer ${apiKey}`,\n },\n }),\n };\n });\n\n models.forEach(model => modelProvider.register(model));\n },\n });\n },\n});\n"],"names":["createBackendModule","coreServices","modelProviderExtensionPoint","ChatOllama"],"mappings":";;;;;;AAUO,MAAM,uCAAuCA,oCAAA,CAAoB;AAAA,EACtE,QAAA,EAAU,cAAA;AAAA,EACV,QAAA,EAAU,uBAAA;AAAA,EACV,SAAS,GAAA,EAAK;AACZ,IAAA,GAAA,CAAI,YAAA,CAAa;AAAA,MACf,IAAA,EAAM;AAAA,QACJ,QAAQC,6BAAA,CAAa,UAAA;AAAA,QACrB,aAAA,EAAeC;AAAA,OACjB;AAAA,MACA,MAAM,IAAA,CAAK,EAAE,MAAA,EAAQ,eAAc,EAAG;AACpC,QAAA,MAAM,YAAA,GAAe,MAAA,CAAO,SAAA,CAAU,2BAA2B,CAAA;AAEjE,QAAA,MAAM,OAAA,GAAU,YAAA,CAAa,SAAA,CAAU,SAAS,CAAA;AAChD,QAAA,MAAM,MAAA,GAAS,YAAA,CAAa,SAAA,CAAU,QAAQ,CAAA;AAC9C,QAAA,MAAM,QAAA,GAAW,YAAA,CAAa,cAAA,CAAe,QAAQ,CAAA;AAErD,QAAA,MAAM,MAAA,GAAkB,QAAA,CAAS,GAAA,CAAI,CAAA,OAAA,KAAW;AAC9C,UAAA,OAAO;AAAA,YACL,EAAA,EAAI,OAAA;AAAA,YACJ,SAAA,EAAW,IAAIC,iBAAA,CAAW;AAAA,cACxB,OAAA;AAAA,cACA,KAAA,EAAO,OAAA;AAAA,cACP,OAAA,EAAS;AAAA,gBACP,aAAA,EAAe,UAAU,MAAM,CAAA;AAAA;AACjC,aACD;AAAA,WACH;AAAA,QACF,CAAC,CAAA;AAED,QAAA,MAAA,CAAO,OAAA,CAAQ,CAAA,KAAA,KAAS,aAAA,CAAc,QAAA,CAAS,KAAK,CAAC,CAAA;AAAA,MACvD;AAAA,KACD,CAAA;AAAA,EACH;AACF,CAAC;;;;"}
package/package.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "name": "@sweetoburrito/backstage-plugin-ai-assistant-backend-module-model-provider-ollama",
+   "version": "0.0.0-snapshot-20251029150521",
+   "license": "Apache-2.0",
+   "description": "The model-provider-ollama backend module for the ai-assistant plugin.",
+   "main": "dist/index.cjs.js",
+   "types": "dist/index.d.ts",
+   "publishConfig": {
+     "access": "public",
+     "main": "dist/index.cjs.js",
+     "types": "dist/index.d.ts"
+   },
+   "backstage": {
+     "role": "backend-plugin-module",
+     "pluginId": "ai-assistant",
+     "pluginPackage": "@sweetoburrito/backstage-plugin-ai-assistant-backend",
+     "features": {
+       ".": "@backstage/BackendFeature"
+     }
+   },
+   "scripts": {
+     "start": "backstage-cli package start",
+     "build": "backstage-cli package build",
+     "lint": "backstage-cli package lint",
+     "test": "backstage-cli package test",
+     "clean": "backstage-cli package clean",
+     "prepack": "backstage-cli package prepack",
+     "postpack": "backstage-cli package postpack"
+   },
+   "dependencies": {
+     "@backstage/backend-plugin-api": "backstage:^",
+     "@langchain/ollama": "^0.2.3",
+     "@sweetoburrito/backstage-plugin-ai-assistant-node": "0.0.0-snapshot-20251029150521"
+   },
+   "devDependencies": {
+     "@backstage/backend-test-utils": "backstage:^",
+     "@backstage/cli": "backstage:^"
+   },
+   "files": [
+     "dist",
+     "config.d.ts"
+   ],
+   "configSchema": "config.d.ts",
+   "typesVersions": {
+     "*": {
+       "package.json": [
+         "package.json"
+       ]
+     }
+   }
+ }