@sweetoburrito/backstage-plugin-ai-assistant-backend-module-embeddings-provider-ollama 0.0.0-snapshot-20251029150521

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,77 @@
1
+ # @sweetoburrito/backstage-plugin-ai-assistant-backend-module-embeddings-provider-ollama
2
+
3
+ An embeddings provider module that lets the Backstage AI Assistant backend create vector
4
+ embeddings using Ollama-hosted models (local Ollama server or Ollama Cloud).
5
+
6
+ This README explains how the provider works, when to use it, configuration options, and how
7
+ to wire it into your Backstage backend.
8
+
9
+ ## Features
10
+
11
+ - Convert text or documents to numeric vector embeddings using an Ollama model.
12
+ - Exposes a provider implementation compatible with the AI Assistant backend so different
13
+ embeddings services can be swapped without changing the rest of the app.
14
+ - Minimal configuration for local or remote Ollama endpoints and optional API key support.
15
+
16
+ ## When to use
17
+
18
+ Use this module when you run an Ollama embeddings-capable model and want the AI Assistant to
19
+ build semantic search indices, vector stores, or provide retrieval-augmented generation (RAG)
20
+ capabilities in Backstage. It's a good fit for local development with Ollama or when using an
21
+ Ollama-hosted endpoint.
22
+
23
+ ## Configuration
24
+
25
+ Add the provider configuration to your Backstage `app-config.yaml` or `app-config.local.yaml`
26
+ under `aiAssistant.embeddings.ollama`.
27
+
28
+ Minimum configuration keys (example):
29
+
30
+ ```yaml
31
+ aiAssistant:
32
+ embeddings:
33
+ ollama:
34
+ baseUrl: 'http://localhost:11434'
35
+ model: 'nomic-embed-text'
36
+ apiKey: ${OLLAMA_API_KEY}
37
+ ```
38
+
39
+ Field descriptions:
40
+
41
+ - `baseUrl` - The base URL of your Ollama service. For a local Ollama server this is typically
42
+ `http://localhost:11434`. For Ollama Cloud or a proxied endpoint, use the full base URL. For Ollama accessed through Open WebUI, set the base URL to the `/ollama` route, e.g. `http://<webui-host>/ollama`.
43
+ - `model` - The name of the model to use for generating embeddings. The model must support
44
+ embeddings (check your Ollama model documentation for supported capabilities).
45
+ - `apiKey` - (Optional) An API key for Ollama Cloud or any endpoint that requires authentication.
46
+ Mark this value as secret in Backstage configuration when applicable.
47
+
48
+ The exact keys available and required depend on your Ollama setup. Check the provider's
49
+ `config.d.ts` in the package for the canonical types used by the module.
50
+
51
+ ## Install
52
+
53
+ Install the module into your Backstage backend workspace:
54
+
55
+ ```sh
56
+ yarn workspace backend add @sweetoburrito/backstage-plugin-ai-assistant-backend-module-embeddings-provider-ollama
57
+ ```
58
+
59
+ ## Wire the provider into your backend
60
+
61
+ Add the provider module import to your backend entrypoint (usually `packages/backend/src/index.ts`):
62
+
63
+ ```diff
64
+ // packages/backend/src/index.ts
65
+
66
+ // other backend modules...
67
+ backend.add(import('@sweetoburrito/backstage-plugin-ai-assistant-backend'));
68
+
69
+ // Add the Ollama embeddings provider
70
+ ++backend.add(
71
+ ++ import(
72
+ ++ '@sweetoburrito/backstage-plugin-ai-assistant-backend-module-embeddings-provider-ollama'
73
+ ++ ),
74
+ ++);
75
+ ```
76
+
77
+ Restart your backend after adding the provider so it registers with the AI Assistant plugin.
package/config.d.ts ADDED
@@ -0,0 +1,14 @@
1
+ export interface Config {
2
+ aiAssistant: {
3
+ embeddings: {
4
+ ollama: {
5
+ baseUrl: string;
6
+ model: string;
7
+ /**
8
+ * @visibility secret
9
+ */
10
+ apiKey: string;
11
+ };
12
+ };
13
+ };
14
+ }
@@ -0,0 +1,10 @@
1
'use strict';

// CommonJS entry point: mark the exports object as an ES-module interop
// namespace and re-export the Ollama embeddings backend module as default.
Object.defineProperty(exports, '__esModule', { value: true });

const ollamaEmbeddingsModule = require('./module.cjs.js');

exports.default = ollamaEmbeddingsModule.aiAssistantModuleEmbeddingsProviderOllama;
//# sourceMappingURL=index.cjs.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.cjs.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;"}
@@ -0,0 +1,5 @@
1
+ import * as _backstage_backend_plugin_api from '@backstage/backend-plugin-api';
2
+
3
+ declare const aiAssistantModuleEmbeddingsProviderOllama: _backstage_backend_plugin_api.BackendFeature;
4
+
5
+ export { aiAssistantModuleEmbeddingsProviderOllama as default };
@@ -0,0 +1,39 @@
1
'use strict';

var backendPluginApi = require('@backstage/backend-plugin-api');
var backstagePluginAiAssistantNode = require('@sweetoburrito/backstage-plugin-ai-assistant-node');
var ollama = require('@langchain/ollama');

/**
 * Backend module registering an Ollama embeddings provider with the
 * AI Assistant plugin.
 *
 * Reads `aiAssistant.embeddings.ollama.{baseUrl,model,apiKey}` from root
 * config. `apiKey` is optional (the README documents it as such): when it
 * is absent no Authorization header is sent, which matches a local Ollama
 * server that requires no authentication.
 */
const aiAssistantModuleEmbeddingsProviderOllama = backendPluginApi.createBackendModule({
  pluginId: "ai-assistant",
  moduleId: "embeddings-provider-ollama",
  register(reg) {
    reg.registerInit({
      deps: {
        config: backendPluginApi.coreServices.rootConfig,
        embeddingsProvider: backstagePluginAiAssistantNode.embeddingsProviderExtensionPoint
      },
      async init({ embeddingsProvider, config }) {
        const model = config.getString("aiAssistant.embeddings.ollama.model");
        const baseUrl = config.getString(
          "aiAssistant.embeddings.ollama.baseUrl"
        );
        // Optional per README; getString would throw when the key is absent,
        // breaking the documented local-Ollama (no auth) setup.
        const apiKey = config.getOptionalString(
          "aiAssistant.embeddings.ollama.apiKey"
        );

        const embeddings = new ollama.OllamaEmbeddings({
          baseUrl,
          model,
          // Only attach an Authorization header when a key was configured.
          ...(apiKey
            ? { headers: { Authorization: `Bearer ${apiKey}` } }
            : {})
        });

        return embeddingsProvider.register({
          getEmbeddings: async () => embeddings
        });
      }
    });
  }
});

exports.aiAssistantModuleEmbeddingsProviderOllama = aiAssistantModuleEmbeddingsProviderOllama;
//# sourceMappingURL=module.cjs.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"module.cjs.js","sources":["../src/module.ts"],"sourcesContent":["import {\n coreServices,\n createBackendModule,\n} from '@backstage/backend-plugin-api';\nimport { embeddingsProviderExtensionPoint } from '@sweetoburrito/backstage-plugin-ai-assistant-node';\nimport { OllamaEmbeddings } from '@langchain/ollama';\n\nexport const aiAssistantModuleEmbeddingsProviderOllama = createBackendModule({\n pluginId: 'ai-assistant',\n moduleId: 'embeddings-provider-ollama',\n register(reg) {\n reg.registerInit({\n deps: {\n logger: coreServices.logger,\n config: coreServices.rootConfig,\n embeddingsProvider: embeddingsProviderExtensionPoint,\n },\n async init({ embeddingsProvider, config }) {\n const model = config.getString('aiAssistant.embeddings.ollama.model');\n const apiKey = config.getString('aiAssistant.embeddings.ollama.apiKey');\n const baseUrl = config.getString(\n 'aiAssistant.embeddings.ollama.baseUrl',\n );\n\n const embeddings = new OllamaEmbeddings({\n baseUrl,\n model,\n headers: {\n Authorization: `Bearer ${apiKey}`,\n },\n });\n\n return embeddingsProvider.register({\n getEmbeddings: async () => embeddings,\n });\n },\n });\n 
},\n});\n"],"names":["createBackendModule","coreServices","embeddingsProviderExtensionPoint","OllamaEmbeddings"],"mappings":";;;;;;AAOO,MAAM,4CAA4CA,oCAAA,CAAoB;AAAA,EAC3E,QAAA,EAAU,cAAA;AAAA,EACV,QAAA,EAAU,4BAAA;AAAA,EACV,SAAS,GAAA,EAAK;AACZ,IAAA,GAAA,CAAI,YAAA,CAAa;AAAA,MACf,IAAA,EAAM;AAAA,QACJ,QAAQC,6BAAA,CAAa,MAAA;AAAA,QACrB,QAAQA,6BAAA,CAAa,UAAA;AAAA,QACrB,kBAAA,EAAoBC;AAAA,OACtB;AAAA,MACA,MAAM,IAAA,CAAK,EAAE,kBAAA,EAAoB,QAAO,EAAG;AACzC,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,SAAA,CAAU,qCAAqC,CAAA;AACpE,QAAA,MAAM,MAAA,GAAS,MAAA,CAAO,SAAA,CAAU,sCAAsC,CAAA;AACtE,QAAA,MAAM,UAAU,MAAA,CAAO,SAAA;AAAA,UACrB;AAAA,SACF;AAEA,QAAA,MAAM,UAAA,GAAa,IAAIC,uBAAA,CAAiB;AAAA,UACtC,OAAA;AAAA,UACA,KAAA;AAAA,UACA,OAAA,EAAS;AAAA,YACP,aAAA,EAAe,UAAU,MAAM,CAAA;AAAA;AACjC,SACD,CAAA;AAED,QAAA,OAAO,mBAAmB,QAAA,CAAS;AAAA,UACjC,eAAe,YAAY;AAAA,SAC5B,CAAA;AAAA,MACH;AAAA,KACD,CAAA;AAAA,EACH;AACF,CAAC;;;;"}
package/package.json ADDED
@@ -0,0 +1,51 @@
1
+ {
2
+ "name": "@sweetoburrito/backstage-plugin-ai-assistant-backend-module-embeddings-provider-ollama",
3
+ "version": "0.0.0-snapshot-20251029150521",
4
+ "license": "Apache-2.0",
5
+ "description": "The embeddings-provider-ollama backend module for the ai-assistant plugin.",
6
+ "main": "dist/index.cjs.js",
7
+ "types": "dist/index.d.ts",
8
+ "publishConfig": {
9
+ "access": "public",
10
+ "main": "dist/index.cjs.js",
11
+ "types": "dist/index.d.ts"
12
+ },
13
+ "backstage": {
14
+ "role": "backend-plugin-module",
15
+ "pluginId": "ai-assistant",
16
+ "pluginPackage": "@sweetoburrito/backstage-plugin-ai-assistant-backend",
17
+ "features": {
18
+ ".": "@backstage/BackendFeature"
19
+ }
20
+ },
21
+ "scripts": {
22
+ "start": "backstage-cli package start",
23
+ "build": "backstage-cli package build",
24
+ "lint": "backstage-cli package lint",
25
+ "test": "backstage-cli package test",
26
+ "clean": "backstage-cli package clean",
27
+ "prepack": "backstage-cli package prepack",
28
+ "postpack": "backstage-cli package postpack"
29
+ },
30
+ "dependencies": {
31
+ "@backstage/backend-plugin-api": "backstage:^",
32
+ "@langchain/ollama": "^0.2.3",
33
+ "@sweetoburrito/backstage-plugin-ai-assistant-node": "0.0.0-snapshot-20251029150521"
34
+ },
35
+ "devDependencies": {
36
+ "@backstage/backend-test-utils": "backstage:^",
37
+ "@backstage/cli": "backstage:^"
38
+ },
39
+ "configSchema": "config.d.ts",
40
+ "files": [
41
+ "dist",
42
+ "config.d.ts"
43
+ ],
44
+ "typesVersions": {
45
+ "*": {
46
+ "package.json": [
47
+ "package.json"
48
+ ]
49
+ }
50
+ }
51
+ }