node-red-contrib-knx-ultimate 4.3.1 → 4.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
  const fs = require('fs')

  module.exports = function (RED) {
- function knxUltimateLogger(config) {
+ function knxUltimateLogger (config) {
  RED.nodes.createNode(this, config)
  const node = this
  node.serverKNX = RED.nodes.getNode(config.server) || undefined
@@ -66,7 +66,6 @@ module.exports = function (RED) {

  if (!node.serverKNX) return

-
  const buildXMLFromLines = (lines) => {
  const header = '<CommunicationLog xmlns="http://knx.org/xml/telegrams/01">\n'
  const stop = '<RecordStop Timestamp="' + new Date().toISOString() + '" />\n'
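For orientation, here is a minimal sketch of how a helper like `buildXMLFromLines` can assemble an ETS-importable communication log from pre-rendered telegram rows. The `header` and `stop` strings mirror the diff above; the `join`, the closing tag, and the sample rows are assumptions for illustration, not the package's actual implementation.

```js
// Hedged sketch only: wraps already-serialized telegram rows with the
// CommunicationLog header and a RecordStop marker, then closes the root element.
const buildXMLFromLines = (lines) => {
  const header = '<CommunicationLog xmlns="http://knx.org/xml/telegrams/01">\n'
  const stop = '<RecordStop Timestamp="' + new Date().toISOString() + '" />\n'
  // Each entry in `lines` is assumed to already be a serialized telegram row.
  return header + lines.join('') + stop + '</CommunicationLog>\n'
}

// Hypothetical usage with two pre-rendered rows:
const xml = buildXMLFromLines([
  '<Telegram Timestamp="2024-01-01T10:00:00.000Z" RawData="..." />\n',
  '<Telegram Timestamp="2024-01-01T10:00:01.000Z" RawData="..." />\n'
])
console.log(xml)
```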
@@ -96,7 +95,7 @@ module.exports = function (RED) {
  }

  // 26/03/2020 Create and output the XML for ETS bus monitor
- function createETSXML() {
+ function createETSXML () {
  let sFile = '<CommunicationLog xmlns="http://knx.org/xml/telegrams/01">\n'
  for (let index = 0; index < node.etsXMLRow.length; index++) {
  const element = node.etsXMLRow[index]
@@ -129,7 +128,7 @@ module.exports = function (RED) {
  };

  // 25/10/2021 Count Telegrams. Requested by RicharddeCrep https://github.com/Supergiovane/node-red-contrib-knx-ultimate/issues/149#issue-1034644956
- function countTelegrams() {
+ function countTelegrams () {
  node.send([null, { topic: node.topic, payload: node.telegramCount, countIntervalInSeconds: node.intervalTelegramCount / 1000, currentTime: new Date().toLocaleString() }])
  node.setNodeStatus({ fill: 'green', shape: 'dot', text: 'Payload Telegram counter sent.', payload: node.telegramCount, GA: '', dpt: '', devicename: '' })
  node.telegramCount = 0
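Based on the `node.send([null, { ... }])` call above, the telegram counter emits a message on the node's second output (the `null` first element keeps the first output silent). A sketch of the message shape, with illustrative values only:

```js
// Reconstructed from the node.send call above; the concrete values are examples.
const counterMsg = {
  topic: 'knx/counter',                  // node.topic (example value)
  payload: 42,                           // telegrams counted in the last interval
  countIntervalInSeconds: 60,            // node.intervalTelegramCount / 1000
  currentTime: new Date().toLocaleString()
}
console.log(counterMsg)
```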
@@ -326,7 +326,6 @@ module.exports = function (RED) {
  if (parsed.event === 'GroupValue_Read') {
  if (typeof client.read !== 'function') throw new Error('KNX client does not support read')
  client.read(ga)
- return
  }
  }

@@ -46,6 +46,7 @@ Hier sind alle Felder aufgeführt, wie sie im KNX-AI-Editor sichtbar sind.
  - **Max changes per GA in window (0=off)**: Maximal erlaubte Änderungen im Fenster.

  ### LLM Assistant
+ - Der Tab **LLM Assistant** steht im Editor jetzt an erster Stelle für eine schnellere Einrichtung.
  - **Enable LLM assistant**: Aktiviert Ask/Chat-Funktionen.
  - **Provider**: LLM-Backend (OpenAI-compatible oder Ollama).
  - **Endpoint URL**: URL des Chat/Completions-Endpunkts.
@@ -65,6 +66,16 @@ Hier sind alle Felder aufgeführt, wie sie im KNX-AI-Editor sichtbar sind.
  - **Max docs chars**: Max. Gesamtzeichen aus Doku.
  - Button **Refresh**: Provider abfragen und verfügbare Modelle laden.

+ ### Ollama Schnellstart (lokal)
+ - **Provider = Ollama** auswählen.
+ - Standard-Endpoint: `http://localhost:11434/api/chat`.
+ - Wenn keine lokalen Modelle gefunden werden:
+ - **1) Download model**: öffnet die Seite **Model library**.
+ - **2) Install it**: lädt und installiert das Modell lokal (z. B. `llama3.1`).
+ - Beim Refresh/Install versucht KNX AI zusätzlich, den Ollama-Server automatisch zu starten.
+ - Bei Installationsfehlern mit Verbindungsproblem prüfen, ob Ollama läuft (Desktop-App oder `ollama serve`).
+ - Wenn Node-RED in Docker läuft, im Endpoint `host.docker.internal` statt `localhost` verwenden.
+
  ## Sicherheitshinweis
  Bei aktiviertem LLM kann KNX-Traffic-Kontext an den konfigurierten Endpoint gesendet werden. Für striktes On-Premise lokale Provider verwenden.
  </script>
@@ -53,7 +53,7 @@
  "selectlists": {
  "llmProvider": {
  "openai_compat": "OpenAI-compatible (chat/completions)",
- "ollama": "Ollama (local) - not yet supported"
+ "ollama": "Ollama (local, beta)"
  }
  },
  "placeholder": {
@@ -63,7 +63,13 @@
  "llmSystemPrompt": "Optional. Leave empty for default."
  },
  "messages": {
- "ollamaNotSupported": "Ollama integration is marked as not yet supported (testing in progress)."
+ "ollamaNotSupported": "Ollama local mode: API key not required. Default endpoint is http://localhost:11434/api/chat.",
+ "ollamaNoModels": "No local Ollama model found. Install one or pick one from the library.",
+ "installingOllamaModel": "Starting Ollama and installing model…",
+ "installedOllamaModel": "Ollama model installed",
+ "installOllamaModelFailed": "Failed to install Ollama model",
+ "ollamaInstallSteps": "1) Open the model library and copy the model name (for example llama3.1). 2) Put the name in the Model field and click Install it.",
+ "ollamaStartedAuto": "Ollama server started automatically."
  },
  "sidebar": {
  "ui": {
@@ -111,6 +117,11 @@
  "patternsTitle": "Muster (wiederkehrende Sequenzen):",
  "patternItem": "{{from}} → {{to}} ({{count}} mal innerhalb von {{withinMs}}ms)"
  }
+ },
+ "buttons": {
+ "installOllamaModel": "2) Install it",
+ "ollamaLibrary": "Model library",
+ "downloadOllamaModel": "1) Download model"
  }
  }
  }
@@ -46,6 +46,7 @@ All fields exposed in the KNX AI editor are listed below.
  - **Max changes per GA in window (0=off)**: Max allowed changes in flap window.

  ### LLM Assistant
+ - The **LLM Assistant** tab is shown first in the editor for faster setup.
  - **Enable LLM assistant**: Enable Ask/chat assistant features.
  - **Provider**: Select LLM backend (OpenAI-compatible or Ollama).
  - **Endpoint URL**: Chat/completions endpoint URL.
@@ -65,6 +66,16 @@ All fields exposed in the KNX AI editor are listed below.
  - **Max docs chars**: Max total docs characters.
  - **Refresh** button: Query provider and load available model IDs.

+ ### Ollama quick setup (local)
+ - Choose **Provider = Ollama**.
+ - Default endpoint: `http://localhost:11434/api/chat`.
+ - If no local models are found, use:
+ - **1) Download model**: opens the **Model library** page.
+ - **2) Install it**: downloads and installs the model locally (for example `llama3.1`).
+ - During model refresh/install, KNX AI also tries to auto-start the Ollama server when possible.
+ - If install fails with connection errors, ensure Ollama is running (desktop app or `ollama serve`).
+ - If Node-RED runs in Docker, use `host.docker.internal` instead of `localhost` in the endpoint URL.
+
  ## Security note
  If LLM is enabled, KNX traffic context can be sent to the configured endpoint. Use local providers if you need strict on-prem data handling.
  </script>
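As a sanity check outside the editor, the sketch below probes the standard Ollama REST endpoints (`/api/tags` to list local models, `/api/chat` for a non-streaming request); these endpoints belong to Ollama itself, not to this package, and the model name `llama3.1` is only an example. It needs Node.js 18+ for the global `fetch`; from a Dockerized Node-RED, point `OLLAMA` at `http://host.docker.internal:11434` instead of `localhost`.

```js
// Minimal sketch, assuming a locally running Ollama server (not part of knx-ultimate).
const OLLAMA = 'http://localhost:11434' // or http://host.docker.internal:11434 from Docker

async function checkOllama () {
  // 1) List locally installed models (GET /api/tags).
  const tags = await (await fetch(`${OLLAMA}/api/tags`)).json()
  console.log('Local models:', tags.models.map(m => m.name))

  // 2) Send one non-streaming chat request (POST /api/chat), similar to what the
  //    KNX AI node is configured to call via its Endpoint URL.
  const res = await fetch(`${OLLAMA}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'llama3.1', // example model name; use whichever model you installed
      stream: false,
      messages: [{ role: 'user', content: 'Reply with OK if you can read this.' }]
    })
  })
  const data = await res.json()
  console.log('Answer:', data.message.content)
}

checkOllama().catch(err => console.error('Ollama not reachable:', err.message))
```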
@@ -53,16 +53,25 @@
  "selectlists": {
  "llmProvider": {
  "openai_compat": "OpenAI-compatible (chat/completions)",
- "ollama": "Ollama (local) - not yet supported"
+ "ollama": "Ollama (local, beta)"
  }
  },
  "buttons": {
- "refreshModels": "Refresh"
+ "refreshModels": "Refresh",
+ "installOllamaModel": "2) Install it",
+ "ollamaLibrary": "Model library",
+ "downloadOllamaModel": "1) Download model"
  },
  "messages": {
  "loadingModels": "Loading models…",
  "loadedModels": "Models loaded",
- "ollamaNotSupported": "Ollama integration is marked as not yet supported (testing in progress)."
+ "ollamaNotSupported": "Ollama local mode: API key not required. Default endpoint is http://localhost:11434/api/chat.",
+ "ollamaNoModels": "No local Ollama model found. Install one or pick one from the library.",
+ "installingOllamaModel": "Starting Ollama and installing model…",
+ "installedOllamaModel": "Ollama model installed",
+ "installOllamaModelFailed": "Failed to install Ollama model",
+ "ollamaInstallSteps": "1) Open the model library and copy the model name (for example llama3.1). 2) Put the name in the Model field and click Install it.",
+ "ollamaStartedAuto": "Ollama server started automatically."
  },
  "placeholder": {
  "llmBaseUrl": "https://api.openai.com/v1/chat/completions (or your compatible endpoint)",
@@ -46,6 +46,7 @@ Aquí tienes todos los campos tal como se muestran en el editor de KNX AI.
  - **Max changes per GA in window (0=off)**: cambios máximos permitidos en ventana.

  ### LLM Assistant
+ - La pestaña **LLM Assistant** ahora aparece primero en el editor para facilitar la configuración.
  - **Enable LLM assistant**: habilita funciones Ask/chat.
  - **Provider**: backend LLM (OpenAI-compatible u Ollama).
  - **Endpoint URL**: URL endpoint chat/completions.
@@ -65,6 +66,16 @@ Aquí tienes todos los campos tal como se muestran en el editor de KNX AI.
  - **Max docs chars**: máximo total de caracteres de documentación.
  - Botón **Refresh**: consulta el provider y carga modelos disponibles.

+ ### Configuración rápida de Ollama (local)
+ - Selecciona **Provider = Ollama**.
+ - Endpoint por defecto: `http://localhost:11434/api/chat`.
+ - Si no hay modelos locales:
+ - **1) Download model**: abre la página **Model library**.
+ - **2) Install it**: descarga e instala el modelo localmente (p. ej. `llama3.1`).
+ - Durante refresh/instalación, KNX AI también intenta iniciar automáticamente el servidor Ollama.
+ - Si la instalación falla con error de conexión, verifica que Ollama esté ejecutándose (app de escritorio o `ollama serve`).
+ - Si Node-RED se ejecuta en Docker, usa `host.docker.internal` en lugar de `localhost` en el endpoint.
+
  ## Nota de seguridad
  Si el LLM está habilitado, el contexto de tráfico KNX puede enviarse al endpoint configurado. Para privacidad on-premise, usa proveedores locales.
  </script>
@@ -53,11 +53,17 @@
  "selectlists": {
  "llmProvider": {
  "openai_compat": "OpenAI-compatible (chat/completions)",
- "ollama": "Ollama (local) - not yet supported"
+ "ollama": "Ollama (local, beta)"
  }
  },
  "messages": {
- "ollamaNotSupported": "Ollama integration is marked as not yet supported (testing in progress)."
+ "ollamaNotSupported": "Ollama local mode: API key not required. Default endpoint is http://localhost:11434/api/chat.",
+ "ollamaNoModels": "No local Ollama model found. Install one or pick one from the library.",
+ "installingOllamaModel": "Starting Ollama and installing model…",
+ "installedOllamaModel": "Ollama model installed",
+ "installOllamaModelFailed": "Failed to install Ollama model",
+ "ollamaInstallSteps": "1) Open the model library and copy the model name (for example llama3.1). 2) Put the name in the Model field and click Install it.",
+ "ollamaStartedAuto": "Ollama server started automatically."
  },
  "placeholder": {
  "llmBaseUrl": "https://api.openai.com/v1/chat/completions (or your compatible endpoint)",
@@ -111,6 +117,11 @@
  "patternsTitle": "Patrones (secuencias recurrentes):",
  "patternItem": "{{from}} → {{to}} ({{count}} veces en {{withinMs}}ms)"
  }
+ },
+ "buttons": {
+ "installOllamaModel": "2) Install it",
+ "ollamaLibrary": "Model library",
+ "downloadOllamaModel": "1) Download model"
  }
  }
  }
@@ -46,6 +46,7 @@ Voici tous les champs tels qu'affichés dans l'éditeur KNX AI.
  - **Max changes per GA in window (0=off)** : nombre max de changements autorisés.

  ### LLM Assistant
+ - L'onglet **LLM Assistant** est maintenant affiché en premier dans l'éditeur.
  - **Enable LLM assistant** : active les fonctions Ask/chat.
  - **Provider** : backend LLM (OpenAI-compatible ou Ollama).
  - **Endpoint URL** : URL endpoint chat/completions.
@@ -65,6 +66,16 @@ Voici tous les champs tels qu'affichés dans l'éditeur KNX AI.
  - **Max docs chars** : nombre total max de caractères documentation.
  - Bouton **Refresh** : interroge le provider et charge les modèles disponibles.

+ ### Démarrage rapide Ollama (local)
+ - Choisir **Provider = Ollama**.
+ - Endpoint par défaut : `http://localhost:11434/api/chat`.
+ - Si aucun modèle local n'est trouvé :
+ - **1) Download model** : ouvre la page **Model library**.
+ - **2) Install it** : télécharge et installe le modèle localement (ex. `llama3.1`).
+ - Pendant refresh/install, KNX AI tente aussi de démarrer automatiquement le serveur Ollama.
+ - Si l'installation échoue avec une erreur de connexion, vérifier qu'Ollama est lancé (app desktop ou `ollama serve`).
+ - Si Node-RED tourne dans Docker, utiliser `host.docker.internal` au lieu de `localhost` dans l'endpoint.
+
  ## Note sécurité
  Si le LLM est activé, le contexte trafic KNX peut être envoyé à l'endpoint configuré. Pour un usage strictement on-premise, utilisez un provider local.
  </script>
@@ -53,11 +53,17 @@
  "selectlists": {
  "llmProvider": {
  "openai_compat": "OpenAI-compatible (chat/completions)",
- "ollama": "Ollama (local) - not yet supported"
+ "ollama": "Ollama (local, beta)"
  }
  },
  "messages": {
- "ollamaNotSupported": "Ollama integration is marked as not yet supported (testing in progress)."
+ "ollamaNotSupported": "Ollama local mode: API key not required. Default endpoint is http://localhost:11434/api/chat.",
+ "ollamaNoModels": "No local Ollama model found. Install one or pick one from the library.",
+ "installingOllamaModel": "Starting Ollama and installing model…",
+ "installedOllamaModel": "Ollama model installed",
+ "installOllamaModelFailed": "Failed to install Ollama model",
+ "ollamaInstallSteps": "1) Open the model library and copy the model name (for example llama3.1). 2) Put the name in the Model field and click Install it.",
+ "ollamaStartedAuto": "Ollama server started automatically."
  },
  "placeholder": {
  "llmBaseUrl": "https://api.openai.com/v1/chat/completions (or your compatible endpoint)",
@@ -111,6 +117,11 @@
  "patternsTitle": "Motifs (séquences récurrentes) :",
  "patternItem": "{{from}} → {{to}} ({{count}} fois en {{withinMs}}ms)"
  }
+ },
+ "buttons": {
+ "installOllamaModel": "2) Install it",
+ "ollamaLibrary": "Model library",
+ "downloadOllamaModel": "1) Download model"
  }
  }
  }
@@ -46,6 +46,7 @@ Di seguito sono elencati tutti i campi presenti nell'editor del nodo KNX AI.
  - **Max cambi per GA nella finestra (0=off)**: massimo numero di cambi consentiti.

  ### Assistente LLM
+ - Il tab **Assistente LLM** è mostrato per primo nell'editor, per semplificare la configurazione.
  - **Abilita assistente LLM**: abilita funzioni Ask/chat.
  - **Provider**: backend LLM (OpenAI-compatible o Ollama).
  - **URL endpoint**: URL endpoint chat/completions.
@@ -65,6 +66,16 @@ Di seguito sono elencati tutti i campi presenti nell'editor del nodo KNX AI.
  - **Max caratteri docs**: massimo totale caratteri docs.
  - Pulsante **Aggiorna**: interroga il provider e popola i modelli disponibili.

+ ### Setup rapido Ollama (locale)
+ - Seleziona **Provider = Ollama**.
+ - Endpoint predefinito: `http://localhost:11434/api/chat`.
+ - Se non trovi modelli locali, usa:
+ - **1) Scarica il modello**: apre la pagina **Libreria modelli**.
+ - **2) Installalo**: scarica e installa localmente il modello (esempio `llama3.1`).
+ - Durante refresh/installazione, KNX AI prova anche ad avviare automaticamente il server Ollama quando possibile.
+ - Se l'installazione fallisce per errore di connessione, verifica che Ollama sia avviato (app desktop o `ollama serve`).
+ - Se Node-RED gira in Docker, usa `host.docker.internal` al posto di `localhost` nell'endpoint.
+
  ## Nota sicurezza
  Se l'LLM è abilitato, il contesto traffico KNX può essere inviato all'endpoint configurato. Per privacy on-prem, usa provider locali.
  </script>
@@ -53,16 +53,25 @@
  "selectlists": {
  "llmProvider": {
  "openai_compat": "Compatibile OpenAI (chat/completions)",
- "ollama": "Ollama (locale) - non ancora supportato"
+ "ollama": "Ollama (locale, beta)"
  }
  },
  "buttons": {
- "refreshModels": "Aggiorna"
+ "refreshModels": "Aggiorna",
+ "installOllamaModel": "2) Installalo",
+ "ollamaLibrary": "Libreria modelli",
+ "downloadOllamaModel": "1) Scarica il modello"
  },
  "messages": {
  "loadingModels": "Carico i modelli…",
  "loadedModels": "Modelli caricati",
- "ollamaNotSupported": "Integrazione Ollama marcata come non ancora supportata (test in corso)."
+ "ollamaNotSupported": "Modalita locale Ollama: API key non richiesta. Endpoint predefinito: http://localhost:11434/api/chat.",
+ "ollamaNoModels": "Nessun modello Ollama locale trovato. Installa un modello o scegli dalla libreria.",
+ "installingOllamaModel": "Avvio Ollama e installo il modello…",
+ "installedOllamaModel": "Modello Ollama installato",
+ "installOllamaModelFailed": "Installazione modello Ollama non riuscita",
+ "ollamaInstallSteps": "1) Apri la libreria, scegli un modello e copiane il nome (es. llama3.1). 2) Inserisci il nome nel campo Modello e clicca Installalo.",
+ "ollamaStartedAuto": "Server Ollama avviato automaticamente."
  },
  "placeholder": {
  "llmBaseUrl": "https://api.openai.com/v1/chat/completions (o endpoint compatibile)",
@@ -46,6 +46,7 @@
  - **Max changes per GA in window (0=off)**:窗口内允许的最大变化次数。

  ### LLM Assistant
+ - **LLM Assistant** 标签页现在固定在编辑器最前面,便于快速配置。
  - **Enable LLM assistant**:启用 Ask/chat 功能。
  - **Provider**:LLM 后端(OpenAI-compatible 或 Ollama)。
  - **Endpoint URL**:chat/completions 接口 URL。
@@ -65,6 +66,16 @@
  - **Max docs chars**:文档片段最大总字符数。
  - **Refresh** 按钮:请求 provider 并加载可用模型 ID。

+ ### Ollama 快速配置(本地)
+ - 选择 **Provider = Ollama**。
+ - 默认 endpoint:`http://localhost:11434/api/chat`。
+ - 若未发现本地模型:
+ - **1) Download model**:打开 **Model library** 页面。
+ - **2) Install it**:在本机下载并安装模型(例如 `llama3.1`)。
+ - 在刷新/安装模型时,KNX AI 也会在可能情况下尝试自动启动 Ollama 服务。
+ - 若安装因连接错误失败,请确认 Ollama 已运行(桌面应用或 `ollama serve`)。
+ - 若 Node-RED 运行在 Docker 中,endpoint 请使用 `host.docker.internal` 替代 `localhost`。
+
  ## 安全说明
  启用 LLM 后,KNX 流量上下文可能发送到所配置的 endpoint。若需严格本地化,请使用本地 provider。
  </script>
@@ -53,11 +53,17 @@
  "selectlists": {
  "llmProvider": {
  "openai_compat": "OpenAI-compatible (chat/completions)",
- "ollama": "Ollama (local) - not yet supported"
+ "ollama": "Ollama (local, beta)"
  }
  },
  "messages": {
- "ollamaNotSupported": "Ollama integration is marked as not yet supported (testing in progress)."
+ "ollamaNotSupported": "Ollama local mode: API key not required. Default endpoint is http://localhost:11434/api/chat.",
+ "ollamaNoModels": "No local Ollama model found. Install one or pick one from the library.",
+ "installingOllamaModel": "Starting Ollama and installing model…",
+ "installedOllamaModel": "Ollama model installed",
+ "installOllamaModelFailed": "Failed to install Ollama model",
+ "ollamaInstallSteps": "1) Open the model library and copy the model name (for example llama3.1). 2) Put the name in the Model field and click Install it.",
+ "ollamaStartedAuto": "Ollama server started automatically."
  },
  "placeholder": {
  "llmBaseUrl": "https://api.openai.com/v1/chat/completions (or your compatible endpoint)",
@@ -111,6 +117,11 @@
  "patternsTitle": "模式(重复序列):",
  "patternItem": "{{from}} → {{to}}({{count}} 次,{{withinMs}}ms 内)"
  }
+ },
+ "buttons": {
+ "installOllamaModel": "2) Install it",
+ "ollamaLibrary": "Model library",
+ "downloadOllamaModel": "1) Download model"
  }
  }
  }