plugin-custom-llm 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1 +1,77 @@
1
- # @nocobase/plugin-custom-llm
1
+ # Plugin Custom LLM (OpenAI Compatible)
2
+
3
+ NocoBase plugin for integrating external LLM providers that support OpenAI-compatible `/chat/completions` API, with built-in response format normalization and response mapping for non-standard APIs.
4
+
5
+ ## Features
6
+
7
+ - **OpenAI-compatible**: Works with any LLM provider exposing `/chat/completions` endpoint
8
+ - **Auto content detection**: Handles both string and array content blocks (`[{type: 'text', text: '...'}]`)
9
+ - **Response mapping**: Transform non-standard API responses to OpenAI format via JSON config (supports streaming SSE and JSON)
10
+ - **Reasoning content**: Display thinking/reasoning from DeepSeek-compatible providers
11
+ - **Configurable**: JSON config editors for request and response customization
12
+ - **Locale support**: English, Vietnamese, Chinese
13
+
14
+ ## Installation
15
+
16
+ Upload `plugin-custom-llm-x.x.x.tgz` via NocoBase Plugin Manager UI, then enable.
17
+
18
+ ## Configuration
19
+
20
+ ### Provider Settings
21
+
22
+ | Field | Description |
23
+ |---|---|
24
+ | **Base URL** | LLM endpoint URL, e.g. `https://your-llm-server.com/v1` |
25
+ | **API Key** | Authentication key |
26
+ | **Request config (JSON)** | Optional. Extra request configuration |
27
+ | **Response config (JSON)** | Optional. Response parsing and mapping configuration |
28
+
29
+ ### Request Config
30
+
31
+ ```json
32
+ {
33
+ "extraHeaders": { "X-Custom-Header": "value" },
34
+ "extraBody": { "custom_field": "value" },
35
+ "modelKwargs": { "stop": ["\n"] }
36
+ }
37
+ ```
38
+
39
+ - `extraHeaders` — Custom HTTP headers sent with every request
40
+ - `extraBody` — Additional fields merged into the request body
41
+ - `modelKwargs` — Extra LangChain model parameters (stop sequences, etc.)
42
+
43
+ ### Response Config
44
+
45
+ ```json
46
+ {
47
+ "contentPath": "auto",
48
+ "reasoningKey": "reasoning_content",
49
+ "responseMapping": {
50
+ "content": "message.response"
51
+ }
52
+ }
53
+ ```
54
+
55
+ - `contentPath` — How to extract text from LangChain chunks. `"auto"` (default) detects string, array, and object formats. Or use a dot-path like `"0.text"`
56
+ - `reasoningKey` — Key name for reasoning/thinking content in `additional_kwargs` (default: `"reasoning_content"`)
57
+ - `responseMapping` — Maps non-standard LLM responses to OpenAI format before LangChain processes them:
58
+ - `content` — Dot-path to the content field in the raw response (e.g. `"message.response"`, `"data.text"`)
59
+ - `role` — Dot-path to role field (optional, defaults to `"assistant"`)
60
+ - `id` — Dot-path to response ID (optional)
61
+
62
+ ### Response Mapping Examples
63
+
64
+ | Raw LLM Response | `responseMapping.content` |
65
+ |---|---|
66
+ | `{"message": {"response": "..."}}` | `message.response` |
67
+ | `{"data": {"text": "..."}}` | `data.text` |
68
+ | `{"result": "..."}` | `result` |
69
+ | `{"output": {"content": {"text": "..."}}}` | `output.content.text` |
70
+
71
+ ### Model Settings
72
+
73
+ Standard OpenAI-compatible parameters: temperature, max tokens, top P, frequency/presence penalty, response format, timeout, max retries.
74
+
75
+ ## License
76
+
77
+ Apache-2.0
@@ -7,4 +7,4 @@
7
7
  * For more information, please refer to: https://www.nocobase.com/agreement.
8
8
  */
9
9
 
10
- !function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t(require("react"),require("@nocobase/plugin-ai/client"),require("@nocobase/client"),require("@nocobase/utils/client"),require("antd"),require("react-i18next")):"function"==typeof define&&define.amd?define("plugin-custom-llm",["react","@nocobase/plugin-ai/client","@nocobase/client","@nocobase/utils/client","antd","react-i18next"],t):"object"==typeof exports?exports["plugin-custom-llm"]=t(require("react"),require("@nocobase/plugin-ai/client"),require("@nocobase/client"),require("@nocobase/utils/client"),require("antd"),require("react-i18next")):e["plugin-custom-llm"]=t(e.react,e["@nocobase/plugin-ai/client"],e["@nocobase/client"],e["@nocobase/utils/client"],e.antd,e["react-i18next"])}(self,function(e,t,n,o,r,i){return function(){"use strict";var u={772:function(e){e.exports=n},645:function(e){e.exports=t},584:function(e){e.exports=o},721:function(e){e.exports=r},156:function(t){t.exports=e},238:function(e){e.exports=i}},c={};function a(e){var t=c[e];if(void 0!==t)return t.exports;var n=c[e]={exports:{}};return u[e](n,n.exports,a),n.exports}a.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return a.d(t,{a:t}),t},a.d=function(e,t){for(var n in t)a.o(t,n)&&!a.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},a.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},a.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})};var l={};return!function(){a.r(l),a.d(l,{PluginCustomLLMClient:function(){return g},default:function(){return S}});var e=a(772),t=a(156),n=a.n(t),o=a(584),r=a(238),i="@nocobase/plugin-custom-llm",u=a(721),c=a(645),p=function(){var t=(0,r.useTranslation)(i,{nsMode:"fallback"}).t;return 
n().createElement("div",{style:{marginBottom:24}},n().createElement(u.Collapse,{bordered:!1,size:"small",items:[{key:"options",label:t("Options"),forceRender:!0,children:n().createElement(e.SchemaComponent,{schema:{type:"void",name:"custom-llm",properties:{temperature:{title:(0,o.tval)("Temperature",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:.7,"x-component-props":{step:.1,min:0,max:2}},maxCompletionTokens:{title:(0,o.tval)("Max completion tokens",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:-1},topP:{title:(0,o.tval)("Top P",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:1,"x-component-props":{step:.1,min:0,max:1}},frequencyPenalty:{title:(0,o.tval)("Frequency penalty",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:0,"x-component-props":{step:.1,min:-2,max:2}},presencePenalty:{title:(0,o.tval)("Presence penalty",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:0,"x-component-props":{step:.1,min:-2,max:2}},responseFormat:{title:(0,o.tval)("Response format",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Select",enum:[{label:t("Text"),value:"text"},{label:t("JSON"),value:"json_object"}],default:"text"},timeout:{title:(0,o.tval)("Timeout (ms)",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:6e4},maxRetries:{title:(0,o.tval)("Max retries",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:1}}}})}]}))},s={components:{ProviderSettingsForm:function(){return n().createElement(e.SchemaComponent,{schema:{type:"void",properties:{baseURL:{title:(0,o.tval)("Base URL",{ns:i}),type:"string",required:!0,"x-decorator":"FormItem","x-component":"TextAreaWithGlobalScope","x-component-props":{placeholder:"https://your-llm-server.com/v1"}},apiKey:{title:(0,o.tval)("API 
Key",{ns:i}),type:"string",required:!0,"x-decorator":"FormItem","x-component":"TextAreaWithGlobalScope"},requestConfig:{title:(0,o.tval)("Request config (JSON)",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Input.TextArea","x-component-props":{placeholder:JSON.stringify({extraHeaders:{},extraBody:{},modelKwargs:{}},null,2),rows:6,style:{fontFamily:"monospace",fontSize:12}},description:(0,o.tval)("Request config description",{ns:i})},responseConfig:{title:(0,o.tval)("Response config (JSON)",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Input.TextArea","x-component-props":{placeholder:JSON.stringify({contentPath:"auto",reasoningKey:"reasoning_content"},null,2),rows:4,style:{fontFamily:"monospace",fontSize:12}},description:(0,o.tval)("Response config description",{ns:i})}}}})},ModelSettingsForm:function(){return n().createElement(e.SchemaComponent,{components:{Options:p,ModelSelect:c.ModelSelect},schema:{type:"void",properties:{model:{title:(0,o.tval)("Model",{ns:i}),type:"string",required:!0,"x-decorator":"FormItem","x-component":"ModelSelect"},options:{type:"void","x-component":"Options"}}}})}}};function f(e,t,n,o,r,i,u){try{var c=e[i](u),a=c.value}catch(e){n(e);return}c.done?t(a):Promise.resolve(a).then(o,r)}function m(e){return function(){var t=this,n=arguments;return new Promise(function(o,r){var i=e.apply(t,n);function u(e){f(i,o,r,u,c,"next",e)}function c(e){f(i,o,r,u,c,"throw",e)}u(void 0)})}}function d(e,t,n){return(d=v()?Reflect.construct:function(e,t,n){var o=[null];o.push.apply(o,t);var r=new(Function.bind.apply(e,o));return n&&b(r,n.prototype),r}).apply(null,arguments)}function y(e){return(y=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)})(e)}function b(e,t){return(b=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e})(e,t)}function x(e){var t="function"==typeof Map?new Map:void 0;return(x=function(e){if(null===e||-1===Function.toString.call(e).indexOf("[native 
code]"))return e;if("function"!=typeof e)throw TypeError("Super expression must either be null or a function");if(void 0!==t){if(t.has(e))return t.get(e);t.set(e,n)}function n(){return d(e,arguments,y(this).constructor)}return n.prototype=Object.create(e.prototype,{constructor:{value:n,enumerable:!1,writable:!0,configurable:!0}}),b(n,e)})(e)}function v(){try{var e=!Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){}))}catch(e){}return(v=function(){return!!e})()}function h(e,t){var n,o,r,i,u={label:0,sent:function(){if(1&r[0])throw r[1];return r[1]},trys:[],ops:[]};return i={next:c(0),throw:c(1),return:c(2)},"function"==typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function c(i){return function(c){var a=[i,c];if(n)throw TypeError("Generator is already executing.");for(;u;)try{if(n=1,o&&(r=2&a[0]?o.return:a[0]?o.throw||((r=o.return)&&r.call(o),0):o.next)&&!(r=r.call(o,a[1])).done)return r;switch(o=0,r&&(a=[2&a[0],r.value]),a[0]){case 0:case 1:r=a;break;case 4:return u.label++,{value:a[1],done:!1};case 5:u.label++,o=a[1],a=[0];continue;case 7:a=u.ops.pop(),u.trys.pop();continue;default:if(!(r=(r=u.trys).length>0&&r[r.length-1])&&(6===a[0]||2===a[0])){u=0;continue}if(3===a[0]&&(!r||a[1]>r[0]&&a[1]<r[3])){u.label=a[1];break}if(6===a[0]&&u.label<r[1]){u.label=r[1],r=a;break}if(r&&u.label<r[2]){u.label=r[2],u.ops.push(a);break}r[2]&&u.ops.pop(),u.trys.pop();continue}a=t.call(e,u)}catch(e){a=[6,e],o=0}finally{n=r=0}if(5&a[0])throw a[1];return{value:a[0]?a[1]:void 0,done:!0}}}}var g=function(e){var t;if("function"!=typeof e&&null!==e)throw TypeError("Super expression must either be null or a function");function n(){var e,t;if(!(this instanceof n))throw TypeError("Cannot call a class as a function");return e=n,t=arguments,e=y(e),function(e,t){var n;if(t&&("object"==((n=t)&&"undefined"!=typeof Symbol&&n.constructor===Symbol?"symbol":typeof n)||"function"==typeof t))return t;if(void 0===e)throw ReferenceError("this hasn't been initialised - 
super() hasn't been called");return e}(this,v()?Reflect.construct(e,t||[],y(this).constructor):e.apply(this,t))}return n.prototype=Object.create(e&&e.prototype,{constructor:{value:n,writable:!0,configurable:!0}}),e&&b(n,e),t=[{key:"afterAdd",value:function(){return m(function(){return h(this,function(e){return[2]})})()}},{key:"beforeLoad",value:function(){return m(function(){return h(this,function(e){return[2]})})()}},{key:"load",value:function(){var e=this;return m(function(){return h(this,function(t){return e.aiPlugin.aiManager.registerLLMProvider("custom-llm",s),[2]})})()}},{key:"aiPlugin",get:function(){return this.app.pm.get("ai")}}],function(e,t){for(var n=0;n<t.length;n++){var o=t[n];o.enumerable=o.enumerable||!1,o.configurable=!0,"value"in o&&(o.writable=!0),Object.defineProperty(e,o.key,o)}}(n.prototype,t),n}(x(e.Plugin)),S=g}(),l}()});
10
+ !function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t(require("react"),require("@nocobase/plugin-ai/client"),require("@nocobase/client"),require("@nocobase/utils/client"),require("antd"),require("react-i18next")):"function"==typeof define&&define.amd?define("plugin-custom-llm",["react","@nocobase/plugin-ai/client","@nocobase/client","@nocobase/utils/client","antd","react-i18next"],t):"object"==typeof exports?exports["plugin-custom-llm"]=t(require("react"),require("@nocobase/plugin-ai/client"),require("@nocobase/client"),require("@nocobase/utils/client"),require("antd"),require("react-i18next")):e["plugin-custom-llm"]=t(e.react,e["@nocobase/plugin-ai/client"],e["@nocobase/client"],e["@nocobase/utils/client"],e.antd,e["react-i18next"])}(self,function(e,t,n,o,r,i){return function(){"use strict";var a={772:function(e){e.exports=n},645:function(e){e.exports=t},584:function(e){e.exports=o},721:function(e){e.exports=r},156:function(t){t.exports=e},238:function(e){e.exports=i}},c={};function l(e){var t=c[e];if(void 0!==t)return t.exports;var n=c[e]={exports:{}};return a[e](n,n.exports,l),n.exports}l.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return l.d(t,{a:t}),t},l.d=function(e,t){for(var n in t)l.o(t,n)&&!l.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},l.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},l.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})};var u={};return!function(){l.r(u),l.d(u,{PluginCustomLLMClient:function(){return g},default:function(){return S}});var e=l(772),t=l(156),n=l.n(t),o=l(584),r=l(238),i="@nocobase/plugin-custom-llm",a=l(721),c=l(645),p=function(){var t=(0,r.useTranslation)(i,{nsMode:"fallback"}).t;return 
n().createElement("div",{style:{marginBottom:24}},n().createElement(a.Collapse,{bordered:!1,size:"small",items:[{key:"options",label:t("Options"),forceRender:!0,children:n().createElement(e.SchemaComponent,{schema:{type:"void",name:"custom-llm",properties:{temperature:{title:(0,o.tval)("Temperature",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:.7,"x-component-props":{step:.1,min:0,max:2}},maxCompletionTokens:{title:(0,o.tval)("Max completion tokens",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:-1},topP:{title:(0,o.tval)("Top P",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:1,"x-component-props":{step:.1,min:0,max:1}},frequencyPenalty:{title:(0,o.tval)("Frequency penalty",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:0,"x-component-props":{step:.1,min:-2,max:2}},presencePenalty:{title:(0,o.tval)("Presence penalty",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:0,"x-component-props":{step:.1,min:-2,max:2}},responseFormat:{title:(0,o.tval)("Response format",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Select",enum:[{label:t("Text"),value:"text"},{label:t("JSON"),value:"json_object"}],default:"text"},timeout:{title:(0,o.tval)("Timeout (ms)",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:6e4},maxRetries:{title:(0,o.tval)("Max retries",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber",default:1}}}})}]}))},s={components:{ProviderSettingsForm:function(){return n().createElement(e.SchemaComponent,{schema:{type:"void",properties:{baseURL:{title:(0,o.tval)("Base URL",{ns:i}),type:"string",required:!0,"x-decorator":"FormItem","x-component":"TextAreaWithGlobalScope","x-component-props":{placeholder:"https://your-llm-server.com/v1"}},apiKey:{title:(0,o.tval)("API 
Key",{ns:i}),type:"string",required:!0,"x-decorator":"FormItem","x-component":"TextAreaWithGlobalScope"},disableStream:{title:(0,o.tval)("Disable streaming",{ns:i}),type:"boolean","x-decorator":"FormItem","x-component":"Checkbox","x-content":(0,o.tval)("Disable streaming description",{ns:i})},streamKeepAlive:{title:(0,o.tval)("Stream keepalive",{ns:i}),type:"boolean","x-decorator":"FormItem","x-component":"Checkbox","x-content":(0,o.tval)("Stream keepalive description",{ns:i})},keepAliveIntervalMs:{title:(0,o.tval)("Keepalive interval (ms)",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber","x-component-props":{placeholder:"5000",min:1e3,step:1e3,style:{width:"100%"}},description:(0,o.tval)("Keepalive interval description",{ns:i})},keepAliveContent:{title:(0,o.tval)("Keepalive content",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Input","x-component-props":{placeholder:"..."},description:(0,o.tval)("Keepalive content description",{ns:i})},timeout:{title:(0,o.tval)("Timeout (ms)",{ns:i}),type:"number","x-decorator":"FormItem","x-component":"InputNumber","x-component-props":{placeholder:"120000",min:0,step:1e3,style:{width:"100%"}},description:(0,o.tval)("Timeout description",{ns:i})},requestConfig:{title:(0,o.tval)("Request config (JSON)",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Input.TextArea","x-component-props":{placeholder:JSON.stringify({extraHeaders:{},extraBody:{},modelKwargs:{}},null,2),rows:6,style:{fontFamily:"monospace",fontSize:12}},description:(0,o.tval)("Request config description",{ns:i})},responseConfig:{title:(0,o.tval)("Response config (JSON)",{ns:i}),type:"string","x-decorator":"FormItem","x-component":"Input.TextArea","x-component-props":{placeholder:JSON.stringify({contentPath:"auto",reasoningKey:"reasoning_content",responseMapping:{content:"message.response"}},null,2),rows:8,style:{fontFamily:"monospace",fontSize:12}},description:(0,o.tval)("Response config 
description",{ns:i})}}}})},ModelSettingsForm:function(){return n().createElement(e.SchemaComponent,{components:{Options:p,ModelSelect:c.ModelSelect},schema:{type:"void",properties:{model:{title:(0,o.tval)("Model",{ns:i}),type:"string",required:!0,"x-decorator":"FormItem","x-component":"ModelSelect"},options:{type:"void","x-component":"Options"}}}})}}};function m(e,t,n,o,r,i,a){try{var c=e[i](a),l=c.value}catch(e){n(e);return}c.done?t(l):Promise.resolve(l).then(o,r)}function f(e){return function(){var t=this,n=arguments;return new Promise(function(o,r){var i=e.apply(t,n);function a(e){m(i,o,r,a,c,"next",e)}function c(e){m(i,o,r,a,c,"throw",e)}a(void 0)})}}function d(e,t,n){return(d=x()?Reflect.construct:function(e,t,n){var o=[null];o.push.apply(o,t);var r=new(Function.bind.apply(e,o));return n&&b(r,n.prototype),r}).apply(null,arguments)}function y(e){return(y=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)})(e)}function b(e,t){return(b=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e})(e,t)}function v(e){var t="function"==typeof Map?new Map:void 0;return(v=function(e){if(null===e||-1===Function.toString.call(e).indexOf("[native code]"))return e;if("function"!=typeof e)throw TypeError("Super expression must either be null or a function");if(void 0!==t){if(t.has(e))return t.get(e);t.set(e,n)}function n(){return d(e,arguments,y(this).constructor)}return n.prototype=Object.create(e.prototype,{constructor:{value:n,enumerable:!1,writable:!0,configurable:!0}}),b(n,e)})(e)}function x(){try{var e=!Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){}))}catch(e){}return(x=function(){return!!e})()}function h(e,t){var n,o,r,i,a={label:0,sent:function(){if(1&r[0])throw r[1];return r[1]},trys:[],ops:[]};return i={next:c(0),throw:c(1),return:c(2)},"function"==typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function c(i){return function(c){var l=[i,c];if(n)throw TypeError("Generator is 
already executing.");for(;a;)try{if(n=1,o&&(r=2&l[0]?o.return:l[0]?o.throw||((r=o.return)&&r.call(o),0):o.next)&&!(r=r.call(o,l[1])).done)return r;switch(o=0,r&&(l=[2&l[0],r.value]),l[0]){case 0:case 1:r=l;break;case 4:return a.label++,{value:l[1],done:!1};case 5:a.label++,o=l[1],l=[0];continue;case 7:l=a.ops.pop(),a.trys.pop();continue;default:if(!(r=(r=a.trys).length>0&&r[r.length-1])&&(6===l[0]||2===l[0])){a=0;continue}if(3===l[0]&&(!r||l[1]>r[0]&&l[1]<r[3])){a.label=l[1];break}if(6===l[0]&&a.label<r[1]){a.label=r[1],r=l;break}if(r&&a.label<r[2]){a.label=r[2],a.ops.push(l);break}r[2]&&a.ops.pop(),a.trys.pop();continue}l=t.call(e,a)}catch(e){l=[6,e],o=0}finally{n=r=0}if(5&l[0])throw l[1];return{value:l[0]?l[1]:void 0,done:!0}}}}var g=function(e){var t;if("function"!=typeof e&&null!==e)throw TypeError("Super expression must either be null or a function");function n(){var e,t;if(!(this instanceof n))throw TypeError("Cannot call a class as a function");return e=n,t=arguments,e=y(e),function(e,t){var n;if(t&&("object"==((n=t)&&"undefined"!=typeof Symbol&&n.constructor===Symbol?"symbol":typeof n)||"function"==typeof t))return t;if(void 0===e)throw ReferenceError("this hasn't been initialised - super() hasn't been called");return e}(this,x()?Reflect.construct(e,t||[],y(this).constructor):e.apply(this,t))}return n.prototype=Object.create(e&&e.prototype,{constructor:{value:n,writable:!0,configurable:!0}}),e&&b(n,e),t=[{key:"afterAdd",value:function(){return f(function(){return h(this,function(e){return[2]})})()}},{key:"beforeLoad",value:function(){return f(function(){return h(this,function(e){return[2]})})()}},{key:"load",value:function(){var e=this;return f(function(){return h(this,function(t){return e.aiPlugin.aiManager.registerLLMProvider("custom-llm",s),[2]})})()}},{key:"aiPlugin",get:function(){return this.app.pm.get("ai")}}],function(e,t){for(var n=0;n<t.length;n++){var o=t[n];o.enumerable=o.enumerable||!1,o.configurable=!0,"value"in 
o&&(o.writable=!0),Object.defineProperty(e,o.key,o)}}(n.prototype,t),n}(v(e.Plugin)),S=g}(),u}()});
@@ -12,9 +12,18 @@
12
12
  "Text": "Text",
13
13
  "JSON": "JSON",
14
14
  "Timeout (ms)": "Timeout (ms)",
15
+ "Timeout description": "Request timeout in milliseconds. Increase this for models with long thinking/processing phases. Default: 120000 (2 minutes).",
15
16
  "Max retries": "Max retries",
17
+ "Disable streaming": "Disable streaming",
18
+ "Disable streaming description": "Use non-streaming mode. Enable this for models that have a long \"thinking\" phase before responding, which can cause empty stream values and processing to terminate early.",
19
+ "Stream keepalive": "Stream keepalive",
20
+ "Stream keepalive description": "Keep stream alive during model thinking. Injects placeholder content when no data arrives within the keepalive interval. Works only when streaming is enabled.",
21
+ "Keepalive interval (ms)": "Keepalive interval (ms)",
22
+ "Keepalive interval description": "Interval in milliseconds between keepalive signals. Default: 5000 (5 seconds).",
23
+ "Keepalive content": "Keepalive content",
24
+ "Keepalive content description": "Placeholder text used as keepalive signal (invisible to the user). Default: '...'",
16
25
  "Request config (JSON)": "Request config (JSON)",
17
26
  "Request config description": "Extra configuration for LLM requests. Supported keys: extraHeaders (custom HTTP headers), extraBody (extra request body fields), modelKwargs (LangChain model kwargs).",
18
27
  "Response config (JSON)": "Response config (JSON)",
19
- "Response config description": "Configure response parsing. contentPath: 'auto' (default) or dot-path like '0.text'. reasoningKey: key name for reasoning content (default: 'reasoning_content')."
28
+ "Response config description": "Configure response parsing. contentPath: 'auto' or dot-path. reasoningKey: key for reasoning content. responseMapping: { content: 'dot.path' } — maps non-standard LLM response to OpenAI format (e.g., 'message.response')."
20
29
  }
@@ -12,9 +12,18 @@
12
12
  "Text": "Văn bản",
13
13
  "JSON": "JSON",
14
14
  "Timeout (ms)": "Thời gian chờ (ms)",
15
+ "Timeout description": "Thời gian chờ request tính bằng mili giây. Tăng giá trị này cho các model có giai đoạn thinking/xử lý dài. Mặc định: 120000 (2 phút).",
15
16
  "Max retries": "Số lần thử lại tối đa",
17
+ "Disable streaming": "Tắt streaming",
18
+ "Disable streaming description": "Sử dụng chế độ non-streaming. Bật tính năng này cho các model có giai đoạn \"thinking\" dài trước khi trả lời, gây ra stream rỗng và xử lý bị ngắt sớm.",
19
+ "Stream keepalive": "Giữ kết nối stream",
20
+ "Stream keepalive description": "Giữ stream hoạt động khi model đang thinking. Gửi nội dung giữ kết nối khi không có dữ liệu trong khoảng thời gian đã cấu hình. Chỉ hoạt động khi streaming được bật.",
21
+ "Keepalive interval (ms)": "Khoảng thời gian keepalive (ms)",
22
+ "Keepalive interval description": "Khoảng thời gian giữa các tín hiệu keepalive, tính bằng mili giây. Mặc định: 5000 (5 giây).",
23
+ "Keepalive content": "Nội dung keepalive",
24
+ "Keepalive content description": "Nội dung giữ kết nối (không hiển thị cho người dùng). Mặc định: '...'",
16
25
  "Request config (JSON)": "Cấu hình request (JSON)",
17
26
  "Request config description": "Cấu hình bổ sung cho request LLM. Các key hỗ trợ: extraHeaders (HTTP headers tùy chỉnh), extraBody (thêm trường vào request body), modelKwargs (tham số model LangChain).",
18
27
  "Response config (JSON)": "Cấu hình response (JSON)",
19
- "Response config description": "Cấu hình cách parse response. contentPath: 'auto' (mặc định) hoặc dot-path như '0.text'. reasoningKey: tên key cho reasoning content (mặc định: 'reasoning_content')."
28
+ "Response config description": "Cấu hình parse response. contentPath: 'auto' hoặc dot-path. reasoningKey: key reasoning. responseMapping: { content: 'dot.path' } — mapping response không chuẩn sang định dạng OpenAI (ví dụ: 'message.response')."
20
29
  }
@@ -42,6 +42,7 @@ __export(custom_llm_exports, {
42
42
  module.exports = __toCommonJS(custom_llm_exports);
43
43
  var import_plugin_ai = require("@nocobase/plugin-ai");
44
44
  var import_node_path = __toESM(require("node:path"));
45
+ const KEEPALIVE_PREFIX = "\u200B\u200B\u200B";
45
46
  function requireFromApp(moduleName) {
46
47
  const appNodeModules = process.env.NODE_MODULES_PATH || import_node_path.default.join(process.cwd(), "node_modules");
47
48
  const resolved = require.resolve(moduleName, { paths: [appNodeModules] });
@@ -55,6 +56,22 @@ function getChatOpenAI() {
55
56
  }
56
57
  return _ChatOpenAI;
57
58
  }
59
+ let _ChatGenerationChunk = null;
60
+ function getChatGenerationChunk() {
61
+ if (!_ChatGenerationChunk) {
62
+ const mod = requireFromApp("@langchain/core/outputs");
63
+ _ChatGenerationChunk = mod.ChatGenerationChunk;
64
+ }
65
+ return _ChatGenerationChunk;
66
+ }
67
+ let _AIMessageChunk = null;
68
+ function getAIMessageChunk() {
69
+ if (!_AIMessageChunk) {
70
+ const mod = requireFromApp("@langchain/core/messages");
71
+ _AIMessageChunk = mod.AIMessageChunk;
72
+ }
73
+ return _AIMessageChunk;
74
+ }
58
75
  function stripToolCallTags(content) {
59
76
  if (typeof content !== "string") {
60
77
  return content;
@@ -74,9 +91,7 @@ function extractTextContent(content, contentPath) {
74
91
  } catch {
75
92
  }
76
93
  }
77
- if (typeof content === "string") {
78
- return content;
79
- }
94
+ if (typeof content === "string") return content;
80
95
  if (Array.isArray(content)) {
81
96
  return content.filter((block) => block && block.type === "text").map((block) => block.text ?? "").join("");
82
97
  }
@@ -93,6 +108,189 @@ function safeParseJSON(str) {
93
108
  return {};
94
109
  }
95
110
  }
111
+ function getByPath(obj, dotPath) {
112
+ if (!obj || !dotPath) return void 0;
113
+ const keys = dotPath.split(".");
114
+ let current = obj;
115
+ for (const key of keys) {
116
+ if (current == null) return void 0;
117
+ current = current[key];
118
+ }
119
+ return current;
120
+ }
121
+ function createMappingFetch(responseMapping) {
122
+ const contentPath = responseMapping.content;
123
+ if (!contentPath) return void 0;
124
+ return async (url, init) => {
125
+ var _a, _b;
126
+ const response = await fetch(url, init);
127
+ if (!response.ok) return response;
128
+ const contentType = response.headers.get("content-type") || "";
129
+ if (contentType.includes("text/event-stream") || ((_a = init == null ? void 0 : init.headers) == null ? void 0 : _a["Accept"]) === "text/event-stream") {
130
+ const reader = (_b = response.body) == null ? void 0 : _b.getReader();
131
+ if (!reader) return response;
132
+ const stream = new ReadableStream({
133
+ async start(controller) {
134
+ const decoder = new TextDecoder();
135
+ const encoder = new TextEncoder();
136
+ let buffer = "";
137
+ try {
138
+ while (true) {
139
+ const { done, value } = await reader.read();
140
+ if (done) {
141
+ controller.close();
142
+ break;
143
+ }
144
+ buffer += decoder.decode(value, { stream: true });
145
+ const lines = buffer.split("\n");
146
+ buffer = lines.pop() || "";
147
+ for (const line of lines) {
148
+ if (line.startsWith("data: ")) {
149
+ const data = line.slice(6).trim();
150
+ if (data === "[DONE]") {
151
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
152
+ continue;
153
+ }
154
+ try {
155
+ const parsed = JSON.parse(data);
156
+ const mappedContent = getByPath(parsed, contentPath);
157
+ if (mappedContent !== void 0) {
158
+ const mapped = {
159
+ id: getByPath(parsed, responseMapping.id || "id") || "chatcmpl-custom",
160
+ object: "chat.completion.chunk",
161
+ created: Math.floor(Date.now() / 1e3),
162
+ model: "custom",
163
+ choices: [{
164
+ index: 0,
165
+ delta: { content: String(mappedContent), role: "assistant" },
166
+ finish_reason: null
167
+ }]
168
+ };
169
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(mapped)}
170
+
171
+ `));
172
+ } else {
173
+ controller.enqueue(encoder.encode(line + "\n"));
174
+ }
175
+ } catch {
176
+ controller.enqueue(encoder.encode(line + "\n"));
177
+ }
178
+ } else {
179
+ controller.enqueue(encoder.encode(line + "\n"));
180
+ }
181
+ }
182
+ }
183
+ } catch (err) {
184
+ controller.error(err);
185
+ }
186
+ }
187
+ });
188
+ return new Response(stream, {
189
+ status: response.status,
190
+ statusText: response.statusText,
191
+ headers: new Headers({
192
+ "content-type": "text/event-stream"
193
+ })
194
+ });
195
+ }
196
+ if (contentType.includes("application/json")) {
197
+ const body = await response.json();
198
+ const mappedContent = getByPath(body, contentPath);
199
+ if (mappedContent !== void 0) {
200
+ const mapped = {
201
+ id: getByPath(body, responseMapping.id || "id") || "chatcmpl-custom",
202
+ object: "chat.completion",
203
+ created: Math.floor(Date.now() / 1e3),
204
+ model: "custom",
205
+ choices: [{
206
+ index: 0,
207
+ message: {
208
+ role: getByPath(body, responseMapping.role || "") || "assistant",
209
+ content: String(mappedContent)
210
+ },
211
+ finish_reason: "stop"
212
+ }],
213
+ usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
214
+ };
215
+ return new Response(JSON.stringify(mapped), {
216
+ status: response.status,
217
+ statusText: response.statusText,
218
+ headers: new Headers({
219
+ "content-type": "application/json"
220
+ })
221
+ });
222
+ }
223
+ }
224
+ return response;
225
+ };
226
+ }
227
+ function wrapWithStreamKeepAlive(model, options) {
228
+ const originalStream = model._stream.bind(model);
229
+ const { intervalMs, keepAliveContent } = options;
230
+ model._stream = async function* (messages, opts, runManager) {
231
+ const ChatGenerationChunk = getChatGenerationChunk();
232
+ const AIMessageChunk = getAIMessageChunk();
233
+ const baseIterator = originalStream(messages, opts, runManager);
234
+ const buffer = [];
235
+ let streamDone = false;
236
+ let streamError = null;
237
+ let notifyReady = null;
238
+ const consumer = (async () => {
239
+ try {
240
+ for await (const chunk of baseIterator) {
241
+ buffer.push(chunk);
242
+ if (notifyReady) {
243
+ notifyReady();
244
+ notifyReady = null;
245
+ }
246
+ }
247
+ } catch (err) {
248
+ streamError = err;
249
+ } finally {
250
+ streamDone = true;
251
+ if (notifyReady) {
252
+ notifyReady();
253
+ notifyReady = null;
254
+ }
255
+ }
256
+ })();
257
+ try {
258
+ while (!streamDone || buffer.length > 0) {
259
+ while (buffer.length > 0) {
260
+ yield buffer.shift();
261
+ }
262
+ if (streamDone) break;
263
+ const waitForChunk = new Promise((resolve) => {
264
+ notifyReady = resolve;
265
+ });
266
+ let timer = null;
267
+ const result = await Promise.race([
268
+ waitForChunk.then(() => "chunk"),
269
+ new Promise((resolve) => {
270
+ timer = setTimeout(() => resolve("timeout"), intervalMs);
271
+ })
272
+ ]);
273
+ if (timer) clearTimeout(timer);
274
+ if (result === "timeout" && !streamDone && buffer.length === 0) {
275
+ const keepAliveChunk = new ChatGenerationChunk({
276
+ message: new AIMessageChunk({ content: KEEPALIVE_PREFIX + keepAliveContent }),
277
+ text: KEEPALIVE_PREFIX + keepAliveContent
278
+ });
279
+ yield keepAliveChunk;
280
+ }
281
+ }
282
+ if (streamError) {
283
+ throw streamError;
284
+ }
285
+ } finally {
286
+ await consumer;
287
+ }
288
+ };
289
+ return model;
290
+ }
291
+ function isKeepAlive(text) {
292
+ return typeof text === "string" && text.startsWith(KEEPALIVE_PREFIX);
293
+ }
96
294
  class CustomLLMProvider extends import_plugin_ai.LLMProvider {
97
295
  get baseURL() {
98
296
  return null;
@@ -106,9 +304,10 @@ class CustomLLMProvider extends import_plugin_ai.LLMProvider {
106
304
  return safeParseJSON((_a = this.serviceOptions) == null ? void 0 : _a.responseConfig);
107
305
  }
108
306
  createModel() {
109
- const { baseURL, apiKey } = this.serviceOptions || {};
307
+ const { baseURL, apiKey, disableStream, timeout, streamKeepAlive, keepAliveIntervalMs, keepAliveContent } = this.serviceOptions || {};
110
308
  const { responseFormat } = this.modelOptions || {};
111
309
  const reqConfig = this.requestConfig;
310
+ const resConfig = this.responseConfig;
112
311
  const responseFormatOptions = {
113
312
  type: responseFormat ?? "text"
114
313
  };
@@ -129,22 +328,36 @@ class CustomLLMProvider extends import_plugin_ai.LLMProvider {
129
328
  },
130
329
  verbose: false
131
330
  };
331
+ if (disableStream) {
332
+ config.streaming = false;
333
+ }
334
+ if (timeout && Number(timeout) > 0) {
335
+ config.timeout = Number(timeout);
336
+ config.configuration.timeout = Number(timeout);
337
+ }
132
338
  if (reqConfig.extraHeaders && typeof reqConfig.extraHeaders === "object") {
133
339
  config.configuration.defaultHeaders = reqConfig.extraHeaders;
134
340
  }
135
- return new ChatOpenAI(config);
341
+ if (resConfig.responseMapping) {
342
+ config.configuration.fetch = createMappingFetch(resConfig.responseMapping);
343
+ }
344
+ const model = new ChatOpenAI(config);
345
+ if (streamKeepAlive && !disableStream) {
346
+ return wrapWithStreamKeepAlive(model, {
347
+ intervalMs: Number(keepAliveIntervalMs) || 5e3,
348
+ keepAliveContent: keepAliveContent || "..."
349
+ });
350
+ }
351
+ return model;
136
352
  }
137
- /**
138
- * Handle streaming chunks — normalize content format using responseConfig.
139
- */
140
353
  parseResponseChunk(chunk) {
141
354
  const resConfig = this.responseConfig;
142
355
  const text = extractTextContent(chunk, resConfig.contentPath);
356
+ if (isKeepAlive(text)) {
357
+ return null;
358
+ }
143
359
  return stripToolCallTags(text);
144
360
  }
145
- /**
146
- * Handle saved messages — normalize array content when loading from DB.
147
- */
148
361
  parseResponseMessage(message) {
149
362
  const { content: rawContent, messageId, metadata, role, toolCalls, attachments, workContext } = message;
150
363
  const content = {
@@ -162,6 +375,8 @@ class CustomLLMProvider extends import_plugin_ai.LLMProvider {
162
375
  content.content = textBlocks.map((block) => block.text).join("") || "";
163
376
  }
164
377
  if (typeof content.content === "string") {
378
+ const escapedPrefix = KEEPALIVE_PREFIX.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
379
+ content.content = content.content.replace(new RegExp(escapedPrefix + ".*?(?=" + escapedPrefix + "|$)", "g"), "");
165
380
  content.content = stripToolCallTags(content.content);
166
381
  }
167
382
  return {
@@ -170,19 +385,13 @@ class CustomLLMProvider extends import_plugin_ai.LLMProvider {
170
385
  role
171
386
  };
172
387
  }
173
- /**
174
- * Parse reasoning content using responseConfig.reasoningKey.
175
- */
176
388
  parseReasoningContent(chunk) {
177
389
  var _a;
178
390
  const resConfig = this.responseConfig;
179
391
  const reasoningKey = resConfig.reasoningKey || "reasoning_content";
180
392
  const reasoning = (_a = chunk == null ? void 0 : chunk.additional_kwargs) == null ? void 0 : _a[reasoningKey];
181
393
  if (reasoning && typeof reasoning === "string") {
182
- return {
183
- status: "streaming",
184
- content: reasoning
185
- };
394
+ return { status: "streaming", content: reasoning };
186
395
  }
187
396
  return null;
188
397
  }
package/package.json CHANGED
@@ -3,7 +3,7 @@
3
3
  "displayName": "AI LLM: Custom (OpenAI Compatible)",
4
4
  "displayName.zh-CN": "AI LLM:自定义(OpenAI 兼容)",
5
5
  "description": "OpenAI-compatible LLM provider with auto response format detection for external LLM services.",
6
- "version": "1.0.0",
6
+ "version": "1.1.0",
7
7
  "main": "dist/server/index.js",
8
8
  "nocobase": {
9
9
  "supportedVersions": [
Binary file