onbuzz 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +267 -0
- package/README.md +425 -0
- package/bin/cli.js +556 -0
- package/bin/loxia-terminal-v2.js +162 -0
- package/bin/loxia-terminal.js +90 -0
- package/bin/start-with-terminal.js +200 -0
- package/node_modules/@isaacs/balanced-match/LICENSE.md +23 -0
- package/node_modules/@isaacs/balanced-match/README.md +60 -0
- package/node_modules/@isaacs/balanced-match/dist/commonjs/index.d.ts +9 -0
- package/node_modules/@isaacs/balanced-match/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/@isaacs/balanced-match/dist/commonjs/index.js +59 -0
- package/node_modules/@isaacs/balanced-match/dist/commonjs/index.js.map +1 -0
- package/node_modules/@isaacs/balanced-match/dist/commonjs/package.json +3 -0
- package/node_modules/@isaacs/balanced-match/dist/esm/index.d.ts +9 -0
- package/node_modules/@isaacs/balanced-match/dist/esm/index.d.ts.map +1 -0
- package/node_modules/@isaacs/balanced-match/dist/esm/index.js +54 -0
- package/node_modules/@isaacs/balanced-match/dist/esm/index.js.map +1 -0
- package/node_modules/@isaacs/balanced-match/dist/esm/package.json +3 -0
- package/node_modules/@isaacs/balanced-match/package.json +79 -0
- package/node_modules/@isaacs/brace-expansion/LICENSE +23 -0
- package/node_modules/@isaacs/brace-expansion/README.md +97 -0
- package/node_modules/@isaacs/brace-expansion/dist/commonjs/index.d.ts +6 -0
- package/node_modules/@isaacs/brace-expansion/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/@isaacs/brace-expansion/dist/commonjs/index.js +199 -0
- package/node_modules/@isaacs/brace-expansion/dist/commonjs/index.js.map +1 -0
- package/node_modules/@isaacs/brace-expansion/dist/commonjs/package.json +3 -0
- package/node_modules/@isaacs/brace-expansion/dist/esm/index.d.ts +6 -0
- package/node_modules/@isaacs/brace-expansion/dist/esm/index.d.ts.map +1 -0
- package/node_modules/@isaacs/brace-expansion/dist/esm/index.js +195 -0
- package/node_modules/@isaacs/brace-expansion/dist/esm/index.js.map +1 -0
- package/node_modules/@isaacs/brace-expansion/dist/esm/package.json +3 -0
- package/node_modules/@isaacs/brace-expansion/package.json +60 -0
- package/node_modules/glob/LICENSE.md +63 -0
- package/node_modules/glob/README.md +1177 -0
- package/node_modules/glob/dist/commonjs/glob.d.ts +388 -0
- package/node_modules/glob/dist/commonjs/glob.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/glob.js +247 -0
- package/node_modules/glob/dist/commonjs/glob.js.map +1 -0
- package/node_modules/glob/dist/commonjs/has-magic.d.ts +14 -0
- package/node_modules/glob/dist/commonjs/has-magic.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/has-magic.js +27 -0
- package/node_modules/glob/dist/commonjs/has-magic.js.map +1 -0
- package/node_modules/glob/dist/commonjs/ignore.d.ts +24 -0
- package/node_modules/glob/dist/commonjs/ignore.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/ignore.js +119 -0
- package/node_modules/glob/dist/commonjs/ignore.js.map +1 -0
- package/node_modules/glob/dist/commonjs/index.d.ts +97 -0
- package/node_modules/glob/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/index.js +68 -0
- package/node_modules/glob/dist/commonjs/index.js.map +1 -0
- package/node_modules/glob/dist/commonjs/index.min.js +4 -0
- package/node_modules/glob/dist/commonjs/index.min.js.map +7 -0
- package/node_modules/glob/dist/commonjs/package.json +3 -0
- package/node_modules/glob/dist/commonjs/pattern.d.ts +76 -0
- package/node_modules/glob/dist/commonjs/pattern.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/pattern.js +219 -0
- package/node_modules/glob/dist/commonjs/pattern.js.map +1 -0
- package/node_modules/glob/dist/commonjs/processor.d.ts +59 -0
- package/node_modules/glob/dist/commonjs/processor.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/processor.js +301 -0
- package/node_modules/glob/dist/commonjs/processor.js.map +1 -0
- package/node_modules/glob/dist/commonjs/walker.d.ts +97 -0
- package/node_modules/glob/dist/commonjs/walker.d.ts.map +1 -0
- package/node_modules/glob/dist/commonjs/walker.js +387 -0
- package/node_modules/glob/dist/commonjs/walker.js.map +1 -0
- package/node_modules/glob/dist/esm/glob.d.ts +388 -0
- package/node_modules/glob/dist/esm/glob.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/glob.js +243 -0
- package/node_modules/glob/dist/esm/glob.js.map +1 -0
- package/node_modules/glob/dist/esm/has-magic.d.ts +14 -0
- package/node_modules/glob/dist/esm/has-magic.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/has-magic.js +23 -0
- package/node_modules/glob/dist/esm/has-magic.js.map +1 -0
- package/node_modules/glob/dist/esm/ignore.d.ts +24 -0
- package/node_modules/glob/dist/esm/ignore.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/ignore.js +115 -0
- package/node_modules/glob/dist/esm/ignore.js.map +1 -0
- package/node_modules/glob/dist/esm/index.d.ts +97 -0
- package/node_modules/glob/dist/esm/index.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/index.js +55 -0
- package/node_modules/glob/dist/esm/index.js.map +1 -0
- package/node_modules/glob/dist/esm/index.min.js +4 -0
- package/node_modules/glob/dist/esm/index.min.js.map +7 -0
- package/node_modules/glob/dist/esm/package.json +3 -0
- package/node_modules/glob/dist/esm/pattern.d.ts +76 -0
- package/node_modules/glob/dist/esm/pattern.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/pattern.js +215 -0
- package/node_modules/glob/dist/esm/pattern.js.map +1 -0
- package/node_modules/glob/dist/esm/processor.d.ts +59 -0
- package/node_modules/glob/dist/esm/processor.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/processor.js +294 -0
- package/node_modules/glob/dist/esm/processor.js.map +1 -0
- package/node_modules/glob/dist/esm/walker.d.ts +97 -0
- package/node_modules/glob/dist/esm/walker.d.ts.map +1 -0
- package/node_modules/glob/dist/esm/walker.js +381 -0
- package/node_modules/glob/dist/esm/walker.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/LICENSE.md +55 -0
- package/node_modules/glob/node_modules/minimatch/README.md +453 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/assert-valid-pattern.d.ts +2 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/assert-valid-pattern.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/assert-valid-pattern.js +14 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/assert-valid-pattern.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/ast.d.ts +20 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/ast.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/ast.js +591 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/ast.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/brace-expressions.d.ts +8 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/brace-expressions.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/brace-expressions.js +152 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/brace-expressions.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/escape.d.ts +15 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/escape.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/escape.js +30 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/escape.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/index.d.ts +94 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/index.js +1029 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/index.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/package.json +3 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/unescape.d.ts +22 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/unescape.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/unescape.js +38 -0
- package/node_modules/glob/node_modules/minimatch/dist/commonjs/unescape.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/assert-valid-pattern.d.ts +2 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/assert-valid-pattern.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/assert-valid-pattern.js +10 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/assert-valid-pattern.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/ast.d.ts +20 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/ast.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/ast.js +587 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/ast.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/brace-expressions.d.ts +8 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/brace-expressions.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/brace-expressions.js +148 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/brace-expressions.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/escape.d.ts +15 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/escape.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/escape.js +26 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/escape.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/index.d.ts +94 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/index.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/index.js +1016 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/index.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/package.json +3 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/unescape.d.ts +22 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/unescape.d.ts.map +1 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/unescape.js +34 -0
- package/node_modules/glob/node_modules/minimatch/dist/esm/unescape.js.map +1 -0
- package/node_modules/glob/node_modules/minimatch/package.json +67 -0
- package/node_modules/glob/package.json +101 -0
- package/node_modules/minipass/LICENSE +15 -0
- package/node_modules/minipass/README.md +825 -0
- package/node_modules/minipass/dist/commonjs/index.d.ts +549 -0
- package/node_modules/minipass/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/minipass/dist/commonjs/index.js +1028 -0
- package/node_modules/minipass/dist/commonjs/index.js.map +1 -0
- package/node_modules/minipass/dist/commonjs/package.json +3 -0
- package/node_modules/minipass/dist/esm/index.d.ts +549 -0
- package/node_modules/minipass/dist/esm/index.d.ts.map +1 -0
- package/node_modules/minipass/dist/esm/index.js +1018 -0
- package/node_modules/minipass/dist/esm/index.js.map +1 -0
- package/node_modules/minipass/dist/esm/package.json +3 -0
- package/node_modules/minipass/package.json +82 -0
- package/node_modules/package-json-from-dist/LICENSE.md +63 -0
- package/node_modules/package-json-from-dist/README.md +110 -0
- package/node_modules/package-json-from-dist/dist/commonjs/index.d.ts +89 -0
- package/node_modules/package-json-from-dist/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/package-json-from-dist/dist/commonjs/index.js +134 -0
- package/node_modules/package-json-from-dist/dist/commonjs/index.js.map +1 -0
- package/node_modules/package-json-from-dist/dist/commonjs/package.json +3 -0
- package/node_modules/package-json-from-dist/dist/esm/index.d.ts +89 -0
- package/node_modules/package-json-from-dist/dist/esm/index.d.ts.map +1 -0
- package/node_modules/package-json-from-dist/dist/esm/index.js +129 -0
- package/node_modules/package-json-from-dist/dist/esm/index.js.map +1 -0
- package/node_modules/package-json-from-dist/dist/esm/package.json +3 -0
- package/node_modules/package-json-from-dist/package.json +68 -0
- package/node_modules/path-scurry/LICENSE.md +55 -0
- package/node_modules/path-scurry/README.md +636 -0
- package/node_modules/path-scurry/dist/commonjs/index.d.ts +1115 -0
- package/node_modules/path-scurry/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/path-scurry/dist/commonjs/index.js +2018 -0
- package/node_modules/path-scurry/dist/commonjs/index.js.map +1 -0
- package/node_modules/path-scurry/dist/commonjs/package.json +3 -0
- package/node_modules/path-scurry/dist/esm/index.d.ts +1115 -0
- package/node_modules/path-scurry/dist/esm/index.d.ts.map +1 -0
- package/node_modules/path-scurry/dist/esm/index.js +1983 -0
- package/node_modules/path-scurry/dist/esm/index.js.map +1 -0
- package/node_modules/path-scurry/dist/esm/package.json +3 -0
- package/node_modules/path-scurry/node_modules/lru-cache/LICENSE.md +55 -0
- package/node_modules/path-scurry/node_modules/lru-cache/README.md +383 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/index.d.ts +1323 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/index.js +1589 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/index.js.map +1 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/index.min.js +2 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/index.min.js.map +7 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/commonjs/package.json +3 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/index.d.ts +1323 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/index.d.ts.map +1 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/index.js +1585 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/index.js.map +1 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/index.min.js +2 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/index.min.js.map +7 -0
- package/node_modules/path-scurry/node_modules/lru-cache/dist/esm/package.json +3 -0
- package/node_modules/path-scurry/node_modules/lru-cache/package.json +101 -0
- package/node_modules/path-scurry/package.json +88 -0
- package/node_modules/rimraf/LICENSE.md +55 -0
- package/node_modules/rimraf/README.md +226 -0
- package/node_modules/rimraf/dist/commonjs/default-tmp.d.ts +3 -0
- package/node_modules/rimraf/dist/commonjs/default-tmp.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/default-tmp.js +58 -0
- package/node_modules/rimraf/dist/commonjs/default-tmp.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/error.d.ts +6 -0
- package/node_modules/rimraf/dist/commonjs/error.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/error.js +10 -0
- package/node_modules/rimraf/dist/commonjs/error.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/fix-eperm.d.ts +3 -0
- package/node_modules/rimraf/dist/commonjs/fix-eperm.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/fix-eperm.js +38 -0
- package/node_modules/rimraf/dist/commonjs/fix-eperm.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/fs.d.ts +15 -0
- package/node_modules/rimraf/dist/commonjs/fs.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/fs.js +33 -0
- package/node_modules/rimraf/dist/commonjs/fs.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/ignore-enoent.d.ts +3 -0
- package/node_modules/rimraf/dist/commonjs/ignore-enoent.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/ignore-enoent.js +24 -0
- package/node_modules/rimraf/dist/commonjs/ignore-enoent.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/index.d.ts +50 -0
- package/node_modules/rimraf/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/index.js +78 -0
- package/node_modules/rimraf/dist/commonjs/index.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/opt-arg.d.ts +34 -0
- package/node_modules/rimraf/dist/commonjs/opt-arg.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/opt-arg.js +53 -0
- package/node_modules/rimraf/dist/commonjs/opt-arg.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/package.json +3 -0
- package/node_modules/rimraf/dist/commonjs/path-arg.d.ts +4 -0
- package/node_modules/rimraf/dist/commonjs/path-arg.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/path-arg.js +48 -0
- package/node_modules/rimraf/dist/commonjs/path-arg.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/readdir-or-error.d.ts +3 -0
- package/node_modules/rimraf/dist/commonjs/readdir-or-error.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/readdir-or-error.js +19 -0
- package/node_modules/rimraf/dist/commonjs/readdir-or-error.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/retry-busy.d.ts +8 -0
- package/node_modules/rimraf/dist/commonjs/retry-busy.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/retry-busy.js +65 -0
- package/node_modules/rimraf/dist/commonjs/retry-busy.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-manual.d.ts +3 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-manual.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-manual.js +8 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-manual.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-move-remove.d.ts +4 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-move-remove.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-move-remove.js +138 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-move-remove.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-native.d.ts +4 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-native.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-native.js +24 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-native.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-posix.d.ts +4 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-posix.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-posix.js +103 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-posix.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-windows.d.ts +4 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-windows.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-windows.js +159 -0
- package/node_modules/rimraf/dist/commonjs/rimraf-windows.js.map +1 -0
- package/node_modules/rimraf/dist/commonjs/use-native.d.ts +4 -0
- package/node_modules/rimraf/dist/commonjs/use-native.d.ts.map +1 -0
- package/node_modules/rimraf/dist/commonjs/use-native.js +18 -0
- package/node_modules/rimraf/dist/commonjs/use-native.js.map +1 -0
- package/node_modules/rimraf/dist/esm/bin.d.mts +3 -0
- package/node_modules/rimraf/dist/esm/bin.d.mts.map +1 -0
- package/node_modules/rimraf/dist/esm/bin.mjs +250 -0
- package/node_modules/rimraf/dist/esm/bin.mjs.map +1 -0
- package/node_modules/rimraf/dist/esm/default-tmp.d.ts +3 -0
- package/node_modules/rimraf/dist/esm/default-tmp.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/default-tmp.js +55 -0
- package/node_modules/rimraf/dist/esm/default-tmp.js.map +1 -0
- package/node_modules/rimraf/dist/esm/error.d.ts +6 -0
- package/node_modules/rimraf/dist/esm/error.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/error.js +5 -0
- package/node_modules/rimraf/dist/esm/error.js.map +1 -0
- package/node_modules/rimraf/dist/esm/fix-eperm.d.ts +3 -0
- package/node_modules/rimraf/dist/esm/fix-eperm.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/fix-eperm.js +33 -0
- package/node_modules/rimraf/dist/esm/fix-eperm.js.map +1 -0
- package/node_modules/rimraf/dist/esm/fs.d.ts +15 -0
- package/node_modules/rimraf/dist/esm/fs.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/fs.js +18 -0
- package/node_modules/rimraf/dist/esm/fs.js.map +1 -0
- package/node_modules/rimraf/dist/esm/ignore-enoent.d.ts +3 -0
- package/node_modules/rimraf/dist/esm/ignore-enoent.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/ignore-enoent.js +19 -0
- package/node_modules/rimraf/dist/esm/ignore-enoent.js.map +1 -0
- package/node_modules/rimraf/dist/esm/index.d.ts +50 -0
- package/node_modules/rimraf/dist/esm/index.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/index.js +70 -0
- package/node_modules/rimraf/dist/esm/index.js.map +1 -0
- package/node_modules/rimraf/dist/esm/opt-arg.d.ts +34 -0
- package/node_modules/rimraf/dist/esm/opt-arg.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/opt-arg.js +46 -0
- package/node_modules/rimraf/dist/esm/opt-arg.js.map +1 -0
- package/node_modules/rimraf/dist/esm/package.json +3 -0
- package/node_modules/rimraf/dist/esm/path-arg.d.ts +4 -0
- package/node_modules/rimraf/dist/esm/path-arg.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/path-arg.js +46 -0
- package/node_modules/rimraf/dist/esm/path-arg.js.map +1 -0
- package/node_modules/rimraf/dist/esm/readdir-or-error.d.ts +3 -0
- package/node_modules/rimraf/dist/esm/readdir-or-error.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/readdir-or-error.js +14 -0
- package/node_modules/rimraf/dist/esm/readdir-or-error.js.map +1 -0
- package/node_modules/rimraf/dist/esm/retry-busy.d.ts +8 -0
- package/node_modules/rimraf/dist/esm/retry-busy.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/retry-busy.js +60 -0
- package/node_modules/rimraf/dist/esm/retry-busy.js.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-manual.d.ts +3 -0
- package/node_modules/rimraf/dist/esm/rimraf-manual.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-manual.js +5 -0
- package/node_modules/rimraf/dist/esm/rimraf-manual.js.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-move-remove.d.ts +4 -0
- package/node_modules/rimraf/dist/esm/rimraf-move-remove.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-move-remove.js +133 -0
- package/node_modules/rimraf/dist/esm/rimraf-move-remove.js.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-native.d.ts +4 -0
- package/node_modules/rimraf/dist/esm/rimraf-native.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-native.js +19 -0
- package/node_modules/rimraf/dist/esm/rimraf-native.js.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-posix.d.ts +4 -0
- package/node_modules/rimraf/dist/esm/rimraf-posix.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-posix.js +98 -0
- package/node_modules/rimraf/dist/esm/rimraf-posix.js.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-windows.d.ts +4 -0
- package/node_modules/rimraf/dist/esm/rimraf-windows.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/rimraf-windows.js +154 -0
- package/node_modules/rimraf/dist/esm/rimraf-windows.js.map +1 -0
- package/node_modules/rimraf/dist/esm/use-native.d.ts +4 -0
- package/node_modules/rimraf/dist/esm/use-native.d.ts.map +1 -0
- package/node_modules/rimraf/dist/esm/use-native.js +15 -0
- package/node_modules/rimraf/dist/esm/use-native.js.map +1 -0
- package/node_modules/rimraf/package.json +92 -0
- package/package.json +152 -0
- package/scripts/install-scanners.js +258 -0
- package/scripts/watchdog.js +147 -0
- package/src/analyzers/CSSAnalyzer.js +297 -0
- package/src/analyzers/ConfigValidator.js +690 -0
- package/src/analyzers/ESLintAnalyzer.js +320 -0
- package/src/analyzers/JavaScriptAnalyzer.js +261 -0
- package/src/analyzers/PrettierFormatter.js +247 -0
- package/src/analyzers/PythonAnalyzer.js +283 -0
- package/src/analyzers/SecurityAnalyzer.js +729 -0
- package/src/analyzers/SparrowAnalyzer.js +341 -0
- package/src/analyzers/TypeScriptAnalyzer.js +247 -0
- package/src/analyzers/codeCloneDetector/analyzer.js +344 -0
- package/src/analyzers/codeCloneDetector/detector.js +250 -0
- package/src/analyzers/codeCloneDetector/index.js +192 -0
- package/src/analyzers/codeCloneDetector/parser.js +199 -0
- package/src/analyzers/codeCloneDetector/reporter.js +148 -0
- package/src/analyzers/codeCloneDetector/scanner.js +88 -0
- package/src/core/agentPool.js +1957 -0
- package/src/core/agentScheduler.js +3212 -0
- package/src/core/contextManager.js +709 -0
- package/src/core/flowExecutor.js +928 -0
- package/src/core/messageProcessor.js +808 -0
- package/src/core/orchestrator.js +584 -0
- package/src/core/stateManager.js +1500 -0
- package/src/index.js +972 -0
- package/src/interfaces/cli.js +553 -0
- package/src/interfaces/terminal/__tests__/smoke/advancedFeatures.test.js +208 -0
- package/src/interfaces/terminal/__tests__/smoke/agentControl.test.js +236 -0
- package/src/interfaces/terminal/__tests__/smoke/agents.test.js +138 -0
- package/src/interfaces/terminal/__tests__/smoke/components.test.js +137 -0
- package/src/interfaces/terminal/__tests__/smoke/connection.test.js +350 -0
- package/src/interfaces/terminal/__tests__/smoke/enhancements.test.js +156 -0
- package/src/interfaces/terminal/__tests__/smoke/imports.test.js +332 -0
- package/src/interfaces/terminal/__tests__/smoke/messages.test.js +256 -0
- package/src/interfaces/terminal/__tests__/smoke/tools.test.js +388 -0
- package/src/interfaces/terminal/api/apiClient.js +299 -0
- package/src/interfaces/terminal/api/messageRouter.js +262 -0
- package/src/interfaces/terminal/api/session.js +266 -0
- package/src/interfaces/terminal/api/websocket.js +497 -0
- package/src/interfaces/terminal/components/AgentCreator.js +705 -0
- package/src/interfaces/terminal/components/AgentEditor.js +678 -0
- package/src/interfaces/terminal/components/AgentSwitcher.js +330 -0
- package/src/interfaces/terminal/components/ErrorBoundary.js +92 -0
- package/src/interfaces/terminal/components/ErrorPanel.js +264 -0
- package/src/interfaces/terminal/components/Header.js +28 -0
- package/src/interfaces/terminal/components/HelpPanel.js +231 -0
- package/src/interfaces/terminal/components/InputBox.js +118 -0
- package/src/interfaces/terminal/components/Layout.js +603 -0
- package/src/interfaces/terminal/components/LoadingSpinner.js +71 -0
- package/src/interfaces/terminal/components/MessageList.js +281 -0
- package/src/interfaces/terminal/components/MultilineTextInput.js +251 -0
- package/src/interfaces/terminal/components/SearchPanel.js +265 -0
- package/src/interfaces/terminal/components/SettingsPanel.js +415 -0
- package/src/interfaces/terminal/components/StatusBar.js +65 -0
- package/src/interfaces/terminal/components/TextInput.js +127 -0
- package/src/interfaces/terminal/config/agentEditorConstants.js +227 -0
- package/src/interfaces/terminal/config/constants.js +393 -0
- package/src/interfaces/terminal/index.js +168 -0
- package/src/interfaces/terminal/state/useAgentControl.js +496 -0
- package/src/interfaces/terminal/state/useAgents.js +537 -0
- package/src/interfaces/terminal/state/useConnection.js +444 -0
- package/src/interfaces/terminal/state/useMessages.js +630 -0
- package/src/interfaces/terminal/state/useTools.js +554 -0
- package/src/interfaces/terminal/utils/debugLogger.js +44 -0
- package/src/interfaces/terminal/utils/settingsStorage.js +232 -0
- package/src/interfaces/terminal/utils/theme.js +85 -0
- package/src/interfaces/webServer.js +5457 -0
- package/src/modules/fileExplorer/controller.js +413 -0
- package/src/modules/fileExplorer/index.js +37 -0
- package/src/modules/fileExplorer/middleware.js +92 -0
- package/src/modules/fileExplorer/routes.js +158 -0
- package/src/modules/fileExplorer/types.js +44 -0
- package/src/services/agentActivityService.js +399 -0
- package/src/services/aiService.js +2618 -0
- package/src/services/apiKeyManager.js +334 -0
- package/src/services/benchmarkService.js +196 -0
- package/src/services/budgetService.js +565 -0
- package/src/services/contextInjectionService.js +268 -0
- package/src/services/conversationCompactionService.js +1103 -0
- package/src/services/credentialVault.js +685 -0
- package/src/services/errorHandler.js +810 -0
- package/src/services/fileAttachmentService.js +547 -0
- package/src/services/flowContextService.js +189 -0
- package/src/services/memoryService.js +521 -0
- package/src/services/modelRouterService.js +365 -0
- package/src/services/modelsService.js +323 -0
- package/src/services/ollamaService.js +452 -0
- package/src/services/portRegistry.js +336 -0
- package/src/services/portTracker.js +223 -0
- package/src/services/projectDetector.js +404 -0
- package/src/services/promptService.js +372 -0
- package/src/services/qualityInspector.js +796 -0
- package/src/services/scheduleService.js +725 -0
- package/src/services/serviceRegistry.js +386 -0
- package/src/services/skillsService.js +486 -0
- package/src/services/telegramService.js +920 -0
- package/src/services/tokenCountingService.js +316 -0
- package/src/services/visualEditorBridge.js +1033 -0
- package/src/services/visualEditorServer.js +1727 -0
- package/src/services/whatsappService.js +663 -0
- package/src/tools/__tests__/webTool.e2e.test.js +569 -0
- package/src/tools/__tests__/webTool.unit.test.js +195 -0
- package/src/tools/agentCommunicationTool.js +1343 -0
- package/src/tools/agentDelayTool.js +498 -0
- package/src/tools/asyncToolManager.js +604 -0
- package/src/tools/baseTool.js +887 -0
- package/src/tools/browserTool.js +897 -0
- package/src/tools/cloneDetectionTool.js +581 -0
- package/src/tools/codeMapTool.js +857 -0
- package/src/tools/dependencyResolverTool.js +1212 -0
- package/src/tools/docxTool.js +623 -0
- package/src/tools/excelTool.js +636 -0
- package/src/tools/fileContentReplaceTool.js +840 -0
- package/src/tools/fileTreeTool.js +833 -0
- package/src/tools/filesystemTool.js +1217 -0
- package/src/tools/helpTool.js +198 -0
- package/src/tools/imageTool.js +1034 -0
- package/src/tools/importAnalyzerTool.js +1056 -0
- package/src/tools/jobDoneTool.js +388 -0
- package/src/tools/memoryTool.js +554 -0
- package/src/tools/pdfTool.js +627 -0
- package/src/tools/seekTool.js +883 -0
- package/src/tools/skillsTool.js +276 -0
- package/src/tools/staticAnalysisTool.js +2146 -0
- package/src/tools/taskManagerTool.js +2836 -0
- package/src/tools/terminalTool.js +2486 -0
- package/src/tools/userPromptTool.js +474 -0
- package/src/tools/videoTool.js +1139 -0
- package/src/tools/visionTool.js +507 -0
- package/src/tools/visualEditorTool.js +1175 -0
- package/src/tools/webTool.js +3114 -0
- package/src/tools/whatsappTool.js +457 -0
- package/src/types/agent.js +519 -0
- package/src/types/contextReference.js +972 -0
- package/src/types/conversation.js +730 -0
- package/src/types/toolCommand.js +747 -0
- package/src/utilities/attachmentValidator.js +288 -0
- package/src/utilities/browserStealth.js +630 -0
- package/src/utilities/configManager.js +618 -0
- package/src/utilities/constants.js +870 -0
- package/src/utilities/directoryAccessManager.js +566 -0
- package/src/utilities/fileProcessor.js +307 -0
- package/src/utilities/humanBehavior.js +453 -0
- package/src/utilities/jsonRepair.js +242 -0
- package/src/utilities/logger.js +436 -0
- package/src/utilities/platformUtils.js +255 -0
- package/src/utilities/platformUtils.test.js +98 -0
- package/src/utilities/stealthConstants.js +377 -0
- package/src/utilities/structuredFileValidator.js +699 -0
- package/src/utilities/tagParser.js +878 -0
- package/src/utilities/toolConstants.js +415 -0
- package/src/utilities/userDataDir.js +300 -0
- package/web-ui/build/brands/autopilot/favicon.svg +1 -0
- package/web-ui/build/brands/autopilot/logo.webp +0 -0
- package/web-ui/build/brands/onbuzz/favicon.svg +1 -0
- package/web-ui/build/brands/onbuzz/logo-text.webp +0 -0
- package/web-ui/build/brands/onbuzz/logo.webp +0 -0
- package/web-ui/build/index.html +15 -0
- package/web-ui/build/logo.png +0 -0
- package/web-ui/build/logo2.png +0 -0
- package/web-ui/build/static/index-SmQFfvBs.js +746 -0
- package/web-ui/build/static/index-V2ySwjHp.css +1 -0
|
@@ -0,0 +1,2618 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AIService - Manages communication with Azure backend API, model routing, rate limiting
|
|
3
|
+
*
|
|
4
|
+
* Purpose:
|
|
5
|
+
* - Backend API communication
|
|
6
|
+
* - Model selection and routing
|
|
7
|
+
* - Rate limiting enforcement
|
|
8
|
+
* - Conversation compactization
|
|
9
|
+
* - Token usage tracking
|
|
10
|
+
* - Request/response transformation
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import {
|
|
14
|
+
MODELS,
|
|
15
|
+
MODEL_PROVIDERS,
|
|
16
|
+
HTTP_STATUS,
|
|
17
|
+
ERROR_TYPES,
|
|
18
|
+
SYSTEM_DEFAULTS,
|
|
19
|
+
COMPACTION_CONFIG
|
|
20
|
+
} from '../utilities/constants.js';
|
|
21
|
+
import { getOllamaService, OLLAMA_MODEL_PREFIX } from './ollamaService.js';
|
|
22
|
+
|
|
23
|
+
class AIService {
|
|
24
|
+
constructor(config, logger, budgetService, errorHandler) {
|
|
25
|
+
this.config = config;
|
|
26
|
+
this.logger = logger;
|
|
27
|
+
this.budgetService = budgetService;
|
|
28
|
+
this.errorHandler = errorHandler;
|
|
29
|
+
|
|
30
|
+
this.baseUrl = config.backend?.baseUrl || 'https://loxia-api-g7hrb8bxdae8a2h7.z02.azurefd.net';
|
|
31
|
+
this.timeout = config.backend?.timeout || 270000; // 4.5 minutes for LLM responses (no auto-retry)
|
|
32
|
+
|
|
33
|
+
// Rate limiting
|
|
34
|
+
this.rateLimiters = new Map();
|
|
35
|
+
this.requestQueue = new Map();
|
|
36
|
+
|
|
37
|
+
// Circuit breaker
|
|
38
|
+
this.circuitBreaker = {
|
|
39
|
+
failures: 0,
|
|
40
|
+
lastFailureTime: null,
|
|
41
|
+
isOpen: false,
|
|
42
|
+
threshold: 5,
|
|
43
|
+
timeout: 30000 // 30 seconds
|
|
44
|
+
};
|
|
45
|
+
|
|
46
|
+
// Model specifications
|
|
47
|
+
this.modelSpecs = this._initializeModelSpecs();
|
|
48
|
+
|
|
49
|
+
// Conversation managers for multi-model support
|
|
50
|
+
this.conversationManagers = new Map();
|
|
51
|
+
|
|
52
|
+
// API Key Manager reference (will be set by LoxiaSystem)
|
|
53
|
+
this.apiKeyManager = null;
|
|
54
|
+
|
|
55
|
+
// Agent Pool reference (will be set by LoxiaSystem)
|
|
56
|
+
this.agentPool = null;
|
|
57
|
+
|
|
58
|
+
// Active requests tracking for abort support
|
|
59
|
+
// Maps agentId -> { controller: AbortController, requestId: string, startTime: Date }
|
|
60
|
+
this.activeRequests = new Map();
|
|
61
|
+
|
|
62
|
+
// Ollama service for local/offline models (lazy initialized)
|
|
63
|
+
this._ollamaService = null;
|
|
64
|
+
|
|
65
|
+
// Track user-initiated aborts (separate from timeout aborts)
|
|
66
|
+
// Set contains agentIds that were aborted by user action
|
|
67
|
+
this.userAbortedRequests = new Set();
|
|
68
|
+
|
|
69
|
+
// Model reliability tracking (in-memory, global across agents)
|
|
70
|
+
// Maps modelId -> { successCount: number, failureCount: number, lastSuccess: Date|null, lastFailure: Date|null }
|
|
71
|
+
this.modelReliability = new Map();
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
/**
|
|
75
|
+
* Get Ollama service (lazy init)
|
|
76
|
+
* @returns {import('./ollamaService.js').OllamaService}
|
|
77
|
+
*/
|
|
78
|
+
_getOllamaService() {
|
|
79
|
+
if (!this._ollamaService) {
|
|
80
|
+
this._ollamaService = getOllamaService(this.config, this.logger);
|
|
81
|
+
}
|
|
82
|
+
return this._ollamaService;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* Check if a model is an Ollama local model
|
|
87
|
+
*/
|
|
88
|
+
_isOllamaModel(model) {
|
|
89
|
+
return model && model.startsWith(OLLAMA_MODEL_PREFIX);
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
|
|
93
|
+
* Record a successful model response
|
|
94
|
+
* @param {string} model - Model name
|
|
95
|
+
*/
|
|
96
|
+
recordModelSuccess(model) {
|
|
97
|
+
const existing = this.modelReliability.get(model) || { successCount: 0, failureCount: 0, lastSuccess: null, lastFailure: null };
|
|
98
|
+
existing.successCount++;
|
|
99
|
+
existing.lastSuccess = new Date();
|
|
100
|
+
this.modelReliability.set(model, existing);
|
|
101
|
+
this.logger?.debug(`Model success recorded: ${model}`, { successCount: existing.successCount });
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Record a model failure
|
|
106
|
+
* @param {string} model - Model name
|
|
107
|
+
* @param {string} errorType - Type of error that occurred
|
|
108
|
+
*/
|
|
109
|
+
recordModelFailure(model, errorType = 'unknown') {
|
|
110
|
+
const existing = this.modelReliability.get(model) || { successCount: 0, failureCount: 0, lastSuccess: null, lastFailure: null };
|
|
111
|
+
existing.failureCount++;
|
|
112
|
+
existing.lastFailure = new Date();
|
|
113
|
+
existing.lastErrorType = errorType;
|
|
114
|
+
this.modelReliability.set(model, existing);
|
|
115
|
+
this.logger?.debug(`Model failure recorded: ${model}`, { failureCount: existing.failureCount, errorType });
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
/**
|
|
119
|
+
* Get reliability info for a model
|
|
120
|
+
* @param {string} model - Model name
|
|
121
|
+
* @returns {Object} Reliability info with verified status
|
|
122
|
+
*/
|
|
123
|
+
getModelReliability(model) {
|
|
124
|
+
const info = this.modelReliability.get(model);
|
|
125
|
+
if (!info) {
|
|
126
|
+
return { verified: false, successCount: 0, failureCount: 0 };
|
|
127
|
+
}
|
|
128
|
+
return {
|
|
129
|
+
verified: info.successCount > 0,
|
|
130
|
+
successCount: info.successCount,
|
|
131
|
+
failureCount: info.failureCount,
|
|
132
|
+
lastSuccess: info.lastSuccess,
|
|
133
|
+
lastFailure: info.lastFailure,
|
|
134
|
+
lastErrorType: info.lastErrorType
|
|
135
|
+
};
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
/**
|
|
139
|
+
* Get all model reliability data
|
|
140
|
+
* @returns {Object} Map of model -> reliability info
|
|
141
|
+
*/
|
|
142
|
+
getAllModelReliability() {
|
|
143
|
+
const result = {};
|
|
144
|
+
for (const [model, info] of this.modelReliability.entries()) {
|
|
145
|
+
result[model] = {
|
|
146
|
+
verified: info.successCount > 0,
|
|
147
|
+
successCount: info.successCount,
|
|
148
|
+
failureCount: info.failureCount
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
return result;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
/**
|
|
155
|
+
* Set models service reference (for model suggestions)
|
|
156
|
+
* @param {ModelsService} modelsService - Models service instance
|
|
157
|
+
*/
|
|
158
|
+
setModelsService(modelsService) {
  // Stored for later use by getModelSuggestions(), which queries
  // this.modelsService?.getModels() when a model fails.
  this.modelsService = modelsService;
}
|
|
161
|
+
|
|
162
|
+
/**
|
|
163
|
+
* Check if an error is model-related (should trigger model suggestion modal)
|
|
164
|
+
* @param {Error} error - The error to check
|
|
165
|
+
* @returns {boolean} True if error is model-related
|
|
166
|
+
*/
|
|
167
|
+
isModelRelatedError(error) {
  const message = error.message || '';
  const code = error.code || error.status;

  // Numeric status codes that always indicate a model-level problem:
  // rate limit, service unavailable, model not found.
  const modelErrorCodes = new Set([429, 503, 404]);
  if (modelErrorCodes.has(code)) {
    return true;
  }

  // Message fragments that point at the model itself being the problem.
  const patterns = [
    /model.*not found/i,
    /model.*unavailable/i,
    /model.*does not exist/i,
    /invalid.*model/i,
    /unsupported.*model/i,
    /quota.*exceeded/i,
    /rate.*limit/i,
    /context.*length.*exceeded/i,
    /token.*limit/i,
    /maximum.*context/i,
    /capacity/i,
    /overloaded/i,
    /503/,
    /429/
  ];

  for (const pattern of patterns) {
    if (pattern.test(message)) {
      return true;
    }
  }
  return false;
}
|
|
196
|
+
|
|
197
|
+
/**
|
|
198
|
+
* Classify the type of model error
|
|
199
|
+
* @param {Error} error - The error to classify
|
|
200
|
+
* @returns {string} Error type
|
|
201
|
+
*/
|
|
202
|
+
classifyModelError(error) {
  const message = (error.message || '').toLowerCase();
  const code = error.code || error.status;

  // Ordered rules: the first matching predicate wins, mirroring the
  // precedence of the original if/else chain.
  const rules = [
    ['model_not_found', () => code === 404 || message.includes('not found') || message.includes('does not exist')],
    ['rate_limit', () => code === 429 || message.includes('rate limit') || message.includes('quota')],
    ['context_exceeded', () => message.includes('context') || message.includes('token limit') || message.includes('maximum')],
    ['model_overloaded', () => code === 503 || message.includes('overload') || message.includes('capacity')],
    ['auth_error', () => code === 401 || code === 403]
  ];

  for (const [label, matches] of rules) {
    if (matches()) {
      return label;
    }
  }
  return 'unknown';
}
|
|
223
|
+
|
|
224
|
+
/**
|
|
225
|
+
* Get model suggestions with reliability info
|
|
226
|
+
* @param {string} failedModel - The model that failed
|
|
227
|
+
* @param {Error} error - The error that occurred
|
|
228
|
+
* @returns {Object} Suggestions object with models and reliability info
|
|
229
|
+
*/
|
|
230
|
+
getModelSuggestions(failedModel, error) {
  const available = this.modelsService?.getModels() || [];

  // Build a suggestion entry for every model except the one that failed,
  // annotated with its in-memory reliability stats.
  const suggestions = available
    .filter((model) => model.name !== failedModel)
    .map((model) => {
      const reliability = this.getModelReliability(model.name);
      return {
        name: model.name,
        displayName: model.displayName || model.name,
        provider: model.provider,
        contextWindow: model.contextWindow,
        verified: reliability.verified,
        successCount: reliability.successCount,
        failureCount: reliability.failureCount
      };
    });

  // Verified models first; within each group, higher success count first.
  suggestions.sort((a, b) => {
    if (a.verified !== b.verified) {
      return b.verified ? 1 : -1;
    }
    return b.successCount - a.successCount;
  });

  return {
    failedModel,
    errorType: this.classifyModelError(error),
    errorMessage: error.message,
    suggestions // All available models (sorted by reliability)
  };
}
|
|
262
|
+
|
|
263
|
+
/**
|
|
264
|
+
* Send message to backend API
|
|
265
|
+
* @param {string} model - Target model name
|
|
266
|
+
* @param {string|Array} messages - Message content or conversation history
|
|
267
|
+
* @param {Object} options - Additional options (agentId, systemPrompt, etc.)
|
|
268
|
+
* @returns {Promise<Object>} API response with content and metadata
|
|
269
|
+
*/
|
|
270
|
+
async sendMessage(model, messages, options = {}) {
|
|
271
|
+
const requestId = `req-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
|
272
|
+
|
|
273
|
+
// Route Ollama models directly to local inference (bypass Azure backend)
|
|
274
|
+
if (this._isOllamaModel(model)) {
|
|
275
|
+
return this._sendOllamaMessage(model, messages, options, requestId);
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
try {
|
|
279
|
+
// Check circuit breaker (compaction calls pass skipCircuitBreaker: true to avoid tripping it)
|
|
280
|
+
if (this._isCircuitBreakerOpen() && !options.skipCircuitBreaker) {
|
|
281
|
+
throw new Error('Service temporarily unavailable - circuit breaker is open');
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
// Skip local model validation - let Azure backend handle it
|
|
285
|
+
|
|
286
|
+
// Check rate limits
|
|
287
|
+
await this._checkRateLimit(model);
|
|
288
|
+
|
|
289
|
+
// Format messages for specific model
|
|
290
|
+
const formattedMessages = this._formatMessagesForModel(messages, model, options);
|
|
291
|
+
|
|
292
|
+
// Prepare request payload
|
|
293
|
+
const payload = {
|
|
294
|
+
model, // Use original model name - let Azure backend handle it
|
|
295
|
+
messages: formattedMessages,
|
|
296
|
+
options: {
|
|
297
|
+
// Respect options.maxTokens if provided (e.g., from compaction service)
|
|
298
|
+
max_tokens: Math.min(options.maxTokens || this.modelSpecs[model]?.maxTokens || COMPACTION_CONFIG.MAX_OUTPUT_TOKENS, COMPACTION_CONFIG.MAX_OUTPUT_TOKENS),
|
|
299
|
+
temperature: options.temperature || 0.7,
|
|
300
|
+
stream: options.stream || false
|
|
301
|
+
},
|
|
302
|
+
metadata: {
|
|
303
|
+
requestId,
|
|
304
|
+
agentId: options.agentId,
|
|
305
|
+
timestamp: new Date().toISOString()
|
|
306
|
+
}
|
|
307
|
+
};
|
|
308
|
+
|
|
309
|
+
// Add system prompt if provided
|
|
310
|
+
if (options.systemPrompt) {
|
|
311
|
+
payload.system = options.systemPrompt;
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
this.logger.info(`Sending message to model: ${model}`, {
|
|
315
|
+
requestId,
|
|
316
|
+
agentId: options.agentId,
|
|
317
|
+
messageCount: Array.isArray(messages) ? messages.length : 1,
|
|
318
|
+
maxTokens: payload.max_tokens
|
|
319
|
+
});
|
|
320
|
+
|
|
321
|
+
// Make API request
|
|
322
|
+
const response = await this._makeAPIRequest('/chat/completions', payload, requestId, {
|
|
323
|
+
...options,
|
|
324
|
+
sessionId: options.sessionId, // Pass session ID for API key retrieval
|
|
325
|
+
platformProvided: options.platformProvided !== false
|
|
326
|
+
});
|
|
327
|
+
|
|
328
|
+
// Track usage
|
|
329
|
+
if (response.usage) {
|
|
330
|
+
await this.trackUsage(options.agentId, model, {
|
|
331
|
+
prompt_tokens: response.usage.prompt_tokens || 0,
|
|
332
|
+
completion_tokens: response.usage.completion_tokens || 0,
|
|
333
|
+
total_tokens: response.usage.total_tokens || 0
|
|
334
|
+
});
|
|
335
|
+
}
|
|
336
|
+
|
|
337
|
+
// Reset circuit breaker on success
|
|
338
|
+
this._resetCircuitBreaker();
|
|
339
|
+
|
|
340
|
+
// Track model reliability - mark as verified
|
|
341
|
+
this.recordModelSuccess(model);
|
|
342
|
+
|
|
343
|
+
return {
|
|
344
|
+
content: response.choices[0]?.message?.content || '',
|
|
345
|
+
model: response.model,
|
|
346
|
+
tokenUsage: response.usage,
|
|
347
|
+
requestId,
|
|
348
|
+
finishReason: response.choices[0]?.finish_reason
|
|
349
|
+
};
|
|
350
|
+
|
|
351
|
+
} catch (error) {
|
|
352
|
+
// Handle circuit breaker (skip for compaction calls to avoid contaminating main circuit)
|
|
353
|
+
if (!options.skipCircuitBreaker) {
|
|
354
|
+
this._recordFailure();
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
// Track model failure
|
|
358
|
+
this.recordModelFailure(model, error.code || error.status || 'unknown');
|
|
359
|
+
|
|
360
|
+
this.logger.error(`AI service request failed: ${error.message}`, {
|
|
361
|
+
requestId,
|
|
362
|
+
model,
|
|
363
|
+
agentId: options.agentId,
|
|
364
|
+
error: error.stack
|
|
365
|
+
});
|
|
366
|
+
|
|
367
|
+
// Handle specific error types
|
|
368
|
+
await this.handleHttpError(error, { requestId, model, agentId: options.agentId });
|
|
369
|
+
|
|
370
|
+
throw error;
|
|
371
|
+
}
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
/**
|
|
375
|
+
* Send message to backend API with streaming response
|
|
376
|
+
* @param {string} model - Target model name
|
|
377
|
+
* @param {string|Array} messages - Message content or conversation history
|
|
378
|
+
* @param {Object} options - Additional options (agentId, systemPrompt, etc.)
|
|
379
|
+
* @param {Function} options.onChunk - Callback for each text chunk
|
|
380
|
+
* @param {Function} options.onDone - Callback when stream completes
|
|
381
|
+
* @param {Function} options.onError - Callback for errors
|
|
382
|
+
* @returns {Promise<Object>} Final response with content and metadata
|
|
383
|
+
*/
|
|
384
|
+
async sendMessageStream(model, messages, options = {}) {
|
|
385
|
+
const requestId = `req-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
|
386
|
+
const { onChunk, onDone, onError } = options;
|
|
387
|
+
|
|
388
|
+
// Route Ollama models directly to local streaming inference
|
|
389
|
+
if (this._isOllamaModel(model)) {
|
|
390
|
+
return this._sendOllamaStream(model, messages, options, requestId);
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
try {
|
|
394
|
+
// Check circuit breaker
|
|
395
|
+
if (this._isCircuitBreakerOpen()) {
|
|
396
|
+
throw new Error('Service temporarily unavailable - circuit breaker is open');
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
// Check rate limits
|
|
400
|
+
await this._checkRateLimit(model);
|
|
401
|
+
|
|
402
|
+
// Format messages for specific model
|
|
403
|
+
const formattedMessages = this._formatMessagesForModel(messages, model, options);
|
|
404
|
+
|
|
405
|
+
// Prepare request payload
|
|
406
|
+
const payload = {
|
|
407
|
+
model,
|
|
408
|
+
messages: formattedMessages,
|
|
409
|
+
options: {
|
|
410
|
+
max_tokens: Math.min(options.maxTokens || this.modelSpecs[model]?.maxTokens || COMPACTION_CONFIG.MAX_OUTPUT_TOKENS, COMPACTION_CONFIG.MAX_OUTPUT_TOKENS),
|
|
411
|
+
temperature: options.temperature || 0.7,
|
|
412
|
+
stream: true // Force streaming
|
|
413
|
+
},
|
|
414
|
+
metadata: {
|
|
415
|
+
requestId,
|
|
416
|
+
agentId: options.agentId,
|
|
417
|
+
timestamp: new Date().toISOString()
|
|
418
|
+
}
|
|
419
|
+
};
|
|
420
|
+
|
|
421
|
+
// Add system prompt if provided
|
|
422
|
+
if (options.systemPrompt) {
|
|
423
|
+
payload.system = options.systemPrompt;
|
|
424
|
+
}
|
|
425
|
+
|
|
426
|
+
this.logger.info(`Sending streaming message to model: ${model}`, {
|
|
427
|
+
requestId,
|
|
428
|
+
agentId: options.agentId,
|
|
429
|
+
messageCount: Array.isArray(messages) ? messages.length : 1
|
|
430
|
+
});
|
|
431
|
+
|
|
432
|
+
// Make streaming API request
|
|
433
|
+
const response = await this._makeStreamingAPIRequest(payload, requestId, {
|
|
434
|
+
...options,
|
|
435
|
+
onChunk,
|
|
436
|
+
onDone,
|
|
437
|
+
onError
|
|
438
|
+
});
|
|
439
|
+
|
|
440
|
+
// Track usage
|
|
441
|
+
if (response.usage) {
|
|
442
|
+
await this.trackUsage(options.agentId, model, {
|
|
443
|
+
prompt_tokens: response.usage.prompt_tokens || response.usage.input_tokens || 0,
|
|
444
|
+
completion_tokens: response.usage.completion_tokens || response.usage.output_tokens || 0,
|
|
445
|
+
total_tokens: response.usage.total_tokens || 0
|
|
446
|
+
});
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
// Reset circuit breaker on success
|
|
450
|
+
this._resetCircuitBreaker();
|
|
451
|
+
|
|
452
|
+
// Track model reliability - mark as verified
|
|
453
|
+
this.recordModelSuccess(model);
|
|
454
|
+
|
|
455
|
+
return {
|
|
456
|
+
content: response.content || '',
|
|
457
|
+
model: response.model || model,
|
|
458
|
+
tokenUsage: response.usage,
|
|
459
|
+
requestId,
|
|
460
|
+
finishReason: response.finishReason || 'stop'
|
|
461
|
+
};
|
|
462
|
+
|
|
463
|
+
} catch (error) {
|
|
464
|
+
// Handle circuit breaker
|
|
465
|
+
this._recordFailure();
|
|
466
|
+
|
|
467
|
+
// Track model failure
|
|
468
|
+
this.recordModelFailure(model, error.code || error.status || 'unknown');
|
|
469
|
+
|
|
470
|
+
this.logger.error(`AI streaming service request failed: ${error.message}`, {
|
|
471
|
+
requestId,
|
|
472
|
+
model,
|
|
473
|
+
agentId: options.agentId,
|
|
474
|
+
error: error.stack
|
|
475
|
+
});
|
|
476
|
+
|
|
477
|
+
if (onError) {
|
|
478
|
+
onError(error);
|
|
479
|
+
}
|
|
480
|
+
|
|
481
|
+
throw error;
|
|
482
|
+
}
|
|
483
|
+
}
|
|
484
|
+
|
|
485
|
+
/**
|
|
486
|
+
* Make streaming API request using Server-Sent Events
|
|
487
|
+
* @private
|
|
488
|
+
*/
|
|
489
|
+
async _makeStreamingAPIRequest(payload, requestId, options = {}) {
|
|
490
|
+
const { onChunk, onDone, onError, sessionId, platformProvided } = options;
|
|
491
|
+
|
|
492
|
+
const backendUrl = `${this.baseUrl}/llm/chat`;
|
|
493
|
+
|
|
494
|
+
// Get API key
|
|
495
|
+
let apiKey = null;
|
|
496
|
+
let vendorApiKey = null;
|
|
497
|
+
|
|
498
|
+
if (this.apiKeyManager) {
|
|
499
|
+
const keys = this.apiKeyManager.getKeysForRequest(sessionId, {
|
|
500
|
+
platformProvided: platformProvided !== false,
|
|
501
|
+
vendor: this._getVendorFromModel(payload.model)
|
|
502
|
+
});
|
|
503
|
+
|
|
504
|
+
apiKey = keys.loxiaApiKey;
|
|
505
|
+
vendorApiKey = keys.vendorApiKey;
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
if (!apiKey && options.apiKey) {
|
|
509
|
+
apiKey = options.apiKey;
|
|
510
|
+
}
|
|
511
|
+
|
|
512
|
+
if (!apiKey && this.config.apiKey) {
|
|
513
|
+
apiKey = this.config.apiKey;
|
|
514
|
+
}
|
|
515
|
+
|
|
516
|
+
if (!apiKey) {
|
|
517
|
+
throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
|
|
518
|
+
}
|
|
519
|
+
|
|
520
|
+
// Validate messages before sending
|
|
521
|
+
if (!payload.messages || payload.messages.length === 0) {
|
|
522
|
+
throw new Error('No messages to send - messages array is empty');
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
const lastMessage = payload.messages[payload.messages.length - 1];
|
|
526
|
+
const messageContent = lastMessage?.content || '';
|
|
527
|
+
|
|
528
|
+
if (!messageContent.trim()) {
|
|
529
|
+
this.logger?.warn('Empty message content detected', {
|
|
530
|
+
messagesCount: payload.messages.length,
|
|
531
|
+
lastMessageRole: lastMessage?.role,
|
|
532
|
+
agentId: options.agentId
|
|
533
|
+
});
|
|
534
|
+
throw new Error('Message content is empty - cannot send empty request');
|
|
535
|
+
}
|
|
536
|
+
|
|
537
|
+
// Transform payload for Azure backend
|
|
538
|
+
const azurePayload = {
|
|
539
|
+
conversationId: requestId,
|
|
540
|
+
message: messageContent,
|
|
541
|
+
messages: payload.messages,
|
|
542
|
+
model: payload.model,
|
|
543
|
+
requestId,
|
|
544
|
+
options: payload.options || {},
|
|
545
|
+
stream: true, // Enable streaming
|
|
546
|
+
platformProvided: platformProvided !== false
|
|
547
|
+
};
|
|
548
|
+
|
|
549
|
+
if (payload.system) {
|
|
550
|
+
azurePayload.systemPrompt = payload.system;
|
|
551
|
+
}
|
|
552
|
+
|
|
553
|
+
if (platformProvided) {
|
|
554
|
+
azurePayload.apiKey = apiKey;
|
|
555
|
+
} else {
|
|
556
|
+
if (vendorApiKey) {
|
|
557
|
+
azurePayload.vendorApiKey = vendorApiKey;
|
|
558
|
+
}
|
|
559
|
+
azurePayload.apiKey = apiKey;
|
|
560
|
+
}
|
|
561
|
+
|
|
562
|
+
this.logger.info('Making streaming request to Azure backend', {
|
|
563
|
+
url: backendUrl,
|
|
564
|
+
model: payload.model,
|
|
565
|
+
requestId,
|
|
566
|
+
agentId: options.agentId,
|
|
567
|
+
hasApiKey: !!apiKey
|
|
568
|
+
});
|
|
569
|
+
|
|
570
|
+
const controller = new AbortController();
|
|
571
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
572
|
+
|
|
573
|
+
// Track active request for abort support
|
|
574
|
+
const agentId = options.agentId;
|
|
575
|
+
if (agentId) {
|
|
576
|
+
this.activeRequests.set(agentId, {
|
|
577
|
+
controller,
|
|
578
|
+
requestId,
|
|
579
|
+
startTime: new Date(),
|
|
580
|
+
type: 'streaming'
|
|
581
|
+
});
|
|
582
|
+
this.logger.debug(`Registered active streaming request for agent: ${agentId}`, { requestId });
|
|
583
|
+
}
|
|
584
|
+
|
|
585
|
+
// Track accumulated content for partial return on abort
|
|
586
|
+
let fullContent = '';
|
|
587
|
+
|
|
588
|
+
try {
|
|
589
|
+
const response = await fetch(backendUrl, {
|
|
590
|
+
method: 'POST',
|
|
591
|
+
headers: {
|
|
592
|
+
'Content-Type': 'application/json',
|
|
593
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
594
|
+
'X-Request-ID': requestId,
|
|
595
|
+
'Accept': 'text/event-stream'
|
|
596
|
+
},
|
|
597
|
+
body: JSON.stringify(azurePayload),
|
|
598
|
+
signal: controller.signal
|
|
599
|
+
});
|
|
600
|
+
|
|
601
|
+
clearTimeout(timeoutId);
|
|
602
|
+
|
|
603
|
+
if (!response.ok) {
|
|
604
|
+
const errorText = await response.text();
|
|
605
|
+
const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
|
|
606
|
+
error.status = response.status;
|
|
607
|
+
throw error;
|
|
608
|
+
}
|
|
609
|
+
|
|
610
|
+
// Check if we actually got a stream
|
|
611
|
+
const contentType = response.headers.get('content-type');
|
|
612
|
+
if (!contentType?.includes('text/event-stream')) {
|
|
613
|
+
// Not a stream, parse as regular JSON (fallback)
|
|
614
|
+
let data;
|
|
615
|
+
try {
|
|
616
|
+
data = await response.json();
|
|
617
|
+
} catch (jsonError) {
|
|
618
|
+
// Backend returned non-JSON or malformed JSON in non-streaming response
|
|
619
|
+
const rawText = '(body already consumed)';
|
|
620
|
+
this.logger.warn('Non-streaming response has malformed JSON, falling back to text', {
|
|
621
|
+
requestId,
|
|
622
|
+
contentType,
|
|
623
|
+
error: jsonError.message
|
|
624
|
+
});
|
|
625
|
+
// Try to return whatever we can
|
|
626
|
+
const fallbackResult = {
|
|
627
|
+
content: '',
|
|
628
|
+
usage: null,
|
|
629
|
+
model: payload.model,
|
|
630
|
+
finishReason: 'error'
|
|
631
|
+
};
|
|
632
|
+
if (onError) {
|
|
633
|
+
onError(new Error(`Backend returned malformed response: ${jsonError.message}`));
|
|
634
|
+
}
|
|
635
|
+
if (onDone) {
|
|
636
|
+
onDone(fallbackResult);
|
|
637
|
+
}
|
|
638
|
+
return fallbackResult;
|
|
639
|
+
}
|
|
640
|
+
if (onChunk && data.content) {
|
|
641
|
+
onChunk(data.content);
|
|
642
|
+
}
|
|
643
|
+
if (onDone) {
|
|
644
|
+
onDone(data);
|
|
645
|
+
}
|
|
646
|
+
return {
|
|
647
|
+
content: data.content || '',
|
|
648
|
+
usage: data.usage,
|
|
649
|
+
model: data.model || payload.model,
|
|
650
|
+
finishReason: data.finishReason || 'stop'
|
|
651
|
+
};
|
|
652
|
+
}
|
|
653
|
+
|
|
654
|
+
// Validate response body before attempting to read stream
|
|
655
|
+
if (!response.body) {
|
|
656
|
+
this.logger.warn('Response body is null despite OK status, falling back to text parsing', {
|
|
657
|
+
requestId,
|
|
658
|
+
status: response.status,
|
|
659
|
+
contentType
|
|
660
|
+
});
|
|
661
|
+
// Try to get content as text and return it
|
|
662
|
+
const textContent = await response.text().catch(() => '');
|
|
663
|
+
if (onChunk && textContent) {
|
|
664
|
+
onChunk(textContent);
|
|
665
|
+
}
|
|
666
|
+
const fallbackResult = {
|
|
667
|
+
content: textContent,
|
|
668
|
+
usage: null,
|
|
669
|
+
model: payload.model,
|
|
670
|
+
finishReason: 'stop'
|
|
671
|
+
};
|
|
672
|
+
if (onDone) {
|
|
673
|
+
onDone(fallbackResult);
|
|
674
|
+
}
|
|
675
|
+
return fallbackResult;
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
// Process SSE stream with line buffering
|
|
679
|
+
// SSE events can be split across read() calls, so we buffer incomplete lines
|
|
680
|
+
const reader = response.body.getReader();
|
|
681
|
+
const decoder = new TextDecoder();
|
|
682
|
+
fullContent = ''; // Reset (declared outside try for catch block access)
|
|
683
|
+
let finalData = null;
|
|
684
|
+
let lineBuffer = ''; // Buffer for incomplete lines across chunks
|
|
685
|
+
|
|
686
|
+
while (true) {
|
|
687
|
+
const { done, value } = await reader.read();
|
|
688
|
+
|
|
689
|
+
if (done) {
|
|
690
|
+
break;
|
|
691
|
+
}
|
|
692
|
+
|
|
693
|
+
const text = decoder.decode(value, { stream: true });
|
|
694
|
+
lineBuffer += text;
|
|
695
|
+
|
|
696
|
+
// Split on newlines but keep the last fragment if it doesn't end with \n
|
|
697
|
+
const parts = lineBuffer.split('\n');
|
|
698
|
+
// The last element may be an incomplete line — keep it in the buffer
|
|
699
|
+
lineBuffer = parts.pop() || '';
|
|
700
|
+
|
|
701
|
+
for (const line of parts) {
|
|
702
|
+
const trimmedLine = line.trim();
|
|
703
|
+
if (trimmedLine.startsWith('data: ')) {
|
|
704
|
+
const data = trimmedLine.slice(6).trim();
|
|
705
|
+
|
|
706
|
+
if (data === '[DONE]') {
|
|
707
|
+
continue;
|
|
708
|
+
}
|
|
709
|
+
|
|
710
|
+
try {
|
|
711
|
+
const parsed = JSON.parse(data);
|
|
712
|
+
|
|
713
|
+
if (parsed.type === 'chunk' && parsed.content) {
|
|
714
|
+
fullContent += parsed.content;
|
|
715
|
+
if (onChunk) {
|
|
716
|
+
onChunk(parsed.content);
|
|
717
|
+
}
|
|
718
|
+
} else if (parsed.type === 'done') {
|
|
719
|
+
finalData = {
|
|
720
|
+
content: parsed.content || fullContent,
|
|
721
|
+
usage: parsed.usage,
|
|
722
|
+
model: parsed.model || payload.model,
|
|
723
|
+
finishReason: parsed.finishReason || 'stop'
|
|
724
|
+
};
|
|
725
|
+
} else if (parsed.type === 'error') {
|
|
726
|
+
const error = new Error(parsed.error);
|
|
727
|
+
error.code = parsed.code;
|
|
728
|
+
if (onError) {
|
|
729
|
+
onError(error);
|
|
730
|
+
}
|
|
731
|
+
throw error;
|
|
732
|
+
}
|
|
733
|
+
} catch (parseError) {
|
|
734
|
+
// Rethrow intentional backend error events (have .code from parsed.type === 'error')
|
|
735
|
+
// but never rethrow JSON SyntaxErrors — those are just malformed SSE chunks
|
|
736
|
+
if (parseError.code && !(parseError instanceof SyntaxError)) {
|
|
737
|
+
throw parseError;
|
|
738
|
+
}
|
|
739
|
+
this.logger?.debug('Skipping unparseable SSE data', {
|
|
740
|
+
requestId,
|
|
741
|
+
dataPreview: data.substring(0, 120),
|
|
742
|
+
error: parseError.message
|
|
743
|
+
});
|
|
744
|
+
continue;
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
}
|
|
748
|
+
}
|
|
749
|
+
|
|
750
|
+
// Process any remaining buffered data (in case stream ended without trailing newline)
|
|
751
|
+
if (lineBuffer.trim().startsWith('data: ')) {
|
|
752
|
+
const data = lineBuffer.trim().slice(6).trim();
|
|
753
|
+
if (data !== '[DONE]') {
|
|
754
|
+
try {
|
|
755
|
+
const parsed = JSON.parse(data);
|
|
756
|
+
if (parsed.type === 'done') {
|
|
757
|
+
finalData = {
|
|
758
|
+
content: parsed.content || fullContent,
|
|
759
|
+
usage: parsed.usage,
|
|
760
|
+
model: parsed.model || payload.model,
|
|
761
|
+
finishReason: parsed.finishReason || 'stop'
|
|
762
|
+
};
|
|
763
|
+
}
|
|
764
|
+
} catch (e) {
|
|
765
|
+
// Ignore parse errors on final buffer
|
|
766
|
+
}
|
|
767
|
+
}
|
|
768
|
+
}
|
|
769
|
+
|
|
770
|
+
// Final callback with complete data
|
|
771
|
+
if (onDone) {
|
|
772
|
+
onDone(finalData || { content: fullContent });
|
|
773
|
+
}
|
|
774
|
+
|
|
775
|
+
// Clean up active request tracking on success
|
|
776
|
+
if (agentId) {
|
|
777
|
+
this.activeRequests.delete(agentId);
|
|
778
|
+
this.logger.debug(`Cleaned up active request for agent: ${agentId}`, { requestId });
|
|
779
|
+
}
|
|
780
|
+
|
|
781
|
+
return finalData || {
|
|
782
|
+
content: fullContent,
|
|
783
|
+
usage: null,
|
|
784
|
+
model: payload.model,
|
|
785
|
+
finishReason: 'stop'
|
|
786
|
+
};
|
|
787
|
+
|
|
788
|
+
} catch (error) {
|
|
789
|
+
clearTimeout(timeoutId);
|
|
790
|
+
|
|
791
|
+
// Check if this was a user-initiated abort (tracked in userAbortedRequests Set)
|
|
792
|
+
const wasUserAbort = agentId && this.userAbortedRequests.has(agentId);
|
|
793
|
+
|
|
794
|
+
// Clean up tracking
|
|
795
|
+
if (agentId) {
|
|
796
|
+
this.activeRequests.delete(agentId);
|
|
797
|
+
this.userAbortedRequests.delete(agentId); // Clean up abort flag
|
|
798
|
+
}
|
|
799
|
+
|
|
800
|
+
const isTimeout = error.name === 'AbortError' && !wasUserAbort;
|
|
801
|
+
const isGatewayTimeout = error.status === 504 || error.message?.includes('504');
|
|
802
|
+
|
|
803
|
+
// Handle user-initiated abort gracefully - no error, just return partial content
|
|
804
|
+
if (wasUserAbort) {
|
|
805
|
+
this.logger.info('Streaming request aborted by user', {
|
|
806
|
+
requestId,
|
|
807
|
+
agentId
|
|
808
|
+
});
|
|
809
|
+
|
|
810
|
+
// Return partial content that was received before abort
|
|
811
|
+
const abortResult = {
|
|
812
|
+
content: fullContent || '', // Use accumulated content
|
|
813
|
+
usage: null,
|
|
814
|
+
model: payload.model,
|
|
815
|
+
finishReason: 'user_abort',
|
|
816
|
+
aborted: true
|
|
817
|
+
};
|
|
818
|
+
|
|
819
|
+
if (onDone) {
|
|
820
|
+
onDone(abortResult);
|
|
821
|
+
}
|
|
822
|
+
|
|
823
|
+
return abortResult;
|
|
824
|
+
}
|
|
825
|
+
|
|
826
|
+
this.logger.error('Streaming request to Azure backend failed', {
|
|
827
|
+
requestId,
|
|
828
|
+
error: error.message,
|
|
829
|
+
status: error.status,
|
|
830
|
+
isTimeout,
|
|
831
|
+
isGatewayTimeout
|
|
832
|
+
});
|
|
833
|
+
|
|
834
|
+
if (isGatewayTimeout || isTimeout) {
|
|
835
|
+
const timeoutError = new Error(
|
|
836
|
+
`The AI service is taking longer than expected to respond. ` +
|
|
837
|
+
`Please try again in a moment. (Error: ${isGatewayTimeout ? '504 Gateway Timeout' : 'Request Timeout'})`
|
|
838
|
+
);
|
|
839
|
+
timeoutError.status = isGatewayTimeout ? 504 : 408;
|
|
840
|
+
timeoutError.isTimeout = true;
|
|
841
|
+
timeoutError.shouldReturnToChat = true;
|
|
842
|
+
throw timeoutError;
|
|
843
|
+
}
|
|
844
|
+
|
|
845
|
+
throw error;
|
|
846
|
+
}
|
|
847
|
+
}
|
|
848
|
+
|
|
849
|
+
/**
|
|
850
|
+
* Compactize conversation for specific model context window
|
|
851
|
+
* @param {Array} messages - Message history
|
|
852
|
+
* @param {string} targetModel - Target model name
|
|
853
|
+
* @returns {Promise<Array>} Compactized messages
|
|
854
|
+
*/
|
|
855
|
+
async compactizeConversation(messages, targetModel) {
|
|
856
|
+
const modelSpec = this.modelSpecs[targetModel];
|
|
857
|
+
if (!modelSpec) {
|
|
858
|
+
throw new Error(`Unknown model: ${targetModel}`);
|
|
859
|
+
}
|
|
860
|
+
|
|
861
|
+
const maxTokens = modelSpec.contextWindow * 0.8; // Use 80% of context window
|
|
862
|
+
let currentTokens = 0;
|
|
863
|
+
const compactizedMessages = [];
|
|
864
|
+
|
|
865
|
+
// Estimate tokens for each message
|
|
866
|
+
const messagesWithTokens = await Promise.all(
|
|
867
|
+
messages.map(async (msg) => ({
|
|
868
|
+
...msg,
|
|
869
|
+
estimatedTokens: await this._estimateTokens(msg.content, targetModel)
|
|
870
|
+
}))
|
|
871
|
+
);
|
|
872
|
+
|
|
873
|
+
// Start from the most recent messages
|
|
874
|
+
const reversedMessages = [...messagesWithTokens].reverse();
|
|
875
|
+
|
|
876
|
+
for (const message of reversedMessages) {
|
|
877
|
+
if (currentTokens + message.estimatedTokens > maxTokens) {
|
|
878
|
+
// If we've exceeded the limit, summarize older messages
|
|
879
|
+
if (compactizedMessages.length === 0) {
|
|
880
|
+
// If even the latest message is too long, truncate it
|
|
881
|
+
const truncatedContent = await this._truncateMessage(message.content, maxTokens);
|
|
882
|
+
compactizedMessages.unshift({
|
|
883
|
+
...message,
|
|
884
|
+
content: truncatedContent,
|
|
885
|
+
estimatedTokens: maxTokens
|
|
886
|
+
});
|
|
887
|
+
}
|
|
888
|
+
break;
|
|
889
|
+
}
|
|
890
|
+
|
|
891
|
+
compactizedMessages.unshift(message);
|
|
892
|
+
currentTokens += message.estimatedTokens;
|
|
893
|
+
}
|
|
894
|
+
|
|
895
|
+
// If we have remaining older messages, create a summary
|
|
896
|
+
const remainingMessages = messagesWithTokens.slice(0, -compactizedMessages.length);
|
|
897
|
+
if (remainingMessages.length > 0) {
|
|
898
|
+
const summary = await this._summarizeMessages(remainingMessages, targetModel);
|
|
899
|
+
compactizedMessages.unshift({
|
|
900
|
+
role: 'system',
|
|
901
|
+
content: `Previous conversation summary: ${summary}`,
|
|
902
|
+
timestamp: remainingMessages[0].timestamp,
|
|
903
|
+
type: 'summary',
|
|
904
|
+
estimatedTokens: await this._estimateTokens(summary, targetModel)
|
|
905
|
+
});
|
|
906
|
+
}
|
|
907
|
+
|
|
908
|
+
this.logger.info(`Conversation compactized for model: ${targetModel}`, {
|
|
909
|
+
originalMessages: messages.length,
|
|
910
|
+
compactizedMessages: compactizedMessages.length,
|
|
911
|
+
estimatedTokens: currentTokens,
|
|
912
|
+
maxTokens
|
|
913
|
+
});
|
|
914
|
+
|
|
915
|
+
return compactizedMessages;
|
|
916
|
+
}
|
|
917
|
+
|
|
918
|
+
/**
|
|
919
|
+
* Track token usage and costs
|
|
920
|
+
* @param {number} tokens - Number of tokens used
|
|
921
|
+
* @param {number} cost - Cost in dollars
|
|
922
|
+
* @returns {Promise<void>}
|
|
923
|
+
*/
|
|
924
|
+
async trackUsage(agentId, model, tokenUsage, cost) {
|
|
925
|
+
try {
|
|
926
|
+
if (this.budgetService) {
|
|
927
|
+
this.budgetService.trackUsage(agentId, model, tokenUsage);
|
|
928
|
+
}
|
|
929
|
+
} catch (error) {
|
|
930
|
+
this.logger.error(`Usage tracking failed: ${error.message}`);
|
|
931
|
+
}
|
|
932
|
+
}
|
|
933
|
+
|
|
934
|
+
/**
|
|
935
|
+
* Handle HTTP errors with comprehensive error handling
|
|
936
|
+
* @param {Error} error - Error object
|
|
937
|
+
* @param {Object} context - Request context
|
|
938
|
+
* @returns {Promise<void>}
|
|
939
|
+
*/
|
|
940
|
+
async handleHttpError(error, context) {
|
|
941
|
+
// If error already has timeout flags, re-throw it directly to preserve them
|
|
942
|
+
if (error.isTimeout || error.shouldReturnToChat) {
|
|
943
|
+
throw error;
|
|
944
|
+
}
|
|
945
|
+
|
|
946
|
+
const errorType = this.errorHandler?.classifyError?.(error, context);
|
|
947
|
+
|
|
948
|
+
switch (error.status || error.code) {
|
|
949
|
+
case HTTP_STATUS.BAD_REQUEST:
|
|
950
|
+
this.logger.error('Bad request to AI service', { context, error: error.message });
|
|
951
|
+
throw new Error(`Invalid request: ${error.message}`);
|
|
952
|
+
|
|
953
|
+
case HTTP_STATUS.UNAUTHORIZED:
|
|
954
|
+
this.logger.error('Authentication failed with AI service', { context });
|
|
955
|
+
throw new Error('Authentication failed - check API credentials');
|
|
956
|
+
|
|
957
|
+
case HTTP_STATUS.FORBIDDEN:
|
|
958
|
+
this.logger.error('Access forbidden to AI service', { context });
|
|
959
|
+
throw new Error('Access forbidden - insufficient permissions');
|
|
960
|
+
|
|
961
|
+
case HTTP_STATUS.NOT_FOUND:
|
|
962
|
+
this.logger.error('AI service endpoint not found', { context });
|
|
963
|
+
throw new Error('Service endpoint not found');
|
|
964
|
+
|
|
965
|
+
case HTTP_STATUS.TOO_MANY_REQUESTS:
|
|
966
|
+
this.logger.warn('Rate limit exceeded', { context });
|
|
967
|
+
await this._handleRateLimit(context);
|
|
968
|
+
throw new Error('Rate limit exceeded - request queued for retry');
|
|
969
|
+
|
|
970
|
+
case HTTP_STATUS.INTERNAL_SERVER_ERROR:
|
|
971
|
+
case HTTP_STATUS.BAD_GATEWAY:
|
|
972
|
+
case HTTP_STATUS.SERVICE_UNAVAILABLE:
|
|
973
|
+
case HTTP_STATUS.GATEWAY_TIMEOUT:
|
|
974
|
+
this.logger.error('AI service unavailable', { context, status: error.status });
|
|
975
|
+
await this._handleServiceUnavailable(context);
|
|
976
|
+
throw new Error('AI service temporarily unavailable');
|
|
977
|
+
|
|
978
|
+
default:
|
|
979
|
+
this.logger.error('Unknown AI service error', { context, error: error.message });
|
|
980
|
+
throw new Error(`AI service error: ${error.message}`);
|
|
981
|
+
}
|
|
982
|
+
}
|
|
983
|
+
|
|
984
|
+
/**
|
|
985
|
+
* Set API key manager instance
|
|
986
|
+
* @param {ApiKeyManager} apiKeyManager - API key manager instance
|
|
987
|
+
*/
|
|
988
|
+
setApiKeyManager(apiKeyManager) {
|
|
989
|
+
this.apiKeyManager = apiKeyManager;
|
|
990
|
+
|
|
991
|
+
this.logger?.info('API key manager set for AI service', {
|
|
992
|
+
hasManager: !!apiKeyManager
|
|
993
|
+
});
|
|
994
|
+
}
|
|
995
|
+
|
|
996
|
+
/**
|
|
997
|
+
* Set agent pool reference
|
|
998
|
+
* @param {Object} agentPool - Agent pool instance
|
|
999
|
+
*/
|
|
1000
|
+
setAgentPool(agentPool) {
|
|
1001
|
+
this.agentPool = agentPool;
|
|
1002
|
+
|
|
1003
|
+
this.logger?.info('Agent pool set for AI service', {
|
|
1004
|
+
hasAgentPool: !!agentPool
|
|
1005
|
+
});
|
|
1006
|
+
}
|
|
1007
|
+
|
|
1008
|
+
/**
|
|
1009
|
+
* Abort an active request for a specific agent
|
|
1010
|
+
* Used when user clicks "Stop" to immediately cancel streaming
|
|
1011
|
+
* @param {string} agentId - Agent ID whose request should be aborted
|
|
1012
|
+
* @param {string} partialContent - Optional partial content received before abort
|
|
1013
|
+
* @returns {boolean} True if request was found and aborted
|
|
1014
|
+
*/
|
|
1015
|
+
abortRequest(agentId, partialContent = '') {
|
|
1016
|
+
const activeRequest = this.activeRequests.get(agentId);
|
|
1017
|
+
|
|
1018
|
+
if (!activeRequest) {
|
|
1019
|
+
this.logger?.debug(`No active request found for agent: ${agentId}`);
|
|
1020
|
+
return false;
|
|
1021
|
+
}
|
|
1022
|
+
|
|
1023
|
+
const { controller, requestId, startTime, type } = activeRequest;
|
|
1024
|
+
const duration = Date.now() - startTime.getTime();
|
|
1025
|
+
|
|
1026
|
+
this.logger?.info(`Aborting ${type} request for agent: ${agentId}`, {
|
|
1027
|
+
requestId,
|
|
1028
|
+
durationMs: duration
|
|
1029
|
+
});
|
|
1030
|
+
|
|
1031
|
+
// Mark this as a user-initiated abort BEFORE triggering abort
|
|
1032
|
+
// This allows catch blocks to distinguish user abort from timeout
|
|
1033
|
+
this.userAbortedRequests.add(agentId);
|
|
1034
|
+
|
|
1035
|
+
// Trigger the abort
|
|
1036
|
+
controller.abort();
|
|
1037
|
+
|
|
1038
|
+
// Clean up active request tracking (but keep userAbortedRequests for catch block)
|
|
1039
|
+
this.activeRequests.delete(agentId);
|
|
1040
|
+
|
|
1041
|
+
this.logger?.info(`Successfully aborted request for agent: ${agentId}`, {
|
|
1042
|
+
requestId,
|
|
1043
|
+
durationMs: duration
|
|
1044
|
+
});
|
|
1045
|
+
|
|
1046
|
+
return true;
|
|
1047
|
+
}
|
|
1048
|
+
|
|
1049
|
+
/**
|
|
1050
|
+
* Check if an agent has an active request
|
|
1051
|
+
* @param {string} agentId - Agent ID to check
|
|
1052
|
+
* @returns {Object|null} Active request info or null
|
|
1053
|
+
*/
|
|
1054
|
+
getActiveRequest(agentId) {
|
|
1055
|
+
const request = this.activeRequests.get(agentId);
|
|
1056
|
+
if (!request) return null;
|
|
1057
|
+
|
|
1058
|
+
return {
|
|
1059
|
+
requestId: request.requestId,
|
|
1060
|
+
type: request.type,
|
|
1061
|
+
startTime: request.startTime,
|
|
1062
|
+
durationMs: Date.now() - request.startTime.getTime()
|
|
1063
|
+
};
|
|
1064
|
+
}
|
|
1065
|
+
|
|
1066
|
+
/**
|
|
1067
|
+
* Get count of all active requests
|
|
1068
|
+
* @returns {number} Number of active requests
|
|
1069
|
+
*/
|
|
1070
|
+
getActiveRequestCount() {
|
|
1071
|
+
return this.activeRequests.size;
|
|
1072
|
+
}
|
|
1073
|
+
|
|
1074
|
+
/**
|
|
1075
|
+
* Generate image from text prompt using AI models
|
|
1076
|
+
* @param {string} prompt - Text description of the image to generate
|
|
1077
|
+
* @param {Object} options - Generation options
|
|
1078
|
+
* @param {string} options.model - Model to use (resolved from modelsService if omitted)
|
|
1079
|
+
* @param {string} options.size - Image size (e.g., '1024x1024', '512x512')
|
|
1080
|
+
* @param {string} options.quality - Image quality ('standard' or 'hd')
|
|
1081
|
+
* @param {string} options.responseFormat - Response format ('url' or 'b64_json')
|
|
1082
|
+
* @param {string} options.sessionId - Session ID for API key retrieval
|
|
1083
|
+
* @param {boolean} options.platformProvided - Whether to use platform model
|
|
1084
|
+
* @returns {Promise<Object>} Generated image result with URL or base64 data
|
|
1085
|
+
*/
|
|
1086
|
+
async generateImage(prompt, options = {}) {
|
|
1087
|
+
const requestId = `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
|
1088
|
+
|
|
1089
|
+
try {
|
|
1090
|
+
// Check circuit breaker
|
|
1091
|
+
if (this._isCircuitBreakerOpen()) {
|
|
1092
|
+
throw new Error('Service temporarily unavailable - circuit breaker is open');
|
|
1093
|
+
}
|
|
1094
|
+
|
|
1095
|
+
// Validate prompt
|
|
1096
|
+
if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
|
|
1097
|
+
throw new Error('Image generation requires a non-empty text prompt');
|
|
1098
|
+
}
|
|
1099
|
+
|
|
1100
|
+
// Resolve model: caller-specified → modelsService lookup → error
|
|
1101
|
+
const model = options.model || this._resolveModelByApiType('image_generation');
|
|
1102
|
+
if (!model) {
|
|
1103
|
+
throw new Error('No image generation model available. Ensure your API has image generation models configured.');
|
|
1104
|
+
}
|
|
1105
|
+
const size = options.size || '1024x1024';
|
|
1106
|
+
const quality = options.quality || 'standard';
|
|
1107
|
+
const responseFormat = options.responseFormat || 'url';
|
|
1108
|
+
|
|
1109
|
+
this.logger.info(`Generating image with model: ${model}`, {
|
|
1110
|
+
requestId,
|
|
1111
|
+
model,
|
|
1112
|
+
size,
|
|
1113
|
+
quality,
|
|
1114
|
+
promptLength: prompt.length
|
|
1115
|
+
});
|
|
1116
|
+
|
|
1117
|
+
// Prepare request payload
|
|
1118
|
+
const payload = {
|
|
1119
|
+
prompt,
|
|
1120
|
+
model,
|
|
1121
|
+
size,
|
|
1122
|
+
quality,
|
|
1123
|
+
response_format: responseFormat,
|
|
1124
|
+
n: 1, // Generate 1 image
|
|
1125
|
+
metadata: {
|
|
1126
|
+
requestId,
|
|
1127
|
+
timestamp: new Date().toISOString()
|
|
1128
|
+
}
|
|
1129
|
+
};
|
|
1130
|
+
|
|
1131
|
+
// Make API request to image generation endpoint
|
|
1132
|
+
const response = await this._makeImageAPIRequest(payload, requestId, {
|
|
1133
|
+
sessionId: options.sessionId,
|
|
1134
|
+
platformProvided: options.platformProvided !== false
|
|
1135
|
+
});
|
|
1136
|
+
|
|
1137
|
+
// Reset circuit breaker on success
|
|
1138
|
+
this._resetCircuitBreaker();
|
|
1139
|
+
|
|
1140
|
+
return {
|
|
1141
|
+
url: response.data?.[0]?.url || null,
|
|
1142
|
+
b64_json: response.data?.[0]?.b64_json || null,
|
|
1143
|
+
model: response.model || model,
|
|
1144
|
+
requestId,
|
|
1145
|
+
revisedPrompt: response.data?.[0]?.revised_prompt || prompt
|
|
1146
|
+
};
|
|
1147
|
+
|
|
1148
|
+
} catch (error) {
|
|
1149
|
+
// Handle circuit breaker
|
|
1150
|
+
this._recordFailure();
|
|
1151
|
+
|
|
1152
|
+
this.logger.error(`Image generation failed: ${error.message}`, {
|
|
1153
|
+
requestId,
|
|
1154
|
+
model: options.model,
|
|
1155
|
+
error: error.stack
|
|
1156
|
+
});
|
|
1157
|
+
|
|
1158
|
+
throw error;
|
|
1159
|
+
}
|
|
1160
|
+
}
|
|
1161
|
+
|
|
1162
|
+
/**
|
|
1163
|
+
* Edit an existing image using GPT-Image models (image-to-image)
|
|
1164
|
+
* @param {string} prompt - Editing instruction
|
|
1165
|
+
* @param {string} imageBase64 - Base64-encoded source image
|
|
1166
|
+
* @param {Object} options - Options (model, maskBase64, sessionId)
|
|
1167
|
+
* @returns {Promise<Object>} Edited image result { b64_json, model, requestId }
|
|
1168
|
+
*/
|
|
1169
|
+
async editImage(prompt, imageBase64, options = {}) {
|
|
1170
|
+
const requestId = `img-edit-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
|
1171
|
+
|
|
1172
|
+
try {
|
|
1173
|
+
if (this._isCircuitBreakerOpen()) {
|
|
1174
|
+
throw new Error('Service temporarily unavailable - circuit breaker is open');
|
|
1175
|
+
}
|
|
1176
|
+
|
|
1177
|
+
if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
|
|
1178
|
+
throw new Error('Image editing requires a non-empty text prompt');
|
|
1179
|
+
}
|
|
1180
|
+
|
|
1181
|
+
if (!imageBase64) {
|
|
1182
|
+
throw new Error('Image editing requires a source image');
|
|
1183
|
+
}
|
|
1184
|
+
|
|
1185
|
+
const model = options.model || 'gpt-image-1.5';
|
|
1186
|
+
|
|
1187
|
+
this.logger.info(`Editing image with model: ${model}`, {
|
|
1188
|
+
requestId,
|
|
1189
|
+
model,
|
|
1190
|
+
hasMask: !!options.maskBase64,
|
|
1191
|
+
promptLength: prompt.length
|
|
1192
|
+
});
|
|
1193
|
+
|
|
1194
|
+
const payload = {
|
|
1195
|
+
prompt,
|
|
1196
|
+
model,
|
|
1197
|
+
imageBase64,
|
|
1198
|
+
...(options.maskBase64 && { maskBase64: options.maskBase64 }),
|
|
1199
|
+
requestId
|
|
1200
|
+
};
|
|
1201
|
+
|
|
1202
|
+
const editUrl = `${this.baseUrl}/llm/edit-image`;
|
|
1203
|
+
|
|
1204
|
+
// Get API key
|
|
1205
|
+
let apiKey = null;
|
|
1206
|
+
if (this.apiKeyManager) {
|
|
1207
|
+
const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
|
|
1208
|
+
platformProvided: options.platformProvided !== false,
|
|
1209
|
+
vendor: this._getVendorFromModel(model)
|
|
1210
|
+
});
|
|
1211
|
+
apiKey = keys.loxiaApiKey;
|
|
1212
|
+
}
|
|
1213
|
+
if (!apiKey && options.apiKey) apiKey = options.apiKey;
|
|
1214
|
+
if (!apiKey && this.config.apiKey) apiKey = this.config.apiKey;
|
|
1215
|
+
if (!apiKey && process.env.LOXIA_API_KEY) apiKey = process.env.LOXIA_API_KEY;
|
|
1216
|
+
if (!apiKey) {
|
|
1217
|
+
throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
|
|
1218
|
+
}
|
|
1219
|
+
|
|
1220
|
+
const imageTimeout = this.timeout * 3; // Editing may take longer
|
|
1221
|
+
const controller = new AbortController();
|
|
1222
|
+
const timeoutId = setTimeout(() => controller.abort(), imageTimeout);
|
|
1223
|
+
|
|
1224
|
+
try {
|
|
1225
|
+
const response = await fetch(editUrl, {
|
|
1226
|
+
method: 'POST',
|
|
1227
|
+
headers: {
|
|
1228
|
+
'Content-Type': 'application/json',
|
|
1229
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
1230
|
+
'X-Request-ID': requestId
|
|
1231
|
+
},
|
|
1232
|
+
body: JSON.stringify(payload),
|
|
1233
|
+
signal: controller.signal
|
|
1234
|
+
});
|
|
1235
|
+
|
|
1236
|
+
clearTimeout(timeoutId);
|
|
1237
|
+
|
|
1238
|
+
if (!response.ok) {
|
|
1239
|
+
const errorText = await response.text();
|
|
1240
|
+
const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
|
|
1241
|
+
error.status = response.status;
|
|
1242
|
+
throw error;
|
|
1243
|
+
}
|
|
1244
|
+
|
|
1245
|
+
const data = await response.json();
|
|
1246
|
+
|
|
1247
|
+
this._resetCircuitBreaker();
|
|
1248
|
+
|
|
1249
|
+
return {
|
|
1250
|
+
url: null,
|
|
1251
|
+
b64_json: data.images?.[0]?.b64_json || null,
|
|
1252
|
+
model: data.model || model,
|
|
1253
|
+
requestId,
|
|
1254
|
+
revisedPrompt: prompt
|
|
1255
|
+
};
|
|
1256
|
+
|
|
1257
|
+
} finally {
|
|
1258
|
+
clearTimeout(timeoutId);
|
|
1259
|
+
}
|
|
1260
|
+
|
|
1261
|
+
} catch (error) {
|
|
1262
|
+
this._recordFailure();
|
|
1263
|
+
|
|
1264
|
+
this.logger.error(`Image editing failed: ${error.message}`, {
|
|
1265
|
+
requestId,
|
|
1266
|
+
model: options.model,
|
|
1267
|
+
error: error.stack
|
|
1268
|
+
});
|
|
1269
|
+
|
|
1270
|
+
if (error.name === 'AbortError' || error.message?.includes('timeout')) {
|
|
1271
|
+
const timeoutError = new Error(
|
|
1272
|
+
`Image editing is taking longer than expected. Please try again in a moment.`
|
|
1273
|
+
);
|
|
1274
|
+
timeoutError.status = 408;
|
|
1275
|
+
timeoutError.isTimeout = true;
|
|
1276
|
+
timeoutError.shouldReturnToChat = true;
|
|
1277
|
+
throw timeoutError;
|
|
1278
|
+
}
|
|
1279
|
+
|
|
1280
|
+
throw error;
|
|
1281
|
+
}
|
|
1282
|
+
}
|
|
1283
|
+
|
|
1284
|
+
/**
|
|
1285
|
+
* Generate a video using Sora
|
|
1286
|
+
* This submits a video generation job and returns a job ID for polling
|
|
1287
|
+
* @param {string} prompt - Text description of the video to generate
|
|
1288
|
+
* @param {Object} options - Options
|
|
1289
|
+
* @param {number} options.width - Video width (default: 1080)
|
|
1290
|
+
* @param {number} options.height - Video height (default: 1080)
|
|
1291
|
+
* @param {number} options.duration - Duration in seconds 1-20 (default: 5)
|
|
1292
|
+
* @param {number} options.variants - Number of variants 1-4 (default: 1)
|
|
1293
|
+
* @param {string} options.model - Model to use (resolved from modelsService if omitted)
|
|
1294
|
+
* @param {string} options.sessionId - Session ID for API key retrieval
|
|
1295
|
+
* @returns {Promise<Object>} Job info with jobId for status polling
|
|
1296
|
+
*/
|
|
1297
|
+
async generateVideo(prompt, options = {}) {
|
|
1298
|
+
const requestId = `vid-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
|
1299
|
+
|
|
1300
|
+
try {
|
|
1301
|
+
// Check circuit breaker
|
|
1302
|
+
if (this._isCircuitBreakerOpen()) {
|
|
1303
|
+
throw new Error('Service temporarily unavailable - circuit breaker is open');
|
|
1304
|
+
}
|
|
1305
|
+
|
|
1306
|
+
// Validate prompt
|
|
1307
|
+
if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
|
|
1308
|
+
throw new Error('Video generation requires a non-empty text prompt');
|
|
1309
|
+
}
|
|
1310
|
+
|
|
1311
|
+
// Resolve model: caller-specified → modelsService lookup → error
|
|
1312
|
+
const model = options.model || this._resolveModelByApiType('video_generation');
|
|
1313
|
+
if (!model) {
|
|
1314
|
+
throw new Error('No video generation model available. Ensure your API has video generation models configured.');
|
|
1315
|
+
}
|
|
1316
|
+
const width = options.width || 1080;
|
|
1317
|
+
const height = options.height || 1080;
|
|
1318
|
+
const duration = options.duration || 5;
|
|
1319
|
+
const variants = options.variants || 1;
|
|
1320
|
+
|
|
1321
|
+
this.logger.info(`Submitting video generation job with model: ${model}`, {
|
|
1322
|
+
requestId,
|
|
1323
|
+
model,
|
|
1324
|
+
width,
|
|
1325
|
+
height,
|
|
1326
|
+
duration,
|
|
1327
|
+
variants,
|
|
1328
|
+
promptLength: prompt.length
|
|
1329
|
+
});
|
|
1330
|
+
|
|
1331
|
+
// Prepare request payload
|
|
1332
|
+
const payload = {
|
|
1333
|
+
prompt,
|
|
1334
|
+
model,
|
|
1335
|
+
width,
|
|
1336
|
+
height,
|
|
1337
|
+
duration,
|
|
1338
|
+
variants,
|
|
1339
|
+
requestId
|
|
1340
|
+
};
|
|
1341
|
+
|
|
1342
|
+
// Make API request to video generation endpoint
|
|
1343
|
+
const response = await this._makeVideoAPIRequest(payload, requestId, {
|
|
1344
|
+
sessionId: options.sessionId,
|
|
1345
|
+
platformProvided: options.platformProvided !== false
|
|
1346
|
+
});
|
|
1347
|
+
|
|
1348
|
+
// Reset circuit breaker on success
|
|
1349
|
+
this._resetCircuitBreaker();
|
|
1350
|
+
|
|
1351
|
+
return {
|
|
1352
|
+
jobId: response.jobId,
|
|
1353
|
+
status: response.status,
|
|
1354
|
+
prompt: prompt,
|
|
1355
|
+
model: response.model || model,
|
|
1356
|
+
width: response.width,
|
|
1357
|
+
height: response.height,
|
|
1358
|
+
duration: response.duration,
|
|
1359
|
+
variants: response.variants,
|
|
1360
|
+
requestId,
|
|
1361
|
+
createdAt: response.createdAt
|
|
1362
|
+
};
|
|
1363
|
+
|
|
1364
|
+
} catch (error) {
|
|
1365
|
+
// Handle circuit breaker
|
|
1366
|
+
this._recordFailure();
|
|
1367
|
+
|
|
1368
|
+
this.logger.error(`Video generation failed: ${error.message}`, {
|
|
1369
|
+
requestId,
|
|
1370
|
+
model: options.model,
|
|
1371
|
+
error: error.stack
|
|
1372
|
+
});
|
|
1373
|
+
|
|
1374
|
+
throw error;
|
|
1375
|
+
}
|
|
1376
|
+
}
|
|
1377
|
+
|
|
1378
|
+
/**
|
|
1379
|
+
* Get video generation job status
|
|
1380
|
+
* @param {string} jobId - The Sora job ID to check
|
|
1381
|
+
* @param {Object} options - Options
|
|
1382
|
+
* @param {string} options.model - Model used (for routing)
|
|
1383
|
+
* @param {string} options.sessionId - Session ID for API key retrieval
|
|
1384
|
+
* @returns {Promise<Object>} Job status with videoUrl if completed
|
|
1385
|
+
*/
|
|
1386
|
+
async getVideoJobStatus(jobId, options = {}) {
|
|
1387
|
+
try {
|
|
1388
|
+
const model = options.model || this._resolveModelByApiType('video_generation');
|
|
1389
|
+
|
|
1390
|
+
this.logger.debug(`Checking video job status: ${jobId}`, { model });
|
|
1391
|
+
|
|
1392
|
+
// Make API request to get job status
|
|
1393
|
+
const response = await this._makeVideoStatusRequest(jobId, {
|
|
1394
|
+
model,
|
|
1395
|
+
sessionId: options.sessionId
|
|
1396
|
+
});
|
|
1397
|
+
|
|
1398
|
+
return {
|
|
1399
|
+
jobId: response.jobId,
|
|
1400
|
+
status: response.status,
|
|
1401
|
+
videoUrl: response.videoUrl,
|
|
1402
|
+
generations: response.generations,
|
|
1403
|
+
error: response.error,
|
|
1404
|
+
createdAt: response.createdAt,
|
|
1405
|
+
completedAt: response.completedAt
|
|
1406
|
+
};
|
|
1407
|
+
|
|
1408
|
+
} catch (error) {
|
|
1409
|
+
this.logger.error(`Video job status check failed: ${error.message}`, {
|
|
1410
|
+
jobId,
|
|
1411
|
+
error: error.stack
|
|
1412
|
+
});
|
|
1413
|
+
|
|
1414
|
+
throw error;
|
|
1415
|
+
}
|
|
1416
|
+
}
|
|
1417
|
+
|
|
1418
|
+
/**
|
|
1419
|
+
* Check service health for circuit breaker
|
|
1420
|
+
* @returns {Promise<boolean>} Service health status
|
|
1421
|
+
*/
|
|
1422
|
+
async checkServiceHealth() {
|
|
1423
|
+
try {
|
|
1424
|
+
const response = await this._makeAPIRequest('/health', {}, 'health-check');
|
|
1425
|
+
return response.status === 'healthy';
|
|
1426
|
+
} catch (error) {
|
|
1427
|
+
return false;
|
|
1428
|
+
}
|
|
1429
|
+
}
|
|
1430
|
+
|
|
1431
|
+
/**
|
|
1432
|
+
* Switch agent to different model
|
|
1433
|
+
* @param {string} agentId - Agent identifier
|
|
1434
|
+
* @param {string} newModel - New model name
|
|
1435
|
+
* @returns {Promise<Object>} Switch result
|
|
1436
|
+
*/
|
|
1437
|
+
async switchAgentModel(agentId, newModel) {
|
|
1438
|
+
try {
|
|
1439
|
+
if (!this._isValidModel(newModel)) {
|
|
1440
|
+
throw new Error(`Invalid model: ${newModel}`);
|
|
1441
|
+
}
|
|
1442
|
+
|
|
1443
|
+
// Get conversation manager for agent
|
|
1444
|
+
let conversationManager = this.conversationManagers.get(agentId);
|
|
1445
|
+
if (!conversationManager) {
|
|
1446
|
+
// Create new conversation manager if it doesn't exist
|
|
1447
|
+
conversationManager = new ConversationManager(agentId, this.logger);
|
|
1448
|
+
this.conversationManagers.set(agentId, conversationManager);
|
|
1449
|
+
}
|
|
1450
|
+
|
|
1451
|
+
// Switch model and return conversation
|
|
1452
|
+
const modelConversation = await conversationManager.switchModel(newModel);
|
|
1453
|
+
|
|
1454
|
+
// CRITICAL FIX: Update agent's currentModel field in AgentPool
|
|
1455
|
+
const agent = await this.agentPool?.getAgent(agentId);
|
|
1456
|
+
if (agent) {
|
|
1457
|
+
agent.currentModel = newModel;
|
|
1458
|
+
await this.agentPool.persistAgentState(agentId);
|
|
1459
|
+
}
|
|
1460
|
+
|
|
1461
|
+
this.logger.info(`Agent model switched: ${agentId}`, {
|
|
1462
|
+
newModel,
|
|
1463
|
+
messageCount: modelConversation.messages.length
|
|
1464
|
+
});
|
|
1465
|
+
|
|
1466
|
+
return {
|
|
1467
|
+
success: true,
|
|
1468
|
+
agentId,
|
|
1469
|
+
newModel,
|
|
1470
|
+
conversation: modelConversation
|
|
1471
|
+
};
|
|
1472
|
+
|
|
1473
|
+
} catch (error) {
|
|
1474
|
+
this.logger.error(`Model switch failed: ${error.message}`, { agentId, newModel });
|
|
1475
|
+
throw error;
|
|
1476
|
+
}
|
|
1477
|
+
}
|
|
1478
|
+
|
|
1479
|
+
/**
|
|
1480
|
+
* Initialize model specifications
|
|
1481
|
+
* @private
|
|
1482
|
+
*/
|
|
1483
|
+
_initializeModelSpecs() {
|
|
1484
|
+
const baseSpecs = {
|
|
1485
|
+
// Anthropic Claude models
|
|
1486
|
+
[MODELS.ANTHROPIC_SONNET]: {
|
|
1487
|
+
provider: MODEL_PROVIDERS.ANTHROPIC,
|
|
1488
|
+
contextWindow: 200000,
|
|
1489
|
+
maxTokens: 8192, // Increased from 4096
|
|
1490
|
+
costPer1kTokens: 0.015
|
|
1491
|
+
},
|
|
1492
|
+
[MODELS.ANTHROPIC_HAIKU]: {
|
|
1493
|
+
provider: MODEL_PROVIDERS.ANTHROPIC,
|
|
1494
|
+
contextWindow: 200000,
|
|
1495
|
+
maxTokens: 8192, // Increased from 4096
|
|
1496
|
+
costPer1kTokens: 0.0025
|
|
1497
|
+
},
|
|
1498
|
+
|
|
1499
|
+
// OpenAI models
|
|
1500
|
+
[MODELS.GPT_4]: {
|
|
1501
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1502
|
+
contextWindow: 128000,
|
|
1503
|
+
maxTokens: 8192, // Increased from 4096
|
|
1504
|
+
costPer1kTokens: 0.03
|
|
1505
|
+
},
|
|
1506
|
+
[MODELS.GPT_4_MINI]: {
|
|
1507
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1508
|
+
contextWindow: 128000,
|
|
1509
|
+
maxTokens: 16384,
|
|
1510
|
+
costPer1kTokens: 0.0015
|
|
1511
|
+
},
|
|
1512
|
+
'gpt-4o': {
|
|
1513
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1514
|
+
contextWindow: 128000,
|
|
1515
|
+
maxTokens: 8192,
|
|
1516
|
+
costPer1kTokens: 0.03
|
|
1517
|
+
},
|
|
1518
|
+
'gpt-4o-mini': {
|
|
1519
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1520
|
+
contextWindow: 128000,
|
|
1521
|
+
maxTokens: 16384,
|
|
1522
|
+
costPer1kTokens: 0.0015
|
|
1523
|
+
},
|
|
1524
|
+
'gpt-4-turbo': {
|
|
1525
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1526
|
+
contextWindow: 128000,
|
|
1527
|
+
maxTokens: 8192,
|
|
1528
|
+
costPer1kTokens: 0.03
|
|
1529
|
+
},
|
|
1530
|
+
'gpt-3.5-turbo': {
|
|
1531
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1532
|
+
contextWindow: 16384,
|
|
1533
|
+
maxTokens: 4096,
|
|
1534
|
+
costPer1kTokens: 0.001
|
|
1535
|
+
},
|
|
1536
|
+
[MODELS.GPT_5_1_CODEX_MINI]: {
|
|
1537
|
+
provider: MODEL_PROVIDERS.OPENAI,
|
|
1538
|
+
contextWindow: 400000,
|
|
1539
|
+
maxTokens: 8192,
|
|
1540
|
+
costPer1kTokens: 0.002
|
|
1541
|
+
},
|
|
1542
|
+
|
|
1543
|
+
// DeepSeek models
|
|
1544
|
+
[MODELS.DEEPSEEK_R1]: {
|
|
1545
|
+
provider: MODEL_PROVIDERS.DEEPSEEK,
|
|
1546
|
+
contextWindow: 128000,
|
|
1547
|
+
maxTokens: 8192,
|
|
1548
|
+
costPer1kTokens: 0.002
|
|
1549
|
+
},
|
|
1550
|
+
|
|
1551
|
+
// Phi models
|
|
1552
|
+
[MODELS.PHI_4]: {
|
|
1553
|
+
provider: MODEL_PROVIDERS.PHI,
|
|
1554
|
+
contextWindow: 16384,
|
|
1555
|
+
maxTokens: 4096, // Increased from 2048
|
|
1556
|
+
costPer1kTokens: 0.001
|
|
1557
|
+
},
|
|
1558
|
+
[MODELS.PHI_4_REASONING]: {
|
|
1559
|
+
provider: MODEL_PROVIDERS.PHI,
|
|
1560
|
+
contextWindow: 32000,
|
|
1561
|
+
maxTokens: 4096,
|
|
1562
|
+
costPer1kTokens: 0.001
|
|
1563
|
+
},
|
|
1564
|
+
|
|
1565
|
+
// Azure AI Foundry models
|
|
1566
|
+
'azure-ai-grok3': {
|
|
1567
|
+
provider: 'AZURE',
|
|
1568
|
+
contextWindow: 128000,
|
|
1569
|
+
maxTokens: 8192, // Increased from 4096
|
|
1570
|
+
costPer1kTokens: 0.01
|
|
1571
|
+
},
|
|
1572
|
+
'azure-ai-deepseek-r1': {
|
|
1573
|
+
provider: 'AZURE',
|
|
1574
|
+
contextWindow: 128000,
|
|
1575
|
+
maxTokens: 8192,
|
|
1576
|
+
costPer1kTokens: 0.002
|
|
1577
|
+
},
|
|
1578
|
+
'azure-openai-gpt-5': {
|
|
1579
|
+
provider: 'AZURE',
|
|
1580
|
+
contextWindow: 128000,
|
|
1581
|
+
maxTokens: 8192,
|
|
1582
|
+
costPer1kTokens: 0.03
|
|
1583
|
+
},
|
|
1584
|
+
'azure-openai-gpt-4': {
|
|
1585
|
+
provider: 'AZURE',
|
|
1586
|
+
contextWindow: 128000,
|
|
1587
|
+
maxTokens: 8192,
|
|
1588
|
+
costPer1kTokens: 0.03
|
|
1589
|
+
},
|
|
1590
|
+
'azure-openai-gpt-4o': {
|
|
1591
|
+
provider: 'AZURE',
|
|
1592
|
+
contextWindow: 128000,
|
|
1593
|
+
maxTokens: 8192,
|
|
1594
|
+
costPer1kTokens: 0.03
|
|
1595
|
+
},
|
|
1596
|
+
|
|
1597
|
+
// Router model
|
|
1598
|
+
'autopilot-model-router': {
|
|
1599
|
+
provider: 'AZURE',
|
|
1600
|
+
contextWindow: 16384,
|
|
1601
|
+
maxTokens: 2048,
|
|
1602
|
+
costPer1kTokens: 0.001
|
|
1603
|
+
}
|
|
1604
|
+
};
|
|
1605
|
+
|
|
1606
|
+
// No need for prefixed models anymore - just return clean base specs
|
|
1607
|
+
return baseSpecs;
|
|
1608
|
+
}
|
|
1609
|
+
|
|
1610
|
+
/**
|
|
1611
|
+
* Format messages for specific model
|
|
1612
|
+
* @private
|
|
1613
|
+
*/
|
|
1614
|
+
_formatMessagesForModel(messages, model, options) {
|
|
1615
|
+
// Get model spec or use default
|
|
1616
|
+
const modelSpec = this.modelSpecs[model] || { provider: 'AZURE' };
|
|
1617
|
+
|
|
1618
|
+
let formattedMessages;
|
|
1619
|
+
|
|
1620
|
+
if (typeof messages === 'string') {
|
|
1621
|
+
// Single message
|
|
1622
|
+
formattedMessages = [{
|
|
1623
|
+
role: 'user',
|
|
1624
|
+
content: messages
|
|
1625
|
+
}];
|
|
1626
|
+
} else {
|
|
1627
|
+
// Message array
|
|
1628
|
+
formattedMessages = messages.map(msg => this._formatSingleMessage(msg, model));
|
|
1629
|
+
}
|
|
1630
|
+
|
|
1631
|
+
// Apply provider-specific formatting
|
|
1632
|
+
switch (modelSpec.provider) {
|
|
1633
|
+
case MODEL_PROVIDERS.ANTHROPIC:
|
|
1634
|
+
return this._formatForAnthropic(formattedMessages);
|
|
1635
|
+
case MODEL_PROVIDERS.OPENAI:
|
|
1636
|
+
return this._formatForOpenAI(formattedMessages);
|
|
1637
|
+
case MODEL_PROVIDERS.AZURE:
|
|
1638
|
+
return this._formatForAzure(formattedMessages);
|
|
1639
|
+
default:
|
|
1640
|
+
return formattedMessages;
|
|
1641
|
+
}
|
|
1642
|
+
}
|
|
1643
|
+
|
|
1644
|
+
/**
|
|
1645
|
+
* Format single message for model
|
|
1646
|
+
* @private
|
|
1647
|
+
*/
|
|
1648
|
+
_formatSingleMessage(message, model) {
|
|
1649
|
+
return {
|
|
1650
|
+
role: message.role || 'user',
|
|
1651
|
+
content: message.content,
|
|
1652
|
+
timestamp: message.timestamp
|
|
1653
|
+
};
|
|
1654
|
+
}
|
|
1655
|
+
|
|
1656
|
+
/**
|
|
1657
|
+
* Format messages for Anthropic models
|
|
1658
|
+
* @private
|
|
1659
|
+
*/
|
|
1660
|
+
_formatForAnthropic(messages) {
|
|
1661
|
+
return messages.map(msg => {
|
|
1662
|
+
if (msg.role === 'system') {
|
|
1663
|
+
return {
|
|
1664
|
+
role: 'user',
|
|
1665
|
+
content: `System: ${msg.content}`
|
|
1666
|
+
};
|
|
1667
|
+
}
|
|
1668
|
+
return msg;
|
|
1669
|
+
});
|
|
1670
|
+
}
|
|
1671
|
+
|
|
1672
|
+
/**
|
|
1673
|
+
* Format messages for OpenAI models
|
|
1674
|
+
* @private
|
|
1675
|
+
*/
|
|
1676
|
+
_formatForOpenAI(messages) {
|
|
1677
|
+
// OpenAI supports system role natively
|
|
1678
|
+
return messages;
|
|
1679
|
+
}
|
|
1680
|
+
|
|
1681
|
+
/**
|
|
1682
|
+
* Format messages for Azure models
|
|
1683
|
+
* @private
|
|
1684
|
+
*/
|
|
1685
|
+
_formatForAzure(messages) {
|
|
1686
|
+
// Azure may have specific formatting requirements
|
|
1687
|
+
return messages.map(msg => ({
|
|
1688
|
+
...msg,
|
|
1689
|
+
content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
|
|
1690
|
+
}));
|
|
1691
|
+
}
|
|
1692
|
+
|
|
1693
|
+
/**
|
|
1694
|
+
* Check if model is valid
|
|
1695
|
+
* @private
|
|
1696
|
+
*/
|
|
1697
|
+
_isValidModel(model) {
|
|
1698
|
+
this.logger.debug('Validating model', { model, modelType: typeof model });
|
|
1699
|
+
|
|
1700
|
+
// Check if model exists in our specs directly
|
|
1701
|
+
if (this.modelSpecs[model] !== undefined) {
|
|
1702
|
+
return true;
|
|
1703
|
+
}
|
|
1704
|
+
|
|
1705
|
+
this.logger.warn('Model validation failed', {
|
|
1706
|
+
model,
|
|
1707
|
+
availableModels: Object.keys(this.modelSpecs)
|
|
1708
|
+
});
|
|
1709
|
+
|
|
1710
|
+
return false;
|
|
1711
|
+
}
|
|
1712
|
+
|
|
1713
|
+
/**
|
|
1714
|
+
* Check model health status
|
|
1715
|
+
* @private
|
|
1716
|
+
*/
|
|
1717
|
+
async _checkModelHealth(model) {
|
|
1718
|
+
// Implementation would check model-specific health endpoints
|
|
1719
|
+
// For now, return true (assuming all models are healthy)
|
|
1720
|
+
return true;
|
|
1721
|
+
}
|
|
1722
|
+
|
|
1723
|
+
/**
|
|
1724
|
+
* Estimate tokens for content
|
|
1725
|
+
* @private
|
|
1726
|
+
*/
|
|
1727
|
+
async _estimateTokens(content, model) {
|
|
1728
|
+
// Rough estimation: 1 token ≈ 4 characters for most models
|
|
1729
|
+
return Math.ceil(content.length / 4);
|
|
1730
|
+
}
|
|
1731
|
+
|
|
1732
|
+
/**
|
|
1733
|
+
* Truncate message to fit token limit
|
|
1734
|
+
* @private
|
|
1735
|
+
*/
|
|
1736
|
+
async _truncateMessage(content, maxTokens) {
|
|
1737
|
+
const maxChars = maxTokens * 4; // Rough estimation
|
|
1738
|
+
if (content.length <= maxChars) {
|
|
1739
|
+
return content;
|
|
1740
|
+
}
|
|
1741
|
+
|
|
1742
|
+
return content.substring(0, maxChars - 20) + '\n... [message truncated]';
|
|
1743
|
+
}
|
|
1744
|
+
|
|
1745
|
+
/**
|
|
1746
|
+
* Summarize messages for compactization
|
|
1747
|
+
* @private
|
|
1748
|
+
*/
|
|
1749
|
+
async _summarizeMessages(messages, model) {
|
|
1750
|
+
const combinedContent = messages
|
|
1751
|
+
.map(msg => `${msg.role}: ${msg.content}`)
|
|
1752
|
+
.join('\n');
|
|
1753
|
+
|
|
1754
|
+
// This would use the AI service to create a summary
|
|
1755
|
+
// For now, return a simple truncated version
|
|
1756
|
+
const maxLength = 500;
|
|
1757
|
+
if (combinedContent.length <= maxLength) {
|
|
1758
|
+
return combinedContent;
|
|
1759
|
+
}
|
|
1760
|
+
|
|
1761
|
+
return combinedContent.substring(0, maxLength) + '... [conversation summary truncated]';
|
|
1762
|
+
}
|
|
1763
|
+
|
|
1764
|
+
/**
 * Make API request (single attempt, no retries).
 * Sends a non-streaming chat request to the configured Azure backend at
 * `${this.baseUrl}/llm/chat`, resolving the API key from (in order) the
 * session-based apiKeyManager, `options.apiKey`, then `this.config.apiKey`.
 * Timeouts are enforced with an AbortController; user-initiated aborts
 * (flagged in `this.userAbortedRequests`) return a sentinel response instead
 * of throwing.
 * @param {string} endpoint - NOTE(review): currently unused — the URL is built from this.baseUrl
 * @param {Object} payload - Chat payload; reads .model, .messages, .options, .system
 * @param {string} requestId - Correlation ID, also used as conversationId
 * @param {Object} [options] - sessionId, apiKey, platformProvided, customApiKeys, agentId
 * @returns {Promise<Object>} OpenAI-style response: { choices, usage, model, id }
 * @throws {Error} When no API key is available, messages are empty, the HTTP
 *   response is not ok, or the request times out (error.isTimeout = true)
 * @private
 */
async _makeAPIRequest(endpoint, payload, requestId, options = {}) {
  // Make request to configured backend
  const backendUrl = `${this.baseUrl}/llm/chat`;

  // Get API keys from session-based storage
  let apiKey = null;
  let vendorApiKey = null;

  // Log the state for debugging
  this.logger?.info('🔑 API Key retrieval state', {
    hasApiKeyManager: !!this.apiKeyManager,
    sessionId: options.sessionId,
    hasSessionId: !!options.sessionId,
    optionsKeys: Object.keys(options),
    model: payload.model
  });

  // First try to get from API key manager using session ID
  if (this.apiKeyManager) {
    const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
      platformProvided: options.platformProvided !== false,
      vendor: this._getVendorFromModel(payload.model)
    });

    apiKey = keys.loxiaApiKey;
    vendorApiKey = keys.vendorApiKey;

    this.logger?.debug('Retrieved API keys from session manager', {
      sessionId: options.sessionId,
      hasLoxiaKey: !!apiKey,
      hasVendorKey: !!vendorApiKey,
      vendor: this._getVendorFromModel(payload.model)
    });
  }

  // Fallback to options (passed from frontend)
  if (!apiKey && options.apiKey) {
    apiKey = options.apiKey;
  }

  // Fallback to config if no API key from session or options
  if (!apiKey && this.config.apiKey) {
    apiKey = this.config.apiKey;
  }

  if (!apiKey) {
    throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
  }

  // Validate messages before sending
  if (!payload.messages || payload.messages.length === 0) {
    throw new Error('No messages to send - messages array is empty');
  }

  // The backend's `message` field is the latest message's content.
  // NOTE(review): .trim() below assumes content is a string — non-string
  // (structured) content would throw here; confirm callers always pass strings.
  const lastMessage = payload.messages[payload.messages.length - 1];
  const messageContent = lastMessage?.content || '';

  if (!messageContent.trim()) {
    this.logger?.warn('Empty message content detected (non-streaming)', {
      messagesCount: payload.messages.length,
      lastMessageRole: lastMessage?.role,
      agentId: options.agentId
    });
    throw new Error('Message content is empty - cannot send empty request');
  }

  // Use the model name from payload (already transformed)
  const modelName = payload.model;

  // Transform the payload to match the Azure backend API format
  const azurePayload = {
    conversationId: requestId,
    message: messageContent,
    messages: payload.messages,
    model: modelName,
    requestId,
    options: payload.options || {},
    platformProvided: options.platformProvided !== false // Indicate if this is a platform model
  };

  // Add system prompt if provided
  if (payload.system) {
    azurePayload.systemPrompt = payload.system;
  }

  // Include appropriate API key based on model type
  if (options.platformProvided) {
    // Platform models use Loxia API key
    azurePayload.apiKey = apiKey;
  } else {
    // Direct access models use vendor-specific keys
    if (vendorApiKey) {
      azurePayload.vendorApiKey = vendorApiKey;
    }

    // Also include custom API keys from options for backward compatibility
    if (options.customApiKeys) {
      azurePayload.customApiKeys = options.customApiKeys;
    }

    // Include Loxia API key as fallback
    azurePayload.apiKey = apiKey;
  }

  // Single attempt - no automatic retries for LLM requests
  // On timeout, return to chat mode and let user retry manually
  const agentId = options.agentId;

  try {
    this.logger.info('Making request to Azure backend', {
      url: backendUrl,
      model: payload.model,
      requestId,
      agentId,
      hasApiKey: !!apiKey,
      timeout: this.timeout
    });

    // Use AbortController for proper timeout handling (fetch timeout option doesn't work in Node.js)
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), this.timeout);

    // Track active request for abort support
    if (agentId) {
      this.activeRequests.set(agentId, {
        controller,
        requestId,
        startTime: new Date(),
        type: 'non-streaming'
      });
    }

    try {
      const response = await fetch(backendUrl, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`,
          'X-Request-ID': requestId
        },
        body: JSON.stringify(azurePayload),
        signal: controller.signal
      });

      // Cancel the timeout as soon as headers arrive; the finally below also
      // clears it (clearTimeout on a cleared id is a harmless no-op).
      clearTimeout(timeoutId);

      if (!response.ok) {
        const errorText = await response.text();
        const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
        error.status = response.status;
        throw error;
      }

      const data = await response.json();

      // Clean up active request tracking on success
      if (agentId) {
        this.activeRequests.delete(agentId);
      }

      // Transform Azure backend response to match expected format
      return {
        choices: [{
          message: {
            content: data.content || '',
            role: 'assistant'
          },
          finish_reason: data.finishReason || 'stop'
        }],
        usage: data.usage || {
          prompt_tokens: 0,
          completion_tokens: 0,
          total_tokens: 0
        },
        model: data.model || payload.model,
        id: data.requestId || requestId
      };

    } finally {
      // Guarantees the timer is released even when fetch/json/throw paths exit early
      clearTimeout(timeoutId);
    }

  } catch (error) {
    // Check if this was a user-initiated abort (tracked in userAbortedRequests Set)
    // Must be read BEFORE the cleanup below deletes the flag.
    const wasUserAbort = agentId && this.userAbortedRequests.has(agentId);

    // Clean up tracking
    if (agentId) {
      this.activeRequests.delete(agentId);
      this.userAbortedRequests.delete(agentId); // Clean up abort flag
    }

    // A timeout is an AbortError NOT caused by the user, or a message that
    // mentions timeout/aborted (covers wrapped fetch errors).
    const isTimeout = (error.name === 'AbortError' && !wasUserAbort) ||
                      error.message?.includes('timeout') ||
                      error.message?.includes('aborted');
    const isGatewayTimeout = error.status === 504 ||
                             error.message?.includes('504') ||
                             error.message?.includes('Gateway Timeout');

    // Handle user-initiated abort gracefully - no error thrown
    if (wasUserAbort) {
      this.logger.info('Request aborted by user', { requestId, agentId });
      // Return a minimal valid response instead of throwing
      return {
        choices: [{
          message: {
            content: '',
            role: 'assistant'
          },
          finish_reason: 'user_abort'
        }],
        usage: null,
        model: payload.model,
        id: requestId,
        aborted: true
      };
    }

    this.logger.error('Request to Azure backend failed', {
      requestId,
      error: error.message,
      status: error.status,
      isTimeout,
      isGatewayTimeout
    });

    // For timeout errors, throw immediately to return to chat mode
    // User can manually retry when ready
    if (isGatewayTimeout || isTimeout) {
      const timeoutError = new Error(
        `The AI service is taking longer than expected to respond. ` +
        `Please try again in a moment. (Error: ${isGatewayTimeout ? '504 Gateway Timeout' : 'Request Timeout'})`
      );
      timeoutError.status = isGatewayTimeout ? 504 : 408;
      timeoutError.isTimeout = true;
      timeoutError.shouldReturnToChat = true;
      throw timeoutError;
    }

    // For other errors, also throw immediately
    throw error;
  }
}
|
|
2011
|
+
|
|
2012
|
+
|
|
2013
|
+
/**
 * Make API request for image generation.
 * POSTs to `${this.baseUrl}/llm/generate-image` with an Azure-shaped payload.
 * API key resolution order: session apiKeyManager → options.apiKey →
 * this.config.apiKey → process.env.LOXIA_API_KEY. Uses double the normal
 * request timeout since image generation is slower.
 * @param {Object} payload - Reads .prompt, .model, .size, .quality, .style, .n
 * @param {string} requestId - Correlation ID forwarded to the backend
 * @param {Object} [options] - sessionId, apiKey, platformProvided
 * @returns {Promise<{data: Array, model: string, created: number, usage: Object}>}
 *   Normalized image response (backend `images` array exposed as `data`)
 * @throws {Error} When no API key is available, the HTTP response is not ok,
 *   or the request times out (error.isTimeout/shouldReturnToChat set)
 * @private
 */
async _makeImageAPIRequest(payload, requestId, options = {}) {
  // Image generation endpoint
  const imageUrl = `${this.baseUrl}/llm/generate-image`;

  // Get API keys from session-based storage
  let apiKey = null;
  let vendorApiKey = null;

  this.logger?.info('🖼️ Image API request state', {
    hasApiKeyManager: !!this.apiKeyManager,
    sessionId: options.sessionId,
    hasSessionId: !!options.sessionId,
    model: payload.model
  });

  // First try to get from API key manager using session ID
  if (this.apiKeyManager) {
    const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
      platformProvided: options.platformProvided !== false,
      vendor: this._getVendorFromModel(payload.model)
    });

    apiKey = keys.loxiaApiKey;
    // NOTE(review): vendorApiKey is captured but never forwarded in this
    // method's payload — confirm whether image generation should send it.
    vendorApiKey = keys.vendorApiKey;

    this.logger?.debug('Retrieved API keys from session manager for image', {
      sessionId: options.sessionId,
      hasLoxiaKey: !!apiKey,
      hasVendorKey: !!vendorApiKey
    });
  }

  // Fallback to options (passed from frontend)
  if (!apiKey && options.apiKey) {
    apiKey = options.apiKey;
  }

  // Fallback to config if no API key from session or options
  if (!apiKey && this.config.apiKey) {
    apiKey = this.config.apiKey;
  }

  // Fallback to environment variable (matching webServer.js and modelsService.js pattern)
  if (!apiKey && process.env.LOXIA_API_KEY) {
    apiKey = process.env.LOXIA_API_KEY;
    this.logger?.debug('Using LOXIA_API_KEY from environment for image generation');
  }

  if (!apiKey) {
    throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
  }

  // Transform the payload to match Azure backend format
  // Backend expects: prompt, model, size, quality, style, n, requestId
  const azurePayload = {
    prompt: payload.prompt,
    model: payload.model,
    size: payload.size,
    quality: payload.quality,
    style: payload.style || 'vivid', // Backend default
    n: payload.n || 1,
    requestId
  };

  // API key is sent via Authorization header, not in body

  const imageTimeout = this.timeout * 2; // Image generation may take longer (6 minutes)

  // Single attempt - no automatic retries
  // On timeout, return to chat mode and let user retry manually
  try {
    this.logger.info('Making image request to Azure backend', {
      url: imageUrl,
      model: payload.model,
      requestId,
      hasApiKey: !!apiKey,
      timeout: imageTimeout
    });

    // Use AbortController for proper timeout handling
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), imageTimeout);

    try {
      const response = await fetch(imageUrl, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`,
          'X-Request-ID': requestId
        },
        body: JSON.stringify(azurePayload),
        signal: controller.signal
      });

      // Cleared again in finally; duplicate clearTimeout is a harmless no-op
      clearTimeout(timeoutId);

      if (!response.ok) {
        const errorText = await response.text();
        const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
        error.status = response.status;
        throw error;
      }

      const data = await response.json();

      // Backend returns: { images: [...], usage: {...}, model, requestId, created }
      // Transform to match our expected format
      return {
        data: data.images || [],
        model: data.model || payload.model,
        created: data.created || Date.now(),
        usage: data.usage
      };

    } finally {
      clearTimeout(timeoutId);
    }

  } catch (error) {
    const isTimeout = error.name === 'AbortError' || error.message?.includes('timeout');
    const isGatewayTimeout = error.status === 504 || error.message?.includes('504');

    this.logger.error('Image request to Azure backend failed', {
      requestId,
      error: error.message,
      status: error.status,
      isTimeout,
      isGatewayTimeout
    });

    // For timeout errors, throw with user-friendly message
    if (isGatewayTimeout || isTimeout) {
      const timeoutError = new Error(
        `Image generation is taking longer than expected. ` +
        `Please try again in a moment. (Error: ${isGatewayTimeout ? '504 Gateway Timeout' : 'Request Timeout'})`
      );
      timeoutError.status = isGatewayTimeout ? 504 : 408;
      timeoutError.isTimeout = true;
      timeoutError.shouldReturnToChat = true;
      throw timeoutError;
    }

    throw error;
  }
}
|
|
2163
|
+
|
|
2164
|
+
/**
 * Make API request for video generation (job submission).
 * POSTs a job to `${this.baseUrl}/llm/generate-video` and returns the
 * backend's JSON body as-is (job submission result, not the finished video —
 * poll via _makeVideoStatusRequest). API key resolution order: session
 * apiKeyManager → options.apiKey → this.config.apiKey →
 * process.env.LOXIA_API_KEY. Uses a fixed 60s timeout — submission only.
 * @param {Object} payload - Reads .prompt, .model, .width, .height, .duration, .variants
 * @param {string} requestId - Correlation ID forwarded to the backend
 * @param {Object} [options] - sessionId, apiKey, platformProvided
 * @returns {Promise<Object>} Backend job-submission response (shape defined by backend)
 * @throws {Error} When no API key is available, the HTTP response is not ok,
 *   or submission times out (error.isTimeout set)
 * @private
 */
async _makeVideoAPIRequest(payload, requestId, options = {}) {
  const videoUrl = `${this.baseUrl}/llm/generate-video`;

  // Get API keys from session-based storage
  let apiKey = null;

  this.logger?.info('🎬 Video API request state', {
    hasApiKeyManager: !!this.apiKeyManager,
    sessionId: options.sessionId,
    hasSessionId: !!options.sessionId,
    model: payload.model
  });

  // First try to get from API key manager using session ID
  if (this.apiKeyManager) {
    const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
      platformProvided: options.platformProvided !== false,
      vendor: this._getVendorFromModel(payload.model)
    });
    apiKey = keys.loxiaApiKey;
  }

  // Fallback to options (passed from frontend)
  if (!apiKey && options.apiKey) {
    apiKey = options.apiKey;
  }

  // Fallback to config if no API key from session or options
  if (!apiKey && this.config.apiKey) {
    apiKey = this.config.apiKey;
  }

  // Fallback to environment variable (matching webServer.js and modelsService.js pattern)
  if (!apiKey && process.env.LOXIA_API_KEY) {
    apiKey = process.env.LOXIA_API_KEY;
    this.logger?.debug('Using LOXIA_API_KEY from environment for video generation');
  }

  if (!apiKey) {
    throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
  }

  // Transform the payload to match backend format
  const videoPayload = {
    prompt: payload.prompt,
    model: payload.model,
    width: payload.width || 1080,
    height: payload.height || 1080,
    duration: payload.duration || 5,
    variants: payload.variants || 1,
    requestId
  };

  const videoTimeout = 60000; // 1 minute timeout for job submission

  try {
    this.logger.info('Making video request to Azure backend', {
      url: videoUrl,
      model: payload.model,
      requestId,
      hasApiKey: !!apiKey,
      timeout: videoTimeout
    });

    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), videoTimeout);

    try {
      const response = await fetch(videoUrl, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`,
          'X-Request-ID': requestId
        },
        body: JSON.stringify(videoPayload),
        signal: controller.signal
      });

      // Cleared again in finally; duplicate clearTimeout is a harmless no-op
      clearTimeout(timeoutId);

      if (!response.ok) {
        const errorText = await response.text();
        const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
        error.status = response.status;
        throw error;
      }

      // Job-submission response is returned untransformed
      return await response.json();

    } finally {
      clearTimeout(timeoutId);
    }

  } catch (error) {
    const isTimeout = error.name === 'AbortError' || error.message?.includes('timeout');
    const isGatewayTimeout = error.status === 504 || error.message?.includes('504');

    this.logger.error('Video request to Azure backend failed', {
      requestId,
      error: error.message,
      status: error.status,
      isTimeout,
      isGatewayTimeout
    });

    if (isGatewayTimeout || isTimeout) {
      const timeoutError = new Error(
        `Video job submission is taking longer than expected. ` +
        `Please try again in a moment. (Error: ${isGatewayTimeout ? '504 Gateway Timeout' : 'Request Timeout'})`
      );
      timeoutError.status = isGatewayTimeout ? 504 : 408;
      timeoutError.isTimeout = true;
      // NOTE(review): unlike chat/image timeouts, shouldReturnToChat is not
      // set here — confirm whether that asymmetry is intentional.
      throw timeoutError;
    }

    throw error;
  }
}
|
|
2287
|
+
|
|
2288
|
+
/**
 * Make API request to get video job status.
 * GETs `${this.baseUrl}/llm/video-job/:jobId` (jobId URI-encoded) with an
 * optional `model` query parameter and returns the backend JSON untouched.
 * API key resolution order: session apiKeyManager → options.apiKey →
 * this.config.apiKey → process.env.LOXIA_API_KEY. 30s timeout.
 * @param {string} jobId - Backend video job identifier
 * @param {Object} [options] - sessionId, apiKey, platformProvided, model
 * @returns {Promise<Object>} Backend job-status response (shape defined by backend)
 * @throws {Error} When no API key is available, the HTTP response is not ok,
 *   or the status check times out (AbortError)
 * @private
 */
async _makeVideoStatusRequest(jobId, options = {}) {
  const statusUrl = `${this.baseUrl}/llm/video-job/${encodeURIComponent(jobId)}`;

  // Get API key
  let apiKey = null;

  if (this.apiKeyManager) {
    const keys = this.apiKeyManager.getKeysForRequest(options.sessionId, {
      platformProvided: options.platformProvided !== false,
      vendor: 'openai' // Sora is Azure OpenAI
    });
    apiKey = keys.loxiaApiKey;
  }

  if (!apiKey && options.apiKey) {
    apiKey = options.apiKey;
  }

  if (!apiKey && this.config.apiKey) {
    apiKey = this.config.apiKey;
  }

  // Fallback to environment variable
  if (!apiKey && process.env.LOXIA_API_KEY) {
    apiKey = process.env.LOXIA_API_KEY;
    this.logger?.debug('Using LOXIA_API_KEY from environment for video status check');
  }

  if (!apiKey) {
    throw new Error('No API key configured. Please configure your Loxia API key in Settings.');
  }

  const statusTimeout = 30000; // 30 second timeout for status check

  try {
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), statusTimeout);

    try {
      // Build query string via URL API (sets model even when empty string)
      const url = new URL(statusUrl);
      url.searchParams.set('model', options.model || '');

      const response = await fetch(url.toString(), {
        method: 'GET',
        headers: {
          'Authorization': `Bearer ${apiKey}`
        },
        signal: controller.signal
      });

      // Cleared again in finally; duplicate clearTimeout is a harmless no-op
      clearTimeout(timeoutId);

      if (!response.ok) {
        const errorText = await response.text();
        const error = new Error(`HTTP ${response.status}: ${response.statusText}${errorText ? ` - ${errorText}` : ''}`);
        error.status = response.status;
        throw error;
      }

      return await response.json();

    } finally {
      clearTimeout(timeoutId);
    }

  } catch (error) {
    // No timeout-specific wrapping here: status polls propagate the raw error
    this.logger.error('Video status request failed', {
      jobId,
      error: error.message,
      status: error.status
    });
    throw error;
  }
}
|
|
2366
|
+
|
|
2367
|
+
/**
|
|
2368
|
+
* Check rate limits
|
|
2369
|
+
* @private
|
|
2370
|
+
*/
|
|
2371
|
+
async _checkRateLimit(model) {
|
|
2372
|
+
// Implementation would check model-specific rate limits
|
|
2373
|
+
// For now, just add a small delay
|
|
2374
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
2375
|
+
}
|
|
2376
|
+
|
|
2377
|
+
/**
|
|
2378
|
+
* Handle rate limit exceeded
|
|
2379
|
+
* @private
|
|
2380
|
+
*/
|
|
2381
|
+
async _handleRateLimit(context) {
|
|
2382
|
+
const delay = 60000; // 1 minute delay for rate limits
|
|
2383
|
+
this.logger.info(`Rate limit exceeded, waiting ${delay}ms`, context);
|
|
2384
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
2385
|
+
}
|
|
2386
|
+
|
|
2387
|
+
/**
|
|
2388
|
+
* Handle service unavailable
|
|
2389
|
+
* @private
|
|
2390
|
+
*/
|
|
2391
|
+
async _handleServiceUnavailable(context) {
|
|
2392
|
+
this._recordFailure();
|
|
2393
|
+
const delay = 30000; // 30 second delay for service issues
|
|
2394
|
+
this.logger.info(`Service unavailable, waiting ${delay}ms`, context);
|
|
2395
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
2396
|
+
}
|
|
2397
|
+
|
|
2398
|
+
/**
|
|
2399
|
+
* Check if circuit breaker is open
|
|
2400
|
+
* @private
|
|
2401
|
+
*/
|
|
2402
|
+
_isCircuitBreakerOpen() {
|
|
2403
|
+
if (!this.circuitBreaker.isOpen) {
|
|
2404
|
+
return false;
|
|
2405
|
+
}
|
|
2406
|
+
|
|
2407
|
+
const timeSinceLastFailure = Date.now() - this.circuitBreaker.lastFailureTime;
|
|
2408
|
+
if (timeSinceLastFailure > this.circuitBreaker.timeout) {
|
|
2409
|
+
this.circuitBreaker.isOpen = false;
|
|
2410
|
+
this.circuitBreaker.failures = 0;
|
|
2411
|
+
return false;
|
|
2412
|
+
}
|
|
2413
|
+
|
|
2414
|
+
return true;
|
|
2415
|
+
}
|
|
2416
|
+
|
|
2417
|
+
/**
|
|
2418
|
+
* Record failure for circuit breaker
|
|
2419
|
+
* @private
|
|
2420
|
+
*/
|
|
2421
|
+
_recordFailure() {
|
|
2422
|
+
this.circuitBreaker.failures++;
|
|
2423
|
+
this.circuitBreaker.lastFailureTime = Date.now();
|
|
2424
|
+
|
|
2425
|
+
if (this.circuitBreaker.failures >= this.circuitBreaker.threshold) {
|
|
2426
|
+
this.circuitBreaker.isOpen = true;
|
|
2427
|
+
this.logger.warn('Circuit breaker opened due to repeated failures');
|
|
2428
|
+
}
|
|
2429
|
+
}
|
|
2430
|
+
|
|
2431
|
+
/**
|
|
2432
|
+
* Reset circuit breaker on success
|
|
2433
|
+
* @private
|
|
2434
|
+
*/
|
|
2435
|
+
_resetCircuitBreaker() {
|
|
2436
|
+
if (this.circuitBreaker.failures > 0) {
|
|
2437
|
+
this.circuitBreaker.failures = 0;
|
|
2438
|
+
this.circuitBreaker.isOpen = false;
|
|
2439
|
+
this.logger.info('Circuit breaker reset - service recovered');
|
|
2440
|
+
}
|
|
2441
|
+
}
|
|
2442
|
+
|
|
2443
|
+
/**
|
|
2444
|
+
* Resolve a model name by its API type from modelsService.
|
|
2445
|
+
* Queries the cached model catalog for models whose api_type array
|
|
2446
|
+
* contains the given type (e.g., 'image_generation', 'video_generation').
|
|
2447
|
+
* @param {string} apiType - The API type to match (e.g., 'image_generation')
|
|
2448
|
+
* @returns {string|null} Model name from the catalog, or null if not found
|
|
2449
|
+
* @private
|
|
2450
|
+
*/
|
|
2451
|
+
_resolveModelByApiType(apiType) {
|
|
2452
|
+
if (!this.modelsService) return null;
|
|
2453
|
+
try {
|
|
2454
|
+
const models = this.modelsService.getModels();
|
|
2455
|
+
const match = models.find(m =>
|
|
2456
|
+
(Array.isArray(m.api_type) && m.api_type.includes(apiType)) ||
|
|
2457
|
+
m.modality === apiType
|
|
2458
|
+
);
|
|
2459
|
+
if (match) {
|
|
2460
|
+
// Use the catalog key (match.name) — the backend resolves deployment internally
|
|
2461
|
+
this.logger?.debug(`Resolved ${apiType} model from catalog`, { model: match.name });
|
|
2462
|
+
return match.name;
|
|
2463
|
+
}
|
|
2464
|
+
} catch (e) {
|
|
2465
|
+
this.logger?.debug(`Failed to resolve ${apiType} model from modelsService`, { error: e.message });
|
|
2466
|
+
}
|
|
2467
|
+
return null;
|
|
2468
|
+
}
|
|
2469
|
+
|
|
2470
|
+
/**
|
|
2471
|
+
* Extract vendor name from model name
|
|
2472
|
+
* @param {string} model - Model name
|
|
2473
|
+
* @returns {string|null} Vendor name
|
|
2474
|
+
* @private
|
|
2475
|
+
*/
|
|
2476
|
+
_getVendorFromModel(model) {
|
|
2477
|
+
if (!model) return null;
|
|
2478
|
+
|
|
2479
|
+
const modelName = model.toLowerCase();
|
|
2480
|
+
|
|
2481
|
+
if (modelName.includes('anthropic') || modelName.includes('claude')) {
|
|
2482
|
+
return 'anthropic';
|
|
2483
|
+
} else if (modelName.includes('openai') || modelName.includes('gpt')) {
|
|
2484
|
+
return 'openai';
|
|
2485
|
+
} else if (modelName.includes('deepseek')) {
|
|
2486
|
+
return 'deepseek';
|
|
2487
|
+
} else if (modelName.includes('phi')) {
|
|
2488
|
+
return 'microsoft';
|
|
2489
|
+
} else if (modelName.startsWith('ollama-')) {
|
|
2490
|
+
return 'ollama';
|
|
2491
|
+
}
|
|
2492
|
+
|
|
2493
|
+
return null;
|
|
2494
|
+
}
|
|
2495
|
+
|
|
2496
|
+
// === Ollama Local Model Methods ===
|
|
2497
|
+
|
|
2498
|
+
/**
 * Send non-streaming message via Ollama (local inference).
 * Verifies the local Ollama daemon is reachable, formats the conversation,
 * sends it, records usage against the budget service (local models are
 * zero-cost), and normalizes the OpenAI-shaped response into this service's
 * flat result shape. Any failure is recorded as a model failure and rethrown.
 * @param {string} model - Ollama model identifier
 * @param {string|Array<Object>} messages - Conversation passed to _formatMessagesForModel
 * @param {Object} options - systemPrompt, temperature, maxTokens, agentId
 * @param {string} requestId - Correlation ID echoed back in the result
 * @returns {Promise<{content: string, model: string, tokenUsage: Object, requestId: string, finishReason: string}>}
 * @throws {Error} When Ollama is not running or the send fails
 * @private
 */
async _sendOllamaMessage(model, messages, options, requestId) {
  try {
    const ollama = this._getOllamaService();
    const available = await ollama.isAvailable();
    if (!available) {
      throw new Error('Ollama is not running. Start it with "ollama serve" and try again.');
    }

    const formattedMessages = this._formatMessagesForModel(messages, model, options);

    const response = await ollama.sendMessage(model, formattedMessages, {
      systemPrompt: options.systemPrompt,
      temperature: options.temperature,
      maxTokens: options.maxTokens
    });

    // Track usage (with zero cost — local models are free)
    if (response.usage && this.budgetService) {
      await this.budgetService.trackUsage(options.agentId, model, response.usage);
    }

    // Recorded only after usage tracking succeeds
    this.recordModelSuccess(model);

    // Normalize the OpenAI-style choices/usage shape into this service's flat result
    return {
      content: response.choices[0]?.message?.content || '',
      model: response.model,
      tokenUsage: response.usage,
      requestId,
      finishReason: response.choices[0]?.finish_reason || 'stop'
    };
  } catch (error) {
    // Availability failures are also counted as 'ollama_error' before rethrow
    this.recordModelFailure(model, 'ollama_error');
    this.logger?.error(`[Ollama] sendMessage failed: ${error.message}`, {
      requestId, model, agentId: options.agentId
    });
    throw error;
  }
}
|
|
2540
|
+
|
|
2541
|
+
/**
|
|
2542
|
+
* Send streaming message via Ollama (local inference)
|
|
2543
|
+
* @private
|
|
2544
|
+
*/
|
|
2545
|
+
async _sendOllamaStream(model, messages, options, requestId) {
|
|
2546
|
+
try {
|
|
2547
|
+
const ollama = this._getOllamaService();
|
|
2548
|
+
const available = await ollama.isAvailable();
|
|
2549
|
+
if (!available) {
|
|
2550
|
+
throw new Error('Ollama is not running. Start it with "ollama serve" and try again.');
|
|
2551
|
+
}
|
|
2552
|
+
|
|
2553
|
+
const formattedMessages = this._formatMessagesForModel(messages, model, options);
|
|
2554
|
+
|
|
2555
|
+
const result = await ollama.sendMessageStream(model, formattedMessages, {
|
|
2556
|
+
systemPrompt: options.systemPrompt,
|
|
2557
|
+
temperature: options.temperature,
|
|
2558
|
+
maxTokens: options.maxTokens,
|
|
2559
|
+
onChunk: options.onChunk,
|
|
2560
|
+
onDone: options.onDone,
|
|
2561
|
+
onError: options.onError
|
|
2562
|
+
});
|
|
2563
|
+
|
|
2564
|
+
// Track usage (zero cost)
|
|
2565
|
+
if (result.tokenUsage && this.budgetService) {
|
|
2566
|
+
await this.budgetService.trackUsage(options.agentId, model, result.tokenUsage);
|
|
2567
|
+
}
|
|
2568
|
+
|
|
2569
|
+
this.recordModelSuccess(model);
|
|
2570
|
+
|
|
2571
|
+
return {
|
|
2572
|
+
content: result.content,
|
|
2573
|
+
model: result.model,
|
|
2574
|
+
tokenUsage: result.tokenUsage,
|
|
2575
|
+
requestId,
|
|
2576
|
+
finishReason: result.finishReason || 'stop'
|
|
2577
|
+
};
|
|
2578
|
+
} catch (error) {
|
|
2579
|
+
this.recordModelFailure(model, 'ollama_error');
|
|
2580
|
+
this.logger?.error(`[Ollama] sendMessageStream failed: ${error.message}`, {
|
|
2581
|
+
requestId, model, agentId: options.agentId
|
|
2582
|
+
});
|
|
2583
|
+
if (options.onError) options.onError(error);
|
|
2584
|
+
throw error;
|
|
2585
|
+
}
|
|
2586
|
+
}
|
|
2587
|
+
|
|
2588
|
+
/**
|
|
2589
|
+
* Get available Ollama models (for model discovery)
|
|
2590
|
+
*/
|
|
2591
|
+
async getOllamaModels() {
|
|
2592
|
+
const ollama = this._getOllamaService();
|
|
2593
|
+
return ollama.listModels();
|
|
2594
|
+
}
|
|
2595
|
+
}
|
|
2596
|
+
|
|
2597
|
+
/**
 * ConversationManager - Handles multi-model conversation state
 */
class ConversationManager {
  /**
   * @param {string} agentId - Id of the agent owning these conversations
   * @param {Object} logger - Logger instance (may be null/undefined)
   */
  constructor(agentId, logger) {
    this.agentId = agentId;
    this.logger = logger;
    // Per-model conversation store; populated by future switching logic.
    this.conversations = new Map();
  }

  /**
   * Switch the active conversation to a different model.
   * Placeholder implementation: always yields a fresh, empty conversation.
   *
   * @param {string} newModel - Target model name
   * @returns {Promise<Object>} { messages, model, lastUpdated }
   */
  async switchModel(newModel) {
    const lastUpdated = new Date().toISOString();
    return {
      messages: [],
      model: newModel,
      lastUpdated
    };
  }
}
|
|
2617
|
+
|
|
2618
|
+
export default AIService;
|