@kispace-io/extension-ai-system 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +20 -0
- package/src/agents/agent-registry.ts +65 -0
- package/src/agents/index.ts +4 -0
- package/src/agents/message-processor.ts +50 -0
- package/src/agents/prompt-builder.ts +167 -0
- package/src/ai-system-extension.ts +104 -0
- package/src/aisystem.json +154 -0
- package/src/chat-provider-contributions.ts +95 -0
- package/src/core/constants.ts +23 -0
- package/src/core/index.ts +6 -0
- package/src/core/interfaces.ts +137 -0
- package/src/core/types.ts +126 -0
- package/src/general-assistant-prompt.txt +14 -0
- package/src/i18n.json +11 -0
- package/src/index.ts +13 -0
- package/src/prompt-enhancer-contributions.ts +29 -0
- package/src/providers/index.ts +5 -0
- package/src/providers/ollama-provider.ts +13 -0
- package/src/providers/openai-provider.ts +12 -0
- package/src/providers/provider-factory.ts +36 -0
- package/src/providers/provider.ts +156 -0
- package/src/providers/streaming/ollama-parser.ts +114 -0
- package/src/providers/streaming/sse-parser.ts +152 -0
- package/src/providers/streaming/stream-parser.ts +16 -0
- package/src/register.ts +16 -0
- package/src/service/ai-service.ts +744 -0
- package/src/service/token-usage-tracker.ts +139 -0
- package/src/tools/index.ts +4 -0
- package/src/tools/tool-call-accumulator.ts +81 -0
- package/src/tools/tool-executor.ts +174 -0
- package/src/tools/tool-registry.ts +70 -0
- package/src/translation.ts +3 -0
- package/src/utils/token-estimator.ts +87 -0
- package/src/utils/tool-detector.ts +144 -0
- package/src/view/agent-group-manager.ts +146 -0
- package/src/view/components/ai-agent-response-card.ts +198 -0
- package/src/view/components/ai-agent-response-group.ts +220 -0
- package/src/view/components/ai-chat-input.ts +131 -0
- package/src/view/components/ai-chat-message.ts +615 -0
- package/src/view/components/ai-empty-state.ts +52 -0
- package/src/view/components/ai-loading-indicator.ts +91 -0
- package/src/view/components/index.ts +7 -0
- package/src/view/components/k-ai-config-editor.ts +828 -0
- package/src/view/index.ts +6 -0
- package/src/view/k-aiview.ts +901 -0
- package/src/view/k-token-usage.ts +220 -0
- package/src/view/provider-manager.ts +196 -0
- package/src/view/session-manager.ts +255 -0
- package/src/view/stream-manager.ts +123 -0
- package/src/workflows/conditional-workflow.ts +98 -0
- package/src/workflows/index.ts +6 -0
- package/src/workflows/parallel-workflow.ts +45 -0
- package/src/workflows/sequential-workflow.ts +95 -0
- package/src/workflows/workflow-engine.ts +63 -0
- package/src/workflows/workflow-strategy.ts +21 -0
- package/tsconfig.json +12 -0
package/package.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@kispace-io/extension-ai-system",
|
|
3
|
+
"version": "0.8.0",
|
|
4
|
+
"type": "module",
|
|
5
|
+
"main": "./src/index.ts",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"import": "./src/index.ts",
|
|
9
|
+
"types": "./src/index.ts"
|
|
10
|
+
}
|
|
11
|
+
},
|
|
12
|
+
"dependencies": {
|
|
13
|
+
"@kispace-io/core": "*",
|
|
14
|
+
"@kispace-io/extension-in-browser-ml": "*",
|
|
15
|
+
"marked": "^12.0.0 || ^16.4.1"
|
|
16
|
+
},
|
|
17
|
+
"devDependencies": {
|
|
18
|
+
"typescript": "^5.9.3"
|
|
19
|
+
}
|
|
20
|
+
}
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import { contributionRegistry } from "@kispace-io/core";
|
|
2
|
+
import type { ExecutionContext } from "@kispace-io/core";
|
|
3
|
+
import type { AgentContribution } from "../core/interfaces";
|
|
4
|
+
import { CID_AGENTS } from "../core/constants";
|
|
5
|
+
|
|
6
|
+
export class AgentRegistry {
|
|
7
|
+
getAgentContributions(): AgentContribution[] {
|
|
8
|
+
return contributionRegistry.getContributions(CID_AGENTS) as AgentContribution[];
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
filterAndSortAgents(
|
|
12
|
+
contributions: AgentContribution[],
|
|
13
|
+
context: ExecutionContext
|
|
14
|
+
): AgentContribution[] {
|
|
15
|
+
return contributions
|
|
16
|
+
.filter(contrib => {
|
|
17
|
+
if (contrib.canHandle) {
|
|
18
|
+
return contrib.canHandle(context);
|
|
19
|
+
}
|
|
20
|
+
return true;
|
|
21
|
+
})
|
|
22
|
+
.sort((a, b) => (b.priority || 0) - (a.priority || 0));
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
getMatchingAgents(
|
|
26
|
+
context: ExecutionContext,
|
|
27
|
+
roles?: string[]
|
|
28
|
+
): AgentContribution[] {
|
|
29
|
+
const contributions = this.getAgentContributions();
|
|
30
|
+
|
|
31
|
+
if (contributions.length === 0) {
|
|
32
|
+
throw new Error('No agents are registered. The App Support agent should be available from the AI system extension.');
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
let activeContributions = contributions.filter(contrib => {
|
|
36
|
+
if (roles && !roles.includes(contrib.role)) {
|
|
37
|
+
return false;
|
|
38
|
+
}
|
|
39
|
+
if (contrib.canHandle) {
|
|
40
|
+
return contrib.canHandle(context);
|
|
41
|
+
}
|
|
42
|
+
return true;
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
if (roles && roles.length > 0) {
|
|
46
|
+
activeContributions = activeContributions.sort(
|
|
47
|
+
(a, b) => (b.priority || 0) - (a.priority || 0)
|
|
48
|
+
);
|
|
49
|
+
|
|
50
|
+
if (activeContributions.length === 0) {
|
|
51
|
+
const requestedRoles = roles.join(', ');
|
|
52
|
+
throw new Error(`No agents found for requested roles: ${requestedRoles}. Available agents: ${contributions.map(c => c.role).join(', ')}`);
|
|
53
|
+
}
|
|
54
|
+
} else {
|
|
55
|
+
activeContributions = this.filterAndSortAgents(activeContributions, context);
|
|
56
|
+
|
|
57
|
+
if (activeContributions.length === 0) {
|
|
58
|
+
throw new Error(`No agents can handle the current context. Available agents: ${contributions.map(c => c.role).join(', ')}`);
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
return activeContributions;
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import type { ExecutionContext } from "@kispace-io/core";
|
|
2
|
+
import type { ChatMessage, UserAttentionRequest } from "../core/types";
|
|
3
|
+
import type { MessageProcessor as IMessageProcessor, AgentContribution } from "../core/interfaces";
|
|
4
|
+
|
|
5
|
+
export class MessageProcessorService {
|
|
6
|
+
private processors: IMessageProcessor[] = [];
|
|
7
|
+
|
|
8
|
+
addProcessor(processor: IMessageProcessor): void {
|
|
9
|
+
this.processors.push(processor);
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
private getSortedProcessors(): IMessageProcessor[] {
|
|
13
|
+
return [...this.processors].sort(
|
|
14
|
+
(a, b) => (b.priority || 0) - (a.priority || 0)
|
|
15
|
+
);
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
async process(
|
|
19
|
+
message: ChatMessage,
|
|
20
|
+
contribution: AgentContribution,
|
|
21
|
+
context: ExecutionContext
|
|
22
|
+
): Promise<ChatMessage> {
|
|
23
|
+
let processedMessage = { ...message };
|
|
24
|
+
|
|
25
|
+
const allProcessors = [
|
|
26
|
+
...(contribution.messageProcessors || []),
|
|
27
|
+
...this.processors
|
|
28
|
+
].sort((a, b) => (b.priority || 0) - (a.priority || 0));
|
|
29
|
+
|
|
30
|
+
for (const processor of allProcessors) {
|
|
31
|
+
processedMessage = await processor.process(processedMessage, context);
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
const requiresAttention = processedMessage.actions?.some(a => a.requiresAttention) ||
|
|
35
|
+
processedMessage.attentionRequests?.some(r => r.requiresAction) ||
|
|
36
|
+
false;
|
|
37
|
+
|
|
38
|
+
return {
|
|
39
|
+
...processedMessage,
|
|
40
|
+
requiresAttention
|
|
41
|
+
};
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
private checkRequiresAttention(message: ChatMessage): boolean {
|
|
45
|
+
return message.actions?.some(a => a.requiresAttention) ||
|
|
46
|
+
message.attentionRequests?.some(r => r.requiresAction) ||
|
|
47
|
+
false;
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
import type { ExecutionContext } from "@kispace-io/core";
|
|
2
|
+
import type { ChatMessage, ApiMessage } from "../core/types";
|
|
3
|
+
import type { PromptEnhancer, AgentContribution, AgentToolsConfig, PromptEnhancerContribution } from "../core/interfaces";
|
|
4
|
+
import { ToolRegistry } from "../tools/tool-registry";
|
|
5
|
+
import { contributionRegistry } from "@kispace-io/core";
|
|
6
|
+
import { CID_PROMPT_ENHANCERS } from "../core/constants";
|
|
7
|
+
import { toolDetector } from "../utils/tool-detector";
|
|
8
|
+
|
|
9
|
+
/**
 * Assembles the final message array sent to a chat provider for one agent:
 * system prompt (optionally run through prompt enhancers), rewritten chat
 * history, and — when enabled — the tool definitions available in the
 * current context.
 */
export class PromptBuilder {
  // Source of the tool definitions offered to the model.
  private toolRegistry = new ToolRegistry();
  // Enhancers added programmatically via addEnhancer (in addition to
  // contribution-level and registry-contributed enhancers).
  private enhancers: PromptEnhancer[] = [];

  /** Registers an enhancer that will be applied to every agent's system prompt. */
  addEnhancer(enhancer: PromptEnhancer): void {
    this.enhancers.push(enhancer);
  }

  /**
   * Resolves the contribution's system prompt (calling it if it is a
   * factory) and runs it through all applicable enhancers, highest
   * priority first.
   *
   * @throws Error when the contribution yields no string system prompt
   */
  private async getSysPrompt(
    contribution: AgentContribution,
    context: ExecutionContext
  ): Promise<string> {
    let sysPrompt = contribution.sysPrompt;
    if (typeof sysPrompt === "function") {
      // NOTE(review): the factory is invoked synchronously; a
      // Promise-returning factory would fail the string check below —
      // confirm sysPrompt factories are synchronous.
      sysPrompt = sysPrompt();
    }

    // The App Support agent should always have a sysPrompt
    if (!sysPrompt || typeof sysPrompt !== 'string') {
      throw new Error(`Agent "${contribution.role}" (${contribution.label}) is missing a system prompt. All agents must provide a sysPrompt.`);
    }

    // Apply prompt enhancers to system prompt instead of user prompt.
    // This keeps contextual information in the system prompt
    // (instructions/context) and keeps the user prompt clean (just the
    // user's request). Sources, merged then sorted by descending priority
    // (missing priority counts as 0): contribution-level, programmatic,
    // and registry-contributed enhancers.
    const allEnhancers = [
      ...(contribution.promptEnhancers || []),
      ...this.enhancers,
      ...this.getContributedEnhancers()
    ].sort((a, b) => (b.priority || 0) - (a.priority || 0));

    let enhancedSysPrompt = sysPrompt;
    for (const enhancer of allEnhancers) {
      try {
        enhancedSysPrompt = await enhancer.enhance(enhancedSysPrompt, context);
        // Ensure enhancer returns a string
        if (!enhancedSysPrompt || typeof enhancedSysPrompt !== 'string') {
          enhancedSysPrompt = sysPrompt; // Fallback to original if enhancer returns invalid value
        }
      } catch (err) {
        // If an enhancer fails, log the error but continue with the current prompt
        console.warn(`[PromptBuilder] Enhancer failed:`, err);
        // Continue with the current enhancedSysPrompt value
      }
    }

    return enhancedSysPrompt;
  }

  /**
   * Rewrites multi-agent chat history from the perspective of one agent:
   * the target agent's own messages become "assistant" turns, other
   * agents' messages are folded into "user" turns labeled with the
   * originating agent's role, and user messages pass through unchanged.
   *
   * NOTE(review): the returned objects carry only role/content — any
   * tool_call_id/tool_calls preserved by sanitizeMessageForAPI are dropped
   * here. Confirm tool messages are not expected to survive this rewrite.
   */
  private rewriteChatHistoryForAgent(
    history: ApiMessage[],
    targetRole: string
  ): ApiMessage[] {
    return history.map(m => {
      if (m.role === "user") {
        return { role: m.role, content: m.content };
      }
      if (m.role === targetRole) {
        return { role: "assistant", content: m.content };
      }
      return {
        role: "user",
        content: `***Agent '${m.role}' replied:***\n${m.content}`
      };
    });
  }

  /**
   * Collects enhancers registered via the prompt-enhancer contribution
   * point. A priority set on the contribution overrides the enhancer's
   * own priority.
   */
  private getContributedEnhancers(): PromptEnhancer[] {
    const contributions = contributionRegistry.getContributions(CID_PROMPT_ENHANCERS) as PromptEnhancerContribution[];
    return contributions.map(contrib => ({
      ...contrib.enhancer,
      priority: contrib.priority ?? contrib.enhancer.priority
    }));
  }

  // Removed enhancePrompt method - prompt enhancers now enhance the system prompt instead
  // This keeps contextual information in system prompts and user prompts clean

  /**
   * Builds the provider-ready message array for an agent.
   *
   * Steps: optional beforeSend hook, sanitize messages to the API shape,
   * rewrite history for the agent's perspective, resolve the tool config
   * (possibly via the ML-based tool detector), and prepend the enhanced
   * system prompt.
   *
   * @param hooks optional beforeSend callback invoked with the raw
   *              messages before any transformation
   * @returns the messages (system prompt at index 0), the index of the
   *          last message (assumed to be the latest user prompt), and the
   *          tool definitions to offer, if any
   */
  async build(
    contribution: AgentContribution,
    messages: ChatMessage[],
    context: ExecutionContext,
    hooks?: { beforeSend?: (messages: ChatMessage[], context: ExecutionContext) => Promise<void> }
  ): Promise<{ messages: ApiMessage[]; userPromptIndex: number; tools?: import("../core/types").ToolDefinition[] }> {
    if (hooks?.beforeSend) {
      await hooks.beforeSend(messages, context);
    }

    const sanitizedMessages = this.sanitizeMessagesForAPI(messages);
    const messagesCopy = this.rewriteChatHistoryForAgent(sanitizedMessages, contribution.role);

    // Resolve tools config if it's a function
    let toolsConfig = contribution.tools;
    if (typeof toolsConfig === 'function') {
      toolsConfig = await toolsConfig();
    }

    let tools: import("../core/types").ToolDefinition[] | undefined;
    if (toolsConfig?.enabled) {
      // If smart tool detection is enabled, check if the prompt needs tools
      if (toolsConfig.smartToolDetection) {
        const lastUserMessage = messages[messages.length - 1];
        const userPrompt = lastUserMessage?.content || '';

        // Use ML model to detect if tools are needed
        const needsTools = await toolDetector.needsTools(userPrompt);

        if (needsTools) {
          tools = this.toolRegistry.getAvailableTools(
            context,
            toolsConfig.commandFilter
          );
        }
        // If not needed, tools remains undefined (no tools provided)
      } else {
        // Default behavior: always provide tools if enabled
        tools = this.toolRegistry.getAvailableTools(
          context,
          toolsConfig.commandFilter
        );
      }
    }

    // Enhance system prompt with contextual information (app state, language context, etc.)
    // Keep user prompt clean - just the user's actual request
    const sysPrompt = await this.getSysPrompt(contribution, context);
    messagesCopy.unshift({
      role: "system",
      content: sysPrompt
    });

    // Index of the last message AFTER prepending the system message
    // (system sits at index 0; the latest user prompt is the final entry).
    const lastUserPromptIndex = messagesCopy.length - 1;

    return { messages: messagesCopy, userPromptIndex: lastUserPromptIndex, tools };
  }

  /**
   * Strips a message down to the fields the chat API accepts: role and
   * content always; tool_call_id and tool_calls only when present on the
   * source message.
   */
  private sanitizeMessageForAPI(message: ChatMessage | ApiMessage): ApiMessage {
    const apiMessage: ApiMessage = {
      role: message.role,
      content: message.content
    };

    if ('tool_call_id' in message && message.tool_call_id) {
      apiMessage.tool_call_id = message.tool_call_id;
    }

    if ('tool_calls' in message && (message as any).tool_calls) {
      apiMessage.tool_calls = (message as any).tool_calls;
    }

    return apiMessage;
  }

  /** Applies sanitizeMessageForAPI to every message in order. */
  private sanitizeMessagesForAPI(messages: (ChatMessage | ApiMessage)[]): ApiMessage[] {
    return messages.map(msg => this.sanitizeMessageForAPI(msg));
  }
}
|
|
167
|
+
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import { html } from "lit";
|
|
2
|
+
import { rootContext } from "@kispace-io/core";
|
|
3
|
+
import { aiService } from "./service/ai-service";
|
|
4
|
+
import { contributionRegistry, HTMLContribution } from "@kispace-io/core";
|
|
5
|
+
import { editorRegistry, EditorInput } from "@kispace-io/core";
|
|
6
|
+
import { registerAll } from "@kispace-io/core";
|
|
7
|
+
import { TOOLBAR_BOTTOM, TOOLBAR_MAIN_RIGHT, SIDEBAR_AUXILIARY } from "@kispace-io/core";
|
|
8
|
+
import { CID_AGENTS, KEY_AI_CONFIG } from "./core/constants";
|
|
9
|
+
import type { AgentContribution, AgentToolsConfig } from "./core/interfaces";
|
|
10
|
+
import type { AIConfig } from "./core/types";
|
|
11
|
+
import { appSettings } from "@kispace-io/core";
|
|
12
|
+
import { i18n } from "@kispace-io/core";
|
|
13
|
+
import GENERAL_SYS_PROMPT from "./general-assistant-prompt.txt?raw";
|
|
14
|
+
import "./chat-provider-contributions";
|
|
15
|
+
import "./prompt-enhancer-contributions";
|
|
16
|
+
import "./view/k-aiview";
|
|
17
|
+
import "./view/k-token-usage";
|
|
18
|
+
import "./view/components/k-ai-config-editor";
|
|
19
|
+
import aisystemBundle from "./aisystem.json";
|
|
20
|
+
import { SYSTEM_LANGUAGE_BUNDLES } from "@kispace-io/core";
|
|
21
|
+
|
|
22
|
+
import { t } from "./translation";
|
|
23
|
+
|
|
24
|
+
// Register language bundle as early as possible to avoid race conditions
// (statement order in this module is load-bearing: later registrations
// resolve labels through t(), which needs the bundle installed first).
contributionRegistry.registerContribution(SYSTEM_LANGUAGE_BUNDLES, aisystemBundle as any);

// AI chat view in the auxiliary sidebar.
contributionRegistry.registerContribution(SIDEBAR_AUXILIARY, {
  name: "aiview",
  label: t('SIDEBAR_LABEL'),
  icon: "robot",
  component: (id: string) => html`<k-aiview id="${id}"></k-aiview>`
});

// Register default App Support agent with general assistant prompt
// Apps can enhance this prompt using prompt enhancers
// smartToolDetection is read from AIConfig dynamically
contributionRegistry.registerContribution(CID_AGENTS, {
  label: t('APP_SUPPORT'),
  description: t('APP_SUPPORT_DESC'),
  role: "appsupport",
  priority: 100,
  icon: "question-circle",
  sysPrompt: GENERAL_SYS_PROMPT,
  tools: () => {
    // tools is a factory returning a Promise so the smartToolDetection
    // flag is re-read from AIConfig on each use; the prompt builder
    // awaits the resolved config.
    return appSettings.get(KEY_AI_CONFIG).then((config: AIConfig | undefined) => {
      const smartToolDetection = config?.smartToolDetection ?? false;
      return {
        enabled: true,
        smartToolDetection
      } as AgentToolsConfig;
    });
  }
} as AgentContribution);

// Token-usage widget in the bottom toolbar.
contributionRegistry.registerContribution(TOOLBAR_BOTTOM, {
  target: TOOLBAR_BOTTOM,
  label: t('TOKEN_USAGE'),
  html: `<k-token-usage></k-token-usage>`
} as HTMLContribution);

// Route the '.system.ai-config' editor key to the AI config editor widget.
editorRegistry.registerEditorInputHandler({
  ranking: 1000,
  canHandle: (input: EditorInput) => {
    return input.key === '.system.ai-config';
  },
  handle: async (input: EditorInput) => {
    input.editorId = "ai-config-editor";
    input.widgetFactory = () => html`
      <k-ai-config-editor .input=${input}></k-ai-config-editor>
    `;
    return input;
  }
});

// Command + main-toolbar button that opens the AI settings editor.
registerAll({
  command: {
    "id": "open_ai_config",
    "name": t('OPEN_AI_CONFIG'),
    "description": t('OPEN_AI_CONFIG_DESC'),
    "parameters": []
  },
  handler: {
    execute: _context => {
      const editorInput = {
        title: t('AI_SETTINGS'),
        data: {},
        key: ".system.ai-config",
        icon: "robot",
        state: {},
      } as EditorInput;
      // Fire-and-forget open.
      // NOTE(review): '.then()' with no arguments leaves rejections
      // unhandled — consider '.catch(...)' or 'void' to signal intent.
      editorRegistry.loadEditor(editorInput).then();
    }
  },
  contribution: {
    target: TOOLBAR_MAIN_RIGHT,
    icon: "robot",
    label: t('AI_CONFIG'),
  }
});

// Expose the AI service to other extensions through the root context.
rootContext.put("aiService", aiService);
|
|
104
|
+
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
{
|
|
2
|
+
"namespace": "aisystem",
|
|
3
|
+
"en": {
|
|
4
|
+
"APP_SUPPORT": "App Support",
|
|
5
|
+
"APP_SUPPORT_DESC": "General app support",
|
|
6
|
+
"SIDEBAR_LABEL": "AI",
|
|
7
|
+
"TOKEN_USAGE": "Token Usage",
|
|
8
|
+
"OPEN_AI_CONFIG": "Open AI Config",
|
|
9
|
+
"OPEN_AI_CONFIG_DESC": "Opens the AI settings editor",
|
|
10
|
+
"AI_SETTINGS": "AI Settings",
|
|
11
|
+
"AI_CONFIG": "AI Config",
|
|
12
|
+
"NO_PROVIDER_CONFIGURED": "No AI provider configured",
|
|
13
|
+
"CLICK_SETTINGS_TO_CONFIGURE": "Click the settings button to configure",
|
|
14
|
+
"TYPE_MESSAGE_ENTER": "Type your message and press Enter...",
|
|
15
|
+
"STOP": "Stop",
|
|
16
|
+
"SETTINGS": "Settings",
|
|
17
|
+
"CLOSE": "Close",
|
|
18
|
+
"ARCHIVED_SESSIONS": "Archived Sessions",
|
|
19
|
+
"RESTORE": "Restore",
|
|
20
|
+
"DELETE_ALL_ARCHIVED": "Delete All Archived",
|
|
21
|
+
"DELETE_ALL_CONFIRM": "Are you sure you want to permanently delete all {count} archived session{plural}?",
|
|
22
|
+
"NEW_CHAT": "New Chat",
|
|
23
|
+
"CHAT": "Chat",
|
|
24
|
+
"CANCEL": "Cancel",
|
|
25
|
+
"APPROVE": "Approve",
|
|
26
|
+
"TOOL_APPROVAL_REQUEST": "Tool Approval Request",
|
|
27
|
+
"TOOL_CALLS": "Tool Calls",
|
|
28
|
+
"TOOL_APPROVAL_MESSAGE": "The AI wants to execute {toolCount} tool call{plural}: {toolList}",
|
|
29
|
+
"COPY": "Copy",
|
|
30
|
+
"AI_ASSISTANT": "AI Assistant",
|
|
31
|
+
"MULTIPLE_AGENTS": "Multiple Agents",
|
|
32
|
+
"QUICK_ACTIONS": "Quick Actions",
|
|
33
|
+
"TOKEN_USAGE_STATS": "Token Usage Statistics",
|
|
34
|
+
"TOTAL_USAGE": "Total Usage",
|
|
35
|
+
"TOTAL": "Total",
|
|
36
|
+
"PROMPT": "Prompt",
|
|
37
|
+
"COMPLETION": "Completion",
|
|
38
|
+
"REQUESTS": "Requests",
|
|
39
|
+
"BY_PROVIDER": "By Provider",
|
|
40
|
+
"RESET_STATISTICS": "Reset Statistics",
|
|
41
|
+
"RESET_CONFIRM": "Reset all token usage statistics?",
|
|
42
|
+
"TOKENS": "tokens",
|
|
43
|
+
"LOADING_MODELS": "Loading models...",
|
|
44
|
+
"SELECT_MODEL": "Select a model",
|
|
45
|
+
"TOOL_EXECUTION_PENDING": "Tool execution pending: {toolCount} tool{plural} ({toolName}{more})",
|
|
46
|
+
"AGENT_WANTS_TO_EXECUTE": "Agent \"{role}\" wants to execute the following tools:",
|
|
47
|
+
"ALWAYS_ALLOW": "Always allow",
|
|
48
|
+
"COPY_CODE": "Copy code",
|
|
49
|
+
"REQUIRES_ATTENTION": "Requires attention",
|
|
50
|
+
"RESEND": "Resend",
|
|
51
|
+
"CONFIRM": "Confirm",
|
|
52
|
+
"REJECT": "Reject",
|
|
53
|
+
"ENTER_RESPONSE": "Enter your response...",
|
|
54
|
+
"SUBMIT": "Submit",
|
|
55
|
+
"CONTINUE_WORKFLOW": "Continue Workflow",
|
|
56
|
+
"PROVIDERS": "Providers",
|
|
57
|
+
"ADD_PROVIDER": "Add Provider",
|
|
58
|
+
"DEFAULT": "Default",
|
|
59
|
+
"NAME": "Name",
|
|
60
|
+
"MODEL": "Model",
|
|
61
|
+
"API_ENDPOINT": "API Endpoint",
|
|
62
|
+
"API_KEY": "API Key",
|
|
63
|
+
"OCR_ENDPOINT": "OCR Endpoint",
|
|
64
|
+
"OCR_MODEL": "OCR Model",
|
|
65
|
+
"ACTIONS": "Actions",
|
|
66
|
+
"DELETE_PROVIDER": "Delete",
|
|
67
|
+
"DELETE_PROVIDER_CONFIRM": "Delete provider \"{name}\"?",
|
|
68
|
+
"NO_PROVIDERS_CONFIGURED": "No providers configured. Click \"Add Provider\" to get started.",
|
|
69
|
+
"TOOL_APPROVALS": "Tool Approvals",
|
|
70
|
+
"REQUIRE_APPROVAL_BEFORE_EXECUTING": "Require approval before executing tools",
|
|
71
|
+
"SMART_TOOL_DETECTION": "Use smart tool detection (reduces token usage)",
|
|
72
|
+
"SMART_TOOL_DETECTION_HINT": "When enabled, a small ML model running in your browser will detect if a prompt needs tools. This reduces token usage for simple queries like greetings. Note: The model (approximately 60-80MB quantized) will be downloaded on first use, which may take some time.",
|
|
73
|
+
"APPROVED_COMMANDS": "Approved Commands",
|
|
74
|
+
"SELECT_COMMANDS_WITHOUT_APPROVAL": "Select commands that can be executed without approval:",
|
|
75
|
+
"COMMANDS_AUTO_APPROVED": "These commands will be approved automatically when approval is enabled:",
|
|
76
|
+
"OPTIONAL": "Optional"
|
|
77
|
+
},
|
|
78
|
+
"de": {
|
|
79
|
+
"APP_SUPPORT": "App-Unterstützung",
|
|
80
|
+
"APP_SUPPORT_DESC": "Allgemeine App-Unterstützung",
|
|
81
|
+
"SIDEBAR_LABEL": "KI",
|
|
82
|
+
"TOKEN_USAGE": "Token-Verbrauch",
|
|
83
|
+
"OPEN_AI_CONFIG": "KI-Konfiguration öffnen",
|
|
84
|
+
"OPEN_AI_CONFIG_DESC": "Öffnet den KI-Einstellungseditor",
|
|
85
|
+
"AI_SETTINGS": "KI-Einstellungen",
|
|
86
|
+
"AI_CONFIG": "KI-Konfiguration",
|
|
87
|
+
"NO_PROVIDER_CONFIGURED": "Kein KI-Anbieter konfiguriert",
|
|
88
|
+
"CLICK_SETTINGS_TO_CONFIGURE": "Klicken Sie auf die Einstellungsschaltfläche zum Konfigurieren",
|
|
89
|
+
"TYPE_MESSAGE_ENTER": "Geben Sie Ihre Nachricht ein und drücken Sie Enter...",
|
|
90
|
+
"STOP": "Stopp",
|
|
91
|
+
"SETTINGS": "Einstellungen",
|
|
92
|
+
"CLOSE": "Schließen",
|
|
93
|
+
"ARCHIVED_SESSIONS": "Archivierte Sitzungen",
|
|
94
|
+
"RESTORE": "Wiederherstellen",
|
|
95
|
+
"DELETE_ALL_ARCHIVED": "Alle archivierten löschen",
|
|
96
|
+
"DELETE_ALL_CONFIRM": "Möchten Sie wirklich alle {count} archivierte Sitzung{plural} dauerhaft löschen?",
|
|
97
|
+
"NEW_CHAT": "Neuer Chat",
|
|
98
|
+
"CHAT": "Chat",
|
|
99
|
+
"CANCEL": "Abbrechen",
|
|
100
|
+
"APPROVE": "Genehmigen",
|
|
101
|
+
"TOOL_APPROVAL_REQUEST": "Werkzeug-Genehmigungsanfrage",
|
|
102
|
+
"TOOL_CALLS": "Werkzeugaufrufe",
|
|
103
|
+
"TOOL_APPROVAL_MESSAGE": "Die KI möchte {toolCount} Werkzeugaufruf{plural} ausführen: {toolList}",
|
|
104
|
+
"COPY": "Kopieren",
|
|
105
|
+
"AI_ASSISTANT": "KI-Assistent",
|
|
106
|
+
"MULTIPLE_AGENTS": "Mehrere Agenten",
|
|
107
|
+
"QUICK_ACTIONS": "Schnellaktionen",
|
|
108
|
+
"TOKEN_USAGE_STATS": "Token-Verbrauchsstatistiken",
|
|
109
|
+
"TOTAL_USAGE": "Gesamtverbrauch",
|
|
110
|
+
"TOTAL": "Gesamt",
|
|
111
|
+
"PROMPT": "Eingabeaufforderung",
|
|
112
|
+
"COMPLETION": "Vervollständigung",
|
|
113
|
+
"REQUESTS": "Anfragen",
|
|
114
|
+
"BY_PROVIDER": "Nach Anbieter",
|
|
115
|
+
"RESET_STATISTICS": "Statistiken zurücksetzen",
|
|
116
|
+
"RESET_CONFIRM": "Alle Token-Verbrauchsstatistiken zurücksetzen?",
|
|
117
|
+
"TOKENS": "Tokens",
|
|
118
|
+
"LOADING_MODELS": "Modelle werden geladen...",
|
|
119
|
+
"SELECT_MODEL": "Modell auswählen",
|
|
120
|
+
"TOOL_EXECUTION_PENDING": "Werkzeugausführung ausstehend: {toolCount} Werkzeug{plural} ({toolName}{more})",
|
|
121
|
+
"AGENT_WANTS_TO_EXECUTE": "Agent \"{role}\" möchte die folgenden Werkzeuge ausführen:",
|
|
122
|
+
"ALWAYS_ALLOW": "Immer erlauben",
|
|
123
|
+
"COPY_CODE": "Code kopieren",
|
|
124
|
+
"REQUIRES_ATTENTION": "Erfordert Aufmerksamkeit",
|
|
125
|
+
"RESEND": "Erneut senden",
|
|
126
|
+
"CONFIRM": "Bestätigen",
|
|
127
|
+
"REJECT": "Ablehnen",
|
|
128
|
+
"ENTER_RESPONSE": "Geben Sie Ihre Antwort ein...",
|
|
129
|
+
"SUBMIT": "Absenden",
|
|
130
|
+
"CONTINUE_WORKFLOW": "Workflow fortsetzen",
|
|
131
|
+
"PROVIDERS": "Anbieter",
|
|
132
|
+
"ADD_PROVIDER": "Anbieter hinzufügen",
|
|
133
|
+
"DEFAULT": "Standard",
|
|
134
|
+
"NAME": "Name",
|
|
135
|
+
"MODEL": "Modell",
|
|
136
|
+
"API_ENDPOINT": "API-Endpunkt",
|
|
137
|
+
"API_KEY": "API-Schlüssel",
|
|
138
|
+
"OCR_ENDPOINT": "OCR-Endpunkt",
|
|
139
|
+
"OCR_MODEL": "OCR-Modell",
|
|
140
|
+
"ACTIONS": "Aktionen",
|
|
141
|
+
"DELETE_PROVIDER": "Löschen",
|
|
142
|
+
"DELETE_PROVIDER_CONFIRM": "Anbieter \"{name}\" löschen?",
|
|
143
|
+
"NO_PROVIDERS_CONFIGURED": "Keine Anbieter konfiguriert. Klicken Sie auf \"Anbieter hinzufügen\", um zu beginnen.",
|
|
144
|
+
"TOOL_APPROVALS": "Werkzeug-Genehmigungen",
|
|
145
|
+
"REQUIRE_APPROVAL_BEFORE_EXECUTING": "Genehmigung vor der Ausführung von Werkzeugen erforderlich",
|
|
146
|
+
"SMART_TOOL_DETECTION": "Intelligente Werkzeugerkennung verwenden (reduziert Token-Verbrauch)",
|
|
147
|
+
"SMART_TOOL_DETECTION_HINT": "Wenn aktiviert, erkennt ein kleines ML-Modell in Ihrem Browser, ob eine Eingabeaufforderung Werkzeuge benötigt. Dies reduziert den Token-Verbrauch bei einfachen Abfragen wie Begrüßungen. Hinweis: Das Modell (ca. 60-80MB quantisiert) wird bei der ersten Verwendung heruntergeladen, was einige Zeit dauern kann.",
|
|
148
|
+
"APPROVED_COMMANDS": "Genehmigte Befehle",
|
|
149
|
+
"SELECT_COMMANDS_WITHOUT_APPROVAL": "Wählen Sie Befehle aus, die ohne Genehmigung ausgeführt werden können:",
|
|
150
|
+
"COMMANDS_AUTO_APPROVED": "Diese Befehle werden automatisch genehmigt, wenn die Genehmigung aktiviert ist:",
|
|
151
|
+
"OPTIONAL": "Optional"
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import { contributionRegistry } from '@kispace-io/core';
|
|
2
|
+
import { CID_CHAT_PROVIDERS, ChatProviderContribution } from './core';
|
|
3
|
+
|
|
4
|
+
// Built-in chat-provider presets. Each registration contributes a template
// (label + default model/endpoint/key) the user can pick in the AI config
// editor; placeholder values like "<your-server>" / "<your api key>" are
// meant to be replaced by the user.

// Local Ollama instance exposing the OpenAI-compatible chat endpoint.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "Ollama (Local)",
  provider: {
    name: "ollama",
    model: "gemma3:12b",
    chatApiEndpoint: "https://<your-server>/v1/chat/completions",
    apiKey: ""
  }
});

// Self-hosted OpenWebUI gateway.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "OpenWebUI (Self Hosted)",
  provider: {
    name: "openwebui",
    model: "gemma3:12b",
    chatApiEndpoint: "https://<your-server>/api/v1/chat/completion",
    apiKey: ""
  }
});

// Hosted OpenAI API.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "OpenAI",
  provider: {
    name: "openai",
    model: "gpt-4.1",
    chatApiEndpoint: "https://api.openai.com/v1/chat/completions",
    apiKey: "<your api key>"
  }
});

// Groq's OpenAI-compatible endpoint.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "Groq",
  provider: {
    name: "groq",
    model: "llama-3.1-8b-instant",
    chatApiEndpoint: "https://api.groq.com/openai/v1/chat/completions",
    apiKey: "<your api key>"
  }
});

// Cerebras inference API.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "Cerebras",
  provider: {
    name: "cerebras",
    model: "llama3.1-8b",
    chatApiEndpoint: "https://api.cerebras.ai/v1/chat/completions",
    apiKey: "<your api key>"
  }
});

// In-browser WebLLM: no endpoint/key; runs the model locally, so only
// extra model parameters are configured.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "WebLLM",
  provider: {
    name: "webllm",
    model: "gemma-2-9b-it-q4f16_1-MLC",
    chatApiEndpoint: "",
    apiKey: "",
    parameters: {
      context_window_size: 4096
    }
  }
});

// Mistral: the only preset that also configures the OCR endpoint/model.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "Mistral",
  provider: {
    name: "mistral",
    model: "mistral-large-latest",
    chatApiEndpoint: "https://api.mistral.ai/v1/chat/completions",
    apiKey: "<your api key>",
    ocrApiEndpoint: "https://api.mistral.ai/v1/ocr",
    ocrModel: "mistral-ocr-latest"
  }
});

// Self-hosted LiteLLM proxy.
contributionRegistry.registerContribution<ChatProviderContribution>(CID_CHAT_PROVIDERS, {
  target: CID_CHAT_PROVIDERS,
  label: "LiteLLM",
  provider: {
    name: "litellm",
    model: "gpt-3.5-turbo",
    chatApiEndpoint: "https://<your-server>/v1/chat/completions",
    apiKey: "<your api key>"
  }
});
|