ideaco 1.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.dockerignore +33 -0
- package/.nvmrc +1 -0
- package/ARCHITECTURE.md +394 -0
- package/Dockerfile +50 -0
- package/LICENSE +29 -0
- package/README.md +206 -0
- package/bin/i18n.js +46 -0
- package/bin/ideaco.js +494 -0
- package/deploy.sh +15 -0
- package/docker-compose.yml +30 -0
- package/electron/main.cjs +986 -0
- package/electron/preload.cjs +14 -0
- package/electron/web-backends.cjs +854 -0
- package/jsconfig.json +8 -0
- package/next.config.mjs +34 -0
- package/package.json +134 -0
- package/postcss.config.mjs +6 -0
- package/public/demo/dashboard.png +0 -0
- package/public/demo/employee.png +0 -0
- package/public/demo/messages.png +0 -0
- package/public/demo/office.png +0 -0
- package/public/demo/requirement.png +0 -0
- package/public/logo.jpeg +0 -0
- package/public/logo.png +0 -0
- package/scripts/prepare-electron.js +67 -0
- package/scripts/release.js +76 -0
- package/src/app/api/agents/[agentId]/chat/route.js +70 -0
- package/src/app/api/agents/[agentId]/conversations/route.js +35 -0
- package/src/app/api/agents/[agentId]/route.js +106 -0
- package/src/app/api/avatar/route.js +104 -0
- package/src/app/api/browse-dir/route.js +44 -0
- package/src/app/api/chat/route.js +265 -0
- package/src/app/api/company/factory-reset/route.js +43 -0
- package/src/app/api/company/route.js +82 -0
- package/src/app/api/departments/[deptId]/agents/[agentId]/dismiss/route.js +19 -0
- package/src/app/api/departments/route.js +92 -0
- package/src/app/api/group-chat-loop/events/route.js +70 -0
- package/src/app/api/group-chat-loop/route.js +94 -0
- package/src/app/api/mailbox/route.js +100 -0
- package/src/app/api/messages/route.js +14 -0
- package/src/app/api/providers/[id]/configure/route.js +21 -0
- package/src/app/api/providers/[id]/refresh-cookie/route.js +38 -0
- package/src/app/api/providers/[id]/test-cookie/route.js +28 -0
- package/src/app/api/providers/route.js +11 -0
- package/src/app/api/requirements/route.js +242 -0
- package/src/app/api/secretary/route.js +65 -0
- package/src/app/api/system/cli-backends/route.js +91 -0
- package/src/app/api/system/cron/route.js +110 -0
- package/src/app/api/system/knowledge/route.js +104 -0
- package/src/app/api/system/plugins/route.js +40 -0
- package/src/app/api/system/skills/route.js +46 -0
- package/src/app/api/system/status/route.js +46 -0
- package/src/app/api/talent-market/[profileId]/recall/route.js +22 -0
- package/src/app/api/talent-market/[profileId]/route.js +17 -0
- package/src/app/api/talent-market/route.js +26 -0
- package/src/app/api/teams/route.js +773 -0
- package/src/app/api/ws-files/[departmentId]/file/route.js +27 -0
- package/src/app/api/ws-files/[departmentId]/files/route.js +22 -0
- package/src/app/globals.css +130 -0
- package/src/app/layout.jsx +40 -0
- package/src/app/page.jsx +97 -0
- package/src/components/AgentChatModal.jsx +164 -0
- package/src/components/AgentDetailModal.jsx +425 -0
- package/src/components/AgentSpyModal.jsx +481 -0
- package/src/components/AvatarGrid.jsx +29 -0
- package/src/components/BossProfileModal.jsx +162 -0
- package/src/components/CachedAvatar.jsx +77 -0
- package/src/components/ChatPanel.jsx +219 -0
- package/src/components/ChatShared.jsx +255 -0
- package/src/components/DepartmentDetail.jsx +842 -0
- package/src/components/DepartmentView.jsx +367 -0
- package/src/components/FileReference.jsx +260 -0
- package/src/components/FilesView.jsx +465 -0
- package/src/components/GroupChatView.jsx +799 -0
- package/src/components/Mailbox.jsx +926 -0
- package/src/components/MessagesView.jsx +112 -0
- package/src/components/OnboardingGuide.jsx +209 -0
- package/src/components/OrgTree.jsx +151 -0
- package/src/components/Overview.jsx +391 -0
- package/src/components/PixelOffice.jsx +2281 -0
- package/src/components/ProviderGrid.jsx +551 -0
- package/src/components/ProvidersBoard.jsx +16 -0
- package/src/components/RequirementDetail.jsx +1279 -0
- package/src/components/RequirementsBoard.jsx +187 -0
- package/src/components/SecretarySettings.jsx +295 -0
- package/src/components/SetupWizard.jsx +388 -0
- package/src/components/Sidebar.jsx +169 -0
- package/src/components/SystemMonitor.jsx +808 -0
- package/src/components/TalentMarket.jsx +183 -0
- package/src/components/TeamDetail.jsx +697 -0
- package/src/core/agent/base-agent.js +104 -0
- package/src/core/agent/chat-store.js +602 -0
- package/src/core/agent/cli-agent/backends/claude-code/README.md +52 -0
- package/src/core/agent/cli-agent/backends/claude-code/config.js +27 -0
- package/src/core/agent/cli-agent/backends/codebuddy/README.md +236 -0
- package/src/core/agent/cli-agent/backends/codebuddy/config.js +27 -0
- package/src/core/agent/cli-agent/backends/codex/README.md +51 -0
- package/src/core/agent/cli-agent/backends/codex/config.js +27 -0
- package/src/core/agent/cli-agent/backends/index.js +27 -0
- package/src/core/agent/cli-agent/backends/registry.js +580 -0
- package/src/core/agent/cli-agent/index.js +154 -0
- package/src/core/agent/index.js +60 -0
- package/src/core/agent/llm-agent/client.js +320 -0
- package/src/core/agent/llm-agent/index.js +97 -0
- package/src/core/agent/message-bus.js +211 -0
- package/src/core/agent/session.js +608 -0
- package/src/core/agent/tools.js +596 -0
- package/src/core/agent/web-agent/backends/base-backend.js +180 -0
- package/src/core/agent/web-agent/backends/chatgpt/client.js +146 -0
- package/src/core/agent/web-agent/backends/chatgpt/config.js +148 -0
- package/src/core/agent/web-agent/backends/chatgpt/dom-scripts.js +303 -0
- package/src/core/agent/web-agent/backends/index.js +91 -0
- package/src/core/agent/web-agent/index.js +278 -0
- package/src/core/agent/web-agent/web-client.js +407 -0
- package/src/core/employee/base-employee.js +1088 -0
- package/src/core/employee/index.js +35 -0
- package/src/core/employee/knowledge.js +327 -0
- package/src/core/employee/lifecycle.js +990 -0
- package/src/core/employee/memory/index.js +642 -0
- package/src/core/employee/memory/store.js +143 -0
- package/src/core/employee/performance.js +224 -0
- package/src/core/employee/secretary.js +625 -0
- package/src/core/employee/skills.js +398 -0
- package/src/core/index.js +38 -0
- package/src/core/organization/company.js +2600 -0
- package/src/core/organization/department.js +737 -0
- package/src/core/organization/group-chat-loop.js +264 -0
- package/src/core/organization/index.js +8 -0
- package/src/core/organization/persistence.js +111 -0
- package/src/core/organization/team.js +267 -0
- package/src/core/organization/workforce/hr.js +377 -0
- package/src/core/organization/workforce/providers.js +468 -0
- package/src/core/organization/workforce/role-archetypes.js +805 -0
- package/src/core/organization/workforce/talent-market.js +205 -0
- package/src/core/prompts.js +532 -0
- package/src/core/requirement.js +1789 -0
- package/src/core/system/audit.js +483 -0
- package/src/core/system/cron.js +449 -0
- package/src/core/system/index.js +7 -0
- package/src/core/system/plugin.js +2183 -0
- package/src/core/utils/json-parse.js +188 -0
- package/src/core/workspace.js +239 -0
- package/src/lib/api-i18n.js +211 -0
- package/src/lib/avatar.js +268 -0
- package/src/lib/client-store.js +1025 -0
- package/src/lib/config-validator.js +483 -0
- package/src/lib/format-time.js +22 -0
- package/src/lib/hooks.js +414 -0
- package/src/lib/i18n.js +134 -0
- package/src/lib/paths.js +23 -0
- package/src/lib/store.js +72 -0
- package/src/locales/de.js +393 -0
- package/src/locales/en.js +1054 -0
- package/src/locales/es.js +393 -0
- package/src/locales/fr.js +393 -0
- package/src/locales/ja.js +501 -0
- package/src/locales/ko.js +513 -0
- package/src/locales/zh.js +828 -0
- package/tailwind.config.mjs +11 -0
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
import { BaseAgent } from '../base-agent.js';
|
|
2
|
+
import { llmClient } from '../llm-agent/client.js';
|
|
3
|
+
import { cliBackendRegistry } from './backends/index.js';
|
|
4
|
+
|
|
5
|
+
/**
 * CLIAgent — Communication engine powered by local CLI coding assistants.
 *
 * Pure communication layer: handles CLI execution, fallback LLM chat, provider management.
 * No business logic (identity, memory, tasks) — that's the Employee layer.
 */
export class CLIAgent extends BaseAgent {
  /**
   * @param {object} config
   * @param {string} config.cliBackend - CLI backend ID
   * @param {object} [config.cliProvider] - CLI provider info (for display)
   * @param {object} [config.fallbackProvider] - LLM provider for lightweight chat
   */
  constructor(config) {
    super();
    this.cliBackend = config.cliBackend;
    this.cliProvider = config.cliProvider || null;
    this.fallbackProvider = config.fallbackProvider || null;
    // Backward compat: some external code reads agent.provider
    this.provider = config.fallbackProvider || config.provider || null;
  }

  get agentType() {
    return 'cli';
  }

  /**
   * The CLI side is usable once its backend has been detected or configured
   * in the backend registry.
   * @returns {boolean}
   */
  isAvailable() {
    const backend = cliBackendRegistry.backends.get(this.cliBackend);
    return !!(backend && (backend.state === 'detected' || backend.state === 'configured'));
  }

  /**
   * Lightweight chat requires an enabled, keyed, non-CLI fallback provider.
   * @returns {boolean}
   */
  canChat() {
    return !!(this.fallbackProvider && this.fallbackProvider.enabled && this.fallbackProvider.apiKey && !this.fallbackProvider.isCLI);
  }

  /**
   * Set the fallback LLM provider used for chat. Providers that are disabled,
   * missing an API key, or CLI-type are silently ignored (they cannot serve chat).
   * @param {object} provider
   */
  setFallbackProvider(provider) {
    if (provider && provider.enabled && provider.apiKey && !provider.isCLI) {
      this.fallbackProvider = provider;
      this.provider = provider; // keep the backward-compat alias in sync
    }
  }

  /**
   * Send chat messages through the fallback LLM provider.
   * @throws {Error} when no usable fallback provider is configured
   */
  async chat(messages, options = {}) {
    if (!this.canChat()) {
      throw new Error(`CLIAgent has no fallback LLM provider for chat`);
    }
    return await llmClient.chat(this.fallbackProvider, messages, options);
  }

  /**
   * Tool-calling conversation through the fallback LLM provider.
   * @throws {Error} when no usable fallback provider is configured
   */
  async chatWithTools(messages, toolExecutor, options = {}) {
    if (!this.canChat()) {
      throw new Error(`CLIAgent has no fallback LLM provider for chatWithTools`);
    }
    return await llmClient.chatWithTools(this.fallbackProvider, messages, toolExecutor, options);
  }

  /**
   * Human-readable summary of this agent for UI display.
   * @returns {{name: string, provider: string, model: string, type: string, category: string, backendId: string}}
   */
  getDisplayInfo() {
    const backend = cliBackendRegistry.backends.get(this.cliBackend);
    return {
      name: this.cliProvider?.name || backend?.config?.name || this.cliBackend,
      provider: 'Local CLI',
      model: backend?.config?.execCommand || this.cliBackend,
      type: 'cli',
      category: 'cli',
      backendId: this.cliBackend,
    };
  }

  /** Display info for the CLI provider record itself. */
  getProviderDisplayInfo() {
    return {
      id: this.cliProvider?.id,
      name: this.cliProvider?.name,
      provider: this.cliProvider?.provider || 'Local CLI',
    };
  }

  /** Name of the fallback chat provider, or null when none is configured. */
  getFallbackProviderName() {
    return this.fallbackProvider?.name || this.provider?.name || null;
  }

  /**
   * Switch providers: CLI-type providers replace the CLI backend,
   * everything else becomes the chat fallback.
   * @param {object} newProvider
   */
  switchProvider(newProvider) {
    if (newProvider.isCLI && newProvider.cliBackendId) {
      this.cliBackend = newProvider.cliBackendId;
      this.cliProvider = newProvider;
    } else {
      this.fallbackProvider = newProvider;
      this.provider = newProvider;
    }
  }

  /**
   * Per-token cost of the fallback provider.
   * NOTE(review): uses `||`, so an explicit costPerToken of 0 falls through
   * to the 0.001 default — confirm whether zero-cost providers are expected.
   */
  getCostPerToken() {
    return this.fallbackProvider?.costPerToken || this.provider?.costPerToken || 0.001;
  }

  /**
   * Produce a plain-object snapshot suitable for persistence.
   * @returns {object}
   */
  serializeAgent() {
    // Reduce a provider object to the persisted subset of fields.
    const pack = (p) => p ? {
      id: p.id,
      name: p.name,
      provider: p.provider,
      model: p.model,
      category: p.category,
      costPerToken: p.costPerToken,
      enabled: p.enabled,
    } : null;
    return {
      agentType: 'cli',
      cliBackend: this.cliBackend,
      // CLI provider snapshot keeps a slimmer shape (display fields only).
      cliProvider: this.cliProvider ? {
        id: this.cliProvider.id,
        name: this.cliProvider.name,
        provider: this.cliProvider.provider,
        model: this.cliProvider.model,
      } : null,
      fallbackProvider: pack(this.fallbackProvider),
      provider: pack(this.provider),
    };
  }

  /**
   * Restore CLIAgent from serialized data.
   * @param {object} data - Snapshot produced by serializeAgent()
   * @param {object} [providerRegistry] - Live registry; preferred over stale snapshots
   * @returns {CLIAgent}
   */
  static deserialize(data, providerRegistry) {
    // Prefer the live registry record (fresh keys / enabled flags) over the
    // serialized snapshot, falling back to the snapshot itself.
    const resolve = (snapshot) => {
      const p = snapshot || null;
      if (p?.id && providerRegistry) {
        return providerRegistry.getById(p.id) || p;
      }
      return p;
    };
    const fallbackProvider = resolve(data.fallbackProvider);
    const provider = resolve(data.provider);
    const cliProvider = resolve(data.cliProvider);

    return new CLIAgent({
      cliBackend: data.cliBackend,
      cliProvider,
      fallbackProvider: fallbackProvider || provider,
      provider: provider || fallbackProvider,
    });
  }
}
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent module — pure communication engine layer.
|
|
3
|
+
*
|
|
4
|
+
* Agents handle ONLY: LLM/CLI/Web communication, provider management, availability.
|
|
5
|
+
* Business logic (identity, memory, tasks, org) lives in the Employee layer.
|
|
6
|
+
*
|
|
7
|
+
* Usage:
|
|
8
|
+
* import { createAgent, LLMAgent, CLIAgent, WebAgent } from './agent/index.js';
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { BaseAgent } from './base-agent.js';
|
|
12
|
+
import { LLMAgent } from './llm-agent/index.js';
|
|
13
|
+
import { CLIAgent } from './cli-agent/index.js';
|
|
14
|
+
import { WebAgent } from './web-agent/index.js';
|
|
15
|
+
|
|
16
|
+
/**
 * Create an agent from config. Auto-picks LLMAgent, CLIAgent, or WebAgent.
 * @param {object} config
 * @returns {BaseAgent}
 */
export function createAgent(config) {
  const hasCliBackend = Boolean(config.cliBackend);
  if (hasCliBackend) {
    // CLI agents treat the plain provider as their lightweight-chat fallback.
    const cliConfig = { ...config, fallbackProvider: config.provider };
    return new CLIAgent(cliConfig);
  }
  const isWebProvider = Boolean(config.provider?.isWeb);
  return isWebProvider ? new WebAgent(config) : new LLMAgent(config);
}
|
|
33
|
+
|
|
34
|
+
/**
 * Deserialize an agent from saved data. Auto-detects type.
 *
 * Legacy snapshots stored CLI settings under a `brain` key; those fields are
 * lifted onto a normalized copy (the caller's `data` object is NOT mutated).
 * @param {object} data
 * @param {object} [providerRegistry]
 * @returns {BaseAgent}
 */
export function deserializeAgent(data, providerRegistry) {
  if (data.agentType === 'cli' || data.cliBackend || data.brain?.type === 'cli') {
    // Legacy brain data conversion — work on a shallow copy so the caller's
    // object is left untouched.
    const normalized = { ...data };
    if (!normalized.cliBackend && data.brain?.backendId) {
      normalized.cliBackend = data.brain.backendId;
    }
    if (!normalized.cliProvider && data.brain?.cliProvider) {
      normalized.cliProvider = data.brain.cliProvider;
    }
    if (!normalized.fallbackProvider && data.brain?.fallbackProvider) {
      normalized.fallbackProvider = data.brain.fallbackProvider;
    }
    return CLIAgent.deserialize(normalized, providerRegistry);
  }
  if (data.agentType === 'web' || data.provider?.isWeb) {
    return WebAgent.deserialize(data, providerRegistry);
  }
  return LLMAgent.deserialize(data, providerRegistry);
}
|
|
59
|
+
|
|
60
|
+
// Re-export the base class and concrete agent implementations for direct use.
export { BaseAgent, LLMAgent, CLIAgent, WebAgent };
|
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Unified LLM Client - Makes real API calls to various model providers
|
|
3
|
+
*
|
|
4
|
+
* Supports: OpenAI/GPT, Anthropic/Claude, DeepSeek, and other OpenAI-compatible APIs
|
|
5
|
+
* as well as image models like DALL-E, Midjourney
|
|
6
|
+
*/
|
|
7
|
+
import OpenAI from 'openai';
|
|
8
|
+
import { auditLogger, AuditCategory, AuditLevel } from '../../system/audit.js';
|
|
9
|
+
import { hookRegistry, HookEvent } from '../../../lib/hooks.js';
|
|
10
|
+
|
|
11
|
+
/**
 * Create an API client based on provider configuration
 * @param {object} provider - Provider config object
 * @returns {object} API client instance
 */
function createClient(provider) {
  const { id, apiKey } = provider;

  // Anthropic Claude — reached via its OpenAI-compatible interface.
  if (id.startsWith('anthropic-')) {
    const defaultHeaders = {
      'anthropic-version': '2023-06-01',
      'x-api-key': apiKey,
    };
    return new OpenAI({
      apiKey,
      baseURL: 'https://api.anthropic.com/v1',
      defaultHeaders,
    });
  }

  // DeepSeek — OpenAI-compatible interface.
  if (id.startsWith('deepseek-')) {
    return new OpenAI({ apiKey, baseURL: 'https://api.deepseek.com' });
  }

  // Qwen (Tongyi Qianwen) — Alibaba Cloud DashScope OpenAI-compatible interface.
  if (id.startsWith('qwen-')) {
    return new OpenAI({
      apiKey,
      baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
    });
  }

  // OpenAI itself (GPT, DALL-E) and any unknown provider use the stock
  // OpenAI-compatible client with the default base URL.
  return new OpenAI({ apiKey });
}
|
|
55
|
+
|
|
56
|
+
/**
 * Get model name for a given provider
 * @param {object} provider - Provider config
 * @returns {string} concrete model identifier to send to the API
 */
function getModelName(provider) {
  const knownModels = new Map([
    ['openai-gpt4', 'gpt-4-turbo'],
    ['openai-gpt35', 'gpt-3.5-turbo'],
    ['anthropic-claude', 'claude-3-5-sonnet-20241022'],
    ['deepseek-v3', 'deepseek-chat'],
    ['qwen-max', 'qwen-max'],
  ]);
  // Known provider IDs win; otherwise trust the configured model; otherwise default.
  return knownModels.get(provider.id) || provider.model || 'gpt-4-turbo';
}
|
|
69
|
+
|
|
70
|
+
/**
 * Unified LLM communication client
 *
 * Wraps per-provider OpenAI-compatible clients: caches one client per
 * provider id, and exposes single-shot chat, a tool-call loop, and image
 * generation. Exported below as the `llmClient` singleton.
 */
export class LLMClient {
  constructor() {
    // Cache created client instances
    this.clients = new Map();
  }

  /**
   * Get or create an API client
   * @param {object} provider - Provider config (must carry an apiKey)
   * @returns {object} cached OpenAI-compatible client for this provider id
   * @throws {Error} when the provider has no API Key configured
   */
  _getClient(provider) {
    if (!provider.apiKey) {
      throw new Error(`Provider ${provider.name} has no API Key configured`);
    }
    // NOTE: cached by id — a changed apiKey for the same id keeps serving the
    // old client until clearClient(id) is called.
    if (!this.clients.has(provider.id)) {
      this.clients.set(provider.id, createClient(provider));
    }
    return this.clients.get(provider.id);
  }

  /**
   * Send chat messages (general text models)
   *
   * @param {object} provider - Provider config
   * @param {Array<{role: string, content: string}>} messages - Message list
   * @param {object} [options] - Extra options
   * @param {Array<object>} [options.tools] - Available tool definitions (OpenAI tools format)
   * @param {number} [options.temperature] - Temperature (default 0.7)
   * @param {number} [options.maxTokens] - Max tokens (default 4096)
   * @returns {Promise<{content: string, toolCalls: Array|null, finishReason: string, usage: object}>}
   * @throws {Error} wrapping the underlying API failure message
   */
  async chat(provider, messages, options = {}) {
    const client = this._getClient(provider);
    const model = getModelName(provider);

    const requestParams = {
      model,
      messages,
      temperature: options.temperature ?? 0.7,
      max_tokens: options.maxTokens ?? 4096,
    };

    // Add tool definitions to request if provided
    if (options.tools && options.tools.length > 0) {
      requestParams.tools = options.tools;
      requestParams.tool_choice = 'auto';
    }

    const startTime = Date.now();
    try {
      // Fire hook: LLM request start (trigger is not awaited — fire-and-forget)
      hookRegistry.trigger(HookEvent.LLM_REQUEST_START, {
        providerId: provider.id, model, agentId: options._agentId,
      });

      const response = await client.chat.completions.create(requestParams);
      const choice = response.choices[0];
      const latency = Date.now() - startTime;

      // Audit log the LLM request (also fire-and-forget)
      auditLogger.log({
        category: AuditCategory.LLM_REQUEST,
        level: AuditLevel.INFO,
        agentId: options._agentId || 'system',
        agentName: options._agentName || '',
        action: `LLM call: ${provider.name} (${model}) - ${latency}ms`,
        details: { providerId: provider.id, model, latency, usage: response.usage },
      });

      // Fire hook: LLM request end + token usage
      hookRegistry.trigger(HookEvent.LLM_REQUEST_END, {
        providerId: provider.id, model, latency,
        usage: response.usage, agentId: options._agentId,
      });
      if (response.usage) {
        hookRegistry.trigger(HookEvent.LLM_TOKEN_USAGE, {
          providerId: provider.id, model,
          promptTokens: response.usage.prompt_tokens || 0,
          completionTokens: response.usage.completion_tokens || 0,
          totalTokens: response.usage.total_tokens || 0,
        });
      }

      return {
        content: choice.message.content || '',
        toolCalls: choice.message.tool_calls || null,
        finishReason: choice.finish_reason,
        usage: response.usage || {},
      };
    } catch (error) {
      // Fire hook: LLM error, then rethrow with provider context
      hookRegistry.trigger(HookEvent.LLM_ERROR, {
        providerId: provider.id, model, error: error.message,
        agentId: options._agentId,
      });
      console.error(`[LLMClient] Call to ${provider.name} failed:`, error.message);
      throw new Error(`LLM call failed (${provider.name}): ${error.message}`);
    }
  }

  /**
   * Multi-turn conversation (with tool call loop)
   * Core method for Agent task execution: send message -> model may call tools -> execute tools -> continue conversation
   *
   * @param {object} provider - Provider config
   * @param {Array} messages - Initial messages (not mutated; a copy is extended)
   * @param {object} toolExecutor - Tool executor { definitions, execute(name, args) }
   * @param {object} [options] - Options
   * @param {number} [options.maxIterations] - Max tool call loop iterations (default 5)
   * @param {function} [options.onToolCall] - Progress callback around each tool execution
   * @param {function} [options.onLLMCall] - Progress callback before each LLM round-trip
   * @returns {Promise<{content: string, toolResults: Array, messages: Array, usage: object}>}
   */
  async chatWithTools(provider, messages, toolExecutor, options = {}) {
    const maxIterations = options.maxIterations || 5;
    const onToolCall = options.onToolCall || null; // Callback: notify on tool call
    const onLLMCall = options.onLLMCall || null; // Callback: notify on each LLM call
    const conversationMessages = [...messages];
    const toolResults = [];

    for (let i = 0; i < maxIterations; i++) {
      // Notify: about to call LLM (callback errors are deliberately swallowed)
      if (onLLMCall) {
        try { onLLMCall({ iteration: i + 1, maxIterations }); } catch {}
      }

      const response = await this.chat(provider, conversationMessages, {
        tools: toolExecutor.definitions,
        temperature: options.temperature,
        maxTokens: options.maxTokens,
      });

      // If no tool calls, return final result
      if (!response.toolCalls || response.toolCalls.length === 0) {
        return {
          content: response.content,
          toolResults,
          messages: conversationMessages,
          usage: response.usage,
        };
      }

      // Process tool calls: echo the assistant turn back into the transcript
      conversationMessages.push({
        role: 'assistant',
        content: response.content,
        tool_calls: response.toolCalls,
      });

      for (const toolCall of response.toolCalls) {
        const { name, arguments: argsStr } = toolCall.function;
        let args;
        try {
          args = JSON.parse(argsStr);
        } catch (parseErr) {
          console.warn(` ⚠️ [Tool Call] Failed to parse arguments for ${name}: ${argsStr?.slice(0, 200)}`);
          // Best-effort repair: strip trailing commas, swap single quotes;
          // if that still fails, run the tool with empty args rather than abort.
          try {
            const cleaned = (argsStr || '{}').replace(/,\s*}/g, '}').replace(/,\s*]/g, ']').replace(/'/g, '"');
            args = JSON.parse(cleaned);
          } catch {
            args = {};
          }
        }

        console.log(` 🔧 [Tool Call] ${name}(${JSON.stringify(args).slice(0, 100)}...)`);

        // Notify: calling tool (callback errors swallowed)
        if (onToolCall) {
          try { onToolCall({ tool: name, args, status: 'start' }); } catch {}
        }

        let result;
        try {
          result = await toolExecutor.execute(name, args);
          toolResults.push({ tool: name, args, result, success: true });
          if (onToolCall) {
            try { onToolCall({ tool: name, args, status: 'done', success: true }); } catch {}
          }
        } catch (error) {
          // Tool failures are fed back to the model as text, not rethrown
          result = `Tool execution error: ${error.message}`;
          toolResults.push({ tool: name, args, error: error.message, success: false });
          if (onToolCall) {
            try { onToolCall({ tool: name, args, status: 'error', error: error.message }); } catch {}
          }
        }

        conversationMessages.push({
          role: 'tool',
          tool_call_id: toolCall.id,
          content: typeof result === 'string' ? result : JSON.stringify(result),
        });
      }
    }

    // Exceeded max iterations, make one final call without tools
    const finalResponse = await this.chat(provider, conversationMessages, {
      temperature: options.temperature,
      maxTokens: options.maxTokens,
    });

    return {
      content: finalResponse.content,
      toolResults,
      messages: conversationMessages,
      usage: finalResponse.usage,
    };
  }

  /**
   * Generate image (image models)
   *
   * @param {object} provider - Provider config
   * @param {string} prompt - Image description
   * @param {object} [options] - Options
   * @param {string} [options.size] - Image size (default '1024x1024')
   * @param {string} [options.quality] - Quality setting (default 'standard')
   * @returns {Promise<{url: string, revisedPrompt: string}>}
   * @throws {Error} wrapping the underlying API failure message
   */
  async generateImage(provider, prompt, options = {}) {
    const client = this._getClient(provider);

    try {
      const response = await client.images.generate({
        model: provider.model || 'dall-e-3',
        prompt,
        n: 1,
        size: options.size || '1024x1024',
        quality: options.quality || 'standard',
      });

      return {
        url: response.data[0].url,
        revisedPrompt: response.data[0].revised_prompt || prompt,
      };
    } catch (error) {
      console.error(`[LLMClient] Image generation failed (${provider.name}):`, error.message);
      throw new Error(`Image generation failed (${provider.name}): ${error.message}`);
    }
  }

  /**
   * Clear cached client (call when provider API Key is updated)
   * @param {string} providerId
   */
  clearClient(providerId) {
    this.clients.delete(providerId);
  }

  /** Drop every cached client instance. */
  clearAll() {
    this.clients.clear();
  }
}

// Global singleton
export const llmClient = new LLMClient();
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
import { BaseAgent } from '../base-agent.js';
|
|
2
|
+
import { llmClient } from './client.js';
|
|
3
|
+
|
|
4
|
+
/**
 * LLMAgent — Communication engine powered by API-based LLM models.
 *
 * Pure communication layer: handles chat, tool-calling, provider management.
 * No business logic (identity, memory, tasks) — that's the Employee layer.
 */
export class LLMAgent extends BaseAgent {
  /**
   * @param {object} config
   * @param {object} config.provider - Provider config from ProviderRegistry
   */
  constructor(config) {
    super();
    this.provider = config.provider;
  }

  get agentType() {
    return 'llm';
  }

  /** Usable when the provider exists, is enabled, and has an API key. */
  isAvailable() {
    const p = this.provider;
    return Boolean(p && p.enabled && p.apiKey);
  }

  canChat() {
    return this.isAvailable();
  }

  /** Guard shared by chat entry points; throws when the provider is unusable. */
  _ensureAvailable() {
    if (!this.isAvailable()) {
      throw new Error(`LLMAgent provider "${this.provider?.name}" is not available`);
    }
  }

  /**
   * Send chat messages via the configured provider.
   * @throws {Error} when the provider is not available
   */
  async chat(messages, options = {}) {
    this._ensureAvailable();
    return await llmClient.chat(this.provider, messages, options);
  }

  /**
   * Tool-calling conversation via the configured provider.
   * @throws {Error} when the provider is not available
   */
  async chatWithTools(messages, toolExecutor, options = {}) {
    this._ensureAvailable();
    return await llmClient.chatWithTools(this.provider, messages, toolExecutor, options);
  }

  /** Human-readable summary of this agent for UI display. */
  getDisplayInfo() {
    const p = this.provider;
    return {
      name: p?.name || 'Unknown LLM',
      provider: p?.provider || 'Unknown',
      model: p?.model || 'unknown',
      type: 'llm',
      category: p?.category || 'general',
    };
  }

  /** Display info for the provider record itself. */
  getProviderDisplayInfo() {
    const p = this.provider;
    return {
      id: p?.id,
      name: p?.name,
      provider: p?.provider,
    };
  }

  /** Replace the active provider. */
  switchProvider(newProvider) {
    this.provider = newProvider;
  }

  /** Per-token cost of the provider, defaulting to 0.001. */
  getCostPerToken() {
    return this.provider?.costPerToken || 0.001;
  }

  /**
   * Produce a plain-object snapshot suitable for persistence.
   * @returns {object}
   */
  serializeAgent() {
    const p = this.provider;
    if (!p) {
      return { agentType: 'llm', provider: null };
    }
    const { id, name, provider, model, category, costPerToken, enabled } = p;
    return {
      agentType: 'llm',
      provider: { id, name, provider, model, category, costPerToken, enabled },
    };
  }

  /**
   * Restore LLMAgent from serialized data.
   * Prefers the live registry record over the stale snapshot when available.
   */
  static deserialize(data, providerRegistry) {
    const saved = data.provider;
    const live = saved?.id ? providerRegistry?.getById(saved.id) : null;
    return new LLMAgent({ provider: live || saved });
  }
}
|