@usejarvis/brain 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +153 -0
- package/README.md +278 -0
- package/bin/jarvis.ts +413 -0
- package/package.json +74 -0
- package/scripts/ensure-bun.cjs +8 -0
- package/src/actions/README.md +421 -0
- package/src/actions/app-control/desktop-controller.test.ts +26 -0
- package/src/actions/app-control/desktop-controller.ts +438 -0
- package/src/actions/app-control/interface.ts +64 -0
- package/src/actions/app-control/linux.ts +273 -0
- package/src/actions/app-control/macos.ts +54 -0
- package/src/actions/app-control/sidecar-launcher.test.ts +23 -0
- package/src/actions/app-control/sidecar-launcher.ts +286 -0
- package/src/actions/app-control/windows.ts +44 -0
- package/src/actions/browser/cdp.ts +138 -0
- package/src/actions/browser/chrome-launcher.ts +252 -0
- package/src/actions/browser/session.ts +437 -0
- package/src/actions/browser/stealth.ts +49 -0
- package/src/actions/index.ts +20 -0
- package/src/actions/terminal/executor.ts +157 -0
- package/src/actions/terminal/wsl-bridge.ts +126 -0
- package/src/actions/test.ts +93 -0
- package/src/actions/tools/agents.ts +321 -0
- package/src/actions/tools/builtin.ts +846 -0
- package/src/actions/tools/commitments.ts +192 -0
- package/src/actions/tools/content.ts +217 -0
- package/src/actions/tools/delegate.ts +147 -0
- package/src/actions/tools/desktop.test.ts +55 -0
- package/src/actions/tools/desktop.ts +305 -0
- package/src/actions/tools/goals.ts +376 -0
- package/src/actions/tools/local-tools-guard.ts +20 -0
- package/src/actions/tools/registry.ts +171 -0
- package/src/actions/tools/research.ts +111 -0
- package/src/actions/tools/sidecar-list.ts +57 -0
- package/src/actions/tools/sidecar-route.ts +105 -0
- package/src/actions/tools/workflows.ts +216 -0
- package/src/agents/agent.ts +132 -0
- package/src/agents/delegation.ts +107 -0
- package/src/agents/hierarchy.ts +113 -0
- package/src/agents/index.ts +19 -0
- package/src/agents/messaging.ts +125 -0
- package/src/agents/orchestrator.ts +576 -0
- package/src/agents/role-discovery.ts +61 -0
- package/src/agents/sub-agent-runner.ts +307 -0
- package/src/agents/task-manager.ts +151 -0
- package/src/authority/approval-delivery.ts +59 -0
- package/src/authority/approval.ts +196 -0
- package/src/authority/audit.ts +158 -0
- package/src/authority/authority.test.ts +519 -0
- package/src/authority/deferred-executor.ts +103 -0
- package/src/authority/emergency.ts +66 -0
- package/src/authority/engine.ts +297 -0
- package/src/authority/index.ts +12 -0
- package/src/authority/learning.ts +111 -0
- package/src/authority/tool-action-map.ts +74 -0
- package/src/awareness/analytics.ts +466 -0
- package/src/awareness/awareness.test.ts +332 -0
- package/src/awareness/capture-engine.ts +305 -0
- package/src/awareness/context-graph.ts +130 -0
- package/src/awareness/context-tracker.ts +349 -0
- package/src/awareness/index.ts +25 -0
- package/src/awareness/intelligence.ts +321 -0
- package/src/awareness/ocr-engine.ts +88 -0
- package/src/awareness/service.ts +528 -0
- package/src/awareness/struggle-detector.ts +342 -0
- package/src/awareness/suggestion-engine.ts +476 -0
- package/src/awareness/types.ts +201 -0
- package/src/cli/autostart.ts +241 -0
- package/src/cli/deps.ts +449 -0
- package/src/cli/doctor.ts +230 -0
- package/src/cli/helpers.ts +401 -0
- package/src/cli/onboard.ts +580 -0
- package/src/comms/README.md +329 -0
- package/src/comms/auth-error.html +48 -0
- package/src/comms/channels/discord.ts +228 -0
- package/src/comms/channels/signal.ts +56 -0
- package/src/comms/channels/telegram.ts +316 -0
- package/src/comms/channels/whatsapp.ts +60 -0
- package/src/comms/channels.test.ts +173 -0
- package/src/comms/desktop-notify.ts +114 -0
- package/src/comms/example.ts +129 -0
- package/src/comms/index.ts +129 -0
- package/src/comms/streaming.ts +142 -0
- package/src/comms/voice.test.ts +152 -0
- package/src/comms/voice.ts +291 -0
- package/src/comms/websocket.test.ts +409 -0
- package/src/comms/websocket.ts +473 -0
- package/src/config/README.md +387 -0
- package/src/config/index.ts +6 -0
- package/src/config/loader.test.ts +137 -0
- package/src/config/loader.ts +142 -0
- package/src/config/types.ts +260 -0
- package/src/daemon/README.md +232 -0
- package/src/daemon/agent-service-interface.ts +9 -0
- package/src/daemon/agent-service.ts +600 -0
- package/src/daemon/api-routes.ts +2119 -0
- package/src/daemon/background-agent-service.ts +396 -0
- package/src/daemon/background-agent.test.ts +78 -0
- package/src/daemon/channel-service.ts +201 -0
- package/src/daemon/commitment-executor.ts +297 -0
- package/src/daemon/event-classifier.ts +239 -0
- package/src/daemon/event-coalescer.ts +123 -0
- package/src/daemon/event-reactor.ts +214 -0
- package/src/daemon/health.ts +220 -0
- package/src/daemon/index.ts +1004 -0
- package/src/daemon/llm-settings.ts +316 -0
- package/src/daemon/observer-service.ts +150 -0
- package/src/daemon/pid.ts +98 -0
- package/src/daemon/research-queue.ts +155 -0
- package/src/daemon/services.ts +175 -0
- package/src/daemon/ws-service.ts +788 -0
- package/src/goals/accountability.ts +240 -0
- package/src/goals/awareness-bridge.ts +185 -0
- package/src/goals/estimator.ts +185 -0
- package/src/goals/events.ts +28 -0
- package/src/goals/goals.test.ts +400 -0
- package/src/goals/integration.test.ts +329 -0
- package/src/goals/nl-builder.test.ts +220 -0
- package/src/goals/nl-builder.ts +256 -0
- package/src/goals/rhythm.test.ts +177 -0
- package/src/goals/rhythm.ts +275 -0
- package/src/goals/service.test.ts +135 -0
- package/src/goals/service.ts +348 -0
- package/src/goals/types.ts +106 -0
- package/src/goals/workflow-bridge.ts +96 -0
- package/src/integrations/google-api.ts +134 -0
- package/src/integrations/google-auth.ts +175 -0
- package/src/llm/README.md +291 -0
- package/src/llm/anthropic.ts +386 -0
- package/src/llm/gemini.ts +371 -0
- package/src/llm/index.ts +19 -0
- package/src/llm/manager.ts +153 -0
- package/src/llm/ollama.ts +307 -0
- package/src/llm/openai.ts +350 -0
- package/src/llm/provider.test.ts +231 -0
- package/src/llm/provider.ts +60 -0
- package/src/llm/test.ts +87 -0
- package/src/observers/README.md +278 -0
- package/src/observers/calendar.ts +113 -0
- package/src/observers/clipboard.ts +136 -0
- package/src/observers/email.ts +109 -0
- package/src/observers/example.ts +58 -0
- package/src/observers/file-watcher.ts +124 -0
- package/src/observers/index.ts +159 -0
- package/src/observers/notifications.ts +197 -0
- package/src/observers/observers.test.ts +203 -0
- package/src/observers/processes.ts +225 -0
- package/src/personality/README.md +61 -0
- package/src/personality/adapter.ts +196 -0
- package/src/personality/index.ts +20 -0
- package/src/personality/learner.ts +209 -0
- package/src/personality/model.ts +132 -0
- package/src/personality/personality.test.ts +236 -0
- package/src/roles/README.md +252 -0
- package/src/roles/authority.ts +119 -0
- package/src/roles/example-usage.ts +198 -0
- package/src/roles/index.ts +42 -0
- package/src/roles/loader.ts +143 -0
- package/src/roles/prompt-builder.ts +194 -0
- package/src/roles/test-multi.ts +102 -0
- package/src/roles/test-role.yaml +77 -0
- package/src/roles/test-utils.ts +93 -0
- package/src/roles/test.ts +106 -0
- package/src/roles/tool-guide.ts +190 -0
- package/src/roles/types.ts +36 -0
- package/src/roles/utils.ts +200 -0
- package/src/scripts/google-setup.ts +168 -0
- package/src/sidecar/connection.ts +179 -0
- package/src/sidecar/index.ts +6 -0
- package/src/sidecar/manager.ts +542 -0
- package/src/sidecar/protocol.ts +85 -0
- package/src/sidecar/rpc.ts +161 -0
- package/src/sidecar/scheduler.ts +136 -0
- package/src/sidecar/types.ts +112 -0
- package/src/sidecar/validator.ts +144 -0
- package/src/vault/README.md +110 -0
- package/src/vault/awareness.ts +341 -0
- package/src/vault/commitments.ts +299 -0
- package/src/vault/content-pipeline.ts +260 -0
- package/src/vault/conversations.ts +173 -0
- package/src/vault/entities.ts +180 -0
- package/src/vault/extractor.test.ts +356 -0
- package/src/vault/extractor.ts +345 -0
- package/src/vault/facts.ts +190 -0
- package/src/vault/goals.ts +477 -0
- package/src/vault/index.ts +87 -0
- package/src/vault/keychain.ts +99 -0
- package/src/vault/observations.ts +115 -0
- package/src/vault/relationships.ts +178 -0
- package/src/vault/retrieval.test.ts +126 -0
- package/src/vault/retrieval.ts +227 -0
- package/src/vault/schema.ts +658 -0
- package/src/vault/settings.ts +38 -0
- package/src/vault/vectors.ts +92 -0
- package/src/vault/workflows.ts +403 -0
- package/src/workflows/auto-suggest.ts +290 -0
- package/src/workflows/engine.ts +366 -0
- package/src/workflows/events.ts +24 -0
- package/src/workflows/executor.ts +207 -0
- package/src/workflows/nl-builder.ts +198 -0
- package/src/workflows/nodes/actions/agent-task.ts +73 -0
- package/src/workflows/nodes/actions/calendar-action.ts +85 -0
- package/src/workflows/nodes/actions/code-execution.ts +73 -0
- package/src/workflows/nodes/actions/discord.ts +77 -0
- package/src/workflows/nodes/actions/file-write.ts +73 -0
- package/src/workflows/nodes/actions/gmail.ts +69 -0
- package/src/workflows/nodes/actions/http-request.ts +117 -0
- package/src/workflows/nodes/actions/notification.ts +85 -0
- package/src/workflows/nodes/actions/run-tool.ts +55 -0
- package/src/workflows/nodes/actions/send-message.ts +82 -0
- package/src/workflows/nodes/actions/shell-command.ts +76 -0
- package/src/workflows/nodes/actions/telegram.ts +60 -0
- package/src/workflows/nodes/builtin.ts +119 -0
- package/src/workflows/nodes/error/error-handler.ts +37 -0
- package/src/workflows/nodes/error/fallback.ts +47 -0
- package/src/workflows/nodes/error/retry.ts +82 -0
- package/src/workflows/nodes/logic/delay.ts +42 -0
- package/src/workflows/nodes/logic/if-else.ts +41 -0
- package/src/workflows/nodes/logic/loop.ts +90 -0
- package/src/workflows/nodes/logic/merge.ts +38 -0
- package/src/workflows/nodes/logic/race.ts +40 -0
- package/src/workflows/nodes/logic/switch.ts +59 -0
- package/src/workflows/nodes/logic/template-render.ts +53 -0
- package/src/workflows/nodes/logic/variable-get.ts +37 -0
- package/src/workflows/nodes/logic/variable-set.ts +59 -0
- package/src/workflows/nodes/registry.ts +99 -0
- package/src/workflows/nodes/transform/aggregate.ts +99 -0
- package/src/workflows/nodes/transform/csv-parse.ts +70 -0
- package/src/workflows/nodes/transform/json-parse.ts +63 -0
- package/src/workflows/nodes/transform/map-filter.ts +84 -0
- package/src/workflows/nodes/transform/regex-match.ts +89 -0
- package/src/workflows/nodes/triggers/calendar.ts +33 -0
- package/src/workflows/nodes/triggers/clipboard.ts +32 -0
- package/src/workflows/nodes/triggers/cron.ts +40 -0
- package/src/workflows/nodes/triggers/email.ts +40 -0
- package/src/workflows/nodes/triggers/file-change.ts +45 -0
- package/src/workflows/nodes/triggers/git.ts +46 -0
- package/src/workflows/nodes/triggers/manual.ts +23 -0
- package/src/workflows/nodes/triggers/poll.ts +81 -0
- package/src/workflows/nodes/triggers/process.ts +44 -0
- package/src/workflows/nodes/triggers/screen-event.ts +37 -0
- package/src/workflows/nodes/triggers/webhook.ts +39 -0
- package/src/workflows/safe-eval.ts +139 -0
- package/src/workflows/template.ts +118 -0
- package/src/workflows/triggers/cron.ts +311 -0
- package/src/workflows/triggers/manager.ts +285 -0
- package/src/workflows/triggers/observer-bridge.ts +172 -0
- package/src/workflows/triggers/poller.ts +201 -0
- package/src/workflows/triggers/screen-condition.ts +218 -0
- package/src/workflows/triggers/triggers.test.ts +740 -0
- package/src/workflows/triggers/webhook.ts +191 -0
- package/src/workflows/types.ts +133 -0
- package/src/workflows/variables.ts +72 -0
- package/src/workflows/workflows.test.ts +383 -0
- package/src/workflows/yaml.ts +104 -0
- package/ui/dist/index-j75njzc1.css +1199 -0
- package/ui/dist/index-p2zh407q.js +80603 -0
- package/ui/dist/index.html +13 -0
- package/ui/public/openwakeword/models/embedding_model.onnx +0 -0
- package/ui/public/openwakeword/models/hey_jarvis_v0.1.onnx +0 -0
- package/ui/public/openwakeword/models/melspectrogram.onnx +0 -0
- package/ui/public/openwakeword/models/silero_vad.onnx +0 -0
- package/ui/public/ort/ort-wasm-simd-threaded.jsep.mjs +106 -0
- package/ui/public/ort/ort-wasm-simd-threaded.jsep.wasm +0 -0
- package/ui/public/ort/ort-wasm-simd-threaded.mjs +59 -0
- package/ui/public/ort/ort-wasm-simd-threaded.wasm +0 -0
package/src/llm/ollama.ts
@@ -0,0 +1,307 @@
import type {
  LLMProvider,
  LLMMessage,
  LLMOptions,
  LLMResponse,
  LLMStreamEvent,
  LLMTool,
  LLMToolCall,
} from './provider.ts';

type OllamaMessage = {
  role: 'system' | 'user' | 'assistant';
  content: string;
  images?: string[];
};

type OllamaToolDef = {
  type: 'function';
  function: {
    name: string;
    description: string;
    parameters: Record<string, unknown>;
  };
};

type OllamaResponse = {
  model: string;
  created_at: string;
  message: {
    role: 'assistant';
    content: string;
    tool_calls?: Array<{
      function: {
        name: string;
        arguments: Record<string, unknown>;
      };
    }>;
  };
  done: boolean;
  total_duration?: number;
  load_duration?: number;
  prompt_eval_count?: number;
  eval_count?: number;
};

type OllamaStreamChunk = {
  model: string;
  created_at: string;
  message?: {
    role: 'assistant';
    content: string;
    tool_calls?: Array<{
      function: {
        name: string;
        arguments: Record<string, unknown>;
      };
    }>;
  };
  done: boolean;
  total_duration?: number;
  prompt_eval_count?: number;
  eval_count?: number;
};

type OllamaModelInfo = {
  name: string;
  model: string;
  modified_at: string;
  size: number;
  digest: string;
};

export class OllamaProvider implements LLMProvider {
  name = 'ollama';
  private baseUrl: string;
  private defaultModel: string;

  constructor(baseUrl = 'http://localhost:11434', defaultModel = 'llama3') {
    this.baseUrl = baseUrl.replace(/\/$/, ''); // Remove trailing slash
    this.defaultModel = defaultModel;
  }

  async chat(messages: LLMMessage[], options: LLMOptions = {}): Promise<LLMResponse> {
    const { model = this.defaultModel, temperature, tools } = options;

    const body: Record<string, unknown> = {
      model,
      messages: this.convertMessages(messages),
      stream: false,
    };

    if (temperature !== undefined) {
      body.options = { temperature };
    }

    if (tools && tools.length > 0) {
      body.tools = this.convertTools(tools);
    }

    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(body),
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`Ollama API error (${response.status}): ${errorText}`);
    }

    const data = await response.json() as OllamaResponse;
    return this.convertResponse(data);
  }

  async *stream(messages: LLMMessage[], options: LLMOptions = {}): AsyncIterable<LLMStreamEvent> {
    const { model = this.defaultModel, temperature, tools } = options;

    const body: Record<string, unknown> = {
      model,
      messages: this.convertMessages(messages),
      stream: true,
    };

    if (temperature !== undefined) {
      body.options = { temperature };
    }

    if (tools && tools.length > 0) {
      body.tools = this.convertTools(tools);
    }

    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(body),
    });

    if (!response.ok) {
      const errorText = await response.text();
      yield { type: 'error', error: `Ollama API error (${response.status}): ${errorText}` };
      return;
    }

    if (!response.body) {
      yield { type: 'error', error: 'No response body' };
      return;
    }

    let accumulatedText = '';
    const toolCalls: LLMToolCall[] = [];
    let responseModel = model;
    let inputTokens = 0;
    let outputTokens = 0;

    try {
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (!line.trim()) continue;

          try {
            const chunk = JSON.parse(line) as OllamaStreamChunk;
            responseModel = chunk.model;

            if (chunk.message?.content) {
              accumulatedText += chunk.message.content;
              yield { type: 'text', text: chunk.message.content };
            }

            if (chunk.message?.tool_calls) {
              for (const toolCall of chunk.message.tool_calls) {
                const id = `ollama_${Date.now()}_${Math.random().toString(36).substring(7)}`;
                const call: LLMToolCall = {
                  id,
                  name: toolCall.function.name,
                  arguments: toolCall.function.arguments,
                };
                toolCalls.push(call);
                yield { type: 'tool_call', tool_call: call };
              }
            }

            if (chunk.done) {
              inputTokens = chunk.prompt_eval_count || 0;
              outputTokens = chunk.eval_count || 0;

              yield {
                type: 'done',
                response: {
                  content: accumulatedText,
                  tool_calls: toolCalls,
                  usage: { input_tokens: inputTokens, output_tokens: outputTokens },
                  model: responseModel,
                  finish_reason: toolCalls.length > 0 ? 'tool_use' : 'stop',
                },
              };
            }
          } catch (err) {
            console.error('Failed to parse Ollama chunk:', err);
          }
        }
      }
    } catch (err) {
      yield { type: 'error', error: `Stream error: ${err}` };
    }
  }

  async listModels(): Promise<string[]> {
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);

      if (!response.ok) {
        throw new Error(`Failed to list models: ${response.status}`);
      }

      const data = await response.json() as { models: OllamaModelInfo[] };
      return data.models.map(m => m.name).sort();
    } catch (err) {
      // Fallback to common models if API call fails
      return ['llama3', 'llama2', 'mistral', 'mixtral', 'codellama'];
    }
  }

  private convertMessages(messages: LLMMessage[]): OllamaMessage[] {
    return messages.map(m => {
      if (typeof m.content === 'string') {
        return {
          role: m.role as 'system' | 'user' | 'assistant',
          content: m.content,
        };
      }

      // ContentBlock[] — extract text and images separately
      let text = '';
      const images: string[] = [];

      for (const block of m.content) {
        if (block.type === 'text') {
          text += (text ? '\n' : '') + block.text;
        } else if (block.type === 'image') {
          images.push(block.source.data);
        }
      }

      const msg: OllamaMessage = {
        role: m.role as 'system' | 'user' | 'assistant',
        content: text,
      };
      if (images.length > 0) {
        msg.images = images;
      }
      return msg;
    });
  }

  private convertTools(tools: LLMTool[]): OllamaToolDef[] {
    return tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters,
      },
    }));
  }

  private convertResponse(response: OllamaResponse): LLMResponse {
    const content = response.message.content;
    const tool_calls: LLMToolCall[] = [];

    if (response.message.tool_calls) {
      for (const toolCall of response.message.tool_calls) {
        const id = `ollama_${Date.now()}_${Math.random().toString(36).substring(7)}`;
        tool_calls.push({
          id,
          name: toolCall.function.name,
          arguments: toolCall.function.arguments,
        });
      }
    }

    return {
      content,
      tool_calls,
      usage: {
        input_tokens: response.prompt_eval_count || 0,
        output_tokens: response.eval_count || 0,
      },
      model: response.model,
      finish_reason: tool_calls.length > 0 ? 'tool_use' : 'stop',
    };
  }
}
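For orientation, a minimal usage sketch of the OllamaProvider above. This is not part of the published diff; it assumes a local Ollama server on the default port, and the exact LLMMessage and LLMStreamEvent shapes come from provider.ts, which this diff does not show:

// Hypothetical usage sketch, not part of the package contents.
import { OllamaProvider } from './ollama.ts';
import type { LLMMessage } from './provider.ts';

const provider = new OllamaProvider('http://localhost:11434', 'llama3');
const messages: LLMMessage[] = [
  { role: 'user', content: 'Summarize the plan for today in one sentence.' },
];

// One-shot call: POSTs to /api/chat with stream: false and returns a normalized LLMResponse.
const reply = await provider.chat(messages, { temperature: 0.2 });
console.log(reply.content, reply.usage);

// Streaming call: Ollama emits newline-delimited JSON chunks, surfaced here as stream events.
for await (const event of provider.stream(messages)) {
  if (event.type === 'text') process.stdout.write(event.text);
  else if (event.type === 'error') console.error(event.error);
}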
package/src/llm/openai.ts
@@ -0,0 +1,350 @@
import type {
  LLMProvider,
  LLMMessage,
  LLMOptions,
  LLMResponse,
  LLMStreamEvent,
  LLMTool,
  LLMToolCall,
} from './provider.ts';

type OpenAIMessage = {
  role: 'system' | 'user' | 'assistant';
  content: string;
};

type OpenAIToolDef = {
  type: 'function';
  function: {
    name: string;
    description: string;
    parameters: Record<string, unknown>;
  };
};

type OpenAIToolCall = {
  id: string;
  type: 'function';
  function: {
    name: string;
    arguments: string;
  };
};

type OpenAIResponse = {
  id: string;
  object: 'chat.completion';
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: 'assistant';
      content: string | null;
      tool_calls?: OpenAIToolCall[];
    };
    finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | null;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

type OpenAIStreamChunk = {
  id: string;
  object: 'chat.completion.chunk';
  created: number;
  model: string;
  choices: Array<{
    index: number;
    delta: {
      role?: 'assistant';
      content?: string;
      tool_calls?: Array<{
        index: number;
        id?: string;
        type?: 'function';
        function?: {
          name?: string;
          arguments?: string;
        };
      }>;
    };
    finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | null;
  }>;
};

export class OpenAIProvider implements LLMProvider {
  name = 'openai';
  private apiKey: string;
  private defaultModel: string;
  private apiUrl = 'https://api.openai.com/v1/chat/completions';

  constructor(apiKey: string, defaultModel = 'gpt-4o') {
    this.apiKey = apiKey;
    this.defaultModel = defaultModel;
  }

  async chat(messages: LLMMessage[], options: LLMOptions = {}): Promise<LLMResponse> {
    const { model = this.defaultModel, temperature, max_tokens, tools } = options;

    const body: Record<string, unknown> = {
      model,
      messages: this.convertMessages(messages),
    };

    if (temperature !== undefined) body.temperature = temperature;
    if (max_tokens !== undefined) body.max_tokens = max_tokens;
    if (tools && tools.length > 0) {
      body.tools = this.convertTools(tools);
    }

    const response = await fetch(this.apiUrl, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(body),
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`OpenAI API error (${response.status}): ${errorText}`);
    }

    const data = await response.json() as OpenAIResponse;
    return this.convertResponse(data);
  }

  async *stream(messages: LLMMessage[], options: LLMOptions = {}): AsyncIterable<LLMStreamEvent> {
    const { model = this.defaultModel, temperature, max_tokens, tools } = options;

    const body: Record<string, unknown> = {
      model,
      messages: this.convertMessages(messages),
      stream: true,
    };

    if (temperature !== undefined) body.temperature = temperature;
    if (max_tokens !== undefined) body.max_tokens = max_tokens;
    if (tools && tools.length > 0) {
      body.tools = this.convertTools(tools);
    }

    const response = await fetch(this.apiUrl, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(body),
    });

    if (!response.ok) {
      const errorText = await response.text();
      yield { type: 'error', error: `OpenAI API error (${response.status}): ${errorText}` };
      return;
    }

    if (!response.body) {
      yield { type: 'error', error: 'No response body' };
      return;
    }

    let accumulatedText = '';
    const toolCalls: LLMToolCall[] = [];
    const toolCallBuilders: Map<number, { id: string; name: string; arguments: string }> = new Map();
    let finishReason: string | null = null;
    let responseModel = model;

    try {
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (!line.trim() || !line.startsWith('data: ')) continue;

          const data = line.slice(6);
          if (data === '[DONE]') continue;

          try {
            const chunk = JSON.parse(data) as OpenAIStreamChunk;
            if (chunk.choices && chunk.choices.length > 0) {
              const choice = chunk.choices[0];
              responseModel = chunk.model;

              if (choice!.delta.content) {
                accumulatedText += choice!.delta.content;
                yield { type: 'text', text: choice!.delta.content };
              }

              if (choice!.delta.tool_calls) {
                for (const toolCallDelta of choice!.delta.tool_calls) {
                  const index = toolCallDelta.index;
                  let builder = toolCallBuilders.get(index);

                  if (!builder) {
                    builder = {
                      id: toolCallDelta.id || '',
                      name: toolCallDelta.function?.name || '',
                      arguments: '',
                    };
                    toolCallBuilders.set(index, builder);
                  }

                  if (toolCallDelta.id) builder.id = toolCallDelta.id;
                  if (toolCallDelta.function?.name) builder.name = toolCallDelta.function.name;
                  if (toolCallDelta.function?.arguments) {
                    builder.arguments += toolCallDelta.function.arguments;
                  }
                }
              }

              if (choice!.finish_reason) {
                finishReason = choice!.finish_reason;
              }
            }
          } catch (err) {
            // Skip invalid JSON lines
            console.error('Failed to parse SSE chunk:', err);
          }
        }
      }

      // Convert accumulated tool calls
      for (const builder of toolCallBuilders.values()) {
        try {
          const toolCall: LLMToolCall = {
            id: builder.id,
            name: builder.name,
            arguments: JSON.parse(builder.arguments),
          };
          toolCalls.push(toolCall);
          yield { type: 'tool_call', tool_call: toolCall };
        } catch (err) {
          yield { type: 'error', error: `Failed to parse tool call arguments: ${err}` };
        }
      }

      const mappedFinishReason = this.mapFinishReason(finishReason);
      yield {
        type: 'done',
        response: {
          content: accumulatedText,
          tool_calls: toolCalls,
          usage: { input_tokens: 0, output_tokens: 0 }, // OpenAI doesn't provide usage in stream
          model: responseModel,
          finish_reason: mappedFinishReason,
        },
      };
    } catch (err) {
      yield { type: 'error', error: `Stream error: ${err}` };
    }
  }

  async listModels(): Promise<string[]> {
    try {
      const response = await fetch('https://api.openai.com/v1/models', {
        headers: {
          'Authorization': `Bearer ${this.apiKey}`,
        },
      });

      if (!response.ok) {
        throw new Error(`Failed to list models: ${response.status}`);
      }

      const data = await response.json() as { data: Array<{ id: string }> };
      return data.data
        .map(m => m.id)
        .filter(id => id.startsWith('gpt-'))
        .sort();
    } catch (err) {
      // Fallback to known models if API call fails
      return [
        'gpt-4o',
        'gpt-4o-mini',
        'gpt-4-turbo',
        'gpt-4',
        'gpt-3.5-turbo',
      ];
    }
  }

  private convertMessages(messages: LLMMessage[]): OpenAIMessage[] {
    return messages.map(m => ({
      role: m.role as 'system' | 'user' | 'assistant',
      content: m.content as string,
    }));
  }

  private convertTools(tools: LLMTool[]): OpenAIToolDef[] {
    return tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters,
      },
    }));
  }

  private convertResponse(response: OpenAIResponse): LLMResponse {
    const choice = response.choices[0]!;
    const message = choice.message;
    const content = message.content || '';
    const tool_calls: LLMToolCall[] = [];

    if (message.tool_calls) {
      for (const toolCall of message.tool_calls) {
        try {
          tool_calls.push({
            id: toolCall.id,
            name: toolCall.function.name,
            arguments: JSON.parse(toolCall.function.arguments),
          });
        } catch (err) {
          console.error('Failed to parse tool call arguments:', err);
        }
      }
    }

    return {
      content,
      tool_calls,
      usage: {
        input_tokens: response.usage.prompt_tokens,
        output_tokens: response.usage.completion_tokens,
      },
      model: response.model,
      finish_reason: this.mapFinishReason(choice!.finish_reason),
    };
  }

  private mapFinishReason(finishReason: string | null): 'stop' | 'tool_use' | 'length' | 'error' {
    switch (finishReason) {
      case 'stop':
        return 'stop';
      case 'tool_calls':
        return 'tool_use';
      case 'length':
        return 'length';
      case 'content_filter':
        return 'error';
      default:
        return 'stop';
    }
  }
}
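And a similar hedged sketch for the OpenAIProvider above, assuming an API key in OPENAI_API_KEY and the LLMTool shape implied by convertTools (name, description, JSON Schema parameters). Illustrative only, not part of the published files:

// Hypothetical usage sketch, not part of the package contents.
import { OpenAIProvider } from './openai.ts';
import type { LLMMessage, LLMTool } from './provider.ts';

const provider = new OpenAIProvider(process.env.OPENAI_API_KEY ?? '', 'gpt-4o-mini');

// A single tool definition; `parameters` is a JSON Schema object passed through convertTools().
const tools: LLMTool[] = [
  {
    name: 'get_weather',
    description: 'Look up the current weather for a city',
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city'],
    },
  },
];

const messages: LLMMessage[] = [
  { role: 'user', content: 'What is the weather in Paris right now?' },
];

const response = await provider.chat(messages, { tools, max_tokens: 200 });

// mapFinishReason() turns OpenAI's 'tool_calls' into the provider-neutral 'tool_use'.
if (response.finish_reason === 'tool_use') {
  for (const call of response.tool_calls) {
    console.log(call.name, call.arguments); // e.g. get_weather { city: 'Paris' }
  }
}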