@build-astron-co/nimbus 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +628 -0
- package/bin/nimbus +38 -0
- package/package.json +80 -0
- package/src/__tests__/app.test.ts +76 -0
- package/src/__tests__/audit.test.ts +877 -0
- package/src/__tests__/circuit-breaker.test.ts +116 -0
- package/src/__tests__/cli-run.test.ts +115 -0
- package/src/__tests__/context-manager.test.ts +502 -0
- package/src/__tests__/context.test.ts +242 -0
- package/src/__tests__/enterprise.test.ts +401 -0
- package/src/__tests__/generator.test.ts +433 -0
- package/src/__tests__/hooks.test.ts +582 -0
- package/src/__tests__/init.test.ts +436 -0
- package/src/__tests__/intent-parser.test.ts +229 -0
- package/src/__tests__/llm-router.test.ts +209 -0
- package/src/__tests__/lsp.test.ts +293 -0
- package/src/__tests__/modes.test.ts +336 -0
- package/src/__tests__/permissions.test.ts +338 -0
- package/src/__tests__/serve.test.ts +275 -0
- package/src/__tests__/sessions.test.ts +227 -0
- package/src/__tests__/sharing.test.ts +288 -0
- package/src/__tests__/snapshots.test.ts +581 -0
- package/src/__tests__/state-db.test.ts +334 -0
- package/src/__tests__/stream-with-tools.test.ts +732 -0
- package/src/__tests__/subagents.test.ts +176 -0
- package/src/__tests__/system-prompt.test.ts +169 -0
- package/src/__tests__/tool-converter.test.ts +256 -0
- package/src/__tests__/tool-schemas.test.ts +397 -0
- package/src/__tests__/tools.test.ts +143 -0
- package/src/__tests__/version.test.ts +49 -0
- package/src/agent/compaction-agent.ts +227 -0
- package/src/agent/context-manager.ts +435 -0
- package/src/agent/context.ts +427 -0
- package/src/agent/deploy-preview.ts +426 -0
- package/src/agent/index.ts +68 -0
- package/src/agent/loop.ts +717 -0
- package/src/agent/modes.ts +429 -0
- package/src/agent/permissions.ts +466 -0
- package/src/agent/subagents/base.ts +116 -0
- package/src/agent/subagents/cost.ts +51 -0
- package/src/agent/subagents/explore.ts +42 -0
- package/src/agent/subagents/general.ts +54 -0
- package/src/agent/subagents/index.ts +102 -0
- package/src/agent/subagents/infra.ts +59 -0
- package/src/agent/subagents/security.ts +69 -0
- package/src/agent/system-prompt.ts +436 -0
- package/src/app.ts +122 -0
- package/src/audit/activity-log.ts +290 -0
- package/src/audit/compliance-checker.ts +540 -0
- package/src/audit/cost-tracker.ts +318 -0
- package/src/audit/index.ts +23 -0
- package/src/audit/security-scanner.ts +596 -0
- package/src/auth/guard.ts +75 -0
- package/src/auth/index.ts +56 -0
- package/src/auth/oauth.ts +455 -0
- package/src/auth/providers.ts +470 -0
- package/src/auth/sso.ts +113 -0
- package/src/auth/store.ts +505 -0
- package/src/auth/types.ts +187 -0
- package/src/build.ts +141 -0
- package/src/cli/index.ts +16 -0
- package/src/cli/init.ts +854 -0
- package/src/cli/openapi-spec.ts +356 -0
- package/src/cli/run.ts +237 -0
- package/src/cli/serve-auth.ts +80 -0
- package/src/cli/serve.ts +462 -0
- package/src/cli/web.ts +67 -0
- package/src/cli.ts +1417 -0
- package/src/clients/core-engine-client.ts +227 -0
- package/src/clients/enterprise-client.ts +334 -0
- package/src/clients/generator-client.ts +351 -0
- package/src/clients/git-client.ts +627 -0
- package/src/clients/github-client.ts +410 -0
- package/src/clients/helm-client.ts +504 -0
- package/src/clients/index.ts +80 -0
- package/src/clients/k8s-client.ts +497 -0
- package/src/clients/llm-client.ts +161 -0
- package/src/clients/rest-client.ts +130 -0
- package/src/clients/service-discovery.ts +33 -0
- package/src/clients/terraform-client.ts +482 -0
- package/src/clients/tools-client.ts +1843 -0
- package/src/clients/ws-client.ts +115 -0
- package/src/commands/analyze/index.ts +352 -0
- package/src/commands/apply/helm.ts +473 -0
- package/src/commands/apply/index.ts +213 -0
- package/src/commands/apply/k8s.ts +454 -0
- package/src/commands/apply/terraform.ts +582 -0
- package/src/commands/ask.ts +167 -0
- package/src/commands/audit/index.ts +238 -0
- package/src/commands/auth-cloud.ts +294 -0
- package/src/commands/auth-list.ts +134 -0
- package/src/commands/auth-profile.ts +121 -0
- package/src/commands/auth-status.ts +141 -0
- package/src/commands/aws/ec2.ts +501 -0
- package/src/commands/aws/iam.ts +397 -0
- package/src/commands/aws/index.ts +133 -0
- package/src/commands/aws/lambda.ts +396 -0
- package/src/commands/aws/rds.ts +439 -0
- package/src/commands/aws/s3.ts +439 -0
- package/src/commands/aws/vpc.ts +393 -0
- package/src/commands/aws-discover.ts +649 -0
- package/src/commands/aws-terraform.ts +805 -0
- package/src/commands/azure/aks.ts +376 -0
- package/src/commands/azure/functions.ts +253 -0
- package/src/commands/azure/index.ts +116 -0
- package/src/commands/azure/storage.ts +478 -0
- package/src/commands/azure/vm.ts +355 -0
- package/src/commands/billing/index.ts +256 -0
- package/src/commands/chat.ts +314 -0
- package/src/commands/config.ts +346 -0
- package/src/commands/cost/cloud-cost-estimator.ts +266 -0
- package/src/commands/cost/estimator.ts +79 -0
- package/src/commands/cost/index.ts +594 -0
- package/src/commands/cost/parsers/terraform.ts +273 -0
- package/src/commands/cost/parsers/types.ts +25 -0
- package/src/commands/cost/pricing/aws.ts +544 -0
- package/src/commands/cost/pricing/azure.ts +499 -0
- package/src/commands/cost/pricing/gcp.ts +396 -0
- package/src/commands/cost/pricing/index.ts +40 -0
- package/src/commands/demo.ts +250 -0
- package/src/commands/doctor.ts +794 -0
- package/src/commands/drift/index.ts +439 -0
- package/src/commands/explain.ts +277 -0
- package/src/commands/feedback.ts +389 -0
- package/src/commands/fix.ts +324 -0
- package/src/commands/fs/index.ts +402 -0
- package/src/commands/gcp/compute.ts +325 -0
- package/src/commands/gcp/functions.ts +271 -0
- package/src/commands/gcp/gke.ts +438 -0
- package/src/commands/gcp/iam.ts +344 -0
- package/src/commands/gcp/index.ts +129 -0
- package/src/commands/gcp/storage.ts +284 -0
- package/src/commands/generate-helm.ts +1249 -0
- package/src/commands/generate-k8s.ts +1560 -0
- package/src/commands/generate-terraform.ts +1460 -0
- package/src/commands/gh/index.ts +863 -0
- package/src/commands/git/index.ts +1343 -0
- package/src/commands/helm/index.ts +1126 -0
- package/src/commands/help.ts +539 -0
- package/src/commands/history.ts +142 -0
- package/src/commands/import.ts +868 -0
- package/src/commands/index.ts +367 -0
- package/src/commands/init.ts +1046 -0
- package/src/commands/k8s/index.ts +1137 -0
- package/src/commands/login.ts +631 -0
- package/src/commands/logout.ts +83 -0
- package/src/commands/onboarding.ts +228 -0
- package/src/commands/plan/display.ts +279 -0
- package/src/commands/plan/index.ts +599 -0
- package/src/commands/preview.ts +452 -0
- package/src/commands/questionnaire.ts +1270 -0
- package/src/commands/resume.ts +55 -0
- package/src/commands/team/index.ts +346 -0
- package/src/commands/template.ts +232 -0
- package/src/commands/tf/index.ts +1034 -0
- package/src/commands/upgrade.ts +550 -0
- package/src/commands/usage/index.ts +134 -0
- package/src/commands/version.ts +170 -0
- package/src/compat/index.ts +2 -0
- package/src/compat/runtime.ts +12 -0
- package/src/compat/sqlite.ts +107 -0
- package/src/config/index.ts +17 -0
- package/src/config/manager.ts +530 -0
- package/src/config/safety-policy.ts +358 -0
- package/src/config/schema.ts +125 -0
- package/src/config/types.ts +527 -0
- package/src/context/context-db.ts +199 -0
- package/src/demo/index.ts +349 -0
- package/src/demo/scenarios/full-journey.ts +229 -0
- package/src/demo/scenarios/getting-started.ts +127 -0
- package/src/demo/scenarios/helm-release.ts +341 -0
- package/src/demo/scenarios/k8s-deployment.ts +194 -0
- package/src/demo/scenarios/terraform-vpc.ts +170 -0
- package/src/demo/types.ts +92 -0
- package/src/engine/cost-estimator.ts +438 -0
- package/src/engine/diagram-generator.ts +256 -0
- package/src/engine/drift-detector.ts +902 -0
- package/src/engine/executor.ts +1035 -0
- package/src/engine/index.ts +76 -0
- package/src/engine/orchestrator.ts +636 -0
- package/src/engine/planner.ts +720 -0
- package/src/engine/safety.ts +743 -0
- package/src/engine/verifier.ts +770 -0
- package/src/enterprise/audit.ts +348 -0
- package/src/enterprise/auth.ts +270 -0
- package/src/enterprise/billing.ts +822 -0
- package/src/enterprise/index.ts +17 -0
- package/src/enterprise/teams.ts +443 -0
- package/src/generator/best-practices.ts +1608 -0
- package/src/generator/helm.ts +630 -0
- package/src/generator/index.ts +37 -0
- package/src/generator/intent-parser.ts +514 -0
- package/src/generator/kubernetes.ts +976 -0
- package/src/generator/terraform.ts +1867 -0
- package/src/history/index.ts +8 -0
- package/src/history/manager.ts +322 -0
- package/src/history/types.ts +34 -0
- package/src/hooks/config.ts +432 -0
- package/src/hooks/engine.ts +391 -0
- package/src/hooks/index.ts +4 -0
- package/src/llm/auth-bridge.ts +198 -0
- package/src/llm/circuit-breaker.ts +140 -0
- package/src/llm/config-loader.ts +201 -0
- package/src/llm/cost-calculator.ts +171 -0
- package/src/llm/index.ts +8 -0
- package/src/llm/model-aliases.ts +115 -0
- package/src/llm/provider-registry.ts +63 -0
- package/src/llm/providers/anthropic.ts +433 -0
- package/src/llm/providers/bedrock.ts +477 -0
- package/src/llm/providers/google.ts +405 -0
- package/src/llm/providers/ollama.ts +767 -0
- package/src/llm/providers/openai-compatible.ts +340 -0
- package/src/llm/providers/openai.ts +328 -0
- package/src/llm/providers/openrouter.ts +338 -0
- package/src/llm/router.ts +1035 -0
- package/src/llm/types.ts +232 -0
- package/src/lsp/client.ts +298 -0
- package/src/lsp/languages.ts +116 -0
- package/src/lsp/manager.ts +278 -0
- package/src/mcp/client.ts +402 -0
- package/src/mcp/index.ts +5 -0
- package/src/mcp/manager.ts +133 -0
- package/src/nimbus.ts +214 -0
- package/src/plugins/index.ts +27 -0
- package/src/plugins/loader.ts +334 -0
- package/src/plugins/manager.ts +376 -0
- package/src/plugins/types.ts +284 -0
- package/src/scanners/cicd-scanner.ts +258 -0
- package/src/scanners/cloud-scanner.ts +466 -0
- package/src/scanners/framework-scanner.ts +469 -0
- package/src/scanners/iac-scanner.ts +388 -0
- package/src/scanners/index.ts +539 -0
- package/src/scanners/language-scanner.ts +276 -0
- package/src/scanners/package-manager-scanner.ts +277 -0
- package/src/scanners/types.ts +172 -0
- package/src/sessions/manager.ts +365 -0
- package/src/sessions/types.ts +44 -0
- package/src/sharing/sync.ts +296 -0
- package/src/sharing/viewer.ts +97 -0
- package/src/snapshots/index.ts +2 -0
- package/src/snapshots/manager.ts +530 -0
- package/src/state/artifacts.ts +147 -0
- package/src/state/audit.ts +137 -0
- package/src/state/billing.ts +240 -0
- package/src/state/checkpoints.ts +117 -0
- package/src/state/config.ts +67 -0
- package/src/state/conversations.ts +14 -0
- package/src/state/credentials.ts +154 -0
- package/src/state/db.ts +58 -0
- package/src/state/index.ts +26 -0
- package/src/state/messages.ts +115 -0
- package/src/state/projects.ts +123 -0
- package/src/state/schema.ts +236 -0
- package/src/state/sessions.ts +147 -0
- package/src/state/teams.ts +200 -0
- package/src/telemetry.ts +108 -0
- package/src/tools/aws-ops.ts +952 -0
- package/src/tools/azure-ops.ts +579 -0
- package/src/tools/file-ops.ts +593 -0
- package/src/tools/gcp-ops.ts +625 -0
- package/src/tools/git-ops.ts +773 -0
- package/src/tools/github-ops.ts +799 -0
- package/src/tools/helm-ops.ts +943 -0
- package/src/tools/index.ts +17 -0
- package/src/tools/k8s-ops.ts +819 -0
- package/src/tools/schemas/converter.ts +184 -0
- package/src/tools/schemas/devops.ts +612 -0
- package/src/tools/schemas/index.ts +73 -0
- package/src/tools/schemas/standard.ts +1144 -0
- package/src/tools/schemas/types.ts +705 -0
- package/src/tools/terraform-ops.ts +862 -0
- package/src/types/ambient.d.ts +193 -0
- package/src/types/config.ts +83 -0
- package/src/types/drift.ts +116 -0
- package/src/types/enterprise.ts +335 -0
- package/src/types/index.ts +20 -0
- package/src/types/plan.ts +44 -0
- package/src/types/request.ts +65 -0
- package/src/types/response.ts +54 -0
- package/src/types/service.ts +51 -0
- package/src/ui/App.tsx +997 -0
- package/src/ui/DeployPreview.tsx +169 -0
- package/src/ui/Header.tsx +68 -0
- package/src/ui/InputBox.tsx +350 -0
- package/src/ui/MessageList.tsx +585 -0
- package/src/ui/PermissionPrompt.tsx +151 -0
- package/src/ui/StatusBar.tsx +158 -0
- package/src/ui/ToolCallDisplay.tsx +409 -0
- package/src/ui/chat-ui.ts +853 -0
- package/src/ui/index.ts +33 -0
- package/src/ui/ink/index.ts +711 -0
- package/src/ui/streaming.ts +176 -0
- package/src/ui/types.ts +57 -0
- package/src/utils/analytics.ts +72 -0
- package/src/utils/cost-warning.ts +27 -0
- package/src/utils/env.ts +46 -0
- package/src/utils/errors.ts +69 -0
- package/src/utils/event-bus.ts +38 -0
- package/src/utils/index.ts +24 -0
- package/src/utils/logger.ts +171 -0
- package/src/utils/rate-limiter.ts +121 -0
- package/src/utils/service-auth.ts +49 -0
- package/src/utils/validation.ts +53 -0
- package/src/version.ts +4 -0
- package/src/watcher/index.ts +163 -0
- package/src/wizard/approval.ts +383 -0
- package/src/wizard/index.ts +25 -0
- package/src/wizard/prompts.ts +338 -0
- package/src/wizard/types.ts +171 -0
- package/src/wizard/ui.ts +556 -0
- package/src/wizard/wizard.ts +304 -0
- package/tsconfig.json +24 -0
|
@@ -0,0 +1,732 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* streamWithTools() Tests
|
|
3
|
+
*
|
|
4
|
+
* Validates the streamWithTools() method added to three LLM providers:
|
|
5
|
+
* - OllamaProvider (src/llm/providers/ollama.ts)
|
|
6
|
+
* - OpenRouterProvider (src/llm/providers/openrouter.ts)
|
|
7
|
+
* - OpenAICompatibleProvider (src/llm/providers/openai-compatible.ts)
|
|
8
|
+
*
|
|
9
|
+
* Each provider is tested for:
|
|
10
|
+
* 1. Text-only streaming (yields text chunks then a final done chunk)
|
|
11
|
+
* 2. Tool call streaming (yields tool calls in the final chunk)
|
|
12
|
+
* 3. Fallback behavior (when streaming fails, falls back gracefully)
|
|
13
|
+
*
|
|
14
|
+
* All tests use mocks -- no real API calls are made.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import { describe, test, expect, mock, beforeEach } from 'bun:test';
|
|
18
|
+
import type { ToolCompletionRequest, StreamChunk } from '../llm/types';
|
|
19
|
+
|
|
20
|
+
// ---------------------------------------------------------------------------
|
|
21
|
+
// Helpers
|
|
22
|
+
// ---------------------------------------------------------------------------
|
|
23
|
+
|
|
24
|
+
/** Collect all chunks from an async generator into an array. */
|
|
25
|
+
async function collectChunks(gen: AsyncIterable<StreamChunk>): Promise<StreamChunk[]> {
|
|
26
|
+
const chunks: StreamChunk[] = [];
|
|
27
|
+
for await (const chunk of gen) {
|
|
28
|
+
chunks.push(chunk);
|
|
29
|
+
}
|
|
30
|
+
return chunks;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
/** Minimal ToolCompletionRequest used across all tests. */
|
|
34
|
+
function makeRequest(overrides?: Partial<ToolCompletionRequest>): ToolCompletionRequest {
|
|
35
|
+
return {
|
|
36
|
+
messages: [{ role: 'user', content: 'List files in the current directory' }],
|
|
37
|
+
tools: [
|
|
38
|
+
{
|
|
39
|
+
type: 'function',
|
|
40
|
+
function: {
|
|
41
|
+
name: 'bash',
|
|
42
|
+
description: 'Run a bash command',
|
|
43
|
+
parameters: {
|
|
44
|
+
type: 'object',
|
|
45
|
+
properties: {
|
|
46
|
+
command: { type: 'string', description: 'The command to run' },
|
|
47
|
+
},
|
|
48
|
+
required: ['command'],
|
|
49
|
+
},
|
|
50
|
+
},
|
|
51
|
+
},
|
|
52
|
+
],
|
|
53
|
+
...overrides,
|
|
54
|
+
};
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Build a ReadableStream from an array of raw strings.
|
|
59
|
+
* Each string becomes one chunk the reader yields.
|
|
60
|
+
*/
|
|
61
|
+
function buildReadableStream(lines: string[]): ReadableStream<Uint8Array> {
|
|
62
|
+
const encoder = new TextEncoder();
|
|
63
|
+
let index = 0;
|
|
64
|
+
return new ReadableStream<Uint8Array>({
|
|
65
|
+
pull(controller) {
|
|
66
|
+
if (index < lines.length) {
|
|
67
|
+
controller.enqueue(encoder.encode(lines[index]));
|
|
68
|
+
index++;
|
|
69
|
+
} else {
|
|
70
|
+
controller.close();
|
|
71
|
+
}
|
|
72
|
+
},
|
|
73
|
+
});
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/**
|
|
77
|
+
* Create a mock async iterable (simulating the OpenAI SDK stream object).
|
|
78
|
+
* Accepts an array of chunk objects and yields them in order.
|
|
79
|
+
*/
|
|
80
|
+
function createMockOpenAIStream(
|
|
81
|
+
chunks: Array<{
|
|
82
|
+
choices: Array<{
|
|
83
|
+
delta: { content?: string; tool_calls?: any[] };
|
|
84
|
+
finish_reason: string | null;
|
|
85
|
+
}>;
|
|
86
|
+
usage?: { prompt_tokens: number; completion_tokens: number; total_tokens: number };
|
|
87
|
+
}>
|
|
88
|
+
) {
|
|
89
|
+
return {
|
|
90
|
+
async *[Symbol.asyncIterator]() {
|
|
91
|
+
for (const chunk of chunks) {
|
|
92
|
+
yield chunk;
|
|
93
|
+
}
|
|
94
|
+
},
|
|
95
|
+
};
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
// ===========================================================================
|
|
99
|
+
// OllamaProvider
|
|
100
|
+
// ===========================================================================
|
|
101
|
+
|
|
102
|
+
describe('OllamaProvider.streamWithTools', () => {
|
|
103
|
+
let originalFetch: typeof globalThis.fetch;
|
|
104
|
+
|
|
105
|
+
beforeEach(() => {
|
|
106
|
+
originalFetch = globalThis.fetch;
|
|
107
|
+
});
|
|
108
|
+
|
|
109
|
+
// Restore fetch after each test to avoid leaking mocks
|
|
110
|
+
function restoreFetch() {
|
|
111
|
+
globalThis.fetch = originalFetch;
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
test('text-only streaming yields text chunks then a done chunk', async () => {
|
|
115
|
+
// Mock the fetch for /v1/chat/completions (native tool streaming endpoint)
|
|
116
|
+
const sseLines = [
|
|
117
|
+
'data: {"choices":[{"delta":{"content":"Hello"},"finish_reason":null}]}\n\n',
|
|
118
|
+
'data: {"choices":[{"delta":{"content":" world"},"finish_reason":null}]}\n\n',
|
|
119
|
+
'data: {"choices":[{"delta":{},"finish_reason":"stop"}],"usage":{"prompt_tokens":10,"completion_tokens":5,"total_tokens":15}}\n\n',
|
|
120
|
+
'data: [DONE]\n\n',
|
|
121
|
+
];
|
|
122
|
+
|
|
123
|
+
globalThis.fetch = mock(() =>
|
|
124
|
+
Promise.resolve(
|
|
125
|
+
new Response(buildReadableStream(sseLines), {
|
|
126
|
+
status: 200,
|
|
127
|
+
headers: { 'Content-Type': 'text/event-stream' },
|
|
128
|
+
})
|
|
129
|
+
)
|
|
130
|
+
) as any;
|
|
131
|
+
|
|
132
|
+
try {
|
|
133
|
+
const { OllamaProvider } = await import('../llm/providers/ollama');
|
|
134
|
+
const provider = new OllamaProvider('http://localhost:11434');
|
|
135
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
136
|
+
|
|
137
|
+
// Should have text chunks followed by a done chunk
|
|
138
|
+
const textChunks = chunks.filter(c => c.content && !c.done);
|
|
139
|
+
const doneChunks = chunks.filter(c => c.done);
|
|
140
|
+
|
|
141
|
+
expect(textChunks.length).toBe(2);
|
|
142
|
+
expect(textChunks[0].content).toBe('Hello');
|
|
143
|
+
expect(textChunks[1].content).toBe(' world');
|
|
144
|
+
|
|
145
|
+
expect(doneChunks.length).toBe(1);
|
|
146
|
+
expect(doneChunks[0].done).toBe(true);
|
|
147
|
+
expect(doneChunks[0].toolCalls).toBeUndefined();
|
|
148
|
+
|
|
149
|
+
// Usage should be present on the final chunk
|
|
150
|
+
expect(doneChunks[0].usage).toEqual({
|
|
151
|
+
promptTokens: 10,
|
|
152
|
+
completionTokens: 5,
|
|
153
|
+
totalTokens: 15,
|
|
154
|
+
});
|
|
155
|
+
} finally {
|
|
156
|
+
restoreFetch();
|
|
157
|
+
}
|
|
158
|
+
});
|
|
159
|
+
|
|
160
|
+
test('tool call streaming accumulates tool calls and yields them on the done chunk', async () => {
|
|
161
|
+
const sseLines = [
|
|
162
|
+
// First chunk: tool call header
|
|
163
|
+
'data: {"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_abc123","function":{"name":"bash","arguments":""}}]},"finish_reason":null}]}\n\n',
|
|
164
|
+
// Second chunk: tool call arguments (streamed incrementally)
|
|
165
|
+
'data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\"command\\""}}]},"finish_reason":null}]}\n\n',
|
|
166
|
+
// Third chunk: remaining arguments
|
|
167
|
+
'data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":":\\"ls -la\\"}"}}]},"finish_reason":null}]}\n\n',
|
|
168
|
+
// Final chunk with finish_reason
|
|
169
|
+
'data: {"choices":[{"delta":{},"finish_reason":"tool_calls"}]}\n\n',
|
|
170
|
+
'data: [DONE]\n\n',
|
|
171
|
+
];
|
|
172
|
+
|
|
173
|
+
globalThis.fetch = mock(() =>
|
|
174
|
+
Promise.resolve(
|
|
175
|
+
new Response(buildReadableStream(sseLines), {
|
|
176
|
+
status: 200,
|
|
177
|
+
headers: { 'Content-Type': 'text/event-stream' },
|
|
178
|
+
})
|
|
179
|
+
)
|
|
180
|
+
) as any;
|
|
181
|
+
|
|
182
|
+
try {
|
|
183
|
+
const { OllamaProvider } = await import('../llm/providers/ollama');
|
|
184
|
+
const provider = new OllamaProvider('http://localhost:11434');
|
|
185
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
186
|
+
|
|
187
|
+
const doneChunk = chunks.find(c => c.done);
|
|
188
|
+
expect(doneChunk).toBeDefined();
|
|
189
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
190
|
+
expect(doneChunk!.toolCalls!.length).toBe(1);
|
|
191
|
+
|
|
192
|
+
const tc = doneChunk!.toolCalls![0];
|
|
193
|
+
expect(tc.id).toBe('call_abc123');
|
|
194
|
+
expect(tc.type).toBe('function');
|
|
195
|
+
expect(tc.function.name).toBe('bash');
|
|
196
|
+
expect(tc.function.arguments).toBe('{"command":"ls -la"}');
|
|
197
|
+
} finally {
|
|
198
|
+
restoreFetch();
|
|
199
|
+
}
|
|
200
|
+
});
|
|
201
|
+
|
|
202
|
+
test('fallback: when native streaming fails, falls back to completeWithTools', async () => {
|
|
203
|
+
let _callCount = 0;
|
|
204
|
+
|
|
205
|
+
globalThis.fetch = mock((url: string | URL | Request) => {
|
|
206
|
+
_callCount++;
|
|
207
|
+
const urlStr = typeof url === 'string' ? url : url instanceof URL ? url.toString() : url.url;
|
|
208
|
+
|
|
209
|
+
// First call: /v1/chat/completions (native streaming) -- fail
|
|
210
|
+
if (urlStr.includes('/v1/chat/completions')) {
|
|
211
|
+
return Promise.resolve(new Response('Not found', { status: 404 }));
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
// Second call: /api/chat (native tool calling attempt) -- return tool call
|
|
215
|
+
if (urlStr.includes('/api/chat')) {
|
|
216
|
+
return Promise.resolve(
|
|
217
|
+
new Response(
|
|
218
|
+
JSON.stringify({
|
|
219
|
+
message: {
|
|
220
|
+
content: '',
|
|
221
|
+
tool_calls: [
|
|
222
|
+
{
|
|
223
|
+
function: {
|
|
224
|
+
name: 'bash',
|
|
225
|
+
arguments: { command: 'ls -la' },
|
|
226
|
+
},
|
|
227
|
+
},
|
|
228
|
+
],
|
|
229
|
+
},
|
|
230
|
+
model: 'llama3.2',
|
|
231
|
+
prompt_eval_count: 20,
|
|
232
|
+
eval_count: 10,
|
|
233
|
+
}),
|
|
234
|
+
{ status: 200, headers: { 'Content-Type': 'application/json' } }
|
|
235
|
+
)
|
|
236
|
+
);
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
return Promise.reject(new Error(`Unexpected fetch to ${urlStr}`));
|
|
240
|
+
}) as any;
|
|
241
|
+
|
|
242
|
+
try {
|
|
243
|
+
const { OllamaProvider } = await import('../llm/providers/ollama');
|
|
244
|
+
const provider = new OllamaProvider('http://localhost:11434');
|
|
245
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
246
|
+
|
|
247
|
+
// Fallback should yield a final done chunk with tool calls
|
|
248
|
+
const doneChunk = chunks.find(c => c.done);
|
|
249
|
+
expect(doneChunk).toBeDefined();
|
|
250
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
251
|
+
expect(doneChunk!.toolCalls!.length).toBe(1);
|
|
252
|
+
expect(doneChunk!.toolCalls![0].function.name).toBe('bash');
|
|
253
|
+
} finally {
|
|
254
|
+
restoreFetch();
|
|
255
|
+
}
|
|
256
|
+
});
|
|
257
|
+
|
|
258
|
+
test('multiple tool calls are accumulated correctly', async () => {
|
|
259
|
+
const sseLines = [
|
|
260
|
+
// Two tool calls starting in the same delta
|
|
261
|
+
'data: {"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_1","function":{"name":"bash","arguments":""}},{"index":1,"id":"call_2","function":{"name":"bash","arguments":""}}]},"finish_reason":null}]}\n\n',
|
|
262
|
+
// Arguments for first tool call
|
|
263
|
+
'data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\"command\\":\\"ls\\"}"}}]},"finish_reason":null}]}\n\n',
|
|
264
|
+
// Arguments for second tool call
|
|
265
|
+
'data: {"choices":[{"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{\\"command\\":\\"pwd\\"}"}}]},"finish_reason":null}]}\n\n',
|
|
266
|
+
// Done
|
|
267
|
+
'data: {"choices":[{"delta":{},"finish_reason":"tool_calls"}]}\n\n',
|
|
268
|
+
'data: [DONE]\n\n',
|
|
269
|
+
];
|
|
270
|
+
|
|
271
|
+
globalThis.fetch = mock(() =>
|
|
272
|
+
Promise.resolve(
|
|
273
|
+
new Response(buildReadableStream(sseLines), {
|
|
274
|
+
status: 200,
|
|
275
|
+
headers: { 'Content-Type': 'text/event-stream' },
|
|
276
|
+
})
|
|
277
|
+
)
|
|
278
|
+
) as any;
|
|
279
|
+
|
|
280
|
+
try {
|
|
281
|
+
const { OllamaProvider } = await import('../llm/providers/ollama');
|
|
282
|
+
const provider = new OllamaProvider('http://localhost:11434');
|
|
283
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
284
|
+
|
|
285
|
+
const doneChunk = chunks.find(c => c.done);
|
|
286
|
+
expect(doneChunk).toBeDefined();
|
|
287
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
288
|
+
expect(doneChunk!.toolCalls!.length).toBe(2);
|
|
289
|
+
expect(doneChunk!.toolCalls![0].function.name).toBe('bash');
|
|
290
|
+
expect(doneChunk!.toolCalls![0].function.arguments).toBe('{"command":"ls"}');
|
|
291
|
+
expect(doneChunk!.toolCalls![1].function.name).toBe('bash');
|
|
292
|
+
expect(doneChunk!.toolCalls![1].function.arguments).toBe('{"command":"pwd"}');
|
|
293
|
+
} finally {
|
|
294
|
+
restoreFetch();
|
|
295
|
+
}
|
|
296
|
+
});
|
|
297
|
+
});
|
|
298
|
+
|
|
299
|
+
// ===========================================================================
|
|
300
|
+
// OpenRouterProvider
|
|
301
|
+
// ===========================================================================
|
|
302
|
+
|
|
303
|
+
describe('OpenRouterProvider.streamWithTools', () => {
|
|
304
|
+
test('text-only streaming yields text chunks then a done chunk', async () => {
|
|
305
|
+
const streamChunks = createMockOpenAIStream([
|
|
306
|
+
{ choices: [{ delta: { content: 'Here is' }, finish_reason: null }] },
|
|
307
|
+
{ choices: [{ delta: { content: ' the answer' }, finish_reason: null }] },
|
|
308
|
+
{
|
|
309
|
+
choices: [{ delta: {}, finish_reason: 'stop' }],
|
|
310
|
+
usage: { prompt_tokens: 12, completion_tokens: 8, total_tokens: 20 },
|
|
311
|
+
},
|
|
312
|
+
]);
|
|
313
|
+
|
|
314
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
315
|
+
|
|
316
|
+
const { OpenRouterProvider } = await import('../llm/providers/openrouter');
|
|
317
|
+
const provider = new OpenRouterProvider('test-api-key');
|
|
318
|
+
|
|
319
|
+
// Replace the client's create method with our mock
|
|
320
|
+
(provider as any).client = {
|
|
321
|
+
chat: { completions: { create: mockCreate } },
|
|
322
|
+
};
|
|
323
|
+
|
|
324
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
325
|
+
|
|
326
|
+
// Verify text chunks
|
|
327
|
+
const textChunks = chunks.filter(c => c.content && !c.done);
|
|
328
|
+
expect(textChunks.length).toBe(2);
|
|
329
|
+
expect(textChunks[0].content).toBe('Here is');
|
|
330
|
+
expect(textChunks[1].content).toBe(' the answer');
|
|
331
|
+
|
|
332
|
+
// Verify done chunk
|
|
333
|
+
const doneChunk = chunks.find(c => c.done);
|
|
334
|
+
expect(doneChunk).toBeDefined();
|
|
335
|
+
expect(doneChunk!.done).toBe(true);
|
|
336
|
+
expect(doneChunk!.toolCalls).toBeUndefined();
|
|
337
|
+
|
|
338
|
+
// Verify usage
|
|
339
|
+
expect(doneChunk!.usage).toEqual({
|
|
340
|
+
promptTokens: 12,
|
|
341
|
+
completionTokens: 8,
|
|
342
|
+
totalTokens: 20,
|
|
343
|
+
});
|
|
344
|
+
|
|
345
|
+
// Verify the create call was made with stream: true and tools
|
|
346
|
+
expect(mockCreate).toHaveBeenCalledTimes(1);
|
|
347
|
+
const createArg = (mockCreate.mock.calls[0] as unknown[])[0] as any;
|
|
348
|
+
expect(createArg.stream).toBe(true);
|
|
349
|
+
expect(createArg.tools).toBeDefined();
|
|
350
|
+
expect(createArg.tools.length).toBe(1);
|
|
351
|
+
expect(createArg.tools[0].function.name).toBe('bash');
|
|
352
|
+
expect(createArg.stream_options).toEqual({ include_usage: true });
|
|
353
|
+
});
|
|
354
|
+
|
|
355
|
+
test('tool call streaming accumulates tool calls and yields them on the done chunk', async () => {
|
|
356
|
+
const streamChunks = createMockOpenAIStream([
|
|
357
|
+
// Tool call header
|
|
358
|
+
{
|
|
359
|
+
choices: [
|
|
360
|
+
{
|
|
361
|
+
delta: {
|
|
362
|
+
tool_calls: [{ index: 0, id: 'call_xyz', function: { name: 'bash', arguments: '' } }],
|
|
363
|
+
},
|
|
364
|
+
finish_reason: null,
|
|
365
|
+
},
|
|
366
|
+
],
|
|
367
|
+
},
|
|
368
|
+
// Streamed arguments
|
|
369
|
+
{
|
|
370
|
+
choices: [
|
|
371
|
+
{
|
|
372
|
+
delta: {
|
|
373
|
+
tool_calls: [{ index: 0, function: { arguments: '{"command"' } }],
|
|
374
|
+
},
|
|
375
|
+
finish_reason: null,
|
|
376
|
+
},
|
|
377
|
+
],
|
|
378
|
+
},
|
|
379
|
+
{
|
|
380
|
+
choices: [
|
|
381
|
+
{
|
|
382
|
+
delta: {
|
|
383
|
+
tool_calls: [{ index: 0, function: { arguments: ':"ls -la"}' } }],
|
|
384
|
+
},
|
|
385
|
+
finish_reason: null,
|
|
386
|
+
},
|
|
387
|
+
],
|
|
388
|
+
},
|
|
389
|
+
// Finish with tool_calls reason
|
|
390
|
+
{
|
|
391
|
+
choices: [{ delta: {}, finish_reason: 'tool_calls' }],
|
|
392
|
+
},
|
|
393
|
+
]);
|
|
394
|
+
|
|
395
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
396
|
+
|
|
397
|
+
const { OpenRouterProvider } = await import('../llm/providers/openrouter');
|
|
398
|
+
const provider = new OpenRouterProvider('test-api-key');
|
|
399
|
+
(provider as any).client = {
|
|
400
|
+
chat: { completions: { create: mockCreate } },
|
|
401
|
+
};
|
|
402
|
+
|
|
403
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
404
|
+
|
|
405
|
+
const doneChunk = chunks.find(c => c.done);
|
|
406
|
+
expect(doneChunk).toBeDefined();
|
|
407
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
408
|
+
expect(doneChunk!.toolCalls!.length).toBe(1);
|
|
409
|
+
|
|
410
|
+
const tc = doneChunk!.toolCalls![0];
|
|
411
|
+
expect(tc.id).toBe('call_xyz');
|
|
412
|
+
expect(tc.type).toBe('function');
|
|
413
|
+
expect(tc.function.name).toBe('bash');
|
|
414
|
+
expect(tc.function.arguments).toBe('{"command":"ls -la"}');
|
|
415
|
+
});
|
|
416
|
+
|
|
417
|
+
test('fallback: when SDK stream creation throws, the generator yields nothing', async () => {
|
|
418
|
+
const mockCreate = mock(() => Promise.reject(new Error('API unavailable')));
|
|
419
|
+
|
|
420
|
+
const { OpenRouterProvider } = await import('../llm/providers/openrouter');
|
|
421
|
+
const provider = new OpenRouterProvider('test-api-key');
|
|
422
|
+
(provider as any).client = {
|
|
423
|
+
chat: { completions: { create: mockCreate } },
|
|
424
|
+
};
|
|
425
|
+
|
|
426
|
+
// The method is an async generator that will throw when it tries to create
|
|
427
|
+
// the stream. The error propagates to the caller.
|
|
428
|
+
try {
|
|
429
|
+
await collectChunks(provider.streamWithTools(makeRequest()));
|
|
430
|
+
// If we get here, it means no error was thrown (unexpected)
|
|
431
|
+
expect(true).toBe(false);
|
|
432
|
+
} catch (err: any) {
|
|
433
|
+
expect(err.message).toBe('API unavailable');
|
|
434
|
+
}
|
|
435
|
+
});
test('mixed content and tool calls are handled correctly', async () => {
|
|
438
|
+
const streamChunks = createMockOpenAIStream([
|
|
439
|
+
// Some text content first
|
|
440
|
+
{ choices: [{ delta: { content: 'Let me run that' }, finish_reason: null }] },
|
|
441
|
+
// Then a tool call
|
|
442
|
+
{
|
|
443
|
+
choices: [
|
|
444
|
+
{
|
|
445
|
+
delta: {
|
|
446
|
+
tool_calls: [
|
|
447
|
+
{
|
|
448
|
+
index: 0,
|
|
449
|
+
id: 'call_mix',
|
|
450
|
+
function: { name: 'bash', arguments: '{"command":"ls"}' },
|
|
451
|
+
},
|
|
452
|
+
],
|
|
453
|
+
},
|
|
454
|
+
finish_reason: null,
|
|
455
|
+
},
|
|
456
|
+
],
|
|
457
|
+
},
|
|
458
|
+
// Done
|
|
459
|
+
{
|
|
460
|
+
choices: [{ delta: {}, finish_reason: 'tool_calls' }],
|
|
461
|
+
usage: { prompt_tokens: 5, completion_tokens: 3, total_tokens: 8 },
|
|
462
|
+
},
|
|
463
|
+
]);
|
|
464
|
+
|
|
465
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
466
|
+
|
|
467
|
+
const { OpenRouterProvider } = await import('../llm/providers/openrouter');
|
|
468
|
+
const provider = new OpenRouterProvider('test-api-key');
|
|
469
|
+
(provider as any).client = {
|
|
470
|
+
chat: { completions: { create: mockCreate } },
|
|
471
|
+
};
|
|
472
|
+
|
|
473
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
474
|
+
|
|
475
|
+
// Text chunk
|
|
476
|
+
const textChunks = chunks.filter(c => c.content && !c.done);
|
|
477
|
+
expect(textChunks.length).toBe(1);
|
|
478
|
+
expect(textChunks[0].content).toBe('Let me run that');
|
|
479
|
+
|
|
480
|
+
// Done chunk with tool calls
|
|
481
|
+
const doneChunk = chunks.find(c => c.done);
|
|
482
|
+
expect(doneChunk).toBeDefined();
|
|
483
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
484
|
+
expect(doneChunk!.toolCalls![0].function.name).toBe('bash');
|
|
485
|
+
expect(doneChunk!.usage).toEqual({
|
|
486
|
+
promptTokens: 5,
|
|
487
|
+
completionTokens: 3,
|
|
488
|
+
totalTokens: 8,
|
|
489
|
+
});
|
|
490
|
+
});
|
|
491
|
+
});

// ===========================================================================
// OpenAICompatibleProvider
// ===========================================================================

describe('OpenAICompatibleProvider.streamWithTools', () => {
|
|
498
|
+
function createProvider() {
|
|
499
|
+
// Dynamic import to avoid module-level side effects
|
|
500
|
+
// eslint-disable-next-line @typescript-eslint/no-var-requires
|
|
501
|
+
const { OpenAICompatibleProvider } = require('../llm/providers/openai-compatible');
|
|
502
|
+
return new OpenAICompatibleProvider({
|
|
503
|
+
name: 'test-compat',
|
|
504
|
+
apiKey: 'test-key',
|
|
505
|
+
baseURL: 'https://api.test.com/v1',
|
|
506
|
+
defaultModel: 'test-model',
|
|
507
|
+
});
|
|
508
|
+
}
|
|
509
|
+
|
|
510
|
+
test('text-only streaming yields text chunks then a done chunk', async () => {
|
|
511
|
+
const streamChunks = createMockOpenAIStream([
|
|
512
|
+
{ choices: [{ delta: { content: 'Response' }, finish_reason: null }] },
|
|
513
|
+
{ choices: [{ delta: { content: ' text' }, finish_reason: null }] },
|
|
514
|
+
{
|
|
515
|
+
choices: [{ delta: {}, finish_reason: 'stop' }],
|
|
516
|
+
usage: { prompt_tokens: 8, completion_tokens: 4, total_tokens: 12 },
|
|
517
|
+
},
|
|
518
|
+
]);
|
|
519
|
+
|
|
520
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
521
|
+
|
|
522
|
+
const provider = createProvider();
|
|
523
|
+
(provider as any).client = {
|
|
524
|
+
chat: { completions: { create: mockCreate } },
|
|
525
|
+
};
|
|
526
|
+
|
|
527
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
528
|
+
|
|
529
|
+
// Text chunks
|
|
530
|
+
const textChunks = chunks.filter(c => c.content && !c.done);
|
|
531
|
+
expect(textChunks.length).toBe(2);
|
|
532
|
+
expect(textChunks[0].content).toBe('Response');
|
|
533
|
+
expect(textChunks[1].content).toBe(' text');
|
|
534
|
+
|
|
535
|
+
// Done chunk
|
|
536
|
+
const doneChunk = chunks.find(c => c.done);
|
|
537
|
+
expect(doneChunk).toBeDefined();
|
|
538
|
+
expect(doneChunk!.done).toBe(true);
|
|
539
|
+
expect(doneChunk!.toolCalls).toBeUndefined();
|
|
540
|
+
expect(doneChunk!.usage).toEqual({
|
|
541
|
+
promptTokens: 8,
|
|
542
|
+
completionTokens: 4,
|
|
543
|
+
totalTokens: 12,
|
|
544
|
+
});
|
|
545
|
+
|
|
546
|
+
// Verify stream options
|
|
547
|
+
const createArg = (mockCreate.mock.calls[0] as unknown[])[0] as any;
|
|
548
|
+
expect(createArg.stream).toBe(true);
|
|
549
|
+
expect(createArg.stream_options).toEqual({ include_usage: true });
|
|
550
|
+
});
|
|
551
|
+
|
|
552
|
+
test('tool call streaming accumulates tool calls and yields them on the done chunk', async () => {
|
|
553
|
+
const streamChunks = createMockOpenAIStream([
|
|
554
|
+
{
|
|
555
|
+
choices: [
|
|
556
|
+
{
|
|
557
|
+
delta: {
|
|
558
|
+
tool_calls: [
|
|
559
|
+
{ index: 0, id: 'call_compat1', function: { name: 'bash', arguments: '' } },
|
|
560
|
+
],
|
|
561
|
+
},
|
|
562
|
+
finish_reason: null,
|
|
563
|
+
},
|
|
564
|
+
],
|
|
565
|
+
},
|
|
566
|
+
{
|
|
567
|
+
choices: [
|
|
568
|
+
{
|
|
569
|
+
delta: {
|
|
570
|
+
tool_calls: [{ index: 0, function: { arguments: '{"command":"pwd"}' } }],
|
|
571
|
+
},
|
|
572
|
+
finish_reason: null,
|
|
573
|
+
},
|
|
574
|
+
],
|
|
575
|
+
},
|
|
576
|
+
{
|
|
577
|
+
choices: [{ delta: {}, finish_reason: 'tool_calls' }],
|
|
578
|
+
},
|
|
579
|
+
]);
|
|
580
|
+
|
|
581
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
582
|
+
|
|
583
|
+
const provider = createProvider();
|
|
584
|
+
(provider as any).client = {
|
|
585
|
+
chat: { completions: { create: mockCreate } },
|
|
586
|
+
};
|
|
587
|
+
|
|
588
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
589
|
+
|
|
590
|
+
const doneChunk = chunks.find(c => c.done);
|
|
591
|
+
expect(doneChunk).toBeDefined();
|
|
592
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
593
|
+
expect(doneChunk!.toolCalls!.length).toBe(1);
|
|
594
|
+
|
|
595
|
+
const tc = doneChunk!.toolCalls![0];
|
|
596
|
+
expect(tc.id).toBe('call_compat1');
|
|
597
|
+
expect(tc.type).toBe('function');
|
|
598
|
+
expect(tc.function.name).toBe('bash');
|
|
599
|
+
expect(tc.function.arguments).toBe('{"command":"pwd"}');
|
|
600
|
+
});
|
|
601
|
+
|
|
602
|
+
test('fallback: when SDK stream creation throws, the error propagates', async () => {
|
|
603
|
+
const mockCreate = mock(() => Promise.reject(new Error('Provider down')));
|
|
604
|
+
|
|
605
|
+
const provider = createProvider();
|
|
606
|
+
(provider as any).client = {
|
|
607
|
+
chat: { completions: { create: mockCreate } },
|
|
608
|
+
};
|
|
609
|
+
|
|
610
|
+
try {
|
|
611
|
+
await collectChunks(provider.streamWithTools(makeRequest()));
|
|
612
|
+
expect(true).toBe(false);
|
|
613
|
+
} catch (err: any) {
|
|
614
|
+
expect(err.message).toBe('Provider down');
|
|
615
|
+
}
|
|
616
|
+
});
|
|
617
|
+
|
|
618
|
+
test('multiple tool calls across different indices are accumulated', async () => {
|
|
619
|
+
const streamChunks = createMockOpenAIStream([
|
|
620
|
+
// Two tool calls in separate chunks
|
|
621
|
+
{
|
|
622
|
+
choices: [
|
|
623
|
+
{
|
|
624
|
+
delta: {
|
|
625
|
+
tool_calls: [
|
|
626
|
+
{
|
|
627
|
+
index: 0,
|
|
628
|
+
id: 'call_a',
|
|
629
|
+
function: { name: 'bash', arguments: '{"command":"ls"}' },
|
|
630
|
+
},
|
|
631
|
+
],
|
|
632
|
+
},
|
|
633
|
+
finish_reason: null,
|
|
634
|
+
},
|
|
635
|
+
],
|
|
636
|
+
},
|
|
637
|
+
{
|
|
638
|
+
choices: [
|
|
639
|
+
{
|
|
640
|
+
delta: {
|
|
641
|
+
tool_calls: [
|
|
642
|
+
{
|
|
643
|
+
index: 1,
|
|
644
|
+
id: 'call_b',
|
|
645
|
+
function: { name: 'bash', arguments: '{"command":"cat file.txt"}' },
|
|
646
|
+
},
|
|
647
|
+
],
|
|
648
|
+
},
|
|
649
|
+
finish_reason: null,
|
|
650
|
+
},
|
|
651
|
+
],
|
|
652
|
+
},
|
|
653
|
+
{
|
|
654
|
+
choices: [{ delta: {}, finish_reason: 'tool_calls' }],
|
|
655
|
+
},
|
|
656
|
+
]);
|
|
657
|
+
|
|
658
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
659
|
+
|
|
660
|
+
const provider = createProvider();
|
|
661
|
+
(provider as any).client = {
|
|
662
|
+
chat: { completions: { create: mockCreate } },
|
|
663
|
+
};
|
|
664
|
+
|
|
665
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
666
|
+
|
|
667
|
+
const doneChunk = chunks.find(c => c.done);
|
|
668
|
+
expect(doneChunk).toBeDefined();
|
|
669
|
+
expect(doneChunk!.toolCalls).toBeDefined();
|
|
670
|
+
expect(doneChunk!.toolCalls!.length).toBe(2);
|
|
671
|
+
expect(doneChunk!.toolCalls![0].id).toBe('call_a');
|
|
672
|
+
expect(doneChunk!.toolCalls![0].function.arguments).toBe('{"command":"ls"}');
|
|
673
|
+
expect(doneChunk!.toolCalls![1].id).toBe('call_b');
|
|
674
|
+
expect(doneChunk!.toolCalls![1].function.arguments).toBe('{"command":"cat file.txt"}');
|
|
675
|
+
});
|
|
676
|
+
|
|
677
|
+
test('usage from a mid-stream chunk is captured on the done chunk', async () => {
|
|
678
|
+
const streamChunks = createMockOpenAIStream([
|
|
679
|
+
{ choices: [{ delta: { content: 'ok' }, finish_reason: null }] },
|
|
680
|
+
{
|
|
681
|
+
choices: [{ delta: {}, finish_reason: null }],
|
|
682
|
+
usage: { prompt_tokens: 100, completion_tokens: 50, total_tokens: 150 },
|
|
683
|
+
},
|
|
684
|
+
{
|
|
685
|
+
choices: [{ delta: {}, finish_reason: 'stop' }],
|
|
686
|
+
},
|
|
687
|
+
]);
|
|
688
|
+
|
|
689
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
690
|
+
|
|
691
|
+
const provider = createProvider();
|
|
692
|
+
(provider as any).client = {
|
|
693
|
+
chat: { completions: { create: mockCreate } },
|
|
694
|
+
};
|
|
695
|
+
|
|
696
|
+
const chunks = await collectChunks(provider.streamWithTools(makeRequest()));
|
|
697
|
+
|
|
698
|
+
const doneChunk = chunks.find(c => c.done);
|
|
699
|
+
expect(doneChunk).toBeDefined();
|
|
700
|
+
expect(doneChunk!.usage).toEqual({
|
|
701
|
+
promptTokens: 100,
|
|
702
|
+
completionTokens: 50,
|
|
703
|
+
totalTokens: 150,
|
|
704
|
+
});
|
|
705
|
+
});
|
|
706
|
+
|
|
707
|
+
test('tool_choice is forwarded to the API call', async () => {
|
|
708
|
+
const streamChunks = createMockOpenAIStream([
|
|
709
|
+
{ choices: [{ delta: { content: 'done' }, finish_reason: 'stop' }] },
|
|
710
|
+
]);
|
|
711
|
+
|
|
712
|
+
const mockCreate = mock(() => Promise.resolve(streamChunks));
|
|
713
|
+
|
|
714
|
+
const provider = createProvider();
|
|
715
|
+
(provider as any).client = {
|
|
716
|
+
chat: { completions: { create: mockCreate } },
|
|
717
|
+
};
|
|
718
|
+
|
|
719
|
+
const request = makeRequest({
|
|
720
|
+
toolChoice: { type: 'function', function: { name: 'bash' } },
|
|
721
|
+
temperature: 0.5,
|
|
722
|
+
maxTokens: 1024,
|
|
723
|
+
});
|
|
724
|
+
|
|
725
|
+
await collectChunks(provider.streamWithTools(request));
|
|
726
|
+
|
|
727
|
+
const createArg = (mockCreate.mock.calls[0] as unknown[])[0] as any;
|
|
728
|
+
expect(createArg.tool_choice).toEqual({ type: 'function', function: { name: 'bash' } });
|
|
729
|
+
expect(createArg.temperature).toBe(0.5);
|
|
730
|
+
expect(createArg.max_tokens).toBe(1024);
|
|
731
|
+
});
|
|
732
|
+
});
|