@lota-sdk/core 0.1.44 → 0.1.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@lota-sdk/core",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.46",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"main": "./src/index.ts",
|
|
6
6
|
"types": "./src/index.ts",
|
|
@@ -32,7 +32,7 @@
|
|
|
32
32
|
"@chat-adapter/slack": "^4.23.0",
|
|
33
33
|
"@chat-adapter/state-ioredis": "^4.23.0",
|
|
34
34
|
"@logtape/logtape": "^2.0.5",
|
|
35
|
-
"@lota-sdk/shared": "0.1.
|
|
35
|
+
"@lota-sdk/shared": "0.1.46",
|
|
36
36
|
"@mendable/firecrawl-js": "^4.18.0",
|
|
37
37
|
"@surrealdb/node": "^3.0.3",
|
|
38
38
|
"ai": "^6.0.141",
|
|
@@ -953,11 +953,10 @@ export async function prepareWorkstreamRunCore(params: WorkstreamRunCoreParams):
|
|
|
953
953
|
// No specialist match — fallback to owner (core) or chief (non-core), single agent turn
|
|
954
954
|
await runGroupAgent(fallbackAgentId)
|
|
955
955
|
} else {
|
|
956
|
-
// Run first routed agent —
|
|
956
|
+
// Run first routed agent — let finish flow naturally so the stream resets between agents
|
|
957
957
|
const respondedAgents: string[] = []
|
|
958
958
|
let lastResponse = await runGroupAgent(triageResult.agentId, {
|
|
959
959
|
routingContext: triageResult.routingContext,
|
|
960
|
-
suppressFinish: true,
|
|
961
960
|
})
|
|
962
961
|
respondedAgents.push(triageResult.agentId)
|
|
963
962
|
|
|
@@ -994,15 +993,9 @@ export async function prepareWorkstreamRunCore(params: WorkstreamRunCoreParams):
|
|
|
994
993
|
|
|
995
994
|
lastResponse = await runGroupAgent(checkResult.agentId, {
|
|
996
995
|
routingContext: checkResult.routingContext,
|
|
997
|
-
suppressFinish: true,
|
|
998
996
|
})
|
|
999
997
|
respondedAgents.push(checkResult.agentId)
|
|
1000
998
|
}
|
|
1001
|
-
|
|
1002
|
-
// Write final finish chunk so the client knows the turn is complete
|
|
1003
|
-
if (writer) {
|
|
1004
|
-
writer.write({ type: 'finish', finishReason: 'stop' } as ChatStreamChunk)
|
|
1005
|
-
}
|
|
1006
999
|
}
|
|
1007
1000
|
}
|
|
1008
1001
|
}
|
|
@@ -4,7 +4,6 @@ import { z } from 'zod'
|
|
|
4
4
|
import { aiGatewayChatModel } from '../ai-gateway/ai-gateway'
|
|
5
5
|
import { buildAiGatewayDirectCacheHeaders } from '../ai-gateway/cache-headers'
|
|
6
6
|
import { agentDescriptions, agentDisplayNames, routerModelId } from '../config/agent-defaults'
|
|
7
|
-
import { OPENROUTER_FAST_REASONING_MODEL_ID } from '../config/model-constants'
|
|
8
7
|
|
|
9
8
|
// ---------------------------------------------------------------------------
|
|
10
9
|
// Schemas
|
|
@@ -96,16 +95,12 @@ Format: {"done":true} or {"done":false,"agentId":"<id>","routingContext":"<1-sen
|
|
|
96
95
|
// ---------------------------------------------------------------------------
|
|
97
96
|
|
|
98
97
|
function createRouterAgent(systemPrompt: string) {
|
|
99
|
-
const modelId = routerModelId ?? OPENROUTER_FAST_REASONING_MODEL_ID
|
|
100
|
-
// Router needs plain JSON output, not reasoning tokens
|
|
101
|
-
const providerOptions = routerModelId
|
|
102
|
-
? { openai: { provider: { order: ['groq'], allow_fallbacks: true } } }
|
|
103
|
-
: undefined
|
|
98
|
+
const modelId = routerModelId ?? 'openai/gpt-5.4-nano'
|
|
104
99
|
return new ToolLoopAgent({
|
|
105
100
|
id: 'workstream-router',
|
|
106
101
|
model: aiGatewayChatModel(modelId),
|
|
107
102
|
headers: buildAiGatewayDirectCacheHeaders('workstream-router'),
|
|
108
|
-
providerOptions,
|
|
103
|
+
providerOptions: { openai: { reasoningEffort: 'high' } },
|
|
109
104
|
instructions: systemPrompt,
|
|
110
105
|
maxOutputTokens: 256,
|
|
111
106
|
})
|