@ekairos/thread 1.21.88-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +363 -0
- package/dist/codex.d.ts +95 -0
- package/dist/codex.js +91 -0
- package/dist/env.d.ts +12 -0
- package/dist/env.js +62 -0
- package/dist/events.d.ts +35 -0
- package/dist/events.js +102 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.js +9 -0
- package/dist/mcp.d.ts +1 -0
- package/dist/mcp.js +1 -0
- package/dist/mirror.d.ts +41 -0
- package/dist/mirror.js +1 -0
- package/dist/oidc.d.ts +7 -0
- package/dist/oidc.js +25 -0
- package/dist/polyfills/dom-events.d.ts +1 -0
- package/dist/polyfills/dom-events.js +89 -0
- package/dist/react.d.ts +62 -0
- package/dist/react.js +101 -0
- package/dist/runtime.d.ts +17 -0
- package/dist/runtime.js +23 -0
- package/dist/runtime.step.d.ts +9 -0
- package/dist/runtime.step.js +7 -0
- package/dist/schema.d.ts +2 -0
- package/dist/schema.js +200 -0
- package/dist/steps/do-story-stream-step.d.ts +29 -0
- package/dist/steps/do-story-stream-step.js +89 -0
- package/dist/steps/do-thread-stream-step.d.ts +29 -0
- package/dist/steps/do-thread-stream-step.js +90 -0
- package/dist/steps/mirror.steps.d.ts +6 -0
- package/dist/steps/mirror.steps.js +48 -0
- package/dist/steps/reaction.steps.d.ts +43 -0
- package/dist/steps/reaction.steps.js +354 -0
- package/dist/steps/store.steps.d.ts +98 -0
- package/dist/steps/store.steps.js +512 -0
- package/dist/steps/stream.steps.d.ts +41 -0
- package/dist/steps/stream.steps.js +99 -0
- package/dist/steps/trace.steps.d.ts +37 -0
- package/dist/steps/trace.steps.js +265 -0
- package/dist/stores/instant.document-parser.d.ts +6 -0
- package/dist/stores/instant.document-parser.js +210 -0
- package/dist/stores/instant.documents.d.ts +16 -0
- package/dist/stores/instant.documents.js +152 -0
- package/dist/stores/instant.store.d.ts +78 -0
- package/dist/stores/instant.store.js +530 -0
- package/dist/story.actions.d.ts +60 -0
- package/dist/story.actions.js +120 -0
- package/dist/story.builder.d.ts +115 -0
- package/dist/story.builder.js +130 -0
- package/dist/story.config.d.ts +54 -0
- package/dist/story.config.js +125 -0
- package/dist/story.d.ts +2 -0
- package/dist/story.engine.d.ts +224 -0
- package/dist/story.engine.js +464 -0
- package/dist/story.hooks.d.ts +21 -0
- package/dist/story.hooks.js +31 -0
- package/dist/story.js +6 -0
- package/dist/story.registry.d.ts +21 -0
- package/dist/story.registry.js +30 -0
- package/dist/story.store.d.ts +107 -0
- package/dist/story.store.js +1 -0
- package/dist/story.toolcalls.d.ts +60 -0
- package/dist/story.toolcalls.js +73 -0
- package/dist/thread.builder.d.ts +118 -0
- package/dist/thread.builder.js +134 -0
- package/dist/thread.config.d.ts +15 -0
- package/dist/thread.config.js +30 -0
- package/dist/thread.d.ts +3 -0
- package/dist/thread.engine.d.ts +229 -0
- package/dist/thread.engine.js +471 -0
- package/dist/thread.events.d.ts +35 -0
- package/dist/thread.events.js +105 -0
- package/dist/thread.hooks.d.ts +21 -0
- package/dist/thread.hooks.js +31 -0
- package/dist/thread.js +7 -0
- package/dist/thread.reactor.d.ts +82 -0
- package/dist/thread.reactor.js +65 -0
- package/dist/thread.registry.d.ts +21 -0
- package/dist/thread.registry.js +30 -0
- package/dist/thread.store.d.ts +121 -0
- package/dist/thread.store.js +1 -0
- package/dist/thread.toolcalls.d.ts +60 -0
- package/dist/thread.toolcalls.js +73 -0
- package/dist/tools-to-model-tools.d.ts +19 -0
- package/dist/tools-to-model-tools.js +21 -0
- package/package.json +133 -0
package/dist/schema.js
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
import { i } from "@instantdb/core";
import { domain } from "@ekairos/domain";
// InstantDB schema for the "thread" domain: threads, their contexts and
// timeline items, agent-loop persistence (executions/steps/parts),
// tracing tables (events/runs/spans), and parsed documents.
export const threadDomain = domain("thread")
    .schema({
    entities: {
        // A thread: the top-level container that contexts/items/executions link to.
        thread_threads: i.entity({
            createdAt: i.date(),
            updatedAt: i.date().optional(),
            // Optional external lookup key; unique when present.
            key: i.string().optional().indexed().unique(),
            name: i.string().optional(),
            status: i.string().optional().indexed(), // open | streaming | closed | failed
        }),
        // A context within a thread (see links: contexts belong to a thread).
        thread_contexts: i.entity({
            createdAt: i.date(),
            updatedAt: i.date().optional(),
            status: i.string().optional().indexed(), // open | streaming | closed
            content: i.any().optional(),
        }),
        // Timeline entries; linked to both a thread and (optionally) a context.
        thread_items: i.entity({
            channel: i.string().indexed(),
            createdAt: i.date().indexed(),
            type: i.string().optional().indexed(),
            content: i.any().optional(),
            status: i.string().optional().indexed(),
        }),
        // One agent run; tied to a workflow run id for correlation.
        thread_executions: i.entity({
            createdAt: i.date(),
            updatedAt: i.date().optional(),
            status: i.string().optional().indexed(), // executing | completed | failed
            workflowRunId: i.string().optional().indexed(),
        }),
        // One iteration of the agent loop inside an execution.
        thread_steps: i.entity({
            createdAt: i.date().indexed(),
            updatedAt: i.date().optional(),
            status: i.string().optional().indexed(), // running | completed | failed
            iteration: i.number().indexed(),
            // Deterministic ids generated in step runtime; stored for convenience/debugging
            executionId: i.string().indexed(),
            triggerEventId: i.string().indexed().optional(),
            reactionEventId: i.string().indexed().optional(),
            eventId: i.string().indexed(),
            toolCalls: i.any().optional(),
            toolExecutionResults: i.any().optional(),
            continueLoop: i.boolean().optional(),
            errorText: i.string().optional(),
        }),
        // Normalized parts (parts-first persistence). These hang off the step that produced them.
        // We still keep `thread_items.content.parts` for back-compat.
        thread_parts: i.entity({
            key: i.string().unique().indexed(), // `${stepId}:${idx}`
            stepId: i.string().indexed(),
            idx: i.number().indexed(),
            type: i.string().optional().indexed(),
            part: i.any().optional(),
            updatedAt: i.date().optional(),
        }),
        // Trace event stream, one row per emitted event of a workflow run.
        thread_trace_events: i.entity({
            key: i.string().unique().indexed(), // `${workflowRunId}:${eventId}`
            workflowRunId: i.string().indexed(),
            seq: i.number().indexed(),
            eventId: i.string().indexed(),
            eventKind: i.string().indexed(),
            eventAt: i.date().optional(),
            ingestedAt: i.date().optional(),
            orgId: i.string().optional().indexed(),
            projectId: i.string().optional().indexed(),
            contextKey: i.string().optional().indexed(),
            contextId: i.string().optional().indexed(),
            executionId: i.string().optional().indexed(),
            stepId: i.string().optional().indexed(),
            contextEventId: i.string().optional().indexed(),
            toolCallId: i.string().optional().indexed(),
            partKey: i.string().optional().indexed(),
            partIdx: i.number().optional().indexed(),
            spanId: i.string().optional().indexed(),
            parentSpanId: i.string().optional().indexed(),
            isDeleted: i.boolean().optional(),
            // LLM provider/usage fields captured alongside the event, when available.
            aiProvider: i.string().optional().indexed(),
            aiModel: i.string().optional().indexed(),
            promptTokens: i.number().optional(),
            promptTokensCached: i.number().optional(),
            promptTokensUncached: i.number().optional(),
            completionTokens: i.number().optional(),
            totalTokens: i.number().optional(),
            latencyMs: i.number().optional(),
            cacheCostUsd: i.number().optional(),
            computeCostUsd: i.number().optional(),
            costUsd: i.number().optional(),
            payload: i.any().optional(),
        }),
        // Per-run rollup of trace events (counts, first/last timestamps).
        thread_trace_runs: i.entity({
            workflowRunId: i.string().unique().indexed(),
            orgId: i.string().optional().indexed(),
            projectId: i.string().optional().indexed(),
            firstEventAt: i.date().optional().indexed(),
            lastEventAt: i.date().optional().indexed(),
            lastIngestedAt: i.date().optional().indexed(),
            eventsCount: i.number().optional(),
            status: i.string().optional().indexed(),
            payload: i.any().optional(),
        }),
        // Trace spans (parent/child via parentSpanId) scoped to a workflow run.
        thread_trace_spans: i.entity({
            spanId: i.string().unique().indexed(),
            parentSpanId: i.string().optional().indexed(),
            workflowRunId: i.string().indexed(),
            executionId: i.string().optional().indexed(),
            stepId: i.string().optional().indexed(),
            kind: i.string().optional().indexed(),
            name: i.string().optional().indexed(),
            status: i.string().optional().indexed(),
            startedAt: i.date().optional().indexed(),
            endedAt: i.date().optional().indexed(),
            durationMs: i.number().optional(),
            payload: i.any().optional(),
        }),
        // Documents (moved from schema-document.ts)
        document_documents: i.entity({
            name: i.string().optional().indexed(),
            mimeType: i.string().optional(),
            size: i.number().optional(),
            ownerId: i.string().optional().indexed(),
            orgId: i.string().optional().indexed(),
            createdAt: i.date().optional().indexed(),
            processedAt: i.date().optional().indexed(),
            updatedAt: i.date().optional(),
            lastJobId: i.string().optional(),
            content: i.json().optional(), // Store parsed content (pages, text, etc.)
        }),
    },
    links: {
        // Contexts belong to a thread
        threadContextsThread: {
            forward: { on: "thread_contexts", has: "one", label: "thread" },
            reverse: { on: "thread_threads", has: "many", label: "contexts" },
        },
        // Items belong to a thread (conversation timeline)
        threadItemsThread: {
            forward: { on: "thread_items", has: "one", label: "thread" },
            reverse: { on: "thread_threads", has: "many", label: "items" },
        },
        // Items may also belong to a context.
        threadItemsContext: {
            forward: { on: "thread_items", has: "one", label: "context" },
            reverse: { on: "thread_contexts", has: "many", label: "items" },
        },
        // Executions belong to a context
        threadExecutionsContext: {
            forward: { on: "thread_executions", has: "one", label: "context" },
            reverse: { on: "thread_contexts", has: "many", label: "executions" },
        },
        // Executions also belong to a thread
        threadExecutionsThread: {
            forward: { on: "thread_executions", has: "one", label: "thread" },
            reverse: { on: "thread_threads", has: "many", label: "executions" },
        },
        // Current execution pointer on a context
        threadContextsCurrentExecution: {
            forward: { on: "thread_contexts", has: "one", label: "currentExecution" },
            reverse: { on: "thread_executions", has: "one", label: "currentOf" },
        },
        // Link execution to its trigger event
        threadExecutionsTrigger: {
            forward: { on: "thread_executions", has: "one", label: "trigger" },
            reverse: { on: "thread_items", has: "many", label: "executionsAsTrigger" },
        },
        // Link execution to its reaction event
        threadExecutionsReaction: {
            forward: { on: "thread_executions", has: "one", label: "reaction" },
            reverse: { on: "thread_items", has: "many", label: "executionsAsReaction" },
        },
        // Steps belong to an execution
        threadStepsExecution: {
            forward: { on: "thread_steps", has: "one", label: "execution" },
            reverse: { on: "thread_executions", has: "many", label: "steps" },
        },
        // Iteration events belong to an execution (enables: event -> execution -> steps)
        threadExecutionItems: {
            forward: { on: "thread_items", has: "one", label: "execution" },
            reverse: { on: "thread_executions", has: "many", label: "items" },
        },
        // Parts belong to a step
        threadPartsStep: {
            forward: { on: "thread_parts", has: "one", label: "step" },
            reverse: { on: "thread_steps", has: "many", label: "parts" },
        },
        // Documents (moved from schema-document.ts)
        documentFile: {
            forward: {
                on: "document_documents",
                has: "one",
                label: "file",
            },
            reverse: {
                on: "$files",
                has: "one",
                label: "document",
            },
        },
    },
    rooms: {},
});
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import type { ModelMessage } from "ai";
import type { ContextEvent } from "../story.store.js";
import type { SerializableToolForModel } from "../tools-to-model-tools.js";
/**
 * Runs a single LLM streaming step as a Workflow step.
 *
 * - Performs the provider/network call in a step (Node/runtime allowed).
 * - Pipes AI SDK UI message chunks into the workflow-owned writable stream.
 * - Returns the assistant event + extracted tool calls for the workflow loop.
 */
export declare function doStoryStreamStep(params: {
    /** Gateway model id (string), model factory (function), or a resolved model instance. */
    model: any;
    /** System prompt passed to the model call. */
    system: string;
    /** Prior conversation, already converted to AI SDK model messages. */
    messages: ModelMessage[];
    /** Serializable tool specs (plain JSON schema objects), keyed by tool name. */
    tools: Record<string, SerializableToolForModel>;
    /** Stable id reused for the streamed assistant message and its metadata. */
    eventId: string;
    /** Stop condition for the AI SDK's internal step loop. */
    maxSteps: number;
    /**
     * Whether to emit a `start` chunk for this streamed assistant message.
     *
     * IMPORTANT:
     * Our story loop may call the model multiple times within a single "turn" while continuing
     * to append to the same `eventId`. In that case, `start` must only be sent once.
     */
    sendStart?: boolean;
}): Promise<{
    assistantEvent: ContextEvent;
    toolCalls: import("../story.toolcalls.js").ToolCall[];
}>;
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
/**
 * Runs a single LLM streaming step as a Workflow step.
 *
 * - Performs the provider/network call in a step (Node/runtime allowed).
 * - Pipes AI SDK UI message chunks into the workflow-owned writable stream.
 * - Returns the assistant event + extracted tool calls for the workflow loop.
 *
 * @param params.model Gateway model id (string), model factory (function), or resolved model.
 * @param params.system System prompt for the model call.
 * @param params.messages Prior conversation as AI SDK model messages.
 * @param params.tools Serializable tool specs (plain JSON schema objects), keyed by name.
 * @param params.eventId Stable id reused for the streamed assistant message.
 * @param params.maxSteps Stop condition for the AI SDK's internal step loop.
 * @param params.sendStart Emit a `start` chunk (must be sent at most once per turn).
 * @returns `{ assistantEvent, toolCalls }` for the workflow loop.
 */
export async function doStoryStreamStep(params) {
    "use step";
    const { getWritable } = await import("workflow");
    const writable = getWritable();
    const { jsonSchema, gateway, smoothStream, stepCountIs, streamText } = await import("ai");
    const { extractToolCallsFromParts } = await import("@ekairos/thread");
    // Match DurableAgent's model init behavior:
    // - string => AI Gateway model id, resolved via `gateway(...)` in the step runtime
    // - function => model factory (should be a `"use step"` function for workflow serialization)
    const resolvedModel = typeof params.model === "string"
        ? gateway(params.model)
        : typeof params.model === "function"
            ? await params.model()
            : params.model;
    // IMPORTANT:
    // `streamText` expects tools in the AI SDK ToolSet shape, where `inputSchema` is a Schema-like value.
    // We pass plain JSON schema objects across the step boundary (serializable), then wrap them here with
    // `jsonSchema(...)` so the AI SDK does not attempt Zod conversion at runtime.
    const toolsForStreamText = {};
    for (const [name, t] of Object.entries(params.tools)) {
        toolsForStreamText[name] = {
            description: t?.description,
            // Guard `t` consistently with `t?.description` above.
            inputSchema: jsonSchema(t?.inputSchema),
        };
    }
    const result = streamText({
        model: resolvedModel,
        system: params.system,
        messages: params.messages,
        tools: toolsForStreamText,
        toolChoice: "required",
        stopWhen: stepCountIs(params.maxSteps),
        experimental_transform: smoothStream({ delayInMs: 30, chunking: "word" }),
    });
    // Ensure the underlying stream is consumed (AI SDK requirement).
    // Intentionally not awaited; `void` marks it as fire-and-forget.
    void result.consumeStream();
    let resolveFinish;
    let rejectFinish;
    const finishPromise = new Promise((resolve, reject) => {
        resolveFinish = resolve;
        rejectFinish = reject;
    });
    // BUGFIX: if `pipeTo` below rejects, control never reaches
    // `await finishPromise`, so a rejection triggered from `onError` would
    // surface as an unhandled promise rejection (fatal in Node by default).
    // Attach a no-op handler; the real failure still propagates through the
    // `await uiStream.pipeTo(...)` / `await finishPromise` calls below.
    finishPromise.catch(() => { });
    const uiStream = result
        .toUIMessageStream({
        // Emit `start` only when the engine says so (typically once per turn).
        sendStart: Boolean(params.sendStart),
        generateMessageId: () => params.eventId,
        messageMetadata() {
            return { eventId: params.eventId };
        },
        onFinish: ({ messages }) => {
            // The last UI message is the assistant message produced by this step.
            const lastMessage = messages[messages.length - 1];
            const event = {
                id: params.eventId,
                type: "assistant.message",
                channel: "web",
                createdAt: new Date().toISOString(),
                content: { parts: lastMessage?.parts ?? [] },
            };
            resolveFinish(event);
        },
        onError: (e) => {
            rejectFinish(e);
            return e instanceof Error ? e.message : String(e);
        },
    })
        // Filter out per-step finish boundary. Workflow will emit a single finish.
        .pipeThrough(new TransformStream({
        transform(chunk, controller) {
            if (chunk.type === "finish")
                return;
            controller.enqueue(chunk);
        },
    }));
    // `preventClose` keeps the workflow-owned writable open for subsequent steps.
    await uiStream.pipeTo(writable, { preventClose: true });
    const assistantEvent = await finishPromise;
    const toolCalls = extractToolCallsFromParts(assistantEvent?.content?.parts);
    return {
        assistantEvent,
        toolCalls,
    };
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import type { ModelMessage } from "ai";
import type { ThreadItem } from "../thread.store.js";
import type { SerializableToolForModel } from "../tools-to-model-tools.js";
/**
 * Runs a single LLM streaming step as a Workflow step.
 *
 * - Performs the provider/network call in a step (Node/runtime allowed).
 * - Pipes AI SDK UI message chunks into the workflow-owned writable stream.
 * - Returns the assistant event + extracted tool calls for the workflow loop.
 */
export declare function doThreadStreamStep(params: {
    /** Gateway model id (string), model factory (function), or a resolved model instance. */
    model: any;
    /** System prompt passed to the model call. */
    system: string;
    /** Prior conversation, already converted to AI SDK model messages. */
    messages: ModelMessage[];
    /** Serializable tool specs (plain JSON schema objects), keyed by tool name. */
    tools: Record<string, SerializableToolForModel>;
    /** Stable id reused for the streamed assistant message and its metadata. */
    eventId: string;
    /** Stop condition for the AI SDK's internal step loop. */
    maxSteps: number;
    /**
     * Whether to emit a `start` chunk for this streamed assistant message.
     *
     * IMPORTANT:
     * Our thread loop may call the model multiple times within a single "turn" while continuing
     * to append to the same `eventId`. In that case, `start` must only be sent once.
     */
    sendStart?: boolean;
}): Promise<{
    assistantEvent: ThreadItem;
    toolCalls: import("../thread.toolcalls.js").ToolCall[];
}>;
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import { OUTPUT_TEXT_ITEM_TYPE } from "../thread.events.js";
/**
 * Runs a single LLM streaming step as a Workflow step.
 *
 * - Performs the provider/network call in a step (Node/runtime allowed).
 * - Pipes AI SDK UI message chunks into the workflow-owned writable stream.
 * - Returns the assistant event + extracted tool calls for the workflow loop.
 *
 * @param params.model Gateway model id (string), model factory (function), or resolved model.
 * @param params.system System prompt for the model call.
 * @param params.messages Prior conversation as AI SDK model messages.
 * @param params.tools Serializable tool specs (plain JSON schema objects), keyed by name.
 * @param params.eventId Stable id reused for the streamed assistant message.
 * @param params.maxSteps Stop condition for the AI SDK's internal step loop.
 * @param params.sendStart Emit a `start` chunk (must be sent at most once per turn).
 * @returns `{ assistantEvent, toolCalls }` for the workflow loop.
 */
export async function doThreadStreamStep(params) {
    "use step";
    const { getWritable } = await import("workflow");
    const writable = getWritable();
    const { jsonSchema, gateway, smoothStream, stepCountIs, streamText } = await import("ai");
    const { extractToolCallsFromParts } = await import("@ekairos/thread");
    // Match DurableAgent's model init behavior:
    // - string => AI Gateway model id, resolved via `gateway(...)` in the step runtime
    // - function => model factory (should be a `"use step"` function for workflow serialization)
    const resolvedModel = typeof params.model === "string"
        ? gateway(params.model)
        : typeof params.model === "function"
            ? await params.model()
            : params.model;
    // IMPORTANT:
    // `streamText` expects tools in the AI SDK ToolSet shape, where `inputSchema` is a Schema-like value.
    // We pass plain JSON schema objects across the step boundary (serializable), then wrap them here with
    // `jsonSchema(...)` so the AI SDK does not attempt Zod conversion at runtime.
    const toolsForStreamText = {};
    for (const [name, t] of Object.entries(params.tools)) {
        toolsForStreamText[name] = {
            description: t?.description,
            // Guard `t` consistently with `t?.description` above.
            inputSchema: jsonSchema(t?.inputSchema),
        };
    }
    const result = streamText({
        model: resolvedModel,
        system: params.system,
        messages: params.messages,
        tools: toolsForStreamText,
        toolChoice: "required",
        stopWhen: stepCountIs(params.maxSteps),
        experimental_transform: smoothStream({ delayInMs: 30, chunking: "word" }),
    });
    // Ensure the underlying stream is consumed (AI SDK requirement).
    // Intentionally not awaited; `void` marks it as fire-and-forget.
    void result.consumeStream();
    let resolveFinish;
    let rejectFinish;
    const finishPromise = new Promise((resolve, reject) => {
        resolveFinish = resolve;
        rejectFinish = reject;
    });
    // BUGFIX: if `pipeTo` below rejects, control never reaches
    // `await finishPromise`, so a rejection triggered from `onError` would
    // surface as an unhandled promise rejection (fatal in Node by default).
    // Attach a no-op handler; the real failure still propagates through the
    // `await uiStream.pipeTo(...)` / `await finishPromise` calls below.
    finishPromise.catch(() => { });
    const uiStream = result
        .toUIMessageStream({
        // Emit `start` only when the engine says so (typically once per turn).
        sendStart: Boolean(params.sendStart),
        generateMessageId: () => params.eventId,
        messageMetadata() {
            return { eventId: params.eventId };
        },
        onFinish: ({ messages }) => {
            // The last UI message is the assistant message produced by this step.
            const lastMessage = messages[messages.length - 1];
            const event = {
                id: params.eventId,
                type: OUTPUT_TEXT_ITEM_TYPE,
                channel: "web",
                createdAt: new Date().toISOString(),
                content: { parts: lastMessage?.parts ?? [] },
            };
            resolveFinish(event);
        },
        onError: (e) => {
            rejectFinish(e);
            return e instanceof Error ? e.message : String(e);
        },
    })
        // Filter out per-step finish boundary. Workflow will emit a single finish.
        .pipeThrough(new TransformStream({
        transform(chunk, controller) {
            if (chunk.type === "finish")
                return;
            controller.enqueue(chunk);
        },
    }));
    // `preventClose` keeps the workflow-owned writable open for subsequent steps.
    await uiStream.pipeTo(writable, { preventClose: true });
    const assistantEvent = await finishPromise;
    const toolCalls = extractToolCallsFromParts(assistantEvent?.content?.parts);
    return {
        assistantEvent,
        toolCalls,
    };
}
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
/**
 * Extracts the organization id from the thread environment.
 * Throws when the value is absent, empty, or not a string.
 */
function requireOrgId(env) {
    const candidate = env?.orgId;
    const isNonEmptyString = typeof candidate === "string" && candidate.length > 0;
    if (!isNonEmptyString) {
        throw new Error("[thread/mirror] Missing env.orgId");
    }
    return candidate;
}
|
|
8
|
+
/**
 * Resolves the ekairos-core base URL from the process environment.
 * Precedence: EKAIROS_CORE_BASE_URL, then EKAIROS_MIRROR_BASE_URL,
 * then EKAIROS_BASE_URL. A single trailing slash, if any, is stripped.
 */
function requireBaseUrl() {
    const candidates = [
        process.env.EKAIROS_CORE_BASE_URL,
        process.env.EKAIROS_MIRROR_BASE_URL,
        process.env.EKAIROS_BASE_URL,
    ];
    const baseUrl = candidates.find((value) => Boolean(value));
    if (!baseUrl) {
        throw new Error("[thread/mirror] Missing EKAIROS_CORE_BASE_URL (or EKAIROS_MIRROR_BASE_URL)");
    }
    return baseUrl.replace(/\/$/, "");
}
|
|
17
|
+
/**
 * Resolves the auth token used to call ekairos-core.
 * Preferred: EKAIROS_CLERK_API_KEY (Clerk org API key, opaque token).
 * Fallback: EKAIROS_THREAD_MIRROR_TOKEN (shared secret).
 */
function requireToken() {
    const clerkApiKey = process.env.EKAIROS_CLERK_API_KEY;
    if (clerkApiKey) {
        return clerkApiKey;
    }
    const sharedSecret = process.env.EKAIROS_THREAD_MIRROR_TOKEN;
    if (sharedSecret) {
        return sharedSecret;
    }
    throw new Error("[thread/mirror] Missing EKAIROS_CLERK_API_KEY (or EKAIROS_THREAD_MIRROR_TOKEN fallback)");
}
|
|
28
|
+
/**
 * Mirrors a batch of thread writes to ekairos-core over HTTP.
 * No-ops when there is nothing to write; throws on a non-2xx response.
 * Runs as a workflow step (`"use step"`).
 */
export async function mirrorThreadWrites(params) {
    "use step";
    const writes = params.writes;
    if (!writes?.length) {
        return;
    }
    // Resolve credentials/config in the same order as validation should fail:
    // orgId first, then base URL, then token.
    const orgId = requireOrgId(params.env);
    const endpoint = `${requireBaseUrl()}/api/thread`;
    const bearerToken = requireToken();
    const response = await fetch(endpoint, {
        method: "POST",
        headers: {
            "content-type": "application/json",
            authorization: `Bearer ${bearerToken}`,
        },
        body: JSON.stringify({ orgId, writes }),
    });
    if (response.ok) {
        return;
    }
    // Best-effort capture of the error body for diagnostics.
    const detail = await response.text().catch(() => "");
    throw new Error(`[thread/mirror] ekairos-core write failed (${response.status}): ${detail}`);
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import type { ModelMessage, UIMessageChunk } from "ai";
import type { ThreadEnvironment } from "../thread.config.js";
import type { ThreadItem, ContextIdentifier } from "../thread.store.js";
import type { SerializableToolForModel } from "../tools-to-model-tools.js";
/**
 * Executes a full "reaction" inside a single workflow step:
 * - load events from store
 * - convert events to model messages
 * - run the streaming model call and emit chunks
 * - extract tool calls from the resulting assistant event
 */
export declare function executeReaction(params: {
    env: ThreadEnvironment;
    contextIdentifier: ContextIdentifier;
    /** Gateway model id (string), model factory (function), or a resolved model instance. */
    model: any;
    /** System prompt for the model call. */
    system: string;
    /** Serializable tool specs (plain JSON schema objects), keyed by tool name. */
    tools: Record<string, SerializableToolForModel>;
    /** Stable id for the streamed assistant message. */
    eventId: string;
    iteration?: number;
    /** Stop condition for the AI SDK's internal step loop. */
    maxSteps: number;
    /** Emit a `start` chunk for this streamed assistant message (once per turn). */
    sendStart?: boolean;
    silent?: boolean;
    /** Destination for AI SDK UI message chunks, when streaming to a client. */
    writable?: WritableStream<UIMessageChunk>;
    // Optional trace-correlation ids; mirror the indexed columns in the trace schema.
    executionId?: string;
    contextId?: string;
    stepId?: string;
}): Promise<{
    assistantEvent: ThreadItem;
    toolCalls: any[];
    /** The exact messages that were sent to the model for this reaction. */
    messagesForModel: ModelMessage[];
    /** LLM usage/telemetry for tracing, when the provider reports it. */
    llm?: {
        provider?: string;
        model?: string;
        promptTokens?: number;
        promptTokensCached?: number;
        promptTokensUncached?: number;
        completionTokens?: number;
        totalTokens?: number;
        latencyMs?: number;
        rawUsage?: any;
        rawProviderMetadata?: any;
    };
}>;
|