@ekairos/thread 1.21.88-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. package/README.md +363 -0
  2. package/dist/codex.d.ts +95 -0
  3. package/dist/codex.js +91 -0
  4. package/dist/env.d.ts +12 -0
  5. package/dist/env.js +62 -0
  6. package/dist/events.d.ts +35 -0
  7. package/dist/events.js +102 -0
  8. package/dist/index.d.ts +9 -0
  9. package/dist/index.js +9 -0
  10. package/dist/mcp.d.ts +1 -0
  11. package/dist/mcp.js +1 -0
  12. package/dist/mirror.d.ts +41 -0
  13. package/dist/mirror.js +1 -0
  14. package/dist/oidc.d.ts +7 -0
  15. package/dist/oidc.js +25 -0
  16. package/dist/polyfills/dom-events.d.ts +1 -0
  17. package/dist/polyfills/dom-events.js +89 -0
  18. package/dist/react.d.ts +62 -0
  19. package/dist/react.js +101 -0
  20. package/dist/runtime.d.ts +17 -0
  21. package/dist/runtime.js +23 -0
  22. package/dist/runtime.step.d.ts +9 -0
  23. package/dist/runtime.step.js +7 -0
  24. package/dist/schema.d.ts +2 -0
  25. package/dist/schema.js +200 -0
  26. package/dist/steps/do-story-stream-step.d.ts +29 -0
  27. package/dist/steps/do-story-stream-step.js +89 -0
  28. package/dist/steps/do-thread-stream-step.d.ts +29 -0
  29. package/dist/steps/do-thread-stream-step.js +90 -0
  30. package/dist/steps/mirror.steps.d.ts +6 -0
  31. package/dist/steps/mirror.steps.js +48 -0
  32. package/dist/steps/reaction.steps.d.ts +43 -0
  33. package/dist/steps/reaction.steps.js +354 -0
  34. package/dist/steps/store.steps.d.ts +98 -0
  35. package/dist/steps/store.steps.js +512 -0
  36. package/dist/steps/stream.steps.d.ts +41 -0
  37. package/dist/steps/stream.steps.js +99 -0
  38. package/dist/steps/trace.steps.d.ts +37 -0
  39. package/dist/steps/trace.steps.js +265 -0
  40. package/dist/stores/instant.document-parser.d.ts +6 -0
  41. package/dist/stores/instant.document-parser.js +210 -0
  42. package/dist/stores/instant.documents.d.ts +16 -0
  43. package/dist/stores/instant.documents.js +152 -0
  44. package/dist/stores/instant.store.d.ts +78 -0
  45. package/dist/stores/instant.store.js +530 -0
  46. package/dist/story.actions.d.ts +60 -0
  47. package/dist/story.actions.js +120 -0
  48. package/dist/story.builder.d.ts +115 -0
  49. package/dist/story.builder.js +130 -0
  50. package/dist/story.config.d.ts +54 -0
  51. package/dist/story.config.js +125 -0
  52. package/dist/story.d.ts +2 -0
  53. package/dist/story.engine.d.ts +224 -0
  54. package/dist/story.engine.js +464 -0
  55. package/dist/story.hooks.d.ts +21 -0
  56. package/dist/story.hooks.js +31 -0
  57. package/dist/story.js +6 -0
  58. package/dist/story.registry.d.ts +21 -0
  59. package/dist/story.registry.js +30 -0
  60. package/dist/story.store.d.ts +107 -0
  61. package/dist/story.store.js +1 -0
  62. package/dist/story.toolcalls.d.ts +60 -0
  63. package/dist/story.toolcalls.js +73 -0
  64. package/dist/thread.builder.d.ts +118 -0
  65. package/dist/thread.builder.js +134 -0
  66. package/dist/thread.config.d.ts +15 -0
  67. package/dist/thread.config.js +30 -0
  68. package/dist/thread.d.ts +3 -0
  69. package/dist/thread.engine.d.ts +229 -0
  70. package/dist/thread.engine.js +471 -0
  71. package/dist/thread.events.d.ts +35 -0
  72. package/dist/thread.events.js +105 -0
  73. package/dist/thread.hooks.d.ts +21 -0
  74. package/dist/thread.hooks.js +31 -0
  75. package/dist/thread.js +7 -0
  76. package/dist/thread.reactor.d.ts +82 -0
  77. package/dist/thread.reactor.js +65 -0
  78. package/dist/thread.registry.d.ts +21 -0
  79. package/dist/thread.registry.js +30 -0
  80. package/dist/thread.store.d.ts +121 -0
  81. package/dist/thread.store.js +1 -0
  82. package/dist/thread.toolcalls.d.ts +60 -0
  83. package/dist/thread.toolcalls.js +73 -0
  84. package/dist/tools-to-model-tools.d.ts +19 -0
  85. package/dist/tools-to-model-tools.js +21 -0
  86. package/package.json +133 -0
@@ -0,0 +1,354 @@
1
+ import { OUTPUT_TEXT_ITEM_TYPE } from "../thread.events.js";
2
+ import { writeThreadTraceEvents } from "./trace.steps.js";
3
/**
 * Best-effort lookup of the current workflow metadata.
 *
 * Returns `null` when the optional "workflow" module cannot be imported,
 * does not export `getWorkflowMetadata`, or yields no metadata — callers
 * treat `null` as "not running inside a workflow".
 */
async function readWorkflowMetadata() {
    let metadata = null;
    try {
        const workflowModule = await import("workflow");
        const lookup = workflowModule.getWorkflowMetadata;
        if (typeof lookup === "function") {
            metadata = lookup() ?? null;
        }
    }
    catch {
        // Module missing or lookup threw: metadata is simply unavailable.
        metadata = null;
    }
    return metadata;
}
12
/**
 * Resolves the workflow run id for the current execution, trying sources
 * in priority order:
 *   1. workflow metadata (`readWorkflowMetadata`),
 *   2. `env.workflowRunId` (trimmed, non-empty strings only),
 *   3. the `thread_executions` row for `executionId` in the runtime store.
 *
 * Returns `undefined` when no source yields a value. The store lookup is
 * best-effort: any failure there is swallowed.
 */
async function resolveWorkflowRunId(env, executionId) {
    const meta = await readWorkflowMetadata();
    let resolved = "";
    if (meta && meta.workflowRunId !== undefined && meta.workflowRunId !== null) {
        resolved = String(meta.workflowRunId);
    }
    if (!resolved) {
        const fromEnv = env?.workflowRunId;
        if (typeof fromEnv === "string" && fromEnv.trim()) {
            resolved = fromEnv.trim();
        }
    }
    if (!resolved && executionId) {
        try {
            const { getThreadRuntime } = await import("@ekairos/thread/runtime");
            const runtime = await getThreadRuntime(env);
            const db = runtime?.db;
            if (db) {
                const result = await db.query({
                    thread_executions: {
                        $: { where: { id: String(executionId) }, limit: 1 },
                    },
                });
                const execution = result?.thread_executions?.[0];
                if (execution?.workflowRunId) {
                    resolved = String(execution.workflowRunId);
                }
            }
        }
        catch {
            // Best-effort: a missing run id must not fail the caller.
        }
    }
    return resolved || undefined;
}
46
/**
 * Serializes an error-like value to JSON for logging, defensively:
 * - redacts keys that look like credentials (token/cookie/secret/etc.),
 * - truncates very long strings instead of discarding them entirely,
 * - replaces repeated object references with "[circular]" so
 *   JSON.stringify cannot throw on cycles.
 *
 * Fix vs. previous version: strings longer than 5000 chars used to be
 * replaced wholesale with "[truncated-string]", losing the entire stack
 * trace / body. Now the first 5000 chars are kept with a marker appended.
 *
 * Falls back to a minimal `{ message }` payload if serialization still fails.
 */
function safeErrorJson(error) {
    const seen = new WeakSet();
    const redactKey = (k) => /token|authorization|cookie|secret|api[_-]?key|password/i.test(k);
    const err = error;
    const payload = {
        name: err?.name,
        message: err?.message,
        status: err?.status,
        body: err?.body,
        data: err?.data,
        stack: err?.stack,
    };
    try {
        return JSON.stringify(payload, (k, v) => {
            // Redaction first, so a sensitive value is never partially emitted.
            if (redactKey(k))
                return "[redacted]";
            if (typeof v === "string" && v.length > 5000)
                return `${v.slice(0, 5000)}…[truncated]`;
            if (typeof v === "object" && v !== null) {
                // NOTE: a WeakSet marks every visited object, so a non-circular
                // object shared between two keys also reports "[circular]" on the
                // second occurrence — acceptable for log output.
                if (seen.has(v))
                    return "[circular]";
                seen.add(v);
            }
            return v;
        });
    }
    catch {
        return JSON.stringify({ message: String(err?.message ?? "error") });
    }
}
76
/**
 * Executes a full "reaction" inside a single workflow step:
 * - load events from store
 * - convert events to model messages
 * - run the streaming model call and emit chunks
 * - extract tool calls from the resulting assistant event
 *
 * @param params Step parameters. Fields read here: `env`, `contextIdentifier`,
 *   `silent`, `writable`, `model`, `tools`, `system`, `maxSteps`, `sendStart`,
 *   `eventId`, `executionId`, `stepId`, `iteration`, `contextId`.
 * @returns `{ assistantEvent, toolCalls, messagesForModel, llm }` — sanitized
 *   to plain JSON where provider data is involved, since workflow step return
 *   values must be serializable.
 */
export async function executeReaction(params) {
    "use step";
    // Resolve the store from the thread runtime bound to this environment.
    const { getThreadRuntime } = await import("@ekairos/thread/runtime");
    const { store } = await getThreadRuntime(params.env);
    let events;
    try {
        events = await store.getItems(params.contextIdentifier);
    }
    catch (error) {
        console.error("[ekairos/story] reaction.step store.getItems failed");
        throw error;
    }
    let messagesForModel;
    try {
        messagesForModel = (await store.itemsToModelMessages(events));
    }
    catch (error) {
        console.error("[ekairos/story] reaction.step store.itemsToModelMessages failed", safeErrorJson(error));
        throw error;
    }
    // In silent mode (or with no sink provided) chunks are discarded via a
    // no-op WritableStream so the streaming pipeline below still runs.
    const writable = params.silent || !params.writable
        ? new WritableStream({ write() { } })
        : params.writable;
    const { jsonSchema, gateway, smoothStream, stepCountIs, streamText } = await import("ai");
    const { extractToolCallsFromParts } = await import("@ekairos/thread");
    // A "mock model config" is a plain object describing a fake model (used in
    // tests): it must NOT already be a LanguageModel (`specificationVersion`),
    // and is recognized either by `source: "mock"` or by provider+modelId strings.
    const isMockModelConfig = (value) => {
        if (!value || typeof value !== "object")
            return false;
        if ("specificationVersion" in value)
            return false;
        if (value.source === "mock")
            return true;
        return typeof value.provider === "string" && typeof value.modelId === "string";
    };
    // Builds an in-memory LanguageModel (spec v2) that always emits a single
    // tool call — for the first configured tool unless `config.toolName` says
    // otherwise — so reaction flows can be exercised without a real provider.
    const buildMockModel = async (config) => {
        const toolName = typeof config.toolName === "string" && config.toolName.trim()
            ? config.toolName.trim()
            : Object.keys(params.tools || {})[0] || "tool";
        const provider = config.provider ?? "mock-provider";
        const modelId = config.modelId ?? "mock-model-id";
        return {
            specificationVersion: "v2",
            provider,
            modelId,
            supportedUrls: {},
            doGenerate: async () => ({
                content: [
                    {
                        type: "tool-call",
                        toolCallId: "mock-tool-call",
                        toolName,
                        input: JSON.stringify({ instruction: "" }),
                    },
                ],
                finishReason: "tool-calls",
                usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 },
                warnings: [],
            }),
            doStream: async () => {
                const toolCallId = `mock-tool-${Date.now()}`;
                const stream = new ReadableStream({
                    start(controller) {
                        controller.enqueue({ type: "stream-start", warnings: [] });
                        controller.enqueue({
                            type: "tool-call",
                            toolCallId,
                            toolName,
                            input: JSON.stringify({ instruction: "" }),
                        });
                        controller.enqueue({
                            type: "finish",
                            finishReason: "tool-calls",
                            usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 },
                        });
                        controller.close();
                    },
                });
                return { stream };
            },
        };
    };
    // Match DurableAgent-style model init behavior:
    // string -> gateway id, mock config -> mock model, function -> factory,
    // anything else -> assumed to already be a model instance.
    const resolvedModel = typeof params.model === "string"
        ? gateway(params.model)
        : isMockModelConfig(params.model)
            ? await buildMockModel(params.model)
            : typeof params.model === "function"
                ? await params.model()
                : params.model;
    // Wrap plain JSON Schema objects so the AI SDK doesn't attempt Zod conversion at runtime.
    const toolsForStreamText = {};
    for (const [name, t] of Object.entries(params.tools)) {
        toolsForStreamText[name] = {
            description: t?.description,
            inputSchema: jsonSchema(t.inputSchema),
        };
    }
    const startedAtMs = Date.now();
    const result = streamText({
        model: resolvedModel,
        system: params.system,
        messages: messagesForModel,
        tools: toolsForStreamText,
        toolChoice: "required",
        stopWhen: stepCountIs(params.maxSteps),
        experimental_transform: smoothStream({ delayInMs: 30, chunking: "word" }),
    });
    // Ensure the underlying stream is consumed (AI SDK requirement)
    result.consumeStream();
    // Deferred promise resolved/rejected from the UI stream callbacks below,
    // so the final assistant event can be awaited after piping completes.
    let resolveFinish;
    let rejectFinish;
    const finishPromise = new Promise((resolve, reject) => {
        resolveFinish = resolve;
        rejectFinish = reject;
    });
    const uiStream = result
        .toUIMessageStream({
        sendStart: Boolean(params.sendStart),
        // Pin the message id (and metadata) to the pre-allocated reaction
        // event id so downstream consumers can correlate chunks.
        generateMessageId: () => params.eventId,
        messageMetadata() {
            return { eventId: params.eventId };
        },
        onFinish: ({ messages }) => {
            // The last UI message carries the assembled assistant parts.
            const lastMessage = messages[messages.length - 1];
            const event = {
                id: params.eventId,
                type: OUTPUT_TEXT_ITEM_TYPE,
                channel: "web",
                createdAt: new Date().toISOString(),
                content: { parts: lastMessage?.parts ?? [] },
            };
            resolveFinish(event);
        },
        onError: (e) => {
            rejectFinish(e);
            return e instanceof Error ? e.message : String(e);
        },
    })
        // Filter out per-step finish boundary. Workflow will emit a single finish.
        .pipeThrough(new TransformStream({
        transform(chunk, controller) {
            if (chunk.type === "finish")
                return;
            controller.enqueue(chunk);
        },
    }));
    // preventClose: the caller owns the writable's lifecycle across steps.
    await uiStream.pipeTo(writable, { preventClose: true });
    const assistantEvent = await finishPromise;
    const finishedAtMs = Date.now();
    const toolCalls = extractToolCallsFromParts(assistantEvent?.content?.parts);
    // Best-effort usage extraction (AI SDK provider dependent).
    // We keep this loose because providers differ and SDK evolves quickly.
    const latencyMs = Math.max(0, finishedAtMs - startedAtMs);
    let usage = undefined;
    let providerMetadata = undefined;
    try {
        // `result.usage` may be a plain value or a thenable depending on SDK version.
        usage = result?.usage;
        if (typeof usage?.then === "function")
            usage = await usage;
    }
    catch {
        usage = undefined;
    }
    try {
        // Probe the known locations for provider metadata across SDK versions.
        providerMetadata =
            result?.providerMetadata ??
                result?.experimental_providerMetadata ??
                result?.response?.providerMetadata ??
                undefined;
    }
    catch {
        providerMetadata = undefined;
    }
    // Workflow steps must return serializable values. Provider SDKs may include
    // classes/streams/etc in metadata, so we defensively sanitize.
    function toPlainJson(value) {
        if (typeof value === "undefined")
            return undefined;
        try {
            return JSON.parse(JSON.stringify(value));
        }
        catch {
            return undefined;
        }
    }
    const usageJson = toPlainJson(usage);
    const providerMetadataJson = toPlainJson(providerMetadata);
    // Derive provider/model from gateway id when available.
    const modelId = typeof params.model === "string" ? params.model : "";
    const provider = modelId.includes("/") ? modelId.split("/")[0] : providerMetadata?.provider;
    const model = modelId.includes("/") ? modelId.split("/").slice(1).join("/") : providerMetadata?.model;
    // Token accounting: attempt to read cached prompt tokens from OpenAI-like usage shapes.
    const promptTokens = Number(usage?.promptTokens ?? usage?.prompt_tokens ?? usage?.inputTokens ?? 0) || 0;
    const completionTokens = Number(usage?.completionTokens ?? usage?.completion_tokens ?? usage?.outputTokens ?? 0) || 0;
    const totalTokens = Number(usage?.totalTokens ?? usage?.total_tokens ?? 0) || (promptTokens + completionTokens);
    const cachedPromptTokens = Number(usage?.promptTokensCached ??
        usage?.cached_prompt_tokens ??
        usage?.prompt_tokens_details?.cached_tokens ??
        usage?.input_tokens_details?.cached_tokens ??
        0) || 0;
    const uncachedPromptTokens = Math.max(0, promptTokens - cachedPromptTokens);
    // Only include token fields when at least one counter is non-zero; the
    // fallback shape still records provider/model/latency and raw payloads.
    const llm = promptTokens || completionTokens || cachedPromptTokens
        ? {
            provider,
            model,
            promptTokens,
            promptTokensCached: cachedPromptTokens,
            promptTokensUncached: uncachedPromptTokens,
            completionTokens,
            totalTokens,
            latencyMs,
            rawUsage: usageJson,
            rawProviderMetadata: providerMetadataJson,
        }
        : {
            provider,
            model,
            latencyMs,
            rawUsage: usageJson,
            rawProviderMetadata: providerMetadataJson,
        };
    try {
        // Emit a "thread.llm" trace event keyed by execution/step/iteration so
        // repeated runs of the same step are deduplicated by eventId.
        const runId = await resolveWorkflowRunId(params.env, params.executionId);
        if (runId && llm) {
            await writeThreadTraceEvents({
                env: params.env,
                events: [
                    {
                        workflowRunId: runId,
                        eventId: `thread_llm:${String(params.executionId ?? "unknown")}:${String(params.stepId ?? params.eventId)}:${String(params.iteration ?? 0)}`,
                        eventKind: "thread.llm",
                        eventAt: new Date().toISOString(),
                        contextId: params.contextId,
                        executionId: params.executionId,
                        stepId: params.stepId,
                        aiProvider: typeof llm.provider === "string" ? llm.provider : undefined,
                        aiModel: typeof llm.model === "string" ? llm.model : undefined,
                        promptTokens: Number.isFinite(Number(llm.promptTokens))
                            ? Number(llm.promptTokens)
                            : undefined,
                        promptTokensCached: Number.isFinite(Number(llm.promptTokensCached))
                            ? Number(llm.promptTokensCached)
                            : undefined,
                        promptTokensUncached: Number.isFinite(Number(llm.promptTokensUncached))
                            ? Number(llm.promptTokensUncached)
                            : undefined,
                        completionTokens: Number.isFinite(Number(llm.completionTokens))
                            ? Number(llm.completionTokens)
                            : undefined,
                        totalTokens: Number.isFinite(Number(llm.totalTokens))
                            ? Number(llm.totalTokens)
                            : undefined,
                        latencyMs: Number.isFinite(Number(llm.latencyMs))
                            ? Number(llm.latencyMs)
                            : undefined,
                        payload: {
                            provider: llm.provider,
                            model: llm.model,
                            usage: llm.rawUsage,
                            providerMetadata: llm.rawProviderMetadata,
                            iteration: params.iteration,
                        },
                    },
                ],
            });
        }
    }
    catch {
        // tracing must not break reaction
    }
    return { assistantEvent, toolCalls, messagesForModel, llm };
}
@@ -0,0 +1,98 @@
1
import type { UIMessageChunk } from "ai";
import type { ThreadEnvironment } from "../thread.config.js";
import type { ThreadItem, ContextIdentifier, StoredContext } from "../thread.store.js";
/**
 * Reference to a tool call that requires review before execution.
 * `toolName` is optional — presumably omitted when only the id is known;
 * confirm against the implementation.
 */
export type ThreadReviewRequest = {
    toolCallId: string;
    toolName?: string;
};
/**
 * Initializes/ensures the story context exists and emits a single `data-context-id` chunk.
 *
 * This is the "context init" boundary for the story engine.
 */
export declare function initializeContext<C>(env: ThreadEnvironment, contextIdentifier: ContextIdentifier | null, opts?: {
    silent?: boolean;
    writable?: WritableStream<UIMessageChunk>;
}): Promise<{
    context: StoredContext<C>;
    isNew: boolean;
}>;
/** Replaces the stored content of an existing context and returns the updated record. */
export declare function updateContextContent<C>(env: ThreadEnvironment, contextIdentifier: ContextIdentifier, content: C): Promise<StoredContext<C>>;
/** Sets the context lifecycle status ("open" | "streaming" | "closed"). */
export declare function updateContextStatus(env: ThreadEnvironment, contextIdentifier: ContextIdentifier, status: "open" | "streaming" | "closed"): Promise<void>;
/** Persists the trigger item for a context and returns the stored item. */
export declare function saveTriggerItem(env: ThreadEnvironment, contextIdentifier: ContextIdentifier, event: ThreadItem): Promise<ThreadItem>;
/**
 * Emits the context id to the optional writable stream.
 * NOTE(review): from the signature alone this looks like the `data-context-id`
 * chunk emission described on `initializeContext` — confirm in the implementation.
 */
export declare function emitContextIdChunk(params: {
    env: ThreadEnvironment;
    contextId: string;
    writable?: WritableStream<UIMessageChunk>;
}): Promise<void>;
/**
 * Persists the trigger event and creates an execution record in one step.
 * Returns the stored trigger, its id, the pre-allocated reaction event id,
 * and the new execution id.
 */
export declare function saveTriggerAndCreateExecution(params: {
    env: ThreadEnvironment;
    contextIdentifier: ContextIdentifier;
    triggerEvent: ThreadItem;
}): Promise<{
    triggerEvent: ThreadItem;
    triggerEventId: string;
    reactionEventId: string;
    executionId: string;
}>;
/**
 * Persists a reaction item, optionally linking it to an execution/context and
 * attaching pending review requests.
 */
export declare function saveReactionItem(env: ThreadEnvironment, contextIdentifier: ContextIdentifier, event: ThreadItem, opts?: {
    executionId?: string;
    contextId?: string;
    reviewRequests?: ThreadReviewRequest[];
}): Promise<ThreadItem>;
/** Updates a stored item by id, optionally scoping to an execution/context. */
export declare function updateItem(env: ThreadEnvironment, eventId: string, event: ThreadItem, opts?: {
    executionId?: string;
    contextId?: string;
}): Promise<ThreadItem>;
/** Creates an execution record tying a trigger event to its reaction event. */
export declare function createExecution(env: ThreadEnvironment, contextIdentifier: ContextIdentifier, triggerEventId: string, reactionEventId: string): Promise<{
    id: string;
}>;
/**
 * Creates the reaction item for a trigger event; returns its event id and the
 * associated execution id.
 */
export declare function createReactionItem(params: {
    env: ThreadEnvironment;
    contextIdentifier: ContextIdentifier;
    triggerEventId: string;
}): Promise<{
    reactionEventId: string;
    executionId: string;
}>;
/** Marks an execution as terminally "completed" or "failed". */
export declare function completeExecution(env: ThreadEnvironment, contextIdentifier: ContextIdentifier, executionId: string, status: "completed" | "failed"): Promise<void>;
/** Associates an execution with the workflow run that is processing it. */
export declare function updateExecutionWorkflowRun(params: {
    env: ThreadEnvironment;
    executionId: string;
    workflowRunId: string;
}): Promise<void>;
/**
 * Creates a step record for one iteration of an execution loop; returns the
 * new step id and its event id.
 */
export declare function createThreadStep(params: {
    env: ThreadEnvironment;
    executionId: string;
    iteration: number;
}): Promise<{
    stepId: string;
    eventId: string;
}>;
/**
 * Applies a partial update to a step record: status, tool-call data,
 * loop-continuation flag, and/or error text.
 */
export declare function updateThreadStep(params: {
    env: ThreadEnvironment;
    stepId: string;
    executionId?: string;
    contextId?: string;
    iteration?: number;
    patch: {
        status?: "running" | "completed" | "failed";
        toolCalls?: any;
        toolExecutionResults?: any;
        continueLoop?: boolean;
        errorText?: string;
    };
}): Promise<void>;
/** Links a stored item to an execution step. */
export declare function linkItemToExecutionStep(params: {
    env: ThreadEnvironment;
    itemId: string;
    executionId: string;
}): Promise<void>;
/** Persists the message parts accumulated by a step. */
export declare function saveThreadPartsStep(params: {
    env: ThreadEnvironment;
    stepId: string;
    executionId?: string;
    contextId?: string;
    iteration?: number;
    parts: any[];
}): Promise<void>;