@fluidframework/ai-collab 2.22.1 → 2.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +4 -0
  2. package/README.md +70 -4
  3. package/api-report/ai-collab.alpha.api.md +170 -2
  4. package/dist/aiCollab.d.ts +0 -1
  5. package/dist/aiCollab.d.ts.map +1 -1
  6. package/dist/aiCollab.js +1 -2
  7. package/dist/aiCollab.js.map +1 -1
  8. package/dist/aiCollabApi.d.ts +50 -3
  9. package/dist/aiCollabApi.d.ts.map +1 -1
  10. package/dist/aiCollabApi.js.map +1 -1
  11. package/dist/alpha.d.ts +17 -0
  12. package/dist/explicit-strategy/debugEvents.d.ts +248 -0
  13. package/dist/explicit-strategy/debugEvents.d.ts.map +1 -0
  14. package/dist/explicit-strategy/debugEvents.js +36 -0
  15. package/dist/explicit-strategy/debugEvents.js.map +1 -0
  16. package/dist/explicit-strategy/index.d.ts +4 -4
  17. package/dist/explicit-strategy/index.d.ts.map +1 -1
  18. package/dist/explicit-strategy/index.js +176 -54
  19. package/dist/explicit-strategy/index.js.map +1 -1
  20. package/dist/index.d.ts +2 -1
  21. package/dist/index.d.ts.map +1 -1
  22. package/dist/index.js.map +1 -1
  23. package/lib/aiCollab.d.ts +0 -1
  24. package/lib/aiCollab.d.ts.map +1 -1
  25. package/lib/aiCollab.js +1 -2
  26. package/lib/aiCollab.js.map +1 -1
  27. package/lib/aiCollabApi.d.ts +50 -3
  28. package/lib/aiCollabApi.d.ts.map +1 -1
  29. package/lib/aiCollabApi.js.map +1 -1
  30. package/lib/alpha.d.ts +17 -0
  31. package/lib/explicit-strategy/debugEvents.d.ts +248 -0
  32. package/lib/explicit-strategy/debugEvents.d.ts.map +1 -0
  33. package/lib/explicit-strategy/debugEvents.js +32 -0
  34. package/lib/explicit-strategy/debugEvents.js.map +1 -0
  35. package/lib/explicit-strategy/index.d.ts +4 -4
  36. package/lib/explicit-strategy/index.d.ts.map +1 -1
  37. package/lib/explicit-strategy/index.js +174 -52
  38. package/lib/explicit-strategy/index.js.map +1 -1
  39. package/lib/index.d.ts +2 -1
  40. package/lib/index.d.ts.map +1 -1
  41. package/lib/index.js.map +1 -1
  42. package/package.json +13 -11
  43. package/src/aiCollab.ts +1 -2
  44. package/src/aiCollabApi.ts +54 -3
  45. package/src/explicit-strategy/debugEvents.ts +297 -0
  46. package/src/explicit-strategy/index.ts +269 -59
  47. package/src/index.ts +20 -0
@@ -3,6 +3,7 @@
3
3
  * Licensed under the MIT License.
4
4
  */
5
5
 
6
+ import { UsageError } from "@fluidframework/telemetry-utils/internal";
6
7
  import {
7
8
  getSimpleSchema,
8
9
  Tree,
@@ -15,12 +16,34 @@ import type {
15
16
  ChatCompletionCreateParams,
16
17
  // eslint-disable-next-line import/no-internal-modules
17
18
  } from "openai/resources/index.mjs";
19
+ import { v4 as uuidv4 } from "uuid";
18
20
  import { z } from "zod";
19
21
 
20
- import type { OpenAiClientOptions, TokenLimits, TokenUsage } from "../aiCollabApi.js";
22
+ import type {
23
+ DebugEventLogHandler,
24
+ OpenAiClientOptions,
25
+ TokenLimits,
26
+ TokenUsage,
27
+ } from "../aiCollabApi.js";
21
28
 
22
29
  import { applyAgentEdit } from "./agentEditReducer.js";
23
30
  import type { EditWrapper, TreeEdit } from "./agentEditTypes.js";
31
+ import {
32
+ type ApplyEditFailure,
33
+ type ApplyEditSuccess,
34
+ type GenerateTreeEditCompleted,
35
+ type GenerateTreeEditStarted,
36
+ type FinalReviewCompleted,
37
+ type FinalReviewStarted,
38
+ type LlmApiCallDebugEvent,
39
+ type PlanningPromptCompleted,
40
+ type CoreEventLoopStarted,
41
+ type CoreEventLoopCompleted,
42
+ generateDebugEvent,
43
+ type PlanningPromptStarted,
44
+ type EventFlowDebugName,
45
+ EventFlowDebugNames,
46
+ } from "./debugEvents.js";
24
47
  import { IdGenerator } from "./idGenerator.js";
25
48
  import {
26
49
  getEditingSystemPrompt,
@@ -32,7 +55,23 @@ import {
32
55
  import { generateGenericEditTypes } from "./typeGeneration.js";
33
56
  import { fail } from "./utils.js";
34
57
 
35
- const DEBUG_LOG: string[] = [];
58
+ // TODO: Create a proper index file and move the logic of this file to a new location
59
+ export type {
60
+ ApplyEditFailure,
61
+ ApplyEditSuccess,
62
+ CoreEventLoopCompleted,
63
+ CoreEventLoopStarted,
64
+ FinalReviewCompleted,
65
+ FinalReviewStarted,
66
+ GenerateTreeEditCompleted,
67
+ GenerateTreeEditStarted,
68
+ LlmApiCallDebugEvent,
69
+ PlanningPromptCompleted,
70
+ PlanningPromptStarted,
71
+ LlmTreeEdit,
72
+ EventFlowDebugName,
73
+ EventFlowDebugNames,
74
+ } from "./debugEvents.js";
36
75
 
37
76
  /**
38
77
  * {@link generateTreeEdits} options.
@@ -54,7 +93,7 @@ export interface GenerateTreeEditsOptions {
54
93
  };
55
94
  finalReviewStep?: boolean;
56
95
  validator?: (newContent: TreeNode) => void;
57
- dumpDebugLog?: boolean;
96
+ debugEventLogHandler?: DebugEventLogHandler;
58
97
  planningStep?: boolean;
59
98
  }
60
99
 
@@ -65,7 +104,12 @@ interface GenerateTreeEditsSuccessResponse {
65
104
 
66
105
  interface GenerateTreeEditsErrorResponse {
67
106
  status: "failure" | "partial-failure";
68
- errorMessage: "tokenLimitExceeded" | "tooManyErrors" | "tooManyModelCalls" | "aborted";
107
+ errorMessage:
108
+ | "tokenLimitExceeded"
109
+ | "tooManyErrors"
110
+ | "tooManyModelCalls"
111
+ | "aborted"
112
+ | "unexpectedError";
69
113
  tokensUsed: TokenUsage;
70
114
  }
71
115
 
@@ -91,18 +135,32 @@ export async function generateTreeEdits(
91
135
 
92
136
  const tokensUsed = { inputTokens: 0, outputTokens: 0 };
93
137
 
138
+ const debugLogTraceId = uuidv4();
139
+
140
+ const coreEventFlowTraceId = uuidv4();
141
+ options.debugEventLogHandler?.({
142
+ ...generateDebugEvent("CORE_EVENT_LOOP_STARTED", debugLogTraceId),
143
+ eventFlowName: EventFlowDebugNames.CORE_EVENT_LOOP,
144
+ eventFlowStatus: "STARTED",
145
+ eventFlowTraceId: coreEventFlowTraceId,
146
+ } satisfies CoreEventLoopStarted);
147
+
94
148
  try {
95
- for await (const edit of generateEdits(
149
+ for await (const generateEditResult of generateEdits(
96
150
  options,
97
151
  simpleSchema,
98
152
  idGenerator,
99
153
  editLog,
100
154
  options.limiters?.tokenLimits,
101
155
  tokensUsed,
156
+ options.debugEventLogHandler && {
157
+ eventLogHandler: options.debugEventLogHandler,
158
+ traceId: debugLogTraceId,
159
+ },
102
160
  )) {
103
161
  try {
104
162
  const result = applyAgentEdit(
105
- edit,
163
+ generateEditResult.edit,
106
164
  idGenerator,
107
165
  simpleSchema.definitions,
108
166
  options.validator,
@@ -110,55 +168,81 @@ export async function generateTreeEdits(
110
168
  const explanation = result.explanation;
111
169
  editLog.push({ edit: { ...result, explanation } });
112
170
  sequentialErrorCount = 0;
171
+
172
+ options.debugEventLogHandler?.({
173
+ ...generateDebugEvent("APPLIED_EDIT_SUCCESS", debugLogTraceId),
174
+ eventFlowName: EventFlowDebugNames.GENERATE_AND_APPLY_TREE_EDIT,
175
+ eventFlowStatus: "IN_PROGRESS",
176
+ eventFlowTraceId: generateEditResult.eventFlowTraceId,
177
+ edit: generateEditResult.edit as unknown as Record<string, unknown>,
178
+ } satisfies ApplyEditSuccess);
113
179
  } catch (error: unknown) {
114
- if (error instanceof Error) {
180
+ options.debugEventLogHandler?.({
181
+ ...generateDebugEvent("APPLIED_EDIT_FAILURE", debugLogTraceId),
182
+ eventFlowName: EventFlowDebugNames.GENERATE_AND_APPLY_TREE_EDIT,
183
+ eventFlowStatus: "IN_PROGRESS",
184
+ eventFlowTraceId: generateEditResult.eventFlowTraceId,
185
+ edit: generateEditResult.edit as unknown as Record<string, unknown>,
186
+ errorMessage: (error as Error)?.message,
187
+ sequentialErrorCount,
188
+ } satisfies ApplyEditFailure);
189
+
190
+ if (error instanceof UsageError) {
115
191
  sequentialErrorCount += 1;
116
- editLog.push({ edit, error: error.message });
117
- DEBUG_LOG?.push(`Error: ${error.message}`);
192
+ editLog.push({ edit: generateEditResult.edit, error: error.message });
118
193
  } else {
119
194
  throw error;
120
195
  }
121
196
  }
122
197
 
123
- const responseStatus =
124
- editCount > 0 && sequentialErrorCount < editCount ? "partial-failure" : "failure";
198
+ let shouldExitEarly = false;
199
+ const completionResponse: GenerateTreeEditsErrorResponse = {
200
+ status:
201
+ editCount > 0 && sequentialErrorCount < editCount ? "partial-failure" : "failure",
202
+ errorMessage: "unexpectedError",
203
+ tokensUsed,
204
+ };
125
205
 
126
206
  if (options.limiters?.abortController?.signal.aborted === true) {
127
- return {
128
- status: responseStatus,
129
- errorMessage: "aborted",
130
- tokensUsed,
131
- };
132
- }
133
-
134
- if (
207
+ completionResponse.errorMessage = "aborted";
208
+ shouldExitEarly = true;
209
+ } else if (
135
210
  sequentialErrorCount >
136
211
  (options.limiters?.maxSequentialErrors ?? Number.POSITIVE_INFINITY)
137
212
  ) {
138
- return {
139
- status: responseStatus,
140
- errorMessage: "tooManyErrors",
141
- tokensUsed,
142
- };
213
+ completionResponse.errorMessage = "tooManyErrors";
214
+ shouldExitEarly = true;
215
+ } else if (
216
+ ++editCount >= (options.limiters?.maxModelCalls ?? Number.POSITIVE_INFINITY)
217
+ ) {
218
+ completionResponse.errorMessage = "tooManyModelCalls";
219
+ shouldExitEarly = true;
143
220
  }
144
221
 
145
- if (++editCount >= (options.limiters?.maxModelCalls ?? Number.POSITIVE_INFINITY)) {
146
- return {
147
- status: responseStatus,
148
- errorMessage: "tooManyModelCalls",
149
- tokensUsed,
150
- };
222
+ if (shouldExitEarly) {
223
+ options.debugEventLogHandler?.({
224
+ ...generateDebugEvent("CORE_EVENT_LOOP_COMPLETED", debugLogTraceId),
225
+ eventFlowName: EventFlowDebugNames.CORE_EVENT_LOOP,
226
+ eventFlowStatus: "COMPLETED",
227
+ status: "failure",
228
+ failureReason: completionResponse.errorMessage,
229
+ eventFlowTraceId: coreEventFlowTraceId,
230
+ } satisfies CoreEventLoopCompleted);
231
+
232
+ return completionResponse;
151
233
  }
152
234
  }
153
235
  } catch (error: unknown) {
154
- if (error instanceof Error) {
155
- DEBUG_LOG?.push(`Error: ${error.message}`);
156
- }
157
-
158
- if (options.dumpDebugLog ?? false) {
159
- console.log(DEBUG_LOG.join("\n\n"));
160
- DEBUG_LOG.length = 0;
161
- }
236
+ options.debugEventLogHandler?.({
237
+ ...generateDebugEvent("CORE_EVENT_LOOP_COMPLETED", debugLogTraceId),
238
+ eventFlowName: EventFlowDebugNames.CORE_EVENT_LOOP,
239
+ eventFlowStatus: "COMPLETED",
240
+ status: "failure",
241
+ eventFlowTraceId: coreEventFlowTraceId,
242
+ failureReason:
243
+ error instanceof TokenLimitExceededError ? "tokenLimitExceeded" : "unexpectedError",
244
+ errorMessage: (error as Error)?.message,
245
+ } satisfies CoreEventLoopCompleted);
162
246
 
163
247
  if (error instanceof TokenLimitExceededError) {
164
248
  return {
@@ -171,10 +255,13 @@ export async function generateTreeEdits(
171
255
  throw error;
172
256
  }
173
257
 
174
- if (options.dumpDebugLog ?? false) {
175
- console.log(DEBUG_LOG.join("\n\n"));
176
- DEBUG_LOG.length = 0;
177
- }
258
+ options.debugEventLogHandler?.({
259
+ ...generateDebugEvent("CORE_EVENT_LOOP_COMPLETED", debugLogTraceId),
260
+ eventFlowName: EventFlowDebugNames.CORE_EVENT_LOOP,
261
+ eventFlowStatus: "COMPLETED",
262
+ eventFlowTraceId: coreEventFlowTraceId,
263
+ status: "success",
264
+ } satisfies CoreEventLoopCompleted);
178
265
 
179
266
  return {
180
267
  status: "success",
@@ -202,19 +289,48 @@ async function* generateEdits(
202
289
  editLog: EditLog,
203
290
  tokenLimits: TokenLimits | undefined,
204
291
  tokensUsed: TokenUsage,
205
- ): AsyncGenerator<TreeEdit> {
292
+ debugOptions?: {
293
+ eventLogHandler: DebugEventLogHandler;
294
+ traceId: string;
295
+ },
296
+ ): AsyncGenerator<{ edit: TreeEdit; eventFlowTraceId: string }> {
206
297
  const [types, rootTypeName] = generateGenericEditTypes(simpleSchema, true);
207
298
 
208
299
  let plan: string | undefined;
209
300
  if (options.planningStep !== undefined) {
210
- const planningPromt = getPlanningSystemPrompt(
301
+ const planningPrompt = getPlanningSystemPrompt(
211
302
  options.treeNode,
212
303
  options.prompt.userAsk,
213
304
  options.prompt.systemRoleContext,
214
305
  );
215
- DEBUG_LOG?.push(planningPromt);
216
- plan = await getStringFromLlm(planningPromt, options.openAI, tokensUsed);
217
- DEBUG_LOG?.push(`AI Generated the following plan: ${planningPromt}`);
306
+
307
+ const generatePlanningPromptEventFlowId = uuidv4();
308
+ debugOptions?.eventLogHandler?.({
309
+ ...generateDebugEvent("GENERATE_PLANNING_PROMPT_STARTED", debugOptions.traceId),
310
+ eventFlowName: EventFlowDebugNames.GENERATE_PLANNING_PROMPT,
311
+ eventFlowTraceId: generatePlanningPromptEventFlowId,
312
+ eventFlowStatus: "STARTED",
313
+ } satisfies PlanningPromptStarted);
314
+
315
+ plan = await getStringFromLlm(
316
+ planningPrompt,
317
+ options.openAI,
318
+ tokensUsed,
319
+ debugOptions && {
320
+ ...debugOptions,
321
+ triggeringEventFlowName: EventFlowDebugNames.GENERATE_PLANNING_PROMPT,
322
+ eventFlowTraceId: generatePlanningPromptEventFlowId,
323
+ },
324
+ );
325
+
326
+ debugOptions?.eventLogHandler?.({
327
+ ...generateDebugEvent("GENERATE_PLANNING_PROMPT_COMPLETED", debugOptions.traceId),
328
+ eventFlowName: EventFlowDebugNames.GENERATE_PLANNING_PROMPT,
329
+ eventFlowStatus: "COMPLETED",
330
+ eventFlowTraceId: generatePlanningPromptEventFlowId,
331
+ isLlmResponseValid: plan !== undefined,
332
+ llmGeneratedPlan: plan,
333
+ } satisfies PlanningPromptCompleted);
218
334
  }
219
335
 
220
336
  const originalDecoratedJson =
@@ -223,7 +339,9 @@ async function* generateEdits(
223
339
  : undefined;
224
340
  // reviewed is implicitly true if finalReviewStep is false
225
341
  let hasReviewed = (options.finalReviewStep ?? false) ? false : true;
226
- async function getNextEdit(): Promise<TreeEdit | undefined> {
342
+ async function getNextEdit(): Promise<
343
+ { edit: TreeEdit; eventFlowTraceId: string } | undefined
344
+ > {
227
345
  const systemPrompt = getEditingSystemPrompt(
228
346
  options.prompt.userAsk,
229
347
  idGenerator,
@@ -233,30 +351,47 @@ async function* generateEdits(
233
351
  plan,
234
352
  );
235
353
 
236
- DEBUG_LOG?.push(systemPrompt);
237
-
238
354
  const schema = types[rootTypeName] ?? fail("Root type not found.");
355
+
356
+ const generateTreeEditEventFlowId = uuidv4();
357
+ debugOptions?.eventLogHandler?.({
358
+ ...generateDebugEvent("GENERATE_TREE_EDIT_STARTED", debugOptions.traceId),
359
+ eventFlowName: EventFlowDebugNames.GENERATE_AND_APPLY_TREE_EDIT,
360
+ eventFlowStatus: "STARTED",
361
+ eventFlowTraceId: generateTreeEditEventFlowId,
362
+ llmPrompt: systemPrompt,
363
+ } satisfies GenerateTreeEditStarted);
364
+
239
365
  const wrapper = await getStructuredOutputFromLlm<EditWrapper>(
240
366
  systemPrompt,
241
367
  options.openAI,
242
368
  schema,
243
369
  "A JSON object that represents an edit to a JSON tree.",
244
370
  tokensUsed,
371
+ debugOptions && {
372
+ ...debugOptions,
373
+ triggeringEventFlowName: EventFlowDebugNames.GENERATE_AND_APPLY_TREE_EDIT,
374
+ eventFlowTraceId: generateTreeEditEventFlowId,
375
+ },
245
376
  );
246
377
 
247
- // eslint-disable-next-line unicorn/no-null
248
- DEBUG_LOG?.push(JSON.stringify(wrapper, null, 2));
378
+ debugOptions?.eventLogHandler?.({
379
+ ...generateDebugEvent("GENERATE_TREE_EDIT_COMPLETED", debugOptions.traceId),
380
+ eventFlowName: EventFlowDebugNames.GENERATE_AND_APPLY_TREE_EDIT,
381
+ eventFlowStatus: "COMPLETED",
382
+ eventFlowTraceId: generateTreeEditEventFlowId,
383
+ isLlmResponseValid: wrapper?.edit !== undefined,
384
+ llmGeneratedEdit: wrapper?.edit as Record<string, unknown> | null,
385
+ } satisfies GenerateTreeEditCompleted);
386
+
249
387
  if (wrapper === undefined) {
250
- DEBUG_LOG?.push("Failed to get response");
251
388
  return undefined;
252
389
  }
253
390
 
254
391
  if (wrapper.edit === null) {
255
- DEBUG_LOG?.push("No more edits.");
256
392
  if ((options.finalReviewStep ?? false) && !hasReviewed) {
257
393
  const reviewResult = await reviewGoal();
258
394
  if (reviewResult === undefined) {
259
- DEBUG_LOG?.push("Failed to get review response");
260
395
  return undefined;
261
396
  }
262
397
  // eslint-disable-next-line require-atomic-updates
@@ -270,7 +405,7 @@ async function* generateEdits(
270
405
  }
271
406
  }
272
407
  } else {
273
- return wrapper.edit;
408
+ return { edit: wrapper.edit, eventFlowTraceId: generateTreeEditEventFlowId };
274
409
  }
275
410
  }
276
411
 
@@ -283,14 +418,46 @@ async function* generateEdits(
283
418
  options.prompt.systemRoleContext,
284
419
  );
285
420
 
286
- DEBUG_LOG?.push(systemPrompt);
287
-
288
421
  const schema = z.object({
289
422
  goalAccomplished: z
290
423
  .enum(["yes", "no"])
291
424
  .describe('Whether the user\'s goal was met in the "after" tree.'),
292
425
  });
293
- return getStructuredOutputFromLlm<ReviewResult>(systemPrompt, options.openAI, schema);
426
+
427
+ const finalReviewEventFlowTraceId = uuidv4();
428
+ debugOptions?.eventLogHandler?.({
429
+ ...generateDebugEvent("FINAL_REVIEW_STARTED", debugOptions.traceId),
430
+ eventFlowName: EventFlowDebugNames.FINAL_REVIEW,
431
+ eventFlowStatus: "STARTED",
432
+ eventFlowTraceId: finalReviewEventFlowTraceId,
433
+ llmPrompt: systemPrompt,
434
+ } satisfies FinalReviewStarted);
435
+
436
+ // TODO: In the future, when using structured output isn't guaranteed, we will
437
+ // need to add a custom type guard to ensure that output is in the right shape.
438
+ const output = await getStructuredOutputFromLlm<ReviewResult>(
439
+ systemPrompt,
440
+ options.openAI,
441
+ schema,
442
+ undefined,
443
+ tokensUsed,
444
+ debugOptions && {
445
+ ...debugOptions,
446
+ triggeringEventFlowName: EventFlowDebugNames.FINAL_REVIEW,
447
+ eventFlowTraceId: finalReviewEventFlowTraceId,
448
+ },
449
+ );
450
+
451
+ debugOptions?.eventLogHandler?.({
452
+ ...generateDebugEvent("FINAL_REVIEW_COMPLETED", debugOptions.traceId),
453
+ eventFlowName: EventFlowDebugNames.FINAL_REVIEW,
454
+ eventFlowStatus: "COMPLETED",
455
+ eventFlowTraceId: finalReviewEventFlowTraceId,
456
+ isLlmResponseValid: output !== undefined,
457
+ didLlmAccomplishGoal: output?.goalAccomplished,
458
+ } satisfies FinalReviewCompleted);
459
+
460
+ return output;
294
461
  }
295
462
 
296
463
  let edit = await getNextEdit();
@@ -315,6 +482,12 @@ async function getStructuredOutputFromLlm<T>(
315
482
  structuredOutputSchema: Zod.ZodTypeAny,
316
483
  description?: string,
317
484
  tokensUsed?: TokenUsage,
485
+ debugOptions?: {
486
+ eventLogHandler: DebugEventLogHandler;
487
+ traceId: string;
488
+ triggeringEventFlowName: EventFlowDebugName;
489
+ eventFlowTraceId: string;
490
+ },
318
491
  ): Promise<T | undefined> {
319
492
  const response_format = zodResponseFormat(structuredOutputSchema, "SharedTreeAI", {
320
493
  description,
@@ -328,6 +501,21 @@ async function getStructuredOutputFromLlm<T>(
328
501
 
329
502
  const result = await openAi.client.beta.chat.completions.parse(body);
330
503
 
504
+ debugOptions?.eventLogHandler?.({
505
+ ...generateDebugEvent("LLM_API_CALL", debugOptions.traceId),
506
+ triggeringEventFlowName: debugOptions.triggeringEventFlowName,
507
+ eventFlowTraceId: debugOptions.eventFlowTraceId,
508
+ modelName: openAi.modelName ?? "gpt-4o",
509
+ requestParams: body,
510
+ response: { ...result },
511
+ ...(result.usage && {
512
+ tokenUsage: {
513
+ promptTokens: result.usage.prompt_tokens,
514
+ completionTokens: result.usage.completion_tokens,
515
+ },
516
+ }),
517
+ } satisfies LlmApiCallDebugEvent);
518
+
331
519
  if (result.usage !== undefined && tokensUsed !== undefined) {
332
520
  tokensUsed.inputTokens += result.usage?.prompt_tokens;
333
521
  tokensUsed.outputTokens += result.usage?.completion_tokens;
@@ -335,6 +523,7 @@ async function getStructuredOutputFromLlm<T>(
335
523
 
336
524
  // TODO: fix types so this isn't null and doesn't need a cast
337
525
  // The type should be derived from the zod schema
526
+ // TODO: Determine why this value would be undefined.
338
527
  return result.choices[0]?.message.parsed as T | undefined;
339
528
  }
340
529
 
@@ -345,6 +534,12 @@ async function getStringFromLlm(
345
534
  prompt: string,
346
535
  openAi: OpenAiClientOptions,
347
536
  tokensUsed?: TokenUsage,
537
+ debugOptions?: {
538
+ eventLogHandler: DebugEventLogHandler;
539
+ traceId: string;
540
+ triggeringEventFlowName: EventFlowDebugName;
541
+ eventFlowTraceId: string;
542
+ },
348
543
  ): Promise<string | undefined> {
349
544
  const body: ChatCompletionCreateParams = {
350
545
  messages: [{ role: "system", content: prompt }],
@@ -353,6 +548,21 @@ async function getStringFromLlm(
353
548
 
354
549
  const result = await openAi.client.chat.completions.create(body);
355
550
 
551
+ debugOptions?.eventLogHandler?.({
552
+ ...generateDebugEvent("LLM_API_CALL", debugOptions.traceId),
553
+ triggeringEventFlowName: debugOptions.triggeringEventFlowName,
554
+ eventFlowTraceId: debugOptions.eventFlowTraceId,
555
+ modelName: openAi.modelName ?? "gpt-4o",
556
+ requestParams: body,
557
+ response: { ...result },
558
+ ...(result.usage && {
559
+ tokenUsage: {
560
+ promptTokens: result.usage.prompt_tokens,
561
+ completionTokens: result.usage.completion_tokens,
562
+ },
563
+ }),
564
+ } satisfies LlmApiCallDebugEvent);
565
+
356
566
  if (result.usage !== undefined && tokensUsed !== undefined) {
357
567
  tokensUsed.inputTokens += result.usage?.prompt_tokens;
358
568
  tokensUsed.outputTokens += result.usage?.completion_tokens;
package/src/index.ts CHANGED
@@ -27,6 +27,23 @@ export {
27
27
  sharedTreeTraverse,
28
28
  } from "./implicit-strategy/index.js";
29
29
 
30
+ export type {
31
+ ApplyEditFailure,
32
+ ApplyEditSuccess,
33
+ CoreEventLoopCompleted,
34
+ CoreEventLoopStarted,
35
+ FinalReviewCompleted,
36
+ FinalReviewStarted,
37
+ GenerateTreeEditCompleted,
38
+ GenerateTreeEditStarted,
39
+ LlmApiCallDebugEvent,
40
+ PlanningPromptCompleted,
41
+ PlanningPromptStarted,
42
+ LlmTreeEdit,
43
+ EventFlowDebugName,
44
+ EventFlowDebugNames,
45
+ } from "./explicit-strategy/index.js";
46
+
30
47
  export {
31
48
  type AiCollabOptions,
32
49
  type AiCollabSuccessResponse,
@@ -34,6 +51,9 @@ export {
34
51
  type TokenUsage,
35
52
  type TokenLimits,
36
53
  type OpenAiClientOptions,
54
+ type DebugEvent,
55
+ type DebugEventLogHandler,
56
+ type EventFlowDebugEvent,
37
57
  } from "./aiCollabApi.js";
38
58
 
39
59
  export { aiCollab } from "./aiCollab.js";