@fallom/trace 0.1.3 → 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +120 -1
- package/dist/chunk-IGJD7GBO.mjs +248 -0
- package/dist/chunk-VNUUS74T.mjs +242 -0
- package/dist/index.d.mts +141 -16
- package/dist/index.d.ts +141 -16
- package/dist/index.js +472 -120
- package/dist/index.mjs +141 -37
- package/dist/prompts-67DJ33I4.mjs +14 -0
- package/dist/prompts-ODF4KO2E.mjs +14 -0
- package/package.json +1 -1
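The visible API changes in the declaration diffs below are an optional customerId threaded through the session context and a new prompts namespace. A minimal sketch of the session-tagging change, assuming the default and named exports shown in the declarations and treating "linkedin-agent", the session ID, and "user_123" as illustrative values:

```typescript
import fallom, { trace } from "@fallom/trace";

const sessionId = "session_abc"; // illustrative session/conversation ID

await fallom.init({ apiKey: process.env.FALLOM_API_KEY });

// New in these declarations: an optional third argument attaches a
// customer/user identifier to the session context.
trace.setSession("linkedin-agent", sessionId, "user_123");

// Or scope the context explicitly across async boundaries:
await trace.runWithSession("linkedin-agent", sessionId, "user_123", async () => {
  // LLM calls made in here are traced with session + customer context.
});
```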
package/dist/index.d.mts
CHANGED
@@ -7,6 +7,7 @@
 interface SessionContext {
     configKey: string;
     sessionId: string;
+    customerId?: string;
 }
 /**
  * Initialize Fallom tracing. Auto-instruments all LLM calls.
@@ -32,7 +33,7 @@ interface SessionContext {
  * await agent.run(message); // Automatically traced
  * ```
  */
-declare function init$2(options?: {
+declare function init$3(options?: {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
@@ -42,34 +43,36 @@ declare function init$2(options?: {
  * Set the current session context.
  *
  * All subsequent LLM calls in this async context will be
- * automatically tagged with this configKey and sessionId.
+ * automatically tagged with this configKey, sessionId, and customerId.
  *
  * @param configKey - Your config name (e.g., "linkedin-agent")
  * @param sessionId - Your session/conversation ID
+ * @param customerId - Optional customer/user identifier for analytics
  *
  * @example
  * ```typescript
- * trace.setSession("linkedin-agent", sessionId);
- * await agent.run(message); // Automatically traced with session
+ * trace.setSession("linkedin-agent", sessionId, "user_123");
+ * await agent.run(message); // Automatically traced with session + customer
  * ```
  */
-declare function setSession(configKey: string, sessionId: string): void;
+declare function setSession(configKey: string, sessionId: string, customerId?: string): void;
 /**
  * Run a function with session context.
  * Use this to ensure session context propagates across async boundaries.
  *
  * @param configKey - Your config name
  * @param sessionId - Your session ID
+ * @param customerId - Optional customer/user identifier
  * @param fn - Function to run with session context
  *
  * @example
  * ```typescript
- * await trace.runWithSession("my-agent", sessionId, async () => {
+ * await trace.runWithSession("my-agent", sessionId, "user_123", async () => {
  *   await agent.run(message); // Has session context
  * });
  * ```
  */
-declare function runWithSession<T>(configKey: string, sessionId: string, fn: () => T): T;
+declare function runWithSession<T>(configKey: string, sessionId: string, customerIdOrFn: string | (() => T), fn?: () => T): T;
 /**
  * Get current session context, if any.
  */
@@ -191,7 +194,7 @@ declare const trace_wrapAnthropic: typeof wrapAnthropic;
 declare const trace_wrapGoogleAI: typeof wrapGoogleAI;
 declare const trace_wrapOpenAI: typeof wrapOpenAI;
 declare namespace trace {
-  export { trace_clearSession as clearSession, trace_getSession as getSession, init$2 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
+  export { trace_clearSession as clearSession, trace_getSession as getSession, init$3 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
 }

 /**
@@ -212,7 +215,7 @@ declare namespace trace {
  * This is optional - get() will auto-init if needed.
  * Non-blocking: starts background config fetch immediately.
  */
-declare function init$1(options?: {
+declare function init$2(options?: {
     apiKey?: string;
     baseUrl?: string;
 }): void;
@@ -236,19 +239,132 @@ declare function init$1(options?: {
  * @returns Model string (e.g., "claude-opus", "gpt-4o")
  * @throws Error if config not found AND no fallback provided
  */
-declare function get(configKey: string, sessionId: string, options?: {
+declare function get$1(configKey: string, sessionId: string, options?: {
     version?: number;
     fallback?: string;
     debug?: boolean;
 }): Promise<string>;

-declare const models_get: typeof get;
 declare namespace models {
-  export { models_get as get, init$1 as init };
+  export { get$1 as get, init$2 as init };
+}
+
+/**
+ * Fallom prompts module.
+ *
+ * Provides prompt management and A/B testing.
+ * Zero latency on get() - uses local cache + template interpolation.
+ *
+ * Design principles:
+ * - Never block user's app if Fallom is down
+ * - Very short timeouts (1-2 seconds max)
+ * - Always return usable prompt (or throw if not found)
+ * - Background sync keeps prompts fresh
+ * - Auto-tag next trace with prompt context
+ */
+interface PromptContext {
+    promptKey: string;
+    promptVersion: number;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Result from prompts.get() or prompts.getAB()
+ */
+interface PromptResult {
+    key: string;
+    version: number;
+    system: string;
+    user: string;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Initialize Fallom prompts.
+ * This is called automatically by fallom.init().
+ */
+declare function init$1(options?: {
+    apiKey?: string;
+    baseUrl?: string;
+}): void;
+/**
+ * Get and clear prompt context (one-shot).
+ */
+declare function getPromptContext(): PromptContext | null;
+/**
+ * Get a prompt (non-A/B).
+ *
+ * Zero latency - uses local cache + string interpolation.
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param promptKey - Your prompt key (e.g., "onboarding")
+ * @param options - Optional settings
+ * @param options.variables - Template variables (e.g., { userName: "John" })
+ * @param options.version - Pin to specific version. undefined = current
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
+ * // Use with OpenAI
+ * const response = await openai.chat.completions.create({
+ *   model,
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
+ * });
+ * ```
+ */
+declare function get(promptKey: string, options?: {
+    variables?: Record<string, unknown>;
+    version?: number;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Get a prompt from an A/B test.
+ *
+ * Uses sessionId hash for deterministic, sticky assignment.
+ * Same session always gets same variant.
+ *
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param abTestKey - Your A/B test key (e.g., "onboarding-experiment")
+ * @param sessionId - Your session/conversation ID (for sticky assignment)
+ * @param options - Optional settings
+ * @param options.variables - Template variables
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.getAB("onboarding-test", sessionId, {
+ *   variables: { userName: "John" }
+ * });
+ * ```
+ */
+declare function getAB(abTestKey: string, sessionId: string, options?: {
+    variables?: Record<string, unknown>;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Manually clear prompt context.
+ */
+declare function clearPromptContext(): void;
+
+type prompts_PromptResult = PromptResult;
+declare const prompts_clearPromptContext: typeof clearPromptContext;
+declare const prompts_get: typeof get;
+declare const prompts_getAB: typeof getAB;
+declare const prompts_getPromptContext: typeof getPromptContext;
+declare namespace prompts {
+  export { type prompts_PromptResult as PromptResult, prompts_clearPromptContext as clearPromptContext, prompts_get as get, prompts_getAB as getAB, prompts_getPromptContext as getPromptContext, init$1 as init };
 }

 /**
- * Combined initialization for trace and models.
+ * Combined initialization for trace, models, and prompts.
  */
 interface InitOptions {
     apiKey?: string;
@@ -281,7 +397,7 @@ interface InitOptions {
 declare function init(options?: InitOptions): Promise<void>;

 /**
- * Fallom - Model A/B testing and tracing for LLM applications.
+ * Fallom - Model A/B testing, prompt management, and tracing for LLM applications.
  *
  * @example
  * ```typescript
@@ -298,10 +414,18 @@ declare function init(options?: InitOptions): Promise<void>;
  *   fallback: "gpt-4o-mini"
  * });
  *
+ * // Get managed prompts (with optional A/B testing)
+ * const prompt = await fallom.prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
  * // Use with OpenAI
  * const response = await openai.chat.completions.create({
  *   model,
- *   messages: [
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
  * });
  *
  * // Record custom metrics
@@ -313,6 +437,7 @@ declare const _default: {
     init: typeof init;
     trace: typeof trace;
     models: typeof models;
+    prompts: typeof prompts;
 };

-export { type InitOptions, _default as default, init, models, trace };
+export { type InitOptions, type PromptResult, _default as default, init, models, prompts, trace };
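The new prompts declarations above describe cache-backed prompt retrieval with optional A/B assignment hashed from the session ID. A hedged sketch of how prompts.get and prompts.getAB might be wired into an OpenAI call, reusing the "onboarding" and "onboarding-test" keys from the JSDoc examples; the model name and session ID are illustrative:

```typescript
import fallom, { prompts } from "@fallom/trace";
import OpenAI from "openai";

const openai = new OpenAI();
const sessionId = "session_abc"; // illustrative

await fallom.init({ apiKey: process.env.FALLOM_API_KEY });

// Plain fetch: served from the local cache, template variables interpolated.
const prompt = await prompts.get("onboarding", {
  variables: { userName: "John" },
});

// A/B fetch: the sessionId hash gives the same session the same variant.
const variant = await prompts.getAB("onboarding-test", sessionId, {
  variables: { userName: "John" },
});

const response = await openai.chat.completions.create({
  model: "gpt-4o-mini", // illustrative; pair with models.get() in practice
  messages: [
    { role: "system", content: variant.system },
    { role: "user", content: variant.user },
  ],
});
console.log(response.choices[0]?.message?.content);
```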
package/dist/index.d.ts
CHANGED
@@ -7,6 +7,7 @@
 interface SessionContext {
     configKey: string;
     sessionId: string;
+    customerId?: string;
 }
 /**
  * Initialize Fallom tracing. Auto-instruments all LLM calls.
@@ -32,7 +33,7 @@ interface SessionContext {
  * await agent.run(message); // Automatically traced
  * ```
  */
-declare function init$2(options?: {
+declare function init$3(options?: {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
@@ -42,34 +43,36 @@ declare function init$2(options?: {
  * Set the current session context.
  *
  * All subsequent LLM calls in this async context will be
- * automatically tagged with this configKey and sessionId.
+ * automatically tagged with this configKey, sessionId, and customerId.
  *
  * @param configKey - Your config name (e.g., "linkedin-agent")
  * @param sessionId - Your session/conversation ID
+ * @param customerId - Optional customer/user identifier for analytics
  *
  * @example
  * ```typescript
- * trace.setSession("linkedin-agent", sessionId);
- * await agent.run(message); // Automatically traced with session
+ * trace.setSession("linkedin-agent", sessionId, "user_123");
+ * await agent.run(message); // Automatically traced with session + customer
  * ```
  */
-declare function setSession(configKey: string, sessionId: string): void;
+declare function setSession(configKey: string, sessionId: string, customerId?: string): void;
 /**
  * Run a function with session context.
  * Use this to ensure session context propagates across async boundaries.
  *
  * @param configKey - Your config name
  * @param sessionId - Your session ID
+ * @param customerId - Optional customer/user identifier
  * @param fn - Function to run with session context
  *
  * @example
  * ```typescript
- * await trace.runWithSession("my-agent", sessionId, async () => {
+ * await trace.runWithSession("my-agent", sessionId, "user_123", async () => {
  *   await agent.run(message); // Has session context
  * });
  * ```
  */
-declare function runWithSession<T>(configKey: string, sessionId: string, fn: () => T): T;
+declare function runWithSession<T>(configKey: string, sessionId: string, customerIdOrFn: string | (() => T), fn?: () => T): T;
 /**
  * Get current session context, if any.
  */
@@ -191,7 +194,7 @@ declare const trace_wrapAnthropic: typeof wrapAnthropic;
 declare const trace_wrapGoogleAI: typeof wrapGoogleAI;
 declare const trace_wrapOpenAI: typeof wrapOpenAI;
 declare namespace trace {
-  export { trace_clearSession as clearSession, trace_getSession as getSession, init$2 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
+  export { trace_clearSession as clearSession, trace_getSession as getSession, init$3 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
 }

 /**
@@ -212,7 +215,7 @@ declare namespace trace {
  * This is optional - get() will auto-init if needed.
  * Non-blocking: starts background config fetch immediately.
  */
-declare function init$1(options?: {
+declare function init$2(options?: {
     apiKey?: string;
     baseUrl?: string;
 }): void;
@@ -236,19 +239,132 @@ declare function init$1(options?: {
  * @returns Model string (e.g., "claude-opus", "gpt-4o")
  * @throws Error if config not found AND no fallback provided
  */
-declare function get(configKey: string, sessionId: string, options?: {
+declare function get$1(configKey: string, sessionId: string, options?: {
     version?: number;
     fallback?: string;
     debug?: boolean;
 }): Promise<string>;

-declare const models_get: typeof get;
 declare namespace models {
-  export { models_get as get, init$1 as init };
+  export { get$1 as get, init$2 as init };
+}
+
+/**
+ * Fallom prompts module.
+ *
+ * Provides prompt management and A/B testing.
+ * Zero latency on get() - uses local cache + template interpolation.
+ *
+ * Design principles:
+ * - Never block user's app if Fallom is down
+ * - Very short timeouts (1-2 seconds max)
+ * - Always return usable prompt (or throw if not found)
+ * - Background sync keeps prompts fresh
+ * - Auto-tag next trace with prompt context
+ */
+interface PromptContext {
+    promptKey: string;
+    promptVersion: number;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Result from prompts.get() or prompts.getAB()
+ */
+interface PromptResult {
+    key: string;
+    version: number;
+    system: string;
+    user: string;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Initialize Fallom prompts.
+ * This is called automatically by fallom.init().
+ */
+declare function init$1(options?: {
+    apiKey?: string;
+    baseUrl?: string;
+}): void;
+/**
+ * Get and clear prompt context (one-shot).
+ */
+declare function getPromptContext(): PromptContext | null;
+/**
+ * Get a prompt (non-A/B).
+ *
+ * Zero latency - uses local cache + string interpolation.
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param promptKey - Your prompt key (e.g., "onboarding")
+ * @param options - Optional settings
+ * @param options.variables - Template variables (e.g., { userName: "John" })
+ * @param options.version - Pin to specific version. undefined = current
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
+ * // Use with OpenAI
+ * const response = await openai.chat.completions.create({
+ *   model,
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
+ * });
+ * ```
+ */
+declare function get(promptKey: string, options?: {
+    variables?: Record<string, unknown>;
+    version?: number;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Get a prompt from an A/B test.
+ *
+ * Uses sessionId hash for deterministic, sticky assignment.
+ * Same session always gets same variant.
+ *
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param abTestKey - Your A/B test key (e.g., "onboarding-experiment")
+ * @param sessionId - Your session/conversation ID (for sticky assignment)
+ * @param options - Optional settings
+ * @param options.variables - Template variables
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.getAB("onboarding-test", sessionId, {
+ *   variables: { userName: "John" }
+ * });
+ * ```
+ */
+declare function getAB(abTestKey: string, sessionId: string, options?: {
+    variables?: Record<string, unknown>;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Manually clear prompt context.
+ */
+declare function clearPromptContext(): void;
+
+type prompts_PromptResult = PromptResult;
+declare const prompts_clearPromptContext: typeof clearPromptContext;
+declare const prompts_get: typeof get;
+declare const prompts_getAB: typeof getAB;
+declare const prompts_getPromptContext: typeof getPromptContext;
+declare namespace prompts {
+  export { type prompts_PromptResult as PromptResult, prompts_clearPromptContext as clearPromptContext, prompts_get as get, prompts_getAB as getAB, prompts_getPromptContext as getPromptContext, init$1 as init };
 }

 /**
- * Combined initialization for trace and models.
+ * Combined initialization for trace, models, and prompts.
  */
 interface InitOptions {
     apiKey?: string;
@@ -281,7 +397,7 @@ interface InitOptions {
 declare function init(options?: InitOptions): Promise<void>;

 /**
- * Fallom - Model A/B testing and tracing for LLM applications.
+ * Fallom - Model A/B testing, prompt management, and tracing for LLM applications.
  *
  * @example
  * ```typescript
@@ -298,10 +414,18 @@ declare function init(options?: InitOptions): Promise<void>;
  *   fallback: "gpt-4o-mini"
  * });
  *
+ * // Get managed prompts (with optional A/B testing)
+ * const prompt = await fallom.prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
  * // Use with OpenAI
  * const response = await openai.chat.completions.create({
  *   model,
- *   messages: [
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
  * });
  *
  * // Record custom metrics
@@ -313,6 +437,7 @@ declare const _default: {
     init: typeof init;
     trace: typeof trace;
     models: typeof models;
+    prompts: typeof prompts;
 };

-export { type InitOptions, _default as default, init, models, trace };
+export { type InitOptions, type PromptResult, _default as default, init, models, prompts, trace };
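One detail worth noting in both declaration files: runWithSession widens its third parameter to string | (() => T), so the pre-0.1.5 call shape with the callback in third position should still type-check alongside the new four-argument form. A small sketch under that reading of the declaration; the "my-agent" config key, session ID, and "user_123" value are illustrative:

```typescript
import { trace } from "@fallom/trace";

const sessionId = "session_abc"; // illustrative

// Old call shape: callback third, no customer ID.
await trace.runWithSession("my-agent", sessionId, async () => {
  // traced work without customer attribution
});

// New call shape: customer ID third, callback fourth.
await trace.runWithSession("my-agent", sessionId, "user_123", async () => {
  // traced work attributed to "user_123"
});
```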