@fallom/trace 0.1.3 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +120 -1
- package/dist/chunk-IGJD7GBO.mjs +248 -0
- package/dist/chunk-VNUUS74T.mjs +242 -0
- package/dist/index.d.mts +132 -10
- package/dist/index.d.ts +132 -10
- package/dist/index.js +447 -116
- package/dist/index.mjs +115 -32
- package/dist/prompts-67DJ33I4.mjs +14 -0
- package/dist/prompts-ODF4KO2E.mjs +14 -0
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -32,7 +32,7 @@ interface SessionContext {
  * await agent.run(message); // Automatically traced
  * ```
  */
-declare function init$
+declare function init$3(options?: {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
@@ -191,7 +191,7 @@ declare const trace_wrapAnthropic: typeof wrapAnthropic;
 declare const trace_wrapGoogleAI: typeof wrapGoogleAI;
 declare const trace_wrapOpenAI: typeof wrapOpenAI;
 declare namespace trace {
-  export { trace_clearSession as clearSession, trace_getSession as getSession, init$
+  export { trace_clearSession as clearSession, trace_getSession as getSession, init$3 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
 }

 /**
@@ -212,7 +212,7 @@ declare namespace trace {
  * This is optional - get() will auto-init if needed.
  * Non-blocking: starts background config fetch immediately.
  */
-declare function init$
+declare function init$2(options?: {
     apiKey?: string;
     baseUrl?: string;
 }): void;
@@ -236,19 +236,132 @@ declare function init$1(options?: {
  * @returns Model string (e.g., "claude-opus", "gpt-4o")
  * @throws Error if config not found AND no fallback provided
  */
-declare function get(configKey: string, sessionId: string, options?: {
+declare function get$1(configKey: string, sessionId: string, options?: {
     version?: number;
     fallback?: string;
     debug?: boolean;
 }): Promise<string>;

-declare const models_get: typeof get;
 declare namespace models {
-  export {
+  export { get$1 as get, init$2 as init };
+}
+
+/**
+ * Fallom prompts module.
+ *
+ * Provides prompt management and A/B testing.
+ * Zero latency on get() - uses local cache + template interpolation.
+ *
+ * Design principles:
+ * - Never block user's app if Fallom is down
+ * - Very short timeouts (1-2 seconds max)
+ * - Always return usable prompt (or throw if not found)
+ * - Background sync keeps prompts fresh
+ * - Auto-tag next trace with prompt context
+ */
+interface PromptContext {
+    promptKey: string;
+    promptVersion: number;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Result from prompts.get() or prompts.getAB()
+ */
+interface PromptResult {
+    key: string;
+    version: number;
+    system: string;
+    user: string;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Initialize Fallom prompts.
+ * This is called automatically by fallom.init().
+ */
+declare function init$1(options?: {
+    apiKey?: string;
+    baseUrl?: string;
+}): void;
+/**
+ * Get and clear prompt context (one-shot).
+ */
+declare function getPromptContext(): PromptContext | null;
+/**
+ * Get a prompt (non-A/B).
+ *
+ * Zero latency - uses local cache + string interpolation.
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param promptKey - Your prompt key (e.g., "onboarding")
+ * @param options - Optional settings
+ * @param options.variables - Template variables (e.g., { userName: "John" })
+ * @param options.version - Pin to specific version. undefined = current
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
+ * // Use with OpenAI
+ * const response = await openai.chat.completions.create({
+ *   model,
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
+ * });
+ * ```
+ */
+declare function get(promptKey: string, options?: {
+    variables?: Record<string, unknown>;
+    version?: number;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Get a prompt from an A/B test.
+ *
+ * Uses sessionId hash for deterministic, sticky assignment.
+ * Same session always gets same variant.
+ *
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param abTestKey - Your A/B test key (e.g., "onboarding-experiment")
+ * @param sessionId - Your session/conversation ID (for sticky assignment)
+ * @param options - Optional settings
+ * @param options.variables - Template variables
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.getAB("onboarding-test", sessionId, {
+ *   variables: { userName: "John" }
+ * });
+ * ```
+ */
+declare function getAB(abTestKey: string, sessionId: string, options?: {
+    variables?: Record<string, unknown>;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Manually clear prompt context.
+ */
+declare function clearPromptContext(): void;
+
+type prompts_PromptResult = PromptResult;
+declare const prompts_clearPromptContext: typeof clearPromptContext;
+declare const prompts_get: typeof get;
+declare const prompts_getAB: typeof getAB;
+declare const prompts_getPromptContext: typeof getPromptContext;
+declare namespace prompts {
+  export { type prompts_PromptResult as PromptResult, prompts_clearPromptContext as clearPromptContext, prompts_get as get, prompts_getAB as getAB, prompts_getPromptContext as getPromptContext, init$1 as init };
 }

 /**
- * Combined initialization for
+ * Combined initialization for trace, models, and prompts.
  */
 interface InitOptions {
     apiKey?: string;
@@ -281,7 +394,7 @@ interface InitOptions {
 declare function init(options?: InitOptions): Promise<void>;

 /**
- * Fallom - Model A/B testing and tracing for LLM applications.
+ * Fallom - Model A/B testing, prompt management, and tracing for LLM applications.
  *
  * @example
  * ```typescript
@@ -298,10 +411,18 @@ declare function init(options?: InitOptions): Promise<void>;
  *   fallback: "gpt-4o-mini"
  * });
  *
+ * // Get managed prompts (with optional A/B testing)
+ * const prompt = await fallom.prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
  * // Use with OpenAI
  * const response = await openai.chat.completions.create({
  *   model,
- *   messages: [
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
  * });
  *
  * // Record custom metrics
@@ -313,6 +434,7 @@ declare const _default: {
     init: typeof init;
     trace: typeof trace;
     models: typeof models;
+    prompts: typeof prompts;
 };

-export { type InitOptions, _default as default, init, models, trace };
+export { type InitOptions, type PromptResult, _default as default, init, models, prompts, trace };
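Taken together, the declarations above add a prompts namespace (get, getAB, getPromptContext, clearPromptContext, init) alongside the existing models and trace namespaces. Below is a minimal sketch of how the new surface composes, based only on the signatures in this diff; the prompt key, A/B test key, model config key, session ID, and the OpenAI client are illustrative placeholders, not values shipped by the package.

```typescript
// Sketch only: exercises the new prompts.getAB() together with the existing
// models.get(), as declared in this diff. Keys and sessionId are hypothetical.
import OpenAI from "openai";
import { models, prompts } from "@fallom/trace";

const openai = new OpenAI();

async function reply(sessionId: string, userName: string) {
  // Sticky assignment: per the JSDoc above, the same sessionId always maps
  // to the same variant of the "onboarding-test" A/B test.
  const prompt = await prompts.getAB("onboarding-test", sessionId, {
    variables: { userName },
  });

  // Model routing from the existing models API; falls back if the config
  // cannot be fetched.
  const model = await models.get("chat-model", sessionId, {
    fallback: "gpt-4o-mini",
  });

  const response = await openai.chat.completions.create({
    model,
    messages: [
      { role: "system", content: prompt.system },
      { role: "user", content: prompt.user },
    ],
  });

  // PromptResult also carries the resolved version and variant index,
  // useful for logging which arm of the test answered.
  console.log(prompt.key, prompt.version, prompt.variantIndex);
  return response.choices[0]?.message?.content;
}
```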
package/dist/index.d.ts
CHANGED
@@ -32,7 +32,7 @@ interface SessionContext {
  * await agent.run(message); // Automatically traced
  * ```
  */
-declare function init$
+declare function init$3(options?: {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
@@ -191,7 +191,7 @@ declare const trace_wrapAnthropic: typeof wrapAnthropic;
 declare const trace_wrapGoogleAI: typeof wrapGoogleAI;
 declare const trace_wrapOpenAI: typeof wrapOpenAI;
 declare namespace trace {
-  export { trace_clearSession as clearSession, trace_getSession as getSession, init$
+  export { trace_clearSession as clearSession, trace_getSession as getSession, init$3 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
 }

 /**
@@ -212,7 +212,7 @@ declare namespace trace {
  * This is optional - get() will auto-init if needed.
  * Non-blocking: starts background config fetch immediately.
  */
-declare function init$
+declare function init$2(options?: {
     apiKey?: string;
     baseUrl?: string;
 }): void;
@@ -236,19 +236,132 @@ declare function init$1(options?: {
  * @returns Model string (e.g., "claude-opus", "gpt-4o")
  * @throws Error if config not found AND no fallback provided
  */
-declare function get(configKey: string, sessionId: string, options?: {
+declare function get$1(configKey: string, sessionId: string, options?: {
     version?: number;
     fallback?: string;
     debug?: boolean;
 }): Promise<string>;

-declare const models_get: typeof get;
 declare namespace models {
-  export {
+  export { get$1 as get, init$2 as init };
+}
+
+/**
+ * Fallom prompts module.
+ *
+ * Provides prompt management and A/B testing.
+ * Zero latency on get() - uses local cache + template interpolation.
+ *
+ * Design principles:
+ * - Never block user's app if Fallom is down
+ * - Very short timeouts (1-2 seconds max)
+ * - Always return usable prompt (or throw if not found)
+ * - Background sync keeps prompts fresh
+ * - Auto-tag next trace with prompt context
+ */
+interface PromptContext {
+    promptKey: string;
+    promptVersion: number;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Result from prompts.get() or prompts.getAB()
+ */
+interface PromptResult {
+    key: string;
+    version: number;
+    system: string;
+    user: string;
+    abTestKey?: string;
+    variantIndex?: number;
+}
+/**
+ * Initialize Fallom prompts.
+ * This is called automatically by fallom.init().
+ */
+declare function init$1(options?: {
+    apiKey?: string;
+    baseUrl?: string;
+}): void;
+/**
+ * Get and clear prompt context (one-shot).
+ */
+declare function getPromptContext(): PromptContext | null;
+/**
+ * Get a prompt (non-A/B).
+ *
+ * Zero latency - uses local cache + string interpolation.
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param promptKey - Your prompt key (e.g., "onboarding")
+ * @param options - Optional settings
+ * @param options.variables - Template variables (e.g., { userName: "John" })
+ * @param options.version - Pin to specific version. undefined = current
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
+ * // Use with OpenAI
+ * const response = await openai.chat.completions.create({
+ *   model,
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
+ * });
+ * ```
+ */
+declare function get(promptKey: string, options?: {
+    variables?: Record<string, unknown>;
+    version?: number;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Get a prompt from an A/B test.
+ *
+ * Uses sessionId hash for deterministic, sticky assignment.
+ * Same session always gets same variant.
+ *
+ * Also sets prompt context for next trace auto-tagging.
+ *
+ * @param abTestKey - Your A/B test key (e.g., "onboarding-experiment")
+ * @param sessionId - Your session/conversation ID (for sticky assignment)
+ * @param options - Optional settings
+ * @param options.variables - Template variables
+ * @param options.debug - Enable debug logging
+ *
+ * @example
+ * ```typescript
+ * const prompt = await prompts.getAB("onboarding-test", sessionId, {
+ *   variables: { userName: "John" }
+ * });
+ * ```
+ */
+declare function getAB(abTestKey: string, sessionId: string, options?: {
+    variables?: Record<string, unknown>;
+    debug?: boolean;
+}): Promise<PromptResult>;
+/**
+ * Manually clear prompt context.
+ */
+declare function clearPromptContext(): void;
+
+type prompts_PromptResult = PromptResult;
+declare const prompts_clearPromptContext: typeof clearPromptContext;
+declare const prompts_get: typeof get;
+declare const prompts_getAB: typeof getAB;
+declare const prompts_getPromptContext: typeof getPromptContext;
+declare namespace prompts {
+  export { type prompts_PromptResult as PromptResult, prompts_clearPromptContext as clearPromptContext, prompts_get as get, prompts_getAB as getAB, prompts_getPromptContext as getPromptContext, init$1 as init };
 }

 /**
- * Combined initialization for
+ * Combined initialization for trace, models, and prompts.
  */
 interface InitOptions {
     apiKey?: string;
@@ -281,7 +394,7 @@ interface InitOptions {
 declare function init(options?: InitOptions): Promise<void>;

 /**
- * Fallom - Model A/B testing and tracing for LLM applications.
+ * Fallom - Model A/B testing, prompt management, and tracing for LLM applications.
  *
  * @example
  * ```typescript
@@ -298,10 +411,18 @@ declare function init(options?: InitOptions): Promise<void>;
  *   fallback: "gpt-4o-mini"
  * });
  *
+ * // Get managed prompts (with optional A/B testing)
+ * const prompt = await fallom.prompts.get("onboarding", {
+ *   variables: { userName: "John" }
+ * });
+ *
  * // Use with OpenAI
  * const response = await openai.chat.completions.create({
  *   model,
- *   messages: [
+ *   messages: [
+ *     { role: "system", content: prompt.system },
+ *     { role: "user", content: prompt.user }
+ *   ]
  * });
  *
  * // Record custom metrics
@@ -313,6 +434,7 @@ declare const _default: {
     init: typeof init;
     trace: typeof trace;
     models: typeof models;
+    prompts: typeof prompts;
 };

-export { type InitOptions, _default as default, init, models, trace };
+export { type InitOptions, type PromptResult, _default as default, init, models, prompts, trace };
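For completeness, here is a sketch of the combined entry point implied by the updated default export and the JSDoc example above. Only apiKey is visible on InitOptions in this diff, so no other init option is assumed, and the environment variable used for it is a placeholder.

```typescript
// Sketch based on the updated default export ({ init, trace, models, prompts })
// and the "Combined initialization" comment in this diff. FALLOM_API_KEY is a
// hypothetical environment variable, not something defined by the package.
import fallom from "@fallom/trace";

// One call wires up trace, models, and prompts together.
await fallom.init({ apiKey: process.env.FALLOM_API_KEY });

// New in 0.1.4: managed prompts on the default export, with template variables.
const prompt = await fallom.prompts.get("onboarding", {
  variables: { userName: "John" },
});

console.log(prompt.system);
console.log(prompt.user);
```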