@usewhisper/sdk 1.0.1 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (6)
  1. package/README.md +256 -256
  2. package/index.d.mts +208 -2
  3. package/index.d.ts +208 -2
  4. package/index.js +232 -15
  5. package/index.mjs +228 -15
  6. package/package.json +56 -56
package/index.d.mts CHANGED
@@ -1,3 +1,154 @@
1
+ /**
2
+ * Whisper - Simple Memory Layer for AI Agents
3
+ *
4
+ * Two core methods:
5
+ * - getContext(): Retrieve relevant context before LLM call
6
+ * - capture(): Extract and store memories after LLM response
7
+ *
8
+ * Zero magic - you control when to get context and when to capture
9
+ */
10
+
11
+ interface WhisperOptions extends WhisperConfig {
12
+ /**
13
+ * Maximum context results to retrieve.
14
+ * Default: 10
15
+ */
16
+ contextLimit?: number;
17
+ /**
18
+ * Which memory types to use.
19
+ * Default: all 7 types
20
+ */
21
+ memoryTypes?: Array<"factual" | "preference" | "event" | "relationship" | "opinion" | "goal" | "instruction">;
22
+ /**
23
+ * Prefix for context injection.
24
+ * Default: "Relevant context:"
25
+ */
26
+ contextPrefix?: string;
27
+ }
28
+ interface ContextResult {
29
+ context: string;
30
+ results: QueryResult["results"];
31
+ count: number;
32
+ }
33
+ /**
34
+ * Simple, transparent memory layer
35
+ *
36
+ * @example
37
+ * ```typescript
38
+ * import { Whisper } from '@usewhisper/sdk';
39
+ *
40
+ * const whisper = new Whisper({
41
+ * apiKey: process.env.WHISPER_KEY,
42
+ * project: 'my-app'
43
+ * });
44
+ *
45
+ * // BEFORE: Get relevant context
46
+ * const { context, results } = await whisper.getContext("What does user prefer?");
47
+ *
48
+ * // Inject context into your LLM prompt
49
+ * const prompt = `${context}\n\nUser: What does user prefer?`;
50
+ * const response = await llm.complete(prompt);
51
+ *
52
+ * // AFTER: Capture what happened
53
+ * await whisper.capture(response);
54
+ * // → Memories extracted & stored (async)
55
+ * ```
56
+ */
57
+ declare class Whisper {
58
+ private client;
59
+ private options;
60
+ private sessionId?;
61
+ private userId?;
62
+ constructor(options: WhisperOptions);
63
+ /**
64
+ * Set session ID for conversation tracking
65
+ */
66
+ session(sessionId: string): this;
67
+ /**
68
+ * Set user ID for user-specific memories
69
+ */
70
+ user(userId: string): this;
71
+ /**
72
+ * Get relevant context BEFORE your LLM call
73
+ *
74
+ * @param query - What you want to know / user question
75
+ * @returns Context string and raw results
76
+ *
77
+ * @example
78
+ * ```typescript
79
+ * const { context, results, count } = await whisper.getContext(
80
+ * "What are user's preferences?",
81
+ * { userId: "user-123" }
82
+ * );
83
+ *
84
+ * // Results: [
85
+ * // { content: "User prefers dark mode", type: "preference", score: 0.95 },
86
+ * // { content: "Allergic to nuts", type: "factual", score: 0.89 }
87
+ * // ]
88
+ * ```
89
+ */
90
+ getContext(query: string, options?: {
91
+ userId?: string;
92
+ sessionId?: string;
93
+ project?: string;
94
+ limit?: number;
95
+ }): Promise<ContextResult>;
96
+ /**
97
+ * Remember what happened AFTER your LLM response
98
+ *
99
+ * Fire-and-forget - doesn't block your response
100
+ *
101
+ * @param content - What your LLM responded with
102
+ * @returns Promise that resolves when stored (or fails silently)
103
+ *
104
+ * @example
105
+ * ```typescript
106
+ * const llmResponse = "I've set your theme to dark mode and removed nuts from recommendations.";
107
+ *
108
+ * await whisper.remember(llmResponse, { userId: "user-123" });
109
+ * // → Auto-extracts: "theme set to dark mode", "nut allergy"
110
+ * // → Stored as preference and factual memories
111
+ * ```
112
+ */
113
+ remember(content: string, options?: {
114
+ userId?: string;
115
+ sessionId?: string;
116
+ project?: string;
117
+ }): Promise<{
118
+ success: boolean;
119
+ memoryId?: string;
120
+ }>;
121
+ /**
122
+ * Alias for remember() - same thing
123
+ */
124
+ capture(content: string, options?: {
125
+ userId?: string;
126
+ sessionId?: string;
127
+ project?: string;
128
+ }): Promise<{
129
+ success: boolean;
130
+ memoryId?: string;
131
+ }>;
132
+ /**
133
+ * Capture from multiple messages (e.g., full conversation)
134
+ */
135
+ captureSession(messages: Array<{
136
+ role: string;
137
+ content: string;
138
+ }>, options?: {
139
+ userId?: string;
140
+ sessionId?: string;
141
+ project?: string;
142
+ }): Promise<{
143
+ success: boolean;
144
+ extracted: number;
145
+ }>;
146
+ /**
147
+ * Direct access to WhisperContext for advanced usage
148
+ */
149
+ raw(): WhisperContext;
150
+ }
151
+
1
152
  /**
2
153
  * Whisper Context SDK
3
154
  * TypeScript SDK for the Whisper Context API
@@ -170,16 +321,19 @@ declare class WhisperContext {
170
321
  addMemory(params: {
171
322
  project?: string;
172
323
  content: string;
173
- memory_type?: "factual" | "preference" | "event" | "relationship" | "opinion" | "goal" | "instruction";
324
+ memory_type?: "factual" | "episodic" | "semantic" | "procedural" | "preference" | "event" | "relationship" | "opinion" | "goal" | "instruction";
174
325
  user_id?: string;
175
326
  session_id?: string;
176
327
  agent_id?: string;
177
328
  importance?: number;
178
329
  metadata?: Record<string, any>;
179
330
  expires_in_seconds?: number;
331
+ allow_legacy_fallback?: boolean;
180
332
  }): Promise<{
181
333
  id: string;
182
334
  success: boolean;
335
+ path: "sota" | "legacy";
336
+ fallback_used: boolean;
183
337
  }>;
184
338
  searchMemories(params: {
185
339
  project?: string;
@@ -399,6 +553,56 @@ declare class WhisperContext {
399
553
  start_date?: string;
400
554
  end_date?: string;
401
555
  }): Promise<any>;
556
+ /**
557
+ * Semantic search over raw documents without pre-indexing.
558
+ * Send file contents/summaries directly — the API embeds them in-memory and ranks by similarity.
559
+ * Perfect for AI agents to semantically explore a codebase on-the-fly.
560
+ */
561
+ semanticSearch(params: {
562
+ query: string;
563
+ documents: Array<{
564
+ id: string;
565
+ content: string;
566
+ }>;
567
+ top_k?: number;
568
+ threshold?: number;
569
+ }): Promise<{
570
+ results: Array<{
571
+ id: string;
572
+ score: number;
573
+ content: string;
574
+ snippet: string;
575
+ }>;
576
+ total_searched: number;
577
+ total_returned: number;
578
+ query: string;
579
+ latency_ms: number;
580
+ }>;
581
+ searchFiles(params: {
582
+ query: string;
583
+ path?: string;
584
+ mode?: "content" | "filename" | "both";
585
+ file_types?: string[];
586
+ max_results?: number;
587
+ context_lines?: number;
588
+ case_sensitive?: boolean;
589
+ }): Promise<{
590
+ results: Array<{
591
+ file: string;
592
+ matches: Array<{
593
+ line: number;
594
+ content: string;
595
+ context_before: string[];
596
+ context_after: string[];
597
+ }>;
598
+ }>;
599
+ total_files: number;
600
+ total_matches: number;
601
+ search_path: string;
602
+ mode: string;
603
+ latency_ms: number;
604
+ engine: "ripgrep" | "node";
605
+ }>;
402
606
  getCostSavings(params?: {
403
607
  project?: string;
404
608
  start_date?: string;
@@ -434,6 +638,8 @@ declare class WhisperContext {
434
638
  add: (params: Parameters<WhisperContext["addMemory"]>[0]) => Promise<{
435
639
  id: string;
436
640
  success: boolean;
641
+ path: "sota" | "legacy";
642
+ fallback_used: boolean;
437
643
  }>;
438
644
  search: (params: Parameters<WhisperContext["searchMemories"]>[0]) => Promise<any>;
439
645
  searchSOTA: (params: Parameters<WhisperContext["searchMemoriesSOTA"]>[0]) => Promise<any>;
@@ -556,4 +762,4 @@ declare class WhisperContext {
556
762
  };
557
763
  }
558
764
 
559
- export { type Memory, type Project, type QueryParams, type QueryResult, type Source, type WhisperConfig, WhisperContext, WhisperError, type WhisperErrorCode, WhisperContext as default };
765
+ export { type Memory, type Project, type QueryParams, type QueryResult, type Source, Whisper, type WhisperConfig, WhisperContext, Whisper as WhisperDefault, WhisperError, type WhisperErrorCode, WhisperContext as default };
package/index.d.ts CHANGED
@@ -1,3 +1,154 @@
1
+ /**
2
+ * Whisper - Simple Memory Layer for AI Agents
3
+ *
4
+ * Two core methods:
5
+ * - getContext(): Retrieve relevant context before LLM call
6
+ * - capture(): Extract and store memories after LLM response
7
+ *
8
+ * Zero magic - you control when to get context and when to capture
9
+ */
10
+
11
+ interface WhisperOptions extends WhisperConfig {
12
+ /**
13
+ * Maximum context results to retrieve.
14
+ * Default: 10
15
+ */
16
+ contextLimit?: number;
17
+ /**
18
+ * Which memory types to use.
19
+ * Default: all 7 types
20
+ */
21
+ memoryTypes?: Array<"factual" | "preference" | "event" | "relationship" | "opinion" | "goal" | "instruction">;
22
+ /**
23
+ * Prefix for context injection.
24
+ * Default: "Relevant context:"
25
+ */
26
+ contextPrefix?: string;
27
+ }
28
+ interface ContextResult {
29
+ context: string;
30
+ results: QueryResult["results"];
31
+ count: number;
32
+ }
33
+ /**
34
+ * Simple, transparent memory layer
35
+ *
36
+ * @example
37
+ * ```typescript
38
+ * import { Whisper } from '@usewhisper/sdk';
39
+ *
40
+ * const whisper = new Whisper({
41
+ * apiKey: process.env.WHISPER_KEY,
42
+ * project: 'my-app'
43
+ * });
44
+ *
45
+ * // BEFORE: Get relevant context
46
+ * const { context, results } = await whisper.getContext("What does user prefer?");
47
+ *
48
+ * // Inject context into your LLM prompt
49
+ * const prompt = `${context}\n\nUser: What does user prefer?`;
50
+ * const response = await llm.complete(prompt);
51
+ *
52
+ * // AFTER: Capture what happened
53
+ * await whisper.capture(response);
54
+ * // → Memories extracted & stored (async)
55
+ * ```
56
+ */
57
+ declare class Whisper {
58
+ private client;
59
+ private options;
60
+ private sessionId?;
61
+ private userId?;
62
+ constructor(options: WhisperOptions);
63
+ /**
64
+ * Set session ID for conversation tracking
65
+ */
66
+ session(sessionId: string): this;
67
+ /**
68
+ * Set user ID for user-specific memories
69
+ */
70
+ user(userId: string): this;
71
+ /**
72
+ * Get relevant context BEFORE your LLM call
73
+ *
74
+ * @param query - What you want to know / user question
75
+ * @returns Context string and raw results
76
+ *
77
+ * @example
78
+ * ```typescript
79
+ * const { context, results, count } = await whisper.getContext(
80
+ * "What are user's preferences?",
81
+ * { userId: "user-123" }
82
+ * );
83
+ *
84
+ * // Results: [
85
+ * // { content: "User prefers dark mode", type: "preference", score: 0.95 },
86
+ * // { content: "Allergic to nuts", type: "factual", score: 0.89 }
87
+ * // ]
88
+ * ```
89
+ */
90
+ getContext(query: string, options?: {
91
+ userId?: string;
92
+ sessionId?: string;
93
+ project?: string;
94
+ limit?: number;
95
+ }): Promise<ContextResult>;
96
+ /**
97
+ * Remember what happened AFTER your LLM response
98
+ *
99
+ * Fire-and-forget - doesn't block your response
100
+ *
101
+ * @param content - What your LLM responded with
102
+ * @returns Promise that resolves when stored (or fails silently)
103
+ *
104
+ * @example
105
+ * ```typescript
106
+ * const llmResponse = "I've set your theme to dark mode and removed nuts from recommendations.";
107
+ *
108
+ * await whisper.remember(llmResponse, { userId: "user-123" });
109
+ * // → Auto-extracts: "theme set to dark mode", "nut allergy"
110
+ * // → Stored as preferences
111
+ * ```
112
+ */
113
+ remember(content: string, options?: {
114
+ userId?: string;
115
+ sessionId?: string;
116
+ project?: string;
117
+ }): Promise<{
118
+ success: boolean;
119
+ memoryId?: string;
120
+ }>;
121
+ /**
122
+ * Alias for remember() - same thing
123
+ */
124
+ capture(content: string, options?: {
125
+ userId?: string;
126
+ sessionId?: string;
127
+ project?: string;
128
+ }): Promise<{
129
+ success: boolean;
130
+ memoryId?: string;
131
+ }>;
132
+ /**
133
+ * Capture from multiple messages (e.g., full conversation)
134
+ */
135
+ captureSession(messages: Array<{
136
+ role: string;
137
+ content: string;
138
+ }>, options?: {
139
+ userId?: string;
140
+ sessionId?: string;
141
+ project?: string;
142
+ }): Promise<{
143
+ success: boolean;
144
+ extracted: number;
145
+ }>;
146
+ /**
147
+ * Direct access to WhisperContext for advanced usage
148
+ */
149
+ raw(): WhisperContext;
150
+ }
151
+
1
152
  /**
2
153
  * Whisper Context SDK
3
154
  * TypeScript SDK for the Whisper Context API
@@ -170,16 +321,19 @@ declare class WhisperContext {
170
321
  addMemory(params: {
171
322
  project?: string;
172
323
  content: string;
173
- memory_type?: "factual" | "preference" | "event" | "relationship" | "opinion" | "goal" | "instruction";
324
+ memory_type?: "factual" | "episodic" | "semantic" | "procedural" | "preference" | "event" | "relationship" | "opinion" | "goal" | "instruction";
174
325
  user_id?: string;
175
326
  session_id?: string;
176
327
  agent_id?: string;
177
328
  importance?: number;
178
329
  metadata?: Record<string, any>;
179
330
  expires_in_seconds?: number;
331
+ allow_legacy_fallback?: boolean;
180
332
  }): Promise<{
181
333
  id: string;
182
334
  success: boolean;
335
+ path: "sota" | "legacy";
336
+ fallback_used: boolean;
183
337
  }>;
184
338
  searchMemories(params: {
185
339
  project?: string;
@@ -399,6 +553,56 @@ declare class WhisperContext {
399
553
  start_date?: string;
400
554
  end_date?: string;
401
555
  }): Promise<any>;
556
+ /**
557
+ * Semantic search over raw documents without pre-indexing.
558
+ * Send file contents/summaries directly — the API embeds them in-memory and ranks by similarity.
559
+ * Perfect for AI agents to semantically explore a codebase on-the-fly.
560
+ */
561
+ semanticSearch(params: {
562
+ query: string;
563
+ documents: Array<{
564
+ id: string;
565
+ content: string;
566
+ }>;
567
+ top_k?: number;
568
+ threshold?: number;
569
+ }): Promise<{
570
+ results: Array<{
571
+ id: string;
572
+ score: number;
573
+ content: string;
574
+ snippet: string;
575
+ }>;
576
+ total_searched: number;
577
+ total_returned: number;
578
+ query: string;
579
+ latency_ms: number;
580
+ }>;
581
+ searchFiles(params: {
582
+ query: string;
583
+ path?: string;
584
+ mode?: "content" | "filename" | "both";
585
+ file_types?: string[];
586
+ max_results?: number;
587
+ context_lines?: number;
588
+ case_sensitive?: boolean;
589
+ }): Promise<{
590
+ results: Array<{
591
+ file: string;
592
+ matches: Array<{
593
+ line: number;
594
+ content: string;
595
+ context_before: string[];
596
+ context_after: string[];
597
+ }>;
598
+ }>;
599
+ total_files: number;
600
+ total_matches: number;
601
+ search_path: string;
602
+ mode: string;
603
+ latency_ms: number;
604
+ engine: "ripgrep" | "node";
605
+ }>;
402
606
  getCostSavings(params?: {
403
607
  project?: string;
404
608
  start_date?: string;
@@ -434,6 +638,8 @@ declare class WhisperContext {
434
638
  add: (params: Parameters<WhisperContext["addMemory"]>[0]) => Promise<{
435
639
  id: string;
436
640
  success: boolean;
641
+ path: "sota" | "legacy";
642
+ fallback_used: boolean;
437
643
  }>;
438
644
  search: (params: Parameters<WhisperContext["searchMemories"]>[0]) => Promise<any>;
439
645
  searchSOTA: (params: Parameters<WhisperContext["searchMemoriesSOTA"]>[0]) => Promise<any>;
@@ -556,4 +762,4 @@ declare class WhisperContext {
556
762
  };
557
763
  }
558
764
 
559
- export { type Memory, type Project, type QueryParams, type QueryResult, type Source, type WhisperConfig, WhisperContext, WhisperError, type WhisperErrorCode, WhisperContext as default };
765
+ export { type Memory, type Project, type QueryParams, type QueryResult, type Source, Whisper, type WhisperConfig, WhisperContext, Whisper as WhisperDefault, WhisperError, type WhisperErrorCode, WhisperContext as default };