@bmx-labs/chat-widget 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. package/README.md +11 -16
  2. package/dist/adapters/context/PineconeRAGAdapter.d.ts +58 -23
  3. package/dist/adapters/context/debug.d.ts +11 -0
  4. package/dist/adapters/context/fetchers.d.ts +40 -0
  5. package/dist/adapters/context/index.d.ts +0 -1
  6. package/dist/adapters/context/processing.d.ts +96 -0
  7. package/dist/adapters/index.d.ts +1 -1
  8. package/dist/index.d.ts +1 -1
  9. package/dist/index.js +1 -1
  10. package/dist/types.d.ts +57 -0
  11. package/package.json +10 -9
  12. package/dist/adapters/AnthropicAdapter.d.ts +0 -16
  13. package/dist/adapters/ContextAdapter.d.ts +0 -19
  14. package/dist/adapters/CustomAPIAdapter.d.ts +0 -20
  15. package/dist/adapters/KnowledgeBaseAdapter.d.ts +0 -20
  16. package/dist/adapters/MockAdapter.d.ts +0 -6
  17. package/dist/adapters/MorphexAdapter.d.ts +0 -16
  18. package/dist/adapters/OpenAIAdapter.d.ts +0 -17
  19. package/dist/adapters/RAGAdapter.d.ts +0 -29
  20. package/dist/adapters/RestRAGAdapter.d.ts +0 -9
  21. package/dist/adapters/TestAdapter.d.ts +0 -6
  22. package/dist/adapters/context/ContextAdapter.d.ts +0 -19
  23. package/dist/adapters/context/KnowledgeBaseAdapter.d.ts +0 -20
  24. package/dist/adapters/context/MorphexAdapter.d.ts +0 -16
  25. package/dist/adapters/context/RAGAdapter.d.ts +0 -29
  26. package/dist/adapters/rest.d.ts +0 -7
  27. package/dist/components/LazyComponents.d.ts +0 -3
  28. package/dist/components/Orb.d.ts +0 -8
  29. package/dist/components/OrbButton.d.ts +0 -10
  30. package/dist/components/SimpleIridescence.d.ts +0 -8
  31. package/dist/components/SimpleOrb.d.ts +0 -8
  32. package/dist/components/ogl/DarkVeil.d.ts +0 -11
  33. package/dist/framer.js +0 -1
  34. package/dist/index.js.map +0 -1
  35. package/dist/ogl.js +0 -1
  36. package/dist/styles.css.map +0 -1
  37. package/dist/styles.js.map +0 -1
  38. package/dist/vendors.js +0 -1
package/README.md CHANGED
@@ -8,6 +8,14 @@ A reusable chat AI widget for React/Next.js apps. Fixed bottom-right with highes
8
8
  npm i @bmx-labs/chat-widget react react-dom
9
9
  ```
10
10
 
11
+ ```bash
12
+ yarn add @bmx-labs/chat-widget react react-dom
13
+ ```
14
+
15
+ ```bash
16
+ pnpm add @bmx-labs/chat-widget react react-dom
17
+ ```
18
+
11
19
  ## Usage
12
20
 
13
21
  ```tsx
@@ -43,7 +51,7 @@ export default function App() {
43
51
  type PineconeRAGAdapterConfig = {
44
52
  openAIApiKey: string;
45
53
  pineconeApiKey: string;
46
- pineconeIndexUrl: string; // e.g. https://bmx-xxxx.svc.region.pinecone.io
54
+ pineconeIndexUrl: string;
47
55
  namespace?: string; // default: 'bmx-docs'
48
56
  topK?: number; // default: 4
49
57
  chatModel?: string; // default: gpt-4o-mini
@@ -68,23 +76,10 @@ type PineconeRAGAdapterConfig = {
68
76
  ## Development
69
77
 
70
78
  ```bash
71
- npm install
72
- npm run build
73
- npm run dev
79
+ pnpm i
80
+ pnpm dev
74
81
  ```
75
82
 
76
- ### GitHub Actions (Publish on tag)
77
-
78
- 1. Create an npm token and add it as `NPM_TOKEN` in GitHub repository secrets.
79
- 2. Push a tag to trigger publish:
80
-
81
- ```bash
82
- git tag v0.1.1
83
- git push --tags
84
- ```
85
-
86
- The workflow at `.github/workflows/publish.yml` will build and publish to npm.
87
-
88
83
  ## License
89
84
 
90
85
  This project is licensed as **GNU AGPLv3**.
@@ -1,39 +1,74 @@
1
- import type { BmxChatApiAdapter, ChatMessage } from "../../types";
2
- export interface PineconeRAGAdapterConfig {
3
- openAIApiKey: string;
4
- pineconeApiKey: string;
5
- /** Example: https://myindex-abc123.svc.us-east1-aws.pinecone.io */
6
- pineconeIndexUrl: string;
7
- namespace?: string;
8
- topK?: number;
9
- chatModel?: string;
10
- embeddingModel?: string;
11
- openAIBaseURL?: string;
12
- enableStreaming?: boolean;
13
- maxContextTokens?: number;
14
- minScoreThreshold?: number;
15
- debug?: boolean;
16
- }
1
+ import type { BmxChatApiAdapter, ChatMessage, PineconeRAGAdapterConfig } from "../../types";
2
+ /**
3
+ * Pinecone RAG Adapter.
4
+ *
5
+ * This adapter uses Pinecone for vector search and OpenAI for chat completion.
6
+ * It embeds the user's query, retrieves the most relevant context from Pinecone,
7
+ * builds a system prompt with the context, and calls OpenAI chat to generate the final response.
8
+ */
17
9
  export declare class PineconeRAGAdapter implements BmxChatApiAdapter {
10
+ /**
11
+ * OpenAI API key.
12
+ */
18
13
  private openAIApiKey;
14
+ /**
15
+ * Pinecone API key.
16
+ */
19
17
  private pineconeApiKey;
18
+ /**
19
+ * Pinecone index URL.
20
+ */
20
21
  private pineconeIndexUrl;
22
+ /**
23
+ * Pinecone namespace.
24
+ */
21
25
  private namespace;
26
+ /**
27
+ * Number of relevant chunks to retrieve.
28
+ */
22
29
  private topK;
30
+ /**
31
+ * OpenAI model for chat completion.
32
+ */
23
33
  private chatModel;
34
+ /**
35
+ * OpenAI model for embeddings.
36
+ */
24
37
  private embeddingModel;
38
+ /**
39
+ * OpenAI API base URL.
40
+ */
25
41
  private openAIBaseURL;
26
- private enableStreaming;
42
+ /**
43
+ * Maximum context tokens.
44
+ */
27
45
  private maxContextTokens;
46
+ /**
47
+ * Minimum score threshold.
48
+ */
28
49
  private minScoreThreshold;
50
+ /**
51
+ * Logger.
52
+ */
29
53
  private logger;
54
+ /**
55
+ * Dynamic context.
56
+ */
57
+ private dynamicContext?;
58
+ /**
59
+ * Initialize the adapter.
60
+ *
61
+ * @param {PineconeRAGAdapterConfig} cfg - The configuration.
62
+ */
30
63
  constructor(cfg: PineconeRAGAdapterConfig);
31
- private embed;
32
- private queryPinecone;
33
- private preprocessQuery;
34
- private estimateTokens;
35
- private truncateContext;
36
- private buildSystemPrompt;
64
+ /**
65
+ * Send a message to the adapter.
66
+ *
67
+ * @param {ChatMessage[]} history - The history of messages.
68
+ * @param {string} input - The input message.
69
 + * @param {AbortSignal} [options.signal] - The abort signal.
70
+ * @returns {Promise<ChatMessage>} The response message.
71
+ */
37
72
  sendMessage(history: ChatMessage[], input: string, options?: {
38
73
  signal?: AbortSignal;
39
74
  }): Promise<ChatMessage>;
@@ -0,0 +1,11 @@
1
+ import { PineconeQueryMatch } from "../../types";
2
+ import { Logger } from "../../utils/logger";
3
+ /**
4
+ * Debug logging for the filtered matches.
5
+ *
6
+ * @param {string} processedInput - The processed input.
7
 + * @param {PineconeQueryMatch[]} allMatches - All retrieved matches, before filtering.
8
+ * @param {PineconeQueryMatch[]} filteredMatches - The filtered matches.
9
+ * @param {Logger} logger - The logger.
10
+ */
11
+ export declare function debugFilterMatches(processedInput: string, allMatches: PineconeQueryMatch[], filteredMatches: PineconeQueryMatch[], logger: Logger): void;
@@ -0,0 +1,40 @@
1
+ import { ChatCompletionResult, OpenAIModel, PineconeQueryMatch } from "../../types";
2
+ import { Logger } from "../../utils/logger";
3
+ /**
4
+ * Embeds text using OpenAI embeddings.
5
+ *
6
+ * @param {string} text - The text to embed.
7
+ * @param {string} openAIBaseURL - The OpenAI API base URL.
8
+ * @param {string} openAIApiKey - The OpenAI API key.
9
+ * @param {string} embeddingModel - The OpenAI embedding model.
10
+ * @param {AbortSignal} signal - The abort signal.
11
+ * @returns {Promise<number[]>} The embedding.
12
+ */
13
+ export declare function getEmbeddedVector(text: string, openAIBaseURL: string, openAIApiKey: string, embeddingModel: string, signal?: AbortSignal): Promise<number[]>;
14
+ /**
15
+ * Calls OpenAI Chat Completions API and returns the assistant content.
16
+ *
17
+ * @param {string} openAIBaseURL - The OpenAI API base URL.
18
+ * @param {string} openAIApiKey - The OpenAI API key.
19
+ * @param {OpenAIModel} model - The OpenAI chat model.
20
+ * @param {Array<{role: string; content: string}>} messages - The chat messages.
21
+ * @param {Logger} logger - The logger.
22
+ * @param {AbortSignal} signal - Abort signal.
23
+ * @returns {Promise<ChatCompletionResult>} The assistant content.
24
+ */
25
+ export declare function getChatCompletion(openAIBaseURL: string, openAIApiKey: string, model: OpenAIModel, messages: Array<{
26
+ role: string;
27
+ content: string;
28
+ }>, logger: Logger, signal?: AbortSignal): Promise<ChatCompletionResult>;
29
+ /**
30
+ * Queries Pinecone for the most relevant matches.
31
+ *
32
+ * @param {number[]} vector - The embedding vector.
33
+ * @param {string} pineconeIndexUrl - The Pinecone index URL.
34
+ * @param {string} pineconeApiKey - The Pinecone API key.
35
+ * @param {number} topK - The number of relevant matches to retrieve.
36
+ * @param {string} namespace - The Pinecone namespace.
37
+ * @param {AbortSignal} signal - The abort signal.
38
+ * @returns {Promise<PineconeQueryMatch[]>} The relevant matches.
39
+ */
40
+ export declare function queryPinecone(vector: number[], pineconeIndexUrl: string, pineconeApiKey: string, topK: number, namespace: string, signal?: AbortSignal): Promise<PineconeQueryMatch[]>;
@@ -1,2 +1 @@
1
- export { MorphexAdapter } from "./MorphexAdapter";
2
1
  export { PineconeRAGAdapter } from "./PineconeRAGAdapter";
@@ -0,0 +1,96 @@
1
+ import { ChatMessage, PineconeQueryMatch } from "../../types";
2
+ import { Logger } from "../../utils/logger";
3
+ /**
4
+ * Preprocesses the query for better retrieval.
5
+ *
6
+ * @param {string} query - The query.
7
+ * @returns {string} The preprocessed query.
8
+ */
9
+ export declare function preprocessQuery(query: string): string;
10
+ /**
11
+ * Optionally fetches dynamic (live) context and appends it to the retrieved
12
+ * Pinecone matches before prompting the LLM.
13
+ *
14
+ * Typical use cases include live token prices, on-chain metrics, or any
15
+ * time-sensitive information you want the assistant to reference. This keeps
16
+ * the adapter decoupled from arbitrary APIs while allowing the host app to
17
+ * supply additional context blocks on demand.
18
+ *
19
+ * Behavior:
20
+ * - If `dynamicContext` is not provided, returns `filteredMatches` unchanged.
21
+ * - If the provider returns blocks, they are converted into synthetic matches
22
+ * (scored as 1 to prioritize) and merged with existing matches.
23
+ * - The merged list is truncated by `maxContextTokens` to control cost.
24
+ * - Any errors are logged and the original matches are returned (no throw).
25
+ *
26
+ * @param {string} processedInput - A normalized version of the user's input (e.g., after query expansion)
27
+ * @param {PineconeQueryMatch[]} filteredMatches - Matches already filtered by score and topK
28
+ * @param {number} maxContextTokens - Hard cap for context tokens to keep OpenAI cost bounded
29
+ * @param {Logger} logger - Logger instance for structured (optional) debug output
30
+ * @param {Function} dynamicContext - Optional async provider that returns additional context blocks
31
 + * @param {{signal?: AbortSignal}} [options] - Optional options bag carrying an AbortSignal for cancellation
32
+ * @returns {PineconeQueryMatch[]} The merged and truncated list of matches ready for prompting
33
+ */
34
+ export declare function processDynamicContext(processedInput: string, filteredMatches: PineconeQueryMatch[], maxContextTokens: number, logger: Logger, dynamicContext?: (input: string, options?: {
35
+ signal?: AbortSignal;
36
+ }) => Promise<Array<{
37
+ title?: string;
38
+ url?: string;
39
+ text: string;
40
+ }>>, options?: {
41
+ signal?: AbortSignal;
42
+ }): Promise<PineconeQueryMatch[]>;
43
+ /**
44
+ * Coarse token estimator used for quick context-size budgeting.
45
+ *
46
+ * The heuristic assumes ~4 characters per token. This is intentionally simple
47
+ * and fast; for strict budgeting consider a model-specific tokenizer if needed.
48
+ *
49
+ * @param {string} text - Arbitrary text to estimate token count for
50
+ * @returns {number} Estimated token count
51
+ */
52
+ export declare function estimateTokens(text: string): number;
53
+ /**
54
+ * Truncates a list of matches so that the combined preview text fits within a
55
+ * specified token budget. Preserves order and partially truncates the last
56
+ * match if needed (and if there is enough remaining budget to be meaningful).
57
+ *
58
+ * Rationale: Keeping the most relevant matches at the top and avoiding hard cuts
59
+ * that remove all context for the last item often yields better LLM answers.
60
+ *
61
+ * @param {PineconeQueryMatch[]} matches - Ordered list of matches (highest priority first)
62
+ * @param {number} maxContextTokens - Total token budget for all match texts combined
63
+ * @returns {PineconeQueryMatch[]} A truncated list of matches within the token budget
64
+ */
65
+ export declare function truncateContext(matches: PineconeQueryMatch[], maxContextTokens: number): PineconeQueryMatch[];
66
+ /**
67
 + * Builds a graceful refusal response for when no relevant matches are found.
68
+ *
69
+ * @param {Logger} logger - The logger.
70
+ */
71
+ export declare function createNoMatchesResponse(logger: Logger): ChatMessage;
72
+ /**
73
+ * Extracts sources from the matches.
74
+ *
75
+ * @param {PineconeQueryMatch[]} matches - The matches.
76
+ * @returns {Array<{ title: string; url: string }>} The sources.
77
+ */
78
+ export declare function extractSources(matches: PineconeQueryMatch[]): Array<{
79
+ title: string;
80
+ url: string;
81
+ }>;
82
+ /**
83
+ * Ensures that the content has a Sources section with real links from context.
84
+ *
85
+ * @param {string} content - The content.
86
+ * @param {PineconeQueryMatch[]} finalMatches - The final matches.
87
+ * @returns {string} The content with the Sources section.
88
+ */
89
+ export declare function ensureSources(content: string, finalMatches: PineconeQueryMatch[]): string;
90
+ /**
91
+ * Builds the system prompt for the LLM.
92
+ *
93
+ * @param {PineconeQueryMatch[]} matches - The matches.
94
+ * @returns {string} The system prompt.
95
+ */
96
+ export declare function buildSystemPrompt(matches: PineconeQueryMatch[]): string;
@@ -1,2 +1,2 @@
1
1
  export { createMockAdapter } from "./mock";
2
- export { MorphexAdapter, PineconeRAGAdapter } from "./context";
2
+ export { PineconeRAGAdapter } from "./context";
package/dist/index.d.ts CHANGED
@@ -1,3 +1,3 @@
1
1
  export { BmxChatBot } from "./components/BmxChatBot";
2
2
  export type { BmxChatApiAdapter, ChatMessage, ChatRole } from "./types";
3
- export { createMockAdapter, MorphexAdapter, PineconeRAGAdapter, } from "./adapters";
3
+ export { createMockAdapter, PineconeRAGAdapter } from "./adapters";