@push.rocks/smartai 0.13.3 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/dist_ts/00_commitinfo_data.js +3 -3
  2. package/dist_ts/index.d.ts +6 -11
  3. package/dist_ts/index.js +6 -12
  4. package/dist_ts/plugins.d.ts +10 -15
  5. package/dist_ts/plugins.js +13 -19
  6. package/dist_ts/smartai.classes.smartai.d.ts +7 -0
  7. package/dist_ts/smartai.classes.smartai.js +51 -0
  8. package/dist_ts/smartai.interfaces.d.ts +41 -0
  9. package/dist_ts/smartai.interfaces.js +2 -0
  10. package/dist_ts/smartai.middleware.anthropic.d.ts +7 -0
  11. package/dist_ts/smartai.middleware.anthropic.js +36 -0
  12. package/dist_ts/smartai.provider.ollama.d.ts +8 -0
  13. package/dist_ts/smartai.provider.ollama.js +378 -0
  14. package/dist_ts_audio/index.d.ts +9 -0
  15. package/dist_ts_audio/index.js +15 -0
  16. package/dist_ts_audio/plugins.d.ts +2 -0
  17. package/dist_ts_audio/plugins.js +3 -0
  18. package/dist_ts_document/index.d.ts +11 -0
  19. package/dist_ts_document/index.js +45 -0
  20. package/dist_ts_document/plugins.d.ts +3 -0
  21. package/dist_ts_document/plugins.js +4 -0
  22. package/dist_ts_image/index.d.ts +46 -0
  23. package/dist_ts_image/index.js +110 -0
  24. package/dist_ts_image/plugins.d.ts +3 -0
  25. package/dist_ts_image/plugins.js +4 -0
  26. package/dist_ts_research/index.d.ts +19 -0
  27. package/dist_ts_research/index.js +98 -0
  28. package/dist_ts_research/plugins.d.ts +2 -0
  29. package/dist_ts_research/plugins.js +3 -0
  30. package/dist_ts_vision/index.d.ts +8 -0
  31. package/dist_ts_vision/index.js +21 -0
  32. package/dist_ts_vision/plugins.d.ts +2 -0
  33. package/dist_ts_vision/plugins.js +3 -0
  34. package/package.json +50 -22
  35. package/readme.hints.md +34 -88
  36. package/readme.md +284 -547
  37. package/ts/00_commitinfo_data.ts +2 -2
  38. package/ts/index.ts +8 -11
  39. package/ts/plugins.ts +19 -35
  40. package/ts/smartai.classes.smartai.ts +51 -0
  41. package/ts/smartai.interfaces.ts +53 -0
  42. package/ts/smartai.middleware.anthropic.ts +38 -0
  43. package/ts/smartai.provider.ollama.ts +426 -0
  44. package/ts_audio/index.ts +24 -0
  45. package/ts_audio/plugins.ts +2 -0
  46. package/ts_document/index.ts +61 -0
  47. package/ts_document/plugins.ts +3 -0
  48. package/ts_image/index.ts +147 -0
  49. package/ts_image/plugins.ts +3 -0
  50. package/ts_research/index.ts +120 -0
  51. package/ts_research/plugins.ts +2 -0
  52. package/ts_vision/index.ts +29 -0
  53. package/ts_vision/plugins.ts +2 -0
  54. package/dist_ts/abstract.classes.multimodal.d.ts +0 -212
  55. package/dist_ts/abstract.classes.multimodal.js +0 -43
  56. package/dist_ts/classes.conversation.d.ts +0 -31
  57. package/dist_ts/classes.conversation.js +0 -150
  58. package/dist_ts/classes.smartai.d.ts +0 -59
  59. package/dist_ts/classes.smartai.js +0 -139
  60. package/dist_ts/classes.tts.d.ts +0 -6
  61. package/dist_ts/classes.tts.js +0 -10
  62. package/dist_ts/interfaces.d.ts +0 -1
  63. package/dist_ts/interfaces.js +0 -2
  64. package/dist_ts/paths.d.ts +0 -2
  65. package/dist_ts/paths.js +0 -4
  66. package/dist_ts/provider.anthropic.d.ts +0 -48
  67. package/dist_ts/provider.anthropic.js +0 -369
  68. package/dist_ts/provider.elevenlabs.d.ts +0 -43
  69. package/dist_ts/provider.elevenlabs.js +0 -64
  70. package/dist_ts/provider.exo.d.ts +0 -40
  71. package/dist_ts/provider.exo.js +0 -116
  72. package/dist_ts/provider.groq.d.ts +0 -39
  73. package/dist_ts/provider.groq.js +0 -178
  74. package/dist_ts/provider.mistral.d.ts +0 -61
  75. package/dist_ts/provider.mistral.js +0 -288
  76. package/dist_ts/provider.ollama.d.ts +0 -141
  77. package/dist_ts/provider.ollama.js +0 -529
  78. package/dist_ts/provider.openai.d.ts +0 -62
  79. package/dist_ts/provider.openai.js +0 -403
  80. package/dist_ts/provider.perplexity.d.ts +0 -37
  81. package/dist_ts/provider.perplexity.js +0 -215
  82. package/dist_ts/provider.xai.d.ts +0 -52
  83. package/dist_ts/provider.xai.js +0 -160
  84. package/ts/abstract.classes.multimodal.ts +0 -240
  85. package/ts/classes.conversation.ts +0 -176
  86. package/ts/classes.smartai.ts +0 -187
  87. package/ts/classes.tts.ts +0 -15
  88. package/ts/interfaces.ts +0 -0
  89. package/ts/paths.ts +0 -4
  90. package/ts/provider.anthropic.ts +0 -446
  91. package/ts/provider.elevenlabs.ts +0 -116
  92. package/ts/provider.exo.ts +0 -155
  93. package/ts/provider.groq.ts +0 -219
  94. package/ts/provider.mistral.ts +0 -352
  95. package/ts/provider.ollama.ts +0 -705
  96. package/ts/provider.openai.ts +0 -462
  97. package/ts/provider.perplexity.ts +0 -259
  98. package/ts/provider.xai.ts +0 -214
@@ -1,259 +0,0 @@
1
- import * as plugins from './plugins.js';
2
- import * as paths from './paths.js';
3
- import { MultiModalModel } from './abstract.classes.multimodal.js';
4
- import type {
5
- ChatOptions,
6
- ChatResponse,
7
- ChatMessage,
8
- ResearchOptions,
9
- ResearchResponse,
10
- ImageGenerateOptions,
11
- ImageEditOptions,
12
- ImageResponse
13
- } from './abstract.classes.multimodal.js';
14
-
15
- export interface IPerplexityProviderOptions {
16
- perplexityToken: string;
17
- }
18
-
19
- export class PerplexityProvider extends MultiModalModel {
20
- private options: IPerplexityProviderOptions;
21
-
22
- constructor(optionsArg: IPerplexityProviderOptions) {
23
- super();
24
- this.options = optionsArg;
25
- }
26
-
27
- async start() {
28
- // Initialize any necessary clients or resources
29
- }
30
-
31
- async stop() {}
32
-
33
- public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
34
- // Create a TextDecoder to handle incoming chunks
35
- const decoder = new TextDecoder();
36
- let buffer = '';
37
- let currentMessage: { role: string; content: string; } | null = null;
38
-
39
- // Create a TransformStream to process the input
40
- const transform = new TransformStream<Uint8Array, string>({
41
- async transform(chunk, controller) {
42
- buffer += decoder.decode(chunk, { stream: true });
43
-
44
- // Try to parse complete JSON messages from the buffer
45
- while (true) {
46
- const newlineIndex = buffer.indexOf('\n');
47
- if (newlineIndex === -1) break;
48
-
49
- const line = buffer.slice(0, newlineIndex);
50
- buffer = buffer.slice(newlineIndex + 1);
51
-
52
- if (line.trim()) {
53
- try {
54
- const message = JSON.parse(line);
55
- currentMessage = {
56
- role: message.role || 'user',
57
- content: message.content || '',
58
- };
59
- } catch (e) {
60
- console.error('Failed to parse message:', e);
61
- }
62
- }
63
- }
64
-
65
- // If we have a complete message, send it to Perplexity
66
- if (currentMessage) {
67
- const response = await fetch('https://api.perplexity.ai/chat/completions', {
68
- method: 'POST',
69
- headers: {
70
- 'Authorization': `Bearer ${this.options.perplexityToken}`,
71
- 'Content-Type': 'application/json',
72
- },
73
- body: JSON.stringify({
74
- model: 'mixtral-8x7b-instruct',
75
- messages: [{ role: currentMessage.role, content: currentMessage.content }],
76
- stream: true,
77
- }),
78
- });
79
-
80
- // Process each chunk from Perplexity
81
- const reader = response.body?.getReader();
82
- if (reader) {
83
- try {
84
- while (true) {
85
- const { done, value } = await reader.read();
86
- if (done) break;
87
-
88
- const chunk = new TextDecoder().decode(value);
89
- const lines = chunk.split('\n');
90
-
91
- for (const line of lines) {
92
- if (line.startsWith('data: ')) {
93
- const data = line.slice(6);
94
- if (data === '[DONE]') break;
95
-
96
- try {
97
- const parsed = JSON.parse(data);
98
- const content = parsed.choices[0]?.delta?.content;
99
- if (content) {
100
- controller.enqueue(content);
101
- }
102
- } catch (e) {
103
- console.error('Failed to parse SSE data:', e);
104
- }
105
- }
106
- }
107
- }
108
- } finally {
109
- reader.releaseLock();
110
- }
111
- }
112
-
113
- currentMessage = null;
114
- }
115
- },
116
-
117
- flush(controller) {
118
- if (buffer) {
119
- try {
120
- const message = JSON.parse(buffer);
121
- controller.enqueue(message.content || '');
122
- } catch (e) {
123
- console.error('Failed to parse remaining buffer:', e);
124
- }
125
- }
126
- }
127
- });
128
-
129
- // Connect the input to our transform stream
130
- return input.pipeThrough(transform);
131
- }
132
-
133
- // Implementing the synchronous chat interaction
134
- public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
135
- // Make API call to Perplexity
136
- const response = await fetch('https://api.perplexity.ai/chat/completions', {
137
- method: 'POST',
138
- headers: {
139
- 'Authorization': `Bearer ${this.options.perplexityToken}`,
140
- 'Content-Type': 'application/json',
141
- },
142
- body: JSON.stringify({
143
- model: 'mixtral-8x7b-instruct', // Using Mixtral model
144
- messages: [
145
- { role: 'system', content: optionsArg.systemMessage },
146
- ...optionsArg.messageHistory,
147
- { role: 'user', content: optionsArg.userMessage }
148
- ],
149
- }),
150
- });
151
-
152
- if (!response.ok) {
153
- throw new Error(`Perplexity API error: ${response.statusText}`);
154
- }
155
-
156
- const result = await response.json();
157
-
158
- return {
159
- role: 'assistant' as const,
160
- message: result.choices[0].message.content,
161
- };
162
- }
163
-
164
- public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
165
- throw new Error('Audio generation is not supported by Perplexity.');
166
- }
167
-
168
- public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
169
- throw new Error('Vision tasks are not supported by Perplexity.');
170
- }
171
-
172
- public async document(optionsArg: {
173
- systemMessage: string;
174
- userMessage: string;
175
- pdfDocuments: Uint8Array[];
176
- messageHistory: ChatMessage[];
177
- }): Promise<{ message: any }> {
178
- throw new Error('Document processing is not supported by Perplexity.');
179
- }
180
-
181
- public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
182
- // Perplexity has Sonar models that are optimized for search
183
- // sonar models: sonar, sonar-pro
184
- const model = optionsArg.searchDepth === 'deep' ? 'sonar-pro' : 'sonar';
185
-
186
- try {
187
- const response = await fetch('https://api.perplexity.ai/chat/completions', {
188
- method: 'POST',
189
- headers: {
190
- 'Authorization': `Bearer ${this.options.perplexityToken}`,
191
- 'Content-Type': 'application/json',
192
- },
193
- body: JSON.stringify({
194
- model,
195
- messages: [
196
- {
197
- role: 'system',
198
- content: 'You are a helpful research assistant. Provide accurate information with sources.'
199
- },
200
- {
201
- role: 'user',
202
- content: optionsArg.query
203
- }
204
- ],
205
- temperature: 0.7,
206
- max_tokens: 4000
207
- }),
208
- });
209
-
210
- if (!response.ok) {
211
- throw new Error(`Perplexity API error: ${response.statusText}`);
212
- }
213
-
214
- const result = await response.json();
215
- const answer = result.choices[0].message.content;
216
-
217
- // Parse citations from the response
218
- const sources: Array<{ url: string; title: string; snippet: string }> = [];
219
-
220
- // Perplexity includes citations in the format [1], [2], etc. with sources listed
221
- // This is a simplified parser - could be enhanced based on actual Perplexity response format
222
- if (result.citations) {
223
- for (const citation of result.citations) {
224
- sources.push({
225
- url: citation.url || '',
226
- title: citation.title || '',
227
- snippet: citation.snippet || ''
228
- });
229
- }
230
- }
231
-
232
- return {
233
- answer,
234
- sources,
235
- metadata: {
236
- model,
237
- searchDepth: optionsArg.searchDepth || 'basic'
238
- }
239
- };
240
- } catch (error) {
241
- console.error('Perplexity research error:', error);
242
- throw new Error(`Failed to perform research: ${error.message}`);
243
- }
244
- }
245
-
246
- /**
247
- * Image generation is not supported by Perplexity
248
- */
249
- public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
250
- throw new Error('Image generation is not supported by Perplexity. Please use OpenAI provider for image generation.');
251
- }
252
-
253
- /**
254
- * Image editing is not supported by Perplexity
255
- */
256
- public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
257
- throw new Error('Image editing is not supported by Perplexity. Please use OpenAI provider for image editing.');
258
- }
259
- }
@@ -1,214 +0,0 @@
1
- import * as plugins from './plugins.js';
2
- import * as paths from './paths.js';
3
- import { MultiModalModel } from './abstract.classes.multimodal.js';
4
- import type {
5
- ChatOptions,
6
- ChatResponse,
7
- ChatMessage,
8
- ResearchOptions,
9
- ResearchResponse,
10
- ImageGenerateOptions,
11
- ImageEditOptions,
12
- ImageResponse
13
- } from './abstract.classes.multimodal.js';
14
- import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
15
-
16
- export interface IXAIProviderOptions {
17
- xaiToken: string;
18
- }
19
-
20
- export class XAIProvider extends MultiModalModel {
21
- private options: IXAIProviderOptions;
22
- public openAiApiClient: plugins.openai.default;
23
-
24
- constructor(optionsArg: IXAIProviderOptions) {
25
- super();
26
- this.options = optionsArg;
27
- }
28
-
29
- public async start() {
30
- await super.start();
31
- this.openAiApiClient = new plugins.openai.default({
32
- apiKey: this.options.xaiToken,
33
- baseURL: 'https://api.x.ai/v1',
34
- });
35
- }
36
-
37
- public async stop() {
38
- await super.stop();
39
- }
40
-
41
- public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
42
- // Create a TextDecoder to handle incoming chunks
43
- const decoder = new TextDecoder();
44
- let buffer = '';
45
- let currentMessage: { role: string; content: string; } | null = null;
46
-
47
- // Create a TransformStream to process the input
48
- const transform = new TransformStream<Uint8Array, string>({
49
- async transform(chunk, controller) {
50
- buffer += decoder.decode(chunk, { stream: true });
51
-
52
- // Try to parse complete JSON messages from the buffer
53
- while (true) {
54
- const newlineIndex = buffer.indexOf('\n');
55
- if (newlineIndex === -1) break;
56
-
57
- const line = buffer.slice(0, newlineIndex);
58
- buffer = buffer.slice(newlineIndex + 1);
59
-
60
- if (line.trim()) {
61
- try {
62
- const message = JSON.parse(line);
63
- currentMessage = {
64
- role: message.role || 'user',
65
- content: message.content || '',
66
- };
67
- } catch (e) {
68
- console.error('Failed to parse message:', e);
69
- }
70
- }
71
- }
72
-
73
- // If we have a complete message, send it to X.AI
74
- if (currentMessage) {
75
- const stream = await this.openAiApiClient.chat.completions.create({
76
- model: 'grok-2-latest',
77
- messages: [{ role: currentMessage.role, content: currentMessage.content }],
78
- stream: true,
79
- });
80
-
81
- // Process each chunk from X.AI
82
- for await (const chunk of stream) {
83
- const content = chunk.choices[0]?.delta?.content;
84
- if (content) {
85
- controller.enqueue(content);
86
- }
87
- }
88
-
89
- currentMessage = null;
90
- }
91
- },
92
-
93
- flush(controller) {
94
- if (buffer) {
95
- try {
96
- const message = JSON.parse(buffer);
97
- controller.enqueue(message.content || '');
98
- } catch (e) {
99
- console.error('Failed to parse remaining buffer:', e);
100
- }
101
- }
102
- }
103
- });
104
-
105
- // Connect the input to our transform stream
106
- return input.pipeThrough(transform);
107
- }
108
-
109
- public async chat(optionsArg: {
110
- systemMessage: string;
111
- userMessage: string;
112
- messageHistory: { role: string; content: string; }[];
113
- }): Promise<{ role: 'assistant'; message: string; }> {
114
- // Prepare messages array with system message, history, and user message
115
- const messages: ChatCompletionMessageParam[] = [
116
- { role: 'system', content: optionsArg.systemMessage },
117
- ...optionsArg.messageHistory.map(msg => ({
118
- role: msg.role as 'system' | 'user' | 'assistant',
119
- content: msg.content
120
- })),
121
- { role: 'user', content: optionsArg.userMessage }
122
- ];
123
-
124
- // Call X.AI's chat completion API
125
- const completion = await this.openAiApiClient.chat.completions.create({
126
- model: 'grok-2-latest',
127
- messages: messages,
128
- stream: false,
129
- });
130
-
131
- // Return the assistant's response
132
- return {
133
- role: 'assistant',
134
- message: completion.choices[0]?.message?.content || ''
135
- };
136
- }
137
-
138
- public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
139
- throw new Error('Audio generation is not supported by X.AI');
140
- }
141
-
142
- public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
143
- throw new Error('Vision tasks are not supported by X.AI');
144
- }
145
-
146
- public async document(optionsArg: {
147
- systemMessage: string;
148
- userMessage: string;
149
- pdfDocuments: Uint8Array[];
150
- messageHistory: { role: string; content: string; }[];
151
- }): Promise<{ message: any }> {
152
- // Ensure SmartPdf is initialized before processing documents
153
- await this.ensureSmartpdfReady();
154
-
155
- // First convert PDF documents to images
156
- let pdfDocumentImageBytesArray: Uint8Array[] = [];
157
-
158
- for (const pdfDocument of optionsArg.pdfDocuments) {
159
- const documentImageArray = await this.smartpdfInstance!.convertPDFToPngBytes(pdfDocument);
160
- pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
161
- }
162
-
163
- // Convert images to base64 for inclusion in the message
164
- const imageBase64Array = pdfDocumentImageBytesArray.map(bytes =>
165
- Buffer.from(bytes).toString('base64')
166
- );
167
-
168
- // Combine document images into the user message
169
- const enhancedUserMessage = `
170
- ${optionsArg.userMessage}
171
-
172
- Document contents (as images):
173
- ${imageBase64Array.map((img, i) => `Image ${i + 1}: <image data>`).join('\n')}
174
- `;
175
-
176
- // Use chat completion to analyze the documents
177
- const messages: ChatCompletionMessageParam[] = [
178
- { role: 'system', content: optionsArg.systemMessage },
179
- ...optionsArg.messageHistory.map(msg => ({
180
- role: msg.role as 'system' | 'user' | 'assistant',
181
- content: msg.content
182
- })),
183
- { role: 'user', content: enhancedUserMessage }
184
- ];
185
-
186
- const completion = await this.openAiApiClient.chat.completions.create({
187
- model: 'grok-2-latest',
188
- messages: messages,
189
- stream: false,
190
- });
191
-
192
- return {
193
- message: completion.choices[0]?.message?.content || ''
194
- };
195
- }
196
-
197
- public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
198
- throw new Error('Research capabilities are not yet supported by xAI provider.');
199
- }
200
-
201
- /**
202
- * Image generation is not supported by xAI
203
- */
204
- public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
205
- throw new Error('Image generation is not supported by xAI. Please use OpenAI provider for image generation.');
206
- }
207
-
208
- /**
209
- * Image editing is not supported by xAI
210
- */
211
- public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
212
- throw new Error('Image editing is not supported by xAI. Please use OpenAI provider for image editing.');
213
- }
214
- }