@push.rocks/smartai 0.13.3 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/dist_ts/00_commitinfo_data.js +3 -3
  2. package/dist_ts/index.d.ts +6 -11
  3. package/dist_ts/index.js +6 -12
  4. package/dist_ts/plugins.d.ts +10 -15
  5. package/dist_ts/plugins.js +13 -19
  6. package/dist_ts/smartai.classes.smartai.d.ts +7 -0
  7. package/dist_ts/smartai.classes.smartai.js +51 -0
  8. package/dist_ts/smartai.interfaces.d.ts +41 -0
  9. package/dist_ts/smartai.interfaces.js +2 -0
  10. package/dist_ts/smartai.middleware.anthropic.d.ts +7 -0
  11. package/dist_ts/smartai.middleware.anthropic.js +36 -0
  12. package/dist_ts/smartai.provider.ollama.d.ts +8 -0
  13. package/dist_ts/smartai.provider.ollama.js +378 -0
  14. package/dist_ts_audio/index.d.ts +9 -0
  15. package/dist_ts_audio/index.js +15 -0
  16. package/dist_ts_audio/plugins.d.ts +2 -0
  17. package/dist_ts_audio/plugins.js +3 -0
  18. package/dist_ts_document/index.d.ts +11 -0
  19. package/dist_ts_document/index.js +45 -0
  20. package/dist_ts_document/plugins.d.ts +3 -0
  21. package/dist_ts_document/plugins.js +4 -0
  22. package/dist_ts_image/index.d.ts +46 -0
  23. package/dist_ts_image/index.js +110 -0
  24. package/dist_ts_image/plugins.d.ts +3 -0
  25. package/dist_ts_image/plugins.js +4 -0
  26. package/dist_ts_research/index.d.ts +19 -0
  27. package/dist_ts_research/index.js +98 -0
  28. package/dist_ts_research/plugins.d.ts +2 -0
  29. package/dist_ts_research/plugins.js +3 -0
  30. package/dist_ts_vision/index.d.ts +8 -0
  31. package/dist_ts_vision/index.js +21 -0
  32. package/dist_ts_vision/plugins.d.ts +2 -0
  33. package/dist_ts_vision/plugins.js +3 -0
  34. package/package.json +50 -22
  35. package/readme.hints.md +34 -88
  36. package/readme.md +284 -547
  37. package/ts/00_commitinfo_data.ts +2 -2
  38. package/ts/index.ts +8 -11
  39. package/ts/plugins.ts +19 -35
  40. package/ts/smartai.classes.smartai.ts +51 -0
  41. package/ts/smartai.interfaces.ts +53 -0
  42. package/ts/smartai.middleware.anthropic.ts +38 -0
  43. package/ts/smartai.provider.ollama.ts +426 -0
  44. package/ts_audio/index.ts +24 -0
  45. package/ts_audio/plugins.ts +2 -0
  46. package/ts_document/index.ts +61 -0
  47. package/ts_document/plugins.ts +3 -0
  48. package/ts_image/index.ts +147 -0
  49. package/ts_image/plugins.ts +3 -0
  50. package/ts_research/index.ts +120 -0
  51. package/ts_research/plugins.ts +2 -0
  52. package/ts_vision/index.ts +29 -0
  53. package/ts_vision/plugins.ts +2 -0
  54. package/dist_ts/abstract.classes.multimodal.d.ts +0 -212
  55. package/dist_ts/abstract.classes.multimodal.js +0 -43
  56. package/dist_ts/classes.conversation.d.ts +0 -31
  57. package/dist_ts/classes.conversation.js +0 -150
  58. package/dist_ts/classes.smartai.d.ts +0 -59
  59. package/dist_ts/classes.smartai.js +0 -139
  60. package/dist_ts/classes.tts.d.ts +0 -6
  61. package/dist_ts/classes.tts.js +0 -10
  62. package/dist_ts/interfaces.d.ts +0 -1
  63. package/dist_ts/interfaces.js +0 -2
  64. package/dist_ts/paths.d.ts +0 -2
  65. package/dist_ts/paths.js +0 -4
  66. package/dist_ts/provider.anthropic.d.ts +0 -48
  67. package/dist_ts/provider.anthropic.js +0 -369
  68. package/dist_ts/provider.elevenlabs.d.ts +0 -43
  69. package/dist_ts/provider.elevenlabs.js +0 -64
  70. package/dist_ts/provider.exo.d.ts +0 -40
  71. package/dist_ts/provider.exo.js +0 -116
  72. package/dist_ts/provider.groq.d.ts +0 -39
  73. package/dist_ts/provider.groq.js +0 -178
  74. package/dist_ts/provider.mistral.d.ts +0 -61
  75. package/dist_ts/provider.mistral.js +0 -288
  76. package/dist_ts/provider.ollama.d.ts +0 -141
  77. package/dist_ts/provider.ollama.js +0 -529
  78. package/dist_ts/provider.openai.d.ts +0 -62
  79. package/dist_ts/provider.openai.js +0 -403
  80. package/dist_ts/provider.perplexity.d.ts +0 -37
  81. package/dist_ts/provider.perplexity.js +0 -215
  82. package/dist_ts/provider.xai.d.ts +0 -52
  83. package/dist_ts/provider.xai.js +0 -160
  84. package/ts/abstract.classes.multimodal.ts +0 -240
  85. package/ts/classes.conversation.ts +0 -176
  86. package/ts/classes.smartai.ts +0 -187
  87. package/ts/classes.tts.ts +0 -15
  88. package/ts/interfaces.ts +0 -0
  89. package/ts/paths.ts +0 -4
  90. package/ts/provider.anthropic.ts +0 -446
  91. package/ts/provider.elevenlabs.ts +0 -116
  92. package/ts/provider.exo.ts +0 -155
  93. package/ts/provider.groq.ts +0 -219
  94. package/ts/provider.mistral.ts +0 -352
  95. package/ts/provider.ollama.ts +0 -705
  96. package/ts/provider.openai.ts +0 -462
  97. package/ts/provider.perplexity.ts +0 -259
  98. package/ts/provider.xai.ts +0 -214
@@ -1,155 +0,0 @@
1
- import * as plugins from './plugins.js';
2
- import * as paths from './paths.js';
3
- import { MultiModalModel } from './abstract.classes.multimodal.js';
4
- import type {
5
- ChatOptions,
6
- ChatResponse,
7
- ChatMessage,
8
- ResearchOptions,
9
- ResearchResponse,
10
- ImageGenerateOptions,
11
- ImageEditOptions,
12
- ImageResponse
13
- } from './abstract.classes.multimodal.js';
14
- import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
15
-
16
- export interface IExoProviderOptions {
17
- exoBaseUrl?: string;
18
- apiKey?: string;
19
- }
20
-
21
- export class ExoProvider extends MultiModalModel {
22
- private options: IExoProviderOptions;
23
- public openAiApiClient: plugins.openai.default;
24
-
25
- constructor(optionsArg: IExoProviderOptions = {}) {
26
- super();
27
- this.options = {
28
- exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
29
- ...optionsArg
30
- };
31
- }
32
-
33
- public async start() {
34
- this.openAiApiClient = new plugins.openai.default({
35
- apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
36
- baseURL: this.options.exoBaseUrl,
37
- });
38
- }
39
-
40
- public async stop() {}
41
-
42
- public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
43
- // Create a TextDecoder to handle incoming chunks
44
- const decoder = new TextDecoder();
45
- let buffer = '';
46
- let currentMessage: { role: string; content: string; } | null = null;
47
-
48
- // Create a TransformStream to process the input
49
- const transform = new TransformStream<Uint8Array, string>({
50
- transform: async (chunk, controller) => {
51
- buffer += decoder.decode(chunk, { stream: true });
52
-
53
- // Try to parse complete JSON messages from the buffer
54
- while (true) {
55
- const newlineIndex = buffer.indexOf('\n');
56
- if (newlineIndex === -1) break;
57
-
58
- const line = buffer.slice(0, newlineIndex);
59
- buffer = buffer.slice(newlineIndex + 1);
60
-
61
- if (line.trim()) {
62
- try {
63
- const message = JSON.parse(line);
64
- currentMessage = message;
65
-
66
- // Process the message based on its type
67
- if (message.type === 'message') {
68
- const response = await this.chat({
69
- systemMessage: '',
70
- userMessage: message.content,
71
- messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
72
- });
73
-
74
- controller.enqueue(JSON.stringify(response) + '\n');
75
- }
76
- } catch (error) {
77
- console.error('Error processing message:', error);
78
- }
79
- }
80
- }
81
- },
82
- flush(controller) {
83
- if (buffer) {
84
- try {
85
- const message = JSON.parse(buffer);
86
- currentMessage = message;
87
- } catch (error) {
88
- console.error('Error processing remaining buffer:', error);
89
- }
90
- }
91
- }
92
- });
93
-
94
- return input.pipeThrough(transform);
95
- }
96
-
97
- public async chat(options: ChatOptions): Promise<ChatResponse> {
98
- const messages: ChatCompletionMessageParam[] = [
99
- { role: 'system', content: options.systemMessage },
100
- ...options.messageHistory,
101
- { role: 'user', content: options.userMessage }
102
- ];
103
-
104
- try {
105
- const response = await this.openAiApiClient.chat.completions.create({
106
- model: 'local-model', // Exo uses local models
107
- messages: messages,
108
- stream: false
109
- });
110
-
111
- return {
112
- role: 'assistant',
113
- message: response.choices[0]?.message?.content || ''
114
- };
115
- } catch (error) {
116
- console.error('Error in chat completion:', error);
117
- throw error;
118
- }
119
- }
120
-
121
- public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
122
- throw new Error('Audio generation is not supported by Exo provider');
123
- }
124
-
125
- public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
126
- throw new Error('Vision processing is not supported by Exo provider');
127
- }
128
-
129
- public async document(optionsArg: {
130
- systemMessage: string;
131
- userMessage: string;
132
- pdfDocuments: Uint8Array[];
133
- messageHistory: ChatMessage[];
134
- }): Promise<{ message: any }> {
135
- throw new Error('Document processing is not supported by Exo provider');
136
- }
137
-
138
- public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
139
- throw new Error('Research capabilities are not yet supported by Exo provider.');
140
- }
141
-
142
- /**
143
- * Image generation is not supported by Exo
144
- */
145
- public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
146
- throw new Error('Image generation is not supported by Exo. Please use OpenAI provider for image generation.');
147
- }
148
-
149
- /**
150
- * Image editing is not supported by Exo
151
- */
152
- public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
153
- throw new Error('Image editing is not supported by Exo. Please use OpenAI provider for image editing.');
154
- }
155
- }
@@ -1,219 +0,0 @@
1
- import * as plugins from './plugins.js';
2
- import * as paths from './paths.js';
3
- import { MultiModalModel } from './abstract.classes.multimodal.js';
4
- import type {
5
- ChatOptions,
6
- ChatResponse,
7
- ChatMessage,
8
- ResearchOptions,
9
- ResearchResponse,
10
- ImageGenerateOptions,
11
- ImageEditOptions,
12
- ImageResponse
13
- } from './abstract.classes.multimodal.js';
14
-
15
- export interface IGroqProviderOptions {
16
- groqToken: string;
17
- model?: string;
18
- }
19
-
20
- export class GroqProvider extends MultiModalModel {
21
- private options: IGroqProviderOptions;
22
- private baseUrl = 'https://api.groq.com/v1';
23
-
24
- constructor(optionsArg: IGroqProviderOptions) {
25
- super();
26
- this.options = {
27
- ...optionsArg,
28
- model: optionsArg.model || 'llama-3.3-70b-versatile', // Default model
29
- };
30
- }
31
-
32
- async start() {}
33
-
34
- async stop() {}
35
-
36
- public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
37
- // Create a TextDecoder to handle incoming chunks
38
- const decoder = new TextDecoder();
39
- let buffer = '';
40
- let currentMessage: { role: string; content: string; } | null = null;
41
-
42
- // Create a TransformStream to process the input
43
- const transform = new TransformStream<Uint8Array, string>({
44
- transform: async (chunk, controller) => {
45
- buffer += decoder.decode(chunk, { stream: true });
46
-
47
- // Try to parse complete JSON messages from the buffer
48
- while (true) {
49
- const newlineIndex = buffer.indexOf('\n');
50
- if (newlineIndex === -1) break;
51
-
52
- const line = buffer.slice(0, newlineIndex);
53
- buffer = buffer.slice(newlineIndex + 1);
54
-
55
- if (line.trim()) {
56
- try {
57
- const message = JSON.parse(line);
58
- currentMessage = {
59
- role: message.role || 'user',
60
- content: message.content || '',
61
- };
62
- } catch (e) {
63
- console.error('Failed to parse message:', e);
64
- }
65
- }
66
- }
67
-
68
- // If we have a complete message, send it to Groq
69
- if (currentMessage) {
70
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
71
- method: 'POST',
72
- headers: {
73
- 'Authorization': `Bearer ${this.options.groqToken}`,
74
- 'Content-Type': 'application/json',
75
- },
76
- body: JSON.stringify({
77
- model: this.options.model,
78
- messages: [{ role: currentMessage.role, content: currentMessage.content }],
79
- stream: true,
80
- }),
81
- });
82
-
83
- // Process each chunk from Groq
84
- const reader = response.body?.getReader();
85
- if (reader) {
86
- try {
87
- while (true) {
88
- const { done, value } = await reader.read();
89
- if (done) break;
90
-
91
- const chunk = new TextDecoder().decode(value);
92
- const lines = chunk.split('\n');
93
-
94
- for (const line of lines) {
95
- if (line.startsWith('data: ')) {
96
- const data = line.slice(6);
97
- if (data === '[DONE]') break;
98
-
99
- try {
100
- const parsed = JSON.parse(data);
101
- const content = parsed.choices[0]?.delta?.content;
102
- if (content) {
103
- controller.enqueue(content);
104
- }
105
- } catch (e) {
106
- console.error('Failed to parse SSE data:', e);
107
- }
108
- }
109
- }
110
- }
111
- } finally {
112
- reader.releaseLock();
113
- }
114
- }
115
-
116
- currentMessage = null;
117
- }
118
- },
119
-
120
- flush(controller) {
121
- if (buffer) {
122
- try {
123
- const message = JSON.parse(buffer);
124
- controller.enqueue(message.content || '');
125
- } catch (e) {
126
- console.error('Failed to parse remaining buffer:', e);
127
- }
128
- }
129
- }
130
- });
131
-
132
- // Connect the input to our transform stream
133
- return input.pipeThrough(transform);
134
- }
135
-
136
- // Implementing the synchronous chat interaction
137
- public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
138
- const messages = [
139
- // System message
140
- {
141
- role: 'system',
142
- content: optionsArg.systemMessage,
143
- },
144
- // Message history
145
- ...optionsArg.messageHistory.map(msg => ({
146
- role: msg.role,
147
- content: msg.content,
148
- })),
149
- // User message
150
- {
151
- role: 'user',
152
- content: optionsArg.userMessage,
153
- },
154
- ];
155
-
156
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
157
- method: 'POST',
158
- headers: {
159
- 'Authorization': `Bearer ${this.options.groqToken}`,
160
- 'Content-Type': 'application/json',
161
- },
162
- body: JSON.stringify({
163
- model: this.options.model,
164
- messages,
165
- temperature: 0.7,
166
- max_completion_tokens: 1024,
167
- stream: false,
168
- }),
169
- });
170
-
171
- if (!response.ok) {
172
- const error = await response.json();
173
- throw new Error(`Groq API error: ${error.message || response.statusText}`);
174
- }
175
-
176
- const result = await response.json();
177
-
178
- return {
179
- role: 'assistant',
180
- message: result.choices[0].message.content,
181
- };
182
- }
183
-
184
- public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
185
- // Groq does not provide an audio API, so this method is not implemented.
186
- throw new Error('Audio generation is not yet supported by Groq.');
187
- }
188
-
189
- public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
190
- throw new Error('Vision tasks are not yet supported by Groq.');
191
- }
192
-
193
- public async document(optionsArg: {
194
- systemMessage: string;
195
- userMessage: string;
196
- pdfDocuments: Uint8Array[];
197
- messageHistory: ChatMessage[];
198
- }): Promise<{ message: any }> {
199
- throw new Error('Document processing is not yet supported by Groq.');
200
- }
201
-
202
- public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
203
- throw new Error('Research capabilities are not yet supported by Groq provider.');
204
- }
205
-
206
- /**
207
- * Image generation is not supported by Groq
208
- */
209
- public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
210
- throw new Error('Image generation is not supported by Groq. Please use OpenAI provider for image generation.');
211
- }
212
-
213
- /**
214
- * Image editing is not supported by Groq
215
- */
216
- public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
217
- throw new Error('Image editing is not supported by Groq. Please use OpenAI provider for image editing.');
218
- }
219
- }