@push.rocks/smartai 0.13.2 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/dist_ts/00_commitinfo_data.js +3 -3
  2. package/dist_ts/index.d.ts +6 -11
  3. package/dist_ts/index.js +6 -12
  4. package/dist_ts/plugins.d.ts +10 -15
  5. package/dist_ts/plugins.js +13 -19
  6. package/dist_ts/smartai.classes.smartai.d.ts +7 -0
  7. package/dist_ts/smartai.classes.smartai.js +51 -0
  8. package/dist_ts/smartai.interfaces.d.ts +41 -0
  9. package/dist_ts/smartai.interfaces.js +2 -0
  10. package/dist_ts/smartai.middleware.anthropic.d.ts +7 -0
  11. package/dist_ts/smartai.middleware.anthropic.js +36 -0
  12. package/dist_ts/smartai.provider.ollama.d.ts +8 -0
  13. package/dist_ts/smartai.provider.ollama.js +378 -0
  14. package/dist_ts_audio/index.d.ts +9 -0
  15. package/dist_ts_audio/index.js +15 -0
  16. package/dist_ts_audio/plugins.d.ts +2 -0
  17. package/dist_ts_audio/plugins.js +3 -0
  18. package/dist_ts_document/index.d.ts +11 -0
  19. package/dist_ts_document/index.js +45 -0
  20. package/dist_ts_document/plugins.d.ts +3 -0
  21. package/dist_ts_document/plugins.js +4 -0
  22. package/dist_ts_image/index.d.ts +46 -0
  23. package/dist_ts_image/index.js +110 -0
  24. package/dist_ts_image/plugins.d.ts +3 -0
  25. package/dist_ts_image/plugins.js +4 -0
  26. package/dist_ts_research/index.d.ts +19 -0
  27. package/dist_ts_research/index.js +98 -0
  28. package/dist_ts_research/plugins.d.ts +2 -0
  29. package/dist_ts_research/plugins.js +3 -0
  30. package/dist_ts_vision/index.d.ts +8 -0
  31. package/dist_ts_vision/index.js +21 -0
  32. package/dist_ts_vision/plugins.d.ts +2 -0
  33. package/dist_ts_vision/plugins.js +3 -0
  34. package/package.json +50 -22
  35. package/readme.hints.md +34 -88
  36. package/readme.md +284 -547
  37. package/ts/00_commitinfo_data.ts +2 -2
  38. package/ts/index.ts +8 -11
  39. package/ts/plugins.ts +19 -35
  40. package/ts/smartai.classes.smartai.ts +51 -0
  41. package/ts/smartai.interfaces.ts +53 -0
  42. package/ts/smartai.middleware.anthropic.ts +38 -0
  43. package/ts/smartai.provider.ollama.ts +426 -0
  44. package/ts_audio/index.ts +24 -0
  45. package/ts_audio/plugins.ts +2 -0
  46. package/ts_document/index.ts +61 -0
  47. package/ts_document/plugins.ts +3 -0
  48. package/ts_image/index.ts +147 -0
  49. package/ts_image/plugins.ts +3 -0
  50. package/ts_research/index.ts +120 -0
  51. package/ts_research/plugins.ts +2 -0
  52. package/ts_vision/index.ts +29 -0
  53. package/ts_vision/plugins.ts +2 -0
  54. package/dist_ts/abstract.classes.multimodal.d.ts +0 -212
  55. package/dist_ts/abstract.classes.multimodal.js +0 -43
  56. package/dist_ts/classes.conversation.d.ts +0 -31
  57. package/dist_ts/classes.conversation.js +0 -150
  58. package/dist_ts/classes.smartai.d.ts +0 -59
  59. package/dist_ts/classes.smartai.js +0 -139
  60. package/dist_ts/classes.tts.d.ts +0 -6
  61. package/dist_ts/classes.tts.js +0 -10
  62. package/dist_ts/interfaces.d.ts +0 -1
  63. package/dist_ts/interfaces.js +0 -2
  64. package/dist_ts/paths.d.ts +0 -2
  65. package/dist_ts/paths.js +0 -4
  66. package/dist_ts/provider.anthropic.d.ts +0 -48
  67. package/dist_ts/provider.anthropic.js +0 -369
  68. package/dist_ts/provider.elevenlabs.d.ts +0 -43
  69. package/dist_ts/provider.elevenlabs.js +0 -64
  70. package/dist_ts/provider.exo.d.ts +0 -40
  71. package/dist_ts/provider.exo.js +0 -116
  72. package/dist_ts/provider.groq.d.ts +0 -39
  73. package/dist_ts/provider.groq.js +0 -178
  74. package/dist_ts/provider.mistral.d.ts +0 -61
  75. package/dist_ts/provider.mistral.js +0 -288
  76. package/dist_ts/provider.ollama.d.ts +0 -141
  77. package/dist_ts/provider.ollama.js +0 -519
  78. package/dist_ts/provider.openai.d.ts +0 -62
  79. package/dist_ts/provider.openai.js +0 -403
  80. package/dist_ts/provider.perplexity.d.ts +0 -37
  81. package/dist_ts/provider.perplexity.js +0 -215
  82. package/dist_ts/provider.xai.d.ts +0 -52
  83. package/dist_ts/provider.xai.js +0 -160
  84. package/ts/abstract.classes.multimodal.ts +0 -240
  85. package/ts/classes.conversation.ts +0 -176
  86. package/ts/classes.smartai.ts +0 -187
  87. package/ts/classes.tts.ts +0 -15
  88. package/ts/interfaces.ts +0 -0
  89. package/ts/paths.ts +0 -4
  90. package/ts/provider.anthropic.ts +0 -446
  91. package/ts/provider.elevenlabs.ts +0 -116
  92. package/ts/provider.exo.ts +0 -155
  93. package/ts/provider.groq.ts +0 -219
  94. package/ts/provider.mistral.ts +0 -352
  95. package/ts/provider.ollama.ts +0 -695
  96. package/ts/provider.openai.ts +0 -462
  97. package/ts/provider.perplexity.ts +0 -259
  98. package/ts/provider.xai.ts +0 -214
@@ -1,240 +0,0 @@
1
- import * as plugins from './plugins.js';
2
-
3
14
-
15
- /**
16
- * Options for chat interactions
17
- */
18
- export interface ChatOptions {
19
- systemMessage: string;
20
- userMessage: string;
21
- messageHistory: ChatMessage[];
22
- /** Base64-encoded images for the current message (vision-capable models) */
23
- images?: string[];
24
- }
25
-
26
- /**
27
- * Options for streaming chat interactions
28
- */
29
- export interface StreamingChatOptions extends ChatOptions {
30
- /** Callback fired for each token during generation */
31
- onToken?: (token: string) => void;
32
- }
33
-
34
- /**
35
- * Response format for chat interactions
36
- */
37
- export interface ChatResponse {
38
- role: 'assistant';
39
- message: string;
40
- /** Chain-of-thought reasoning from reasoning models */
41
- reasoning?: string;
42
- }
43
-
44
- /**
45
- * Options for research interactions
46
- */
47
- export interface ResearchOptions {
48
- query: string;
49
- searchDepth?: 'basic' | 'advanced' | 'deep';
50
- maxSources?: number;
51
- includeWebSearch?: boolean;
52
- background?: boolean;
53
- }
54
-
55
- /**
56
- * Response format for research interactions
57
- */
58
- export interface ResearchResponse {
59
- answer: string;
60
- sources: Array<{
61
- url: string;
62
- title: string;
63
- snippet: string;
64
- }>;
65
- searchQueries?: string[];
66
- metadata?: any;
67
- }
68
-
69
- /**
70
- * Options for image generation
71
- */
72
- export interface ImageGenerateOptions {
73
- prompt: string;
74
- model?: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2';
75
- quality?: 'low' | 'medium' | 'high' | 'standard' | 'hd' | 'auto';
76
- size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | '1792x1024' | '1024x1792' | 'auto';
77
- style?: 'vivid' | 'natural';
78
- background?: 'transparent' | 'opaque' | 'auto';
79
- outputFormat?: 'png' | 'jpeg' | 'webp';
80
- outputCompression?: number; // 0-100 for webp/jpeg
81
- moderation?: 'low' | 'auto';
82
- n?: number; // Number of images to generate
83
- stream?: boolean;
84
- partialImages?: number; // 0-3 for streaming
85
- }
86
-
87
- /**
88
- * Options for image editing
89
- */
90
- export interface ImageEditOptions {
91
- image: Buffer;
92
- prompt: string;
93
- mask?: Buffer;
94
- model?: 'gpt-image-1' | 'dall-e-2';
95
- quality?: 'low' | 'medium' | 'high' | 'standard' | 'auto';
96
- size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto';
97
- background?: 'transparent' | 'opaque' | 'auto';
98
- outputFormat?: 'png' | 'jpeg' | 'webp';
99
- outputCompression?: number;
100
- n?: number;
101
- stream?: boolean;
102
- partialImages?: number;
103
- }
104
-
105
- /**
106
- * Response format for image operations
107
- */
108
- export interface ImageResponse {
109
- images: Array<{
110
- b64_json?: string;
111
- url?: string;
112
- revisedPrompt?: string;
113
- }>;
114
- metadata?: {
115
- model: string;
116
- quality?: string;
117
- size?: string;
118
- outputFormat?: string;
119
- tokensUsed?: number;
120
- };
121
- }
122
-
123
- /**
124
- * Abstract base class for multi-modal AI models.
125
- * Provides a common interface for different AI providers (OpenAI, Anthropic, Perplexity, Ollama)
126
- */
127
- export abstract class MultiModalModel {
128
- /**
129
- * SmartPdf instance for document processing
130
- * Lazy-loaded only when PDF processing is needed to avoid starting browser unnecessarily
131
- */
132
- protected smartpdfInstance: plugins.smartpdf.SmartPdf | null = null;
133
-
134
- /**
135
- * Ensures SmartPdf instance is initialized and ready
136
- * Call this before using smartpdfInstance in document processing methods
137
- */
138
- protected async ensureSmartpdfReady(): Promise<void> {
139
- if (!this.smartpdfInstance) {
140
- this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
141
- await this.smartpdfInstance.start();
142
- }
143
- }
144
-
145
- /**
146
- * Initializes the model and any necessary resources
147
- * Should be called before using any other methods
148
- */
149
- public async start(): Promise<void> {
150
- // SmartPdf is now lazy-loaded only when needed for PDF processing
151
- // This avoids starting a browser unless document() method is actually used
152
- }
153
-
154
- /**
155
- * Cleans up any resources used by the model
156
- * Should be called when the model is no longer needed
157
- */
158
- public async stop(): Promise<void> {
159
- if (this.smartpdfInstance) {
160
- await this.smartpdfInstance.stop();
161
- this.smartpdfInstance = null;
162
- }
163
- }
164
-
165
- /**
166
- * Synchronous chat interaction with the model
167
- * @param optionsArg Options containing system message, user message, and message history
168
- * @returns Promise resolving to the assistant's response
169
- */
170
- public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;
171
-
172
- /**
173
- * Streaming interface for chat interactions
174
- * Allows for real-time responses from the model
175
- * @param input Stream of user messages
176
- * @returns Stream of model responses
177
- */
178
- public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
179
-
180
- /**
181
- * Streaming chat with token callback
182
- * Calls onToken for each token generated, returns final response
183
- * @param optionsArg Options containing system message, user message, message history, and onToken callback
184
- * @returns Promise resolving to the assistant's response
185
- */
186
- public chatStreaming?(optionsArg: StreamingChatOptions): Promise<ChatResponse>;
187
-
188
- /**
189
- * Text-to-speech conversion
190
- * @param optionsArg Options containing the message to convert to speech
191
- * @returns Promise resolving to a readable stream of audio data
192
- * @throws Error if the provider doesn't support audio generation
193
- */
194
- public abstract audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream>;
195
-
196
- /**
197
- * Vision-language processing
198
- * @param optionsArg Options containing the image and prompt for analysis
199
- * @returns Promise resolving to the model's description or analysis of the image
200
- * @throws Error if the provider doesn't support vision tasks
201
- */
202
- public abstract vision(optionsArg: { image: Buffer; prompt: string }): Promise<string>;
203
-
204
- /**
205
- * Document analysis and processing
206
- * @param optionsArg Options containing system message, user message, PDF documents, and message history
207
- * @returns Promise resolving to the model's analysis of the documents
208
- * @throws Error if the provider doesn't support document processing
209
- */
210
- public abstract document(optionsArg: {
211
- systemMessage: string;
212
- userMessage: string;
213
- pdfDocuments: Uint8Array[];
214
- messageHistory: ChatMessage[];
215
- }): Promise<{ message: any }>;
216
-
217
- /**
218
- * Research and web search capabilities
219
- * @param optionsArg Options containing the research query and configuration
220
- * @returns Promise resolving to the research results with sources
221
- * @throws Error if the provider doesn't support research capabilities
222
- */
223
- public abstract research(optionsArg: ResearchOptions): Promise<ResearchResponse>;
224
-
225
- /**
226
- * Image generation from text prompts
227
- * @param optionsArg Options containing the prompt and generation parameters
228
- * @returns Promise resolving to the generated image(s)
229
- * @throws Error if the provider doesn't support image generation
230
- */
231
- public abstract imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse>;
232
-
233
- /**
234
- * Image editing and inpainting
235
- * @param optionsArg Options containing the image, prompt, and editing parameters
236
- * @returns Promise resolving to the edited image(s)
237
- * @throws Error if the provider doesn't support image editing
238
- */
239
- public abstract imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse>;
240
- }
@@ -1,176 +0,0 @@
1
- import type { SmartAi } from "./classes.smartai.js";
2
- import { OpenAiProvider } from "./provider.openai.js";
3
-
4
- type TProcessFunction = (input: string) => Promise<string>;
5
-
6
- export interface IConversationOptions {
7
- processFunction: TProcessFunction;
8
- }
9
-
10
- /**
11
- * a conversation
12
- */
13
- export class Conversation {
14
- // STATIC
15
- public static async createWithOpenAi(smartaiRefArg: SmartAi) {
16
- if (!smartaiRefArg.openaiProvider) {
17
- throw new Error('OpenAI provider not available');
18
- }
19
- const conversation = new Conversation(smartaiRefArg, {
20
- processFunction: async (input) => {
21
- return '' // TODO implement proper streaming
22
- }
23
- });
24
- return conversation;
25
- }
26
-
27
- public static async createWithAnthropic(smartaiRefArg: SmartAi) {
28
- if (!smartaiRefArg.anthropicProvider) {
29
- throw new Error('Anthropic provider not available');
30
- }
31
- const conversation = new Conversation(smartaiRefArg, {
32
- processFunction: async (input) => {
33
- return '' // TODO implement proper streaming
34
- }
35
- });
36
- return conversation;
37
- }
38
-
39
- public static async createWithPerplexity(smartaiRefArg: SmartAi) {
40
- if (!smartaiRefArg.perplexityProvider) {
41
- throw new Error('Perplexity provider not available');
42
- }
43
- const conversation = new Conversation(smartaiRefArg, {
44
- processFunction: async (input) => {
45
- return '' // TODO implement proper streaming
46
- }
47
- });
48
- return conversation;
49
- }
50
-
51
- public static async createWithExo(smartaiRefArg: SmartAi) {
52
- if (!smartaiRefArg.exoProvider) {
53
- throw new Error('Exo provider not available');
54
- }
55
- const conversation = new Conversation(smartaiRefArg, {
56
- processFunction: async (input) => {
57
- return '' // TODO implement proper streaming
58
- }
59
- });
60
- return conversation;
61
- }
62
-
63
- public static async createWithOllama(smartaiRefArg: SmartAi) {
64
- if (!smartaiRefArg.ollamaProvider) {
65
- throw new Error('Ollama provider not available');
66
- }
67
- const conversation = new Conversation(smartaiRefArg, {
68
- processFunction: async (input) => {
69
- return '' // TODO implement proper streaming
70
- }
71
- });
72
- return conversation;
73
- }
74
-
75
- public static async createWithGroq(smartaiRefArg: SmartAi) {
76
- if (!smartaiRefArg.groqProvider) {
77
- throw new Error('Groq provider not available');
78
- }
79
- const conversation = new Conversation(smartaiRefArg, {
80
- processFunction: async (input) => {
81
- return '' // TODO implement proper streaming
82
- }
83
- });
84
- return conversation;
85
- }
86
-
87
- public static async createWithMistral(smartaiRefArg: SmartAi) {
88
- if (!smartaiRefArg.mistralProvider) {
89
- throw new Error('Mistral provider not available');
90
- }
91
- const conversation = new Conversation(smartaiRefArg, {
92
- processFunction: async (input) => {
93
- return '' // TODO implement proper streaming
94
- }
95
- });
96
- return conversation;
97
- }
98
-
99
- public static async createWithXai(smartaiRefArg: SmartAi) {
100
- if (!smartaiRefArg.xaiProvider) {
101
- throw new Error('XAI provider not available');
102
- }
103
- const conversation = new Conversation(smartaiRefArg, {
104
- processFunction: async (input) => {
105
- return '' // TODO implement proper streaming
106
- }
107
- });
108
- return conversation;
109
- }
110
-
111
- public static async createWithElevenlabs(smartaiRefArg: SmartAi) {
112
- if (!smartaiRefArg.elevenlabsProvider) {
113
- throw new Error('ElevenLabs provider not available');
114
- }
115
- const conversation = new Conversation(smartaiRefArg, {
116
- processFunction: async (input) => {
117
- return '' // TODO implement proper streaming
118
- }
119
- });
120
- return conversation;
121
- }
122
-
123
- // INSTANCE
124
- smartaiRef: SmartAi
125
- private systemMessage: string;
126
- private processFunction: TProcessFunction;
127
- private inputStreamWriter: WritableStreamDefaultWriter<string> | null = null;
128
- private outputStreamController: ReadableStreamDefaultController<string> | null = null;
129
-
130
- constructor(smartairefArg: SmartAi, options: IConversationOptions) {
131
- this.processFunction = options.processFunction;
132
- }
133
-
134
- public async setSystemMessage(systemMessageArg: string) {
135
- this.systemMessage = systemMessageArg;
136
- }
137
-
138
- private setupOutputStream(): ReadableStream<string> {
139
- return new ReadableStream<string>({
140
- start: (controller) => {
141
- this.outputStreamController = controller;
142
- }
143
- });
144
- }
145
-
146
- private setupInputStream(): WritableStream<string> {
147
- const writableStream = new WritableStream<string>({
148
- write: async (chunk) => {
149
- const processedData = await this.processFunction(chunk);
150
- if (this.outputStreamController) {
151
- this.outputStreamController.enqueue(processedData);
152
- }
153
- },
154
- close: () => {
155
- this.outputStreamController?.close();
156
- },
157
- abort: (err) => {
158
- console.error('Stream aborted', err);
159
- this.outputStreamController?.error(err);
160
- }
161
- });
162
- return writableStream;
163
- }
164
-
165
- public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
166
- if (!this.inputStreamWriter) {
167
- const inputStream = this.setupInputStream();
168
- this.inputStreamWriter = inputStream.getWriter();
169
- }
170
- return this.inputStreamWriter;
171
- }
172
-
173
- public getOutputStream(): ReadableStream<string> {
174
- return this.setupOutputStream();
175
- }
176
- }
@@ -1,187 +0,0 @@
1
- import { Conversation } from './classes.conversation.js';
2
- import * as plugins from './plugins.js';
3
- import { AnthropicProvider } from './provider.anthropic.js';
4
- import { ElevenLabsProvider } from './provider.elevenlabs.js';
5
- import { MistralProvider } from './provider.mistral.js';
6
- import { OllamaProvider, type IOllamaModelOptions } from './provider.ollama.js';
7
- import { OpenAiProvider } from './provider.openai.js';
8
- import { PerplexityProvider } from './provider.perplexity.js';
9
- import { ExoProvider } from './provider.exo.js';
10
- import { GroqProvider } from './provider.groq.js';
11
- import { XAIProvider } from './provider.xai.js';
12
-
13
-
14
- export interface ISmartAiOptions {
15
- openaiToken?: string;
16
- anthropicToken?: string;
17
- perplexityToken?: string;
18
- groqToken?: string;
19
- mistralToken?: string;
20
- xaiToken?: string;
21
- elevenlabsToken?: string;
22
- exo?: {
23
- baseUrl?: string;
24
- apiKey?: string;
25
- };
26
- mistral?: {
27
- chatModel?: string;
28
- ocrModel?: string;
29
- tableFormat?: 'markdown' | 'html';
30
- };
31
- ollama?: {
32
- baseUrl?: string;
33
- model?: string;
34
- visionModel?: string;
35
- defaultOptions?: IOllamaModelOptions;
36
- defaultTimeout?: number;
37
- };
38
- elevenlabs?: {
39
- defaultVoiceId?: string;
40
- defaultModelId?: string;
41
- };
42
- }
43
-
44
- export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'mistral' | 'xai' | 'elevenlabs';
45
-
46
- export class SmartAi {
47
- public options: ISmartAiOptions;
48
-
49
- public openaiProvider: OpenAiProvider;
50
- public anthropicProvider: AnthropicProvider;
51
- public perplexityProvider: PerplexityProvider;
52
- public ollamaProvider: OllamaProvider;
53
- public exoProvider: ExoProvider;
54
- public groqProvider: GroqProvider;
55
- public mistralProvider: MistralProvider;
56
- public xaiProvider: XAIProvider;
57
- public elevenlabsProvider: ElevenLabsProvider;
58
-
59
- constructor(optionsArg: ISmartAiOptions) {
60
- this.options = optionsArg;
61
- }
62
-
63
- public async start() {
64
- if (this.options.openaiToken) {
65
- this.openaiProvider = new OpenAiProvider({
66
- openaiToken: this.options.openaiToken,
67
- });
68
- await this.openaiProvider.start();
69
- }
70
- if (this.options.anthropicToken) {
71
- this.anthropicProvider = new AnthropicProvider({
72
- anthropicToken: this.options.anthropicToken,
73
- });
74
- await this.anthropicProvider.start();
75
- }
76
- if (this.options.perplexityToken) {
77
- this.perplexityProvider = new PerplexityProvider({
78
- perplexityToken: this.options.perplexityToken,
79
- });
80
- await this.perplexityProvider.start();
81
- }
82
- if (this.options.groqToken) {
83
- this.groqProvider = new GroqProvider({
84
- groqToken: this.options.groqToken,
85
- });
86
- await this.groqProvider.start();
87
- }
88
- if (this.options.mistralToken) {
89
- this.mistralProvider = new MistralProvider({
90
- mistralToken: this.options.mistralToken,
91
- chatModel: this.options.mistral?.chatModel,
92
- ocrModel: this.options.mistral?.ocrModel,
93
- tableFormat: this.options.mistral?.tableFormat,
94
- });
95
- await this.mistralProvider.start();
96
- }
97
- if (this.options.xaiToken) {
98
- this.xaiProvider = new XAIProvider({
99
- xaiToken: this.options.xaiToken,
100
- });
101
- await this.xaiProvider.start();
102
- }
103
- if (this.options.elevenlabsToken) {
104
- this.elevenlabsProvider = new ElevenLabsProvider({
105
- elevenlabsToken: this.options.elevenlabsToken,
106
- defaultVoiceId: this.options.elevenlabs?.defaultVoiceId,
107
- defaultModelId: this.options.elevenlabs?.defaultModelId,
108
- });
109
- await this.elevenlabsProvider.start();
110
- }
111
- if (this.options.ollama) {
112
- this.ollamaProvider = new OllamaProvider({
113
- baseUrl: this.options.ollama.baseUrl,
114
- model: this.options.ollama.model,
115
- visionModel: this.options.ollama.visionModel,
116
- defaultOptions: this.options.ollama.defaultOptions,
117
- defaultTimeout: this.options.ollama.defaultTimeout,
118
- });
119
- await this.ollamaProvider.start();
120
- }
121
- if (this.options.exo) {
122
- this.exoProvider = new ExoProvider({
123
- exoBaseUrl: this.options.exo.baseUrl,
124
- apiKey: this.options.exo.apiKey,
125
- });
126
- await this.exoProvider.start();
127
- }
128
- }
129
-
130
- public async stop() {
131
- if (this.openaiProvider) {
132
- await this.openaiProvider.stop();
133
- }
134
- if (this.anthropicProvider) {
135
- await this.anthropicProvider.stop();
136
- }
137
- if (this.perplexityProvider) {
138
- await this.perplexityProvider.stop();
139
- }
140
- if (this.groqProvider) {
141
- await this.groqProvider.stop();
142
- }
143
- if (this.mistralProvider) {
144
- await this.mistralProvider.stop();
145
- }
146
- if (this.xaiProvider) {
147
- await this.xaiProvider.stop();
148
- }
149
- if (this.elevenlabsProvider) {
150
- await this.elevenlabsProvider.stop();
151
- }
152
- if (this.ollamaProvider) {
153
- await this.ollamaProvider.stop();
154
- }
155
- if (this.exoProvider) {
156
- await this.exoProvider.stop();
157
- }
158
- }
159
-
160
- /**
161
- * create a new conversation
162
- */
163
- createConversation(provider: TProvider) {
164
- switch (provider) {
165
- case 'exo':
166
- return Conversation.createWithExo(this);
167
- case 'openai':
168
- return Conversation.createWithOpenAi(this);
169
- case 'anthropic':
170
- return Conversation.createWithAnthropic(this);
171
- case 'perplexity':
172
- return Conversation.createWithPerplexity(this);
173
- case 'ollama':
174
- return Conversation.createWithOllama(this);
175
- case 'groq':
176
- return Conversation.createWithGroq(this);
177
- case 'mistral':
178
- return Conversation.createWithMistral(this);
179
- case 'xai':
180
- return Conversation.createWithXai(this);
181
- case 'elevenlabs':
182
- return Conversation.createWithElevenlabs(this);
183
- default:
184
- throw new Error('Provider not available');
185
- }
186
- }
187
- }
package/ts/classes.tts.ts DELETED
@@ -1,15 +0,0 @@
1
- import type { SmartAi } from './classes.smartai.js';
2
- import * as plugins from './plugins.js';
3
-
4
- export class TTS {
5
- public static async createWithOpenAi(smartaiRef: SmartAi): Promise<TTS> {
6
- return new TTS(smartaiRef);
7
- }
8
-
9
- // INSTANCE
10
- smartaiRef: SmartAi;
11
-
12
- constructor(smartairefArg: SmartAi) {
13
- this.smartaiRef = smartairefArg;
14
- }
15
- }
package/ts/interfaces.ts DELETED
File without changes
package/ts/paths.ts DELETED
@@ -1,4 +0,0 @@
1
- import * as plugins from './plugins.js';
2
-
3
- export const packageDir = plugins.path.join(plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url), '../');
4
- export const nogitDir = plugins.path.join(packageDir, './.nogit');