@push.rocks/smartai 0.13.3 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/dist_ts/00_commitinfo_data.js +3 -3
  2. package/dist_ts/index.d.ts +6 -11
  3. package/dist_ts/index.js +6 -12
  4. package/dist_ts/plugins.d.ts +10 -15
  5. package/dist_ts/plugins.js +13 -19
  6. package/dist_ts/smartai.classes.smartai.d.ts +7 -0
  7. package/dist_ts/smartai.classes.smartai.js +51 -0
  8. package/dist_ts/smartai.interfaces.d.ts +41 -0
  9. package/dist_ts/smartai.interfaces.js +2 -0
  10. package/dist_ts/smartai.middleware.anthropic.d.ts +7 -0
  11. package/dist_ts/smartai.middleware.anthropic.js +36 -0
  12. package/dist_ts/smartai.provider.ollama.d.ts +8 -0
  13. package/dist_ts/smartai.provider.ollama.js +378 -0
  14. package/dist_ts_audio/index.d.ts +9 -0
  15. package/dist_ts_audio/index.js +15 -0
  16. package/dist_ts_audio/plugins.d.ts +2 -0
  17. package/dist_ts_audio/plugins.js +3 -0
  18. package/dist_ts_document/index.d.ts +11 -0
  19. package/dist_ts_document/index.js +45 -0
  20. package/dist_ts_document/plugins.d.ts +3 -0
  21. package/dist_ts_document/plugins.js +4 -0
  22. package/dist_ts_image/index.d.ts +46 -0
  23. package/dist_ts_image/index.js +110 -0
  24. package/dist_ts_image/plugins.d.ts +3 -0
  25. package/dist_ts_image/plugins.js +4 -0
  26. package/dist_ts_research/index.d.ts +19 -0
  27. package/dist_ts_research/index.js +98 -0
  28. package/dist_ts_research/plugins.d.ts +2 -0
  29. package/dist_ts_research/plugins.js +3 -0
  30. package/dist_ts_vision/index.d.ts +8 -0
  31. package/dist_ts_vision/index.js +21 -0
  32. package/dist_ts_vision/plugins.d.ts +2 -0
  33. package/dist_ts_vision/plugins.js +3 -0
  34. package/package.json +50 -22
  35. package/readme.hints.md +34 -88
  36. package/readme.md +284 -547
  37. package/ts/00_commitinfo_data.ts +2 -2
  38. package/ts/index.ts +8 -11
  39. package/ts/plugins.ts +19 -35
  40. package/ts/smartai.classes.smartai.ts +51 -0
  41. package/ts/smartai.interfaces.ts +53 -0
  42. package/ts/smartai.middleware.anthropic.ts +38 -0
  43. package/ts/smartai.provider.ollama.ts +426 -0
  44. package/ts_audio/index.ts +24 -0
  45. package/ts_audio/plugins.ts +2 -0
  46. package/ts_document/index.ts +61 -0
  47. package/ts_document/plugins.ts +3 -0
  48. package/ts_image/index.ts +147 -0
  49. package/ts_image/plugins.ts +3 -0
  50. package/ts_research/index.ts +120 -0
  51. package/ts_research/plugins.ts +2 -0
  52. package/ts_vision/index.ts +29 -0
  53. package/ts_vision/plugins.ts +2 -0
  54. package/dist_ts/abstract.classes.multimodal.d.ts +0 -212
  55. package/dist_ts/abstract.classes.multimodal.js +0 -43
  56. package/dist_ts/classes.conversation.d.ts +0 -31
  57. package/dist_ts/classes.conversation.js +0 -150
  58. package/dist_ts/classes.smartai.d.ts +0 -59
  59. package/dist_ts/classes.smartai.js +0 -139
  60. package/dist_ts/classes.tts.d.ts +0 -6
  61. package/dist_ts/classes.tts.js +0 -10
  62. package/dist_ts/interfaces.d.ts +0 -1
  63. package/dist_ts/interfaces.js +0 -2
  64. package/dist_ts/paths.d.ts +0 -2
  65. package/dist_ts/paths.js +0 -4
  66. package/dist_ts/provider.anthropic.d.ts +0 -48
  67. package/dist_ts/provider.anthropic.js +0 -369
  68. package/dist_ts/provider.elevenlabs.d.ts +0 -43
  69. package/dist_ts/provider.elevenlabs.js +0 -64
  70. package/dist_ts/provider.exo.d.ts +0 -40
  71. package/dist_ts/provider.exo.js +0 -116
  72. package/dist_ts/provider.groq.d.ts +0 -39
  73. package/dist_ts/provider.groq.js +0 -178
  74. package/dist_ts/provider.mistral.d.ts +0 -61
  75. package/dist_ts/provider.mistral.js +0 -288
  76. package/dist_ts/provider.ollama.d.ts +0 -141
  77. package/dist_ts/provider.ollama.js +0 -529
  78. package/dist_ts/provider.openai.d.ts +0 -62
  79. package/dist_ts/provider.openai.js +0 -403
  80. package/dist_ts/provider.perplexity.d.ts +0 -37
  81. package/dist_ts/provider.perplexity.js +0 -215
  82. package/dist_ts/provider.xai.d.ts +0 -52
  83. package/dist_ts/provider.xai.js +0 -160
  84. package/ts/abstract.classes.multimodal.ts +0 -240
  85. package/ts/classes.conversation.ts +0 -176
  86. package/ts/classes.smartai.ts +0 -187
  87. package/ts/classes.tts.ts +0 -15
  88. package/ts/interfaces.ts +0 -0
  89. package/ts/paths.ts +0 -4
  90. package/ts/provider.anthropic.ts +0 -446
  91. package/ts/provider.elevenlabs.ts +0 -116
  92. package/ts/provider.exo.ts +0 -155
  93. package/ts/provider.groq.ts +0 -219
  94. package/ts/provider.mistral.ts +0 -352
  95. package/ts/provider.ollama.ts +0 -705
  96. package/ts/provider.openai.ts +0 -462
  97. package/ts/provider.perplexity.ts +0 -259
  98. package/ts/provider.xai.ts +0 -214
@@ -1,446 +0,0 @@
1
- import * as plugins from './plugins.js';
2
- import * as paths from './paths.js';
3
- import { MultiModalModel } from './abstract.classes.multimodal.js';
4
- import type {
5
- ChatOptions,
6
- ChatResponse,
7
- ChatMessage,
8
- ResearchOptions,
9
- ResearchResponse,
10
- ImageGenerateOptions,
11
- ImageEditOptions,
12
- ImageResponse
13
- } from './abstract.classes.multimodal.js';
14
- import type { ImageBlockParam, TextBlockParam } from '@anthropic-ai/sdk/resources/messages';
15
-
16
- type ContentBlock = ImageBlockParam | TextBlockParam;
17
-
18
- export interface IAnthropicProviderOptions {
19
- anthropicToken: string;
20
- enableWebSearch?: boolean;
21
- searchDomainAllowList?: string[];
22
- searchDomainBlockList?: string[];
23
- extendedThinking?: 'quick' | 'normal' | 'deep' | 'off';
24
- }
25
-
26
- export class AnthropicProvider extends MultiModalModel {
27
- private options: IAnthropicProviderOptions;
28
- public anthropicApiClient: plugins.anthropic.default;
29
-
30
- constructor(optionsArg: IAnthropicProviderOptions) {
31
- super();
32
- this.options = optionsArg // Ensure the token is stored
33
- }
34
-
35
- async start() {
36
- await super.start();
37
- this.anthropicApiClient = new plugins.anthropic.default({
38
- apiKey: this.options.anthropicToken,
39
- });
40
- }
41
-
42
- async stop() {
43
- await super.stop();
44
- }
45
-
46
- /**
47
- * Returns the thinking configuration based on provider options.
48
- * Defaults to 'normal' mode (8000 tokens) if not specified.
49
- */
50
- private getThinkingConfig(): { type: 'enabled'; budget_tokens: number } | undefined {
51
- const mode = this.options.extendedThinking ?? 'normal';
52
-
53
- const budgetMap = {
54
- quick: 2048,
55
- normal: 8000,
56
- deep: 16000,
57
- off: 0,
58
- };
59
-
60
- const budget = budgetMap[mode];
61
-
62
- return budget > 0 ? { type: 'enabled', budget_tokens: budget } : undefined;
63
- }
64
-
65
- public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
66
- // Create a TextDecoder to handle incoming chunks
67
- const decoder = new TextDecoder();
68
- let buffer = '';
69
- let currentMessage: { role: string; content: string; } | null = null;
70
-
71
- // Create a TransformStream to process the input
72
- const transform = new TransformStream<Uint8Array, string>({
73
- async transform(chunk, controller) {
74
- buffer += decoder.decode(chunk, { stream: true });
75
-
76
- // Try to parse complete JSON messages from the buffer
77
- while (true) {
78
- const newlineIndex = buffer.indexOf('\n');
79
- if (newlineIndex === -1) break;
80
-
81
- const line = buffer.slice(0, newlineIndex);
82
- buffer = buffer.slice(newlineIndex + 1);
83
-
84
- if (line.trim()) {
85
- try {
86
- const message = JSON.parse(line);
87
- currentMessage = {
88
- role: message.role || 'user',
89
- content: message.content || '',
90
- };
91
- } catch (e) {
92
- console.error('Failed to parse message:', e);
93
- }
94
- }
95
- }
96
-
97
- // If we have a complete message, send it to Anthropic
98
- if (currentMessage) {
99
- const thinkingConfig = this.getThinkingConfig();
100
- const stream = await this.anthropicApiClient.messages.create({
101
- model: 'claude-sonnet-4-5-20250929',
102
- messages: [{ role: currentMessage.role, content: currentMessage.content }],
103
- system: '',
104
- stream: true,
105
- max_tokens: 20000,
106
- ...(thinkingConfig && { thinking: thinkingConfig }),
107
- });
108
-
109
- // Process each chunk from Anthropic
110
- for await (const chunk of stream) {
111
- const content = chunk.delta?.text;
112
- if (content) {
113
- controller.enqueue(content);
114
- }
115
- }
116
-
117
- currentMessage = null;
118
- }
119
- },
120
-
121
- flush(controller) {
122
- if (buffer) {
123
- try {
124
- const message = JSON.parse(buffer);
125
- controller.enqueue(message.content || '');
126
- } catch (e) {
127
- console.error('Failed to parse remaining buffer:', e);
128
- }
129
- }
130
- }
131
- });
132
-
133
- // Connect the input to our transform stream
134
- return input.pipeThrough(transform);
135
- }
136
-
137
- // Implementing the synchronous chat interaction
138
- public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
139
- // Convert message history to Anthropic format
140
- const messages = optionsArg.messageHistory.map(msg => ({
141
- role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
142
- content: msg.content
143
- }));
144
-
145
- const thinkingConfig = this.getThinkingConfig();
146
- const result = await this.anthropicApiClient.messages.create({
147
- model: 'claude-sonnet-4-5-20250929',
148
- system: optionsArg.systemMessage,
149
- messages: [
150
- ...messages,
151
- { role: 'user' as const, content: optionsArg.userMessage }
152
- ],
153
- max_tokens: 20000,
154
- ...(thinkingConfig && { thinking: thinkingConfig }),
155
- });
156
-
157
- // Extract text content from the response
158
- let message = '';
159
- for (const block of result.content) {
160
- if ('text' in block) {
161
- message += block.text;
162
- }
163
- }
164
-
165
- return {
166
- role: 'assistant' as const,
167
- message,
168
- };
169
- }
170
-
171
- public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
172
- // Anthropic does not provide an audio API, so this method is not implemented.
173
- throw new Error('Audio generation is not yet supported by Anthropic.');
174
- }
175
-
176
- public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
177
- const base64Image = optionsArg.image.toString('base64');
178
-
179
- const content: ContentBlock[] = [
180
- {
181
- type: 'text',
182
- text: optionsArg.prompt
183
- },
184
- {
185
- type: 'image',
186
- source: {
187
- type: 'base64',
188
- media_type: 'image/jpeg',
189
- data: base64Image
190
- }
191
- }
192
- ];
193
-
194
- const thinkingConfig = this.getThinkingConfig();
195
- const result = await this.anthropicApiClient.messages.create({
196
- model: 'claude-sonnet-4-5-20250929',
197
- messages: [{
198
- role: 'user',
199
- content
200
- }],
201
- max_tokens: 10000,
202
- ...(thinkingConfig && { thinking: thinkingConfig }),
203
- });
204
-
205
- // Extract text content from the response
206
- let message = '';
207
- for (const block of result.content) {
208
- if ('text' in block) {
209
- message += block.text;
210
- }
211
- }
212
- return message;
213
- }
214
-
215
- public async document(optionsArg: {
216
- systemMessage: string;
217
- userMessage: string;
218
- pdfDocuments: Uint8Array[];
219
- messageHistory: ChatMessage[];
220
- }): Promise<{ message: any }> {
221
- // Ensure SmartPdf is initialized before processing documents
222
- await this.ensureSmartpdfReady();
223
-
224
- // Convert PDF documents to images using SmartPDF
225
- let documentImageBytesArray: Uint8Array[] = [];
226
-
227
- for (const pdfDocument of optionsArg.pdfDocuments) {
228
- const documentImageArray = await this.smartpdfInstance!.convertPDFToPngBytes(pdfDocument);
229
- documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
230
- }
231
-
232
- // Convert message history to Anthropic format
233
- const messages = optionsArg.messageHistory.map(msg => ({
234
- role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
235
- content: msg.content
236
- }));
237
-
238
- // Create content array with text and images
239
- const content: ContentBlock[] = [
240
- {
241
- type: 'text',
242
- text: optionsArg.userMessage
243
- }
244
- ];
245
-
246
- // Add each document page as an image
247
- for (const imageBytes of documentImageBytesArray) {
248
- content.push({
249
- type: 'image',
250
- source: {
251
- type: 'base64',
252
- media_type: 'image/png',
253
- data: Buffer.from(imageBytes).toString('base64')
254
- }
255
- });
256
- }
257
-
258
- const thinkingConfig = this.getThinkingConfig();
259
- const result = await this.anthropicApiClient.messages.create({
260
- model: 'claude-sonnet-4-5-20250929',
261
- system: optionsArg.systemMessage,
262
- messages: [
263
- ...messages,
264
- { role: 'user', content }
265
- ],
266
- max_tokens: 20000,
267
- ...(thinkingConfig && { thinking: thinkingConfig }),
268
- });
269
-
270
- // Extract text content from the response
271
- let message = '';
272
- for (const block of result.content) {
273
- if ('text' in block) {
274
- message += block.text;
275
- }
276
- }
277
-
278
- return {
279
- message: {
280
- role: 'assistant',
281
- content: message
282
- }
283
- };
284
- }
285
-
286
- public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
287
- // Prepare the messages for the research request
288
- const systemMessage = `You are a research assistant with web search capabilities.
289
- Provide comprehensive, well-researched answers with citations and sources.
290
- When searching the web, be thorough and cite your sources accurately.`;
291
-
292
- try {
293
- // Build the tool configuration for web search
294
- const tools: any[] = [];
295
-
296
- if (this.options.enableWebSearch) {
297
- const webSearchTool: any = {
298
- type: 'web_search_20250305',
299
- name: 'web_search'
300
- };
301
-
302
- // Add optional parameters
303
- if (optionsArg.maxSources) {
304
- webSearchTool.max_uses = optionsArg.maxSources;
305
- }
306
-
307
- if (this.options.searchDomainAllowList?.length) {
308
- webSearchTool.allowed_domains = this.options.searchDomainAllowList;
309
- } else if (this.options.searchDomainBlockList?.length) {
310
- webSearchTool.blocked_domains = this.options.searchDomainBlockList;
311
- }
312
-
313
- tools.push(webSearchTool);
314
- }
315
-
316
- // Configure the request based on search depth
317
- const maxTokens = optionsArg.searchDepth === 'deep' ? 20000 :
318
- optionsArg.searchDepth === 'advanced' ? 20000 : 20000;
319
-
320
- // Add thinking configuration if enabled
321
- const thinkingConfig = this.getThinkingConfig();
322
-
323
- // Create the research request
324
- // Note: When thinking is enabled, temperature must be 1 (or omitted)
325
- const requestParams: any = {
326
- model: 'claude-sonnet-4-5-20250929',
327
- system: systemMessage,
328
- messages: [
329
- {
330
- role: 'user' as const,
331
- content: optionsArg.query
332
- }
333
- ],
334
- max_tokens: maxTokens,
335
- // Only set temperature when thinking is NOT enabled
336
- ...(thinkingConfig ? {} : { temperature: 0.7 })
337
- };
338
-
339
- // Add tools if web search is enabled
340
- if (tools.length > 0) {
341
- requestParams.tools = tools;
342
- }
343
-
344
- // Add thinking configuration if enabled
345
- if (thinkingConfig) {
346
- requestParams.thinking = thinkingConfig;
347
- }
348
-
349
- // Execute the research request
350
- const result = await this.anthropicApiClient.messages.create(requestParams);
351
-
352
- // Extract the answer from content blocks
353
- let answer = '';
354
- const sources: Array<{ url: string; title: string; snippet: string }> = [];
355
- const searchQueries: string[] = [];
356
-
357
- // Process content blocks
358
- for (const block of result.content) {
359
- if ('text' in block) {
360
- // Accumulate text content
361
- answer += block.text;
362
-
363
- // Extract citations if present
364
- if ('citations' in block && Array.isArray(block.citations)) {
365
- for (const citation of block.citations) {
366
- if (citation.type === 'web_search_result_location') {
367
- sources.push({
368
- title: citation.title || '',
369
- url: citation.url || '',
370
- snippet: citation.cited_text || ''
371
- });
372
- }
373
- }
374
- }
375
- } else if ('type' in block && block.type === 'server_tool_use') {
376
- // Extract search queries from server tool use
377
- if (block.name === 'web_search' && block.input && typeof block.input === 'object' && 'query' in block.input) {
378
- searchQueries.push((block.input as any).query);
379
- }
380
- } else if ('type' in block && block.type === 'web_search_tool_result') {
381
- // Extract sources from web search results
382
- if (Array.isArray(block.content)) {
383
- for (const result of block.content) {
384
- if (result.type === 'web_search_result') {
385
- // Only add if not already in sources (avoid duplicates from citations)
386
- if (!sources.some(s => s.url === result.url)) {
387
- sources.push({
388
- title: result.title || '',
389
- url: result.url || '',
390
- snippet: '' // Search results don't include snippets, only citations do
391
- });
392
- }
393
- }
394
- }
395
- }
396
- }
397
- }
398
-
399
- // Fallback: Parse markdown-style links if no citations found
400
- if (sources.length === 0) {
401
- const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
402
- let match: RegExpExecArray | null;
403
-
404
- while ((match = urlRegex.exec(answer)) !== null) {
405
- sources.push({
406
- title: match[1],
407
- url: match[2],
408
- snippet: ''
409
- });
410
- }
411
- }
412
-
413
- // Check if web search was used based on usage info
414
- const webSearchCount = result.usage?.server_tool_use?.web_search_requests || 0;
415
-
416
- return {
417
- answer,
418
- sources,
419
- searchQueries: searchQueries.length > 0 ? searchQueries : undefined,
420
- metadata: {
421
- model: 'claude-sonnet-4-5-20250929',
422
- searchDepth: optionsArg.searchDepth || 'basic',
423
- tokensUsed: result.usage?.output_tokens,
424
- webSearchesPerformed: webSearchCount
425
- }
426
- };
427
- } catch (error) {
428
- console.error('Anthropic research error:', error);
429
- throw new Error(`Failed to perform research: ${error.message}`);
430
- }
431
- }
432
-
433
- /**
434
- * Image generation is not supported by Anthropic
435
- */
436
- public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
437
- throw new Error('Image generation is not supported by Anthropic. Claude can only analyze images, not generate them. Please use OpenAI provider for image generation.');
438
- }
439
-
440
- /**
441
- * Image editing is not supported by Anthropic
442
- */
443
- public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
444
- throw new Error('Image editing is not supported by Anthropic. Claude can only analyze images, not edit them. Please use OpenAI provider for image editing.');
445
- }
446
- }
@@ -1,116 +0,0 @@
1
- import * as plugins from './plugins.js';
2
- import { Readable } from 'stream';
3
-
4
- import { MultiModalModel } from './abstract.classes.multimodal.js';
5
- import type {
6
- ChatOptions,
7
- ChatResponse,
8
- ResearchOptions,
9
- ResearchResponse,
10
- ImageGenerateOptions,
11
- ImageEditOptions,
12
- ImageResponse
13
- } from './abstract.classes.multimodal.js';
14
-
15
- export interface IElevenLabsProviderOptions {
16
- elevenlabsToken: string;
17
- defaultVoiceId?: string;
18
- defaultModelId?: string;
19
- }
20
-
21
- export interface IElevenLabsVoiceSettings {
22
- stability?: number;
23
- similarity_boost?: number;
24
- style?: number;
25
- use_speaker_boost?: boolean;
26
- }
27
-
28
- export class ElevenLabsProvider extends MultiModalModel {
29
- private options: IElevenLabsProviderOptions;
30
- private baseUrl: string = 'https://api.elevenlabs.io/v1';
31
-
32
- constructor(optionsArg: IElevenLabsProviderOptions) {
33
- super();
34
- this.options = optionsArg;
35
- }
36
-
37
- public async start() {
38
- await super.start();
39
- }
40
-
41
- public async stop() {
42
- await super.stop();
43
- }
44
-
45
- public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
46
- throw new Error('ElevenLabs does not support chat functionality. This provider is specialized for text-to-speech only.');
47
- }
48
-
49
- public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
50
- throw new Error('ElevenLabs does not support chat streaming functionality. This provider is specialized for text-to-speech only.');
51
- }
52
-
53
- public async audio(optionsArg: {
54
- message: string;
55
- voiceId?: string;
56
- modelId?: string;
57
- voiceSettings?: IElevenLabsVoiceSettings;
58
- }): Promise<NodeJS.ReadableStream> {
59
- // Use Samara voice as default fallback
60
- const voiceId = optionsArg.voiceId || this.options.defaultVoiceId || '19STyYD15bswVz51nqLf';
61
-
62
- const modelId = optionsArg.modelId || this.options.defaultModelId || 'eleven_v3';
63
-
64
- const url = `${this.baseUrl}/text-to-speech/${voiceId}`;
65
-
66
- const requestBody: any = {
67
- text: optionsArg.message,
68
- model_id: modelId,
69
- };
70
-
71
- if (optionsArg.voiceSettings) {
72
- requestBody.voice_settings = optionsArg.voiceSettings;
73
- }
74
-
75
- const response = await plugins.smartrequest.SmartRequest.create()
76
- .url(url)
77
- .header('xi-api-key', this.options.elevenlabsToken)
78
- .json(requestBody)
79
- .autoDrain(false)
80
- .post();
81
-
82
- if (!response.ok) {
83
- const errorText = await response.text();
84
- throw new Error(`ElevenLabs API error: ${response.status} ${response.statusText} - ${errorText}`);
85
- }
86
-
87
- const webStream = response.stream();
88
- const nodeStream = Readable.fromWeb(webStream as any);
89
- return nodeStream;
90
- }
91
-
92
- public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
93
- throw new Error('ElevenLabs does not support vision functionality. This provider is specialized for text-to-speech only.');
94
- }
95
-
96
- public async document(optionsArg: {
97
- systemMessage: string;
98
- userMessage: string;
99
- pdfDocuments: Uint8Array[];
100
- messageHistory: any[];
101
- }): Promise<{ message: any }> {
102
- throw new Error('ElevenLabs does not support document processing. This provider is specialized for text-to-speech only.');
103
- }
104
-
105
- public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
106
- throw new Error('ElevenLabs does not support research capabilities. This provider is specialized for text-to-speech only.');
107
- }
108
-
109
- public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
110
- throw new Error('ElevenLabs does not support image generation. This provider is specialized for text-to-speech only.');
111
- }
112
-
113
- public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
114
- throw new Error('ElevenLabs does not support image editing. This provider is specialized for text-to-speech only.');
115
- }
116
- }