@moontra/moonui-pro 2.8.3 → 2.8.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,377 @@
+// AI Provider Interfaces and Implementations
+
+export interface AIResponse {
+  text: string;
+  error?: string;
+}
+
+export interface AIProvider {
+  generateText(prompt: string): Promise<string>;
+  rewrite(text: string): Promise<string>;
+  expand(text: string): Promise<string>;
+  summarize(text: string): Promise<string>;
+  fixGrammar(text: string): Promise<string>;
+  translate(text: string, targetLang: string): Promise<string>;
+  changeTone(text: string, tone: string): Promise<string>;
+  continueWriting(text: string): Promise<string>;
+  improveWriting(text: string): Promise<string>;
+  generateIdeas(text: string): Promise<string>;
+  complete(text: string): Promise<string>;
+}
+
+export interface AIProviderConfig {
+  apiKey: string;
+  model?: string;
+  temperature?: number;
+  maxTokens?: number;
+}
+
+// Gemini Provider Implementation
+export class GeminiProvider implements AIProvider {
+  private apiKey: string;
+  private model: string;
+  private apiUrl = 'https://generativelanguage.googleapis.com/v1beta/models';
+
+  constructor(config: AIProviderConfig) {
+    this.apiKey = config.apiKey;
+    // Use gemini-2.0-flash as shown in the curl example
+    this.model = config.model || 'gemini-2.0-flash';
+  }
+
+  private async callGeminiAPI(prompt: string): Promise<string> {
+    try {
+      const response = await fetch(`${this.apiUrl}/${this.model}:generateContent`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+          'X-goog-api-key': this.apiKey
+        },
+        body: JSON.stringify({
+          contents: [{
+            parts: [{
+              text: prompt
+            }]
+          }]
+        })
+      });
+
+      if (!response.ok) {
+        const errorData = await response.text();
+        console.error('Gemini API error response:', errorData);
+        throw new Error(`Gemini API error: ${response.status} ${response.statusText} - ${errorData}`);
+      }
+
+      const data = await response.json();
+
+      // Check for API errors in response
+      if (data.error) {
+        throw new Error(`Gemini API error: ${data.error.message || JSON.stringify(data.error)}`);
+      }
+
+      // Check if content was blocked
+      if (data.candidates?.[0]?.finishReason === 'SAFETY') {
+        throw new Error('Content was blocked by safety filters');
+      }
+
+      const text = data.candidates?.[0]?.content?.parts?.[0]?.text;
+      if (!text) {
+        console.error('Gemini API response:', JSON.stringify(data, null, 2));
+        throw new Error('No text content in Gemini API response');
+      }
+
+      return text.trim();
+    } catch (error) {
+      console.error('Gemini API error details:', error);
+      if (error instanceof Error) {
+        throw error;
+      }
+      throw new Error('Unknown error occurred while calling Gemini API');
+    }
+  }
+
+  async generateText(prompt: string): Promise<string> {
+    return this.callGeminiAPI(prompt);
+  }
+
+  async rewrite(text: string): Promise<string> {
+    const prompt = `Rewrite the following text to make it clearer and more engaging while maintaining the same meaning. Only return the rewritten text, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async expand(text: string): Promise<string> {
+    const prompt = `Expand the following text with more details, examples, and explanations. Only return the expanded text, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async summarize(text: string): Promise<string> {
+    const prompt = `Summarize the following text concisely while keeping the main points. Only return the summary, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async fixGrammar(text: string): Promise<string> {
+    const prompt = `Fix any grammar and spelling errors in the following text. Only return the corrected text, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async translate(text: string, targetLang: string): Promise<string> {
+    const prompt = `Translate the following text to ${targetLang}. Only return the translation, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async changeTone(text: string, tone: string): Promise<string> {
+    const toneDescriptions: Record<string, string> = {
+      professional: 'professional and business-appropriate',
+      casual: 'casual and conversational',
+      friendly: 'warm and friendly',
+      formal: 'formal and academic'
+    };
+
+    const toneDesc = toneDescriptions[tone] || tone;
+    const prompt = `Rewrite the following text in a ${toneDesc} tone. Only return the rewritten text, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async continueWriting(text: string): Promise<string> {
+    const prompt = `Continue writing from where this text ends. Only return the continuation, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async improveWriting(text: string): Promise<string> {
+    const prompt = `Improve the following text by making it more compelling, clear, and well-structured. Only return the improved text, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async generateIdeas(text: string): Promise<string> {
+    const prompt = `Generate creative ideas and suggestions based on this topic. Format as a bullet list:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+
+  async complete(text: string): Promise<string> {
+    const prompt = `Complete this text naturally. Only return the completion, nothing else:\n\n${text}`;
+    return this.callGeminiAPI(prompt);
+  }
+}
+
+// OpenAI Provider Implementation
+export class OpenAIProvider implements AIProvider {
+  private apiKey: string;
+  private model: string;
+  private temperature: number;
+  private maxTokens: number;
+  private apiUrl = 'https://api.openai.com/v1/chat/completions';
+
+  constructor(config: AIProviderConfig) {
+    this.apiKey = config.apiKey;
+    this.model = config.model || 'gpt-3.5-turbo';
+    this.temperature = config.temperature || 0.7;
+    this.maxTokens = config.maxTokens || 1000;
+  }
+
+  private async callOpenAI(systemPrompt: string, userPrompt: string): Promise<string> {
+    try {
+      const response = await fetch(this.apiUrl, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+          'Authorization': `Bearer ${this.apiKey}`
+        },
+        body: JSON.stringify({
+          model: this.model,
+          messages: [
+            { role: 'system', content: systemPrompt },
+            { role: 'user', content: userPrompt }
+          ],
+          temperature: this.temperature,
+          max_tokens: this.maxTokens
+        })
+      });
+
+      if (!response.ok) {
+        throw new Error(`OpenAI API error: ${response.statusText}`);
+      }
+
+      const data = await response.json();
+      return data.choices?.[0]?.message?.content || '';
+    } catch (error) {
+      console.error('OpenAI API error:', error);
+      throw error;
+    }
+  }
+
+  async generateText(prompt: string): Promise<string> {
+    return this.callOpenAI('You are a helpful writing assistant.', prompt);
+  }
+
+  async rewrite(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a professional editor. Rewrite text to be clearer and more engaging.',
+      text
+    );
+  }
+
+  async expand(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a content writer. Expand the given text with more details and examples.',
+      text
+    );
+  }
+
+  async summarize(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a summarization expert. Create concise summaries.',
+      text
+    );
+  }
+
+  async fixGrammar(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a grammar expert. Fix all grammar and spelling errors.',
+      text
+    );
+  }
+
+  async translate(text: string, targetLang: string): Promise<string> {
+    return this.callOpenAI(
+      `You are a professional translator. Translate to ${targetLang}.`,
+      text
+    );
+  }
+
+  async changeTone(text: string, tone: string): Promise<string> {
+    return this.callOpenAI(
+      `You are a writing expert. Rewrite the text in a ${tone} tone.`,
+      text
+    );
+  }
+
+  async continueWriting(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a creative writer. Continue writing from where the text ends.',
+      text
+    );
+  }
+
+  async improveWriting(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a professional editor. Improve the text quality.',
+      text
+    );
+  }
+
+  async generateIdeas(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a creative consultant. Generate ideas based on the topic.',
+      text
+    );
+  }
+
+  async complete(text: string): Promise<string> {
+    return this.callOpenAI(
+      'You are a writing assistant. Complete the text naturally.',
+      text
+    );
+  }
+}
+
+// Claude Provider Implementation
+export class ClaudeProvider implements AIProvider {
+  private apiKey: string;
+  private model: string;
+  private apiUrl = 'https://api.anthropic.com/v1/messages';
+
+  constructor(config: AIProviderConfig) {
+    this.apiKey = config.apiKey;
+    this.model = config.model || 'claude-3-sonnet-20240229';
+  }
+
+  private async callClaude(prompt: string): Promise<string> {
+    try {
+      const response = await fetch(this.apiUrl, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+          'x-api-key': this.apiKey,
+          'anthropic-version': '2023-06-01'
+        },
+        body: JSON.stringify({
+          model: this.model,
+          max_tokens: 1000,
+          messages: [
+            { role: 'user', content: prompt }
+          ]
+        })
+      });
+
+      if (!response.ok) {
+        throw new Error(`Claude API error: ${response.statusText}`);
+      }
+
+      const data = await response.json();
+      return data.content?.[0]?.text || '';
+    } catch (error) {
+      console.error('Claude API error:', error);
+      throw error;
+    }
+  }
+
+  async generateText(prompt: string): Promise<string> {
+    return this.callClaude(prompt);
+  }
+
+  async rewrite(text: string): Promise<string> {
+    return this.callClaude(`Rewrite this text to be clearer and more engaging:\n\n${text}`);
+  }
+
+  async expand(text: string): Promise<string> {
+    return this.callClaude(`Expand this text with more details:\n\n${text}`);
+  }
+
+  async summarize(text: string): Promise<string> {
+    return this.callClaude(`Summarize this text concisely:\n\n${text}`);
+  }
+
+  async fixGrammar(text: string): Promise<string> {
+    return this.callClaude(`Fix grammar and spelling errors in:\n\n${text}`);
+  }
+
+  async translate(text: string, targetLang: string): Promise<string> {
+    return this.callClaude(`Translate to ${targetLang}:\n\n${text}`);
+  }
+
+  async changeTone(text: string, tone: string): Promise<string> {
+    return this.callClaude(`Rewrite in a ${tone} tone:\n\n${text}`);
+  }
+
+  async continueWriting(text: string): Promise<string> {
+    return this.callClaude(`Continue writing from:\n\n${text}`);
+  }
+
+  async improveWriting(text: string): Promise<string> {
+    return this.callClaude(`Improve this text:\n\n${text}`);
+  }
+
+  async generateIdeas(text: string): Promise<string> {
+    return this.callClaude(`Generate ideas for:\n\n${text}`);
+  }
+
+  async complete(text: string): Promise<string> {
+    return this.callClaude(`Complete:\n\n${text}`);
+  }
+}
+
+// Factory function to create AI provider instances
+export function createAIProvider(
+  provider: 'openai' | 'gemini' | 'claude',
+  config: AIProviderConfig
+): AIProvider {
+  switch (provider) {
+    case 'gemini':
+      return new GeminiProvider(config);
+    case 'openai':
+      return new OpenAIProvider(config);
+    case 'claude':
+      return new ClaudeProvider(config);
+    default:
+      throw new Error(`Unsupported AI provider: ${provider}`);
+  }
+}
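
For orientation, here is a minimal usage sketch of the factory exported by the added file. It is not part of the package diff above; the module path and the environment variable holding the API key are assumptions, since the diff does not show the file name or how the package re-exports it.

// Usage sketch (assumptions: a local module path './ai-providers' and a key
// supplied via the GEMINI_API_KEY environment variable — neither is specified
// by this diff).
import { createAIProvider } from './ai-providers';

async function main() {
  const ai = createAIProvider('gemini', {
    apiKey: process.env.GEMINI_API_KEY ?? ''
  });

  // Each AIProvider method resolves to the generated text, or throws if the
  // underlying API call fails.
  const fixed = await ai.fixGrammar('Their going to the store tomorow.');
  console.log(fixed);
}

main().catch(console.error);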