@moontra/moonui-pro 2.8.12 → 2.8.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -49963,31 +49963,31 @@ var GeminiProvider = class {
  return this.callGeminiAPI(prompt);
  }
  async rewrite(text) {
- const prompt = `Rewrite the following text to make it clearer and more engaging while maintaining the same meaning. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the rewritten text, nothing else:
+ const prompt = `Rewrite this text to be clearer and more engaging. Respond in the same language:

  ${text}`;
  return this.callGeminiAPI(prompt);
  }
  async expand(text) {
- const prompt = `Expand the following text with more details, examples, and explanations. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the expanded text, nothing else:
+ const prompt = `Expand this text with more details and examples. Respond in the same language:

  ${text}`;
  return this.callGeminiAPI(prompt);
  }
  async summarize(text) {
- const prompt = `Summarize the following text concisely while keeping the main points. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the summary, nothing else:
+ const prompt = `Create a detailed summary including key points, arguments, and examples. Respond in the same language:

  ${text}`;
  return this.callGeminiAPI(prompt);
  }
  async fixGrammar(text) {
- const prompt = `Fix any grammar and spelling errors in the following text. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the corrected text, nothing else:
+ const prompt = `Fix grammar and spelling errors. Respond in the same language:

  ${text}`;
  return this.callGeminiAPI(prompt);
  }
  async translate(text, targetLang) {
- const prompt = `Translate the following text to ${targetLang}. Only return the translation, nothing else:
+ const prompt = `Translate to ${targetLang}:

  ${text}`;
  return this.callGeminiAPI(prompt);
@@ -50071,19 +50071,19 @@ var OpenAIProvider = class {
  }
  async rewrite(text) {
  return this.callOpenAI(
- "You are a professional editor. Rewrite text to be clearer and more engaging. Always respond in the same language as the input text.",
+ "Rewrite to be clearer and more engaging. Same language as input.",
  text
  );
  }
  async expand(text) {
  return this.callOpenAI(
- "You are a content writer. Expand the given text with more details and examples. Always respond in the same language as the input text.",
+ "Expand with more details and examples. Same language as input.",
  text
  );
  }
  async summarize(text) {
  return this.callOpenAI(
- "You are a summarization expert. Create concise summaries. Always respond in the same language as the input text.",
+ "Create detailed summary with key points, arguments, and examples. Same language as input.",
  text
  );
  }
@@ -50179,7 +50179,7 @@ ${text}`);
  ${text}`);
  }
  async summarize(text) {
- return this.callClaude(`Summarize this text concisely. IMPORTANT: Respond in the SAME LANGUAGE as the input text:
+ return this.callClaude(`Create detailed summary with key points, arguments, and examples. Same language as input:

  ${text}`);
  }
@@ -50475,6 +50475,10 @@ function RichTextEditor({
  const [previewContent, setPreviewContent] = useState("");
  const [previewAction, setPreviewAction] = useState("");
  const [previewOriginalText, setPreviewOriginalText] = useState("");
+ const [wordCount, setWordCount] = useState(0);
+ const [characterCount, setCharacterCount] = useState(0);
+ const [tokensUsed, setTokensUsed] = useState(0);
+ const [lastAIResponse, setLastAIResponse] = useState(null);
  useEffect(() => {
  return () => {
  if (typingIntervalRef.current) {
@@ -50500,6 +50504,28 @@ function RichTextEditor({
  const modalActions = ["expand", "summarize", "ideas", "continue"];
  return modalActions.includes(action);
  };
+ const countWords = (text) => {
+ return text.trim().split(/\s+/).filter((word) => word.length > 0).length;
+ };
+ const countCharacters = (text) => {
+ return text.length;
+ };
+ const estimateTokens = (text) => {
+ return Math.ceil(text.length / 4);
+ };
+ const optimizeTextForAI = (text, action) => {
+ const maxTokens = 2e3;
+ const estimatedTokens = estimateTokens(text);
+ if (estimatedTokens <= maxTokens) {
+ return text;
+ }
+ if (action === "summarize") {
+ const maxChars2 = maxTokens * 4;
+ return text.substring(0, maxChars2) + "...";
+ }
+ const maxChars = maxTokens * 4 / 2;
+ return text.substring(0, maxChars) + "...";
+ };
  const applyAIContentToEditor = (content, replaceSelection = true) => {
  if (!editor)
  return;
@@ -50514,7 +50540,7 @@ function RichTextEditor({
  }
  }
  let currentIndex = 0;
- const typeSpeed = 30;
+ const typeSpeed = 5;
  const typeNextChar = () => {
  if (currentIndex < content.length && isTypingRef.current) {
  const nextChar = content[currentIndex];
@@ -50734,6 +50760,26 @@ function RichTextEditor({
  },
  immediatelyRender: false
  });
+ useEffect(() => {
+ if (editor) {
+ const text = editor.getText();
+ setWordCount(countWords(text));
+ setCharacterCount(countCharacters(text));
+ }
+ }, [editor?.state.doc.content]);
+ useEffect(() => {
+ if (editor) {
+ const updateStats = () => {
+ const text = editor.getText();
+ setWordCount(countWords(text));
+ setCharacterCount(countCharacters(text));
+ };
+ editor.on("update", updateStats);
+ return () => {
+ editor.off("update", updateStats);
+ };
+ }
+ }, [editor]);
  const callAI = async (action, text, targetLanguage) => {
  if (!aiSettings.apiKey) {
  toast({
@@ -50750,47 +50796,64 @@ function RichTextEditor({
  if (!provider) {
  throw new Error("Failed to initialize AI provider");
  }
+ const optimizedText = optimizeTextForAI(text, action);
+ const inputTokens = estimateTokens(optimizedText);
+ if (optimizedText !== text) {
+ toast({
+ title: "Text optimized",
+ description: `Long text was shortened to ${inputTokens} tokens for better efficiency.`
+ });
+ }
  let response;
  switch (action) {
  case "rewrite":
- response = await provider.rewrite(text);
+ response = await provider.rewrite(optimizedText);
  break;
  case "expand":
- response = await provider.expand(text);
+ response = await provider.expand(optimizedText);
  break;
  case "summarize":
- response = await provider.summarize(text);
+ response = await provider.summarize(optimizedText);
  break;
  case "fix":
- response = await provider.fixGrammar(text);
+ response = await provider.fixGrammar(optimizedText);
  break;
  case "translate":
- response = await provider.translate(text, targetLanguage || "English");
+ response = await provider.translate(optimizedText, targetLanguage || "English");
  break;
  case "tone_professional":
- response = await provider.changeTone(text, "professional");
+ response = await provider.changeTone(optimizedText, "professional");
  break;
  case "tone_casual":
- response = await provider.changeTone(text, "casual");
+ response = await provider.changeTone(optimizedText, "casual");
  break;
  case "tone_friendly":
- response = await provider.changeTone(text, "friendly");
+ response = await provider.changeTone(optimizedText, "friendly");
  break;
  case "tone_formal":
- response = await provider.changeTone(text, "formal");
+ response = await provider.changeTone(optimizedText, "formal");
  break;
  case "continue":
- response = await provider.continueWriting(text);
+ response = await provider.continueWriting(optimizedText);
  break;
  case "improve":
- response = await provider.improveWriting(text);
+ response = await provider.improveWriting(optimizedText);
  break;
  case "ideas":
- response = await provider.generateIdeas(text);
+ response = await provider.generateIdeas(optimizedText);
  break;
  default:
- response = await provider.complete(text);
- }
+ response = await provider.complete(optimizedText);
+ }
+ const outputTokens = estimateTokens(response);
+ const totalTokens = inputTokens + outputTokens;
+ setTokensUsed((prev) => prev + totalTokens);
+ setLastAIResponse({
+ action,
+ tokens: totalTokens,
+ model: aiSettings.model,
+ timestamp: /* @__PURE__ */ new Date()
+ });
  return response;
  } catch (error) {
  toast({
@@ -50808,10 +50871,12 @@ function RichTextEditor({
  return;
  const selection = editor.state.selection;
  const selectedText = editor.state.doc.textBetween(selection.from, selection.to, " ");
- if (!selectedText && action !== "complete") {
+ const fullText = editor.getText();
+ const textToProcess = selectedText || fullText;
+ if (!textToProcess && action !== "complete") {
  toast({
- title: "No text selected",
- description: "Please select some text first.",
+ title: "No content available",
+ description: "Please write some text first.",
  variant: "destructive"
  });
  return;
@@ -50823,13 +50888,13 @@ function RichTextEditor({
  duration: 6e4
  // Long duration
  });
- const result = await callAI(action, selectedText || editor.getText(), targetLanguage);
+ const result = await callAI(action, textToProcess, targetLanguage);
  processingToast.dismiss();
  if (result) {
  if (shouldUseModal(action)) {
  setPreviewContent(result);
  setPreviewAction(action);
- setPreviewOriginalText(selectedText || editor.getText());
+ setPreviewOriginalText(textToProcess);
  setIsAiPreviewOpen(true);
  } else {
  applyAIContentToEditor(result, !!selectedText);
@@ -52069,7 +52134,51 @@ function RichTextEditor({
  )
  ] })
  }
- )
+ ),
+ /* @__PURE__ */ jsx("div", { className: "border-t bg-gray-50 dark:bg-gray-900 px-4 py-2", children: /* @__PURE__ */ jsxs("div", { className: "flex items-center justify-between text-xs text-muted-foreground", children: [
+ /* @__PURE__ */ jsxs("div", { className: "flex items-center gap-6", children: [
+ /* @__PURE__ */ jsxs("div", { className: "flex items-center gap-1", children: [
+ /* @__PURE__ */ jsx(FileText, { className: "w-3 h-3" }),
+ /* @__PURE__ */ jsxs("span", { children: [
+ wordCount,
+ " words, ",
+ characterCount,
+ " characters"
+ ] })
+ ] }),
+ lastAIResponse && /* @__PURE__ */ jsxs("div", { className: "flex items-center gap-1", children: [
+ /* @__PURE__ */ jsx(Wand2, { className: "w-3 h-3 text-purple-600 dark:text-purple-400" }),
+ /* @__PURE__ */ jsxs("span", { children: [
+ "Last: ",
+ lastAIResponse.action,
+ " | ",
+ lastAIResponse.tokens,
+ " tokens | ",
+ lastAIResponse.model
+ ] })
+ ] })
+ ] }),
+ /* @__PURE__ */ jsxs("div", { className: "flex items-center gap-4", children: [
+ tokensUsed > 0 && /* @__PURE__ */ jsxs("div", { className: "flex items-center gap-1", children: [
+ /* @__PURE__ */ jsx(Zap, { className: "w-3 h-3 text-orange-500" }),
+ /* @__PURE__ */ jsxs("span", { className: "font-medium", children: [
+ tokensUsed.toLocaleString(),
+ " tokens used"
+ ] })
+ ] }),
+ /* @__PURE__ */ jsxs("div", { className: "flex items-center gap-1", children: [
+ /* @__PURE__ */ jsx("div", { className: cn(
+ "w-2 h-2 rounded-full",
+ aiSettings.apiKey ? "bg-green-500" : "bg-red-500"
+ ) }),
+ /* @__PURE__ */ jsxs("span", { className: "capitalize", children: [
+ aiSettings.provider,
+ " ",
+ aiSettings.apiKey ? "connected" : "disconnected"
+ ] })
+ ] })
+ ] })
+ ] }) })
  ] });
  }
  var MemoryCache = class {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@moontra/moonui-pro",
- "version": "2.8.12",
+ "version": "2.8.14",
  "description": "Premium React components for MoonUI - Advanced UI library with 50+ pro components including performance, interactive, and gesture components",
  "type": "module",
  "main": "dist/index.mjs",
@@ -449,6 +449,17 @@ export function RichTextEditor({
  const [previewAction, setPreviewAction] = useState('');
  const [previewOriginalText, setPreviewOriginalText] = useState('');

+ // Statistics states
+ const [wordCount, setWordCount] = useState(0);
+ const [characterCount, setCharacterCount] = useState(0);
+ const [tokensUsed, setTokensUsed] = useState(0);
+ const [lastAIResponse, setLastAIResponse] = useState<{
+ action: string;
+ tokens: number;
+ model: string;
+ timestamp: Date;
+ } | null>(null);
+
  // Clean up typewriter effect on unmount
  useEffect(() => {
  return () => {
@@ -480,6 +491,41 @@ export function RichTextEditor({
  return modalActions.includes(action);
  };

+ // Utility functions for statistics
+ const countWords = (text: string): number => {
+ return text.trim().split(/\s+/).filter(word => word.length > 0).length;
+ };
+
+ const countCharacters = (text: string): number => {
+ return text.length;
+ };
+
+ // Approximate token count (rough estimation: 1 token ≈ 4 characters)
+ const estimateTokens = (text: string): number => {
+ return Math.ceil(text.length / 4);
+ };
+
+ // Optimize text for token efficiency
+ const optimizeTextForAI = (text: string, action: string): string => {
+ const maxTokens = 2000; // Safe limit for most providers
+ const estimatedTokens = estimateTokens(text);
+
+ if (estimatedTokens <= maxTokens) {
+ return text;
+ }
+
+ // For summarize, we can use more text since output will be shorter
+ if (action === 'summarize') {
+ const maxChars = maxTokens * 4;
+ return text.substring(0, maxChars) + '...';
+ }
+
+ // For other actions, limit to smaller chunks
+ const maxChars = (maxTokens * 4) / 2;
+ return text.substring(0, maxChars) + '...';
+ };
+
+
  // Apply AI content to editor with typewriter effect
  const applyAIContentToEditor = (content: string, replaceSelection: boolean = true) => {
  if (!editor) return;
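
The helpers added in the hunk above describe their heuristic only in passing ("1 token ≈ 4 characters", a 2,000-token budget). As an editorial sketch in TypeScript, not part of the diff, the same heuristic can be written standalone; the worked numbers in the comments are illustrative assumptions, not figures from the package:

// Sketch of the estimation/truncation heuristic used by the added helpers.
const estimateTokens = (text: string): number => Math.ceil(text.length / 4);

const optimizeTextForAI = (text: string, action: string): string => {
  const maxTokens = 2000; // same budget the hunk above uses
  if (estimateTokens(text) <= maxTokens) return text;
  // Summaries keep the full 8,000-character window; other actions get half of it.
  const maxChars = action === 'summarize' ? maxTokens * 4 : (maxTokens * 4) / 2;
  return text.substring(0, maxChars) + '...';
};

// Illustrative example: a 10,000-character input estimates to 2,500 tokens, so
// 'rewrite' would be truncated to 4,000 characters and 'summarize' to 8,000.
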
@@ -498,7 +544,7 @@ export function RichTextEditor({
  }

  let currentIndex = 0;
- const typeSpeed = 30; // ms per character
+ const typeSpeed = 5; // ms per character - much faster for better UX

  const typeNextChar = () => {
  if (currentIndex < content.length && isTypingRef.current) {
@@ -732,6 +778,31 @@ export function RichTextEditor({
  immediatelyRender: false,
  });

+ // Update statistics when editor content changes
+ useEffect(() => {
+ if (editor) {
+ const text = editor.getText();
+ setWordCount(countWords(text));
+ setCharacterCount(countCharacters(text));
+ }
+ }, [editor?.state.doc.content]);
+
+ // Listen to editor updates
+ useEffect(() => {
+ if (editor) {
+ const updateStats = () => {
+ const text = editor.getText();
+ setWordCount(countWords(text));
+ setCharacterCount(countCharacters(text));
+ };
+
+ editor.on('update', updateStats);
+ return () => {
+ editor.off('update', updateStats);
+ };
+ }
+ }, [editor]);
+
  // AI işlevleri
  const callAI = async (action: string, text: string, targetLanguage?: string) => {
  if (!aiSettings.apiKey) {
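
The two effects added above keep the word and character counters in sync through TipTap's event emitter. A minimal self-contained sketch of the same pattern (assuming TipTap's Editor#on('update')/off and getText() APIs, which the added code itself relies on; the hook name useEditorStats is hypothetical) could look like this:

import { useEffect, useState } from 'react';
import type { Editor } from '@tiptap/core';

// Sketch: subscribe to editor updates and derive word/character counts from plain text.
function useEditorStats(editor: Editor | null) {
  const [stats, setStats] = useState({ words: 0, characters: 0 });

  useEffect(() => {
    if (!editor) return;
    const update = () => {
      const text = editor.getText();
      const words = text.trim().split(/\s+/).filter((w) => w.length > 0).length;
      setStats({ words, characters: text.length });
    };
    update();                    // initialize once on mount
    editor.on('update', update); // re-run on every document change
    return () => {
      editor.off('update', update);
    };
  }, [editor]);

  return stats;
}

In this sketch the initial computation and the subscription share one effect, so no separate dependency on the document object is needed.
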
@@ -745,55 +816,82 @@ export function RichTextEditor({
  }

  setIsProcessing(true);
+ const startTime = Date.now();
+
  try {
  const provider = getAIProvider(aiSettings);
  if (!provider) {
  throw new Error('Failed to initialize AI provider');
  }

+ // Optimize text for better token efficiency
+ const optimizedText = optimizeTextForAI(text, action);
+ const inputTokens = estimateTokens(optimizedText);
+
+ // Show warning if text was truncated
+ if (optimizedText !== text) {
+ toast({
+ title: "Text optimized",
+ description: `Long text was shortened to ${inputTokens} tokens for better efficiency.`,
+ });
+ }
+
  let response: string;

  switch (action) {
  case 'rewrite':
- response = await provider.rewrite(text);
+ response = await provider.rewrite(optimizedText);
  break;
  case 'expand':
- response = await provider.expand(text);
+ response = await provider.expand(optimizedText);
  break;
  case 'summarize':
- response = await provider.summarize(text);
+ response = await provider.summarize(optimizedText);
  break;
  case 'fix':
- response = await provider.fixGrammar(text);
+ response = await provider.fixGrammar(optimizedText);
  break;
  case 'translate':
- response = await provider.translate(text, targetLanguage || 'English');
+ response = await provider.translate(optimizedText, targetLanguage || 'English');
  break;
  case 'tone_professional':
- response = await provider.changeTone(text, 'professional');
+ response = await provider.changeTone(optimizedText, 'professional');
  break;
  case 'tone_casual':
- response = await provider.changeTone(text, 'casual');
+ response = await provider.changeTone(optimizedText, 'casual');
  break;
  case 'tone_friendly':
- response = await provider.changeTone(text, 'friendly');
+ response = await provider.changeTone(optimizedText, 'friendly');
  break;
  case 'tone_formal':
- response = await provider.changeTone(text, 'formal');
+ response = await provider.changeTone(optimizedText, 'formal');
  break;
  case 'continue':
- response = await provider.continueWriting(text);
+ response = await provider.continueWriting(optimizedText);
  break;
  case 'improve':
- response = await provider.improveWriting(text);
+ response = await provider.improveWriting(optimizedText);
  break;
  case 'ideas':
- response = await provider.generateIdeas(text);
+ response = await provider.generateIdeas(optimizedText);
  break;
  default:
- response = await provider.complete(text);
+ response = await provider.complete(optimizedText);
  }

+ // Estimate output tokens and total usage
+ const outputTokens = estimateTokens(response);
+ const totalTokens = inputTokens + outputTokens;
+
+ // Update token usage statistics
+ setTokensUsed(prev => prev + totalTokens);
+ setLastAIResponse({
+ action,
+ tokens: totalTokens,
+ model: aiSettings.model,
+ timestamp: new Date()
+ });
+
  return response;
  } catch (error) {
  toast({
@@ -812,11 +910,15 @@ export function RichTextEditor({

  const selection = editor.state.selection;
  const selectedText = editor.state.doc.textBetween(selection.from, selection.to, ' ');
+ const fullText = editor.getText();
+
+ // If no text is selected, use the full editor content as prompt
+ const textToProcess = selectedText || fullText;

- if (!selectedText && action !== 'complete') {
+ if (!textToProcess && action !== 'complete') {
  toast({
- title: "No text selected",
- description: "Please select some text first.",
+ title: "No content available",
+ description: "Please write some text first.",
  variant: "destructive",
  });
  return;
@@ -832,7 +934,7 @@ export function RichTextEditor({
  duration: 60000, // Long duration
  });

- const result = await callAI(action, selectedText || editor.getText(), targetLanguage);
+ const result = await callAI(action, textToProcess, targetLanguage);

  // Dismiss processing toast
  processingToast.dismiss();
@@ -843,7 +945,7 @@ export function RichTextEditor({
  // Open preview modal instead of directly applying
  setPreviewContent(result);
  setPreviewAction(action);
- setPreviewOriginalText(selectedText || editor.getText());
+ setPreviewOriginalText(textToProcess);
  setIsAiPreviewOpen(true);
  } else {
  // Apply directly with typewriter effect (for rewrite, fix grammar, tone changes, etc.)
@@ -2173,6 +2275,48 @@ export function RichTextEditor({
  </>
  )}
  </div>
+
+ {/* Statistics Bar */}
+ <div className="border-t bg-gray-50 dark:bg-gray-900 px-4 py-2">
+ <div className="flex items-center justify-between text-xs text-muted-foreground">
+ <div className="flex items-center gap-6">
+ <div className="flex items-center gap-1">
+ <FileText className="w-3 h-3" />
+ <span>{wordCount} words, {characterCount} characters</span>
+ </div>
+
+ {lastAIResponse && (
+ <div className="flex items-center gap-1">
+ <Wand2 className="w-3 h-3 text-purple-600 dark:text-purple-400" />
+ <span>
+ Last: {lastAIResponse.action} | {lastAIResponse.tokens} tokens | {lastAIResponse.model}
+ </span>
+ </div>
+ )}
+ </div>
+
+ <div className="flex items-center gap-4">
+ {tokensUsed > 0 && (
+ <div className="flex items-center gap-1">
+ <Zap className="w-3 h-3 text-orange-500" />
+ <span className="font-medium">
+ {tokensUsed.toLocaleString()} tokens used
+ </span>
+ </div>
+ )}
+
+ <div className="flex items-center gap-1">
+ <div className={cn(
+ "w-2 h-2 rounded-full",
+ aiSettings.apiKey ? "bg-green-500" : "bg-red-500"
+ )} />
+ <span className="capitalize">
+ {aiSettings.provider} {aiSettings.apiKey ? "connected" : "disconnected"}
+ </span>
+ </div>
+ </div>
+ </div>
+ </div>
  </div>
  );
  }
@@ -94,27 +94,27 @@ export class GeminiProvider implements AIProvider {
  }

  async rewrite(text: string): Promise<string> {
- const prompt = `Rewrite the following text to make it clearer and more engaging while maintaining the same meaning. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the rewritten text, nothing else:\n\n${text}`;
+ const prompt = `Rewrite this text to be clearer and more engaging. Respond in the same language:\n\n${text}`;
  return this.callGeminiAPI(prompt);
  }

  async expand(text: string): Promise<string> {
- const prompt = `Expand the following text with more details, examples, and explanations. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the expanded text, nothing else:\n\n${text}`;
+ const prompt = `Expand this text with more details and examples. Respond in the same language:\n\n${text}`;
  return this.callGeminiAPI(prompt);
  }

  async summarize(text: string): Promise<string> {
- const prompt = `Summarize the following text concisely while keeping the main points. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the summary, nothing else:\n\n${text}`;
+ const prompt = `Create a detailed summary including key points, arguments, and examples. Respond in the same language:\n\n${text}`;
  return this.callGeminiAPI(prompt);
  }

  async fixGrammar(text: string): Promise<string> {
- const prompt = `Fix any grammar and spelling errors in the following text. IMPORTANT: Respond in the SAME LANGUAGE as the input text. Only return the corrected text, nothing else:\n\n${text}`;
+ const prompt = `Fix grammar and spelling errors. Respond in the same language:\n\n${text}`;
  return this.callGeminiAPI(prompt);
  }

  async translate(text: string, targetLang: string): Promise<string> {
- const prompt = `Translate the following text to ${targetLang}. Only return the translation, nothing else:\n\n${text}`;
+ const prompt = `Translate to ${targetLang}:\n\n${text}`;
  return this.callGeminiAPI(prompt);
  }

@@ -204,21 +204,21 @@ export class OpenAIProvider implements AIProvider {

  async rewrite(text: string): Promise<string> {
  return this.callOpenAI(
- 'You are a professional editor. Rewrite text to be clearer and more engaging. Always respond in the same language as the input text.',
+ 'Rewrite to be clearer and more engaging. Same language as input.',
  text
  );
  }

  async expand(text: string): Promise<string> {
  return this.callOpenAI(
- 'You are a content writer. Expand the given text with more details and examples. Always respond in the same language as the input text.',
+ 'Expand with more details and examples. Same language as input.',
  text
  );
  }

  async summarize(text: string): Promise<string> {
  return this.callOpenAI(
- 'You are a summarization expert. Create concise summaries. Always respond in the same language as the input text.',
+ 'Create detailed summary with key points, arguments, and examples. Same language as input.',
  text
  );
  }
@@ -327,7 +327,7 @@ export class ClaudeProvider implements AIProvider {
  }

  async summarize(text: string): Promise<string> {
- return this.callClaude(`Summarize this text concisely. IMPORTANT: Respond in the SAME LANGUAGE as the input text:\n\n${text}`);
+ return this.callClaude(`Create detailed summary with key points, arguments, and examples. Same language as input:\n\n${text}`);
  }

  async fixGrammar(text: string): Promise<string> {
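
The hunks in this diff call the providers through a shared method surface. As an orientation aid only, the interface below is reconstructed from the calls visible above (rewrite, expand, summarize, fixGrammar, translate, changeTone, continueWriting, improveWriting, generateIdeas, complete); the package's actual AIProvider declaration may differ in detail:

// Reconstructed from the calls in this diff; signatures are assumptions, not the package's own typings.
interface AIProvider {
  complete(text: string): Promise<string>;
  rewrite(text: string): Promise<string>;
  expand(text: string): Promise<string>;
  summarize(text: string): Promise<string>;
  fixGrammar(text: string): Promise<string>;
  translate(text: string, targetLang: string): Promise<string>;
  changeTone(text: string, tone: string): Promise<string>;
  continueWriting(text: string): Promise<string>;
  improveWriting(text: string): Promise<string>;
  generateIdeas(text: string): Promise<string>;
}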