@jupyterlite/ai 0.7.0 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/lib/base-completer.d.ts +23 -1
  2. package/lib/base-completer.js +14 -1
  3. package/lib/chat-handler.d.ts +4 -6
  4. package/lib/chat-handler.js +22 -22
  5. package/lib/completion-provider.js +1 -1
  6. package/lib/default-prompts.d.ts +2 -0
  7. package/lib/default-prompts.js +31 -0
  8. package/lib/default-providers/Anthropic/completer.d.ts +4 -9
  9. package/lib/default-providers/Anthropic/completer.js +4 -13
  10. package/lib/default-providers/ChromeAI/completer.d.ts +4 -9
  11. package/lib/default-providers/ChromeAI/completer.js +4 -13
  12. package/lib/default-providers/ChromeAI/settings-schema.json +0 -3
  13. package/lib/default-providers/Gemini/completer.d.ts +12 -0
  14. package/lib/default-providers/Gemini/completer.js +48 -0
  15. package/lib/default-providers/Gemini/instructions.d.ts +2 -0
  16. package/lib/default-providers/Gemini/instructions.js +9 -0
  17. package/lib/default-providers/Gemini/settings-schema.json +64 -0
  18. package/lib/default-providers/MistralAI/completer.d.ts +10 -11
  19. package/lib/default-providers/MistralAI/completer.js +41 -50
  20. package/lib/default-providers/MistralAI/instructions.d.ts +1 -1
  21. package/lib/default-providers/MistralAI/instructions.js +2 -0
  22. package/lib/default-providers/Ollama/completer.d.ts +4 -9
  23. package/lib/default-providers/Ollama/completer.js +7 -13
  24. package/lib/default-providers/Ollama/settings-schema.json +1 -4
  25. package/lib/default-providers/OpenAI/completer.d.ts +4 -9
  26. package/lib/default-providers/OpenAI/completer.js +7 -13
  27. package/lib/default-providers/OpenAI/settings-schema.json +88 -128
  28. package/lib/default-providers/WebLLM/completer.d.ts +3 -9
  29. package/lib/default-providers/WebLLM/completer.js +4 -13
  30. package/lib/default-providers/WebLLM/settings-schema.json +1 -3
  31. package/lib/default-providers/index.js +23 -19
  32. package/lib/index.d.ts +1 -0
  33. package/lib/index.js +68 -14
  34. package/lib/provider.d.ts +39 -11
  35. package/lib/provider.js +166 -81
  36. package/lib/settings/index.d.ts +1 -0
  37. package/lib/settings/index.js +1 -0
  38. package/lib/settings/panel.d.ts +116 -8
  39. package/lib/settings/panel.js +117 -22
  40. package/lib/settings/textarea.d.ts +2 -0
  41. package/lib/settings/textarea.js +18 -0
  42. package/lib/tokens.d.ts +24 -20
  43. package/lib/tokens.js +2 -1
  44. package/package.json +10 -9
  45. package/schema/chat.json +1 -1
  46. package/schema/provider-registry.json +11 -5
  47. package/schema/system-prompts.json +22 -0
  48. package/src/base-completer.ts +39 -1
  49. package/src/chat-handler.ts +23 -25
  50. package/src/completion-provider.ts +1 -1
  51. package/src/default-prompts.ts +33 -0
  52. package/src/default-providers/Anthropic/completer.ts +5 -16
  53. package/src/default-providers/ChromeAI/completer.ts +5 -16
  54. package/src/default-providers/Gemini/completer.ts +61 -0
  55. package/src/default-providers/Gemini/instructions.ts +9 -0
  56. package/src/default-providers/MistralAI/completer.ts +47 -60
  57. package/src/default-providers/MistralAI/instructions.ts +2 -0
  58. package/src/default-providers/Ollama/completer.ts +8 -16
  59. package/src/default-providers/OpenAI/completer.ts +8 -16
  60. package/src/default-providers/WebLLM/completer.ts +5 -16
  61. package/src/default-providers/index.ts +23 -20
  62. package/src/index.ts +95 -15
  63. package/src/provider.ts +198 -94
  64. package/src/settings/index.ts +1 -0
  65. package/src/settings/panel.tsx +262 -34
  66. package/src/settings/textarea.tsx +33 -0
  67. package/src/tokens.ts +26 -22
  68. package/style/base.css +29 -0
package/lib/default-providers/MistralAI/completer.js
@@ -1,61 +1,52 @@
-import { HumanMessage, SystemMessage } from '@langchain/core/messages';
-import { ChatMistralAI } from '@langchain/mistralai';
-import { Throttler } from '@lumino/polling';
-import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
-/**
- * The Mistral API has a rate limit of 1 request per second
- */
-const INTERVAL = 1000;
-export class CodestralCompleter {
+import { MistralAI } from '@langchain/mistralai';
+import { BaseCompleter } from '../../base-completer';
+const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
+const CODE_BLOCK_END_REGEX = /```$/;
+export class CodestralCompleter extends BaseCompleter {
     constructor(options) {
-        this._completer = new ChatMistralAI({ ...options.settings });
-        this._throttler = new Throttler(async (messages) => {
-            const response = await this._completer.invoke(messages);
-            // Extract results of completion request.
-            const items = [];
-            if (typeof response.content === 'string') {
-                items.push({
-                    insertText: response.content
-                });
-            }
-            else {
-                response.content.forEach(content => {
-                    if (content.type !== 'text') {
-                        return;
-                    }
-                    items.push({
-                        insertText: content.text
-                    });
-                });
-            }
-            return { items };
-        }, { limit: INTERVAL });
-    }
-    /**
-     * Getter and setter for the initial prompt.
-     */
-    get prompt() {
-        return this._prompt;
-    }
-    set prompt(value) {
-        this._prompt = value;
+        super(options);
+        this._completer = new MistralAI({ ...options.settings });
     }
     async fetch(request, context) {
-        const { text, offset: cursorOffset } = request;
-        const prompt = text.slice(0, cursorOffset);
-        const messages = [
-            new SystemMessage(this._prompt),
-            new HumanMessage(prompt)
-        ];
         try {
-            return await this._throttler.invoke(messages);
+            const { text, offset: cursorOffset } = request;
+            const prompt = this.systemPrompt.concat(text.slice(0, cursorOffset));
+            const suffix = text.slice(cursorOffset);
+            this._controller.abort();
+            this._controller = new AbortController();
+            const response = await this._completer.completionWithRetry({
+                prompt,
+                model: this._completer.model,
+                suffix
+            }, { signal: this._controller.signal }, false);
+            const items = response.choices.map(choice => {
+                const messageContent = choice.message.content;
+                let content = '';
+                if (typeof messageContent === 'string') {
+                    content = messageContent
+                        .replace(CODE_BLOCK_START_REGEX, '')
+                        .replace(CODE_BLOCK_END_REGEX, '');
+                }
+                else if (Array.isArray(messageContent)) {
+                    // Handle ContentChunk[] case - extract text content
+                    content = messageContent
+                        .filter(chunk => chunk.type === 'text')
+                        .map(chunk => chunk.text || '')
+                        .join('')
+                        .replace(CODE_BLOCK_START_REGEX, '')
+                        .replace(CODE_BLOCK_END_REGEX, '');
+                }
+                return {
+                    insertText: content
+                };
+            });
+            return { items };
         }
         catch (error) {
-            console.error('Error fetching completions', error);
+            // the request may be aborted
            return { items: [] };
        }
    }
-    _throttler;
+    _controller = new AbortController();
    _completer;
-    _prompt = COMPLETION_SYSTEM_PROMPT;
 }
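
The substance of this rewrite: the completer drops the one-request-per-second `Throttler` around a chat model and instead calls Mistral's fill-in-the-middle (FIM) completion endpoint, passing the text before the cursor as `prompt` and the text after it as `suffix`, and aborting any in-flight request before starting a new one. A minimal standalone sketch of that cancel-then-fetch pattern, assuming the same `completionWithRetry` signature the diff uses (the class name, model name, and key are placeholders):

```typescript
import { MistralAI } from '@langchain/mistralai';

class FimCompleter {
  private _controller = new AbortController();

  constructor(private readonly _llm: MistralAI) {}

  async complete(text: string, cursorOffset: number): Promise<string[]> {
    // Cancel the previous request so only the latest completion lands.
    this._controller.abort();
    this._controller = new AbortController();

    const response = await this._llm.completionWithRetry(
      {
        prompt: text.slice(0, cursorOffset), // code before the cursor
        suffix: text.slice(cursorOffset), // code after the cursor
        model: this._llm.model
      },
      { signal: this._controller.signal },
      false // non-streaming
    );

    // Keep only plain-text choices, as the diff does.
    return (response.choices ?? []).map(choice =>
      typeof choice.message.content === 'string' ? choice.message.content : ''
    );
  }
}

// Illustrative usage; 'codestral-latest' and the key are placeholders.
// const fim = new FimCompleter(
//   new MistralAI({ model: 'codestral-latest', apiKey: '...' })
// );
```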
package/lib/default-providers/MistralAI/instructions.d.ts
@@ -1,2 +1,2 @@
-declare const _default: "\n<i class=\"fas fa-exclamation-triangle\"></i> This extension is still very much experimental. It is not an official MistralAI extension.\n\n1. Go to <https://console.mistral.ai/api-keys/> and create an API key.\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png\" alt=\"Screenshot showing how to create an API key\" width=\"500px\">\n\n2. Open the JupyterLab settings and go to the **Ai providers** section to select the `MistralAI`\n provider and the API key (required).\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png\" alt=\"Screenshot showing how to add the API key to the settings\" width=\"500px\">\n\n3. Open the chat, or use the inline completer\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png\" alt=\"Screenshot showing how to use the chat\" width=\"500px\">\n";
+declare const _default: "\n<i class=\"fas fa-exclamation-triangle\"></i> This extension is still very much experimental. It is not an official MistralAI extension.\n\n1. Go to <https://console.mistral.ai/api-keys/> and create an API key.\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png\" alt=\"Screenshot showing how to create an API key\" width=\"500px\">\n\n2. Open the JupyterLab settings and go to the **Ai providers** section to select the `MistralAI`\n provider and the API key (required).\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png\" alt=\"Screenshot showing how to add the API key to the settings\" width=\"500px\">\n\n**Note:** When using MistralAI for completions, only a subset of models are available. Please check [this resource](https://docs.mistral.ai/api/#tag/fim) to see the list of supported models for completions.\n\n3. Open the chat, or use the inline completer\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png\" alt=\"Screenshot showing how to use the chat\" width=\"500px\">\n";
 export default _default;
package/lib/default-providers/MistralAI/instructions.js
@@ -10,6 +10,8 @@ export default `
 
    <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png" alt="Screenshot showing how to add the API key to the settings" width="500px">
 
+**Note:** When using MistralAI for completions, only a subset of models are available. Please check [this resource](https://docs.mistral.ai/api/#tag/fim) to see the list of supported models for completions.
+
 3. Open the chat, or use the inline completer
 
    <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png" alt="Screenshot showing how to use the chat" width="500px">
package/lib/default-providers/Ollama/completer.d.ts
@@ -1,17 +1,12 @@
 import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
-import { BaseCompleter, IBaseCompleter } from '../../base-completer';
-export declare class OllamaCompleter implements IBaseCompleter {
+import { ChatOllama } from '@langchain/ollama';
+import { BaseCompleter } from '../../base-completer';
+export declare class OllamaCompleter extends BaseCompleter {
     constructor(options: BaseCompleter.IOptions);
-    /**
-     * Getter and setter for the initial prompt.
-     */
-    get prompt(): string;
-    set prompt(value: string);
     fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
         items: {
             insertText: string;
         }[];
     }>;
-    private _completer;
-    private _prompt;
+    protected _completer: ChatOllama;
 }
package/lib/default-providers/Ollama/completer.js
@@ -1,23 +1,18 @@
 import { AIMessage, SystemMessage } from '@langchain/core/messages';
 import { ChatOllama } from '@langchain/ollama';
-import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
-export class OllamaCompleter {
+import { BaseCompleter } from '../../base-completer';
+export class OllamaCompleter extends BaseCompleter {
     constructor(options) {
+        super(options);
         this._completer = new ChatOllama({ ...options.settings });
     }
-    /**
-     * Getter and setter for the initial prompt.
-     */
-    get prompt() {
-        return this._prompt;
-    }
-    set prompt(value) {
-        this._prompt = value;
-    }
     async fetch(request, context) {
         const { text, offset: cursorOffset } = request;
         const prompt = text.slice(0, cursorOffset);
-        const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
+        const messages = [
+            new SystemMessage(this.systemPrompt),
+            new AIMessage(prompt)
+        ];
         try {
             const response = await this._completer.invoke(messages);
             const items = [];
@@ -45,5 +40,4 @@ export class OllamaCompleter {
         }
     }
     _completer;
-    _prompt = COMPLETION_SYSTEM_PROMPT;
 }
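
For context, `this.systemPrompt` and `super(options)` refer to the shared `BaseCompleter` (`package/src/base-completer.ts`, +39 -1 in this release), whose diff is not shown in this excerpt. A plausible minimal shape, inferred only from how the subclasses use it, might look like the following sketch; everything beyond the `systemPrompt` accessor and `options.settings` is a guess:

```typescript
import {
  CompletionHandler,
  IInlineCompletionContext
} from '@jupyterlab/completer';

// Inferred sketch only; the real base-completer.ts may differ.
export interface IBaseCompleterOptions {
  // Spread into the underlying LangChain model by each subclass.
  settings: Record<string, unknown>;
}

export abstract class BaseCompleter {
  constructor(protected options: IBaseCompleterOptions) {}

  // System prompt that subclasses prepend to the pre-cursor text.
  get systemPrompt(): string {
    return this._systemPrompt;
  }
  set systemPrompt(value: string) {
    this._systemPrompt = value;
  }

  abstract fetch(
    request: CompletionHandler.IRequest,
    context: IInlineCompletionContext
  ): Promise<{ items: { insertText: string }[] }>;

  private _systemPrompt = '';
}
```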
package/lib/default-providers/Ollama/settings-schema.json
@@ -90,10 +90,7 @@
       "type": "boolean"
     },
     "keepAlive": {
-      "type": [
-        "string",
-        "number"
-      ],
+      "type": ["string", "number"],
       "default": "5m"
     },
     "stop": {
package/lib/default-providers/OpenAI/completer.d.ts
@@ -1,17 +1,12 @@
 import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
-import { BaseCompleter, IBaseCompleter } from '../../base-completer';
-export declare class OpenAICompleter implements IBaseCompleter {
+import { ChatOpenAI } from '@langchain/openai';
+import { BaseCompleter } from '../../base-completer';
+export declare class OpenAICompleter extends BaseCompleter {
     constructor(options: BaseCompleter.IOptions);
-    /**
-     * Getter and setter for the initial prompt.
-     */
-    get prompt(): string;
-    set prompt(value: string);
     fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
         items: {
             insertText: string;
         }[];
     }>;
-    private _completer;
-    private _prompt;
+    protected _completer: ChatOpenAI;
 }
package/lib/default-providers/OpenAI/completer.js
@@ -1,23 +1,18 @@
 import { AIMessage, SystemMessage } from '@langchain/core/messages';
 import { ChatOpenAI } from '@langchain/openai';
-import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
-export class OpenAICompleter {
+import { BaseCompleter } from '../../base-completer';
+export class OpenAICompleter extends BaseCompleter {
     constructor(options) {
+        super(options);
         this._completer = new ChatOpenAI({ ...options.settings });
     }
-    /**
-     * Getter and setter for the initial prompt.
-     */
-    get prompt() {
-        return this._prompt;
-    }
-    set prompt(value) {
-        this._prompt = value;
-    }
     async fetch(request, context) {
         const { text, offset: cursorOffset } = request;
         const prompt = text.slice(0, cursorOffset);
-        const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
+        const messages = [
+            new SystemMessage(this.systemPrompt),
+            new AIMessage(prompt)
+        ];
         try {
             const response = await this._completer.invoke(messages);
             const items = [];
@@ -45,5 +40,4 @@ export class OpenAICompleter {
         }
     }
     _completer;
-    _prompt = COMPLETION_SYSTEM_PROMPT;
 }
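
With the prompt plumbing moved into the base class, every provider exposes the same `fetch(request, context)` contract. A hedged usage sketch (the relative import path, the options cast, and the model settings are illustrative; real calls come from JupyterLab's inline completion machinery, not direct invocation):

```typescript
import { IInlineCompletionContext } from '@jupyterlab/completer';
import { OpenAICompleter } from './default-providers/OpenAI/completer';

async function demo(): Promise<void> {
  // Settings are spread into ChatOpenAI; model and apiKey are placeholders.
  const completer = new OpenAICompleter({
    settings: { model: 'gpt-4o-mini', apiKey: 'sk-...' }
  } as any);

  const text = 'def add(a, b):\n    ';
  const { items } = await completer.fetch(
    { text, offset: text.length }, // complete at the end of the snippet
    {} as IInlineCompletionContext
  );
  console.log(items.map(item => item.insertText));
}
```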