@jupyterlite/ai 0.7.0 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/lib/base-completer.d.ts +23 -1
  2. package/lib/base-completer.js +14 -1
  3. package/lib/chat-handler.d.ts +4 -6
  4. package/lib/chat-handler.js +22 -22
  5. package/lib/completion-provider.js +1 -1
  6. package/lib/default-prompts.d.ts +2 -0
  7. package/lib/default-prompts.js +31 -0
  8. package/lib/default-providers/Anthropic/completer.d.ts +4 -9
  9. package/lib/default-providers/Anthropic/completer.js +4 -13
  10. package/lib/default-providers/ChromeAI/completer.d.ts +4 -9
  11. package/lib/default-providers/ChromeAI/completer.js +4 -13
  12. package/lib/default-providers/ChromeAI/settings-schema.json +0 -3
  13. package/lib/default-providers/Gemini/completer.d.ts +12 -0
  14. package/lib/default-providers/Gemini/completer.js +48 -0
  15. package/lib/default-providers/Gemini/instructions.d.ts +2 -0
  16. package/lib/default-providers/Gemini/instructions.js +9 -0
  17. package/lib/default-providers/Gemini/settings-schema.json +64 -0
  18. package/lib/default-providers/MistralAI/completer.d.ts +10 -11
  19. package/lib/default-providers/MistralAI/completer.js +41 -50
  20. package/lib/default-providers/MistralAI/instructions.d.ts +1 -1
  21. package/lib/default-providers/MistralAI/instructions.js +2 -0
  22. package/lib/default-providers/Ollama/completer.d.ts +4 -9
  23. package/lib/default-providers/Ollama/completer.js +7 -13
  24. package/lib/default-providers/Ollama/settings-schema.json +1 -4
  25. package/lib/default-providers/OpenAI/completer.d.ts +4 -9
  26. package/lib/default-providers/OpenAI/completer.js +7 -13
  27. package/lib/default-providers/OpenAI/settings-schema.json +88 -128
  28. package/lib/default-providers/WebLLM/completer.d.ts +3 -9
  29. package/lib/default-providers/WebLLM/completer.js +4 -13
  30. package/lib/default-providers/WebLLM/settings-schema.json +1 -3
  31. package/lib/default-providers/index.js +23 -19
  32. package/lib/index.d.ts +1 -0
  33. package/lib/index.js +68 -14
  34. package/lib/provider.d.ts +39 -11
  35. package/lib/provider.js +166 -81
  36. package/lib/settings/index.d.ts +1 -0
  37. package/lib/settings/index.js +1 -0
  38. package/lib/settings/panel.d.ts +116 -8
  39. package/lib/settings/panel.js +117 -22
  40. package/lib/settings/textarea.d.ts +2 -0
  41. package/lib/settings/textarea.js +18 -0
  42. package/lib/tokens.d.ts +24 -20
  43. package/lib/tokens.js +2 -1
  44. package/package.json +10 -9
  45. package/schema/chat.json +1 -1
  46. package/schema/provider-registry.json +11 -5
  47. package/schema/system-prompts.json +22 -0
  48. package/src/base-completer.ts +39 -1
  49. package/src/chat-handler.ts +23 -25
  50. package/src/completion-provider.ts +1 -1
  51. package/src/default-prompts.ts +33 -0
  52. package/src/default-providers/Anthropic/completer.ts +5 -16
  53. package/src/default-providers/ChromeAI/completer.ts +5 -16
  54. package/src/default-providers/Gemini/completer.ts +61 -0
  55. package/src/default-providers/Gemini/instructions.ts +9 -0
  56. package/src/default-providers/MistralAI/completer.ts +47 -60
  57. package/src/default-providers/MistralAI/instructions.ts +2 -0
  58. package/src/default-providers/Ollama/completer.ts +8 -16
  59. package/src/default-providers/OpenAI/completer.ts +8 -16
  60. package/src/default-providers/WebLLM/completer.ts +5 -16
  61. package/src/default-providers/index.ts +23 -20
  62. package/src/index.ts +95 -15
  63. package/src/provider.ts +198 -94
  64. package/src/settings/index.ts +1 -0
  65. package/src/settings/panel.tsx +262 -34
  66. package/src/settings/textarea.tsx +33 -0
  67. package/src/tokens.ts +26 -22
  68. package/style/base.css +29 -0
@@ -2,13 +2,17 @@ import {
2
2
  CompletionHandler,
3
3
  IInlineCompletionContext
4
4
  } from '@jupyterlab/completer';
5
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
5
6
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
6
7
 
8
+ import { DEFAULT_COMPLETION_SYSTEM_PROMPT } from './default-prompts';
9
+ import { IAIProviderRegistry } from './tokens';
10
+
7
11
  export interface IBaseCompleter {
8
12
  /**
9
13
  * The completion prompt.
10
14
  */
11
- prompt: string;
15
+ readonly systemPrompt: string;
12
16
 
13
17
  /**
14
18
  * The function to fetch a new completion.
@@ -24,6 +28,33 @@ export interface IBaseCompleter {
24
28
  ): Promise<any>;
25
29
  }
26
30
 
31
+ export abstract class BaseCompleter implements IBaseCompleter {
32
+ constructor(options: BaseCompleter.IOptions) {
33
+ this._providerRegistry = options.providerRegistry;
34
+ }
35
+
36
+ /**
37
+ * Get the system prompt for the completion.
38
+ */
39
+ get systemPrompt(): string {
40
+ return (
41
+ this._providerRegistry.completerSystemPrompt ??
42
+ DEFAULT_COMPLETION_SYSTEM_PROMPT
43
+ );
44
+ }
45
+
46
+ /**
47
+ * The fetch request for the LLM completer.
48
+ */
49
+ abstract fetch(
50
+ request: CompletionHandler.IRequest,
51
+ context: IInlineCompletionContext
52
+ ): Promise<any>;
53
+
54
+ protected _providerRegistry: IAIProviderRegistry;
55
+ protected abstract _completer: BaseLanguageModel<any, any>;
56
+ }
57
+
27
58
  /**
28
59
  * The namespace for the base completer.
29
60
  */
@@ -32,6 +63,13 @@ export namespace BaseCompleter {
32
63
  * The options for the constructor of a completer.
33
64
  */
34
65
  export interface IOptions {
66
+ /**
67
+ * The provider registry.
68
+ */
69
+ providerRegistry: IAIProviderRegistry;
70
+ /**
71
+ * The settings of the provider.
72
+ */
35
73
  settings: ReadonlyPartialJSONObject;
36
74
  }
37
75
  }
@@ -23,8 +23,8 @@ import {
23
23
  } from '@langchain/core/messages';
24
24
  import { UUID } from '@lumino/coreutils';
25
25
 
26
+ import { DEFAULT_CHAT_SYSTEM_PROMPT } from './default-prompts';
26
27
  import { jupyternautLiteIcon } from './icons';
27
- import { chatSystemPrompt } from './provider';
28
28
  import { IAIProviderRegistry } from './tokens';
29
29
  import { AIChatModel } from './types/ai-model';
30
30
 
@@ -39,7 +39,7 @@ export const welcomeMessage = (providers: string[]) => `
39
39
  #### Ask JupyterLite AI
40
40
 
41
41
 
42
- The provider to use can be set in the settings editor, by selecting it from
42
+ The provider to use can be set in the <button data-commandLinker-command="settingeditor:open" data-commandLinker-args='{"query": "AI provider"}' href="#">settings editor</button>, by selecting it from
43
43
  the <img src="${AI_AVATAR}" width="16" height="16"> _AI provider_ settings.
44
44
 
45
45
  The current providers that are available are _${providers.sort().join('_, _')}_.
@@ -56,15 +56,9 @@ export class ChatHandler extends AbstractChatModel {
56
56
  constructor(options: ChatHandler.IOptions) {
57
57
  super(options);
58
58
  this._providerRegistry = options.providerRegistry;
59
- this._prompt = chatSystemPrompt({
60
- provider_name: this._providerRegistry.currentName
61
- });
62
59
 
63
60
  this._providerRegistry.providerChanged.connect(() => {
64
61
  this._errorMessage = this._providerRegistry.chatError;
65
- this._prompt = chatSystemPrompt({
66
- provider_name: this._providerRegistry.currentName
67
- });
68
62
  });
69
63
  }
70
64
 
@@ -90,13 +84,12 @@ export class ChatHandler extends AbstractChatModel {
90
84
  }
91
85
 
92
86
  /**
93
- * Getter and setter for the initial prompt.
87
+ * Get/set the system prompt for the chat.
94
88
  */
95
- get prompt(): string {
96
- return this._prompt;
97
- }
98
- set prompt(value: string) {
99
- this._prompt = value;
89
+ get systemPrompt(): string {
90
+ return (
91
+ this._providerRegistry.chatSystemPrompt ?? DEFAULT_CHAT_SYSTEM_PROMPT
92
+ );
100
93
  }
101
94
 
102
95
  async sendMessage(message: INewMessage): Promise<boolean> {
@@ -112,7 +105,7 @@ export class ChatHandler extends AbstractChatModel {
112
105
  id: message.id,
113
106
  body,
114
107
  sender: { username: 'User' },
115
- time: Date.now(),
108
+ time: Private.getTimestampMs(),
116
109
  type: 'msg'
117
110
  };
118
111
  this.messageAdded(msg);
@@ -122,7 +115,7 @@ export class ChatHandler extends AbstractChatModel {
122
115
  id: UUID.uuid4(),
123
116
  body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`,
124
117
  sender: { username: 'ERROR' },
125
- time: Date.now(),
118
+ time: Private.getTimestampMs(),
126
119
  type: 'msg'
127
120
  };
128
121
  this.messageAdded(errorMsg);
@@ -131,7 +124,7 @@ export class ChatHandler extends AbstractChatModel {
131
124
 
132
125
  this._history.messages.push(msg);
133
126
 
134
- const messages = mergeMessageRuns([new SystemMessage(this._prompt)]);
127
+ const messages = mergeMessageRuns([new SystemMessage(this.systemPrompt)]);
135
128
  messages.push(
136
129
  ...this._history.messages.map(msg => {
137
130
  if (msg.sender.username === 'User') {
@@ -149,7 +142,7 @@ export class ChatHandler extends AbstractChatModel {
149
142
  id: UUID.uuid4(),
150
143
  body: '',
151
144
  sender,
152
- time: Date.now(),
145
+ time: Private.getTimestampMs(),
153
146
  type: 'msg'
154
147
  };
155
148
 
@@ -173,7 +166,7 @@ export class ChatHandler extends AbstractChatModel {
173
166
  id: UUID.uuid4(),
174
167
  body: `**${error}**`,
175
168
  sender: { username: 'ERROR' },
176
- time: Date.now(),
169
+ time: Private.getTimestampMs(),
177
170
  type: 'msg'
178
171
  };
179
172
  this.messageAdded(errorMsg);
@@ -206,7 +199,6 @@ export class ChatHandler extends AbstractChatModel {
206
199
 
207
200
  private _providerRegistry: IAIProviderRegistry;
208
201
  private _personaName = 'AI';
209
- private _prompt: string;
210
202
  private _errorMessage: string = '';
211
203
  private _history: IChatHistory = { messages: [] };
212
204
  private _defaultErrorMessage = 'AI provider not configured';
@@ -241,7 +233,7 @@ export namespace ChatHandler {
241
233
  description: 'Clear the chat'
242
234
  }
243
235
  ];
244
- async getChatCommands(inputModel: IInputModel) {
236
+ async listCommandCompletions(inputModel: IInputModel) {
245
237
  const match = inputModel.currentWord?.match(/^\/\w*/)?.[0];
246
238
  if (!match) {
247
239
  return [];
@@ -253,12 +245,18 @@ export namespace ChatHandler {
253
245
  return commands;
254
246
  }
255
247
 
256
- async handleChatCommand(
257
- command: ChatCommand,
258
- inputModel: IInputModel
259
- ): Promise<void> {
248
+ async onSubmit(inputModel: IInputModel): Promise<void> {
260
249
  // no handling needed because `replaceWith` is set in each command.
261
250
  return;
262
251
  }
263
252
  }
264
253
  }
254
+
255
+ namespace Private {
256
+ /**
257
+ * Return the current timestamp in milliseconds.
258
+ */
259
+ export function getTimestampMs(): number {
260
+ return Date.now() / 1000;
261
+ }
262
+ }
@@ -28,7 +28,7 @@ export class CompletionProvider implements IInlineCompletionProvider {
28
28
  * Get the current completer name.
29
29
  */
30
30
  get name(): string {
31
- return this._providerRegistry.currentName;
31
+ return this._providerRegistry.currentName('completer');
32
32
  }
33
33
 
34
34
  /**
@@ -0,0 +1,33 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+
6
+ export const DEFAULT_CHAT_SYSTEM_PROMPT = `
7
+ You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
8
+ You are not a language model, but rather an application built on a foundation model from $provider_name$.
9
+ You are talkative and you provide lots of specific details from the foundation model's context.
10
+ You may use Markdown to format your response.
11
+ If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).
12
+ If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.
13
+ All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`.
14
+ - Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\`
15
+ - **Correct** response: \`You have \\(\\$80\\) remaining.\`
16
+ - **Incorrect** response: \`You have $80 remaining.\`
17
+ If you do not know the answer to a question, answer truthfully by responding that you do not know.
18
+ The following is a friendly conversation between you and a human.
19
+ `;
20
+
21
+ export const DEFAULT_COMPLETION_SYSTEM_PROMPT = `
22
+ You are an application built to provide helpful code completion suggestions.
23
+ You should only produce code. Keep comments to minimum, use the
24
+ programming language comment syntax. Produce clean code.
25
+ The code is written in JupyterLab, a data analysis and code development
26
+ environment which can execute code extended with additional syntax for
27
+ interactive features, such as magics.
28
+ Only give raw strings back, do not format the response using backticks.
29
+ The output should be a single string, and should only contain the code that will complete the
30
+ give code passed as input, no explanation whatsoever.
31
+ Do not include the prompt in the output, only the string that should be appended to the current input.
32
+ Here is the code to complete:
33
+ `;
@@ -5,24 +5,14 @@ import {
5
5
  import { ChatAnthropic } from '@langchain/anthropic';
6
6
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
7
7
 
8
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
9
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8
+ import { BaseCompleter } from '../../base-completer';
10
9
 
11
- export class AnthropicCompleter implements IBaseCompleter {
10
+ export class AnthropicCompleter extends BaseCompleter {
12
11
  constructor(options: BaseCompleter.IOptions) {
12
+ super(options);
13
13
  this._completer = new ChatAnthropic({ ...options.settings });
14
14
  }
15
15
 
16
- /**
17
- * Getter and setter for the initial prompt.
18
- */
19
- get prompt(): string {
20
- return this._prompt;
21
- }
22
- set prompt(value: string) {
23
- this._prompt = value;
24
- }
25
-
26
16
  async fetch(
27
17
  request: CompletionHandler.IRequest,
28
18
  context: IInlineCompletionContext
@@ -34,7 +24,7 @@ export class AnthropicCompleter implements IBaseCompleter {
34
24
  const trimmedPrompt = prompt.trim();
35
25
 
36
26
  const messages = [
37
- new SystemMessage(this._prompt),
27
+ new SystemMessage(this.systemPrompt),
38
28
  new AIMessage(trimmedPrompt)
39
29
  ];
40
30
 
@@ -65,6 +55,5 @@ export class AnthropicCompleter implements IBaseCompleter {
65
55
  }
66
56
  }
67
57
 
68
- private _completer: ChatAnthropic;
69
- private _prompt: string = COMPLETION_SYSTEM_PROMPT;
58
+ protected _completer: ChatAnthropic;
70
59
  }
@@ -5,8 +5,7 @@ import {
5
5
  import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
6
6
  import { HumanMessage, SystemMessage } from '@langchain/core/messages';
7
7
 
8
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
9
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8
+ import { BaseCompleter } from '../../base-completer';
10
9
 
11
10
  /**
12
11
  * Regular expression to match the '```' string at the start of a string.
@@ -29,21 +28,12 @@ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
29
28
  */
30
29
  const CODE_BLOCK_END_REGEX = /```$/;
31
30
 
32
- export class ChromeCompleter implements IBaseCompleter {
31
+ export class ChromeCompleter extends BaseCompleter {
33
32
  constructor(options: BaseCompleter.IOptions) {
33
+ super(options);
34
34
  this._completer = new ChromeAI({ ...options.settings });
35
35
  }
36
36
 
37
- /**
38
- * Getter and setter for the initial prompt.
39
- */
40
- get prompt(): string {
41
- return this._prompt;
42
- }
43
- set prompt(value: string) {
44
- this._prompt = value;
45
- }
46
-
47
37
  async fetch(
48
38
  request: CompletionHandler.IRequest,
49
39
  context: IInlineCompletionContext
@@ -54,7 +44,7 @@ export class ChromeCompleter implements IBaseCompleter {
54
44
  const trimmedPrompt = prompt.trim();
55
45
 
56
46
  const messages = [
57
- new SystemMessage(this._prompt),
47
+ new SystemMessage(this.systemPrompt),
58
48
  new HumanMessage(trimmedPrompt)
59
49
  ];
60
50
 
@@ -79,6 +69,5 @@ export class ChromeCompleter implements IBaseCompleter {
79
69
  }
80
70
  }
81
71
 
82
- private _completer: ChromeAI;
83
- private _prompt: string = COMPLETION_SYSTEM_PROMPT;
72
+ protected _completer: ChromeAI;
84
73
  }
@@ -0,0 +1,61 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
6
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
7
+
8
+ import { BaseCompleter } from '../../base-completer';
9
+
10
+ export class GeminiCompleter extends BaseCompleter {
11
+ constructor(options: BaseCompleter.IOptions) {
12
+ super(options);
13
+ this._completer = new ChatGoogleGenerativeAI({
14
+ model: 'gemini-pro',
15
+ ...options.settings
16
+ });
17
+ }
18
+
19
+ async fetch(
20
+ request: CompletionHandler.IRequest,
21
+ context: IInlineCompletionContext
22
+ ) {
23
+ const { text, offset: cursorOffset } = request;
24
+ const prompt = text.slice(0, cursorOffset);
25
+
26
+ const trimmedPrompt = prompt.trim();
27
+
28
+ const messages = [
29
+ new SystemMessage(this.systemPrompt),
30
+ new AIMessage(trimmedPrompt)
31
+ ];
32
+
33
+ try {
34
+ const response = await this._completer.invoke(messages);
35
+ const items = [];
36
+
37
+ // Gemini can return string or complex content, a list of string/images/other.
38
+ if (typeof response.content === 'string') {
39
+ items.push({
40
+ insertText: response.content
41
+ });
42
+ } else {
43
+ response.content.forEach(content => {
44
+ if (content.type !== 'text') {
45
+ return;
46
+ }
47
+ items.push({
48
+ insertText: content.text,
49
+ filterText: prompt.substring(trimmedPrompt.length)
50
+ });
51
+ });
52
+ }
53
+ return { items };
54
+ } catch (error) {
55
+ console.error('Error fetching completions', error);
56
+ return { items: [] };
57
+ }
58
+ }
59
+
60
+ protected _completer: ChatGoogleGenerativeAI;
61
+ }
@@ -0,0 +1,9 @@
1
+ export default `
2
+ <i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official Google extension.
3
+
4
+ 1. Go to <https://aistudio.google.com> and create an API key.
5
+
6
+ 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`Gemini\`
7
+ provider and add your API key (required).
8
+ 3. Open the chat, or use the inline completer.
9
+ `;
@@ -2,81 +2,68 @@ import {
2
2
  CompletionHandler,
3
3
  IInlineCompletionContext
4
4
  } from '@jupyterlab/completer';
5
- import {
6
- BaseMessage,
7
- HumanMessage,
8
- SystemMessage
9
- } from '@langchain/core/messages';
10
- import { ChatMistralAI } from '@langchain/mistralai';
11
- import { Throttler } from '@lumino/polling';
5
+ import { MistralAI } from '@langchain/mistralai';
12
6
 
13
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
14
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
7
+ import { BaseCompleter } from '../../base-completer';
15
8
 
16
- /**
17
- * The Mistral API has a rate limit of 1 request per second
18
- */
19
- const INTERVAL = 1000;
9
+ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
10
+ const CODE_BLOCK_END_REGEX = /```$/;
20
11
 
21
- export class CodestralCompleter implements IBaseCompleter {
12
+ export class CodestralCompleter extends BaseCompleter {
22
13
  constructor(options: BaseCompleter.IOptions) {
23
- this._completer = new ChatMistralAI({ ...options.settings });
24
- this._throttler = new Throttler(
25
- async (messages: BaseMessage[]) => {
26
- const response = await this._completer.invoke(messages);
27
- // Extract results of completion request.
28
- const items = [];
29
- if (typeof response.content === 'string') {
30
- items.push({
31
- insertText: response.content
32
- });
33
- } else {
34
- response.content.forEach(content => {
35
- if (content.type !== 'text') {
36
- return;
37
- }
38
- items.push({
39
- insertText: content.text
40
- });
41
- });
42
- }
43
- return { items };
44
- },
45
- { limit: INTERVAL }
46
- );
47
- }
48
-
49
- /**
50
- * Getter and setter for the initial prompt.
51
- */
52
- get prompt(): string {
53
- return this._prompt;
54
- }
55
- set prompt(value: string) {
56
- this._prompt = value;
14
+ super(options);
15
+ this._completer = new MistralAI({ ...options.settings });
57
16
  }
58
17
 
59
18
  async fetch(
60
19
  request: CompletionHandler.IRequest,
61
20
  context: IInlineCompletionContext
62
21
  ) {
63
- const { text, offset: cursorOffset } = request;
64
- const prompt = text.slice(0, cursorOffset);
22
+ try {
23
+ const { text, offset: cursorOffset } = request;
24
+ const prompt = this.systemPrompt.concat(text.slice(0, cursorOffset));
25
+ const suffix = text.slice(cursorOffset);
26
+ this._controller.abort();
27
+ this._controller = new AbortController();
65
28
 
66
- const messages: BaseMessage[] = [
67
- new SystemMessage(this._prompt),
68
- new HumanMessage(prompt)
69
- ];
29
+ const response = await this._completer.completionWithRetry(
30
+ {
31
+ prompt,
32
+ model: this._completer.model,
33
+ suffix
34
+ },
35
+ { signal: this._controller.signal },
36
+ false
37
+ );
38
+ const items = response.choices.map(choice => {
39
+ const messageContent = choice.message.content;
40
+ let content = '';
70
41
 
71
- try {
72
- return await this._throttler.invoke(messages);
42
+ if (typeof messageContent === 'string') {
43
+ content = messageContent
44
+ .replace(CODE_BLOCK_START_REGEX, '')
45
+ .replace(CODE_BLOCK_END_REGEX, '');
46
+ } else if (Array.isArray(messageContent)) {
47
+ // Handle ContentChunk[] case - extract text content
48
+ content = messageContent
49
+ .filter(chunk => chunk.type === 'text')
50
+ .map(chunk => chunk.text || '')
51
+ .join('')
52
+ .replace(CODE_BLOCK_START_REGEX, '')
53
+ .replace(CODE_BLOCK_END_REGEX, '');
54
+ }
55
+
56
+ return {
57
+ insertText: content
58
+ };
59
+ });
60
+ return { items };
73
61
  } catch (error) {
74
- console.error('Error fetching completions', error);
62
+ // the request may be aborted
75
63
  return { items: [] };
76
64
  }
77
65
  }
78
66
 
79
- private _throttler: Throttler;
80
- private _completer: ChatMistralAI;
81
- private _prompt: string = COMPLETION_SYSTEM_PROMPT;
67
+ private _controller = new AbortController();
68
+ protected _completer: MistralAI;
82
69
  }
@@ -10,6 +10,8 @@ export default `
10
10
 
11
11
  <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png" alt="Screenshot showing how to add the API key to the settings" width="500px">
12
12
 
13
+ **Note:** When using MistralAI for completions, only a subset of models are available. Please check [this resource](https://docs.mistral.ai/api/#tag/fim) to see the list of supported models for completions.
14
+
13
15
  3. Open the chat, or use the inline completer
14
16
 
15
17
  <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png" alt="Screenshot showing how to use the chat" width="500px">
@@ -5,24 +5,14 @@ import {
5
5
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
6
6
  import { ChatOllama } from '@langchain/ollama';
7
7
 
8
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
9
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8
+ import { BaseCompleter } from '../../base-completer';
10
9
 
11
- export class OllamaCompleter implements IBaseCompleter {
10
+ export class OllamaCompleter extends BaseCompleter {
12
11
  constructor(options: BaseCompleter.IOptions) {
12
+ super(options);
13
13
  this._completer = new ChatOllama({ ...options.settings });
14
14
  }
15
15
 
16
- /**
17
- * Getter and setter for the initial prompt.
18
- */
19
- get prompt(): string {
20
- return this._prompt;
21
- }
22
- set prompt(value: string) {
23
- this._prompt = value;
24
- }
25
-
26
16
  async fetch(
27
17
  request: CompletionHandler.IRequest,
28
18
  context: IInlineCompletionContext
@@ -30,7 +20,10 @@ export class OllamaCompleter implements IBaseCompleter {
30
20
  const { text, offset: cursorOffset } = request;
31
21
  const prompt = text.slice(0, cursorOffset);
32
22
 
33
- const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
23
+ const messages = [
24
+ new SystemMessage(this.systemPrompt),
25
+ new AIMessage(prompt)
26
+ ];
34
27
 
35
28
  try {
36
29
  const response = await this._completer.invoke(messages);
@@ -57,6 +50,5 @@ export class OllamaCompleter implements IBaseCompleter {
57
50
  }
58
51
  }
59
52
 
60
- private _completer: ChatOllama;
61
- private _prompt: string = COMPLETION_SYSTEM_PROMPT;
53
+ protected _completer: ChatOllama;
62
54
  }
@@ -5,24 +5,14 @@ import {
5
5
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
6
6
  import { ChatOpenAI } from '@langchain/openai';
7
7
 
8
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
9
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8
+ import { BaseCompleter } from '../../base-completer';
10
9
 
11
- export class OpenAICompleter implements IBaseCompleter {
10
+ export class OpenAICompleter extends BaseCompleter {
12
11
  constructor(options: BaseCompleter.IOptions) {
12
+ super(options);
13
13
  this._completer = new ChatOpenAI({ ...options.settings });
14
14
  }
15
15
 
16
- /**
17
- * Getter and setter for the initial prompt.
18
- */
19
- get prompt(): string {
20
- return this._prompt;
21
- }
22
- set prompt(value: string) {
23
- this._prompt = value;
24
- }
25
-
26
16
  async fetch(
27
17
  request: CompletionHandler.IRequest,
28
18
  context: IInlineCompletionContext
@@ -30,7 +20,10 @@ export class OpenAICompleter implements IBaseCompleter {
30
20
  const { text, offset: cursorOffset } = request;
31
21
  const prompt = text.slice(0, cursorOffset);
32
22
 
33
- const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
23
+ const messages = [
24
+ new SystemMessage(this.systemPrompt),
25
+ new AIMessage(prompt)
26
+ ];
34
27
 
35
28
  try {
36
29
  const response = await this._completer.invoke(messages);
@@ -57,6 +50,5 @@ export class OpenAICompleter implements IBaseCompleter {
57
50
  }
58
51
  }
59
52
 
60
- private _completer: ChatOpenAI;
61
- private _prompt: string = COMPLETION_SYSTEM_PROMPT;
53
+ protected _completer: ChatOpenAI;
62
54
  }