@jupyterlite/ai 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +48 -9
  2. package/lib/chat-handler.d.ts +15 -3
  3. package/lib/chat-handler.js +80 -28
  4. package/lib/completion-provider.d.ts +5 -18
  5. package/lib/completion-provider.js +8 -34
  6. package/lib/icons.d.ts +2 -0
  7. package/lib/icons.js +15 -0
  8. package/lib/index.d.ts +3 -2
  9. package/lib/index.js +79 -22
  10. package/lib/llm-models/anthropic-completer.d.ts +19 -0
  11. package/lib/llm-models/anthropic-completer.js +57 -0
  12. package/lib/llm-models/base-completer.d.ts +6 -2
  13. package/lib/llm-models/chrome-completer.d.ts +19 -0
  14. package/lib/llm-models/chrome-completer.js +67 -0
  15. package/lib/llm-models/codestral-completer.d.ts +9 -8
  16. package/lib/llm-models/codestral-completer.js +37 -54
  17. package/lib/llm-models/index.d.ts +3 -2
  18. package/lib/llm-models/index.js +42 -2
  19. package/lib/llm-models/openai-completer.d.ts +19 -0
  20. package/lib/llm-models/openai-completer.js +51 -0
  21. package/lib/provider.d.ts +54 -15
  22. package/lib/provider.js +123 -41
  23. package/lib/settings/instructions.d.ts +2 -0
  24. package/lib/settings/instructions.js +44 -0
  25. package/lib/settings/panel.d.ts +70 -0
  26. package/lib/settings/panel.js +190 -0
  27. package/lib/settings/schemas/_generated/Anthropic.json +70 -0
  28. package/lib/settings/schemas/_generated/ChromeAI.json +21 -0
  29. package/lib/settings/schemas/_generated/MistralAI.json +75 -0
  30. package/lib/settings/schemas/_generated/OpenAI.json +668 -0
  31. package/lib/settings/schemas/base.json +7 -0
  32. package/lib/settings/schemas/index.d.ts +3 -0
  33. package/lib/settings/schemas/index.js +11 -0
  34. package/lib/slash-commands.d.ts +16 -0
  35. package/lib/slash-commands.js +25 -0
  36. package/lib/tokens.d.ts +103 -0
  37. package/lib/tokens.js +5 -0
  38. package/package.json +27 -104
  39. package/schema/chat.json +8 -0
  40. package/schema/provider-registry.json +17 -0
  41. package/src/chat-handler.ts +103 -43
  42. package/src/completion-provider.ts +13 -37
  43. package/src/icons.ts +18 -0
  44. package/src/index.ts +101 -24
  45. package/src/llm-models/anthropic-completer.ts +75 -0
  46. package/src/llm-models/base-completer.ts +7 -2
  47. package/src/llm-models/chrome-completer.ts +88 -0
  48. package/src/llm-models/codestral-completer.ts +43 -69
  49. package/src/llm-models/index.ts +49 -2
  50. package/src/llm-models/openai-completer.ts +67 -0
  51. package/src/llm-models/svg.d.ts +9 -0
  52. package/src/provider.ts +138 -43
  53. package/src/settings/instructions.ts +48 -0
  54. package/src/settings/panel.tsx +257 -0
  55. package/src/settings/schemas/index.ts +15 -0
  56. package/src/slash-commands.tsx +55 -0
  57. package/src/tokens.ts +112 -0
  58. package/style/base.css +4 -0
  59. package/style/icons/jupyternaut-lite.svg +7 -0
  60. package/lib/llm-models/utils.d.ts +0 -15
  61. package/lib/llm-models/utils.js +0 -29
  62. package/lib/token.d.ts +0 -13
  63. package/lib/token.js +0 -2
  64. package/schema/ai-provider.json +0 -21
  65. package/src/llm-models/utils.ts +0 -41
  66. package/src/token.ts +0 -19
@@ -3,10 +3,9 @@ import {
3
3
  IInlineCompletionContext,
4
4
  IInlineCompletionProvider
5
5
  } from '@jupyterlab/completer';
6
- import { LLM } from '@langchain/core/language_models/llms';
7
6
 
8
- import { getCompleter, IBaseCompleter, BaseCompleter } from './llm-models';
9
- import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
7
+ import { IBaseCompleter } from './llm-models';
8
+ import { IAIProviderRegistry } from './tokens';
10
9
 
11
10
  /**
12
11
  * The generic completion provider to register to the completion provider manager.
@@ -15,67 +14,44 @@ export class CompletionProvider implements IInlineCompletionProvider {
15
14
  readonly identifier = '@jupyterlite/ai';
16
15
 
17
16
  constructor(options: CompletionProvider.IOptions) {
18
- const { name, settings } = options;
17
+ this._providerRegistry = options.providerRegistry;
19
18
  this._requestCompletion = options.requestCompletion;
20
- this.setCompleter(name, settings);
21
- }
22
19
 
23
- /**
24
- * Set the completer.
25
- *
26
- * @param name - the name of the completer.
27
- * @param settings - The settings associated to the completer.
28
- */
29
- setCompleter(name: string, settings: ReadonlyPartialJSONObject) {
30
- try {
31
- this._completer = getCompleter(name, settings);
32
- if (this._completer) {
33
- this._completer.requestCompletion = this._requestCompletion;
20
+ this._providerRegistry.providerChanged.connect(() => {
21
+ if (this.completer) {
22
+ this.completer.requestCompletion = this._requestCompletion;
34
23
  }
35
- this._name = this._completer === null ? 'None' : name;
36
- } catch (e: any) {
37
- this._completer = null;
38
- this._name = 'None';
39
- throw e;
40
- }
24
+ });
41
25
  }
42
26
 
43
27
  /**
44
28
  * Get the current completer name.
45
29
  */
46
30
  get name(): string {
47
- return this._name;
31
+ return this._providerRegistry.currentName;
48
32
  }
49
33
 
50
34
  /**
51
35
  * Get the current completer.
52
36
  */
53
37
  get completer(): IBaseCompleter | null {
54
- return this._completer;
55
- }
56
-
57
- /**
58
- * Get the LLM completer.
59
- */
60
- get llmCompleter(): LLM | null {
61
- return this._completer?.provider || null;
38
+ return this._providerRegistry.currentCompleter;
62
39
  }
63
40
 
64
41
  async fetch(
65
42
  request: CompletionHandler.IRequest,
66
43
  context: IInlineCompletionContext
67
44
  ) {
68
- return this._completer?.fetch(request, context);
45
+ return this.completer?.fetch(request, context);
69
46
  }
70
47
 
71
- private _name: string = 'None';
48
+ private _providerRegistry: IAIProviderRegistry;
72
49
  private _requestCompletion: () => void;
73
- private _completer: IBaseCompleter | null = null;
74
50
  }
75
51
 
76
52
  export namespace CompletionProvider {
77
- export interface IOptions extends BaseCompleter.IOptions {
78
- name: string;
53
+ export interface IOptions {
54
+ providerRegistry: IAIProviderRegistry;
79
55
  requestCompletion: () => void;
80
56
  }
81
57
  }
package/src/icons.ts ADDED
@@ -0,0 +1,18 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+
6
+ import { LabIcon } from '@jupyterlab/ui-components';
7
+
8
+ /**
9
+ * This icon is based on the jupyternaut icon from Jupyter AI:
10
+ * https://github.com/jupyterlab/jupyter-ai/blob/main/packages/jupyter-ai/style/icons/jupyternaut.svg
11
+ * With a small tweak for the colors to match the JupyterLite icon.
12
+ */
13
+ import jupyternautLiteSvg from '../style/icons/jupyternaut-lite.svg';
14
+
15
+ export const jupyternautLiteIcon = new LabIcon({
16
+ name: '@jupyterlite/ai:jupyternaut-lite',
17
+ svgstr: jupyternautLiteSvg
18
+ });
package/src/index.ts CHANGED
@@ -1,8 +1,11 @@
1
1
  import {
2
2
  ActiveCellManager,
3
+ AutocompletionRegistry,
3
4
  buildChatSidebar,
4
5
  buildErrorWidget,
5
- IActiveCellManager
6
+ IActiveCellManager,
7
+ IAutocompletionCommandsProps,
8
+ IAutocompletionRegistry
6
9
  } from '@jupyter/chat';
7
10
  import {
8
11
  JupyterFrontEnd,
@@ -13,21 +16,55 @@ import { ICompletionProviderManager } from '@jupyterlab/completer';
13
16
  import { INotebookTracker } from '@jupyterlab/notebook';
14
17
  import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
15
18
  import { ISettingRegistry } from '@jupyterlab/settingregistry';
19
+ import { IFormRendererRegistry } from '@jupyterlab/ui-components';
20
+ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
16
21
 
17
22
  import { ChatHandler } from './chat-handler';
18
- import { AIProvider } from './provider';
19
- import { IAIProvider } from './token';
23
+ import { CompletionProvider } from './completion-provider';
24
+ import { AIProviders } from './llm-models';
25
+ import { AIProviderRegistry } from './provider';
26
+ import { aiSettingsRenderer } from './settings/panel';
27
+ import { renderSlashCommandOption } from './slash-commands';
28
+ import { IAIProviderRegistry } from './tokens';
29
+
30
+ const autocompletionRegistryPlugin: JupyterFrontEndPlugin<IAutocompletionRegistry> =
31
+ {
32
+ id: '@jupyterlite/ai:autocompletion-registry',
33
+ description: 'Autocompletion registry',
34
+ autoStart: true,
35
+ provides: IAutocompletionRegistry,
36
+ activate: () => {
37
+ const autocompletionRegistry = new AutocompletionRegistry();
38
+ const options = ['/clear'];
39
+ const autocompletionCommands: IAutocompletionCommandsProps = {
40
+ opener: '/',
41
+ commands: options.map(option => {
42
+ return {
43
+ id: option.slice(1),
44
+ label: option,
45
+ description: 'Clear the chat window'
46
+ };
47
+ }),
48
+ props: {
49
+ renderOption: renderSlashCommandOption
50
+ }
51
+ };
52
+ autocompletionRegistry.add('jupyterlite-ai', autocompletionCommands);
53
+ return autocompletionRegistry;
54
+ }
55
+ };
20
56
 
21
57
  const chatPlugin: JupyterFrontEndPlugin<void> = {
22
58
  id: '@jupyterlite/ai:chat',
23
59
  description: 'LLM chat extension',
24
60
  autoStart: true,
61
+ requires: [IAIProviderRegistry, IRenderMimeRegistry, IAutocompletionRegistry],
25
62
  optional: [INotebookTracker, ISettingRegistry, IThemeManager],
26
- requires: [IAIProvider, IRenderMimeRegistry],
27
63
  activate: async (
28
64
  app: JupyterFrontEnd,
29
- aiProvider: IAIProvider,
65
+ providerRegistry: IAIProviderRegistry,
30
66
  rmRegistry: IRenderMimeRegistry,
67
+ autocompletionRegistry: IAutocompletionRegistry,
31
68
  notebookTracker: INotebookTracker | null,
32
69
  settingsRegistry: ISettingRegistry | null,
33
70
  themeManager: IThemeManager | null
@@ -41,18 +78,23 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
41
78
  }
42
79
 
43
80
  const chatHandler = new ChatHandler({
44
- aiProvider: aiProvider,
45
- activeCellManager: activeCellManager
81
+ providerRegistry,
82
+ activeCellManager
46
83
  });
47
84
 
48
85
  let sendWithShiftEnter = false;
49
86
  let enableCodeToolbar = true;
87
+ let personaName = 'AI';
50
88
 
51
89
  function loadSetting(setting: ISettingRegistry.ISettings): void {
52
90
  sendWithShiftEnter = setting.get('sendWithShiftEnter')
53
91
  .composite as boolean;
54
92
  enableCodeToolbar = setting.get('enableCodeToolbar').composite as boolean;
93
+ personaName = setting.get('personaName').composite as string;
94
+
95
+ // set the properties
55
96
  chatHandler.config = { sendWithShiftEnter, enableCodeToolbar };
97
+ chatHandler.personaName = personaName;
56
98
  }
57
99
 
58
100
  Promise.all([app.restored, settingsRegistry?.load(chatPlugin.id)])
@@ -77,9 +119,10 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
77
119
  chatWidget = buildChatSidebar({
78
120
  model: chatHandler,
79
121
  themeManager,
80
- rmRegistry
122
+ rmRegistry,
123
+ autocompletionRegistry
81
124
  });
82
- chatWidget.title.caption = 'Codestral Chat';
125
+ chatWidget.title.caption = 'Jupyterlite AI Chat';
83
126
  } catch (e) {
84
127
  chatWidget = buildErrorWidget(themeManager);
85
128
  }
@@ -90,27 +133,53 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
90
133
  }
91
134
  };
92
135
 
93
- const aiProviderPlugin: JupyterFrontEndPlugin<IAIProvider> = {
94
- id: '@jupyterlite/ai:ai-provider',
136
+ const completerPlugin: JupyterFrontEndPlugin<void> = {
137
+ id: '@jupyterlite/ai:completer',
95
138
  autoStart: true,
96
- requires: [ICompletionProviderManager, ISettingRegistry],
97
- provides: IAIProvider,
139
+ requires: [IAIProviderRegistry, ICompletionProviderManager],
98
140
  activate: (
99
141
  app: JupyterFrontEnd,
100
- manager: ICompletionProviderManager,
101
- settingRegistry: ISettingRegistry
102
- ): IAIProvider => {
103
- const aiProvider = new AIProvider({
104
- completionProviderManager: manager,
142
+ providerRegistry: IAIProviderRegistry,
143
+ manager: ICompletionProviderManager
144
+ ): void => {
145
+ const completer = new CompletionProvider({
146
+ providerRegistry,
105
147
  requestCompletion: () => app.commands.execute('inline-completer:invoke')
106
148
  });
149
+ manager.registerInlineProvider(completer);
150
+ }
151
+ };
107
152
 
153
+ const providerRegistryPlugin: JupyterFrontEndPlugin<IAIProviderRegistry> = {
154
+ id: '@jupyterlite/ai:provider-registry',
155
+ autoStart: true,
156
+ requires: [IFormRendererRegistry, ISettingRegistry],
157
+ optional: [IRenderMimeRegistry],
158
+ provides: IAIProviderRegistry,
159
+ activate: (
160
+ app: JupyterFrontEnd,
161
+ editorRegistry: IFormRendererRegistry,
162
+ settingRegistry: ISettingRegistry,
163
+ rmRegistry?: IRenderMimeRegistry
164
+ ): IAIProviderRegistry => {
165
+ const providerRegistry = new AIProviderRegistry();
166
+
167
+ editorRegistry.addRenderer(
168
+ '@jupyterlite/ai:provider-registry.AIprovider',
169
+ aiSettingsRenderer({ providerRegistry, rmRegistry })
170
+ );
108
171
  settingRegistry
109
- .load(aiProviderPlugin.id)
172
+ .load(providerRegistryPlugin.id)
110
173
  .then(settings => {
111
174
  const updateProvider = () => {
112
- const provider = settings.get('provider').composite as string;
113
- aiProvider.setModels(provider, settings.composite);
175
+ // Update the settings to the AI providers.
176
+ const providerSettings = (settings.get('AIprovider').composite ?? {
177
+ provider: 'None'
178
+ }) as ReadonlyPartialJSONObject;
179
+ providerRegistry.setProvider(
180
+ providerSettings.provider as string,
181
+ providerSettings
182
+ );
114
183
  };
115
184
 
116
185
  settings.changed.connect(() => updateProvider());
@@ -118,13 +187,21 @@ const aiProviderPlugin: JupyterFrontEndPlugin<IAIProvider> = {
118
187
  })
119
188
  .catch(reason => {
120
189
  console.error(
121
- `Failed to load settings for ${aiProviderPlugin.id}`,
190
+ `Failed to load settings for ${providerRegistryPlugin.id}`,
122
191
  reason
123
192
  );
124
193
  });
125
194
 
126
- return aiProvider;
195
+ // Initialize the registry with the default providers
196
+ AIProviders.forEach(provider => providerRegistry.add(provider));
197
+
198
+ return providerRegistry;
127
199
  }
128
200
  };
129
201
 
130
- export default [chatPlugin, aiProviderPlugin];
202
+ export default [
203
+ providerRegistryPlugin,
204
+ autocompletionRegistryPlugin,
205
+ chatPlugin,
206
+ completerPlugin
207
+ ];
@@ -0,0 +1,75 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { ChatAnthropic } from '@langchain/anthropic';
6
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
7
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
8
+
9
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
11
+
12
+ export class AnthropicCompleter implements IBaseCompleter {
13
+ constructor(options: BaseCompleter.IOptions) {
14
+ this._anthropicProvider = new ChatAnthropic({ ...options.settings });
15
+ }
16
+
17
+ get provider(): BaseChatModel {
18
+ return this._anthropicProvider;
19
+ }
20
+
21
+ /**
22
+ * Getter and setter for the initial prompt.
23
+ */
24
+ get prompt(): string {
25
+ return this._prompt;
26
+ }
27
+ set prompt(value: string) {
28
+ this._prompt = value;
29
+ }
30
+
31
+ async fetch(
32
+ request: CompletionHandler.IRequest,
33
+ context: IInlineCompletionContext
34
+ ) {
35
+ const { text, offset: cursorOffset } = request;
36
+ const prompt = text.slice(0, cursorOffset);
37
+
38
+ // Anthropic does not allow whitespace at the end of the AIMessage
39
+ const trimmedPrompt = prompt.trim();
40
+
41
+ const messages = [
42
+ new SystemMessage(this._prompt),
43
+ new AIMessage(trimmedPrompt)
44
+ ];
45
+
46
+ try {
47
+ const response = await this._anthropicProvider.invoke(messages);
48
+ const items = [];
49
+
50
+ // Anthropic can return string or complex content, a list of string/images/other.
51
+ if (typeof response.content === 'string') {
52
+ items.push({
53
+ insertText: response.content
54
+ });
55
+ } else {
56
+ response.content.forEach(content => {
57
+ if (content.type !== 'text') {
58
+ return;
59
+ }
60
+ items.push({
61
+ insertText: content.text,
62
+ filterText: prompt.substring(trimmedPrompt.length)
63
+ });
64
+ });
65
+ }
66
+ return { items };
67
+ } catch (error) {
68
+ console.error('Error fetching completions', error);
69
+ return { items: [] };
70
+ }
71
+ }
72
+
73
+ private _anthropicProvider: ChatAnthropic;
74
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
75
+ }
@@ -2,14 +2,19 @@ import {
2
2
  CompletionHandler,
3
3
  IInlineCompletionContext
4
4
  } from '@jupyterlab/completer';
5
- import { LLM } from '@langchain/core/language_models/llms';
5
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
6
6
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
7
7
 
8
8
  export interface IBaseCompleter {
9
9
  /**
10
10
  * The LLM completer.
11
11
  */
12
- provider: LLM;
12
+ provider: BaseLanguageModel;
13
+
14
+ /**
15
+ * The completion prompt.
16
+ */
17
+ prompt: string;
13
18
 
14
19
  /**
15
20
  * The function to fetch a new completion.
@@ -0,0 +1,88 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
6
+ import { LLM } from '@langchain/core/language_models/llms';
7
+ import { HumanMessage, SystemMessage } from '@langchain/core/messages';
8
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
9
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
10
+
11
+ /**
12
+ * Regular expression to match the '```' string at the start of a string.
13
+ * So the completions returned by the LLM can still be kept after removing the code block formatting.
14
+ *
15
+ * For example, if the response contains the following content after typing `import pandas`:
16
+ *
17
+ * ```python
18
+ * as pd
19
+ * ```
20
+ *
21
+ * The formatting string after removing the code block delimiters will be:
22
+ *
23
+ * as pd
24
+ */
25
+ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
26
+
27
+ /**
28
+ * Regular expression to match the '```' string at the end of a string.
29
+ */
30
+ const CODE_BLOCK_END_REGEX = /```$/;
31
+
32
+ export class ChromeCompleter implements IBaseCompleter {
33
+ constructor(options: BaseCompleter.IOptions) {
34
+ this._chromeProvider = new ChromeAI({ ...options.settings });
35
+ }
36
+
37
+ /**
38
+ * Getter and setter for the initial prompt.
39
+ */
40
+ get prompt(): string {
41
+ return this._prompt;
42
+ }
43
+ set prompt(value: string) {
44
+ this._prompt = value;
45
+ }
46
+
47
+ get provider(): LLM {
48
+ return this._chromeProvider;
49
+ }
50
+
51
+ async fetch(
52
+ request: CompletionHandler.IRequest,
53
+ context: IInlineCompletionContext
54
+ ) {
55
+ const { text, offset: cursorOffset } = request;
56
+ const prompt = text.slice(0, cursorOffset);
57
+
58
+ const trimmedPrompt = prompt.trim();
59
+
60
+ const messages = [
61
+ new SystemMessage(this._prompt),
62
+ new HumanMessage(trimmedPrompt)
63
+ ];
64
+
65
+ try {
66
+ let response = await this._chromeProvider.invoke(messages);
67
+
68
+ // ChromeAI sometimes returns a string starting with '```',
69
+ // so process the response to remove the code block delimiters
70
+ if (CODE_BLOCK_START_REGEX.test(response)) {
71
+ response = response
72
+ .replace(CODE_BLOCK_START_REGEX, '')
73
+ .replace(CODE_BLOCK_END_REGEX, '');
74
+ }
75
+
76
+ const items = [{ insertText: response }];
77
+ return {
78
+ items
79
+ };
80
+ } catch (error) {
81
+ console.error('Error fetching completion:', error);
82
+ return { items: [] };
83
+ }
84
+ }
85
+
86
+ private _chromeProvider: ChromeAI;
87
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
88
+ }
@@ -2,72 +2,63 @@ import {
2
2
  CompletionHandler,
3
3
  IInlineCompletionContext
4
4
  } from '@jupyterlab/completer';
5
- import { LLM } from '@langchain/core/language_models/llms';
6
- import { MistralAI } from '@langchain/mistralai';
5
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
6
+ import {
7
+ BaseMessage,
8
+ HumanMessage,
9
+ SystemMessage
10
+ } from '@langchain/core/messages';
11
+ import { ChatMistralAI } from '@langchain/mistralai';
7
12
  import { Throttler } from '@lumino/polling';
8
- import { CompletionRequest } from '@mistralai/mistralai';
9
13
 
10
14
  import { BaseCompleter, IBaseCompleter } from './base-completer';
15
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
11
16
 
12
17
  /**
13
18
  * The Mistral API has a rate limit of 1 request per second
14
19
  */
15
20
  const INTERVAL = 1000;
16
21
 
17
- /**
18
- * Timeout to avoid endless requests
19
- */
20
- const REQUEST_TIMEOUT = 3000;
21
-
22
22
  export class CodestralCompleter implements IBaseCompleter {
23
23
  constructor(options: BaseCompleter.IOptions) {
24
- // this._requestCompletion = options.requestCompletion;
25
- this._mistralProvider = new MistralAI({ ...options.settings });
24
+ this._mistralProvider = new ChatMistralAI({ ...options.settings });
26
25
  this._throttler = new Throttler(
27
- async (data: CompletionRequest) => {
28
- const invokedData = data;
29
-
30
- // Request completion.
31
- const request = this._mistralProvider.completionWithRetry(
32
- data,
33
- {},
34
- false
35
- );
36
- const timeoutPromise = new Promise<null>(resolve => {
37
- return setTimeout(() => resolve(null), REQUEST_TIMEOUT);
38
- });
39
-
40
- // Fetch again if the request is too long or if the prompt has changed.
41
- const response = await Promise.race([request, timeoutPromise]);
42
- if (
43
- response === null ||
44
- invokedData.prompt !== this._currentData?.prompt
45
- ) {
46
- return {
47
- items: [],
48
- fetchAgain: true
49
- };
50
- }
51
-
26
+ async (messages: BaseMessage[]) => {
27
+ const response = await this._mistralProvider.invoke(messages);
52
28
  // Extract results of completion request.
53
- const items = response.choices.map((choice: any) => {
54
- return { insertText: choice.message.content as string };
55
- });
56
-
57
- return {
58
- items
59
- };
29
+ const items = [];
30
+ if (typeof response.content === 'string') {
31
+ items.push({
32
+ insertText: response.content
33
+ });
34
+ } else {
35
+ response.content.forEach(content => {
36
+ if (content.type !== 'text') {
37
+ return;
38
+ }
39
+ items.push({
40
+ insertText: content.text
41
+ });
42
+ });
43
+ }
44
+ return { items };
60
45
  },
61
46
  { limit: INTERVAL }
62
47
  );
63
48
  }
64
49
 
65
- get provider(): LLM {
50
+ get provider(): BaseChatModel {
66
51
  return this._mistralProvider;
67
52
  }
68
53
 
69
- set requestCompletion(value: () => void) {
70
- this._requestCompletion = value;
54
+ /**
55
+ * Getter and setter for the initial prompt.
56
+ */
57
+ get prompt(): string {
58
+ return this._prompt;
59
+ }
60
+ set prompt(value: string) {
61
+ this._prompt = value;
71
62
  }
72
63
 
73
64
  async fetch(
@@ -76,38 +67,21 @@ export class CodestralCompleter implements IBaseCompleter {
76
67
  ) {
77
68
  const { text, offset: cursorOffset } = request;
78
69
  const prompt = text.slice(0, cursorOffset);
79
- const suffix = text.slice(cursorOffset);
80
70
 
81
- const data = {
82
- prompt,
83
- suffix,
84
- model: this._mistralProvider.model,
85
- // temperature: 0,
86
- // top_p: 1,
87
- // max_tokens: 1024,
88
- // min_tokens: 0,
89
- stream: false,
90
- // random_seed: 1337,
91
- stop: []
92
- };
71
+ const messages: BaseMessage[] = [
72
+ new SystemMessage(this._prompt),
73
+ new HumanMessage(prompt)
74
+ ];
93
75
 
94
76
  try {
95
- this._currentData = data;
96
- const completionResult = await this._throttler.invoke(data);
97
- if (completionResult.fetchAgain) {
98
- if (this._requestCompletion) {
99
- this._requestCompletion();
100
- }
101
- }
102
- return { items: completionResult.items };
77
+ return await this._throttler.invoke(messages);
103
78
  } catch (error) {
104
79
  console.error('Error fetching completions', error);
105
80
  return { items: [] };
106
81
  }
107
82
  }
108
83
 
109
- private _requestCompletion?: () => void;
110
84
  private _throttler: Throttler;
111
- private _mistralProvider: MistralAI;
112
- private _currentData: CompletionRequest | null = null;
85
+ private _mistralProvider: ChatMistralAI;
86
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
113
87
  }