@jupyterlite/ai 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64) hide show
  1. package/lib/chat-handler.d.ts +10 -4
  2. package/lib/chat-handler.js +42 -10
  3. package/lib/completion-provider.d.ts +5 -18
  4. package/lib/completion-provider.js +8 -34
  5. package/lib/{llm-models/anthropic-completer.d.ts → default-providers/Anthropic/completer.d.ts} +1 -1
  6. package/lib/{llm-models/anthropic-completer.js → default-providers/Anthropic/completer.js} +1 -1
  7. package/lib/{llm-models/chrome-completer.d.ts → default-providers/ChromeAI/completer.d.ts} +1 -1
  8. package/lib/{llm-models/chrome-completer.js → default-providers/ChromeAI/completer.js} +1 -1
  9. package/lib/default-providers/ChromeAI/instructions.d.ts +2 -0
  10. package/lib/default-providers/ChromeAI/instructions.js +24 -0
  11. package/lib/{llm-models/codestral-completer.d.ts → default-providers/MistralAI/completer.d.ts} +1 -1
  12. package/lib/{llm-models/codestral-completer.js → default-providers/MistralAI/completer.js} +1 -1
  13. package/lib/default-providers/MistralAI/instructions.d.ts +2 -0
  14. package/lib/default-providers/MistralAI/instructions.js +16 -0
  15. package/lib/{llm-models/openai-completer.d.ts → default-providers/OpenAI/completer.d.ts} +1 -1
  16. package/lib/{llm-models/openai-completer.js → default-providers/OpenAI/completer.js} +1 -1
  17. package/lib/default-providers/index.d.ts +2 -0
  18. package/lib/default-providers/index.js +60 -0
  19. package/lib/index.d.ts +3 -3
  20. package/lib/index.js +51 -64
  21. package/lib/provider.d.ts +45 -17
  22. package/lib/provider.js +97 -41
  23. package/lib/settings/base.json +7 -0
  24. package/lib/settings/panel.d.ts +84 -0
  25. package/lib/settings/panel.js +267 -0
  26. package/lib/tokens.d.ts +103 -0
  27. package/lib/tokens.js +5 -0
  28. package/package.json +12 -5
  29. package/schema/provider-registry.json +23 -0
  30. package/src/chat-handler.ts +50 -13
  31. package/src/completion-provider.ts +13 -37
  32. package/src/{llm-models/anthropic-completer.ts → default-providers/Anthropic/completer.ts} +2 -2
  33. package/src/{llm-models/chrome-completer.ts → default-providers/ChromeAI/completer.ts} +3 -2
  34. package/src/default-providers/ChromeAI/instructions.ts +24 -0
  35. package/src/{llm-models/codestral-completer.ts → default-providers/MistralAI/completer.ts} +2 -2
  36. package/src/default-providers/MistralAI/instructions.ts +16 -0
  37. package/src/{llm-models/openai-completer.ts → default-providers/OpenAI/completer.ts} +2 -2
  38. package/src/default-providers/index.ts +71 -0
  39. package/src/index.ts +75 -77
  40. package/src/provider.ts +100 -43
  41. package/src/settings/panel.tsx +346 -0
  42. package/src/tokens.ts +112 -0
  43. package/style/base.css +4 -0
  44. package/lib/llm-models/index.d.ts +0 -3
  45. package/lib/llm-models/index.js +0 -3
  46. package/lib/llm-models/utils.d.ts +0 -16
  47. package/lib/llm-models/utils.js +0 -86
  48. package/lib/slash-commands.d.ts +0 -16
  49. package/lib/slash-commands.js +0 -25
  50. package/lib/token.d.ts +0 -13
  51. package/lib/token.js +0 -2
  52. package/schema/ai-provider.json +0 -17
  53. package/src/llm-models/index.ts +0 -3
  54. package/src/llm-models/utils.ts +0 -90
  55. package/src/slash-commands.tsx +0 -55
  56. package/src/token.ts +0 -19
  57. /package/lib/{llm-models/base-completer.d.ts → base-completer.d.ts} +0 -0
  58. /package/lib/{llm-models/base-completer.js → base-completer.js} +0 -0
  59. /package/lib/{_provider-settings/anthropic.json → default-providers/Anthropic/settings-schema.json} +0 -0
  60. /package/lib/{_provider-settings/chromeAI.json → default-providers/ChromeAI/settings-schema.json} +0 -0
  61. /package/lib/{_provider-settings/mistralAI.json → default-providers/MistralAI/settings-schema.json} +0 -0
  62. /package/lib/{_provider-settings/openAI.json → default-providers/OpenAI/settings-schema.json} +0 -0
  63. /package/src/{llm-models/base-completer.ts → base-completer.ts} +0 -0
  64. /package/src/{llm-models/svg.d.ts → global.d.ts} +0 -0
@@ -0,0 +1,103 @@
1
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
2
+ import { ReadonlyPartialJSONObject, Token } from '@lumino/coreutils';
3
+ import { ISignal } from '@lumino/signaling';
4
+ import { JSONSchema7 } from 'json-schema';
5
+ import { IBaseCompleter } from './base-completer';
6
+ export interface IDict<T = any> {
7
+ [key: string]: T;
8
+ }
9
+ export interface IType<T> {
10
+ new (...args: any[]): T;
11
+ }
12
+ /**
13
+ * The provider interface.
14
+ */
15
+ export interface IAIProvider {
16
+ /**
17
+ * The name of the provider.
18
+ */
19
+ name: string;
20
+ /**
21
+ * The chat model class to use.
22
+ */
23
+ chatModel?: IType<BaseChatModel>;
24
+ /**
25
+ * The completer class to use.
26
+ */
27
+ completer?: IType<IBaseCompleter>;
28
+ /**
29
+ * The settings schema for the provider.
30
+ */
31
+ settingsSchema?: any;
32
+ /**
33
+ * The instructions to be displayed in the settings, as helper to use the provider.
34
+ * A markdown renderer is used to render the instructions.
35
+ */
36
+ instructions?: string;
37
+ /**
38
+ * A function that extracts the error message from the provider API error.
39
+ * Defaults to `(error) => error.message`.
40
+ */
41
+ errorMessage?: (error: any) => string;
42
+ }
43
+ /**
44
+ * The provider registry interface.
45
+ */
46
+ export interface IAIProviderRegistry {
47
+ /**
48
+ * Get the list of provider names.
49
+ */
50
+ readonly providers: string[];
51
+ /**
52
+ * Add a new provider.
53
+ */
54
+ add(provider: IAIProvider): void;
55
+ /**
56
+ * Get the current provider name.
57
+ */
58
+ currentName: string;
59
+ /**
60
+ * Get the current completer of the completion provider.
61
+ */
62
+ currentCompleter: IBaseCompleter | null;
63
+ /**
64
+ * Get the current llm chat model.
65
+ */
66
+ currentChatModel: BaseChatModel | null;
67
+ /**
68
+ * Get the settings schema of a given provider.
69
+ */
70
+ getSettingsSchema(provider: string): JSONSchema7;
71
+ /**
72
+ * Get the instructions of a given provider.
73
+ */
74
+ getInstructions(provider: string): string | undefined;
75
+ /**
76
+ * Format an error message from the current provider.
77
+ */
78
+ formatErrorMessage(error: any): string;
79
+ /**
80
+ * Set the providers (chat model and completer).
81
+ * Creates the providers if the name has changed, otherwise only updates their config.
82
+ *
83
+ * @param name - the name of the provider to use.
84
+ * @param settings - the settings for the models.
85
+ */
86
+ setProvider(name: string, settings: ReadonlyPartialJSONObject): void;
87
+ /**
88
+ * A signal emitting when the provider or its settings has changed.
89
+ */
90
+ readonly providerChanged: ISignal<IAIProviderRegistry, void>;
91
+ /**
92
+ * Get the current chat error.
93
+ */
94
+ readonly chatError: string;
95
+ /**
96
+ * Get the current completer error.
97
+ */
98
+ readonly completerError: string;
99
+ }
100
+ /**
101
+ * The provider registry token.
102
+ */
103
+ export declare const IAIProviderRegistry: Token<IAIProviderRegistry>;
package/lib/tokens.js ADDED
@@ -0,0 +1,5 @@
1
+ import { Token } from '@lumino/coreutils';
2
+ /**
3
+ * The provider registry token.
4
+ */
5
+ export const IAIProviderRegistry = new Token('@jupyterlite/ai:provider-registry', 'Provider for chat and completion LLM provider');
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@jupyterlite/ai",
3
- "version": "0.3.0",
3
+ "version": "0.5.0",
4
4
  "description": "AI code completions and chat for JupyterLite",
5
5
  "keywords": [
6
6
  "jupyter",
@@ -14,7 +14,7 @@
14
14
  "license": "BSD-3-Clause",
15
15
  "author": "JupyterLite Contributors",
16
16
  "files": [
17
- "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf}",
17
+ "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf,md}",
18
18
  "style/**/*.{css,js,eot,gif,html,jpg,json,png,svg,woff2,ttf}",
19
19
  "src/**/*.{ts,tsx}",
20
20
  "schema/*.json"
@@ -27,9 +27,9 @@
27
27
  "url": "https://github.com/jupyterlite/ai.git"
28
28
  },
29
29
  "scripts": {
30
- "build": "node ./scripts/settings-generator.js && jlpm build:lib && jlpm build:labextension:dev",
30
+ "build": "jlpm build:lib && jlpm build:labextension:dev",
31
31
  "build:dev": "jlpm build:lib && jlpm build:labextension:dev",
32
- "build:prod": "node ./scripts/settings-generator.js && jlpm clean && jlpm build:lib:prod && jlpm build:labextension",
32
+ "build:prod": "jlpm settings:build && jlpm clean && jlpm build:lib:prod && jlpm build:labextension",
33
33
  "build:labextension": "jupyter labextension build .",
34
34
  "build:labextension:dev": "jupyter labextension build --development True .",
35
35
  "build:lib": "tsc --sourceMap",
@@ -47,6 +47,8 @@
47
47
  "prettier": "jlpm prettier:base --write --list-different",
48
48
  "prettier:base": "prettier \"**/*{.ts,.tsx,.js,.jsx,.css,.json,.md}\"",
49
49
  "prettier:check": "jlpm prettier:base --check",
50
+ "settings:build": "node ./scripts/settings-checker.js --generate",
51
+ "settings:check": "node ./scripts/settings-checker.js",
50
52
  "stylelint": "jlpm stylelint:check --fix",
51
53
  "stylelint:check": "stylelint --cache \"style/**/*.css\"",
52
54
  "watch": "run-p watch:src watch:labextension",
@@ -54,13 +56,14 @@
54
56
  "watch:labextension": "jupyter labextension watch ."
55
57
  },
56
58
  "dependencies": {
57
- "@jupyter/chat": "^0.7.1",
59
+ "@jupyter/chat": "^0.8.1",
58
60
  "@jupyterlab/application": "^4.4.0-alpha.0",
59
61
  "@jupyterlab/apputils": "^4.5.0-alpha.0",
60
62
  "@jupyterlab/completer": "^4.4.0-alpha.0",
61
63
  "@jupyterlab/notebook": "^4.4.0-alpha.0",
62
64
  "@jupyterlab/rendermime": "^4.4.0-alpha.0",
63
65
  "@jupyterlab/settingregistry": "^4.4.0-alpha.0",
66
+ "@jupyterlab/ui-components": "^4.4.0-alpha.0",
64
67
  "@langchain/anthropic": "^0.3.9",
65
68
  "@langchain/community": "^0.3.31",
66
69
  "@langchain/core": "^0.3.40",
@@ -71,6 +74,10 @@
71
74
  "@lumino/signaling": "^2.1.2",
72
75
  "@mui/icons-material": "^5.11.0",
73
76
  "@mui/material": "^5.11.0",
77
+ "@rjsf/core": "^4.2.0",
78
+ "@rjsf/utils": "^5.18.4",
79
+ "@rjsf/validator-ajv8": "^5.18.4",
80
+ "jupyter-secrets-manager": "^0.1.1",
74
81
  "react": "^18.2.0",
75
82
  "react-dom": "^18.2.0"
76
83
  },
@@ -0,0 +1,23 @@
1
+ {
2
+ "title": "AI provider",
3
+ "description": "Provider registry settings",
4
+ "jupyter.lab.setting-icon": "@jupyterlite/ai:jupyternaut-lite",
5
+ "jupyter.lab.setting-icon-label": "JupyterLite AI Chat",
6
+ "type": "object",
7
+ "properties": {
8
+ "UseSecretsManager": {
9
+ "type": "boolean",
10
+ "title": "Use secrets manager",
11
+ "description": "Whether to use or not the secrets manager. If not, secrets will be stored in the browser (local storage)",
12
+ "default": true
13
+ },
14
+ "AIprovider": {
15
+ "type": "object",
16
+ "title": "AI provider",
17
+ "description": "The AI provider configuration",
18
+ "default": {},
19
+ "additionalProperties": true
20
+ }
21
+ },
22
+ "additionalProperties": false
23
+ }
@@ -4,9 +4,12 @@
4
4
  */
5
5
 
6
6
  import {
7
+ ChatCommand,
7
8
  ChatModel,
9
+ IChatCommandProvider,
8
10
  IChatHistory,
9
11
  IChatMessage,
12
+ IInputModel,
10
13
  INewMessage
11
14
  } from '@jupyter/chat';
12
15
  import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
@@ -17,9 +20,8 @@ import {
17
20
  SystemMessage
18
21
  } from '@langchain/core/messages';
19
22
  import { UUID } from '@lumino/coreutils';
20
- import { getErrorMessage } from './llm-models';
21
23
  import { chatSystemPrompt } from './provider';
22
- import { IAIProvider } from './token';
24
+ import { IAIProviderRegistry } from './tokens';
23
25
  import { jupyternautLiteIcon } from './icons';
24
26
 
25
27
  /**
@@ -37,17 +39,21 @@ export type ConnectionMessage = {
37
39
  export class ChatHandler extends ChatModel {
38
40
  constructor(options: ChatHandler.IOptions) {
39
41
  super(options);
40
- this._aiProvider = options.aiProvider;
41
- this._prompt = chatSystemPrompt({ provider_name: this._aiProvider.name });
42
+ this._providerRegistry = options.providerRegistry;
43
+ this._prompt = chatSystemPrompt({
44
+ provider_name: this._providerRegistry.currentName
45
+ });
42
46
 
43
- this._aiProvider.modelChange.connect(() => {
44
- this._errorMessage = this._aiProvider.chatError;
45
- this._prompt = chatSystemPrompt({ provider_name: this._aiProvider.name });
47
+ this._providerRegistry.providerChanged.connect(() => {
48
+ this._errorMessage = this._providerRegistry.chatError;
49
+ this._prompt = chatSystemPrompt({
50
+ provider_name: this._providerRegistry.currentName
51
+ });
46
52
  });
47
53
  }
48
54
 
49
55
  get provider(): BaseChatModel | null {
50
- return this._aiProvider.chatModel;
56
+ return this._providerRegistry.currentChatModel;
51
57
  }
52
58
 
53
59
  /**
@@ -95,7 +101,7 @@ export class ChatHandler extends ChatModel {
95
101
  };
96
102
  this.messageAdded(msg);
97
103
 
98
- if (this._aiProvider.chatModel === null) {
104
+ if (this._providerRegistry.currentChatModel === null) {
99
105
  const errorMsg: IChatMessage = {
100
106
  id: UUID.uuid4(),
101
107
  body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`,
@@ -134,7 +140,7 @@ export class ChatHandler extends ChatModel {
134
140
  let content = '';
135
141
 
136
142
  try {
137
- for await (const chunk of await this._aiProvider.chatModel.stream(
143
+ for await (const chunk of await this._providerRegistry.currentChatModel.stream(
138
144
  messages
139
145
  )) {
140
146
  content += chunk.content ?? chunk;
@@ -144,7 +150,7 @@ export class ChatHandler extends ChatModel {
144
150
  this._history.messages.push(botMsg);
145
151
  return true;
146
152
  } catch (reason) {
147
- const error = getErrorMessage(this._aiProvider.name, reason);
153
+ const error = this._providerRegistry.formatErrorMessage(reason);
148
154
  const errorMsg: IChatMessage = {
149
155
  id: UUID.uuid4(),
150
156
  body: `**${error}**`,
@@ -171,7 +177,7 @@ export class ChatHandler extends ChatModel {
171
177
  super.messageAdded(message);
172
178
  }
173
179
 
174
- private _aiProvider: IAIProvider;
180
+ private _providerRegistry: IAIProviderRegistry;
175
181
  private _personaName = 'AI';
176
182
  private _prompt: string;
177
183
  private _errorMessage: string = '';
@@ -181,6 +187,37 @@ export class ChatHandler extends ChatModel {
181
187
 
182
188
  export namespace ChatHandler {
183
189
  export interface IOptions extends ChatModel.IOptions {
184
- aiProvider: IAIProvider;
190
+ providerRegistry: IAIProviderRegistry;
191
+ }
192
+
193
+ export class ClearCommandProvider implements IChatCommandProvider {
194
+ public id: string = '@jupyterlite/ai:clear-commands';
195
+ private _slash_commands: ChatCommand[] = [
196
+ {
197
+ name: '/clear',
198
+ providerId: this.id,
199
+ replaceWith: '/clear',
200
+ description: 'Clear the chat'
201
+ }
202
+ ];
203
+ async getChatCommands(inputModel: IInputModel) {
204
+ const match = inputModel.currentWord?.match(/^\/\w*/)?.[0];
205
+ if (!match) {
206
+ return [];
207
+ }
208
+
209
+ const commands = this._slash_commands.filter(cmd =>
210
+ cmd.name.startsWith(match)
211
+ );
212
+ return commands;
213
+ }
214
+
215
+ async handleChatCommand(
216
+ command: ChatCommand,
217
+ inputModel: IInputModel
218
+ ): Promise<void> {
219
+ // no handling needed because `replaceWith` is set in each command.
220
+ return;
221
+ }
185
222
  }
186
223
  }
@@ -3,10 +3,9 @@ import {
3
3
  IInlineCompletionContext,
4
4
  IInlineCompletionProvider
5
5
  } from '@jupyterlab/completer';
6
- import { BaseLanguageModel } from '@langchain/core/language_models/base';
7
- import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
8
6
 
9
- import { getCompleter, IBaseCompleter, BaseCompleter } from './llm-models';
7
+ import { IBaseCompleter } from './base-completer';
8
+ import { IAIProviderRegistry } from './tokens';
10
9
 
11
10
  /**
12
11
  * The generic completion provider to register to the completion provider manager.
@@ -15,67 +14,44 @@ export class CompletionProvider implements IInlineCompletionProvider {
15
14
  readonly identifier = '@jupyterlite/ai';
16
15
 
17
16
  constructor(options: CompletionProvider.IOptions) {
18
- const { name, settings } = options;
17
+ this._providerRegistry = options.providerRegistry;
19
18
  this._requestCompletion = options.requestCompletion;
20
- this.setCompleter(name, settings);
21
- }
22
19
 
23
- /**
24
- * Set the completer.
25
- *
26
- * @param name - the name of the completer.
27
- * @param settings - The settings associated to the completer.
28
- */
29
- setCompleter(name: string, settings: ReadonlyPartialJSONObject) {
30
- try {
31
- this._completer = getCompleter(name, settings);
32
- if (this._completer) {
33
- this._completer.requestCompletion = this._requestCompletion;
20
+ this._providerRegistry.providerChanged.connect(() => {
21
+ if (this.completer) {
22
+ this.completer.requestCompletion = this._requestCompletion;
34
23
  }
35
- this._name = this._completer === null ? 'None' : name;
36
- } catch (e: any) {
37
- this._completer = null;
38
- this._name = 'None';
39
- throw e;
40
- }
24
+ });
41
25
  }
42
26
 
43
27
  /**
44
28
  * Get the current completer name.
45
29
  */
46
30
  get name(): string {
47
- return this._name;
31
+ return this._providerRegistry.currentName;
48
32
  }
49
33
 
50
34
  /**
51
35
  * Get the current completer.
52
36
  */
53
37
  get completer(): IBaseCompleter | null {
54
- return this._completer;
55
- }
56
-
57
- /**
58
- * Get the LLM completer.
59
- */
60
- get llmCompleter(): BaseLanguageModel | null {
61
- return this._completer?.provider || null;
38
+ return this._providerRegistry.currentCompleter;
62
39
  }
63
40
 
64
41
  async fetch(
65
42
  request: CompletionHandler.IRequest,
66
43
  context: IInlineCompletionContext
67
44
  ) {
68
- return this._completer?.fetch(request, context);
45
+ return this.completer?.fetch(request, context);
69
46
  }
70
47
 
71
- private _name: string = 'None';
48
+ private _providerRegistry: IAIProviderRegistry;
72
49
  private _requestCompletion: () => void;
73
- private _completer: IBaseCompleter | null = null;
74
50
  }
75
51
 
76
52
  export namespace CompletionProvider {
77
- export interface IOptions extends BaseCompleter.IOptions {
78
- name: string;
53
+ export interface IOptions {
54
+ providerRegistry: IAIProviderRegistry;
79
55
  requestCompletion: () => void;
80
56
  }
81
57
  }
@@ -6,8 +6,8 @@ import { ChatAnthropic } from '@langchain/anthropic';
6
6
  import { BaseChatModel } from '@langchain/core/language_models/chat_models';
7
7
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
8
8
 
9
- import { BaseCompleter, IBaseCompleter } from './base-completer';
10
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
9
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
11
11
 
12
12
  export class AnthropicCompleter implements IBaseCompleter {
13
13
  constructor(options: BaseCompleter.IOptions) {
@@ -5,8 +5,9 @@ import {
5
5
  import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
6
6
  import { LLM } from '@langchain/core/language_models/llms';
7
7
  import { HumanMessage, SystemMessage } from '@langchain/core/messages';
8
- import { BaseCompleter, IBaseCompleter } from './base-completer';
9
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
8
+
9
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
10
11
 
11
12
  /**
12
13
  * Regular expression to match the '```' string at the start of a string.
@@ -0,0 +1,24 @@
1
+ export default `
2
+ <i class="fas fa-exclamation-triangle"></i> Support for ChromeAI is still experimental and only available in Google Chrome.
3
+
4
+ You can test whether ChromeAI is enabled in your browser by going to the following URL: <https://chromeai.org/>
5
+
6
+ Enable the proper flags in Google Chrome.
7
+
8
+ - chrome://flags/#prompt-api-for-gemini-nano
9
+ - Select: \`Enabled\`
10
+ - chrome://flags/#optimization-guide-on-device-model
11
+ - Select: \`Enabled BypassPrefRequirement\`
12
+ - chrome://components
13
+ - Click \`Check for Update\` on Optimization Guide On Device Model to download the model
14
+ - [Optional] chrome://flags/#text-safety-classifier
15
+
16
+ <img src="https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c" alt="A screenshot showing how to enable the ChromeAI flag in Google Chrome" width="500px">
17
+
18
+ Then restart Chrome for these changes to take effect.
19
+
20
+ <i class="fas fa-exclamation-triangle"></i> On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing).
21
+ During the download, ChromeAI may not be available via the extension.
22
+
23
+ <i class="fa fa-info-circle" aria-hidden="true"></i> For more information about Chrome Built-in AI: <https://developer.chrome.com/docs/ai/get-started>
24
+ `;
@@ -11,8 +11,8 @@ import {
11
11
  import { ChatMistralAI } from '@langchain/mistralai';
12
12
  import { Throttler } from '@lumino/polling';
13
13
 
14
- import { BaseCompleter, IBaseCompleter } from './base-completer';
15
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
14
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
15
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
16
16
 
17
17
  /**
18
18
  * The Mistral API has a rate limit of 1 request per second
@@ -0,0 +1,16 @@
1
+ export default `
2
+ <i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official MistralAI extension.
3
+
4
+ 1. Go to <https://console.mistral.ai/api-keys/> and create an API key.
5
+
6
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png" alt="Screenshot showing how to create an API key" width="500px">
7
+
8
+ 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`MistralAI\`
9
+ provider and the API key (required).
10
+
11
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png" alt="Screenshot showing how to add the API key to the settings" width="500px">
12
+
13
+ 3. Open the chat, or use the inline completer
14
+
15
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png" alt="Screenshot showing how to use the chat" width="500px">
16
+ `;
@@ -6,8 +6,8 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
6
6
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
7
7
  import { ChatOpenAI } from '@langchain/openai';
8
8
 
9
- import { BaseCompleter, IBaseCompleter } from './base-completer';
10
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
9
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
11
11
 
12
12
  export class OpenAICompleter implements IBaseCompleter {
13
13
  constructor(options: BaseCompleter.IOptions) {
@@ -0,0 +1,71 @@
1
+ import {
2
+ JupyterFrontEnd,
3
+ JupyterFrontEndPlugin
4
+ } from '@jupyterlab/application';
5
+ import { ChatAnthropic } from '@langchain/anthropic';
6
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
7
+ import { ChatMistralAI } from '@langchain/mistralai';
8
+ import { ChatOpenAI } from '@langchain/openai';
9
+
10
+ import { IAIProvider, IAIProviderRegistry } from '../tokens';
11
+
12
+ // Import completers
13
+ import { AnthropicCompleter } from './Anthropic/completer';
14
+ import { ChromeCompleter } from './ChromeAI/completer';
15
+ import { CodestralCompleter } from './MistralAI/completer';
16
+ import { OpenAICompleter } from './OpenAI/completer';
17
+
18
+ // Import Settings
19
+ import AnthropicSettings from './Anthropic/settings-schema.json';
20
+ import ChromeAISettings from './ChromeAI/settings-schema.json';
21
+ import MistralAISettings from './MistralAI/settings-schema.json';
22
+ import OpenAISettings from './OpenAI/settings-schema.json';
23
+
24
+ // Import instructions
25
+ import ChromeAIInstructions from './ChromeAI/instructions';
26
+ import MistralAIInstructions from './MistralAI/instructions';
27
+
28
+ // Build the AIProvider list
29
+ const AIProviders: IAIProvider[] = [
30
+ {
31
+ name: 'Anthropic',
32
+ chatModel: ChatAnthropic,
33
+ completer: AnthropicCompleter,
34
+ settingsSchema: AnthropicSettings,
35
+ errorMessage: (error: any) => error.error.error.message
36
+ },
37
+ {
38
+ name: 'ChromeAI',
39
+ // TODO: fix
40
+ // @ts-expect-error: missing properties
41
+ chatModel: ChromeAI,
42
+ completer: ChromeCompleter,
43
+ instructions: ChromeAIInstructions,
44
+ settingsSchema: ChromeAISettings
45
+ },
46
+ {
47
+ name: 'MistralAI',
48
+ chatModel: ChatMistralAI,
49
+ completer: CodestralCompleter,
50
+ instructions: MistralAIInstructions,
51
+ settingsSchema: MistralAISettings
52
+ },
53
+ {
54
+ name: 'OpenAI',
55
+ chatModel: ChatOpenAI,
56
+ completer: OpenAICompleter,
57
+ settingsSchema: OpenAISettings
58
+ }
59
+ ];
60
+
61
+ export const defaultProviderPlugins: JupyterFrontEndPlugin<void>[] =
62
+ AIProviders.map(provider => {
63
+ return {
64
+ id: `@jupyterlite/ai:${provider.name}`,
65
+ autoStart: true,
66
+ requires: [IAIProviderRegistry],
67
+ activate: (app: JupyterFrontEnd, registry: IAIProviderRegistry) => {
68
+ registry.add(provider);
69
+ }
70
+ };
71
+ });