@jupyterlite/ai 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/README.md +48 -9
  2. package/lib/chat-handler.d.ts +15 -3
  3. package/lib/chat-handler.js +80 -28
  4. package/lib/completion-provider.d.ts +5 -18
  5. package/lib/completion-provider.js +8 -34
  6. package/lib/icons.d.ts +2 -0
  7. package/lib/icons.js +15 -0
  8. package/lib/index.d.ts +3 -2
  9. package/lib/index.js +79 -22
  10. package/lib/llm-models/anthropic-completer.d.ts +19 -0
  11. package/lib/llm-models/anthropic-completer.js +57 -0
  12. package/lib/llm-models/base-completer.d.ts +6 -2
  13. package/lib/llm-models/chrome-completer.d.ts +19 -0
  14. package/lib/llm-models/chrome-completer.js +67 -0
  15. package/lib/llm-models/codestral-completer.d.ts +9 -8
  16. package/lib/llm-models/codestral-completer.js +37 -54
  17. package/lib/llm-models/index.d.ts +3 -2
  18. package/lib/llm-models/index.js +42 -2
  19. package/lib/llm-models/openai-completer.d.ts +19 -0
  20. package/lib/llm-models/openai-completer.js +51 -0
  21. package/lib/provider.d.ts +54 -15
  22. package/lib/provider.js +123 -41
  23. package/lib/settings/instructions.d.ts +2 -0
  24. package/lib/settings/instructions.js +44 -0
  25. package/lib/settings/panel.d.ts +70 -0
  26. package/lib/settings/panel.js +190 -0
  27. package/lib/settings/schemas/_generated/Anthropic.json +70 -0
  28. package/lib/settings/schemas/_generated/ChromeAI.json +21 -0
  29. package/lib/settings/schemas/_generated/MistralAI.json +75 -0
  30. package/lib/settings/schemas/_generated/OpenAI.json +668 -0
  31. package/lib/settings/schemas/base.json +7 -0
  32. package/lib/settings/schemas/index.d.ts +3 -0
  33. package/lib/settings/schemas/index.js +11 -0
  34. package/lib/slash-commands.d.ts +16 -0
  35. package/lib/slash-commands.js +25 -0
  36. package/lib/tokens.d.ts +103 -0
  37. package/lib/tokens.js +5 -0
  38. package/package.json +27 -104
  39. package/schema/chat.json +8 -0
  40. package/schema/provider-registry.json +17 -0
  41. package/src/chat-handler.ts +103 -43
  42. package/src/completion-provider.ts +13 -37
  43. package/src/icons.ts +18 -0
  44. package/src/index.ts +101 -24
  45. package/src/llm-models/anthropic-completer.ts +75 -0
  46. package/src/llm-models/base-completer.ts +7 -2
  47. package/src/llm-models/chrome-completer.ts +88 -0
  48. package/src/llm-models/codestral-completer.ts +43 -69
  49. package/src/llm-models/index.ts +49 -2
  50. package/src/llm-models/openai-completer.ts +67 -0
  51. package/src/llm-models/svg.d.ts +9 -0
  52. package/src/provider.ts +138 -43
  53. package/src/settings/instructions.ts +48 -0
  54. package/src/settings/panel.tsx +257 -0
  55. package/src/settings/schemas/index.ts +15 -0
  56. package/src/slash-commands.tsx +55 -0
  57. package/src/tokens.ts +112 -0
  58. package/style/base.css +4 -0
  59. package/style/icons/jupyternaut-lite.svg +7 -0
  60. package/lib/llm-models/utils.d.ts +0 -15
  61. package/lib/llm-models/utils.js +0 -29
  62. package/lib/token.d.ts +0 -13
  63. package/lib/token.js +0 -2
  64. package/schema/ai-provider.json +0 -21
  65. package/src/llm-models/utils.ts +0 -41
  66. package/src/token.ts +0 -19
@@ -1,3 +1,50 @@
1
+ import { ChatAnthropic } from '@langchain/anthropic';
2
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
3
+ import { ChatMistralAI } from '@langchain/mistralai';
4
+ import { ChatOpenAI } from '@langchain/openai';
5
+
6
+ import { AnthropicCompleter } from './anthropic-completer';
7
+ import { CodestralCompleter } from './codestral-completer';
8
+ import { ChromeCompleter } from './chrome-completer';
9
+ import { OpenAICompleter } from './openai-completer';
10
+
11
+ import { instructions } from '../settings/instructions';
12
+ import { ProviderSettings } from '../settings/schemas';
13
+
14
+ import { IAIProvider } from '../tokens';
15
+
1
16
  export * from './base-completer';
2
- export * from './codestral-completer';
3
- export * from './utils';
17
+
18
+ const AIProviders: IAIProvider[] = [
19
+ {
20
+ name: 'Anthropic',
21
+ chatModel: ChatAnthropic,
22
+ completer: AnthropicCompleter,
23
+ settingsSchema: ProviderSettings.Anthropic,
24
+ errorMessage: (error: any) => error.error.error.message
25
+ },
26
+ {
27
+ name: 'ChromeAI',
28
+ // TODO: fix
29
+ // @ts-expect-error: missing properties
30
+ chatModel: ChromeAI,
31
+ completer: ChromeCompleter,
32
+ instructions: instructions.ChromeAI,
33
+ settingsSchema: ProviderSettings.ChromeAI
34
+ },
35
+ {
36
+ name: 'MistralAI',
37
+ chatModel: ChatMistralAI,
38
+ completer: CodestralCompleter,
39
+ instructions: instructions.MistralAI,
40
+ settingsSchema: ProviderSettings.MistralAI
41
+ },
42
+ {
43
+ name: 'OpenAI',
44
+ chatModel: ChatOpenAI,
45
+ completer: OpenAICompleter,
46
+ settingsSchema: ProviderSettings.OpenAI
47
+ }
48
+ ];
49
+
50
+ export { AIProviders };
@@ -0,0 +1,67 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
6
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
7
+ import { ChatOpenAI } from '@langchain/openai';
8
+
9
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
11
+
12
+ export class OpenAICompleter implements IBaseCompleter {
13
+ constructor(options: BaseCompleter.IOptions) {
14
+ this._openAIProvider = new ChatOpenAI({ ...options.settings });
15
+ }
16
+
17
+ get provider(): BaseChatModel {
18
+ return this._openAIProvider;
19
+ }
20
+
21
+ /**
22
+ * Getter and setter for the initial prompt.
23
+ */
24
+ get prompt(): string {
25
+ return this._prompt;
26
+ }
27
+ set prompt(value: string) {
28
+ this._prompt = value;
29
+ }
30
+
31
+ async fetch(
32
+ request: CompletionHandler.IRequest,
33
+ context: IInlineCompletionContext
34
+ ) {
35
+ const { text, offset: cursorOffset } = request;
36
+ const prompt = text.slice(0, cursorOffset);
37
+
38
+ const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
39
+
40
+ try {
41
+ const response = await this._openAIProvider.invoke(messages);
42
+ const items = [];
43
+ if (typeof response.content === 'string') {
44
+ items.push({
45
+ insertText: response.content
46
+ });
47
+ } else {
48
+ response.content.forEach(content => {
49
+ if (content.type !== 'text') {
50
+ return;
51
+ }
52
+ items.push({
53
+ insertText: content.text,
54
+ filterText: prompt.substring(prompt.length)
55
+ });
56
+ });
57
+ }
58
+ return { items };
59
+ } catch (error) {
60
+ console.error('Error fetching completions', error);
61
+ return { items: [] };
62
+ }
63
+ }
64
+
65
+ private _openAIProvider: ChatOpenAI;
66
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
67
+ }
@@ -0,0 +1,9 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+
6
+ declare module '*.svg' {
7
+ const value: string;
8
+ export default value;
9
+ }
package/src/provider.ts CHANGED
@@ -4,44 +4,113 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
4
4
  import { ISignal, Signal } from '@lumino/signaling';
5
5
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
6
6
 
7
- import { CompletionProvider } from './completion-provider';
8
- import { getChatModel, IBaseCompleter } from './llm-models';
9
- import { IAIProvider } from './token';
10
-
11
- export class AIProvider implements IAIProvider {
12
- constructor(options: AIProvider.IOptions) {
13
- this._completionProvider = new CompletionProvider({
14
- name: 'None',
15
- settings: {},
16
- requestCompletion: options.requestCompletion
17
- });
18
- options.completionProviderManager.registerInlineProvider(
19
- this._completionProvider
20
- );
7
+ import { IBaseCompleter } from './llm-models';
8
+ import { IAIProvider, IAIProviderRegistry } from './tokens';
9
+ import { JSONSchema7 } from 'json-schema';
10
+
11
+ export const chatSystemPrompt = (
12
+ options: AIProviderRegistry.IPromptOptions
13
+ ) => `
14
+ You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
15
+ You are not a language model, but rather an application built on a foundation model from ${options.provider_name}.
16
+ You are talkative and you provide lots of specific details from the foundation model's context.
17
+ You may use Markdown to format your response.
18
+ If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).
19
+ If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.
20
+ All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`.
21
+ - Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\`
22
+ - **Correct** response: \`You have \\(\\$80\\) remaining.\`
23
+ - **Incorrect** response: \`You have $80 remaining.\`
24
+ If you do not know the answer to a question, answer truthfully by responding that you do not know.
25
+ The following is a friendly conversation between you and a human.
26
+ `;
27
+
28
+ export const COMPLETION_SYSTEM_PROMPT = `
29
+ You are an application built to provide helpful code completion suggestions.
30
+ You should only produce code. Keep comments to minimum, use the
31
+ programming language comment syntax. Produce clean code.
32
+ The code is written in JupyterLab, a data analysis and code development
33
+ environment which can execute code extended with additional syntax for
34
+ interactive features, such as magics.
35
+ Only give raw strings back, do not format the response using backticks.
36
+ The output should be a single string, and should correspond to what a human users
37
+ would write.
38
+ Do not include the prompt in the output, only the string that should be appended to the current input.
39
+ `;
40
+
41
+ export class AIProviderRegistry implements IAIProviderRegistry {
42
+ /**
43
+ * Get the list of provider names.
44
+ */
45
+ get providers(): string[] {
46
+ return Array.from(this._providers.keys());
47
+ }
48
+
49
+ /**
50
+ * Add a new provider.
51
+ */
52
+ add(provider: IAIProvider): void {
53
+ if (this._providers.has(provider.name)) {
54
+ throw new Error(
55
+ `A AI provider named '${provider.name}' is already registered`
56
+ );
57
+ }
58
+ this._providers.set(provider.name, provider);
21
59
  }
22
60
 
23
- get name(): string {
61
+ /**
62
+ * Get the current provider name.
63
+ */
64
+ get currentName(): string {
24
65
  return this._name;
25
66
  }
26
67
 
27
68
  /**
28
69
  * Get the current completer of the completion provider.
29
70
  */
30
- get completer(): IBaseCompleter | null {
31
- if (this._name === null) {
71
+ get currentCompleter(): IBaseCompleter | null {
72
+ if (this._name === 'None') {
32
73
  return null;
33
74
  }
34
- return this._completionProvider.completer;
75
+ return this._completer;
35
76
  }
36
77
 
37
78
  /**
38
79
  * Get the current llm chat model.
39
80
  */
40
- get chatModel(): BaseChatModel | null {
41
- if (this._name === null) {
81
+ get currentChatModel(): BaseChatModel | null {
82
+ if (this._name === 'None') {
42
83
  return null;
43
84
  }
44
- return this._llmChatModel;
85
+ return this._chatModel;
86
+ }
87
+
88
+ /**
89
+ * Get the settings schema of a given provider.
90
+ */
91
+ getSettingsSchema(provider: string): JSONSchema7 {
92
+ return (this._providers.get(provider)?.settingsSchema?.properties ||
93
+ {}) as JSONSchema7;
94
+ }
95
+
96
+ /**
97
+ * Get the instructions of a given provider.
98
+ */
99
+ getInstructions(provider: string): string | undefined {
100
+ return this._providers.get(provider)?.instructions;
101
+ }
102
+
103
+ /**
104
+ * Format an error message from the current provider.
105
+ */
106
+ formatErrorMessage(error: any): string {
107
+ if (this._currentProvider?.errorMessage) {
108
+ return this._currentProvider?.errorMessage(error);
109
+ }
110
+ if (error.message) {
111
+ return error.message;
112
+ }
113
+ return error;
45
114
  }
46
115
 
47
116
  /**
@@ -59,43 +128,59 @@ export class AIProvider implements IAIProvider {
59
128
  }
60
129
 
61
130
  /**
62
- * Set the models (chat model and completer).
63
- * Creates the models if the name has changed, otherwise only updates their config.
131
+ * Set the providers (chat model and completer).
132
+ * Creates the providers if the name has changed, otherwise only updates their config.
64
133
  *
65
- * @param name - the name of the model to use.
134
+ * @param name - the name of the provider to use.
66
135
  * @param settings - the settings for the models.
67
136
  */
68
- setModels(name: string, settings: ReadonlyPartialJSONObject) {
69
- try {
70
- this._completionProvider.setCompleter(name, settings);
71
- this._completerError = '';
72
- } catch (e: any) {
73
- this._completerError = e.message;
137
+ setProvider(name: string, settings: ReadonlyPartialJSONObject): void {
138
+ this._currentProvider = this._providers.get(name) ?? null;
139
+
140
+ if (this._currentProvider?.completer !== undefined) {
141
+ try {
142
+ this._completer = new this._currentProvider.completer({ ...settings });
143
+ this._completerError = '';
144
+ } catch (e: any) {
145
+ this._completerError = e.message;
146
+ }
147
+ } else {
148
+ this._completer = null;
74
149
  }
75
- try {
76
- this._llmChatModel = getChatModel(name, settings);
77
- this._chatError = '';
78
- } catch (e: any) {
79
- this._chatError = e.message;
80
- this._llmChatModel = null;
150
+
151
+ if (this._currentProvider?.chatModel !== undefined) {
152
+ try {
153
+ this._chatModel = new this._currentProvider.chatModel({ ...settings });
154
+ this._chatError = '';
155
+ } catch (e: any) {
156
+ this._chatError = e.message;
157
+ this._chatModel = null;
158
+ }
159
+ } else {
160
+ this._chatModel = null;
81
161
  }
82
162
  this._name = name;
83
- this._modelChange.emit();
163
+ this._providerChanged.emit();
84
164
  }
85
165
 
86
- get modelChange(): ISignal<IAIProvider, void> {
87
- return this._modelChange;
166
+ /**
167
+ * A signal emitting when the provider or its settings has changed.
168
+ */
169
+ get providerChanged(): ISignal<IAIProviderRegistry, void> {
170
+ return this._providerChanged;
88
171
  }
89
172
 
90
- private _completionProvider: CompletionProvider;
91
- private _llmChatModel: BaseChatModel | null = null;
173
+ private _currentProvider: IAIProvider | null = null;
174
+ private _completer: IBaseCompleter | null = null;
175
+ private _chatModel: BaseChatModel | null = null;
92
176
  private _name: string = 'None';
93
- private _modelChange = new Signal<IAIProvider, void>(this);
177
+ private _providerChanged = new Signal<IAIProviderRegistry, void>(this);
94
178
  private _chatError: string = '';
95
179
  private _completerError: string = '';
180
+ private _providers = new Map<string, IAIProvider>();
96
181
  }
97
182
 
98
- export namespace AIProvider {
183
+ export namespace AIProviderRegistry {
99
184
  /**
100
185
  * The options for the LLM provider.
101
186
  */
@@ -110,6 +195,16 @@ export namespace AIProvider {
110
195
  requestCompletion: () => void;
111
196
  }
112
197
 
198
+ /**
199
+ * The options for the Chat system prompt.
200
+ */
201
+ export interface IPromptOptions {
202
+ /**
203
+ * The provider name.
204
+ */
205
+ provider_name: string;
206
+ }
207
+
113
208
  /**
114
209
  * This function indicates whether a key is writable in an object.
115
210
  * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript
@@ -0,0 +1,48 @@
1
+ import { IDict } from '../tokens';
2
+
3
+ const chromeAiInstructions = `
4
+ <i class="fas fa-exclamation-triangle"></i> Support for ChromeAI is still experimental and only available in Google Chrome.
5
+
6
+ You can test ChromeAI is enabled in your browser by going to the following URL: https://chromeai.org/
7
+
8
+ Enable the proper flags in Google Chrome.
9
+
10
+ - chrome://flags/#prompt-api-for-gemini-nano
11
+ - Select: \`Enabled\`
12
+ - chrome://flags/#optimization-guide-on-device-model
13
+ - Select: \`Enabled BypassPrefRequirement\`
14
+ - chrome://components
15
+ - Click \`Check for Update\` on Optimization Guide On Device Model to download the model
16
+ - [Optional] chrome://flags/#text-safety-classifier
17
+
18
+ <img src="https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c" alt="A screenshot showing how to enable the ChromeAI flag in Google Chrome" width="500px">
19
+
20
+ Then restart Chrome for these changes to take effect.
21
+
22
+ <i class="fas fa-exclamation-triangle"></i> On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing).
23
+ During the download, ChromeAI may not be available via the extension.
24
+
25
+ <i class="fa fa-info-circle" aria-hidden="true"></i> For more information about Chrome Built-in AI: https://developer.chrome.com/docs/ai/get-started
26
+ `;
27
+
28
+ const mistralAIInstructions = `
29
+ <i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official MistralAI extension.
30
+
31
+ 1. Go to https://console.mistral.ai/api-keys/ and create an API key.
32
+
33
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png" alt="Screenshot showing how to create an API key" width="500px">
34
+
35
+ 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`MistralAI\`
36
+ provider and the API key (required).
37
+
38
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png" alt="Screenshot showing how to add the API key to the settings" width="500px">
39
+
40
+ 3. Open the chat, or use the inline completer
41
+
42
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png" alt="Screenshot showing how to use the chat" width="500px">
43
+ `;
44
+
45
+ export const instructions: IDict = {
46
+ ChromeAI: chromeAiInstructions,
47
+ MistralAI: mistralAIInstructions
48
+ };
@@ -0,0 +1,257 @@
1
+ import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
2
+ import { ISettingRegistry } from '@jupyterlab/settingregistry';
3
+ import { FormComponent, IFormRenderer } from '@jupyterlab/ui-components';
4
+ import { JSONExt } from '@lumino/coreutils';
5
+ import { IChangeEvent } from '@rjsf/core';
6
+ import type { FieldProps } from '@rjsf/utils';
7
+ import validator from '@rjsf/validator-ajv8';
8
+ import { JSONSchema7 } from 'json-schema';
9
+ import React from 'react';
10
+
11
+ import baseSettings from './schemas/base.json';
12
+ import { IAIProviderRegistry, IDict } from '../tokens';
13
+
14
+ const MD_MIME_TYPE = 'text/markdown';
15
+ const STORAGE_NAME = '@jupyterlite/ai:settings';
16
+ const INSTRUCTION_CLASS = 'jp-AISettingsInstructions';
17
+
18
+ export const aiSettingsRenderer = (options: {
19
+ providerRegistry: IAIProviderRegistry;
20
+ rmRegistry?: IRenderMimeRegistry;
21
+ }): IFormRenderer => {
22
+ return {
23
+ fieldRenderer: (props: FieldProps) => {
24
+ props.formContext = { ...props.formContext, ...options };
25
+ return <AiSettings {...props} />;
26
+ }
27
+ };
28
+ };
29
+
30
+ export interface ISettingsFormStates {
31
+ schema: JSONSchema7;
32
+ instruction: HTMLElement | null;
33
+ }
34
+
35
+ const WrappedFormComponent = (props: any): JSX.Element => {
36
+ return <FormComponent {...props} validator={validator} />;
37
+ };
38
+
39
+ export class AiSettings extends React.Component<
40
+ FieldProps,
41
+ ISettingsFormStates
42
+ > {
43
+ constructor(props: FieldProps) {
44
+ super(props);
45
+ if (!props.formContext.providerRegistry) {
46
+ throw new Error(
47
+ 'The provider registry is needed to enable the jupyterlite-ai settings panel'
48
+ );
49
+ }
50
+ this._providerRegistry = props.formContext.providerRegistry;
51
+ this._rmRegistry = props.formContext.rmRegistry ?? null;
52
+ this._settings = props.formContext.settings;
53
+
54
+ // Initialize the providers schema.
55
+ const providerSchema = JSONExt.deepCopy(baseSettings) as any;
56
+ providerSchema.properties.provider = {
57
+ type: 'string',
58
+ title: 'Provider',
59
+ description: 'The AI provider to use for chat and completion',
60
+ default: 'None',
61
+ enum: ['None'].concat(this._providerRegistry.providers)
62
+ };
63
+ this._providerSchema = providerSchema as JSONSchema7;
64
+
65
+ // Check if there are saved values in local storage, otherwise use the settings from
66
+ // the setting registry (which falls back to the defaults if there are no user settings).
67
+ const storageSettings = localStorage.getItem(STORAGE_NAME);
68
+ if (storageSettings === null) {
69
+ const labSettings = this._settings.get('AIprovider').composite;
70
+ if (labSettings && Object.keys(labSettings).includes('provider')) {
71
+ // Get the provider name.
72
+ const provider = Object.entries(labSettings).find(
73
+ v => v[0] === 'provider'
74
+ )?.[1] as string;
75
+ // Save the settings.
76
+ const settings: any = {
77
+ _current: provider
78
+ };
79
+ settings[provider] = labSettings;
80
+ localStorage.setItem(STORAGE_NAME, JSON.stringify(settings));
81
+ }
82
+ }
83
+
84
+ // Initialize the settings from the saved ones.
85
+ this._provider = this.getCurrentProvider();
86
+ this._currentSettings = this.getSettings();
87
+
88
+ // Initialize the schema.
89
+ const schema = this._buildSchema();
90
+ this.state = { schema, instruction: null };
91
+
92
+ this._renderInstruction();
93
+
94
+ // Update the setting registry.
95
+ this._settings
96
+ .set('AIprovider', this._currentSettings)
97
+ .catch(console.error);
98
+ }
99
+
100
+ /**
101
+ * Get the current provider from the local storage.
102
+ */
103
+ getCurrentProvider(): string {
104
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}');
105
+ return settings['_current'] ?? 'None';
106
+ }
107
+
108
+ /**
109
+ * Save the current provider to the local storage.
110
+ */
111
+ saveCurrentProvider(): void {
112
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}');
113
+ settings['_current'] = this._provider;
114
+ localStorage.setItem(STORAGE_NAME, JSON.stringify(settings));
115
+ }
116
+
117
+ /**
118
+ * Get settings from local storage for a given provider.
119
+ */
120
+ getSettings(): IDict<any> {
121
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}');
122
+ return settings[this._provider] ?? { provider: this._provider };
123
+ }
124
+
125
+ /**
126
+ * Save settings in local storage for a given provider.
127
+ */
128
+ saveSettings(value: IDict<any>) {
129
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) ?? '{}');
130
+ settings[this._provider] = value;
131
+ localStorage.setItem(STORAGE_NAME, JSON.stringify(settings));
132
+ }
133
+
134
+ /**
135
+ * Update the UI schema of the form.
136
+ * Currently used to hide API keys.
137
+ */
138
+ private _updateUiSchema(key: string) {
139
+ if (key.toLowerCase().includes('key')) {
140
+ this._uiSchema[key] = { 'ui:widget': 'password' };
141
+ }
142
+ }
143
+
144
+ /**
145
+ * Build the schema for a given provider.
146
+ */
147
+ private _buildSchema(): JSONSchema7 {
148
+ const schema = JSONExt.deepCopy(baseSettings) as any;
149
+ this._uiSchema = {};
150
+ const settingsSchema = this._providerRegistry.getSettingsSchema(
151
+ this._provider
152
+ );
153
+
154
+ if (settingsSchema) {
155
+ Object.entries(settingsSchema).forEach(([key, value]) => {
156
+ schema.properties[key] = value;
157
+ this._updateUiSchema(key);
158
+ });
159
+ }
160
+ return schema as JSONSchema7;
161
+ }
162
+
163
+ /**
164
+ * Update the schema state for the given provider, which triggers the re-rendering of
165
+ * the component.
166
+ */
167
+ private _updateSchema() {
168
+ const schema = this._buildSchema();
169
+ this.setState({ schema });
170
+ }
171
+
172
+ /**
173
+ * Render the markdown instructions for the current provider.
174
+ */
175
+ private async _renderInstruction(): Promise<void> {
176
+ let instructions = this._providerRegistry.getInstructions(this._provider);
177
+ if (!this._rmRegistry || !instructions) {
178
+ this.setState({ instruction: null });
179
+ return;
180
+ }
181
+ instructions = `---\n\n${instructions}\n\n---`;
182
+ const renderer = this._rmRegistry.createRenderer(MD_MIME_TYPE);
183
+ const model = this._rmRegistry.createModel({
184
+ data: { [MD_MIME_TYPE]: instructions }
185
+ });
186
+ await renderer.renderModel(model);
187
+ this.setState({ instruction: renderer.node });
188
+ }
189
+
190
+ /**
191
+ * Triggered when the provider has changed, to update the schema and values.
192
+ * Update the JupyterLab settings accordingly.
193
+ */
194
+ private _onProviderChanged = (e: IChangeEvent) => {
195
+ const provider = e.formData.provider;
196
+ if (provider === this._currentSettings.provider) {
197
+ return;
198
+ }
199
+ this._provider = provider;
200
+ this.saveCurrentProvider();
201
+ this._currentSettings = this.getSettings();
202
+ this._updateSchema();
203
+ this._renderInstruction();
204
+ this._settings
205
+ .set('AIprovider', { provider: this._provider, ...this._currentSettings })
206
+ .catch(console.error);
207
+ };
208
+
209
+ /**
210
+ * Triggered when the form value has changed, to update the current settings and save
211
+ * it in local storage.
212
+ * Update the JupyterLab settings accordingly.
213
+ */
214
+ private _onFormChange = (e: IChangeEvent) => {
215
+ this._currentSettings = JSONExt.deepCopy(e.formData);
216
+ this.saveSettings(this._currentSettings);
217
+ this._settings
218
+ .set('AIprovider', { provider: this._provider, ...this._currentSettings })
219
+ .catch(console.error);
220
+ };
221
+
222
+ render(): JSX.Element {
223
+ return (
224
+ <>
225
+ <WrappedFormComponent
226
+ formData={{ provider: this._provider }}
227
+ schema={this._providerSchema}
228
+ onChange={this._onProviderChanged}
229
+ />
230
+ {this.state.instruction !== null && (
231
+ <details>
232
+ <summary className={INSTRUCTION_CLASS}>Instructions</summary>
233
+ <span
234
+ ref={node =>
235
+ node && node.replaceChildren(this.state.instruction!)
236
+ }
237
+ />
238
+ </details>
239
+ )}
240
+ <WrappedFormComponent
241
+ formData={this._currentSettings}
242
+ schema={this.state.schema}
243
+ onChange={this._onFormChange}
244
+ uiSchema={this._uiSchema}
245
+ />
246
+ </>
247
+ );
248
+ }
249
+
250
+ private _providerRegistry: IAIProviderRegistry;
251
+ private _provider: string;
252
+ private _providerSchema: JSONSchema7;
253
+ private _rmRegistry: IRenderMimeRegistry | null;
254
+ private _currentSettings: IDict<any> = { provider: 'None' };
255
+ private _uiSchema: IDict<any> = {};
256
+ private _settings: ISettingRegistry.ISettings;
257
+ }
@@ -0,0 +1,15 @@
1
+ import { IDict } from '../../tokens';
2
+
3
+ import ChromeAI from './_generated/ChromeAI.json';
4
+ import MistralAI from './_generated/MistralAI.json';
5
+ import Anthropic from './_generated/Anthropic.json';
6
+ import OpenAI from './_generated/OpenAI.json';
7
+
8
+ const ProviderSettings: IDict<any> = {
9
+ ChromeAI,
10
+ MistralAI,
11
+ Anthropic,
12
+ OpenAI
13
+ };
14
+
15
+ export { ProviderSettings };