@jupyterlite/ai 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/lib/chat-handler.d.ts +3 -3
  2. package/lib/chat-handler.js +13 -10
  3. package/lib/completion-provider.d.ts +5 -18
  4. package/lib/completion-provider.js +8 -34
  5. package/lib/index.d.ts +2 -2
  6. package/lib/index.js +44 -42
  7. package/lib/llm-models/index.d.ts +3 -2
  8. package/lib/llm-models/index.js +42 -2
  9. package/lib/provider.d.ts +44 -16
  10. package/lib/provider.js +97 -41
  11. package/lib/settings/instructions.d.ts +2 -0
  12. package/lib/settings/instructions.js +44 -0
  13. package/lib/settings/panel.d.ts +70 -0
  14. package/lib/settings/panel.js +190 -0
  15. package/lib/settings/schemas/base.json +7 -0
  16. package/lib/settings/schemas/index.d.ts +3 -0
  17. package/lib/settings/schemas/index.js +11 -0
  18. package/lib/tokens.d.ts +103 -0
  19. package/lib/tokens.js +5 -0
  20. package/package.json +5 -1
  21. package/schema/provider-registry.json +17 -0
  22. package/src/chat-handler.ts +16 -13
  23. package/src/completion-provider.ts +13 -37
  24. package/src/index.ts +59 -44
  25. package/src/llm-models/index.ts +49 -2
  26. package/src/provider.ts +100 -43
  27. package/src/settings/instructions.ts +48 -0
  28. package/src/settings/panel.tsx +257 -0
  29. package/src/settings/schemas/index.ts +15 -0
  30. package/src/tokens.ts +112 -0
  31. package/style/base.css +4 -0
  32. package/lib/llm-models/utils.d.ts +0 -16
  33. package/lib/llm-models/utils.js +0 -86
  34. package/lib/token.d.ts +0 -13
  35. package/lib/token.js +0 -2
  36. package/schema/ai-provider.json +0 -17
  37. package/src/llm-models/utils.ts +0 -90
  38. package/src/token.ts +0 -19
  39. /package/lib/{_provider-settings/anthropic.json → settings/schemas/_generated/Anthropic.json} +0 -0
  40. /package/lib/{_provider-settings/chromeAI.json → settings/schemas/_generated/ChromeAI.json} +0 -0
  41. /package/lib/{_provider-settings/mistralAI.json → settings/schemas/_generated/MistralAI.json} +0 -0
  42. /package/lib/{_provider-settings/openAI.json → settings/schemas/_generated/OpenAI.json} +0 -0
@@ -1,6 +1,6 @@
1
1
  import { ChatModel, IChatHistory, IChatMessage, INewMessage } from '@jupyter/chat';
2
2
  import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
- import { IAIProvider } from './token';
3
+ import { IAIProviderRegistry } from './tokens';
4
4
  export type ConnectionMessage = {
5
5
  type: 'connection';
6
6
  client_id: string;
@@ -22,7 +22,7 @@ export declare class ChatHandler extends ChatModel {
22
22
  getHistory(): Promise<IChatHistory>;
23
23
  dispose(): void;
24
24
  messageAdded(message: IChatMessage): void;
25
- private _aiProvider;
25
+ private _providerRegistry;
26
26
  private _personaName;
27
27
  private _prompt;
28
28
  private _errorMessage;
@@ -31,6 +31,6 @@ export declare class ChatHandler extends ChatModel {
31
31
  }
32
32
  export declare namespace ChatHandler {
33
33
  interface IOptions extends ChatModel.IOptions {
34
- aiProvider: IAIProvider;
34
+ providerRegistry: IAIProviderRegistry;
35
35
  }
36
36
  }
@@ -5,7 +5,6 @@
5
5
  import { ChatModel } from '@jupyter/chat';
6
6
  import { AIMessage, HumanMessage, mergeMessageRuns, SystemMessage } from '@langchain/core/messages';
7
7
  import { UUID } from '@lumino/coreutils';
8
- import { getErrorMessage } from './llm-models';
9
8
  import { chatSystemPrompt } from './provider';
10
9
  import { jupyternautLiteIcon } from './icons';
11
10
  /**
@@ -21,15 +20,19 @@ export class ChatHandler extends ChatModel {
21
20
  this._errorMessage = '';
22
21
  this._history = { messages: [] };
23
22
  this._defaultErrorMessage = 'AI provider not configured';
24
- this._aiProvider = options.aiProvider;
25
- this._prompt = chatSystemPrompt({ provider_name: this._aiProvider.name });
26
- this._aiProvider.modelChange.connect(() => {
27
- this._errorMessage = this._aiProvider.chatError;
28
- this._prompt = chatSystemPrompt({ provider_name: this._aiProvider.name });
23
+ this._providerRegistry = options.providerRegistry;
24
+ this._prompt = chatSystemPrompt({
25
+ provider_name: this._providerRegistry.currentName
26
+ });
27
+ this._providerRegistry.providerChanged.connect(() => {
28
+ this._errorMessage = this._providerRegistry.chatError;
29
+ this._prompt = chatSystemPrompt({
30
+ provider_name: this._providerRegistry.currentName
31
+ });
29
32
  });
30
33
  }
31
34
  get provider() {
32
- return this._aiProvider.chatModel;
35
+ return this._providerRegistry.currentChatModel;
33
36
  }
34
37
  /**
35
38
  * Getter and setter for the persona name.
@@ -74,7 +77,7 @@ export class ChatHandler extends ChatModel {
74
77
  type: 'msg'
75
78
  };
76
79
  this.messageAdded(msg);
77
- if (this._aiProvider.chatModel === null) {
80
+ if (this._providerRegistry.currentChatModel === null) {
78
81
  const errorMsg = {
79
82
  id: UUID.uuid4(),
80
83
  body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`,
@@ -105,7 +108,7 @@ export class ChatHandler extends ChatModel {
105
108
  };
106
109
  let content = '';
107
110
  try {
108
- for await (const chunk of await this._aiProvider.chatModel.stream(messages)) {
111
+ for await (const chunk of await this._providerRegistry.currentChatModel.stream(messages)) {
109
112
  content += (_a = chunk.content) !== null && _a !== void 0 ? _a : chunk;
110
113
  botMsg.body = content;
111
114
  this.messageAdded(botMsg);
@@ -114,7 +117,7 @@ export class ChatHandler extends ChatModel {
114
117
  return true;
115
118
  }
116
119
  catch (reason) {
117
- const error = getErrorMessage(this._aiProvider.name, reason);
120
+ const error = this._providerRegistry.formatErrorMessage(reason);
118
121
  const errorMsg = {
119
122
  id: UUID.uuid4(),
120
123
  body: `**${error}**`,
@@ -1,20 +1,12 @@
1
1
  import { CompletionHandler, IInlineCompletionContext, IInlineCompletionProvider } from '@jupyterlab/completer';
2
- import { BaseLanguageModel } from '@langchain/core/language_models/base';
3
- import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
4
- import { IBaseCompleter, BaseCompleter } from './llm-models';
2
+ import { IBaseCompleter } from './llm-models';
3
+ import { IAIProviderRegistry } from './tokens';
5
4
  /**
6
5
  * The generic completion provider to register to the completion provider manager.
7
6
  */
8
7
  export declare class CompletionProvider implements IInlineCompletionProvider {
9
8
  readonly identifier = "@jupyterlite/ai";
10
9
  constructor(options: CompletionProvider.IOptions);
11
- /**
12
- * Set the completer.
13
- *
14
- * @param name - the name of the completer.
15
- * @param settings - The settings associated to the completer.
16
- */
17
- setCompleter(name: string, settings: ReadonlyPartialJSONObject): void;
18
10
  /**
19
11
  * Get the current completer name.
20
12
  */
@@ -23,18 +15,13 @@ export declare class CompletionProvider implements IInlineCompletionProvider {
23
15
  * Get the current completer.
24
16
  */
25
17
  get completer(): IBaseCompleter | null;
26
- /**
27
- * Get the LLM completer.
28
- */
29
- get llmCompleter(): BaseLanguageModel | null;
30
18
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
31
- private _name;
19
+ private _providerRegistry;
32
20
  private _requestCompletion;
33
- private _completer;
34
21
  }
35
22
  export declare namespace CompletionProvider {
36
- interface IOptions extends BaseCompleter.IOptions {
37
- name: string;
23
+ interface IOptions {
24
+ providerRegistry: IAIProviderRegistry;
38
25
  requestCompletion: () => void;
39
26
  }
40
27
  }
@@ -1,57 +1,31 @@
1
- import { getCompleter } from './llm-models';
2
1
  /**
3
2
  * The generic completion provider to register to the completion provider manager.
4
3
  */
5
4
  export class CompletionProvider {
6
5
  constructor(options) {
7
6
  this.identifier = '@jupyterlite/ai';
8
- this._name = 'None';
9
- this._completer = null;
10
- const { name, settings } = options;
7
+ this._providerRegistry = options.providerRegistry;
11
8
  this._requestCompletion = options.requestCompletion;
12
- this.setCompleter(name, settings);
13
- }
14
- /**
15
- * Set the completer.
16
- *
17
- * @param name - the name of the completer.
18
- * @param settings - The settings associated to the completer.
19
- */
20
- setCompleter(name, settings) {
21
- try {
22
- this._completer = getCompleter(name, settings);
23
- if (this._completer) {
24
- this._completer.requestCompletion = this._requestCompletion;
9
+ this._providerRegistry.providerChanged.connect(() => {
10
+ if (this.completer) {
11
+ this.completer.requestCompletion = this._requestCompletion;
25
12
  }
26
- this._name = this._completer === null ? 'None' : name;
27
- }
28
- catch (e) {
29
- this._completer = null;
30
- this._name = 'None';
31
- throw e;
32
- }
13
+ });
33
14
  }
34
15
  /**
35
16
  * Get the current completer name.
36
17
  */
37
18
  get name() {
38
- return this._name;
19
+ return this._providerRegistry.currentName;
39
20
  }
40
21
  /**
41
22
  * Get the current completer.
42
23
  */
43
24
  get completer() {
44
- return this._completer;
45
- }
46
- /**
47
- * Get the LLM completer.
48
- */
49
- get llmCompleter() {
50
- var _a;
51
- return ((_a = this._completer) === null || _a === void 0 ? void 0 : _a.provider) || null;
25
+ return this._providerRegistry.currentCompleter;
52
26
  }
53
27
  async fetch(request, context) {
54
28
  var _a;
55
- return (_a = this._completer) === null || _a === void 0 ? void 0 : _a.fetch(request, context);
29
+ return (_a = this.completer) === null || _a === void 0 ? void 0 : _a.fetch(request, context);
56
30
  }
57
31
  }
package/lib/index.d.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { IAutocompletionRegistry } from '@jupyter/chat';
2
2
  import { JupyterFrontEndPlugin } from '@jupyterlab/application';
3
- import { IAIProvider } from './token';
4
- declare const _default: (JupyterFrontEndPlugin<void> | JupyterFrontEndPlugin<IAutocompletionRegistry> | JupyterFrontEndPlugin<IAIProvider>)[];
3
+ import { IAIProviderRegistry } from './tokens';
4
+ declare const _default: (JupyterFrontEndPlugin<void> | JupyterFrontEndPlugin<IAutocompletionRegistry> | JupyterFrontEndPlugin<IAIProviderRegistry>)[];
5
5
  export default _default;
package/lib/index.js CHANGED
@@ -4,11 +4,14 @@ import { ICompletionProviderManager } from '@jupyterlab/completer';
4
4
  import { INotebookTracker } from '@jupyterlab/notebook';
5
5
  import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
6
6
  import { ISettingRegistry } from '@jupyterlab/settingregistry';
7
+ import { IFormRendererRegistry } from '@jupyterlab/ui-components';
7
8
  import { ChatHandler } from './chat-handler';
8
- import { getSettings } from './llm-models';
9
- import { AIProvider } from './provider';
9
+ import { CompletionProvider } from './completion-provider';
10
+ import { AIProviders } from './llm-models';
11
+ import { AIProviderRegistry } from './provider';
12
+ import { aiSettingsRenderer } from './settings/panel';
10
13
  import { renderSlashCommandOption } from './slash-commands';
11
- import { IAIProvider } from './token';
14
+ import { IAIProviderRegistry } from './tokens';
12
15
  const autocompletionRegistryPlugin = {
13
16
  id: '@jupyterlite/ai:autocompletion-registry',
14
17
  description: 'Autocompletion registry',
@@ -38,9 +41,9 @@ const chatPlugin = {
38
41
  id: '@jupyterlite/ai:chat',
39
42
  description: 'LLM chat extension',
40
43
  autoStart: true,
41
- requires: [IAIProvider, IRenderMimeRegistry, IAutocompletionRegistry],
44
+ requires: [IAIProviderRegistry, IRenderMimeRegistry, IAutocompletionRegistry],
42
45
  optional: [INotebookTracker, ISettingRegistry, IThemeManager],
43
- activate: async (app, aiProvider, rmRegistry, autocompletionRegistry, notebookTracker, settingsRegistry, themeManager) => {
46
+ activate: async (app, providerRegistry, rmRegistry, autocompletionRegistry, notebookTracker, settingsRegistry, themeManager) => {
44
47
  let activeCellManager = null;
45
48
  if (notebookTracker) {
46
49
  activeCellManager = new ActiveCellManager({
@@ -49,8 +52,8 @@ const chatPlugin = {
49
52
  });
50
53
  }
51
54
  const chatHandler = new ChatHandler({
52
- aiProvider: aiProvider,
53
- activeCellManager: activeCellManager
55
+ providerRegistry,
56
+ activeCellManager
54
57
  });
55
58
  let sendWithShiftEnter = false;
56
59
  let enableCodeToolbar = true;
@@ -93,53 +96,52 @@ const chatPlugin = {
93
96
  console.log('Chat extension initialized');
94
97
  }
95
98
  };
96
- const aiProviderPlugin = {
97
- id: '@jupyterlite/ai:ai-provider',
99
+ const completerPlugin = {
100
+ id: '@jupyterlite/ai:completer',
98
101
  autoStart: true,
99
- requires: [ICompletionProviderManager, ISettingRegistry],
100
- provides: IAIProvider,
101
- activate: (app, manager, settingRegistry) => {
102
- const aiProvider = new AIProvider({
103
- completionProviderManager: manager,
102
+ requires: [IAIProviderRegistry, ICompletionProviderManager],
103
+ activate: (app, providerRegistry, manager) => {
104
+ const completer = new CompletionProvider({
105
+ providerRegistry,
104
106
  requestCompletion: () => app.commands.execute('inline-completer:invoke')
105
107
  });
106
- let currentProvider = 'None';
108
+ manager.registerInlineProvider(completer);
109
+ }
110
+ };
111
+ const providerRegistryPlugin = {
112
+ id: '@jupyterlite/ai:provider-registry',
113
+ autoStart: true,
114
+ requires: [IFormRendererRegistry, ISettingRegistry],
115
+ optional: [IRenderMimeRegistry],
116
+ provides: IAIProviderRegistry,
117
+ activate: (app, editorRegistry, settingRegistry, rmRegistry) => {
118
+ const providerRegistry = new AIProviderRegistry();
119
+ editorRegistry.addRenderer('@jupyterlite/ai:provider-registry.AIprovider', aiSettingsRenderer({ providerRegistry, rmRegistry }));
107
120
  settingRegistry
108
- .load(aiProviderPlugin.id)
121
+ .load(providerRegistryPlugin.id)
109
122
  .then(settings => {
110
123
  const updateProvider = () => {
111
- const provider = settings.get('provider').composite;
112
- if (provider !== currentProvider) {
113
- // Update the settings panel.
114
- currentProvider = provider;
115
- const settingsProperties = settings.schema.properties;
116
- if (settingsProperties) {
117
- const schemaKeys = Object.keys(settingsProperties);
118
- schemaKeys.forEach(key => {
119
- var _a;
120
- if (key !== 'provider') {
121
- (_a = settings.schema.properties) === null || _a === void 0 ? true : delete _a[key];
122
- }
123
- });
124
- const properties = getSettings(provider);
125
- if (properties === null) {
126
- return;
127
- }
128
- Object.entries(properties).forEach(([name, value], index) => {
129
- settingsProperties[name] = value;
130
- });
131
- }
132
- }
124
+ var _a;
133
125
  // Update the settings to the AI providers.
134
- aiProvider.setModels(provider, settings.composite);
126
+ const providerSettings = ((_a = settings.get('AIprovider').composite) !== null && _a !== void 0 ? _a : {
127
+ provider: 'None'
128
+ });
129
+ providerRegistry.setProvider(providerSettings.provider, providerSettings);
135
130
  };
136
131
  settings.changed.connect(() => updateProvider());
137
132
  updateProvider();
138
133
  })
139
134
  .catch(reason => {
140
- console.error(`Failed to load settings for ${aiProviderPlugin.id}`, reason);
135
+ console.error(`Failed to load settings for ${providerRegistryPlugin.id}`, reason);
141
136
  });
142
- return aiProvider;
137
+ // Initialize the registry with the default providers
138
+ AIProviders.forEach(provider => providerRegistry.add(provider));
139
+ return providerRegistry;
143
140
  }
144
141
  };
145
- export default [chatPlugin, autocompletionRegistryPlugin, aiProviderPlugin];
142
+ export default [
143
+ providerRegistryPlugin,
144
+ autocompletionRegistryPlugin,
145
+ chatPlugin,
146
+ completerPlugin
147
+ ];
@@ -1,3 +1,4 @@
1
+ import { IAIProvider } from '../tokens';
1
2
  export * from './base-completer';
2
- export * from './codestral-completer';
3
- export * from './utils';
3
+ declare const AIProviders: IAIProvider[];
4
+ export { AIProviders };
@@ -1,3 +1,43 @@
1
+ import { ChatAnthropic } from '@langchain/anthropic';
2
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
3
+ import { ChatMistralAI } from '@langchain/mistralai';
4
+ import { ChatOpenAI } from '@langchain/openai';
5
+ import { AnthropicCompleter } from './anthropic-completer';
6
+ import { CodestralCompleter } from './codestral-completer';
7
+ import { ChromeCompleter } from './chrome-completer';
8
+ import { OpenAICompleter } from './openai-completer';
9
+ import { instructions } from '../settings/instructions';
10
+ import { ProviderSettings } from '../settings/schemas';
1
11
  export * from './base-completer';
2
- export * from './codestral-completer';
3
- export * from './utils';
12
+ const AIProviders = [
13
+ {
14
+ name: 'Anthropic',
15
+ chatModel: ChatAnthropic,
16
+ completer: AnthropicCompleter,
17
+ settingsSchema: ProviderSettings.Anthropic,
18
+ errorMessage: (error) => error.error.error.message
19
+ },
20
+ {
21
+ name: 'ChromeAI',
22
+ // TODO: fix
23
+ // @ts-expect-error: missing properties
24
+ chatModel: ChromeAI,
25
+ completer: ChromeCompleter,
26
+ instructions: instructions.ChromeAI,
27
+ settingsSchema: ProviderSettings.ChromeAI
28
+ },
29
+ {
30
+ name: 'MistralAI',
31
+ chatModel: ChatMistralAI,
32
+ completer: CodestralCompleter,
33
+ instructions: instructions.MistralAI,
34
+ settingsSchema: ProviderSettings.MistralAI
35
+ },
36
+ {
37
+ name: 'OpenAI',
38
+ chatModel: ChatOpenAI,
39
+ completer: OpenAICompleter,
40
+ settingsSchema: ProviderSettings.OpenAI
41
+ }
42
+ ];
43
+ export { AIProviders };
package/lib/provider.d.ts CHANGED
@@ -4,20 +4,43 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
4
4
  import { ISignal } from '@lumino/signaling';
5
5
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
6
6
  import { IBaseCompleter } from './llm-models';
7
- import { IAIProvider } from './token';
8
- export declare const chatSystemPrompt: (options: AIProvider.IPromptOptions) => string;
7
+ import { IAIProvider, IAIProviderRegistry } from './tokens';
8
+ import { JSONSchema7 } from 'json-schema';
9
+ export declare const chatSystemPrompt: (options: AIProviderRegistry.IPromptOptions) => string;
9
10
  export declare const COMPLETION_SYSTEM_PROMPT = "\nYou are an application built to provide helpful code completion suggestions.\nYou should only produce code. Keep comments to minimum, use the\nprogramming language comment syntax. Produce clean code.\nThe code is written in JupyterLab, a data analysis and code development\nenvironment which can execute code extended with additional syntax for\ninteractive features, such as magics.\nOnly give raw strings back, do not format the response using backticks.\nThe output should be a single string, and should correspond to what a human users\nwould write.\nDo not include the prompt in the output, only the string that should be appended to the current input.\n";
10
- export declare class AIProvider implements IAIProvider {
11
- constructor(options: AIProvider.IOptions);
12
- get name(): string;
11
+ export declare class AIProviderRegistry implements IAIProviderRegistry {
12
+ /**
13
+ * Get the list of provider names.
14
+ */
15
+ get providers(): string[];
16
+ /**
17
+ * Add a new provider.
18
+ */
19
+ add(provider: IAIProvider): void;
20
+ /**
21
+ * Get the current provider name.
22
+ */
23
+ get currentName(): string;
13
24
  /**
14
25
  * Get the current completer of the completion provider.
15
26
  */
16
- get completer(): IBaseCompleter | null;
27
+ get currentCompleter(): IBaseCompleter | null;
17
28
  /**
18
29
  * Get the current llm chat model.
19
30
  */
20
- get chatModel(): BaseChatModel | null;
31
+ get currentChatModel(): BaseChatModel | null;
32
+ /**
33
+ * Get the settings schema of a given provider.
34
+ */
35
+ getSettingsSchema(provider: string): JSONSchema7;
36
+ /**
37
+ * Get the instructions of a given provider.
38
+ */
39
+ getInstructions(provider: string): string | undefined;
40
+ /**
41
+ * Format an error message from the current provider.
42
+ */
43
+ formatErrorMessage(error: any): string;
21
44
  /**
22
45
  * Get the current chat error;
23
46
  */
@@ -27,22 +50,27 @@ export declare class AIProvider implements IAIProvider {
27
50
  */
28
51
  get completerError(): string;
29
52
  /**
30
- * Set the models (chat model and completer).
31
- * Creates the models if the name has changed, otherwise only updates their config.
53
+ * Set the providers (chat model and completer).
54
+ * Creates the providers if the name has changed, otherwise only updates their config.
32
55
  *
33
- * @param name - the name of the model to use.
56
+ * @param name - the name of the provider to use.
34
57
  * @param settings - the settings for the models.
35
58
  */
36
- setModels(name: string, settings: ReadonlyPartialJSONObject): void;
37
- get modelChange(): ISignal<IAIProvider, void>;
38
- private _completionProvider;
39
- private _llmChatModel;
59
+ setProvider(name: string, settings: ReadonlyPartialJSONObject): void;
60
+ /**
61
+ * A signal emitting when the provider or its settings has changed.
62
+ */
63
+ get providerChanged(): ISignal<IAIProviderRegistry, void>;
64
+ private _currentProvider;
65
+ private _completer;
66
+ private _chatModel;
40
67
  private _name;
41
- private _modelChange;
68
+ private _providerChanged;
42
69
  private _chatError;
43
70
  private _completerError;
71
+ private _providers;
44
72
  }
45
- export declare namespace AIProvider {
73
+ export declare namespace AIProviderRegistry {
46
74
  /**
47
75
  * The options for the LLM provider.
48
76
  */