@jupyterlite/ai 0.4.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/lib/chat-handler.d.ts +9 -1
  2. package/lib/chat-handler.js +37 -1
  3. package/lib/completion-provider.d.ts +8 -1
  4. package/lib/components/stop-button.d.ts +19 -0
  5. package/lib/components/stop-button.js +32 -0
  6. package/lib/{llm-models/anthropic-completer.d.ts → default-providers/Anthropic/completer.d.ts} +1 -1
  7. package/lib/{llm-models/anthropic-completer.js → default-providers/Anthropic/completer.js} +1 -1
  8. package/lib/{llm-models/chrome-completer.d.ts → default-providers/ChromeAI/completer.d.ts} +1 -1
  9. package/lib/{llm-models/chrome-completer.js → default-providers/ChromeAI/completer.js} +1 -1
  10. package/lib/default-providers/ChromeAI/instructions.d.ts +2 -0
  11. package/lib/default-providers/ChromeAI/instructions.js +24 -0
  12. package/lib/{llm-models/codestral-completer.d.ts → default-providers/MistralAI/completer.d.ts} +1 -1
  13. package/lib/{llm-models/codestral-completer.js → default-providers/MistralAI/completer.js} +1 -1
  14. package/lib/default-providers/MistralAI/instructions.d.ts +2 -0
  15. package/lib/default-providers/MistralAI/instructions.js +16 -0
  16. package/lib/{llm-models/openai-completer.d.ts → default-providers/OpenAI/completer.d.ts} +1 -1
  17. package/lib/{llm-models/openai-completer.js → default-providers/OpenAI/completer.js} +1 -1
  18. package/lib/default-providers/index.d.ts +2 -0
  19. package/lib/default-providers/index.js +60 -0
  20. package/lib/index.d.ts +3 -2
  21. package/lib/index.js +57 -36
  22. package/lib/provider.d.ts +13 -12
  23. package/lib/provider.js +43 -9
  24. package/lib/settings/index.d.ts +3 -0
  25. package/lib/settings/index.js +3 -0
  26. package/lib/settings/panel.d.ts +17 -0
  27. package/lib/settings/panel.js +92 -5
  28. package/lib/settings/settings-connector.d.ts +31 -0
  29. package/lib/settings/settings-connector.js +61 -0
  30. package/lib/settings/utils.d.ts +3 -0
  31. package/lib/settings/utils.js +5 -0
  32. package/lib/tokens.d.ts +16 -4
  33. package/package.json +14 -7
  34. package/schema/provider-registry.json +6 -0
  35. package/src/chat-handler.ts +43 -1
  36. package/src/completion-provider.ts +8 -1
  37. package/src/components/stop-button.tsx +56 -0
  38. package/src/{llm-models/anthropic-completer.ts → default-providers/Anthropic/completer.ts} +2 -2
  39. package/src/{llm-models/chrome-completer.ts → default-providers/ChromeAI/completer.ts} +3 -2
  40. package/src/default-providers/ChromeAI/instructions.ts +24 -0
  41. package/src/{llm-models/codestral-completer.ts → default-providers/MistralAI/completer.ts} +2 -2
  42. package/src/default-providers/MistralAI/instructions.ts +16 -0
  43. package/src/{llm-models/openai-completer.ts → default-providers/OpenAI/completer.ts} +2 -2
  44. package/src/default-providers/index.ts +71 -0
  45. package/src/index.ts +77 -49
  46. package/src/provider.ts +58 -15
  47. package/src/settings/index.ts +3 -0
  48. package/src/settings/panel.tsx +109 -5
  49. package/src/settings/settings-connector.ts +89 -0
  50. package/src/settings/utils.ts +6 -0
  51. package/src/tokens.ts +17 -4
  52. package/lib/llm-models/index.d.ts +0 -4
  53. package/lib/llm-models/index.js +0 -43
  54. package/lib/settings/instructions.d.ts +0 -2
  55. package/lib/settings/instructions.js +0 -44
  56. package/lib/settings/schemas/index.d.ts +0 -3
  57. package/lib/settings/schemas/index.js +0 -11
  58. package/lib/slash-commands.d.ts +0 -16
  59. package/lib/slash-commands.js +0 -25
  60. package/src/llm-models/index.ts +0 -50
  61. package/src/settings/instructions.ts +0 -48
  62. package/src/settings/schemas/index.ts +0 -15
  63. package/src/slash-commands.tsx +0 -55
  64. /package/lib/{llm-models/base-completer.d.ts → base-completer.d.ts} +0 -0
  65. /package/lib/{llm-models/base-completer.js → base-completer.js} +0 -0
  66. /package/lib/{settings/schemas/_generated/Anthropic.json → default-providers/Anthropic/settings-schema.json} +0 -0
  67. /package/lib/{settings/schemas/_generated/ChromeAI.json → default-providers/ChromeAI/settings-schema.json} +0 -0
  68. /package/lib/{settings/schemas/_generated/MistralAI.json → default-providers/MistralAI/settings-schema.json} +0 -0
  69. /package/lib/{settings/schemas/_generated/OpenAI.json → default-providers/OpenAI/settings-schema.json} +0 -0
  70. /package/lib/settings/{schemas/base.json → base.json} +0 -0
  71. /package/src/{llm-models/base-completer.ts → base-completer.ts} +0 -0
  72. /package/src/{llm-models/svg.d.ts → global.d.ts} +0 -0
@@ -1,4 +1,4 @@
1
- import { ChatModel, IChatHistory, IChatMessage, INewMessage } from '@jupyter/chat';
1
+ import { ChatCommand, ChatModel, IChatCommandProvider, IChatHistory, IChatMessage, IInputModel, INewMessage } from '@jupyter/chat';
2
2
  import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
3
  import { IAIProviderRegistry } from './tokens';
4
4
  export type ConnectionMessage = {
@@ -22,15 +22,23 @@ export declare class ChatHandler extends ChatModel {
22
22
  getHistory(): Promise<IChatHistory>;
23
23
  dispose(): void;
24
24
  messageAdded(message: IChatMessage): void;
25
+ stopStreaming(): void;
25
26
  private _providerRegistry;
26
27
  private _personaName;
27
28
  private _prompt;
28
29
  private _errorMessage;
29
30
  private _history;
30
31
  private _defaultErrorMessage;
32
+ private _controller;
31
33
  }
32
34
  export declare namespace ChatHandler {
33
35
  interface IOptions extends ChatModel.IOptions {
34
36
  providerRegistry: IAIProviderRegistry;
35
37
  }
38
+ class ClearCommandProvider implements IChatCommandProvider {
39
+ id: string;
40
+ private _slash_commands;
41
+ getChatCommands(inputModel: IInputModel): Promise<ChatCommand[]>;
42
+ handleChatCommand(command: ChatCommand, inputModel: IInputModel): Promise<void>;
43
+ }
36
44
  }
@@ -20,6 +20,7 @@ export class ChatHandler extends ChatModel {
20
20
  this._errorMessage = '';
21
21
  this._history = { messages: [] };
22
22
  this._defaultErrorMessage = 'AI provider not configured';
23
+ this._controller = null;
23
24
  this._providerRegistry = options.providerRegistry;
24
25
  this._prompt = chatSystemPrompt({
25
26
  provider_name: this._providerRegistry.currentName
@@ -107,8 +108,9 @@ export class ChatHandler extends ChatModel {
107
108
  type: 'msg'
108
109
  };
109
110
  let content = '';
111
+ this._controller = new AbortController();
110
112
  try {
111
- for await (const chunk of await this._providerRegistry.currentChatModel.stream(messages)) {
113
+ for await (const chunk of await this._providerRegistry.currentChatModel.stream(messages, { signal: this._controller.signal })) {
112
114
  content += (_a = chunk.content) !== null && _a !== void 0 ? _a : chunk;
113
115
  botMsg.body = content;
114
116
  this.messageAdded(botMsg);
@@ -130,6 +132,7 @@ export class ChatHandler extends ChatModel {
130
132
  }
131
133
  finally {
132
134
  this.updateWriters([]);
135
+ this._controller = null;
133
136
  }
134
137
  }
135
138
  async getHistory() {
@@ -141,4 +144,37 @@ export class ChatHandler extends ChatModel {
141
144
  messageAdded(message) {
142
145
  super.messageAdded(message);
143
146
  }
147
+ stopStreaming() {
148
+ var _a;
149
+ (_a = this._controller) === null || _a === void 0 ? void 0 : _a.abort();
150
+ }
144
151
  }
152
+ (function (ChatHandler) {
153
+ class ClearCommandProvider {
154
+ constructor() {
155
+ this.id = '@jupyterlite/ai:clear-commands';
156
+ this._slash_commands = [
157
+ {
158
+ name: '/clear',
159
+ providerId: this.id,
160
+ replaceWith: '/clear',
161
+ description: 'Clear the chat'
162
+ }
163
+ ];
164
+ }
165
+ async getChatCommands(inputModel) {
166
+ var _a, _b;
167
+ const match = (_b = (_a = inputModel.currentWord) === null || _a === void 0 ? void 0 : _a.match(/^\/\w*/)) === null || _b === void 0 ? void 0 : _b[0];
168
+ if (!match) {
169
+ return [];
170
+ }
171
+ const commands = this._slash_commands.filter(cmd => cmd.name.startsWith(match));
172
+ return commands;
173
+ }
174
+ async handleChatCommand(command, inputModel) {
175
+ // no handling needed because `replaceWith` is set in each command.
176
+ return;
177
+ }
178
+ }
179
+ ChatHandler.ClearCommandProvider = ClearCommandProvider;
180
+ })(ChatHandler || (ChatHandler = {}));
@@ -1,5 +1,5 @@
1
1
  import { CompletionHandler, IInlineCompletionContext, IInlineCompletionProvider } from '@jupyterlab/completer';
2
- import { IBaseCompleter } from './llm-models';
2
+ import { IBaseCompleter } from './base-completer';
3
3
  import { IAIProviderRegistry } from './tokens';
4
4
  /**
5
5
  * The generic completion provider to register to the completion provider manager.
@@ -21,7 +21,14 @@ export declare class CompletionProvider implements IInlineCompletionProvider {
21
21
  }
22
22
  export declare namespace CompletionProvider {
23
23
  interface IOptions {
24
+ /**
25
+ * The registry where the completion provider belongs.
26
+ */
24
27
  providerRegistry: IAIProviderRegistry;
28
+ /**
29
+ * The request completion commands, can be useful if a provider needs to request
30
+ * the completion by itself.
31
+ */
25
32
  requestCompletion: () => void;
26
33
  }
27
34
  }
@@ -0,0 +1,19 @@
1
+ /// <reference types="react" />
2
+ import { InputToolbarRegistry } from '@jupyter/chat';
3
+ /**
4
+ * Properties of the stop button.
5
+ */
6
+ export interface IStopButtonProps extends InputToolbarRegistry.IToolbarItemProps {
7
+ /**
8
+ * The function to stop streaming.
9
+ */
10
+ stopStreaming: () => void;
11
+ }
12
+ /**
13
+ * The stop button.
14
+ */
15
+ export declare function StopButton(props: IStopButtonProps): JSX.Element;
16
+ /**
17
+ * factory returning the toolbar item.
18
+ */
19
+ export declare function stopItem(stopStreaming: () => void): InputToolbarRegistry.IToolbarItem;
@@ -0,0 +1,32 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+ import StopIcon from '@mui/icons-material/Stop';
6
+ import React from 'react';
7
+ import { TooltippedButton } from '@jupyter/chat';
8
+ /**
9
+ * The stop button.
10
+ */
11
+ export function StopButton(props) {
12
+ const tooltip = 'Stop streaming';
13
+ return (React.createElement(TooltippedButton, { onClick: props.stopStreaming, tooltip: tooltip, buttonProps: {
14
+ size: 'small',
15
+ variant: 'contained',
16
+ title: tooltip
17
+ } },
18
+ React.createElement(StopIcon, null)));
19
+ }
20
+ /**
21
+ * factory returning the toolbar item.
22
+ */
23
+ export function stopItem(stopStreaming) {
24
+ return {
25
+ element: (props) => {
26
+ const stopProps = { ...props, stopStreaming };
27
+ return StopButton(stopProps);
28
+ },
29
+ position: 50,
30
+ hidden: true /* hidden by default */
31
+ };
32
+ }
@@ -1,6 +1,6 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
2
  import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
- import { BaseCompleter, IBaseCompleter } from './base-completer';
3
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
4
  export declare class AnthropicCompleter implements IBaseCompleter {
5
5
  constructor(options: BaseCompleter.IOptions);
6
6
  get provider(): BaseChatModel;
@@ -1,6 +1,6 @@
1
1
  import { ChatAnthropic } from '@langchain/anthropic';
2
2
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
3
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
3
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
4
4
  export class AnthropicCompleter {
5
5
  constructor(options) {
6
6
  this._prompt = COMPLETION_SYSTEM_PROMPT;
@@ -1,6 +1,6 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
2
  import { LLM } from '@langchain/core/language_models/llms';
3
- import { BaseCompleter, IBaseCompleter } from './base-completer';
3
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
4
  export declare class ChromeCompleter implements IBaseCompleter {
5
5
  constructor(options: BaseCompleter.IOptions);
6
6
  /**
@@ -1,6 +1,6 @@
1
1
  import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
2
2
  import { HumanMessage, SystemMessage } from '@langchain/core/messages';
3
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
3
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
4
4
  /**
5
5
  * Regular expression to match the '```' string at the start of a string.
6
6
  * So the completions returned by the LLM can still be kept after removing the code block formatting.
@@ -0,0 +1,2 @@
1
+ declare const _default: "\n<i class=\"fas fa-exclamation-triangle\"></i> Support for ChromeAI is still experimental and only available in Google Chrome.\n\nYou can test ChromeAI is enabled in your browser by going to the following URL: <https://chromeai.org/>\n\nEnable the proper flags in Google Chrome.\n\n- chrome://flags/#prompt-api-for-gemini-nano\n - Select: `Enabled`\n- chrome://flags/#optimization-guide-on-device-model\n - Select: `Enabled BypassPrefRequirement`\n- chrome://components\n - Click `Check for Update` on Optimization Guide On Device Model to download the model\n- [Optional] chrome://flags/#text-safety-classifier\n\n<img src=\"https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c\" alt=\"A screenshot showing how to enable the ChromeAI flag in Google Chrome\" width=\"500px\">\n\nThen restart Chrome for these changes to take effect.\n\n<i class=\"fas fa-exclamation-triangle\"></i> On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing).\nDuring the download, ChromeAI may not be available via the extension.\n\n<i class=\"fa fa-info-circle\" aria-hidden=\"true\"></i> For more information about Chrome Built-in AI: <https://developer.chrome.com/docs/ai/get-started>\n";
2
+ export default _default;
@@ -0,0 +1,24 @@
1
+ export default `
2
+ <i class="fas fa-exclamation-triangle"></i> Support for ChromeAI is still experimental and only available in Google Chrome.
3
+
4
+ You can test ChromeAI is enabled in your browser by going to the following URL: <https://chromeai.org/>
5
+
6
+ Enable the proper flags in Google Chrome.
7
+
8
+ - chrome://flags/#prompt-api-for-gemini-nano
9
+ - Select: \`Enabled\`
10
+ - chrome://flags/#optimization-guide-on-device-model
11
+ - Select: \`Enabled BypassPrefRequirement\`
12
+ - chrome://components
13
+ - Click \`Check for Update\` on Optimization Guide On Device Model to download the model
14
+ - [Optional] chrome://flags/#text-safety-classifier
15
+
16
+ <img src="https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c" alt="A screenshot showing how to enable the ChromeAI flag in Google Chrome" width="500px">
17
+
18
+ Then restart Chrome for these changes to take effect.
19
+
20
+ <i class="fas fa-exclamation-triangle"></i> On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing).
21
+ During the download, ChromeAI may not be available via the extension.
22
+
23
+ <i class="fa fa-info-circle" aria-hidden="true"></i> For more information about Chrome Built-in AI: <https://developer.chrome.com/docs/ai/get-started>
24
+ `;
@@ -1,6 +1,6 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
2
  import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
- import { BaseCompleter, IBaseCompleter } from './base-completer';
3
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
4
  export declare class CodestralCompleter implements IBaseCompleter {
5
5
  constructor(options: BaseCompleter.IOptions);
6
6
  get provider(): BaseChatModel;
@@ -1,7 +1,7 @@
1
1
  import { HumanMessage, SystemMessage } from '@langchain/core/messages';
2
2
  import { ChatMistralAI } from '@langchain/mistralai';
3
3
  import { Throttler } from '@lumino/polling';
4
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
4
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
5
5
  /**
6
6
  * The Mistral API has a rate limit of 1 request per second
7
7
  */
@@ -0,0 +1,2 @@
1
+ declare const _default: "\n<i class=\"fas fa-exclamation-triangle\"></i> This extension is still very much experimental. It is not an official MistralAI extension.\n\n1. Go to <https://console.mistral.ai/api-keys/> and create an API key.\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png\" alt=\"Screenshot showing how to create an API key\" width=\"500px\">\n\n2. Open the JupyterLab settings and go to the **Ai providers** section to select the `MistralAI`\n provider and the API key (required).\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png\" alt=\"Screenshot showing how to add the API key to the settings\" width=\"500px\">\n\n3. Open the chat, or use the inline completer\n\n <img src=\"https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png\" alt=\"Screenshot showing how to use the chat\" width=\"500px\">\n";
2
+ export default _default;
@@ -0,0 +1,16 @@
1
+ export default `
2
+ <i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official MistralAI extension.
3
+
4
+ 1. Go to <https://console.mistral.ai/api-keys/> and create an API key.
5
+
6
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png" alt="Screenshot showing how to create an API key" width="500px">
7
+
8
+ 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`MistralAI\`
9
+ provider and the API key (required).
10
+
11
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png" alt="Screenshot showing how to add the API key to the settings" width="500px">
12
+
13
+ 3. Open the chat, or use the inline completer
14
+
15
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png" alt="Screenshot showing how to use the chat" width="500px">
16
+ `;
@@ -1,6 +1,6 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
2
  import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
- import { BaseCompleter, IBaseCompleter } from './base-completer';
3
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
4
  export declare class OpenAICompleter implements IBaseCompleter {
5
5
  constructor(options: BaseCompleter.IOptions);
6
6
  get provider(): BaseChatModel;
@@ -1,6 +1,6 @@
1
1
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
2
2
  import { ChatOpenAI } from '@langchain/openai';
3
- import { COMPLETION_SYSTEM_PROMPT } from '../provider';
3
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
4
4
  export class OpenAICompleter {
5
5
  constructor(options) {
6
6
  this._prompt = COMPLETION_SYSTEM_PROMPT;
@@ -0,0 +1,2 @@
1
+ import { JupyterFrontEndPlugin } from '@jupyterlab/application';
2
+ export declare const defaultProviderPlugins: JupyterFrontEndPlugin<void>[];
@@ -0,0 +1,60 @@
1
+ import { ChatAnthropic } from '@langchain/anthropic';
2
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
3
+ import { ChatMistralAI } from '@langchain/mistralai';
4
+ import { ChatOpenAI } from '@langchain/openai';
5
+ import { IAIProviderRegistry } from '../tokens';
6
+ // Import completers
7
+ import { AnthropicCompleter } from './Anthropic/completer';
8
+ import { ChromeCompleter } from './ChromeAI/completer';
9
+ import { CodestralCompleter } from './MistralAI/completer';
10
+ import { OpenAICompleter } from './OpenAI/completer';
11
+ // Import Settings
12
+ import AnthropicSettings from './Anthropic/settings-schema.json';
13
+ import ChromeAISettings from './ChromeAI/settings-schema.json';
14
+ import MistralAISettings from './MistralAI/settings-schema.json';
15
+ import OpenAISettings from './OpenAI/settings-schema.json';
16
+ // Import instructions
17
+ import ChromeAIInstructions from './ChromeAI/instructions';
18
+ import MistralAIInstructions from './MistralAI/instructions';
19
+ // Build the AIProvider list
20
+ const AIProviders = [
21
+ {
22
+ name: 'Anthropic',
23
+ chatModel: ChatAnthropic,
24
+ completer: AnthropicCompleter,
25
+ settingsSchema: AnthropicSettings,
26
+ errorMessage: (error) => error.error.error.message
27
+ },
28
+ {
29
+ name: 'ChromeAI',
30
+ // TODO: fix
31
+ // @ts-expect-error: missing properties
32
+ chatModel: ChromeAI,
33
+ completer: ChromeCompleter,
34
+ instructions: ChromeAIInstructions,
35
+ settingsSchema: ChromeAISettings
36
+ },
37
+ {
38
+ name: 'MistralAI',
39
+ chatModel: ChatMistralAI,
40
+ completer: CodestralCompleter,
41
+ instructions: MistralAIInstructions,
42
+ settingsSchema: MistralAISettings
43
+ },
44
+ {
45
+ name: 'OpenAI',
46
+ chatModel: ChatOpenAI,
47
+ completer: OpenAICompleter,
48
+ settingsSchema: OpenAISettings
49
+ }
50
+ ];
51
+ export const defaultProviderPlugins = AIProviders.map(provider => {
52
+ return {
53
+ id: `@jupyterlite/ai:${provider.name}`,
54
+ autoStart: true,
55
+ requires: [IAIProviderRegistry],
56
+ activate: (app, registry) => {
57
+ registry.add(provider);
58
+ }
59
+ };
60
+ });
package/lib/index.d.ts CHANGED
@@ -1,5 +1,6 @@
1
- import { IAutocompletionRegistry } from '@jupyter/chat';
1
+ import { IChatCommandRegistry } from '@jupyter/chat';
2
2
  import { JupyterFrontEndPlugin } from '@jupyterlab/application';
3
+ import { ISettingConnector } from '@jupyterlab/settingregistry';
3
4
  import { IAIProviderRegistry } from './tokens';
4
- declare const _default: (JupyterFrontEndPlugin<void> | JupyterFrontEndPlugin<IAutocompletionRegistry> | JupyterFrontEndPlugin<IAIProviderRegistry>)[];
5
+ declare const _default: (JupyterFrontEndPlugin<void> | JupyterFrontEndPlugin<IChatCommandRegistry> | JupyterFrontEndPlugin<IAIProviderRegistry> | JupyterFrontEndPlugin<ISettingConnector>)[];
5
6
  export default _default;
package/lib/index.js CHANGED
@@ -1,49 +1,36 @@
1
- import { ActiveCellManager, AutocompletionRegistry, buildChatSidebar, buildErrorWidget, IAutocompletionRegistry } from '@jupyter/chat';
1
+ import { ActiveCellManager, buildChatSidebar, buildErrorWidget, ChatCommandRegistry, IChatCommandRegistry, InputToolbarRegistry } from '@jupyter/chat';
2
2
  import { IThemeManager } from '@jupyterlab/apputils';
3
3
  import { ICompletionProviderManager } from '@jupyterlab/completer';
4
4
  import { INotebookTracker } from '@jupyterlab/notebook';
5
5
  import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
6
- import { ISettingRegistry } from '@jupyterlab/settingregistry';
6
+ import { ISettingConnector, ISettingRegistry } from '@jupyterlab/settingregistry';
7
7
  import { IFormRendererRegistry } from '@jupyterlab/ui-components';
8
+ import { ISecretsManager } from 'jupyter-secrets-manager';
8
9
  import { ChatHandler } from './chat-handler';
9
10
  import { CompletionProvider } from './completion-provider';
10
- import { AIProviders } from './llm-models';
11
+ import { defaultProviderPlugins } from './default-providers';
11
12
  import { AIProviderRegistry } from './provider';
12
- import { aiSettingsRenderer } from './settings/panel';
13
- import { renderSlashCommandOption } from './slash-commands';
13
+ import { aiSettingsRenderer, SettingConnector } from './settings';
14
14
  import { IAIProviderRegistry } from './tokens';
15
- const autocompletionRegistryPlugin = {
15
+ import { stopItem } from './components/stop-button';
16
+ const chatCommandRegistryPlugin = {
16
17
  id: '@jupyterlite/ai:autocompletion-registry',
17
18
  description: 'Autocompletion registry',
18
19
  autoStart: true,
19
- provides: IAutocompletionRegistry,
20
+ provides: IChatCommandRegistry,
20
21
  activate: () => {
21
- const autocompletionRegistry = new AutocompletionRegistry();
22
- const options = ['/clear'];
23
- const autocompletionCommands = {
24
- opener: '/',
25
- commands: options.map(option => {
26
- return {
27
- id: option.slice(1),
28
- label: option,
29
- description: 'Clear the chat window'
30
- };
31
- }),
32
- props: {
33
- renderOption: renderSlashCommandOption
34
- }
35
- };
36
- autocompletionRegistry.add('jupyterlite-ai', autocompletionCommands);
37
- return autocompletionRegistry;
22
+ const registry = new ChatCommandRegistry();
23
+ registry.addProvider(new ChatHandler.ClearCommandProvider());
24
+ return registry;
38
25
  }
39
26
  };
40
27
  const chatPlugin = {
41
28
  id: '@jupyterlite/ai:chat',
42
29
  description: 'LLM chat extension',
43
30
  autoStart: true,
44
- requires: [IAIProviderRegistry, IRenderMimeRegistry, IAutocompletionRegistry],
31
+ requires: [IAIProviderRegistry, IRenderMimeRegistry, IChatCommandRegistry],
45
32
  optional: [INotebookTracker, ISettingRegistry, IThemeManager],
46
- activate: async (app, providerRegistry, rmRegistry, autocompletionRegistry, notebookTracker, settingsRegistry, themeManager) => {
33
+ activate: async (app, providerRegistry, rmRegistry, chatCommandRegistry, notebookTracker, settingsRegistry, themeManager) => {
47
34
  let activeCellManager = null;
48
35
  if (notebookTracker) {
49
36
  activeCellManager = new ActiveCellManager({
@@ -80,12 +67,26 @@ const chatPlugin = {
80
67
  console.error(`Something went wrong when reading the settings.\n${reason}`);
81
68
  });
82
69
  let chatWidget = null;
70
+ const inputToolbarRegistry = InputToolbarRegistry.defaultToolbarRegistry();
71
+ const stopButton = stopItem(() => chatHandler.stopStreaming());
72
+ inputToolbarRegistry.addItem('stop', stopButton);
73
+ chatHandler.writersChanged.connect((_, users) => {
74
+ if (users.filter(user => user.username === chatHandler.personaName).length) {
75
+ inputToolbarRegistry.hide('send');
76
+ inputToolbarRegistry.show('stop');
77
+ }
78
+ else {
79
+ inputToolbarRegistry.hide('stop');
80
+ inputToolbarRegistry.show('send');
81
+ }
82
+ });
83
83
  try {
84
84
  chatWidget = buildChatSidebar({
85
85
  model: chatHandler,
86
86
  themeManager,
87
87
  rmRegistry,
88
- autocompletionRegistry
88
+ chatCommandRegistry,
89
+ inputToolbarRegistry
89
90
  });
90
91
  chatWidget.title.caption = 'Jupyterlite AI Chat';
91
92
  }
@@ -112,11 +113,16 @@ const providerRegistryPlugin = {
112
113
  id: '@jupyterlite/ai:provider-registry',
113
114
  autoStart: true,
114
115
  requires: [IFormRendererRegistry, ISettingRegistry],
115
- optional: [IRenderMimeRegistry],
116
+ optional: [IRenderMimeRegistry, ISecretsManager, ISettingConnector],
116
117
  provides: IAIProviderRegistry,
117
- activate: (app, editorRegistry, settingRegistry, rmRegistry) => {
118
- const providerRegistry = new AIProviderRegistry();
119
- editorRegistry.addRenderer('@jupyterlite/ai:provider-registry.AIprovider', aiSettingsRenderer({ providerRegistry, rmRegistry }));
118
+ activate: (app, editorRegistry, settingRegistry, rmRegistry, secretsManager, settingConnector) => {
119
+ const providerRegistry = new AIProviderRegistry({ secretsManager });
120
+ editorRegistry.addRenderer('@jupyterlite/ai:provider-registry.AIprovider', aiSettingsRenderer({
121
+ providerRegistry,
122
+ rmRegistry,
123
+ secretsManager,
124
+ settingConnector
125
+ }));
120
126
  settingRegistry
121
127
  .load(providerRegistryPlugin.id)
122
128
  .then(settings => {
@@ -126,7 +132,10 @@ const providerRegistryPlugin = {
126
132
  const providerSettings = ((_a = settings.get('AIprovider').composite) !== null && _a !== void 0 ? _a : {
127
133
  provider: 'None'
128
134
  });
129
- providerRegistry.setProvider(providerSettings.provider, providerSettings);
135
+ providerRegistry.setProvider({
136
+ name: providerSettings.provider,
137
+ settings: providerSettings
138
+ });
130
139
  };
131
140
  settings.changed.connect(() => updateProvider());
132
141
  updateProvider();
@@ -134,14 +143,26 @@ const providerRegistryPlugin = {
134
143
  .catch(reason => {
135
144
  console.error(`Failed to load settings for ${providerRegistryPlugin.id}`, reason);
136
145
  });
137
- // Initialize the registry with the default providers
138
- AIProviders.forEach(provider => providerRegistry.add(provider));
139
146
  return providerRegistry;
140
147
  }
141
148
  };
149
+ /**
150
+ * Provides the settings connector as a separate plugin to allow for alternative
151
+ * implementations that may want to fetch settings from a different source or
152
+ * endpoint.
153
+ */
154
+ const settingsConnector = {
155
+ id: '@jupyterlite/ai:settings-connector',
156
+ description: 'Provides a settings connector which does not save passwords.',
157
+ autoStart: true,
158
+ provides: ISettingConnector,
159
+ activate: (app) => new SettingConnector(app.serviceManager.settings)
160
+ };
142
161
  export default [
143
162
  providerRegistryPlugin,
144
- autocompletionRegistryPlugin,
163
+ chatCommandRegistryPlugin,
145
164
  chatPlugin,
146
- completerPlugin
165
+ completerPlugin,
166
+ settingsConnector,
167
+ ...defaultProviderPlugins
147
168
  ];
package/lib/provider.d.ts CHANGED
@@ -1,14 +1,18 @@
1
- import { ICompletionProviderManager } from '@jupyterlab/completer';
2
1
  import { BaseLanguageModel } from '@langchain/core/language_models/base';
3
2
  import { BaseChatModel } from '@langchain/core/language_models/chat_models';
4
3
  import { ISignal } from '@lumino/signaling';
5
4
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
6
- import { IBaseCompleter } from './llm-models';
7
- import { IAIProvider, IAIProviderRegistry } from './tokens';
8
5
  import { JSONSchema7 } from 'json-schema';
6
+ import { ISecretsManager } from 'jupyter-secrets-manager';
7
+ import { IBaseCompleter } from './base-completer';
8
+ import { IAIProvider, IAIProviderRegistry, ISetProviderOptions } from './tokens';
9
9
  export declare const chatSystemPrompt: (options: AIProviderRegistry.IPromptOptions) => string;
10
10
  export declare const COMPLETION_SYSTEM_PROMPT = "\nYou are an application built to provide helpful code completion suggestions.\nYou should only produce code. Keep comments to minimum, use the\nprogramming language comment syntax. Produce clean code.\nThe code is written in JupyterLab, a data analysis and code development\nenvironment which can execute code extended with additional syntax for\ninteractive features, such as magics.\nOnly give raw strings back, do not format the response using backticks.\nThe output should be a single string, and should correspond to what a human users\nwould write.\nDo not include the prompt in the output, only the string that should be appended to the current input.\n";
11
11
  export declare class AIProviderRegistry implements IAIProviderRegistry {
12
+ /**
13
+ * The constructor of the provider registry.
14
+ */
15
+ constructor(options: AIProviderRegistry.IOptions);
12
16
  /**
13
17
  * Get the list of provider names.
14
18
  */
@@ -53,14 +57,14 @@ export declare class AIProviderRegistry implements IAIProviderRegistry {
53
57
  * Set the providers (chat model and completer).
54
58
  * Creates the providers if the name has changed, otherwise only updates their config.
55
59
  *
56
- * @param name - the name of the provider to use.
57
- * @param settings - the settings for the models.
60
+ * @param options - An object with the name and the settings of the provider to use.
58
61
  */
59
- setProvider(name: string, settings: ReadonlyPartialJSONObject): void;
62
+ setProvider(options: ISetProviderOptions): Promise<void>;
60
63
  /**
61
64
  * A signal emitting when the provider or its settings has changed.
62
65
  */
63
66
  get providerChanged(): ISignal<IAIProviderRegistry, void>;
67
+ private _secretsManager;
64
68
  private _currentProvider;
65
69
  private _completer;
66
70
  private _chatModel;
@@ -69,6 +73,7 @@ export declare class AIProviderRegistry implements IAIProviderRegistry {
69
73
  private _chatError;
70
74
  private _completerError;
71
75
  private _providers;
76
+ private _deferredProvider;
72
77
  }
73
78
  export declare namespace AIProviderRegistry {
74
79
  /**
@@ -76,13 +81,9 @@ export declare namespace AIProviderRegistry {
76
81
  */
77
82
  interface IOptions {
78
83
  /**
79
- * The completion provider manager in which register the LLM completer.
80
- */
81
- completionProviderManager: ICompletionProviderManager;
82
- /**
83
- * The application commands registry.
84
+ * The secrets manager used in the application.
84
85
  */
85
- requestCompletion: () => void;
86
+ secretsManager?: ISecretsManager;
86
87
  }
87
88
  /**
88
89
  * The options for the Chat system prompt.