@jupyterlite/ai 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/README.md +48 -9
  2. package/lib/chat-handler.d.ts +15 -3
  3. package/lib/chat-handler.js +80 -28
  4. package/lib/completion-provider.d.ts +5 -18
  5. package/lib/completion-provider.js +8 -34
  6. package/lib/icons.d.ts +2 -0
  7. package/lib/icons.js +15 -0
  8. package/lib/index.d.ts +3 -2
  9. package/lib/index.js +79 -22
  10. package/lib/llm-models/anthropic-completer.d.ts +19 -0
  11. package/lib/llm-models/anthropic-completer.js +57 -0
  12. package/lib/llm-models/base-completer.d.ts +6 -2
  13. package/lib/llm-models/chrome-completer.d.ts +19 -0
  14. package/lib/llm-models/chrome-completer.js +67 -0
  15. package/lib/llm-models/codestral-completer.d.ts +9 -8
  16. package/lib/llm-models/codestral-completer.js +37 -54
  17. package/lib/llm-models/index.d.ts +3 -2
  18. package/lib/llm-models/index.js +42 -2
  19. package/lib/llm-models/openai-completer.d.ts +19 -0
  20. package/lib/llm-models/openai-completer.js +51 -0
  21. package/lib/provider.d.ts +54 -15
  22. package/lib/provider.js +123 -41
  23. package/lib/settings/instructions.d.ts +2 -0
  24. package/lib/settings/instructions.js +44 -0
  25. package/lib/settings/panel.d.ts +70 -0
  26. package/lib/settings/panel.js +190 -0
  27. package/lib/settings/schemas/_generated/Anthropic.json +70 -0
  28. package/lib/settings/schemas/_generated/ChromeAI.json +21 -0
  29. package/lib/settings/schemas/_generated/MistralAI.json +75 -0
  30. package/lib/settings/schemas/_generated/OpenAI.json +668 -0
  31. package/lib/settings/schemas/base.json +7 -0
  32. package/lib/settings/schemas/index.d.ts +3 -0
  33. package/lib/settings/schemas/index.js +11 -0
  34. package/lib/slash-commands.d.ts +16 -0
  35. package/lib/slash-commands.js +25 -0
  36. package/lib/tokens.d.ts +103 -0
  37. package/lib/tokens.js +5 -0
  38. package/package.json +27 -104
  39. package/schema/chat.json +8 -0
  40. package/schema/provider-registry.json +17 -0
  41. package/src/chat-handler.ts +103 -43
  42. package/src/completion-provider.ts +13 -37
  43. package/src/icons.ts +18 -0
  44. package/src/index.ts +101 -24
  45. package/src/llm-models/anthropic-completer.ts +75 -0
  46. package/src/llm-models/base-completer.ts +7 -2
  47. package/src/llm-models/chrome-completer.ts +88 -0
  48. package/src/llm-models/codestral-completer.ts +43 -69
  49. package/src/llm-models/index.ts +49 -2
  50. package/src/llm-models/openai-completer.ts +67 -0
  51. package/src/llm-models/svg.d.ts +9 -0
  52. package/src/provider.ts +138 -43
  53. package/src/settings/instructions.ts +48 -0
  54. package/src/settings/panel.tsx +257 -0
  55. package/src/settings/schemas/index.ts +15 -0
  56. package/src/slash-commands.tsx +55 -0
  57. package/src/tokens.ts +112 -0
  58. package/style/base.css +4 -0
  59. package/style/icons/jupyternaut-lite.svg +7 -0
  60. package/lib/llm-models/utils.d.ts +0 -15
  61. package/lib/llm-models/utils.js +0 -29
  62. package/lib/token.d.ts +0 -13
  63. package/lib/token.js +0 -2
  64. package/schema/ai-provider.json +0 -21
  65. package/src/llm-models/utils.ts +0 -41
  66. package/src/token.ts +0 -19
@@ -1,11 +1,15 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { LLM } from '@langchain/core/language_models/llms';
2
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
3
3
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
4
4
  export interface IBaseCompleter {
5
5
  /**
6
6
  * The LLM completer.
7
7
  */
8
- provider: LLM;
8
+ provider: BaseLanguageModel;
9
+ /**
10
+ * The completion prompt.
11
+ */
12
+ prompt: string;
9
13
  /**
10
14
  * The function to fetch a new completion.
11
15
  */
@@ -0,0 +1,19 @@
1
+ import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { LLM } from '@langchain/core/language_models/llms';
3
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
4
+ export declare class ChromeCompleter implements IBaseCompleter {
5
+ constructor(options: BaseCompleter.IOptions);
6
+ /**
7
+ * Getter and setter for the initial prompt.
8
+ */
9
+ get prompt(): string;
10
+ set prompt(value: string);
11
+ get provider(): LLM;
12
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
13
+ items: {
14
+ insertText: string;
15
+ }[];
16
+ }>;
17
+ private _chromeProvider;
18
+ private _prompt;
19
+ }
@@ -0,0 +1,67 @@
1
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
2
+ import { HumanMessage, SystemMessage } from '@langchain/core/messages';
3
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
4
+ /**
5
+ * Regular expression to match the '```' string at the start of a string.
6
+ * This allows the completions returned by the LLM to be kept after stripping the code block formatting.
7
+ *
8
+ * For example, if the response contains the following content after typing `import pandas`:
9
+ *
10
+ * ```python
11
+ * as pd
12
+ * ```
13
+ *
14
+ * The resulting string after removing the code block delimiters will be:
15
+ *
16
+ * as pd
17
+ */
18
+ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
19
+ /**
20
+ * Regular expression to match the '```' string at the end of a string.
21
+ */
22
+ const CODE_BLOCK_END_REGEX = /```$/;
23
+ export class ChromeCompleter {
24
+ constructor(options) {
25
+ this._prompt = COMPLETION_SYSTEM_PROMPT;
26
+ this._chromeProvider = new ChromeAI({ ...options.settings });
27
+ }
28
+ /**
29
+ * Getter and setter for the initial prompt.
30
+ */
31
+ get prompt() {
32
+ return this._prompt;
33
+ }
34
+ set prompt(value) {
35
+ this._prompt = value;
36
+ }
37
+ get provider() {
38
+ return this._chromeProvider;
39
+ }
40
+ async fetch(request, context) {
41
+ const { text, offset: cursorOffset } = request;
42
+ const prompt = text.slice(0, cursorOffset);
43
+ const trimmedPrompt = prompt.trim();
44
+ const messages = [
45
+ new SystemMessage(this._prompt),
46
+ new HumanMessage(trimmedPrompt)
47
+ ];
48
+ try {
49
+ let response = await this._chromeProvider.invoke(messages);
50
+ // ChromeAI sometimes returns a string starting with '```',
51
+ // so process the response to remove the code block delimiters
52
+ if (CODE_BLOCK_START_REGEX.test(response)) {
53
+ response = response
54
+ .replace(CODE_BLOCK_START_REGEX, '')
55
+ .replace(CODE_BLOCK_END_REGEX, '');
56
+ }
57
+ const items = [{ insertText: response }];
58
+ return {
59
+ items
60
+ };
61
+ }
62
+ catch (error) {
63
+ console.error('Error fetching completion:', error);
64
+ return { items: [] };
65
+ }
66
+ }
67
+ }
@@ -1,15 +1,16 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { LLM } from '@langchain/core/language_models/llms';
2
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
3
  import { BaseCompleter, IBaseCompleter } from './base-completer';
4
4
  export declare class CodestralCompleter implements IBaseCompleter {
5
5
  constructor(options: BaseCompleter.IOptions);
6
- get provider(): LLM;
7
- set requestCompletion(value: () => void);
8
- fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
9
- items: any;
10
- }>;
11
- private _requestCompletion?;
6
+ get provider(): BaseChatModel;
7
+ /**
8
+ * Getter and setter for the initial prompt.
9
+ */
10
+ get prompt(): string;
11
+ set prompt(value: string);
12
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
12
13
  private _throttler;
13
14
  private _mistralProvider;
14
- private _currentData;
15
+ private _prompt;
15
16
  }
@@ -1,75 +1,58 @@
1
- import { MistralAI } from '@langchain/mistralai';
1
+ import { HumanMessage, SystemMessage } from '@langchain/core/messages';
2
+ import { ChatMistralAI } from '@langchain/mistralai';
2
3
  import { Throttler } from '@lumino/polling';
4
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
3
5
  /**
4
6
  * The Mistral API has a rate limit of 1 request per second
5
7
  */
6
8
  const INTERVAL = 1000;
7
- /**
8
- * Timeout to avoid endless requests
9
- */
10
- const REQUEST_TIMEOUT = 3000;
11
9
  export class CodestralCompleter {
12
10
  constructor(options) {
13
- this._currentData = null;
14
- // this._requestCompletion = options.requestCompletion;
15
- this._mistralProvider = new MistralAI({ ...options.settings });
16
- this._throttler = new Throttler(async (data) => {
17
- var _a;
18
- const invokedData = data;
19
- // Request completion.
20
- const request = this._mistralProvider.completionWithRetry(data, {}, false);
21
- const timeoutPromise = new Promise(resolve => {
22
- return setTimeout(() => resolve(null), REQUEST_TIMEOUT);
23
- });
24
- // Fetch again if the request is too long or if the prompt has changed.
25
- const response = await Promise.race([request, timeoutPromise]);
26
- if (response === null ||
27
- invokedData.prompt !== ((_a = this._currentData) === null || _a === void 0 ? void 0 : _a.prompt)) {
28
- return {
29
- items: [],
30
- fetchAgain: true
31
- };
32
- }
11
+ this._prompt = COMPLETION_SYSTEM_PROMPT;
12
+ this._mistralProvider = new ChatMistralAI({ ...options.settings });
13
+ this._throttler = new Throttler(async (messages) => {
14
+ const response = await this._mistralProvider.invoke(messages);
33
15
  // Extract results of completion request.
34
- const items = response.choices.map((choice) => {
35
- return { insertText: choice.message.content };
36
- });
37
- return {
38
- items
39
- };
16
+ const items = [];
17
+ if (typeof response.content === 'string') {
18
+ items.push({
19
+ insertText: response.content
20
+ });
21
+ }
22
+ else {
23
+ response.content.forEach(content => {
24
+ if (content.type !== 'text') {
25
+ return;
26
+ }
27
+ items.push({
28
+ insertText: content.text
29
+ });
30
+ });
31
+ }
32
+ return { items };
40
33
  }, { limit: INTERVAL });
41
34
  }
42
35
  get provider() {
43
36
  return this._mistralProvider;
44
37
  }
45
- set requestCompletion(value) {
46
- this._requestCompletion = value;
38
+ /**
39
+ * Getter and setter for the initial prompt.
40
+ */
41
+ get prompt() {
42
+ return this._prompt;
43
+ }
44
+ set prompt(value) {
45
+ this._prompt = value;
47
46
  }
48
47
  async fetch(request, context) {
49
48
  const { text, offset: cursorOffset } = request;
50
49
  const prompt = text.slice(0, cursorOffset);
51
- const suffix = text.slice(cursorOffset);
52
- const data = {
53
- prompt,
54
- suffix,
55
- model: this._mistralProvider.model,
56
- // temperature: 0,
57
- // top_p: 1,
58
- // max_tokens: 1024,
59
- // min_tokens: 0,
60
- stream: false,
61
- // random_seed: 1337,
62
- stop: []
63
- };
50
+ const messages = [
51
+ new SystemMessage(this._prompt),
52
+ new HumanMessage(prompt)
53
+ ];
64
54
  try {
65
- this._currentData = data;
66
- const completionResult = await this._throttler.invoke(data);
67
- if (completionResult.fetchAgain) {
68
- if (this._requestCompletion) {
69
- this._requestCompletion();
70
- }
71
- }
72
- return { items: completionResult.items };
55
+ return await this._throttler.invoke(messages);
73
56
  }
74
57
  catch (error) {
75
58
  console.error('Error fetching completions', error);
@@ -1,3 +1,4 @@
1
+ import { IAIProvider } from '../tokens';
1
2
  export * from './base-completer';
2
- export * from './codestral-completer';
3
- export * from './utils';
3
+ declare const AIProviders: IAIProvider[];
4
+ export { AIProviders };
@@ -1,3 +1,43 @@
1
+ import { ChatAnthropic } from '@langchain/anthropic';
2
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
3
+ import { ChatMistralAI } from '@langchain/mistralai';
4
+ import { ChatOpenAI } from '@langchain/openai';
5
+ import { AnthropicCompleter } from './anthropic-completer';
6
+ import { CodestralCompleter } from './codestral-completer';
7
+ import { ChromeCompleter } from './chrome-completer';
8
+ import { OpenAICompleter } from './openai-completer';
9
+ import { instructions } from '../settings/instructions';
10
+ import { ProviderSettings } from '../settings/schemas';
1
11
  export * from './base-completer';
2
- export * from './codestral-completer';
3
- export * from './utils';
12
+ const AIProviders = [
13
+ {
14
+ name: 'Anthropic',
15
+ chatModel: ChatAnthropic,
16
+ completer: AnthropicCompleter,
17
+ settingsSchema: ProviderSettings.Anthropic,
18
+ errorMessage: (error) => error.error.error.message
19
+ },
20
+ {
21
+ name: 'ChromeAI',
22
+ // TODO: fix
23
+ // @ts-expect-error: missing properties
24
+ chatModel: ChromeAI,
25
+ completer: ChromeCompleter,
26
+ instructions: instructions.ChromeAI,
27
+ settingsSchema: ProviderSettings.ChromeAI
28
+ },
29
+ {
30
+ name: 'MistralAI',
31
+ chatModel: ChatMistralAI,
32
+ completer: CodestralCompleter,
33
+ instructions: instructions.MistralAI,
34
+ settingsSchema: ProviderSettings.MistralAI
35
+ },
36
+ {
37
+ name: 'OpenAI',
38
+ chatModel: ChatOpenAI,
39
+ completer: OpenAICompleter,
40
+ settingsSchema: ProviderSettings.OpenAI
41
+ }
42
+ ];
43
+ export { AIProviders };
@@ -0,0 +1,19 @@
1
+ import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
4
+ export declare class OpenAICompleter implements IBaseCompleter {
5
+ constructor(options: BaseCompleter.IOptions);
6
+ get provider(): BaseChatModel;
7
+ /**
8
+ * Getter and setter for the initial prompt.
9
+ */
10
+ get prompt(): string;
11
+ set prompt(value: string);
12
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
13
+ items: {
14
+ insertText: string;
15
+ }[];
16
+ }>;
17
+ private _openAIProvider;
18
+ private _prompt;
19
+ }
@@ -0,0 +1,51 @@
1
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
2
+ import { ChatOpenAI } from '@langchain/openai';
3
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
4
+ export class OpenAICompleter {
5
+ constructor(options) {
6
+ this._prompt = COMPLETION_SYSTEM_PROMPT;
7
+ this._openAIProvider = new ChatOpenAI({ ...options.settings });
8
+ }
9
+ get provider() {
10
+ return this._openAIProvider;
11
+ }
12
+ /**
13
+ * Getter and setter for the initial prompt.
14
+ */
15
+ get prompt() {
16
+ return this._prompt;
17
+ }
18
+ set prompt(value) {
19
+ this._prompt = value;
20
+ }
21
+ async fetch(request, context) {
22
+ const { text, offset: cursorOffset } = request;
23
+ const prompt = text.slice(0, cursorOffset);
24
+ const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
25
+ try {
26
+ const response = await this._openAIProvider.invoke(messages);
27
+ const items = [];
28
+ if (typeof response.content === 'string') {
29
+ items.push({
30
+ insertText: response.content
31
+ });
32
+ }
33
+ else {
34
+ response.content.forEach(content => {
35
+ if (content.type !== 'text') {
36
+ return;
37
+ }
38
+ items.push({
39
+ insertText: content.text,
40
+ filterText: prompt.substring(prompt.length)
41
+ });
42
+ });
43
+ }
44
+ return { items };
45
+ }
46
+ catch (error) {
47
+ console.error('Error fetching completions', error);
48
+ return { items: [] };
49
+ }
50
+ }
51
+ }
package/lib/provider.d.ts CHANGED
@@ -4,18 +4,43 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
4
4
  import { ISignal } from '@lumino/signaling';
5
5
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
6
6
  import { IBaseCompleter } from './llm-models';
7
- import { IAIProvider } from './token';
8
- export declare class AIProvider implements IAIProvider {
9
- constructor(options: AIProvider.IOptions);
10
- get name(): string;
7
+ import { IAIProvider, IAIProviderRegistry } from './tokens';
8
+ import { JSONSchema7 } from 'json-schema';
9
+ export declare const chatSystemPrompt: (options: AIProviderRegistry.IPromptOptions) => string;
10
+ export declare const COMPLETION_SYSTEM_PROMPT = "\nYou are an application built to provide helpful code completion suggestions.\nYou should only produce code. Keep comments to minimum, use the\nprogramming language comment syntax. Produce clean code.\nThe code is written in JupyterLab, a data analysis and code development\nenvironment which can execute code extended with additional syntax for\ninteractive features, such as magics.\nOnly give raw strings back, do not format the response using backticks.\nThe output should be a single string, and should correspond to what a human users\nwould write.\nDo not include the prompt in the output, only the string that should be appended to the current input.\n";
11
+ export declare class AIProviderRegistry implements IAIProviderRegistry {
12
+ /**
13
+ * Get the list of provider names.
14
+ */
15
+ get providers(): string[];
16
+ /**
17
+ * Add a new provider.
18
+ */
19
+ add(provider: IAIProvider): void;
20
+ /**
21
+ * Get the current provider name.
22
+ */
23
+ get currentName(): string;
11
24
  /**
12
25
  * Get the current completer of the completion provider.
13
26
  */
14
- get completer(): IBaseCompleter | null;
27
+ get currentCompleter(): IBaseCompleter | null;
15
28
  /**
16
29
  * Get the current llm chat model.
17
30
  */
18
- get chatModel(): BaseChatModel | null;
31
+ get currentChatModel(): BaseChatModel | null;
32
+ /**
33
+ * Get the settings schema of a given provider.
34
+ */
35
+ getSettingsSchema(provider: string): JSONSchema7;
36
+ /**
37
+ * Get the instructions of a given provider.
38
+ */
39
+ getInstructions(provider: string): string | undefined;
40
+ /**
41
+ * Format an error message from the current provider.
42
+ */
43
+ formatErrorMessage(error: any): string;
19
44
  /**
20
45
  * Get the current chat error;
21
46
  */
@@ -25,22 +50,27 @@ export declare class AIProvider implements IAIProvider {
25
50
  */
26
51
  get completerError(): string;
27
52
  /**
28
- * Set the models (chat model and completer).
29
- * Creates the models if the name has changed, otherwise only updates their config.
53
+ * Set the providers (chat model and completer).
54
+ * Creates the providers if the name has changed, otherwise only updates their config.
30
55
  *
31
- * @param name - the name of the model to use.
56
+ * @param name - the name of the provider to use.
32
57
  * @param settings - the settings for the models.
33
58
  */
34
- setModels(name: string, settings: ReadonlyPartialJSONObject): void;
35
- get modelChange(): ISignal<IAIProvider, void>;
36
- private _completionProvider;
37
- private _llmChatModel;
59
+ setProvider(name: string, settings: ReadonlyPartialJSONObject): void;
60
+ /**
61
+ * A signal emitting when the provider or its settings has changed.
62
+ */
63
+ get providerChanged(): ISignal<IAIProviderRegistry, void>;
64
+ private _currentProvider;
65
+ private _completer;
66
+ private _chatModel;
38
67
  private _name;
39
- private _modelChange;
68
+ private _providerChanged;
40
69
  private _chatError;
41
70
  private _completerError;
71
+ private _providers;
42
72
  }
43
- export declare namespace AIProvider {
73
+ export declare namespace AIProviderRegistry {
44
74
  /**
45
75
  * The options for the LLM provider.
46
76
  */
@@ -54,6 +84,15 @@ export declare namespace AIProvider {
54
84
  */
55
85
  requestCompletion: () => void;
56
86
  }
87
+ /**
88
+ * The options for the Chat system prompt.
89
+ */
90
+ interface IPromptOptions {
91
+ /**
92
+ * The provider name.
93
+ */
94
+ provider_name: string;
95
+ }
57
96
  /**
58
97
  * This function indicates whether a key is writable in an object.
59
98
  * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript