@jupyterlite/ai 0.7.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/lib/base-completer.d.ts +23 -1
  2. package/lib/base-completer.js +14 -1
  3. package/lib/chat-handler.d.ts +4 -6
  4. package/lib/chat-handler.js +22 -22
  5. package/lib/completion-provider.js +1 -1
  6. package/lib/default-prompts.d.ts +2 -0
  7. package/lib/default-prompts.js +31 -0
  8. package/lib/default-providers/Anthropic/completer.d.ts +4 -9
  9. package/lib/default-providers/Anthropic/completer.js +4 -13
  10. package/lib/default-providers/ChromeAI/completer.d.ts +4 -9
  11. package/lib/default-providers/ChromeAI/completer.js +4 -13
  12. package/lib/default-providers/ChromeAI/settings-schema.json +0 -3
  13. package/lib/default-providers/Gemini/completer.d.ts +12 -0
  14. package/lib/default-providers/Gemini/completer.js +48 -0
  15. package/lib/default-providers/Gemini/instructions.d.ts +2 -0
  16. package/lib/default-providers/Gemini/instructions.js +9 -0
  17. package/lib/default-providers/Gemini/settings-schema.json +64 -0
  18. package/lib/default-providers/MistralAI/completer.d.ts +10 -11
  19. package/lib/default-providers/MistralAI/completer.js +41 -50
  20. package/lib/default-providers/MistralAI/instructions.d.ts +1 -1
  21. package/lib/default-providers/MistralAI/instructions.js +2 -0
  22. package/lib/default-providers/Ollama/completer.d.ts +4 -9
  23. package/lib/default-providers/Ollama/completer.js +7 -13
  24. package/lib/default-providers/Ollama/settings-schema.json +1 -4
  25. package/lib/default-providers/OpenAI/completer.d.ts +4 -9
  26. package/lib/default-providers/OpenAI/completer.js +7 -13
  27. package/lib/default-providers/OpenAI/settings-schema.json +88 -128
  28. package/lib/default-providers/WebLLM/completer.d.ts +3 -9
  29. package/lib/default-providers/WebLLM/completer.js +4 -13
  30. package/lib/default-providers/WebLLM/settings-schema.json +1 -3
  31. package/lib/default-providers/index.js +23 -19
  32. package/lib/index.d.ts +1 -0
  33. package/lib/index.js +68 -14
  34. package/lib/provider.d.ts +39 -11
  35. package/lib/provider.js +166 -81
  36. package/lib/settings/index.d.ts +1 -0
  37. package/lib/settings/index.js +1 -0
  38. package/lib/settings/panel.d.ts +116 -8
  39. package/lib/settings/panel.js +117 -22
  40. package/lib/settings/textarea.d.ts +2 -0
  41. package/lib/settings/textarea.js +18 -0
  42. package/lib/tokens.d.ts +24 -20
  43. package/lib/tokens.js +2 -1
  44. package/package.json +10 -9
  45. package/schema/chat.json +1 -1
  46. package/schema/provider-registry.json +11 -5
  47. package/schema/system-prompts.json +22 -0
  48. package/src/base-completer.ts +39 -1
  49. package/src/chat-handler.ts +23 -25
  50. package/src/completion-provider.ts +1 -1
  51. package/src/default-prompts.ts +33 -0
  52. package/src/default-providers/Anthropic/completer.ts +5 -16
  53. package/src/default-providers/ChromeAI/completer.ts +5 -16
  54. package/src/default-providers/Gemini/completer.ts +61 -0
  55. package/src/default-providers/Gemini/instructions.ts +9 -0
  56. package/src/default-providers/MistralAI/completer.ts +47 -60
  57. package/src/default-providers/MistralAI/instructions.ts +2 -0
  58. package/src/default-providers/Ollama/completer.ts +8 -16
  59. package/src/default-providers/OpenAI/completer.ts +8 -16
  60. package/src/default-providers/WebLLM/completer.ts +5 -16
  61. package/src/default-providers/index.ts +23 -20
  62. package/src/index.ts +95 -15
  63. package/src/provider.ts +198 -94
  64. package/src/settings/index.ts +1 -0
  65. package/src/settings/panel.tsx +262 -34
  66. package/src/settings/textarea.tsx +33 -0
  67. package/src/tokens.ts +26 -22
  68. package/style/base.css +29 -0
@@ -1,10 +1,12 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
2
3
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
4
+ import { IAIProviderRegistry } from './tokens';
3
5
  export interface IBaseCompleter {
4
6
  /**
5
7
  * The completion prompt.
6
8
  */
7
- prompt: string;
9
+ readonly systemPrompt: string;
8
10
  /**
9
11
  * The function to fetch a new completion.
10
12
  */
@@ -14,6 +16,19 @@ export interface IBaseCompleter {
14
16
  */
15
17
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
16
18
  }
19
+ export declare abstract class BaseCompleter implements IBaseCompleter {
20
+ constructor(options: BaseCompleter.IOptions);
21
+ /**
22
+ * Get the system prompt for the completion.
23
+ */
24
+ get systemPrompt(): string;
25
+ /**
26
+ * The fetch request for the LLM completer.
27
+ */
28
+ abstract fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
29
+ protected _providerRegistry: IAIProviderRegistry;
30
+ protected abstract _completer: BaseLanguageModel<any, any>;
31
+ }
17
32
  /**
18
33
  * The namespace for the base completer.
19
34
  */
@@ -22,6 +37,13 @@ export declare namespace BaseCompleter {
22
37
  * The options for the constructor of a completer.
23
38
  */
24
39
  interface IOptions {
40
+ /**
41
+ * The provider registry.
42
+ */
43
+ providerRegistry: IAIProviderRegistry;
44
+ /**
45
+ * The settings of the provider.
46
+ */
25
47
  settings: ReadonlyPartialJSONObject;
26
48
  }
27
49
  }
@@ -1 +1,14 @@
1
- export {};
1
+ import { DEFAULT_COMPLETION_SYSTEM_PROMPT } from './default-prompts';
2
+ export class BaseCompleter {
3
+ constructor(options) {
4
+ this._providerRegistry = options.providerRegistry;
5
+ }
6
+ /**
7
+ * Get the system prompt for the completion.
8
+ */
9
+ get systemPrompt() {
10
+ return (this._providerRegistry.completerSystemPrompt ??
11
+ DEFAULT_COMPLETION_SYSTEM_PROMPT);
12
+ }
13
+ _providerRegistry;
14
+ }
@@ -15,10 +15,9 @@ export declare class ChatHandler extends AbstractChatModel {
15
15
  get personaName(): string;
16
16
  set personaName(value: string);
17
17
  /**
18
- * Getter and setter for the initial prompt.
18
+ * Get/set the system prompt for the chat.
19
19
  */
20
- get prompt(): string;
21
- set prompt(value: string);
20
+ get systemPrompt(): string;
22
21
  sendMessage(message: INewMessage): Promise<boolean>;
23
22
  getHistory(): Promise<IChatHistory>;
24
23
  dispose(): void;
@@ -27,7 +26,6 @@ export declare class ChatHandler extends AbstractChatModel {
27
26
  createChatContext(): IChatContext;
28
27
  private _providerRegistry;
29
28
  private _personaName;
30
- private _prompt;
31
29
  private _errorMessage;
32
30
  private _history;
33
31
  private _defaultErrorMessage;
@@ -52,7 +50,7 @@ export declare namespace ChatHandler {
52
50
  class ClearCommandProvider implements IChatCommandProvider {
53
51
  id: string;
54
52
  private _slash_commands;
55
- getChatCommands(inputModel: IInputModel): Promise<ChatCommand[]>;
56
- handleChatCommand(command: ChatCommand, inputModel: IInputModel): Promise<void>;
53
+ listCommandCompletions(inputModel: IInputModel): Promise<ChatCommand[]>;
54
+ onSubmit(inputModel: IInputModel): Promise<void>;
57
55
  }
58
56
  }
@@ -5,8 +5,8 @@
5
5
  import { AbstractChatContext, AbstractChatModel } from '@jupyter/chat';
6
6
  import { AIMessage, HumanMessage, mergeMessageRuns, SystemMessage } from '@langchain/core/messages';
7
7
  import { UUID } from '@lumino/coreutils';
8
+ import { DEFAULT_CHAT_SYSTEM_PROMPT } from './default-prompts';
8
9
  import { jupyternautLiteIcon } from './icons';
9
- import { chatSystemPrompt } from './provider';
10
10
  /**
11
11
  * The base64 encoded SVG string of the jupyternaut lite icon.
12
12
  * Encode so it can be passed as avatar_url to jupyter-chat.
@@ -17,7 +17,7 @@ export const welcomeMessage = (providers) => `
17
17
  #### Ask JupyterLite AI
18
18
 
19
19
 
20
- The provider to use can be set in the settings editor, by selecting it from
20
+ The provider to use can be set in the <button data-commandLinker-command="settingeditor:open" data-commandLinker-args='{"query": "AI provider"}' href="#">settings editor</button>, by selecting it from
21
21
  the <img src="${AI_AVATAR}" width="16" height="16"> _AI provider_ settings.
22
22
 
23
23
  The current providers that are available are _${providers.sort().join('_, _')}_.
@@ -28,14 +28,8 @@ export class ChatHandler extends AbstractChatModel {
28
28
  constructor(options) {
29
29
  super(options);
30
30
  this._providerRegistry = options.providerRegistry;
31
- this._prompt = chatSystemPrompt({
32
- provider_name: this._providerRegistry.currentName
33
- });
34
31
  this._providerRegistry.providerChanged.connect(() => {
35
32
  this._errorMessage = this._providerRegistry.chatError;
36
- this._prompt = chatSystemPrompt({
37
- provider_name: this._providerRegistry.currentName
38
- });
39
33
  });
40
34
  }
41
35
  get provider() {
@@ -58,13 +52,10 @@ export class ChatHandler extends AbstractChatModel {
58
52
  this._personaName = value;
59
53
  }
60
54
  /**
61
- * Getter and setter for the initial prompt.
55
+ * Get/set the system prompt for the chat.
62
56
  */
63
- get prompt() {
64
- return this._prompt;
65
- }
66
- set prompt(value) {
67
- this._prompt = value;
57
+ get systemPrompt() {
58
+ return (this._providerRegistry.chatSystemPrompt ?? DEFAULT_CHAT_SYSTEM_PROMPT);
68
59
  }
69
60
  async sendMessage(message) {
70
61
  const body = message.body;
@@ -79,7 +70,7 @@ export class ChatHandler extends AbstractChatModel {
79
70
  id: message.id,
80
71
  body,
81
72
  sender: { username: 'User' },
82
- time: Date.now(),
73
+ time: Private.getTimestampMs(),
83
74
  type: 'msg'
84
75
  };
85
76
  this.messageAdded(msg);
@@ -88,14 +79,14 @@ export class ChatHandler extends AbstractChatModel {
88
79
  id: UUID.uuid4(),
89
80
  body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`,
90
81
  sender: { username: 'ERROR' },
91
- time: Date.now(),
82
+ time: Private.getTimestampMs(),
92
83
  type: 'msg'
93
84
  };
94
85
  this.messageAdded(errorMsg);
95
86
  return false;
96
87
  }
97
88
  this._history.messages.push(msg);
98
- const messages = mergeMessageRuns([new SystemMessage(this._prompt)]);
89
+ const messages = mergeMessageRuns([new SystemMessage(this.systemPrompt)]);
99
90
  messages.push(...this._history.messages.map(msg => {
100
91
  if (msg.sender.username === 'User') {
101
92
  return new HumanMessage(msg.body);
@@ -109,7 +100,7 @@ export class ChatHandler extends AbstractChatModel {
109
100
  id: UUID.uuid4(),
110
101
  body: '',
111
102
  sender,
112
- time: Date.now(),
103
+ time: Private.getTimestampMs(),
113
104
  type: 'msg'
114
105
  };
115
106
  let content = '';
@@ -129,7 +120,7 @@ export class ChatHandler extends AbstractChatModel {
129
120
  id: UUID.uuid4(),
130
121
  body: `**${error}**`,
131
122
  sender: { username: 'ERROR' },
132
- time: Date.now(),
123
+ time: Private.getTimestampMs(),
133
124
  type: 'msg'
134
125
  };
135
126
  this.messageAdded(errorMsg);
@@ -157,7 +148,6 @@ export class ChatHandler extends AbstractChatModel {
157
148
  }
158
149
  _providerRegistry;
159
150
  _personaName = 'AI';
160
- _prompt;
161
151
  _errorMessage = '';
162
152
  _history = { messages: [] };
163
153
  _defaultErrorMessage = 'AI provider not configured';
@@ -184,7 +174,7 @@ export class ChatHandler extends AbstractChatModel {
184
174
  description: 'Clear the chat'
185
175
  }
186
176
  ];
187
- async getChatCommands(inputModel) {
177
+ async listCommandCompletions(inputModel) {
188
178
  const match = inputModel.currentWord?.match(/^\/\w*/)?.[0];
189
179
  if (!match) {
190
180
  return [];
@@ -192,10 +182,20 @@ export class ChatHandler extends AbstractChatModel {
192
182
  const commands = this._slash_commands.filter(cmd => cmd.name.startsWith(match));
193
183
  return commands;
194
184
  }
195
- async handleChatCommand(command, inputModel) {
185
+ async onSubmit(inputModel) {
196
186
  // no handling needed because `replaceWith` is set in each command.
197
187
  return;
198
188
  }
199
189
  }
200
190
  ChatHandler.ClearCommandProvider = ClearCommandProvider;
201
191
  })(ChatHandler || (ChatHandler = {}));
192
+ var Private;
193
+ (function (Private) {
194
+ /**
195
+ * Return the current timestamp in milliseconds.
196
+ */
197
+ function getTimestampMs() {
198
+ return Date.now() / 1000;
199
+ }
200
+ Private.getTimestampMs = getTimestampMs;
201
+ })(Private || (Private = {}));
@@ -16,7 +16,7 @@ export class CompletionProvider {
16
16
  * Get the current completer name.
17
17
  */
18
18
  get name() {
19
- return this._providerRegistry.currentName;
19
+ return this._providerRegistry.currentName('completer');
20
20
  }
21
21
  /**
22
22
  * Get the current completer.
@@ -0,0 +1,2 @@
1
+ export declare const DEFAULT_CHAT_SYSTEM_PROMPT = "\nYou are Jupyternaut, a conversational assistant living in JupyterLab to help users.\nYou are not a language model, but rather an application built on a foundation model from $provider_name$.\nYou are talkative and you provide lots of specific details from the foundation model's context.\nYou may use Markdown to format your response.\nIf your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).\nIf your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.\nAll dollar quantities (of USD) must be formatted in LaTeX, with the `$` symbol escaped by a single backslash `\\`.\n- Example prompt: `If I have \\\\$100 and spend \\\\$20, how much money do I have left?`\n- **Correct** response: `You have \\(\\$80\\) remaining.`\n- **Incorrect** response: `You have $80 remaining.`\nIf you do not know the answer to a question, answer truthfully by responding that you do not know.\nThe following is a friendly conversation between you and a human.\n";
2
+ export declare const DEFAULT_COMPLETION_SYSTEM_PROMPT = "\nYou are an application built to provide helpful code completion suggestions.\nYou should only produce code. Keep comments to minimum, use the\nprogramming language comment syntax. Produce clean code.\nThe code is written in JupyterLab, a data analysis and code development\nenvironment which can execute code extended with additional syntax for\ninteractive features, such as magics.\nOnly give raw strings back, do not format the response using backticks.\nThe output should be a single string, and should only contain the code that will complete the\ngive code passed as input, no explanation whatsoever.\nDo not include the prompt in the output, only the string that should be appended to the current input.\nHere is the code to complete:\n";
@@ -0,0 +1,31 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+ export const DEFAULT_CHAT_SYSTEM_PROMPT = `
6
+ You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
7
+ You are not a language model, but rather an application built on a foundation model from $provider_name$.
8
+ You are talkative and you provide lots of specific details from the foundation model's context.
9
+ You may use Markdown to format your response.
10
+ If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).
11
+ If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.
12
+ All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`.
13
+ - Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\`
14
+ - **Correct** response: \`You have \\(\\$80\\) remaining.\`
15
+ - **Incorrect** response: \`You have $80 remaining.\`
16
+ If you do not know the answer to a question, answer truthfully by responding that you do not know.
17
+ The following is a friendly conversation between you and a human.
18
+ `;
19
+ export const DEFAULT_COMPLETION_SYSTEM_PROMPT = `
20
+ You are an application built to provide helpful code completion suggestions.
21
+ You should only produce code. Keep comments to minimum, use the
22
+ programming language comment syntax. Produce clean code.
23
+ The code is written in JupyterLab, a data analysis and code development
24
+ environment which can execute code extended with additional syntax for
25
+ interactive features, such as magics.
26
+ Only give raw strings back, do not format the response using backticks.
27
+ The output should be a single string, and should only contain the code that will complete the
28
+ give code passed as input, no explanation whatsoever.
29
+ Do not include the prompt in the output, only the string that should be appended to the current input.
30
+ Here is the code to complete:
31
+ `;
@@ -1,17 +1,12 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
3
- export declare class AnthropicCompleter implements IBaseCompleter {
2
+ import { ChatAnthropic } from '@langchain/anthropic';
3
+ import { BaseCompleter } from '../../base-completer';
4
+ export declare class AnthropicCompleter extends BaseCompleter {
4
5
  constructor(options: BaseCompleter.IOptions);
5
- /**
6
- * Getter and setter for the initial prompt.
7
- */
8
- get prompt(): string;
9
- set prompt(value: string);
10
6
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
11
7
  items: {
12
8
  insertText: string;
13
9
  }[];
14
10
  }>;
15
- private _completer;
16
- private _prompt;
11
+ protected _completer: ChatAnthropic;
17
12
  }
@@ -1,26 +1,18 @@
1
1
  import { ChatAnthropic } from '@langchain/anthropic';
2
2
  import { AIMessage, SystemMessage } from '@langchain/core/messages';
3
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
4
- export class AnthropicCompleter {
3
+ import { BaseCompleter } from '../../base-completer';
4
+ export class AnthropicCompleter extends BaseCompleter {
5
5
  constructor(options) {
6
+ super(options);
6
7
  this._completer = new ChatAnthropic({ ...options.settings });
7
8
  }
8
- /**
9
- * Getter and setter for the initial prompt.
10
- */
11
- get prompt() {
12
- return this._prompt;
13
- }
14
- set prompt(value) {
15
- this._prompt = value;
16
- }
17
9
  async fetch(request, context) {
18
10
  const { text, offset: cursorOffset } = request;
19
11
  const prompt = text.slice(0, cursorOffset);
20
12
  // Anthropic does not allow whitespace at the end of the AIMessage
21
13
  const trimmedPrompt = prompt.trim();
22
14
  const messages = [
23
- new SystemMessage(this._prompt),
15
+ new SystemMessage(this.systemPrompt),
24
16
  new AIMessage(trimmedPrompt)
25
17
  ];
26
18
  try {
@@ -51,5 +43,4 @@ export class AnthropicCompleter {
51
43
  }
52
44
  }
53
45
  _completer;
54
- _prompt = COMPLETION_SYSTEM_PROMPT;
55
46
  }
@@ -1,17 +1,12 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
3
- export declare class ChromeCompleter implements IBaseCompleter {
2
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
3
+ import { BaseCompleter } from '../../base-completer';
4
+ export declare class ChromeCompleter extends BaseCompleter {
4
5
  constructor(options: BaseCompleter.IOptions);
5
- /**
6
- * Getter and setter for the initial prompt.
7
- */
8
- get prompt(): string;
9
- set prompt(value: string);
10
6
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
11
7
  items: {
12
8
  insertText: string;
13
9
  }[];
14
10
  }>;
15
- private _completer;
16
- private _prompt;
11
+ protected _completer: ChromeAI;
17
12
  }
@@ -1,6 +1,6 @@
1
1
  import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
2
2
  import { HumanMessage, SystemMessage } from '@langchain/core/messages';
3
- import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
3
+ import { BaseCompleter } from '../../base-completer';
4
4
  /**
5
5
  * Regular expression to match the '```' string at the start of a string.
6
6
  * So the completions returned by the LLM can still be kept after removing the code block formatting.
@@ -20,25 +20,17 @@ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
20
20
  * Regular expression to match the '```' string at the end of a string.
21
21
  */
22
22
  const CODE_BLOCK_END_REGEX = /```$/;
23
- export class ChromeCompleter {
23
+ export class ChromeCompleter extends BaseCompleter {
24
24
  constructor(options) {
25
+ super(options);
25
26
  this._completer = new ChromeAI({ ...options.settings });
26
27
  }
27
- /**
28
- * Getter and setter for the initial prompt.
29
- */
30
- get prompt() {
31
- return this._prompt;
32
- }
33
- set prompt(value) {
34
- this._prompt = value;
35
- }
36
28
  async fetch(request, context) {
37
29
  const { text, offset: cursorOffset } = request;
38
30
  const prompt = text.slice(0, cursorOffset);
39
31
  const trimmedPrompt = prompt.trim();
40
32
  const messages = [
41
- new SystemMessage(this._prompt),
33
+ new SystemMessage(this.systemPrompt),
42
34
  new HumanMessage(trimmedPrompt)
43
35
  ];
44
36
  try {
@@ -61,5 +53,4 @@ export class ChromeCompleter {
61
53
  }
62
54
  }
63
55
  _completer;
64
- _prompt = COMPLETION_SYSTEM_PROMPT;
65
56
  }
@@ -11,9 +11,6 @@
11
11
  },
12
12
  "temperature": {
13
13
  "type": "number"
14
- },
15
- "systemPrompt": {
16
- "type": "string"
17
14
  }
18
15
  },
19
16
  "additionalProperties": false,
@@ -0,0 +1,12 @@
1
+ import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
3
+ import { BaseCompleter } from '../../base-completer';
4
+ export declare class GeminiCompleter extends BaseCompleter {
5
+ constructor(options: BaseCompleter.IOptions);
6
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
7
+ items: {
8
+ insertText: string;
9
+ }[];
10
+ }>;
11
+ protected _completer: ChatGoogleGenerativeAI;
12
+ }
@@ -0,0 +1,48 @@
1
+ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
2
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
3
+ import { BaseCompleter } from '../../base-completer';
4
+ export class GeminiCompleter extends BaseCompleter {
5
+ constructor(options) {
6
+ super(options);
7
+ this._completer = new ChatGoogleGenerativeAI({
8
+ model: 'gemini-pro',
9
+ ...options.settings
10
+ });
11
+ }
12
+ async fetch(request, context) {
13
+ const { text, offset: cursorOffset } = request;
14
+ const prompt = text.slice(0, cursorOffset);
15
+ const trimmedPrompt = prompt.trim();
16
+ const messages = [
17
+ new SystemMessage(this.systemPrompt),
18
+ new AIMessage(trimmedPrompt)
19
+ ];
20
+ try {
21
+ const response = await this._completer.invoke(messages);
22
+ const items = [];
23
+ // Gemini can return string or complex content, a list of string/images/other.
24
+ if (typeof response.content === 'string') {
25
+ items.push({
26
+ insertText: response.content
27
+ });
28
+ }
29
+ else {
30
+ response.content.forEach(content => {
31
+ if (content.type !== 'text') {
32
+ return;
33
+ }
34
+ items.push({
35
+ insertText: content.text,
36
+ filterText: prompt.substring(trimmedPrompt.length)
37
+ });
38
+ });
39
+ }
40
+ return { items };
41
+ }
42
+ catch (error) {
43
+ console.error('Error fetching completions', error);
44
+ return { items: [] };
45
+ }
46
+ }
47
+ _completer;
48
+ }
@@ -0,0 +1,2 @@
1
+ declare const _default: "\n<i class=\"fas fa-exclamation-triangle\"></i> This extension is still very much experimental. It is not an official Google extension.\n\n1. Go to <https://aistudio.google.com> and create an API key.\n\n2. Open the JupyterLab settings and go to the **Ai providers** section to select the `Gemini`\n provider and add your API key (required).\n3. Open the chat, or use the inline completer.\n";
2
+ export default _default;
@@ -0,0 +1,9 @@
1
+ export default `
2
+ <i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official Google extension.
3
+
4
+ 1. Go to <https://aistudio.google.com> and create an API key.
5
+
6
+ 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`Gemini\`
7
+ provider and add your API key (required).
8
+ 3. Open the chat, or use the inline completer.
9
+ `;
@@ -0,0 +1,64 @@
1
+ {
2
+ "$schema": "http://json-schema.org/draft-07/schema#",
3
+ "type": "object",
4
+ "properties": {
5
+ "temperature": {
6
+ "type": "number",
7
+ "description": "Amount of randomness injected into the response. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and temp closer to 1 for creative and generative tasks."
8
+ },
9
+ "topK": {
10
+ "type": "number",
11
+ "description": "Only sample from the top K options for each subsequent token. Used to remove \"long tail\" low probability responses."
12
+ },
13
+ "topP": {
14
+ "type": "number",
15
+ "description": "Nucleus sampling parameter. Only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation."
16
+ },
17
+ "maxOutputTokens": {
18
+ "type": "number",
19
+ "description": "The maximum number of tokens to generate in the response."
20
+ },
21
+ "stopSequences": {
22
+ "type": "array",
23
+ "items": {
24
+ "type": "string"
25
+ },
26
+ "description": "A list of strings upon which to stop generating. You probably want something like [\"\\n\\nHuman:\"] for chat conversations."
27
+ },
28
+ "streaming": {
29
+ "type": "boolean",
30
+ "description": "Whether to stream the results or not"
31
+ },
32
+ "apiKey": {
33
+ "type": "string",
34
+ "description": "Google AI Studio API key"
35
+ },
36
+ "model": {
37
+ "type": "string",
38
+ "description": "Model name to use (e.g., gemini-pro, gemini-2.0-flash, etc.)",
39
+ "default": "gemini-pro"
40
+ },
41
+ "baseURL": {
42
+ "type": "string",
43
+ "description": "Base URL for the Google AI API"
44
+ },
45
+ "safetySettings": {
46
+ "type": "array",
47
+ "description": "Safety settings for content filtering",
48
+ "items": {
49
+ "type": "object",
50
+ "properties": {
51
+ "category": {
52
+ "type": "string"
53
+ },
54
+ "threshold": {
55
+ "type": "string"
56
+ }
57
+ }
58
+ }
59
+ }
60
+ },
61
+ "additionalProperties": false,
62
+ "description": "Input to Google Generative AI Chat class.",
63
+ "definitions": {}
64
+ }
@@ -1,14 +1,13 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { BaseCompleter, IBaseCompleter } from '../../base-completer';
3
- export declare class CodestralCompleter implements IBaseCompleter {
2
+ import { MistralAI } from '@langchain/mistralai';
3
+ import { BaseCompleter } from '../../base-completer';
4
+ export declare class CodestralCompleter extends BaseCompleter {
4
5
  constructor(options: BaseCompleter.IOptions);
5
- /**
6
- * Getter and setter for the initial prompt.
7
- */
8
- get prompt(): string;
9
- set prompt(value: string);
10
- fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
11
- private _throttler;
12
- private _completer;
13
- private _prompt;
6
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
7
+ items: {
8
+ insertText: string;
9
+ }[];
10
+ }>;
11
+ private _controller;
12
+ protected _completer: MistralAI;
14
13
  }