@jupyterlite/ai 0.6.1 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/README.md +1 -1
  2. package/lib/base-completer.d.ts +0 -5
  3. package/lib/chat-handler.d.ts +19 -5
  4. package/lib/chat-handler.js +47 -26
  5. package/lib/completion-provider.d.ts +2 -2
  6. package/lib/completion-provider.js +4 -3
  7. package/lib/components/stop-button.d.ts +0 -1
  8. package/lib/default-providers/Anthropic/completer.d.ts +1 -3
  9. package/lib/default-providers/Anthropic/completer.js +4 -6
  10. package/lib/default-providers/ChromeAI/completer.d.ts +1 -3
  11. package/lib/default-providers/ChromeAI/completer.js +4 -6
  12. package/lib/default-providers/ChromeAI/instructions.d.ts +4 -0
  13. package/lib/default-providers/ChromeAI/instructions.js +18 -0
  14. package/lib/default-providers/MistralAI/completer.d.ts +1 -3
  15. package/lib/default-providers/MistralAI/completer.js +5 -6
  16. package/lib/default-providers/Ollama/completer.d.ts +17 -0
  17. package/lib/default-providers/Ollama/completer.js +49 -0
  18. package/lib/default-providers/Ollama/instructions.d.ts +2 -0
  19. package/lib/default-providers/Ollama/instructions.js +70 -0
  20. package/lib/default-providers/Ollama/settings-schema.json +146 -0
  21. package/lib/default-providers/OpenAI/completer.d.ts +1 -3
  22. package/lib/default-providers/OpenAI/completer.js +4 -6
  23. package/lib/default-providers/WebLLM/completer.d.ts +27 -0
  24. package/lib/default-providers/WebLLM/completer.js +136 -0
  25. package/lib/default-providers/WebLLM/instructions.d.ts +6 -0
  26. package/lib/default-providers/WebLLM/instructions.js +32 -0
  27. package/lib/default-providers/WebLLM/settings-schema.json +21 -0
  28. package/lib/default-providers/index.js +119 -4
  29. package/lib/index.d.ts +2 -2
  30. package/lib/index.js +16 -26
  31. package/lib/provider.d.ts +11 -13
  32. package/lib/provider.js +124 -54
  33. package/lib/settings/index.d.ts +0 -1
  34. package/lib/settings/index.js +0 -1
  35. package/lib/settings/panel.d.ts +44 -11
  36. package/lib/settings/panel.js +231 -130
  37. package/lib/tokens.d.ts +21 -2
  38. package/lib/types/ai-model.d.ts +24 -0
  39. package/lib/types/ai-model.js +5 -0
  40. package/package.json +15 -12
  41. package/schema/provider-registry.json +0 -6
  42. package/src/base-completer.ts +0 -6
  43. package/src/chat-handler.ts +40 -7
  44. package/src/completion-provider.ts +2 -2
  45. package/src/default-providers/Anthropic/completer.ts +3 -8
  46. package/src/default-providers/ChromeAI/completer.ts +3 -8
  47. package/src/default-providers/ChromeAI/instructions.ts +21 -0
  48. package/src/default-providers/MistralAI/completer.ts +3 -8
  49. package/src/default-providers/Ollama/completer.ts +62 -0
  50. package/src/default-providers/Ollama/instructions.ts +70 -0
  51. package/src/default-providers/OpenAI/completer.ts +3 -8
  52. package/src/default-providers/WebLLM/completer.ts +162 -0
  53. package/src/default-providers/WebLLM/instructions.ts +33 -0
  54. package/src/default-providers/index.ts +151 -14
  55. package/src/index.ts +17 -29
  56. package/src/provider.ts +135 -46
  57. package/src/settings/index.ts +0 -1
  58. package/src/settings/panel.tsx +223 -79
  59. package/src/tokens.ts +23 -2
  60. package/src/types/ai-model.ts +37 -0
  61. package/src/types/service-worker.d.ts +6 -0
  62. package/style/base.css +5 -0
  63. package/lib/settings/settings-connector.d.ts +0 -31
  64. package/lib/settings/settings-connector.js +0 -61
  65. package/src/settings/settings-connector.ts +0 -89
package/README.md CHANGED
@@ -14,7 +14,7 @@ AI code completions and chat for JupyterLab, Notebook 7 and JupyterLite ✨
14
14
  > To enable more AI providers in JupyterLab and Jupyter Notebook, we recommend using the [Jupyter AI](https://github.com/jupyterlab/jupyter-ai) extension directly.
15
15
  > At the moment Jupyter AI is not compatible with JupyterLite, but might be to some extent in the future.
16
16
 
17
- - JupyterLab >= 4.4.0a0 or Notebook >= 7.4.0a0
17
+ - JupyterLab >= 4.4.0 or Notebook >= 7.4.0
18
18
 
19
19
  ## ✨ Try it in your browser ✨
20
20
 
@@ -1,11 +1,6 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { BaseLanguageModel } from '@langchain/core/language_models/base';
3
2
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
4
3
  export interface IBaseCompleter {
5
- /**
6
- * The LLM completer.
7
- */
8
- provider: BaseLanguageModel;
9
4
  /**
10
5
  * The completion prompt.
11
6
  */
@@ -1,13 +1,14 @@
1
- import { ChatCommand, ChatModel, IChatCommandProvider, IChatHistory, IChatMessage, IInputModel, INewMessage } from '@jupyter/chat';
2
- import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
1
+ import { ChatCommand, AbstractChatContext, AbstractChatModel, IChatCommandProvider, IChatContext, IChatHistory, IChatMessage, IChatModel, IInputModel, INewMessage } from '@jupyter/chat';
3
2
  import { IAIProviderRegistry } from './tokens';
3
+ import { AIChatModel } from './types/ai-model';
4
+ export declare const welcomeMessage: (providers: string[]) => string;
4
5
  export type ConnectionMessage = {
5
6
  type: 'connection';
6
7
  client_id: string;
7
8
  };
8
- export declare class ChatHandler extends ChatModel {
9
+ export declare class ChatHandler extends AbstractChatModel {
9
10
  constructor(options: ChatHandler.IOptions);
10
- get provider(): BaseChatModel | null;
11
+ get provider(): AIChatModel | null;
11
12
  /**
12
13
  * Getter and setter for the persona name.
13
14
  */
@@ -23,6 +24,7 @@ export declare class ChatHandler extends ChatModel {
23
24
  dispose(): void;
24
25
  messageAdded(message: IChatMessage): void;
25
26
  stopStreaming(): void;
27
+ createChatContext(): IChatContext;
26
28
  private _providerRegistry;
27
29
  private _personaName;
28
30
  private _prompt;
@@ -32,9 +34,21 @@ export declare class ChatHandler extends ChatModel {
32
34
  private _controller;
33
35
  }
34
36
  export declare namespace ChatHandler {
35
- interface IOptions extends ChatModel.IOptions {
37
+ /**
38
+ * The options used to create a chat handler.
39
+ */
40
+ interface IOptions extends IChatModel.IOptions {
36
41
  providerRegistry: IAIProviderRegistry;
37
42
  }
43
+ /**
44
+ * The minimal chat context.
45
+ */
46
+ class ChatContext extends AbstractChatContext {
47
+ users: never[];
48
+ }
49
+ /**
50
+ * The chat command provider for the chat.
51
+ */
38
52
  class ClearCommandProvider implements IChatCommandProvider {
39
53
  id: string;
40
54
  private _slash_commands;
@@ -2,25 +2,31 @@
2
2
  * Copyright (c) Jupyter Development Team.
3
3
  * Distributed under the terms of the Modified BSD License.
4
4
  */
5
- import { ChatModel } from '@jupyter/chat';
5
+ import { AbstractChatContext, AbstractChatModel } from '@jupyter/chat';
6
6
  import { AIMessage, HumanMessage, mergeMessageRuns, SystemMessage } from '@langchain/core/messages';
7
7
  import { UUID } from '@lumino/coreutils';
8
- import { chatSystemPrompt } from './provider';
9
8
  import { jupyternautLiteIcon } from './icons';
9
+ import { chatSystemPrompt } from './provider';
10
10
  /**
11
11
  * The base64 encoded SVG string of the jupyternaut lite icon.
12
12
  * Encode so it can be passed as avatar_url to jupyter-chat.
13
13
  */
14
14
  const AI_AVATAR_BASE64 = btoa(jupyternautLiteIcon.svgstr);
15
15
  const AI_AVATAR = `data:image/svg+xml;base64,${AI_AVATAR_BASE64}`;
16
- export class ChatHandler extends ChatModel {
16
+ export const welcomeMessage = (providers) => `
17
+ #### Ask JupyterLite AI
18
+
19
+
20
+ The provider to use can be set in the settings editor, by selecting it from
21
+ the <img src="${AI_AVATAR}" width="16" height="16"> _AI provider_ settings.
22
+
23
+ The current providers that are available are _${providers.sort().join('_, _')}_.
24
+
25
+ To clear the chat, you can use the \`/clear\` command from the chat input.
26
+ `;
27
+ export class ChatHandler extends AbstractChatModel {
17
28
  constructor(options) {
18
29
  super(options);
19
- this._personaName = 'AI';
20
- this._errorMessage = '';
21
- this._history = { messages: [] };
22
- this._defaultErrorMessage = 'AI provider not configured';
23
- this._controller = null;
24
30
  this._providerRegistry = options.providerRegistry;
25
31
  this._prompt = chatSystemPrompt({
26
32
  provider_name: this._providerRegistry.currentName
@@ -61,7 +67,6 @@ export class ChatHandler extends ChatModel {
61
67
  this._prompt = value;
62
68
  }
63
69
  async sendMessage(message) {
64
- var _a;
65
70
  const body = message.body;
66
71
  if (body.startsWith('/clear')) {
67
72
  // TODO: do we need a clear method?
@@ -98,7 +103,7 @@ export class ChatHandler extends ChatModel {
98
103
  return new AIMessage(msg.body);
99
104
  }));
100
105
  const sender = { username: this._personaName, avatar_url: AI_AVATAR };
101
- this.updateWriters([sender]);
106
+ this.updateWriters([{ user: sender }]);
102
107
  // create an empty message to be filled by the AI provider
103
108
  const botMsg = {
104
109
  id: UUID.uuid4(),
@@ -111,7 +116,7 @@ export class ChatHandler extends ChatModel {
111
116
  this._controller = new AbortController();
112
117
  try {
113
118
  for await (const chunk of await this._providerRegistry.currentChatModel.stream(messages, { signal: this._controller.signal })) {
114
- content += (_a = chunk.content) !== null && _a !== void 0 ? _a : chunk;
119
+ content += chunk.content ?? chunk;
115
120
  botMsg.body = content;
116
121
  this.messageAdded(botMsg);
117
122
  }
@@ -145,26 +150,42 @@ export class ChatHandler extends ChatModel {
145
150
  super.messageAdded(message);
146
151
  }
147
152
  stopStreaming() {
148
- var _a;
149
- (_a = this._controller) === null || _a === void 0 ? void 0 : _a.abort();
153
+ this._controller?.abort();
154
+ }
155
+ createChatContext() {
156
+ return new ChatHandler.ChatContext({ model: this });
150
157
  }
158
+ _providerRegistry;
159
+ _personaName = 'AI';
160
+ _prompt;
161
+ _errorMessage = '';
162
+ _history = { messages: [] };
163
+ _defaultErrorMessage = 'AI provider not configured';
164
+ _controller = null;
151
165
  }
152
166
  (function (ChatHandler) {
167
+ /**
168
+ * The minimal chat context.
169
+ */
170
+ class ChatContext extends AbstractChatContext {
171
+ users = [];
172
+ }
173
+ ChatHandler.ChatContext = ChatContext;
174
+ /**
175
+ * The chat command provider for the chat.
176
+ */
153
177
  class ClearCommandProvider {
154
- constructor() {
155
- this.id = '@jupyterlite/ai:clear-commands';
156
- this._slash_commands = [
157
- {
158
- name: '/clear',
159
- providerId: this.id,
160
- replaceWith: '/clear',
161
- description: 'Clear the chat'
162
- }
163
- ];
164
- }
178
+ id = '@jupyterlite/ai:clear-commands';
179
+ _slash_commands = [
180
+ {
181
+ name: '/clear',
182
+ providerId: this.id,
183
+ replaceWith: '/clear',
184
+ description: 'Clear the chat'
185
+ }
186
+ ];
165
187
  async getChatCommands(inputModel) {
166
- var _a, _b;
167
- const match = (_b = (_a = inputModel.currentWord) === null || _a === void 0 ? void 0 : _a.match(/^\/\w*/)) === null || _b === void 0 ? void 0 : _b[0];
188
+ const match = inputModel.currentWord?.match(/^\/\w*/)?.[0];
168
189
  if (!match) {
169
190
  return [];
170
191
  }
@@ -1,6 +1,6 @@
1
1
  import { CompletionHandler, IInlineCompletionContext, IInlineCompletionProvider } from '@jupyterlab/completer';
2
- import { IBaseCompleter } from './base-completer';
3
2
  import { IAIProviderRegistry } from './tokens';
3
+ import { AICompleter } from './types/ai-model';
4
4
  /**
5
5
  * The generic completion provider to register to the completion provider manager.
6
6
  */
@@ -14,7 +14,7 @@ export declare class CompletionProvider implements IInlineCompletionProvider {
14
14
  /**
15
15
  * Get the current completer.
16
16
  */
17
- get completer(): IBaseCompleter | null;
17
+ get completer(): AICompleter | null;
18
18
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
19
19
  private _providerRegistry;
20
20
  private _requestCompletion;
@@ -2,8 +2,8 @@
2
2
  * The generic completion provider to register to the completion provider manager.
3
3
  */
4
4
  export class CompletionProvider {
5
+ identifier = '@jupyterlite/ai';
5
6
  constructor(options) {
6
- this.identifier = '@jupyterlite/ai';
7
7
  this._providerRegistry = options.providerRegistry;
8
8
  this._requestCompletion = options.requestCompletion;
9
9
  this._providerRegistry.providerChanged.connect(() => {
@@ -25,7 +25,8 @@ export class CompletionProvider {
25
25
  return this._providerRegistry.currentCompleter;
26
26
  }
27
27
  async fetch(request, context) {
28
- var _a;
29
- return (_a = this.completer) === null || _a === void 0 ? void 0 : _a.fetch(request, context);
28
+ return this.completer?.fetch(request, context);
30
29
  }
30
+ _providerRegistry;
31
+ _requestCompletion;
31
32
  }
@@ -1,4 +1,3 @@
1
- /// <reference types="react" />
2
1
  import { InputToolbarRegistry } from '@jupyter/chat';
3
2
  /**
4
3
  * Properties of the stop button.
@@ -1,9 +1,7 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
2
  import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
3
  export declare class AnthropicCompleter implements IBaseCompleter {
5
4
  constructor(options: BaseCompleter.IOptions);
6
- get provider(): BaseChatModel;
7
5
  /**
8
6
  * Getter and setter for the initial prompt.
9
7
  */
@@ -14,6 +12,6 @@ export declare class AnthropicCompleter implements IBaseCompleter {
14
12
  insertText: string;
15
13
  }[];
16
14
  }>;
17
- private _anthropicProvider;
15
+ private _completer;
18
16
  private _prompt;
19
17
  }
@@ -3,11 +3,7 @@ import { AIMessage, SystemMessage } from '@langchain/core/messages';
3
3
  import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
4
4
  export class AnthropicCompleter {
5
5
  constructor(options) {
6
- this._prompt = COMPLETION_SYSTEM_PROMPT;
7
- this._anthropicProvider = new ChatAnthropic({ ...options.settings });
8
- }
9
- get provider() {
10
- return this._anthropicProvider;
6
+ this._completer = new ChatAnthropic({ ...options.settings });
11
7
  }
12
8
  /**
13
9
  * Getter and setter for the initial prompt.
@@ -28,7 +24,7 @@ export class AnthropicCompleter {
28
24
  new AIMessage(trimmedPrompt)
29
25
  ];
30
26
  try {
31
- const response = await this._anthropicProvider.invoke(messages);
27
+ const response = await this._completer.invoke(messages);
32
28
  const items = [];
33
29
  // Anthropic can return string or complex content, a list of string/images/other.
34
30
  if (typeof response.content === 'string') {
@@ -54,4 +50,6 @@ export class AnthropicCompleter {
54
50
  return { items: [] };
55
51
  }
56
52
  }
53
+ _completer;
54
+ _prompt = COMPLETION_SYSTEM_PROMPT;
57
55
  }
@@ -1,5 +1,4 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { LLM } from '@langchain/core/language_models/llms';
3
2
  import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
3
  export declare class ChromeCompleter implements IBaseCompleter {
5
4
  constructor(options: BaseCompleter.IOptions);
@@ -8,12 +7,11 @@ export declare class ChromeCompleter implements IBaseCompleter {
8
7
  */
9
8
  get prompt(): string;
10
9
  set prompt(value: string);
11
- get provider(): LLM;
12
10
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
13
11
  items: {
14
12
  insertText: string;
15
13
  }[];
16
14
  }>;
17
- private _chromeProvider;
15
+ private _completer;
18
16
  private _prompt;
19
17
  }
@@ -22,8 +22,7 @@ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
22
22
  const CODE_BLOCK_END_REGEX = /```$/;
23
23
  export class ChromeCompleter {
24
24
  constructor(options) {
25
- this._prompt = COMPLETION_SYSTEM_PROMPT;
26
- this._chromeProvider = new ChromeAI({ ...options.settings });
25
+ this._completer = new ChromeAI({ ...options.settings });
27
26
  }
28
27
  /**
29
28
  * Getter and setter for the initial prompt.
@@ -34,9 +33,6 @@ export class ChromeCompleter {
34
33
  set prompt(value) {
35
34
  this._prompt = value;
36
35
  }
37
- get provider() {
38
- return this._chromeProvider;
39
- }
40
36
  async fetch(request, context) {
41
37
  const { text, offset: cursorOffset } = request;
42
38
  const prompt = text.slice(0, cursorOffset);
@@ -46,7 +42,7 @@ export class ChromeCompleter {
46
42
  new HumanMessage(trimmedPrompt)
47
43
  ];
48
44
  try {
49
- let response = await this._chromeProvider.invoke(messages);
45
+ let response = await this._completer.invoke(messages);
50
46
  // ChromeAI sometimes returns a string starting with '```',
51
47
  // so process the response to remove the code block delimiters
52
48
  if (CODE_BLOCK_START_REGEX.test(response)) {
@@ -64,4 +60,6 @@ export class ChromeCompleter {
64
60
  return { items: [] };
65
61
  }
66
62
  }
63
+ _completer;
64
+ _prompt = COMPLETION_SYSTEM_PROMPT;
67
65
  }
@@ -1,2 +1,6 @@
1
1
  declare const _default: "\n<i class=\"fas fa-exclamation-triangle\"></i> Support for ChromeAI is still experimental and only available in Google Chrome.\n\nYou can test ChromeAI is enabled in your browser by going to the following URL: <https://chromeai.org/>\n\nEnable the proper flags in Google Chrome.\n\n- chrome://flags/#prompt-api-for-gemini-nano\n - Select: `Enabled`\n- chrome://flags/#optimization-guide-on-device-model\n - Select: `Enabled BypassPrefRequirement`\n- chrome://components\n - Click `Check for Update` on Optimization Guide On Device Model to download the model\n- [Optional] chrome://flags/#text-safety-classifier\n\n<img src=\"https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c\" alt=\"A screenshot showing how to enable the ChromeAI flag in Google Chrome\" width=\"500px\">\n\nThen restart Chrome for these changes to take effect.\n\n<i class=\"fas fa-exclamation-triangle\"></i> On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing).\nDuring the download, ChromeAI may not be available via the extension.\n\n<i class=\"fa fa-info-circle\" aria-hidden=\"true\"></i> For more information about Chrome Built-in AI: <https://developer.chrome.com/docs/ai/get-started>\n";
2
2
  export default _default;
3
+ /**
4
+ * Check if the browser supports ChromeAI and the model is available.
5
+ */
6
+ export declare function compatibilityCheck(): Promise<string | null>;
@@ -22,3 +22,21 @@ During the download, ChromeAI may not be available via the extension.
22
22
 
23
23
  <i class="fa fa-info-circle" aria-hidden="true"></i> For more information about Chrome Built-in AI: <https://developer.chrome.com/docs/ai/get-started>
24
24
  `;
25
+ /**
26
+ * Check if the browser supports ChromeAI and the model is available.
27
+ */
28
+ export async function compatibilityCheck() {
29
+ // Check if the browser supports the ChromeAI model
30
+ if (typeof window === 'undefined' ||
31
+ !('LanguageModel' in window) ||
32
+ window.LanguageModel === undefined ||
33
+ window.LanguageModel.availability === undefined) {
34
+ return 'Your browser does not support ChromeAI. Please use an updated chrome based browser like Google Chrome, and follow the instructions in settings to enable it.';
35
+ }
36
+ const languageModel = window.LanguageModel;
37
+ if (!(await languageModel.availability())) {
38
+ return 'The ChromeAI model is not available in your browser. Please ensure you have enabled the necessary flags in Google Chrome as described in the instructions in settings.';
39
+ }
40
+ // If the model is available, return null to indicate compatibility
41
+ return null;
42
+ }
@@ -1,9 +1,7 @@
1
1
  import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
- import { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
2
  import { BaseCompleter, IBaseCompleter } from '../../base-completer';
4
3
  export declare class CodestralCompleter implements IBaseCompleter {
5
4
  constructor(options: BaseCompleter.IOptions);
6
- get provider(): BaseChatModel;
7
5
  /**
8
6
  * Getter and setter for the initial prompt.
9
7
  */
@@ -11,6 +9,6 @@ export declare class CodestralCompleter implements IBaseCompleter {
11
9
  set prompt(value: string);
12
10
  fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
13
11
  private _throttler;
14
- private _mistralProvider;
12
+ private _completer;
15
13
  private _prompt;
16
14
  }
@@ -8,10 +8,9 @@ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8
8
  const INTERVAL = 1000;
9
9
  export class CodestralCompleter {
10
10
  constructor(options) {
11
- this._prompt = COMPLETION_SYSTEM_PROMPT;
12
- this._mistralProvider = new ChatMistralAI({ ...options.settings });
11
+ this._completer = new ChatMistralAI({ ...options.settings });
13
12
  this._throttler = new Throttler(async (messages) => {
14
- const response = await this._mistralProvider.invoke(messages);
13
+ const response = await this._completer.invoke(messages);
15
14
  // Extract results of completion request.
16
15
  const items = [];
17
16
  if (typeof response.content === 'string') {
@@ -32,9 +31,6 @@ export class CodestralCompleter {
32
31
  return { items };
33
32
  }, { limit: INTERVAL });
34
33
  }
35
- get provider() {
36
- return this._mistralProvider;
37
- }
38
34
  /**
39
35
  * Getter and setter for the initial prompt.
40
36
  */
@@ -59,4 +55,7 @@ export class CodestralCompleter {
59
55
  return { items: [] };
60
56
  }
61
57
  }
58
+ _throttler;
59
+ _completer;
60
+ _prompt = COMPLETION_SYSTEM_PROMPT;
62
61
  }
@@ -0,0 +1,17 @@
1
+ import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { BaseCompleter, IBaseCompleter } from '../../base-completer';
3
+ export declare class OllamaCompleter implements IBaseCompleter {
4
+ constructor(options: BaseCompleter.IOptions);
5
+ /**
6
+ * Getter and setter for the initial prompt.
7
+ */
8
+ get prompt(): string;
9
+ set prompt(value: string);
10
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
11
+ items: {
12
+ insertText: string;
13
+ }[];
14
+ }>;
15
+ private _completer;
16
+ private _prompt;
17
+ }
@@ -0,0 +1,49 @@
1
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
2
+ import { ChatOllama } from '@langchain/ollama';
3
+ import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
4
+ export class OllamaCompleter {
5
+ constructor(options) {
6
+ this._completer = new ChatOllama({ ...options.settings });
7
+ }
8
+ /**
9
+ * Getter and setter for the initial prompt.
10
+ */
11
+ get prompt() {
12
+ return this._prompt;
13
+ }
14
+ set prompt(value) {
15
+ this._prompt = value;
16
+ }
17
+ async fetch(request, context) {
18
+ const { text, offset: cursorOffset } = request;
19
+ const prompt = text.slice(0, cursorOffset);
20
+ const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
21
+ try {
22
+ const response = await this._completer.invoke(messages);
23
+ const items = [];
24
+ if (typeof response.content === 'string') {
25
+ items.push({
26
+ insertText: response.content
27
+ });
28
+ }
29
+ else {
30
+ response.content.forEach(content => {
31
+ if (content.type !== 'text') {
32
+ return;
33
+ }
34
+ items.push({
35
+ insertText: content.text,
36
+ filterText: prompt.substring(prompt.length)
37
+ });
38
+ });
39
+ }
40
+ return { items };
41
+ }
42
+ catch (error) {
43
+ console.error('Error fetching completions', error);
44
+ return { items: [] };
45
+ }
46
+ }
47
+ _completer;
48
+ _prompt = COMPLETION_SYSTEM_PROMPT;
49
+ }
@@ -0,0 +1,2 @@
1
+ declare const _default: "\nOllama allows to run large language models locally on your machine.\nTo use it you need to install the Ollama CLI and pull the model you want to use.\n\n1. Install the Ollama CLI by following the instructions at <https://ollama.com/download>\n\n2. Pull the model you want to use by running the following command in your terminal:\n\n ```bash\n ollama pull <model-name>\n ```\n\n For example, to pull the Llama 2 model, run:\n\n ```bash\n ollama pull llama2\n ```\n\n3. Once the model is pulled, you can use it in your application by running the following command:\n\n ```bash\n ollama serve\n ```\n\n4. This model will be available in the extension, using the model name you used in the command above.\n\n<details>\n<summary>Deploying Lite/Lab on external server</summary>\n\nSee https://objectgraph.com/blog/ollama-cors/ for more details.\n\nOn Linux, you can run the following commands:\n\n1. Check if CORS is enabled on the server. You can do this by running the following command in your terminal:\n\n ```bash\n curl -X OPTIONS http://localhost:11434 -H \"Origin: http://example.com\" -H \"Access-Control-Request-Method: GET\" -I\n ```\n\n If CORS is disabled, you will see a response like this:\n\n ```bash\n HTTP/1.1 403 Forbidden\n Date: Wed, 09 Oct 2024 10:12:15 GMT\n Content-Length: 0\n ```\n\n2. If CORS is not enabled, update _/etc/systemd/system/ollama.service_ with:\n\n ```bash\n [Service]\n Environment=\"OLLAMA_HOST=0.0.0.0\"\n Environment=\"OLLAMA_ORIGINS=*\"\n ```\n\n3. Restart the service:\n\n ```bash\n sudo systemctl daemon-reload\n sudo systemctl restart ollama\n ```\n\n4. Check if CORS is enabled on the server again by running the following command in your terminal:\n\n ```bash\n curl -X OPTIONS http://localhost:11434 -H \"Origin: http://example.com\" -H \"Access-Control-Request-Method: GET\" -I\n ```\n\n</details>\n";
2
+ export default _default;
@@ -0,0 +1,70 @@
1
+ export default `
2
+ Ollama allows to run large language models locally on your machine.
3
+ To use it you need to install the Ollama CLI and pull the model you want to use.
4
+
5
+ 1. Install the Ollama CLI by following the instructions at <https://ollama.com/download>
6
+
7
+ 2. Pull the model you want to use by running the following command in your terminal:
8
+
9
+ \`\`\`bash
10
+ ollama pull <model-name>
11
+ \`\`\`
12
+
13
+ For example, to pull the Llama 2 model, run:
14
+
15
+ \`\`\`bash
16
+ ollama pull llama2
17
+ \`\`\`
18
+
19
+ 3. Once the model is pulled, you can use it in your application by running the following command:
20
+
21
+ \`\`\`bash
22
+ ollama serve
23
+ \`\`\`
24
+
25
+ 4. This model will be available in the extension, using the model name you used in the command above.
26
+
27
+ <details>
28
+ <summary>Deploying Lite/Lab on external server</summary>
29
+
30
+ See https://objectgraph.com/blog/ollama-cors/ for more details.
31
+
32
+ On Linux, you can run the following commands:
33
+
34
+ 1. Check if CORS is enabled on the server. You can do this by running the following command in your terminal:
35
+
36
+ \`\`\`bash
37
+ curl -X OPTIONS http://localhost:11434 -H "Origin: http://example.com" -H "Access-Control-Request-Method: GET" -I
38
+ \`\`\`
39
+
40
+ If CORS is disabled, you will see a response like this:
41
+
42
+ \`\`\`bash
43
+ HTTP/1.1 403 Forbidden
44
+ Date: Wed, 09 Oct 2024 10:12:15 GMT
45
+ Content-Length: 0
46
+ \`\`\`
47
+
48
+ 2. If CORS is not enabled, update _/etc/systemd/system/ollama.service_ with:
49
+
50
+ \`\`\`bash
51
+ [Service]
52
+ Environment="OLLAMA_HOST=0.0.0.0"
53
+ Environment="OLLAMA_ORIGINS=*"
54
+ \`\`\`
55
+
56
+ 3. Restart the service:
57
+
58
+ \`\`\`bash
59
+ sudo systemctl daemon-reload
60
+ sudo systemctl restart ollama
61
+ \`\`\`
62
+
63
+ 4. Check if CORS is enabled on the server again by running the following command in your terminal:
64
+
65
+ \`\`\`bash
66
+ curl -X OPTIONS http://localhost:11434 -H "Origin: http://example.com" -H "Access-Control-Request-Method: GET" -I
67
+ \`\`\`
68
+
69
+ </details>
70
+ `;