@jupyterlite/ai 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,30 @@
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2024, JupyterLite Contributors
4
+ Copyright (c) 2024, Jeremy Tuloup
5
+ All rights reserved.
6
+
7
+ Redistribution and use in source and binary forms, with or without
8
+ modification, are permitted provided that the following conditions are met:
9
+
10
+ 1. Redistributions of source code must retain the above copyright notice, this
11
+ list of conditions and the following disclaimer.
12
+
13
+ 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ this list of conditions and the following disclaimer in the documentation
15
+ and/or other materials provided with the distribution.
16
+
17
+ 3. Neither the name of the copyright holder nor the names of its
18
+ contributors may be used to endorse or promote products derived from
19
+ this software without specific prior written permission.
20
+
21
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package/README.md ADDED
@@ -0,0 +1,112 @@
1
+ # jupyterlite-ai
2
+
3
+ [![Github Actions Status](https://github.com/jupyterlite/ai/workflows/Build/badge.svg)](https://github.com/jupyterlite/ai/actions/workflows/build.yml)
4
+ [![lite-badge](https://jupyterlite.rtfd.io/en/latest/_static/badge.svg)](https://jupyterlite.github.io/ai/lab/index.html)
5
+
6
+ AI code completions and chat for JupyterLab, Notebook 7 and JupyterLite, powered by MistralAI ✨
7
+
8
+ [a screencast showing the Codestral extension in JupyterLite](https://github.com/jupyterlite/ai/assets/591645/855c4e3e-3a63-4868-8052-5c9909922c21)
9
+
10
+ ## Requirements
11
+
12
+ > [!NOTE]
13
+ > This extension is meant to be used in JupyterLite to enable AI code completions and chat in the browser, with a specific provider.
14
+ > To enable more AI providers in JupyterLab and Jupyter Notebook, we recommend using the [Jupyter AI](https://github.com/jupyterlab/jupyter-ai) extension directly.
15
+ > At the moment Jupyter AI is not compatible with JupyterLite, but might be to some extent in the future.
16
+
17
+ - JupyterLab >= 4.1.0 or Notebook >= 7.1.0
18
+
19
+ > [!WARNING]
20
+ > This extension is still very much experimental. It is not an official MistralAI extension.
21
+ > It is exploring the integration of the MistralAI API with JupyterLab, which can also be used in [JupyterLite](https://jupyterlite.readthedocs.io/).
22
+ > For a more complete AI extension for JupyterLab, see [Jupyter AI](https://github.com/jupyterlab/jupyter-ai).
23
+
24
+ ## ✨ Try it in your browser ✨
25
+
26
+ You can try the extension in your browser using JupyterLite:
27
+
28
+ [![lite-badge](https://jupyterlite.rtfd.io/en/latest/_static/badge.svg)](https://jupyterlite.github.io/ai/lab/index.html)
29
+
30
+ See the [Usage](#usage) section below for more information on how to provide your API key.
31
+
32
+ ## Install
33
+
34
+ To install the extension, execute:
35
+
36
+ ```bash
37
+ pip install jupyterlite-ai
38
+ ```
39
+
40
+ ## Usage
41
+
42
+ 1. Go to https://console.mistral.ai/api-keys/ and create an API key.
43
+
44
+ ![Screenshot showing how to create an API key](./img/1-api-key.png)
45
+
46
+ 2. Open the JupyterLab settings and go to the Codestral section to enter the API key
47
+
48
+ ![Screenshot showing how to add the API key to the settings](./img/2-jupyterlab-settings.png)
49
+
50
+ 3. Open the chat, or use the inline completer
51
+
52
+ ![Screenshot showing how to use the chat](./img/3-usage.png)
53
+
54
+ ## Uninstall
55
+
56
+ To remove the extension, execute:
57
+
58
+ ```bash
59
+ pip uninstall jupyterlite-ai
60
+ ```
61
+
62
+ ## Contributing
63
+
64
+ ### Development install
65
+
66
+ Note: You will need NodeJS to build the extension package.
67
+
68
+ The `jlpm` command is JupyterLab's pinned version of
69
+ [yarn](https://yarnpkg.com/) that is installed with JupyterLab. You may use
70
+ `yarn` or `npm` in lieu of `jlpm` below.
71
+
72
+ ```bash
73
+ # Clone the repo to your local environment
74
+ # Change directory to the jupyterlite_ai directory
75
+ # Install package in development mode
76
+ pip install -e "."
77
+ # Link your development version of the extension with JupyterLab
78
+ jupyter labextension develop . --overwrite
79
+ # Rebuild extension TypeScript source after making changes
80
+ jlpm build
81
+ ```
82
+
83
+ You can watch the source directory and run JupyterLab at the same time in different terminals to watch for changes in the extension's source and automatically rebuild the extension.
84
+
85
+ ```bash
86
+ # Watch the source directory in one terminal, automatically rebuilding when needed
87
+ jlpm watch
88
+ # Run JupyterLab in another terminal
89
+ jupyter lab
90
+ ```
91
+
92
+ With the watch command running, every saved change will immediately be built locally and available in your running JupyterLab. Refresh JupyterLab to load the change in your browser (you may need to wait several seconds for the extension to be rebuilt).
93
+
94
+ By default, the `jlpm build` command generates the source maps for this extension to make it easier to debug using the browser dev tools. To also generate source maps for the JupyterLab core extensions, you can run the following command:
95
+
96
+ ```bash
97
+ jupyter lab build --minimize=False
98
+ ```
99
+
100
+ ### Development uninstall
101
+
102
+ ```bash
103
+ pip uninstall jupyterlite-ai
104
+ ```
105
+
106
+ In development mode, you will also need to remove the symlink created by `jupyter labextension develop`
107
+ command. To find its location, you can run `jupyter labextension list` to figure out where the `labextensions`
108
+ folder is located. Then you can remove the symlink named `@jupyterlite/ai` within that folder.
109
+
110
+ ### Packaging the extension
111
+
112
+ See [RELEASE](RELEASE.md)
@@ -0,0 +1,24 @@
1
+ import { ChatModel, IChatHistory, IChatMessage, INewMessage } from '@jupyter/chat';
2
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
3
+ import { IAIProvider } from './token';
4
+ export type ConnectionMessage = {
5
+ type: 'connection';
6
+ client_id: string;
7
+ };
8
+ export declare class ChatHandler extends ChatModel {
9
+ constructor(options: ChatHandler.IOptions);
10
+ get provider(): BaseChatModel | null;
11
+ sendMessage(message: INewMessage): Promise<boolean>;
12
+ getHistory(): Promise<IChatHistory>;
13
+ dispose(): void;
14
+ messageAdded(message: IChatMessage): void;
15
+ private _aiProvider;
16
+ private _errorMessage;
17
+ private _history;
18
+ private _defaultErrorMessage;
19
+ }
20
+ export declare namespace ChatHandler {
21
+ interface IOptions extends ChatModel.IOptions {
22
+ aiProvider: IAIProvider;
23
+ }
24
+ }
@@ -0,0 +1,92 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+ import { ChatModel } from '@jupyter/chat';
6
+ import { AIMessage, HumanMessage, mergeMessageRuns } from '@langchain/core/messages';
7
+ import { UUID } from '@lumino/coreutils';
8
+ import { getErrorMessage } from './llm-models';
9
/**
 * Chat model connecting the Jupyter chat UI to the configured AI provider.
 *
 * User messages are forwarded to the provider's langchain chat model; the
 * replies (or error notices) are appended back into the chat.
 */
export class ChatHandler extends ChatModel {
  constructor(options) {
    super(options);
    this._errorMessage = '';
    this._history = { messages: [] };
    this._defaultErrorMessage = 'AI provider not configured';
    this._aiProvider = options.aiProvider;
    // Keep the latest provider error around so it can be surfaced on send.
    this._aiProvider.modelChange.connect(() => {
      this._errorMessage = this._aiProvider.chatError;
    });
  }

  /**
   * The underlying langchain chat model, or null when none is configured.
   */
  get provider() {
    return this._aiProvider.chatModel;
  }

  /**
   * Send a user message to the AI provider and append the reply to the chat.
   *
   * @param message - the new message from the user.
   * @returns whether a reply was successfully received.
   */
  async sendMessage(message) {
    message.id = UUID.uuid4();
    const userMessage = {
      id: message.id,
      body: message.body,
      sender: { username: 'User' },
      time: Date.now(),
      type: 'msg'
    };
    this.messageAdded(userMessage);

    // Without a configured model, surface the provider error in the chat.
    if (this._aiProvider.chatModel === null) {
      this.messageAdded({
        id: UUID.uuid4(),
        body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`,
        sender: { username: 'ERROR' },
        time: Date.now(),
        type: 'msg'
      });
      return false;
    }

    this._history.messages.push(userMessage);

    // Convert the chat history into langchain messages, merging consecutive
    // messages from the same role.
    const llmMessages = mergeMessageRuns(
      this._history.messages.map(m =>
        m.sender.username === 'User' ? new HumanMessage(m.body) : new AIMessage(m.body)
      )
    );

    this.updateWriters([{ username: 'AI' }]);
    try {
      const response = await this._aiProvider.chatModel.invoke(llmMessages);
      const reply = {
        id: UUID.uuid4(),
        body: response.content.toString(),
        sender: { username: 'AI' },
        time: Date.now(),
        type: 'msg'
      };
      this.messageAdded(reply);
      this._history.messages.push(reply);
      return true;
    } catch (reason) {
      // Show the provider-specific error message in the chat.
      this.messageAdded({
        id: UUID.uuid4(),
        body: `**${getErrorMessage(this._aiProvider.name, reason)}**`,
        sender: { username: 'ERROR' },
        time: Date.now(),
        type: 'msg'
      });
      return false;
    } finally {
      this.updateWriters([]);
    }
  }

  /**
   * Return the full chat history.
   */
  async getHistory() {
    return this._history;
  }

  /**
   * Dispose of the chat handler.
   */
  dispose() {
    super.dispose();
  }

  /**
   * Register a new message in the chat.
   */
  messageAdded(message) {
    super.messageAdded(message);
  }
}
@@ -0,0 +1,40 @@
1
+ import { CompletionHandler, IInlineCompletionContext, IInlineCompletionProvider } from '@jupyterlab/completer';
2
+ import { LLM } from '@langchain/core/language_models/llms';
3
+ import { IBaseCompleter, BaseCompleter } from './llm-models';
4
+ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
5
+ /**
6
+ * The generic completion provider to register to the completion provider manager.
7
+ */
8
+ export declare class CompletionProvider implements IInlineCompletionProvider {
9
+ readonly identifier = "@jupyterlite/ai";
10
+ constructor(options: CompletionProvider.IOptions);
11
+ /**
12
+ * Set the completer.
13
+ *
14
+ * @param name - the name of the completer.
15
+ * @param settings - The settings associated to the completer.
16
+ */
17
+ setCompleter(name: string, settings: ReadonlyPartialJSONObject): void;
18
+ /**
19
+ * Get the current completer name.
20
+ */
21
+ get name(): string;
22
+ /**
23
+ * Get the current completer.
24
+ */
25
+ get completer(): IBaseCompleter | null;
26
+ /**
27
+ * Get the LLM completer.
28
+ */
29
+ get llmCompleter(): LLM | null;
30
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
31
+ private _name;
32
+ private _requestCompletion;
33
+ private _completer;
34
+ }
35
+ export declare namespace CompletionProvider {
36
+ interface IOptions extends BaseCompleter.IOptions {
37
+ name: string;
38
+ requestCompletion: () => void;
39
+ }
40
+ }
@@ -0,0 +1,57 @@
1
+ import { getCompleter } from './llm-models';
2
+ /**
3
+ * The generic completion provider to register to the completion provider manager.
4
+ */
5
/**
 * The generic completion provider to register to the completion provider manager.
 */
export class CompletionProvider {
  /**
   * @param options - the completer name, its settings and the callback used to
   *   request a new completion fetch.
   */
  constructor(options) {
    this.identifier = '@jupyterlite/ai';
    this._name = 'None';
    this._completer = null;
    this._requestCompletion = options.requestCompletion;
    this.setCompleter(options.name, options.settings);
  }

  /**
   * Set the completer.
   *
   * @param name - the name of the completer.
   * @param settings - The settings associated to the completer.
   */
  setCompleter(name, settings) {
    try {
      this._completer = getCompleter(name, settings);
      if (this._completer) {
        this._completer.requestCompletion = this._requestCompletion;
      }
      this._name = this._completer === null ? 'None' : name;
    } catch (e) {
      // Reset to a clean state before propagating the error.
      this._completer = null;
      this._name = 'None';
      throw e;
    }
  }

  /**
   * Get the current completer name.
   */
  get name() {
    return this._name;
  }

  /**
   * Get the current completer.
   */
  get completer() {
    return this._completer;
  }

  /**
   * Get the LLM completer.
   */
  get llmCompleter() {
    return this._completer?.provider || null;
  }

  /**
   * Delegate the fetch request to the current completer, if any.
   */
  async fetch(request, context) {
    return this._completer?.fetch(request, context);
  }
}
package/lib/index.d.ts ADDED
@@ -0,0 +1,4 @@
1
+ import { JupyterFrontEndPlugin } from '@jupyterlab/application';
2
+ import { IAIProvider } from './token';
3
+ declare const _default: (JupyterFrontEndPlugin<void> | JupyterFrontEndPlugin<IAIProvider>)[];
4
+ export default _default;
package/lib/index.js ADDED
@@ -0,0 +1,90 @@
1
+ import { ActiveCellManager, buildChatSidebar, buildErrorWidget } from '@jupyter/chat';
2
+ import { IThemeManager } from '@jupyterlab/apputils';
3
+ import { ICompletionProviderManager } from '@jupyterlab/completer';
4
+ import { INotebookTracker } from '@jupyterlab/notebook';
5
+ import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
6
+ import { ISettingRegistry } from '@jupyterlab/settingregistry';
7
+ import { ChatHandler } from './chat-handler';
8
+ import { AIProvider } from './provider';
9
+ import { IAIProvider } from './token';
10
/**
 * Plugin adding the AI chat sidebar to the application.
 */
const chatPlugin = {
  id: '@jupyterlite/ai:chat',
  description: 'LLM chat extension',
  autoStart: true,
  optional: [INotebookTracker, ISettingRegistry, IThemeManager],
  requires: [IAIProvider, IRenderMimeRegistry],
  activate: async (app, aiProvider, rmRegistry, notebookTracker, settingsRegistry, themeManager) => {
    // The active cell manager is only available when a notebook tracker is.
    const activeCellManager = notebookTracker
      ? new ActiveCellManager({
          tracker: notebookTracker,
          shell: app.shell
        })
      : null;

    const chatHandler = new ChatHandler({
      aiProvider: aiProvider,
      activeCellManager: activeCellManager
    });

    let sendWithShiftEnter = false;
    let enableCodeToolbar = true;

    // Propagate the current settings values to the chat handler.
    const loadSetting = setting => {
      sendWithShiftEnter = setting.get('sendWithShiftEnter').composite;
      enableCodeToolbar = setting.get('enableCodeToolbar').composite;
      chatHandler.config = { sendWithShiftEnter, enableCodeToolbar };
    };

    Promise.all([app.restored, settingsRegistry?.load(chatPlugin.id)])
      .then(([, settings]) => {
        if (!settings) {
          console.warn('The SettingsRegistry is not loaded for the chat extension');
          return;
        }
        loadSetting(settings);
        settings.changed.connect(loadSetting);
      })
      .catch(reason => {
        console.error(`Something went wrong when reading the settings.\n${reason}`);
      });

    // Fall back to an error widget if the sidebar cannot be built.
    let chatWidget = null;
    try {
      chatWidget = buildChatSidebar({
        model: chatHandler,
        themeManager,
        rmRegistry
      });
      chatWidget.title.caption = 'Codestral Chat';
    } catch (e) {
      chatWidget = buildErrorWidget(themeManager);
    }

    app.shell.add(chatWidget, 'left', { rank: 2000 });
    console.log('Chat extension initialized');
  }
};

/**
 * Plugin providing the AI provider token, wired to the settings registry.
 */
const aiProviderPlugin = {
  id: '@jupyterlite/ai:ai-provider',
  autoStart: true,
  requires: [ICompletionProviderManager, ISettingRegistry],
  provides: IAIProvider,
  activate: (app, manager, settingRegistry) => {
    const aiProvider = new AIProvider({
      completionProviderManager: manager,
      requestCompletion: () => app.commands.execute('inline-completer:invoke')
    });

    settingRegistry
      .load(aiProviderPlugin.id)
      .then(settings => {
        // Rebuild the models whenever the provider setting changes.
        const updateProvider = () => {
          const provider = settings.get('provider').composite;
          aiProvider.setModels(provider, settings.composite);
        };
        settings.changed.connect(() => updateProvider());
        updateProvider();
      })
      .catch(reason => {
        console.error(`Failed to load settings for ${aiProviderPlugin.id}`, reason);
      });

    return aiProvider;
  }
};

export default [chatPlugin, aiProviderPlugin];
@@ -0,0 +1,28 @@
1
+ import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { LLM } from '@langchain/core/language_models/llms';
3
+ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
4
+ export interface IBaseCompleter {
5
+ /**
6
+ * The LLM completer.
7
+ */
8
+ provider: LLM;
9
+ /**
10
+ * The function to fetch a new completion.
11
+ */
12
+ requestCompletion?: () => void;
13
+ /**
14
+ * The fetch request for the LLM completer.
15
+ */
16
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<any>;
17
+ }
18
+ /**
19
+ * The namespace for the base completer.
20
+ */
21
+ export declare namespace BaseCompleter {
22
+ /**
23
+ * The options for the constructor of a completer.
24
+ */
25
+ interface IOptions {
26
+ settings: ReadonlyPartialJSONObject;
27
+ }
28
+ }
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,15 @@
1
+ import { CompletionHandler, IInlineCompletionContext } from '@jupyterlab/completer';
2
+ import { LLM } from '@langchain/core/language_models/llms';
3
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
4
+ export declare class CodestralCompleter implements IBaseCompleter {
5
+ constructor(options: BaseCompleter.IOptions);
6
+ get provider(): LLM;
7
+ set requestCompletion(value: () => void);
8
+ fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<{
9
+ items: any;
10
+ }>;
11
+ private _requestCompletion?;
12
+ private _throttler;
13
+ private _mistralProvider;
14
+ private _currentData;
15
+ }
@@ -0,0 +1,79 @@
1
+ import { MistralAI } from '@langchain/mistralai';
2
+ import { Throttler } from '@lumino/polling';
3
+ /**
4
+ * The Mistral API has a rate limit of 1 request per second
5
+ */
6
+ const INTERVAL = 1000;
7
+ /**
8
+ * Timeout to avoid endless requests
9
+ */
10
+ const REQUEST_TIMEOUT = 3000;
11
/**
 * Inline completer backed by the MistralAI (Codestral) completion endpoint.
 *
 * Requests are throttled (the API is rate limited), given a timeout, and
 * discarded when the prompt changed while the request was in flight.
 */
export class CodestralCompleter {
  constructor(options) {
    this._currentData = null;
    this._mistralProvider = new MistralAI({ ...options.settings });
    this._throttler = new Throttler(
      async data => {
        const invokedData = data;
        // Request completion.
        const request = this._mistralProvider.completionWithRetry(data, {}, false);
        const timeoutPromise = new Promise(resolve =>
          setTimeout(() => resolve(null), REQUEST_TIMEOUT)
        );
        // Fetch again if the request is too long or if the prompt has changed.
        const response = await Promise.race([request, timeoutPromise]);
        if (response === null || invokedData.prompt !== this._currentData?.prompt) {
          return {
            items: [],
            fetchAgain: true
          };
        }
        // Extract results of completion request.
        const items = response.choices.map(choice => ({
          insertText: choice.message.content
        }));
        return { items };
      },
      { limit: INTERVAL }
    );
  }

  /**
   * The LLM completer.
   */
  get provider() {
    return this._mistralProvider;
  }

  /**
   * Set the callback used to ask the manager for a new completion fetch.
   */
  set requestCompletion(value) {
    this._requestCompletion = value;
  }

  /**
   * Fetch completion items for the text around the cursor.
   */
  async fetch(request, context) {
    const { text, offset: cursorOffset } = request;
    const data = {
      prompt: text.slice(0, cursorOffset),
      suffix: text.slice(cursorOffset),
      model: this._mistralProvider.model,
      stream: false,
      stop: []
    };
    try {
      this._currentData = data;
      const completionResult = await this._throttler.invoke(data);
      // A stale or timed-out request triggers a fresh fetch, when possible.
      if (completionResult.fetchAgain && this._requestCompletion) {
        this._requestCompletion();
      }
      return { items: completionResult.items };
    } catch (error) {
      console.error('Error fetching completions', error);
      return { items: [] };
    }
  }
}
@@ -0,0 +1,3 @@
1
+ export * from './base-completer';
2
+ export * from './codestral-completer';
3
+ export * from './utils';
@@ -0,0 +1,3 @@
1
+ export * from './base-completer';
2
+ export * from './codestral-completer';
3
+ export * from './utils';
@@ -0,0 +1,15 @@
1
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
2
+ import { IBaseCompleter } from './base-completer';
3
+ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
4
+ /**
5
+ * Get an LLM completer from the name.
6
+ */
7
+ export declare function getCompleter(name: string, settings: ReadonlyPartialJSONObject): IBaseCompleter | null;
8
+ /**
9
+ * Get an LLM chat model from the name.
10
+ */
11
+ export declare function getChatModel(name: string, settings: ReadonlyPartialJSONObject): BaseChatModel | null;
12
+ /**
13
+ * Get the error message from provider.
14
+ */
15
+ export declare function getErrorMessage(name: string, error: any): string;
@@ -0,0 +1,29 @@
1
+ import { ChatMistralAI } from '@langchain/mistralai';
2
+ import { CodestralCompleter } from './codestral-completer';
3
/**
 * Get an LLM completer from the name.
 *
 * @param name - the provider name.
 * @param settings - the settings to construct the completer with.
 * @returns the completer, or null for an unknown provider.
 */
export function getCompleter(name, settings) {
  switch (name) {
    case 'MistralAI':
      return new CodestralCompleter({ settings });
    default:
      return null;
  }
}
12
/**
 * Get an LLM chat model from the name.
 *
 * @param name - the provider name.
 * @param settings - the settings to construct the chat model with.
 * @returns the chat model, or null for an unknown provider.
 */
export function getChatModel(name, settings) {
  switch (name) {
    case 'MistralAI':
      return new ChatMistralAI({ ...settings });
    default:
      return null;
  }
}
21
/**
 * Get the error message from provider.
 *
 * @param name - the provider name.
 * @param error - the error (rejection reason) raised by the provider.
 * @returns a human readable error message.
 */
export function getErrorMessage(name, error) {
  if (name === 'MistralAI') {
    // Fall back to stringifying the reason so that non-Error rejections
    // (e.g. thrown strings or plain objects) do not render as 'undefined'
    // in the chat.
    return error?.message ?? String(error);
  }
  return 'Unknown provider';
}