@jupyterlite/ai 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,74 @@
1
+ import { ICompletionProviderManager } from '@jupyterlab/completer';
2
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
3
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
4
+ import { ISignal } from '@lumino/signaling';
5
+ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
6
+ import { IBaseCompleter } from './llm-models';
7
+ import { IAIProvider } from './token';
8
export declare class AIProvider implements IAIProvider {
  /**
   * Construct the provider and register its inline completion provider.
   */
  constructor(options: AIProvider.IOptions);
  /**
   * Get the name of the current model ('None' when no model is configured).
   */
  get name(): string;
  /**
   * Get the current completer of the completion provider.
   */
  get completer(): IBaseCompleter | null;
  /**
   * Get the current llm chat model.
   */
  get chatModel(): BaseChatModel | null;
  /**
   * Get the current chat error (empty string when the last chat model
   * creation succeeded).
   */
  get chatError(): string;
  /**
   * Get the current completer error (empty string when the last completer
   * creation succeeded).
   */
  get completerError(): string;
  /**
   * Set the models (chat model and completer).
   * Creates the models if the name has changed, otherwise only updates their config.
   *
   * @param name - the name of the model to use.
   * @param settings - the settings for the models.
   */
  setModels(name: string, settings: ReadonlyPartialJSONObject): void;
  /**
   * A signal emitted after the models have been (re)created or reconfigured.
   */
  get modelChange(): ISignal<IAIProvider, void>;
  private _completionProvider;
  private _llmChatModel;
  private _name;
  private _modelChange;
  private _chatError;
  private _completerError;
}
43
export declare namespace AIProvider {
  /**
   * The options for the LLM provider.
   */
  interface IOptions {
    /**
     * The completion provider manager in which register the LLM completer.
     */
    completionProviderManager: ICompletionProviderManager;
    /**
     * A callback triggering an inline completion request.
     */
    requestCompletion: () => void;
  }
  /**
   * This function indicates whether a key is writable in an object.
   * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript
   *
   * @param obj - An object extending the BaseLanguageModel interface.
   * @param key - A string as a key of the object.
   * @returns a boolean whether the key is writable or not.
   */
  function isWritable<T extends BaseLanguageModel>(obj: T, key: keyof T): boolean;
  /**
   * Update the config of a language model.
   * It only updates the writable attributes of the model.
   *
   * @param model - the model to update.
   * @param settings - the configuration as a JSON object.
   */
  function updateConfig<T extends BaseLanguageModel>(model: T, settings: ReadonlyPartialJSONObject): void;
}
@@ -0,0 +1,117 @@
1
+ import { Signal } from '@lumino/signaling';
2
+ import { CompletionProvider } from './completion-provider';
3
+ import { getChatModel } from './llm-models';
4
/**
 * The provider holding the chat model and the inline completer used by the
 * extension, and exposing the errors raised while creating them.
 */
export class AIProvider {
  constructor(options) {
    this._llmChatModel = null;
    // 'None' is the sentinel model name meaning "no model configured".
    this._name = 'None';
    this._modelChange = new Signal(this);
    this._chatError = '';
    this._completerError = '';
    this._completionProvider = new CompletionProvider({
      name: 'None',
      settings: {},
      requestCompletion: options.requestCompletion
    });
    options.completionProviderManager.registerInlineProvider(
      this._completionProvider
    );
  }
  /**
   * Get the name of the current model.
   */
  get name() {
    return this._name;
  }
  /**
   * Get the current completer of the completion provider.
   */
  get completer() {
    // FIX: the guard previously compared `_name` to null, which can never
    // match since `_name` is always a string ('None' when unset).
    if (this._name === 'None') {
      return null;
    }
    return this._completionProvider.completer;
  }
  /**
   * Get the current llm chat model.
   */
  get chatModel() {
    // FIX: same dead null comparison as in `completer` — use the 'None'
    // sentinel the class actually stores.
    if (this._name === 'None') {
      return null;
    }
    return this._llmChatModel;
  }
  /**
   * Get the current chat error (empty when the chat model was created
   * successfully).
   */
  get chatError() {
    return this._chatError;
  }
  /**
   * Get the current completer error (empty when the completer was created
   * successfully).
   */
  get completerError() {
    return this._completerError;
  }
  /**
   * Set the models (chat model and completer).
   * Creates the models if the name has changed, otherwise only updates their config.
   *
   * Errors raised by either creation are recorded in `completerError` /
   * `chatError` instead of propagating, so one model failing does not
   * prevent the other from being set.
   *
   * @param name - the name of the model to use.
   * @param settings - the settings for the models.
   */
  setModels(name, settings) {
    try {
      this._completionProvider.setCompleter(name, settings);
      this._completerError = '';
    } catch (e) {
      this._completerError = e.message;
    }
    try {
      this._llmChatModel = getChatModel(name, settings);
      this._chatError = '';
    } catch (e) {
      this._chatError = e.message;
      this._llmChatModel = null;
    }
    this._name = name;
    this._modelChange.emit();
  }
  /**
   * A signal emitted after the models have been updated.
   */
  get modelChange() {
    return this._modelChange;
  }
}
81
(function (AIProvider) {
  /**
   * This function indicates whether a key is writable in an object.
   * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript
   *
   * @param obj - An object extending the BaseLanguageModel interface.
   * @param key - A string as a key of the object.
   * @returns a boolean whether the key is writable or not.
   */
  function isWritable(obj, key) {
    const desc =
      Object.getOwnPropertyDescriptor(obj, key) ||
      Object.getOwnPropertyDescriptor(Object.getPrototypeOf(obj), key) ||
      {};
    // FIX: accessor properties have no `writable` flag in their descriptor;
    // they are writable when a setter is defined. The previous version
    // reported them as read-only.
    return Boolean(desc.writable || desc.set);
  }
  AIProvider.isWritable = isWritable;
  /**
   * Update the config of a language model.
   * It only updates the writable attributes of the model.
   *
   * @param model - the model to update.
   * @param settings - the configuration as a JSON object.
   */
  function updateConfig(model, settings) {
    // The unused `index` callback parameter has been dropped.
    Object.entries(settings).forEach(([key, value]) => {
      if (key in model && isWritable(model, key)) {
        model[key] = value;
      }
    });
  }
  AIProvider.updateConfig = updateConfig;
})(AIProvider || (AIProvider = {}));
package/lib/token.d.ts ADDED
@@ -0,0 +1,13 @@
1
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
2
+ import { Token } from '@lumino/coreutils';
3
+ import { ISignal } from '@lumino/signaling';
4
+ import { IBaseCompleter } from './llm-models';
5
/**
 * The interface of the provider exposing the chat model and the completer.
 */
export interface IAIProvider {
  /** The name of the current model ('None' when no model is configured). */
  name: string;
  /** The current completer, or null if none is set. */
  completer: IBaseCompleter | null;
  /** The current langchain chat model, or null if none is set. */
  chatModel: BaseChatModel | null;
  /** A signal emitted when the models change. */
  modelChange: ISignal<IAIProvider, void>;
  /** The error raised while creating the chat model, if any. */
  chatError: string;
  /** The error raised while creating the completer, if any. */
  completerError: string;
}
export declare const IAIProvider: Token<IAIProvider>;
package/lib/token.js ADDED
@@ -0,0 +1,2 @@
1
import { Token } from '@lumino/coreutils';
// The token under which the AI provider is registered, so other plugins can
// require it via the application's dependency injection.
export const IAIProvider = new Token('@jupyterlite/ai:AIProvider', 'Provider for chat and completion LLM provider');
package/package.json ADDED
@@ -0,0 +1,197 @@
1
+ {
2
+ "name": "@jupyterlite/ai",
3
+ "version": "0.2.0",
4
+ "description": "AI code completions and chat for JupyterLite",
5
+ "keywords": [
6
+ "jupyter",
7
+ "jupyterlab",
8
+ "jupyterlab-extension"
9
+ ],
10
+ "homepage": "https://github.com/jupyterlite/ai",
11
+ "bugs": {
12
+ "url": "https://github.com/jupyterlite/ai/issues"
13
+ },
14
+ "license": "BSD-3-Clause",
15
+ "author": "JupyterLite Contributors",
16
+ "files": [
17
+ "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf}",
18
+ "style/**/*.{css,js,eot,gif,html,jpg,json,png,svg,woff2,ttf}",
19
+ "src/**/*.{ts,tsx}",
20
+ "schema/*.json"
21
+ ],
22
+ "main": "lib/index.js",
23
+ "types": "lib/index.d.ts",
24
+ "style": "style/index.css",
25
+ "repository": {
26
+ "type": "git",
27
+ "url": "https://github.com/jupyterlite/ai.git"
28
+ },
29
+ "scripts": {
30
+ "build": "jlpm build:lib && jlpm build:labextension:dev",
31
+ "build:prod": "jlpm clean && jlpm build:lib:prod && jlpm build:labextension",
32
+ "build:labextension": "jupyter labextension build .",
33
+ "build:labextension:dev": "jupyter labextension build --development True .",
34
+ "build:lib": "tsc --sourceMap",
35
+ "build:lib:prod": "tsc",
36
+ "clean": "jlpm clean:lib",
37
+ "clean:lib": "rimraf lib tsconfig.tsbuildinfo",
38
+ "clean:lintcache": "rimraf .eslintcache .stylelintcache",
39
+ "clean:labextension": "rimraf jupyterlite_ai/labextension jupyterlite_ai/_version.py",
40
+ "clean:all": "jlpm clean:lib && jlpm clean:labextension && jlpm clean:lintcache",
41
+ "eslint": "jlpm eslint:check --fix",
42
+ "eslint:check": "eslint . --cache --ext .ts,.tsx",
43
+ "install:extension": "jlpm build",
44
+ "lint": "jlpm stylelint && jlpm prettier && jlpm eslint",
45
+ "lint:check": "jlpm stylelint:check && jlpm prettier:check && jlpm eslint:check",
46
+ "prettier": "jlpm prettier:base --write --list-different",
47
+ "prettier:base": "prettier \"**/*{.ts,.tsx,.js,.jsx,.css,.json,.md}\"",
48
+ "prettier:check": "jlpm prettier:base --check",
49
+ "stylelint": "jlpm stylelint:check --fix",
50
+ "stylelint:check": "stylelint --cache \"style/**/*.css\"",
51
+ "watch": "run-p watch:src watch:labextension",
52
+ "watch:src": "tsc -w --sourceMap",
53
+ "watch:labextension": "jupyter labextension watch ."
54
+ },
55
+ "dependencies": {
56
+ "@jupyter/chat": "^0.5.0",
57
+ "@jupyterlab/application": "^4.2.0",
58
+ "@jupyterlab/apputils": "^4.3.0",
59
+ "@jupyterlab/completer": "^4.2.0",
60
+ "@jupyterlab/notebook": "^4.2.0",
61
+ "@jupyterlab/rendermime": "^4.2.0",
62
+ "@jupyterlab/settingregistry": "^4.2.0",
63
+ "@langchain/core": "^0.3.13",
64
+ "@langchain/mistralai": "^0.1.1",
65
+ "@lumino/coreutils": "^2.1.2",
66
+ "@lumino/polling": "^2.1.2",
67
+ "@lumino/signaling": "^2.1.2"
68
+ },
69
+ "devDependencies": {
70
+ "@jupyterlab/builder": "^4.0.0",
71
+ "@types/json-schema": "^7.0.11",
72
+ "@types/react": "^18.0.26",
73
+ "@types/react-addons-linked-state-mixin": "^0.14.22",
74
+ "@typescript-eslint/eslint-plugin": "^6.1.0",
75
+ "@typescript-eslint/parser": "^6.1.0",
76
+ "css-loader": "^6.7.1",
77
+ "eslint": "^8.36.0",
78
+ "eslint-config-prettier": "^8.8.0",
79
+ "eslint-plugin-prettier": "^5.0.0",
80
+ "npm-run-all": "^4.1.5",
81
+ "prettier": "^3.0.0",
82
+ "rimraf": "^5.0.1",
83
+ "source-map-loader": "^1.0.2",
84
+ "style-loader": "^3.3.1",
85
+ "stylelint": "^15.10.1",
86
+ "stylelint-config-recommended": "^13.0.0",
87
+ "stylelint-config-standard": "^34.0.0",
88
+ "stylelint-csstree-validator": "^3.0.0",
89
+ "stylelint-prettier": "^4.0.0",
90
+ "typescript": "~5.0.2",
91
+ "yjs": "^13.5.0"
92
+ },
93
+ "sideEffects": [
94
+ "style/*.css",
95
+ "style/index.js"
96
+ ],
97
+ "styleModule": "style/index.js",
98
+ "publishConfig": {
99
+ "access": "public"
100
+ },
101
+ "jupyterlab": {
102
+ "extension": true,
103
+ "outputDir": "jupyterlite_ai/labextension",
104
+ "schemaDir": "schema"
105
+ },
106
+ "eslintIgnore": [
107
+ "node_modules",
108
+ "dist",
109
+ "coverage",
110
+ "**/*.d.ts"
111
+ ],
112
+ "eslintConfig": {
113
+ "extends": [
114
+ "eslint:recommended",
115
+ "plugin:@typescript-eslint/eslint-recommended",
116
+ "plugin:@typescript-eslint/recommended",
117
+ "plugin:prettier/recommended"
118
+ ],
119
+ "parser": "@typescript-eslint/parser",
120
+ "parserOptions": {
121
+ "project": "tsconfig.json",
122
+ "sourceType": "module"
123
+ },
124
+ "plugins": [
125
+ "@typescript-eslint"
126
+ ],
127
+ "rules": {
128
+ "@typescript-eslint/naming-convention": [
129
+ "error",
130
+ {
131
+ "selector": "interface",
132
+ "format": [
133
+ "PascalCase"
134
+ ],
135
+ "custom": {
136
+ "regex": "^I[A-Z]",
137
+ "match": true
138
+ }
139
+ }
140
+ ],
141
+ "@typescript-eslint/no-unused-vars": [
142
+ "warn",
143
+ {
144
+ "args": "none"
145
+ }
146
+ ],
147
+ "@typescript-eslint/no-explicit-any": "off",
148
+ "@typescript-eslint/no-namespace": "off",
149
+ "@typescript-eslint/no-use-before-define": "off",
150
+ "@typescript-eslint/quotes": [
151
+ "error",
152
+ "single",
153
+ {
154
+ "avoidEscape": true,
155
+ "allowTemplateLiterals": false
156
+ }
157
+ ],
158
+ "curly": [
159
+ "error",
160
+ "all"
161
+ ],
162
+ "eqeqeq": "error",
163
+ "prefer-arrow-callback": "error"
164
+ }
165
+ },
166
+ "prettier": {
167
+ "singleQuote": true,
168
+ "trailingComma": "none",
169
+ "arrowParens": "avoid",
170
+ "endOfLine": "auto",
171
+ "overrides": [
172
+ {
173
+ "files": "package.json",
174
+ "options": {
175
+ "tabWidth": 4
176
+ }
177
+ }
178
+ ]
179
+ },
180
+ "stylelint": {
181
+ "extends": [
182
+ "stylelint-config-recommended",
183
+ "stylelint-config-standard",
184
+ "stylelint-prettier/recommended"
185
+ ],
186
+ "plugins": [
187
+ "stylelint-csstree-validator"
188
+ ],
189
+ "rules": {
190
+ "csstree/validator": true,
191
+ "property-no-vendor-prefix": null,
192
+ "selector-class-pattern": "^([a-z][A-z\\d]*)(-[A-z\\d]+)*$",
193
+ "selector-no-vendor-prefix": null,
194
+ "value-no-vendor-prefix": null
195
+ }
196
+ }
197
+ }
@@ -0,0 +1,21 @@
1
+ {
2
+ "title": "AI provider",
3
+ "description": "Provider settings",
4
+ "type": "object",
5
+ "properties": {
6
+ "provider": {
7
+ "type": "string",
8
+ "title": "The AI provider",
9
+ "description": "The AI provider to use for chat and completion",
10
+ "default": "None",
11
+ "enum": ["None", "MistralAI"]
12
+ },
13
+ "apiKey": {
14
+ "type": "string",
15
+ "title": "The Codestral API key",
16
+ "description": "The API key to use for Codestral",
17
+ "default": ""
18
+ }
19
+ },
20
+ "additionalProperties": false
21
+ }
@@ -0,0 +1,20 @@
1
+ {
2
+ "title": "Chat configuration",
3
+ "description": "Configuration for the chat panel",
4
+ "type": "object",
5
+ "properties": {
6
+ "sendWithShiftEnter": {
7
+ "description": "Whether to send a message via Shift-Enter instead of Enter.",
8
+ "type": "boolean",
9
+ "default": false,
10
+ "readOnly": false
11
+ },
12
+ "enableCodeToolbar": {
13
+ "description": "Whether to enable or not the code toolbar.",
14
+ "type": "boolean",
15
+ "default": true,
16
+ "readOnly": false
17
+ }
18
+ },
19
+ "additionalProperties": false
20
+ }
@@ -0,0 +1,129 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+
6
+ import {
7
+ ChatModel,
8
+ IChatHistory,
9
+ IChatMessage,
10
+ INewMessage
11
+ } from '@jupyter/chat';
12
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
13
+ import {
14
+ AIMessage,
15
+ HumanMessage,
16
+ mergeMessageRuns
17
+ } from '@langchain/core/messages';
18
+ import { UUID } from '@lumino/coreutils';
19
+ import { getErrorMessage } from './llm-models';
20
+ import { IAIProvider } from './token';
21
+
22
/**
 * A message carrying a client id with a 'connection' tag.
 * NOTE(review): not referenced elsewhere in this chunk — presumably sent to a
 * chat client when it connects; confirm against the consumers of this export.
 */
export type ConnectionMessage = {
  type: 'connection';
  client_id: string;
};
26
+
27
+ export class ChatHandler extends ChatModel {
28
+ constructor(options: ChatHandler.IOptions) {
29
+ super(options);
30
+ this._aiProvider = options.aiProvider;
31
+ this._aiProvider.modelChange.connect(() => {
32
+ this._errorMessage = this._aiProvider.chatError;
33
+ });
34
+ }
35
+
36
+ get provider(): BaseChatModel | null {
37
+ return this._aiProvider.chatModel;
38
+ }
39
+
40
+ async sendMessage(message: INewMessage): Promise<boolean> {
41
+ message.id = UUID.uuid4();
42
+ const msg: IChatMessage = {
43
+ id: message.id,
44
+ body: message.body,
45
+ sender: { username: 'User' },
46
+ time: Date.now(),
47
+ type: 'msg'
48
+ };
49
+ this.messageAdded(msg);
50
+
51
+ if (this._aiProvider.chatModel === null) {
52
+ const errorMsg: IChatMessage = {
53
+ id: UUID.uuid4(),
54
+ body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`,
55
+ sender: { username: 'ERROR' },
56
+ time: Date.now(),
57
+ type: 'msg'
58
+ };
59
+ this.messageAdded(errorMsg);
60
+ return false;
61
+ }
62
+
63
+ this._history.messages.push(msg);
64
+
65
+ const messages = mergeMessageRuns(
66
+ this._history.messages.map(msg => {
67
+ if (msg.sender.username === 'User') {
68
+ return new HumanMessage(msg.body);
69
+ }
70
+ return new AIMessage(msg.body);
71
+ })
72
+ );
73
+
74
+ this.updateWriters([{ username: 'AI' }]);
75
+ return this._aiProvider.chatModel
76
+ .invoke(messages)
77
+ .then(response => {
78
+ const content = response.content;
79
+ const botMsg: IChatMessage = {
80
+ id: UUID.uuid4(),
81
+ body: content.toString(),
82
+ sender: { username: 'AI' },
83
+ time: Date.now(),
84
+ type: 'msg'
85
+ };
86
+ this.messageAdded(botMsg);
87
+ this._history.messages.push(botMsg);
88
+ return true;
89
+ })
90
+ .catch(reason => {
91
+ const error = getErrorMessage(this._aiProvider.name, reason);
92
+ const errorMsg: IChatMessage = {
93
+ id: UUID.uuid4(),
94
+ body: `**${error}**`,
95
+ sender: { username: 'ERROR' },
96
+ time: Date.now(),
97
+ type: 'msg'
98
+ };
99
+ this.messageAdded(errorMsg);
100
+ return false;
101
+ })
102
+ .finally(() => {
103
+ this.updateWriters([]);
104
+ });
105
+ }
106
+
107
+ async getHistory(): Promise<IChatHistory> {
108
+ return this._history;
109
+ }
110
+
111
+ dispose(): void {
112
+ super.dispose();
113
+ }
114
+
115
+ messageAdded(message: IChatMessage): void {
116
+ super.messageAdded(message);
117
+ }
118
+
119
+ private _aiProvider: IAIProvider;
120
+ private _errorMessage: string = '';
121
+ private _history: IChatHistory = { messages: [] };
122
+ private _defaultErrorMessage = 'AI provider not configured';
123
+ }
124
+
125
export namespace ChatHandler {
  /**
   * The options used to build a ChatHandler.
   */
  export interface IOptions extends ChatModel.IOptions {
    /**
     * The AI provider delivering the chat model.
     */
    aiProvider: IAIProvider;
  }
}
@@ -0,0 +1,81 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext,
4
+ IInlineCompletionProvider
5
+ } from '@jupyterlab/completer';
6
+ import { LLM } from '@langchain/core/language_models/llms';
7
+
8
+ import { getCompleter, IBaseCompleter, BaseCompleter } from './llm-models';
9
+ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
10
+
11
+ /**
12
+ * The generic completion provider to register to the completion provider manager.
13
+ */
14
+ export class CompletionProvider implements IInlineCompletionProvider {
15
+ readonly identifier = '@jupyterlite/ai';
16
+
17
+ constructor(options: CompletionProvider.IOptions) {
18
+ const { name, settings } = options;
19
+ this._requestCompletion = options.requestCompletion;
20
+ this.setCompleter(name, settings);
21
+ }
22
+
23
+ /**
24
+ * Set the completer.
25
+ *
26
+ * @param name - the name of the completer.
27
+ * @param settings - The settings associated to the completer.
28
+ */
29
+ setCompleter(name: string, settings: ReadonlyPartialJSONObject) {
30
+ try {
31
+ this._completer = getCompleter(name, settings);
32
+ if (this._completer) {
33
+ this._completer.requestCompletion = this._requestCompletion;
34
+ }
35
+ this._name = this._completer === null ? 'None' : name;
36
+ } catch (e: any) {
37
+ this._completer = null;
38
+ this._name = 'None';
39
+ throw e;
40
+ }
41
+ }
42
+
43
+ /**
44
+ * Get the current completer name.
45
+ */
46
+ get name(): string {
47
+ return this._name;
48
+ }
49
+
50
+ /**
51
+ * Get the current completer.
52
+ */
53
+ get completer(): IBaseCompleter | null {
54
+ return this._completer;
55
+ }
56
+
57
+ /**
58
+ * Get the LLM completer.
59
+ */
60
+ get llmCompleter(): LLM | null {
61
+ return this._completer?.provider || null;
62
+ }
63
+
64
+ async fetch(
65
+ request: CompletionHandler.IRequest,
66
+ context: IInlineCompletionContext
67
+ ) {
68
+ return this._completer?.fetch(request, context);
69
+ }
70
+
71
+ private _name: string = 'None';
72
+ private _requestCompletion: () => void;
73
+ private _completer: IBaseCompleter | null = null;
74
+ }
75
+
76
export namespace CompletionProvider {
  /**
   * The options used to build a CompletionProvider.
   */
  export interface IOptions extends BaseCompleter.IOptions {
    /**
     * The name of the completer to instantiate.
     */
    name: string;
    /**
     * A callback triggering an inline completion request.
     */
    requestCompletion: () => void;
  }
}