@jupyterlite/ai 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +48 -9
  2. package/lib/_provider-settings/anthropic.json +70 -0
  3. package/lib/_provider-settings/chromeAI.json +21 -0
  4. package/lib/_provider-settings/mistralAI.json +75 -0
  5. package/lib/_provider-settings/openAI.json +668 -0
  6. package/lib/chat-handler.d.ts +12 -0
  7. package/lib/chat-handler.js +70 -21
  8. package/lib/completion-provider.d.ts +3 -3
  9. package/lib/icons.d.ts +2 -0
  10. package/lib/icons.js +15 -0
  11. package/lib/index.d.ts +2 -1
  12. package/lib/index.js +61 -6
  13. package/lib/llm-models/anthropic-completer.d.ts +19 -0
  14. package/lib/llm-models/anthropic-completer.js +57 -0
  15. package/lib/llm-models/base-completer.d.ts +6 -2
  16. package/lib/llm-models/chrome-completer.d.ts +19 -0
  17. package/lib/llm-models/chrome-completer.js +67 -0
  18. package/lib/llm-models/codestral-completer.d.ts +9 -8
  19. package/lib/llm-models/codestral-completer.js +37 -54
  20. package/lib/llm-models/openai-completer.d.ts +19 -0
  21. package/lib/llm-models/openai-completer.js +51 -0
  22. package/lib/llm-models/utils.d.ts +1 -0
  23. package/lib/llm-models/utils.js +57 -0
  24. package/lib/provider.d.ts +11 -0
  25. package/lib/provider.js +26 -0
  26. package/lib/slash-commands.d.ts +16 -0
  27. package/lib/slash-commands.js +25 -0
  28. package/package.json +23 -104
  29. package/schema/ai-provider.json +4 -8
  30. package/schema/chat.json +8 -0
  31. package/src/chat-handler.ts +91 -34
  32. package/src/completion-provider.ts +3 -3
  33. package/src/icons.ts +18 -0
  34. package/src/index.ts +67 -5
  35. package/src/llm-models/anthropic-completer.ts +75 -0
  36. package/src/llm-models/base-completer.ts +7 -2
  37. package/src/llm-models/chrome-completer.ts +88 -0
  38. package/src/llm-models/codestral-completer.ts +43 -69
  39. package/src/llm-models/openai-completer.ts +67 -0
  40. package/src/llm-models/svg.d.ts +9 -0
  41. package/src/llm-models/utils.ts +49 -0
  42. package/src/provider.ts +38 -0
  43. package/src/slash-commands.tsx +55 -0
  44. package/style/icons/jupyternaut-lite.svg +7 -0
package/src/icons.ts ADDED
@@ -0,0 +1,18 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+
6
+ import { LabIcon } from '@jupyterlab/ui-components';
7
+
8
+ /**
9
+ * This icon is based on the jupyternaut icon from Jupyter AI:
10
+ * https://github.com/jupyterlab/jupyter-ai/blob/main/packages/jupyter-ai/style/icons/jupyternaut.svg
11
+ * With a small tweak for the colors to match the JupyterLite icon.
12
+ */
13
+ import jupyternautLiteSvg from '../style/icons/jupyternaut-lite.svg';
14
+
15
+ export const jupyternautLiteIcon = new LabIcon({
16
+ name: '@jupyterlite/ai:jupyternaut-lite',
17
+ svgstr: jupyternautLiteSvg
18
+ });
package/src/index.ts CHANGED
@@ -1,8 +1,11 @@
1
1
  import {
2
2
  ActiveCellManager,
3
+ AutocompletionRegistry,
3
4
  buildChatSidebar,
4
5
  buildErrorWidget,
5
- IActiveCellManager
6
+ IActiveCellManager,
7
+ IAutocompletionCommandsProps,
8
+ IAutocompletionRegistry
6
9
  } from '@jupyter/chat';
7
10
  import {
8
11
  JupyterFrontEnd,
@@ -15,19 +18,49 @@ import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
15
18
  import { ISettingRegistry } from '@jupyterlab/settingregistry';
16
19
 
17
20
  import { ChatHandler } from './chat-handler';
21
+ import { getSettings } from './llm-models';
18
22
  import { AIProvider } from './provider';
23
+ import { renderSlashCommandOption } from './slash-commands';
19
24
  import { IAIProvider } from './token';
20
25
 
26
+ const autocompletionRegistryPlugin: JupyterFrontEndPlugin<IAutocompletionRegistry> =
27
+ {
28
+ id: '@jupyterlite/ai:autocompletion-registry',
29
+ description: 'Autocompletion registry',
30
+ autoStart: true,
31
+ provides: IAutocompletionRegistry,
32
+ activate: () => {
33
+ const autocompletionRegistry = new AutocompletionRegistry();
34
+ const options = ['/clear'];
35
+ const autocompletionCommands: IAutocompletionCommandsProps = {
36
+ opener: '/',
37
+ commands: options.map(option => {
38
+ return {
39
+ id: option.slice(1),
40
+ label: option,
41
+ description: 'Clear the chat window'
42
+ };
43
+ }),
44
+ props: {
45
+ renderOption: renderSlashCommandOption
46
+ }
47
+ };
48
+ autocompletionRegistry.add('jupyterlite-ai', autocompletionCommands);
49
+ return autocompletionRegistry;
50
+ }
51
+ };
52
+
21
53
  const chatPlugin: JupyterFrontEndPlugin<void> = {
22
54
  id: '@jupyterlite/ai:chat',
23
55
  description: 'LLM chat extension',
24
56
  autoStart: true,
57
+ requires: [IAIProvider, IRenderMimeRegistry, IAutocompletionRegistry],
25
58
  optional: [INotebookTracker, ISettingRegistry, IThemeManager],
26
- requires: [IAIProvider, IRenderMimeRegistry],
27
59
  activate: async (
28
60
  app: JupyterFrontEnd,
29
61
  aiProvider: IAIProvider,
30
62
  rmRegistry: IRenderMimeRegistry,
63
+ autocompletionRegistry: IAutocompletionRegistry,
31
64
  notebookTracker: INotebookTracker | null,
32
65
  settingsRegistry: ISettingRegistry | null,
33
66
  themeManager: IThemeManager | null
@@ -47,12 +80,17 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
47
80
 
48
81
  let sendWithShiftEnter = false;
49
82
  let enableCodeToolbar = true;
83
+ let personaName = 'AI';
50
84
 
51
85
  function loadSetting(setting: ISettingRegistry.ISettings): void {
52
86
  sendWithShiftEnter = setting.get('sendWithShiftEnter')
53
87
  .composite as boolean;
54
88
  enableCodeToolbar = setting.get('enableCodeToolbar').composite as boolean;
89
+ personaName = setting.get('personaName').composite as string;
90
+
91
+ // set the properties
55
92
  chatHandler.config = { sendWithShiftEnter, enableCodeToolbar };
93
+ chatHandler.personaName = personaName;
56
94
  }
57
95
 
58
96
  Promise.all([app.restored, settingsRegistry?.load(chatPlugin.id)])
@@ -77,9 +115,10 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
77
115
  chatWidget = buildChatSidebar({
78
116
  model: chatHandler,
79
117
  themeManager,
80
- rmRegistry
118
+ rmRegistry,
119
+ autocompletionRegistry
81
120
  });
82
- chatWidget.title.caption = 'Codestral Chat';
121
+ chatWidget.title.caption = 'Jupyterlite AI Chat';
83
122
  } catch (e) {
84
123
  chatWidget = buildErrorWidget(themeManager);
85
124
  }
@@ -105,11 +144,34 @@ const aiProviderPlugin: JupyterFrontEndPlugin<IAIProvider> = {
105
144
  requestCompletion: () => app.commands.execute('inline-completer:invoke')
106
145
  });
107
146
 
147
+ let currentProvider = 'None';
108
148
  settingRegistry
109
149
  .load(aiProviderPlugin.id)
110
150
  .then(settings => {
111
151
  const updateProvider = () => {
112
152
  const provider = settings.get('provider').composite as string;
153
+ if (provider !== currentProvider) {
154
+ // Update the settings panel.
155
+ currentProvider = provider;
156
+ const settingsProperties = settings.schema.properties;
157
+ if (settingsProperties) {
158
+ const schemaKeys = Object.keys(settingsProperties);
159
+ schemaKeys.forEach(key => {
160
+ if (key !== 'provider') {
161
+ delete settings.schema.properties?.[key];
162
+ }
163
+ });
164
+ const properties = getSettings(provider);
165
+ if (properties === null) {
166
+ return;
167
+ }
168
+ Object.entries(properties).forEach(([name, value], index) => {
169
+ settingsProperties[name] = value as ISettingRegistry.IProperty;
170
+ });
171
+ }
172
+ }
173
+
174
+ // Update the settings to the AI providers.
113
175
  aiProvider.setModels(provider, settings.composite);
114
176
  };
115
177
 
@@ -127,4 +189,4 @@ const aiProviderPlugin: JupyterFrontEndPlugin<IAIProvider> = {
127
189
  }
128
190
  };
129
191
 
130
- export default [chatPlugin, aiProviderPlugin];
192
+ export default [chatPlugin, autocompletionRegistryPlugin, aiProviderPlugin];
@@ -0,0 +1,75 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { ChatAnthropic } from '@langchain/anthropic';
6
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
7
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
8
+
9
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
11
+
12
+ export class AnthropicCompleter implements IBaseCompleter {
13
+ constructor(options: BaseCompleter.IOptions) {
14
+ this._anthropicProvider = new ChatAnthropic({ ...options.settings });
15
+ }
16
+
17
+ get provider(): BaseChatModel {
18
+ return this._anthropicProvider;
19
+ }
20
+
21
+ /**
22
+ * Getter and setter for the initial prompt.
23
+ */
24
+ get prompt(): string {
25
+ return this._prompt;
26
+ }
27
+ set prompt(value: string) {
28
+ this._prompt = value;
29
+ }
30
+
31
+ async fetch(
32
+ request: CompletionHandler.IRequest,
33
+ context: IInlineCompletionContext
34
+ ) {
35
+ const { text, offset: cursorOffset } = request;
36
+ const prompt = text.slice(0, cursorOffset);
37
+
38
+ // Anthropic does not allow whitespace at the end of the AIMessage
39
+ const trimmedPrompt = prompt.trim();
40
+
41
+ const messages = [
42
+ new SystemMessage(this._prompt),
43
+ new AIMessage(trimmedPrompt)
44
+ ];
45
+
46
+ try {
47
+ const response = await this._anthropicProvider.invoke(messages);
48
+ const items = [];
49
+
50
+ // Anthropic can return string or complex content, a list of string/images/other.
51
+ if (typeof response.content === 'string') {
52
+ items.push({
53
+ insertText: response.content
54
+ });
55
+ } else {
56
+ response.content.forEach(content => {
57
+ if (content.type !== 'text') {
58
+ return;
59
+ }
60
+ items.push({
61
+ insertText: content.text,
62
+ filterText: prompt.substring(trimmedPrompt.length)
63
+ });
64
+ });
65
+ }
66
+ return { items };
67
+ } catch (error) {
68
+ console.error('Error fetching completions', error);
69
+ return { items: [] };
70
+ }
71
+ }
72
+
73
+ private _anthropicProvider: ChatAnthropic;
74
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
75
+ }
@@ -2,14 +2,19 @@ import {
2
2
  CompletionHandler,
3
3
  IInlineCompletionContext
4
4
  } from '@jupyterlab/completer';
5
- import { LLM } from '@langchain/core/language_models/llms';
5
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
6
6
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
7
7
 
8
8
  export interface IBaseCompleter {
9
9
  /**
10
10
  * The LLM completer.
11
11
  */
12
- provider: LLM;
12
+ provider: BaseLanguageModel;
13
+
14
+ /**
15
+ * The completion prompt.
16
+ */
17
+ prompt: string;
13
18
 
14
19
  /**
15
20
  * The function to fetch a new completion.
@@ -0,0 +1,88 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
6
+ import { LLM } from '@langchain/core/language_models/llms';
7
+ import { HumanMessage, SystemMessage } from '@langchain/core/messages';
8
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
9
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
10
+
11
+ /**
12
+ * Regular expression to match the '```' string at the start of a string.
13
+ * So the completions returned by the LLM can still be kept after removing the code block formatting.
14
+ *
15
+ * For example, if the response contains the following content after typing `import pandas`:
16
+ *
17
+ * ```python
18
+ * as pd
19
+ * ```
20
+ *
21
+ * The formatting string after removing the code block delimiters will be:
22
+ *
23
+ * as pd
24
+ */
25
+ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
26
+
27
+ /**
28
+ * Regular expression to match the '```' string at the end of a string.
29
+ */
30
+ const CODE_BLOCK_END_REGEX = /```$/;
31
+
32
+ export class ChromeCompleter implements IBaseCompleter {
33
+ constructor(options: BaseCompleter.IOptions) {
34
+ this._chromeProvider = new ChromeAI({ ...options.settings });
35
+ }
36
+
37
+ /**
38
+ * Getter and setter for the initial prompt.
39
+ */
40
+ get prompt(): string {
41
+ return this._prompt;
42
+ }
43
+ set prompt(value: string) {
44
+ this._prompt = value;
45
+ }
46
+
47
+ get provider(): LLM {
48
+ return this._chromeProvider;
49
+ }
50
+
51
+ async fetch(
52
+ request: CompletionHandler.IRequest,
53
+ context: IInlineCompletionContext
54
+ ) {
55
+ const { text, offset: cursorOffset } = request;
56
+ const prompt = text.slice(0, cursorOffset);
57
+
58
+ const trimmedPrompt = prompt.trim();
59
+
60
+ const messages = [
61
+ new SystemMessage(this._prompt),
62
+ new HumanMessage(trimmedPrompt)
63
+ ];
64
+
65
+ try {
66
+ let response = await this._chromeProvider.invoke(messages);
67
+
68
+ // ChromeAI sometimes returns a string starting with '```',
69
+ // so process the response to remove the code block delimiters
70
+ if (CODE_BLOCK_START_REGEX.test(response)) {
71
+ response = response
72
+ .replace(CODE_BLOCK_START_REGEX, '')
73
+ .replace(CODE_BLOCK_END_REGEX, '');
74
+ }
75
+
76
+ const items = [{ insertText: response }];
77
+ return {
78
+ items
79
+ };
80
+ } catch (error) {
81
+ console.error('Error fetching completion:', error);
82
+ return { items: [] };
83
+ }
84
+ }
85
+
86
+ private _chromeProvider: ChromeAI;
87
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
88
+ }
@@ -2,72 +2,63 @@ import {
2
2
  CompletionHandler,
3
3
  IInlineCompletionContext
4
4
  } from '@jupyterlab/completer';
5
- import { LLM } from '@langchain/core/language_models/llms';
6
- import { MistralAI } from '@langchain/mistralai';
5
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
6
+ import {
7
+ BaseMessage,
8
+ HumanMessage,
9
+ SystemMessage
10
+ } from '@langchain/core/messages';
11
+ import { ChatMistralAI } from '@langchain/mistralai';
7
12
  import { Throttler } from '@lumino/polling';
8
- import { CompletionRequest } from '@mistralai/mistralai';
9
13
 
10
14
  import { BaseCompleter, IBaseCompleter } from './base-completer';
15
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
11
16
 
12
17
  /**
13
18
  * The Mistral API has a rate limit of 1 request per second
14
19
  */
15
20
  const INTERVAL = 1000;
16
21
 
17
- /**
18
- * Timeout to avoid endless requests
19
- */
20
- const REQUEST_TIMEOUT = 3000;
21
-
22
22
  export class CodestralCompleter implements IBaseCompleter {
23
23
  constructor(options: BaseCompleter.IOptions) {
24
- // this._requestCompletion = options.requestCompletion;
25
- this._mistralProvider = new MistralAI({ ...options.settings });
24
+ this._mistralProvider = new ChatMistralAI({ ...options.settings });
26
25
  this._throttler = new Throttler(
27
- async (data: CompletionRequest) => {
28
- const invokedData = data;
29
-
30
- // Request completion.
31
- const request = this._mistralProvider.completionWithRetry(
32
- data,
33
- {},
34
- false
35
- );
36
- const timeoutPromise = new Promise<null>(resolve => {
37
- return setTimeout(() => resolve(null), REQUEST_TIMEOUT);
38
- });
39
-
40
- // Fetch again if the request is too long or if the prompt has changed.
41
- const response = await Promise.race([request, timeoutPromise]);
42
- if (
43
- response === null ||
44
- invokedData.prompt !== this._currentData?.prompt
45
- ) {
46
- return {
47
- items: [],
48
- fetchAgain: true
49
- };
50
- }
51
-
26
+ async (messages: BaseMessage[]) => {
27
+ const response = await this._mistralProvider.invoke(messages);
52
28
  // Extract results of completion request.
53
- const items = response.choices.map((choice: any) => {
54
- return { insertText: choice.message.content as string };
55
- });
56
-
57
- return {
58
- items
59
- };
29
+ const items = [];
30
+ if (typeof response.content === 'string') {
31
+ items.push({
32
+ insertText: response.content
33
+ });
34
+ } else {
35
+ response.content.forEach(content => {
36
+ if (content.type !== 'text') {
37
+ return;
38
+ }
39
+ items.push({
40
+ insertText: content.text
41
+ });
42
+ });
43
+ }
44
+ return { items };
60
45
  },
61
46
  { limit: INTERVAL }
62
47
  );
63
48
  }
64
49
 
65
- get provider(): LLM {
50
+ get provider(): BaseChatModel {
66
51
  return this._mistralProvider;
67
52
  }
68
53
 
69
- set requestCompletion(value: () => void) {
70
- this._requestCompletion = value;
54
+ /**
55
+ * Getter and setter for the initial prompt.
56
+ */
57
+ get prompt(): string {
58
+ return this._prompt;
59
+ }
60
+ set prompt(value: string) {
61
+ this._prompt = value;
71
62
  }
72
63
 
73
64
  async fetch(
@@ -76,38 +67,21 @@ export class CodestralCompleter implements IBaseCompleter {
76
67
  ) {
77
68
  const { text, offset: cursorOffset } = request;
78
69
  const prompt = text.slice(0, cursorOffset);
79
- const suffix = text.slice(cursorOffset);
80
70
 
81
- const data = {
82
- prompt,
83
- suffix,
84
- model: this._mistralProvider.model,
85
- // temperature: 0,
86
- // top_p: 1,
87
- // max_tokens: 1024,
88
- // min_tokens: 0,
89
- stream: false,
90
- // random_seed: 1337,
91
- stop: []
92
- };
71
+ const messages: BaseMessage[] = [
72
+ new SystemMessage(this._prompt),
73
+ new HumanMessage(prompt)
74
+ ];
93
75
 
94
76
  try {
95
- this._currentData = data;
96
- const completionResult = await this._throttler.invoke(data);
97
- if (completionResult.fetchAgain) {
98
- if (this._requestCompletion) {
99
- this._requestCompletion();
100
- }
101
- }
102
- return { items: completionResult.items };
77
+ return await this._throttler.invoke(messages);
103
78
  } catch (error) {
104
79
  console.error('Error fetching completions', error);
105
80
  return { items: [] };
106
81
  }
107
82
  }
108
83
 
109
- private _requestCompletion?: () => void;
110
84
  private _throttler: Throttler;
111
- private _mistralProvider: MistralAI;
112
- private _currentData: CompletionRequest | null = null;
85
+ private _mistralProvider: ChatMistralAI;
86
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
113
87
  }
@@ -0,0 +1,67 @@
1
+ import {
2
+ CompletionHandler,
3
+ IInlineCompletionContext
4
+ } from '@jupyterlab/completer';
5
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
6
+ import { AIMessage, SystemMessage } from '@langchain/core/messages';
7
+ import { ChatOpenAI } from '@langchain/openai';
8
+
9
+ import { BaseCompleter, IBaseCompleter } from './base-completer';
10
+ import { COMPLETION_SYSTEM_PROMPT } from '../provider';
11
+
12
+ export class OpenAICompleter implements IBaseCompleter {
13
+ constructor(options: BaseCompleter.IOptions) {
14
+ this._openAIProvider = new ChatOpenAI({ ...options.settings });
15
+ }
16
+
17
+ get provider(): BaseChatModel {
18
+ return this._openAIProvider;
19
+ }
20
+
21
+ /**
22
+ * Getter and setter for the initial prompt.
23
+ */
24
+ get prompt(): string {
25
+ return this._prompt;
26
+ }
27
+ set prompt(value: string) {
28
+ this._prompt = value;
29
+ }
30
+
31
+ async fetch(
32
+ request: CompletionHandler.IRequest,
33
+ context: IInlineCompletionContext
34
+ ) {
35
+ const { text, offset: cursorOffset } = request;
36
+ const prompt = text.slice(0, cursorOffset);
37
+
38
+ const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)];
39
+
40
+ try {
41
+ const response = await this._openAIProvider.invoke(messages);
42
+ const items = [];
43
+ if (typeof response.content === 'string') {
44
+ items.push({
45
+ insertText: response.content
46
+ });
47
+ } else {
48
+ response.content.forEach(content => {
49
+ if (content.type !== 'text') {
50
+ return;
51
+ }
52
+ items.push({
53
+ insertText: content.text,
54
+ filterText: prompt.substring(prompt.length)
55
+ });
56
+ });
57
+ }
58
+ return { items };
59
+ } catch (error) {
60
+ console.error('Error fetching completions', error);
61
+ return { items: [] };
62
+ }
63
+ }
64
+
65
+ private _openAIProvider: ChatOpenAI;
66
+ private _prompt: string = COMPLETION_SYSTEM_PROMPT;
67
+ }
@@ -0,0 +1,9 @@
1
+ /*
2
+ * Copyright (c) Jupyter Development Team.
3
+ * Distributed under the terms of the Modified BSD License.
4
+ */
5
+
6
+ declare module '*.svg' {
7
+ const value: string;
8
+ export default value;
9
+ }
@@ -1,8 +1,20 @@
1
+ import { ChatAnthropic } from '@langchain/anthropic';
2
+ import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
1
3
  import { BaseChatModel } from '@langchain/core/language_models/chat_models';
2
4
  import { ChatMistralAI } from '@langchain/mistralai';
5
+ import { ChatOpenAI } from '@langchain/openai';
6
+
3
7
  import { IBaseCompleter } from './base-completer';
8
+ import { AnthropicCompleter } from './anthropic-completer';
4
9
  import { CodestralCompleter } from './codestral-completer';
5
10
  import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
11
+ import { ChromeCompleter } from './chrome-completer';
12
+ import { OpenAICompleter } from './openai-completer';
13
+
14
+ import chromeAI from '../_provider-settings/chromeAI.json';
15
+ import mistralAI from '../_provider-settings/mistralAI.json';
16
+ import anthropic from '../_provider-settings/anthropic.json';
17
+ import openAI from '../_provider-settings/openAI.json';
6
18
 
7
19
  /**
8
20
  * Get an LLM completer from the name.
@@ -13,6 +25,12 @@ export function getCompleter(
13
25
  ): IBaseCompleter | null {
14
26
  if (name === 'MistralAI') {
15
27
  return new CodestralCompleter({ settings });
28
+ } else if (name === 'Anthropic') {
29
+ return new AnthropicCompleter({ settings });
30
+ } else if (name === 'ChromeAI') {
31
+ return new ChromeCompleter({ settings });
32
+ } else if (name === 'OpenAI') {
33
+ return new OpenAICompleter({ settings });
16
34
  }
17
35
  return null;
18
36
  }
@@ -26,6 +44,14 @@ export function getChatModel(
26
44
  ): BaseChatModel | null {
27
45
  if (name === 'MistralAI') {
28
46
  return new ChatMistralAI({ ...settings });
47
+ } else if (name === 'Anthropic') {
48
+ return new ChatAnthropic({ ...settings });
49
+ } else if (name === 'ChromeAI') {
50
+ // TODO: fix
51
+ // @ts-expect-error: missing properties
52
+ return new ChromeAI({ ...settings });
53
+ } else if (name === 'OpenAI') {
54
+ return new ChatOpenAI({ ...settings });
29
55
  }
30
56
  return null;
31
57
  }
@@ -36,6 +62,29 @@ export function getChatModel(
36
62
  export function getErrorMessage(name: string, error: any): string {
37
63
  if (name === 'MistralAI') {
38
64
  return error.message;
65
+ } else if (name === 'Anthropic') {
66
+ return error.error.error.message;
67
+ } else if (name === 'ChromeAI') {
68
+ return error.message;
69
+ } else if (name === 'OpenAI') {
70
+ return error.message;
39
71
  }
40
72
  return 'Unknown provider';
41
73
  }
74
+
75
+ /*
76
+ * Get an LLM completer from the name.
77
+ */
78
+ export function getSettings(name: string): any {
79
+ if (name === 'MistralAI') {
80
+ return mistralAI.properties;
81
+ } else if (name === 'Anthropic') {
82
+ return anthropic.properties;
83
+ } else if (name === 'ChromeAI') {
84
+ return chromeAI.properties;
85
+ } else if (name === 'OpenAI') {
86
+ return openAI.properties;
87
+ }
88
+
89
+ return null;
90
+ }