@jupyterlite/ai 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/README.md +48 -9
  2. package/lib/chat-handler.d.ts +15 -3
  3. package/lib/chat-handler.js +80 -28
  4. package/lib/completion-provider.d.ts +5 -18
  5. package/lib/completion-provider.js +8 -34
  6. package/lib/icons.d.ts +2 -0
  7. package/lib/icons.js +15 -0
  8. package/lib/index.d.ts +3 -2
  9. package/lib/index.js +79 -22
  10. package/lib/llm-models/anthropic-completer.d.ts +19 -0
  11. package/lib/llm-models/anthropic-completer.js +57 -0
  12. package/lib/llm-models/base-completer.d.ts +6 -2
  13. package/lib/llm-models/chrome-completer.d.ts +19 -0
  14. package/lib/llm-models/chrome-completer.js +67 -0
  15. package/lib/llm-models/codestral-completer.d.ts +9 -8
  16. package/lib/llm-models/codestral-completer.js +37 -54
  17. package/lib/llm-models/index.d.ts +3 -2
  18. package/lib/llm-models/index.js +42 -2
  19. package/lib/llm-models/openai-completer.d.ts +19 -0
  20. package/lib/llm-models/openai-completer.js +51 -0
  21. package/lib/provider.d.ts +54 -15
  22. package/lib/provider.js +123 -41
  23. package/lib/settings/instructions.d.ts +2 -0
  24. package/lib/settings/instructions.js +44 -0
  25. package/lib/settings/panel.d.ts +70 -0
  26. package/lib/settings/panel.js +190 -0
  27. package/lib/settings/schemas/_generated/Anthropic.json +70 -0
  28. package/lib/settings/schemas/_generated/ChromeAI.json +21 -0
  29. package/lib/settings/schemas/_generated/MistralAI.json +75 -0
  30. package/lib/settings/schemas/_generated/OpenAI.json +668 -0
  31. package/lib/settings/schemas/base.json +7 -0
  32. package/lib/settings/schemas/index.d.ts +3 -0
  33. package/lib/settings/schemas/index.js +11 -0
  34. package/lib/slash-commands.d.ts +16 -0
  35. package/lib/slash-commands.js +25 -0
  36. package/lib/tokens.d.ts +103 -0
  37. package/lib/tokens.js +5 -0
  38. package/package.json +27 -104
  39. package/schema/chat.json +8 -0
  40. package/schema/provider-registry.json +17 -0
  41. package/src/chat-handler.ts +103 -43
  42. package/src/completion-provider.ts +13 -37
  43. package/src/icons.ts +18 -0
  44. package/src/index.ts +101 -24
  45. package/src/llm-models/anthropic-completer.ts +75 -0
  46. package/src/llm-models/base-completer.ts +7 -2
  47. package/src/llm-models/chrome-completer.ts +88 -0
  48. package/src/llm-models/codestral-completer.ts +43 -69
  49. package/src/llm-models/index.ts +49 -2
  50. package/src/llm-models/openai-completer.ts +67 -0
  51. package/src/llm-models/svg.d.ts +9 -0
  52. package/src/provider.ts +138 -43
  53. package/src/settings/instructions.ts +48 -0
  54. package/src/settings/panel.tsx +257 -0
  55. package/src/settings/schemas/index.ts +15 -0
  56. package/src/slash-commands.tsx +55 -0
  57. package/src/tokens.ts +112 -0
  58. package/style/base.css +4 -0
  59. package/style/icons/jupyternaut-lite.svg +7 -0
  60. package/lib/llm-models/utils.d.ts +0 -15
  61. package/lib/llm-models/utils.js +0 -29
  62. package/lib/token.d.ts +0 -13
  63. package/lib/token.js +0 -2
  64. package/schema/ai-provider.json +0 -21
  65. package/src/llm-models/utils.ts +0 -41
  66. package/src/token.ts +0 -19
package/lib/provider.js CHANGED
@@ -1,40 +1,107 @@
1
1
  import { Signal } from '@lumino/signaling';
2
- import { CompletionProvider } from './completion-provider';
3
- import { getChatModel } from './llm-models';
4
- export class AIProvider {
5
- constructor(options) {
6
- this._llmChatModel = null;
2
+ export const chatSystemPrompt = (options) => `
3
+ You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
4
+ You are not a language model, but rather an application built on a foundation model from ${options.provider_name}.
5
+ You are talkative and you provide lots of specific details from the foundation model's context.
6
+ You may use Markdown to format your response.
7
+ If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).
8
+ If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.
9
+ All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`.
10
+ - Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\`
11
+ - **Correct** response: \`You have \\(\\$80\\) remaining.\`
12
+ - **Incorrect** response: \`You have $80 remaining.\`
13
+ If you do not know the answer to a question, answer truthfully by responding that you do not know.
14
+ The following is a friendly conversation between you and a human.
15
+ `;
16
+ export const COMPLETION_SYSTEM_PROMPT = `
17
+ You are an application built to provide helpful code completion suggestions.
18
+ You should only produce code. Keep comments to minimum, use the
19
+ programming language comment syntax. Produce clean code.
20
+ The code is written in JupyterLab, a data analysis and code development
21
+ environment which can execute code extended with additional syntax for
22
+ interactive features, such as magics.
23
+ Only give raw strings back, do not format the response using backticks.
24
+ The output should be a single string, and should correspond to what a human users
25
+ would write.
26
+ Do not include the prompt in the output, only the string that should be appended to the current input.
27
+ `;
28
+ export class AIProviderRegistry {
29
+ constructor() {
30
+ this._currentProvider = null;
31
+ this._completer = null;
32
+ this._chatModel = null;
7
33
  this._name = 'None';
8
- this._modelChange = new Signal(this);
34
+ this._providerChanged = new Signal(this);
9
35
  this._chatError = '';
10
36
  this._completerError = '';
11
- this._completionProvider = new CompletionProvider({
12
- name: 'None',
13
- settings: {},
14
- requestCompletion: options.requestCompletion
15
- });
16
- options.completionProviderManager.registerInlineProvider(this._completionProvider);
37
+ this._providers = new Map();
38
+ }
39
+ /**
40
+ * Get the list of provider names.
41
+ */
42
+ get providers() {
43
+ return Array.from(this._providers.keys());
44
+ }
45
+ /**
46
+ * Add a new provider.
47
+ */
48
+ add(provider) {
49
+ if (this._providers.has(provider.name)) {
50
+ throw new Error(`A AI provider named '${provider.name}' is already registered`);
51
+ }
52
+ this._providers.set(provider.name, provider);
17
53
  }
18
- get name() {
54
+ /**
55
+ * Get the current provider name.
56
+ */
57
+ get currentName() {
19
58
  return this._name;
20
59
  }
21
60
  /**
22
61
  * Get the current completer of the completion provider.
23
62
  */
24
- get completer() {
25
- if (this._name === null) {
63
+ get currentCompleter() {
64
+ if (this._name === 'None') {
26
65
  return null;
27
66
  }
28
- return this._completionProvider.completer;
67
+ return this._completer;
29
68
  }
30
69
  /**
31
70
  * Get the current llm chat model.
32
71
  */
33
- get chatModel() {
34
- if (this._name === null) {
72
+ get currentChatModel() {
73
+ if (this._name === 'None') {
35
74
  return null;
36
75
  }
37
- return this._llmChatModel;
76
+ return this._chatModel;
77
+ }
78
+ /**
79
+ * Get the settings schema of a given provider.
80
+ */
81
+ getSettingsSchema(provider) {
82
+ var _a, _b;
83
+ return (((_b = (_a = this._providers.get(provider)) === null || _a === void 0 ? void 0 : _a.settingsSchema) === null || _b === void 0 ? void 0 : _b.properties) ||
84
+ {});
85
+ }
86
+ /**
87
+ * Get the instructions of a given provider.
88
+ */
89
+ getInstructions(provider) {
90
+ var _a;
91
+ return (_a = this._providers.get(provider)) === null || _a === void 0 ? void 0 : _a.instructions;
92
+ }
93
+ /**
94
+ * Format an error message from the current provider.
95
+ */
96
+ formatErrorMessage(error) {
97
+ var _a, _b;
98
+ if ((_a = this._currentProvider) === null || _a === void 0 ? void 0 : _a.errorMessage) {
99
+ return (_b = this._currentProvider) === null || _b === void 0 ? void 0 : _b.errorMessage(error);
100
+ }
101
+ if (error.message) {
102
+ return error.message;
103
+ }
104
+ return error;
38
105
  }
39
106
  /**
40
107
  * Get the current chat error;
@@ -49,36 +116,51 @@ export class AIProvider {
49
116
  return this._completerError;
50
117
  }
51
118
  /**
52
- * Set the models (chat model and completer).
53
- * Creates the models if the name has changed, otherwise only updates their config.
119
+ * Set the providers (chat model and completer).
120
+ * Creates the providers if the name has changed, otherwise only updates their config.
54
121
  *
55
- * @param name - the name of the model to use.
122
+ * @param name - the name of the provider to use.
56
123
  * @param settings - the settings for the models.
57
124
  */
58
- setModels(name, settings) {
59
- try {
60
- this._completionProvider.setCompleter(name, settings);
61
- this._completerError = '';
125
+ setProvider(name, settings) {
126
+ var _a, _b, _c;
127
+ this._currentProvider = (_a = this._providers.get(name)) !== null && _a !== void 0 ? _a : null;
128
+ if (((_b = this._currentProvider) === null || _b === void 0 ? void 0 : _b.completer) !== undefined) {
129
+ try {
130
+ this._completer = new this._currentProvider.completer({ ...settings });
131
+ this._completerError = '';
132
+ }
133
+ catch (e) {
134
+ this._completerError = e.message;
135
+ }
62
136
  }
63
- catch (e) {
64
- this._completerError = e.message;
137
+ else {
138
+ this._completer = null;
65
139
  }
66
- try {
67
- this._llmChatModel = getChatModel(name, settings);
68
- this._chatError = '';
140
+ if (((_c = this._currentProvider) === null || _c === void 0 ? void 0 : _c.chatModel) !== undefined) {
141
+ try {
142
+ this._chatModel = new this._currentProvider.chatModel({ ...settings });
143
+ this._chatError = '';
144
+ }
145
+ catch (e) {
146
+ this._chatError = e.message;
147
+ this._chatModel = null;
148
+ }
69
149
  }
70
- catch (e) {
71
- this._chatError = e.message;
72
- this._llmChatModel = null;
150
+ else {
151
+ this._chatModel = null;
73
152
  }
74
153
  this._name = name;
75
- this._modelChange.emit();
154
+ this._providerChanged.emit();
76
155
  }
77
- get modelChange() {
78
- return this._modelChange;
156
+ /**
157
+ * A signal emitted when the provider or its settings have changed.
158
+ */
159
+ get providerChanged() {
160
+ return this._providerChanged;
79
161
  }
80
162
  }
81
- (function (AIProvider) {
163
+ (function (AIProviderRegistry) {
82
164
  /**
83
165
  * This function indicates whether a key is writable in an object.
84
166
  * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript
@@ -93,7 +175,7 @@ export class AIProvider {
93
175
  {};
94
176
  return Boolean(desc.writable);
95
177
  }
96
- AIProvider.isWritable = isWritable;
178
+ AIProviderRegistry.isWritable = isWritable;
97
179
  /**
98
180
  * Update the config of a language model.
99
181
  * It only updates the writable attributes of the model.
@@ -113,5 +195,5 @@ export class AIProvider {
113
195
  }
114
196
  });
115
197
  }
116
- AIProvider.updateConfig = updateConfig;
117
- })(AIProvider || (AIProvider = {}));
198
+ AIProviderRegistry.updateConfig = updateConfig;
199
+ })(AIProviderRegistry || (AIProviderRegistry = {}));
@@ -0,0 +1,2 @@
1
+ import { IDict } from '../tokens';
2
+ export declare const instructions: IDict;
@@ -0,0 +1,44 @@
1
+ const chromeAiInstructions = `
2
+ <i class="fas fa-exclamation-triangle"></i> Support for ChromeAI is still experimental and only available in Google Chrome.
3
+
4
+ You can test ChromeAI is enabled in your browser by going to the following URL: https://chromeai.org/
5
+
6
+ Enable the proper flags in Google Chrome.
7
+
8
+ - chrome://flags/#prompt-api-for-gemini-nano
9
+ - Select: \`Enabled\`
10
+ - chrome://flags/#optimization-guide-on-device-model
11
+ - Select: \`Enabled BypassPrefRequirement\`
12
+ - chrome://components
13
+ - Click \`Check for Update\` on Optimization Guide On Device Model to download the model
14
+ - [Optional] chrome://flags/#text-safety-classifier
15
+
16
+ <img src="https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c" alt="A screenshot showing how to enable the ChromeAI flag in Google Chrome" width="500px">
17
+
18
+ Then restart Chrome for these changes to take effect.
19
+
20
+ <i class="fas fa-exclamation-triangle"></i> On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing).
21
+ During the download, ChromeAI may not be available via the extension.
22
+
23
+ <i class="fa fa-info-circle" aria-hidden="true"></i> For more information about Chrome Built-in AI: https://developer.chrome.com/docs/ai/get-started
24
+ `;
25
+ const mistralAIInstructions = `
26
+ <i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official MistralAI extension.
27
+
28
+ 1. Go to https://console.mistral.ai/api-keys/ and create an API key.
29
+
30
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/1-api-key.png" alt="Screenshot showing how to create an API key" width="500px">
31
+
32
+ 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`MistralAI\`
33
+ provider and the API key (required).
34
+
35
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/2-jupyterlab-settings.png" alt="Screenshot showing how to add the API key to the settings" width="500px">
36
+
37
+ 3. Open the chat, or use the inline completer
38
+
39
+ <img src="https://raw.githubusercontent.com/jupyterlite/ai/refs/heads/main/img/3-usage.png" alt="Screenshot showing how to use the chat" width="500px">
40
+ `;
41
+ export const instructions = {
42
+ ChromeAI: chromeAiInstructions,
43
+ MistralAI: mistralAIInstructions
44
+ };
@@ -0,0 +1,70 @@
1
+ import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
2
+ import { IFormRenderer } from '@jupyterlab/ui-components';
3
+ import type { FieldProps } from '@rjsf/utils';
4
+ import { JSONSchema7 } from 'json-schema';
5
+ import React from 'react';
6
+ import { IAIProviderRegistry, IDict } from '../tokens';
7
+ export declare const aiSettingsRenderer: (options: {
8
+ providerRegistry: IAIProviderRegistry;
9
+ rmRegistry?: IRenderMimeRegistry;
10
+ }) => IFormRenderer;
11
+ export interface ISettingsFormStates {
12
+ schema: JSONSchema7;
13
+ instruction: HTMLElement | null;
14
+ }
15
+ export declare class AiSettings extends React.Component<FieldProps, ISettingsFormStates> {
16
+ constructor(props: FieldProps);
17
+ /**
18
+ * Get the current provider from the local storage.
19
+ */
20
+ getCurrentProvider(): string;
21
+ /**
22
+ * Save the current provider to the local storage.
23
+ */
24
+ saveCurrentProvider(): void;
25
+ /**
26
+ * Get settings from local storage for a given provider.
27
+ */
28
+ getSettings(): IDict<any>;
29
+ /**
30
+ * Save settings in local storage for a given provider.
31
+ */
32
+ saveSettings(value: IDict<any>): void;
33
+ /**
34
+ * Update the UI schema of the form.
35
+ * Currently used to hide API keys.
36
+ */
37
+ private _updateUiSchema;
38
+ /**
39
+ * Build the schema for a given provider.
40
+ */
41
+ private _buildSchema;
42
+ /**
43
+ * Update the schema state for the given provider, which triggers the re-rendering of
44
+ * the component.
45
+ */
46
+ private _updateSchema;
47
+ /**
48
+ * Render the markdown instructions for the current provider.
49
+ */
50
+ private _renderInstruction;
51
+ /**
52
+ * Triggered when the provider has changed, to update the schema and values.
53
+ * Update the Jupyterlab settings accordingly.
54
+ */
55
+ private _onProviderChanged;
56
+ /**
57
+ * Triggered when the form value has changed, to update the current settings and save
58
+ * it in local storage.
59
+ * Update the Jupyterlab settings accordingly.
60
+ */
61
+ private _onFormChange;
62
+ render(): JSX.Element;
63
+ private _providerRegistry;
64
+ private _provider;
65
+ private _providerSchema;
66
+ private _rmRegistry;
67
+ private _currentSettings;
68
+ private _uiSchema;
69
+ private _settings;
70
+ }
@@ -0,0 +1,190 @@
1
+ import { FormComponent } from '@jupyterlab/ui-components';
2
+ import { JSONExt } from '@lumino/coreutils';
3
+ import validator from '@rjsf/validator-ajv8';
4
+ import React from 'react';
5
+ import baseSettings from './schemas/base.json';
6
+ const MD_MIME_TYPE = 'text/markdown';
7
+ const STORAGE_NAME = '@jupyterlite/ai:settings';
8
+ const INSTRUCTION_CLASS = 'jp-AISettingsInstructions';
9
+ export const aiSettingsRenderer = (options) => {
10
+ return {
11
+ fieldRenderer: (props) => {
12
+ props.formContext = { ...props.formContext, ...options };
13
+ return React.createElement(AiSettings, { ...props });
14
+ }
15
+ };
16
+ };
17
+ const WrappedFormComponent = (props) => {
18
+ return React.createElement(FormComponent, { ...props, validator: validator });
19
+ };
20
+ export class AiSettings extends React.Component {
21
+ constructor(props) {
22
+ var _a, _b;
23
+ super(props);
24
+ /**
25
+ * Triggered when the provider has changed, to update the schema and values.
26
+ * Update the Jupyterlab settings accordingly.
27
+ */
28
+ this._onProviderChanged = (e) => {
29
+ const provider = e.formData.provider;
30
+ if (provider === this._currentSettings.provider) {
31
+ return;
32
+ }
33
+ this._provider = provider;
34
+ this.saveCurrentProvider();
35
+ this._currentSettings = this.getSettings();
36
+ this._updateSchema();
37
+ this._renderInstruction();
38
+ this._settings
39
+ .set('AIprovider', { provider: this._provider, ...this._currentSettings })
40
+ .catch(console.error);
41
+ };
42
+ /**
43
+ * Triggered when the form value has changed, to update the current settings and save
44
+ * it in local storage.
45
+ * Update the Jupyterlab settings accordingly.
46
+ */
47
+ this._onFormChange = (e) => {
48
+ this._currentSettings = JSONExt.deepCopy(e.formData);
49
+ this.saveSettings(this._currentSettings);
50
+ this._settings
51
+ .set('AIprovider', { provider: this._provider, ...this._currentSettings })
52
+ .catch(console.error);
53
+ };
54
+ this._currentSettings = { provider: 'None' };
55
+ this._uiSchema = {};
56
+ if (!props.formContext.providerRegistry) {
57
+ throw new Error('The provider registry is needed to enable the jupyterlite-ai settings panel');
58
+ }
59
+ this._providerRegistry = props.formContext.providerRegistry;
60
+ this._rmRegistry = (_a = props.formContext.rmRegistry) !== null && _a !== void 0 ? _a : null;
61
+ this._settings = props.formContext.settings;
62
+ // Initialize the providers schema.
63
+ const providerSchema = JSONExt.deepCopy(baseSettings);
64
+ providerSchema.properties.provider = {
65
+ type: 'string',
66
+ title: 'Provider',
67
+ description: 'The AI provider to use for chat and completion',
68
+ default: 'None',
69
+ enum: ['None'].concat(this._providerRegistry.providers)
70
+ };
71
+ this._providerSchema = providerSchema;
72
+ // Check if there are saved values in local storage, otherwise use the settings from
73
+ // the setting registry (falls back to the defaults if there are no user settings).
74
+ const storageSettings = localStorage.getItem(STORAGE_NAME);
75
+ if (storageSettings === null) {
76
+ const labSettings = this._settings.get('AIprovider').composite;
77
+ if (labSettings && Object.keys(labSettings).includes('provider')) {
78
+ // Get the provider name.
79
+ const provider = (_b = Object.entries(labSettings).find(v => v[0] === 'provider')) === null || _b === void 0 ? void 0 : _b[1];
80
+ // Save the settings.
81
+ const settings = {
82
+ _current: provider
83
+ };
84
+ settings[provider] = labSettings;
85
+ localStorage.setItem(STORAGE_NAME, JSON.stringify(settings));
86
+ }
87
+ }
88
+ // Initialize the settings from the saved ones.
89
+ this._provider = this.getCurrentProvider();
90
+ this._currentSettings = this.getSettings();
91
+ // Initialize the schema.
92
+ const schema = this._buildSchema();
93
+ this.state = { schema, instruction: null };
94
+ this._renderInstruction();
95
+ // Update the setting registry.
96
+ this._settings
97
+ .set('AIprovider', this._currentSettings)
98
+ .catch(console.error);
99
+ }
100
+ /**
101
+ * Get the current provider from the local storage.
102
+ */
103
+ getCurrentProvider() {
104
+ var _a;
105
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}');
106
+ return (_a = settings['_current']) !== null && _a !== void 0 ? _a : 'None';
107
+ }
108
+ /**
109
+ * Save the current provider to the local storage.
110
+ */
111
+ saveCurrentProvider() {
112
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}');
113
+ settings['_current'] = this._provider;
114
+ localStorage.setItem(STORAGE_NAME, JSON.stringify(settings));
115
+ }
116
+ /**
117
+ * Get settings from local storage for a given provider.
118
+ */
119
+ getSettings() {
120
+ var _a;
121
+ const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}');
122
+ return (_a = settings[this._provider]) !== null && _a !== void 0 ? _a : { provider: this._provider };
123
+ }
124
+ /**
125
+ * Save settings in local storage for a given provider.
126
+ */
127
+ saveSettings(value) {
128
+ var _a;
129
+ const settings = JSON.parse((_a = localStorage.getItem(STORAGE_NAME)) !== null && _a !== void 0 ? _a : '{}');
130
+ settings[this._provider] = value;
131
+ localStorage.setItem(STORAGE_NAME, JSON.stringify(settings));
132
+ }
133
+ /**
134
+ * Update the UI schema of the form.
135
+ * Currently used to hide API keys.
136
+ */
137
+ _updateUiSchema(key) {
138
+ if (key.toLowerCase().includes('key')) {
139
+ this._uiSchema[key] = { 'ui:widget': 'password' };
140
+ }
141
+ }
142
+ /**
143
+ * Build the schema for a given provider.
144
+ */
145
+ _buildSchema() {
146
+ const schema = JSONExt.deepCopy(baseSettings);
147
+ this._uiSchema = {};
148
+ const settingsSchema = this._providerRegistry.getSettingsSchema(this._provider);
149
+ if (settingsSchema) {
150
+ Object.entries(settingsSchema).forEach(([key, value]) => {
151
+ schema.properties[key] = value;
152
+ this._updateUiSchema(key);
153
+ });
154
+ }
155
+ return schema;
156
+ }
157
+ /**
158
+ * Update the schema state for the given provider, which triggers the re-rendering of
159
+ * the component.
160
+ */
161
+ _updateSchema() {
162
+ const schema = this._buildSchema();
163
+ this.setState({ schema });
164
+ }
165
+ /**
166
+ * Render the markdown instructions for the current provider.
167
+ */
168
+ async _renderInstruction() {
169
+ let instructions = this._providerRegistry.getInstructions(this._provider);
170
+ if (!this._rmRegistry || !instructions) {
171
+ this.setState({ instruction: null });
172
+ return;
173
+ }
174
+ instructions = `---\n\n${instructions}\n\n---`;
175
+ const renderer = this._rmRegistry.createRenderer(MD_MIME_TYPE);
176
+ const model = this._rmRegistry.createModel({
177
+ data: { [MD_MIME_TYPE]: instructions }
178
+ });
179
+ await renderer.renderModel(model);
180
+ this.setState({ instruction: renderer.node });
181
+ }
182
+ render() {
183
+ return (React.createElement(React.Fragment, null,
184
+ React.createElement(WrappedFormComponent, { formData: { provider: this._provider }, schema: this._providerSchema, onChange: this._onProviderChanged }),
185
+ this.state.instruction !== null && (React.createElement("details", null,
186
+ React.createElement("summary", { className: INSTRUCTION_CLASS }, "Instructions"),
187
+ React.createElement("span", { ref: node => node && node.replaceChildren(this.state.instruction) }))),
188
+ React.createElement(WrappedFormComponent, { formData: this._currentSettings, schema: this.state.schema, onChange: this._onFormChange, uiSchema: this._uiSchema })));
189
+ }
190
+ }
@@ -0,0 +1,70 @@
1
+ {
2
+ "$schema": "http://json-schema.org/draft-07/schema#",
3
+ "type": "object",
4
+ "properties": {
5
+ "temperature": {
6
+ "type": "number",
7
+ "description": "Amount of randomness injected into the response. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and temp closer to 1 for creative and generative tasks."
8
+ },
9
+ "topK": {
10
+ "type": "number",
11
+ "description": "Only sample from the top K options for each subsequent token. Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it."
12
+ },
13
+ "topP": {
14
+ "type": "number",
15
+ "description": "Does nucleus sampling, in which we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p. Defaults to -1, which disables it. Note that you should either alter temperature or top_p, but not both."
16
+ },
17
+ "maxTokens": {
18
+ "type": "number",
19
+ "description": "A maximum number of tokens to generate before stopping."
20
+ },
21
+ "maxTokensToSample": {
22
+ "type": "number",
23
+ "description": "A maximum number of tokens to generate before stopping.",
24
+ "deprecated": "Use \"maxTokens\" instead."
25
+ },
26
+ "stopSequences": {
27
+ "type": "array",
28
+ "items": {
29
+ "type": "string"
30
+ },
31
+ "description": "A list of strings upon which to stop generating. You probably want `[\"\\n\\nHuman:\"]`, as that's the cue for the next turn in the dialog agent."
32
+ },
33
+ "streaming": {
34
+ "type": "boolean",
35
+ "description": "Whether to stream the results or not"
36
+ },
37
+ "anthropicApiKey": {
38
+ "type": "string",
39
+ "description": "Anthropic API key"
40
+ },
41
+ "apiKey": {
42
+ "type": "string",
43
+ "description": "Anthropic API key"
44
+ },
45
+ "anthropicApiUrl": {
46
+ "type": "string",
47
+ "description": "Anthropic API URL"
48
+ },
49
+ "modelName": {
50
+ "type": "string",
51
+ "deprecated": "Use \"model\" instead"
52
+ },
53
+ "model": {
54
+ "type": "string",
55
+ "description": "Model name to use"
56
+ },
57
+ "invocationKwargs": {
58
+ "type": "object",
59
+ "description": "Holds any additional parameters that are valid to pass to `anthropic.messages` (https://console.anthropic.com/docs/api/reference) that are not explicitly specified on this class."
60
+ },
61
+ "streamUsage": {
62
+ "type": "boolean",
63
+ "description": "Whether or not to include token usage data in streamed chunks.",
64
+ "default": false
65
+ }
66
+ },
67
+ "additionalProperties": false,
68
+ "description": "Input to AnthropicChat class.",
69
+ "definitions": {}
70
+ }
@@ -0,0 +1,21 @@
1
+ {
2
+ "$schema": "http://json-schema.org/draft-07/schema#",
3
+ "type": "object",
4
+ "properties": {
5
+ "concurrency": {
6
+ "type": "number",
7
+ "deprecated": "Use `maxConcurrency` instead"
8
+ },
9
+ "topK": {
10
+ "type": "number"
11
+ },
12
+ "temperature": {
13
+ "type": "number"
14
+ },
15
+ "systemPrompt": {
16
+ "type": "string"
17
+ }
18
+ },
19
+ "additionalProperties": false,
20
+ "definitions": {}
21
+ }