@promptbook/remote-client 0.59.0-29 → 0.59.0-30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1 -1
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +8 -0
- package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -0
- package/esm/typings/src/types/ModelRequirements.d.ts +1 -1
- package/esm/typings/src/types/PromptbookJson/MaterialKnowledgePieceJson.d.ts +2 -2
- package/package.json +2 -2
- package/umd/index.umd.js +1 -1
- package/umd/typings/src/config.d.ts +1 -1
- package/umd/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -0
- package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +8 -0
- package/umd/typings/src/llm-providers/openai/openai-models.d.ts +1 -0
- package/umd/typings/src/types/ModelRequirements.d.ts +1 -1
- package/umd/typings/src/types/PromptbookJson/MaterialKnowledgePieceJson.d.ts +2 -2
- /package/esm/typings/promptbook-library/{promptbook-library.d.ts → index.d.ts} +0 -0
- /package/umd/typings/promptbook-library/{promptbook-library.d.ts → index.d.ts} +0 -0
package/esm/index.es.js
CHANGED
@@ -156,7 +156,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.59.0-
+var PROMPTBOOK_VERSION = '0.59.0-29';

 export { PROMPTBOOK_VERSION, RemoteLlmExecutionTools };
 //# sourceMappingURL=index.es.js.map

package/esm/typings/src/config.d.ts
CHANGED

@@ -9,4 +9,4 @@ export declare const CHARACTER_LOOP_LIMIT = 100000;
 /**
  * The name of the builded promptbook library made by CLI `promptbook make` and for lookup in `createLibraryFromDirectory`
  */
-export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "
+export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "index";

package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts
CHANGED

@@ -14,6 +14,7 @@ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
     };
 }>;
 /**
+ * TODO: !!!! Add embedding models OR Anthropic has only chat+completion models?
  * TODO: [🧠] Some mechanism to propagate unsureness
  * TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
  * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing

package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts
CHANGED

@@ -27,6 +27,10 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
      * Calls OpenAI API to use a complete model.
      */
     gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
+    /**
+     * !!!!
+     */
+    embed(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
     /**
      * Default model for chat variant.
      */
@@ -35,6 +39,10 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
      * Default model for completion variant.
      */
     private getDefaultCompletionModel;
+    /**
+     * Default model for completion variant.
+     */
+    private getDefaultEmbeddingModel;
     /**
      * List all available OpenAI models that can be used
      */
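The new embed() declaration mirrors gptComplete() and resolves to a PromptCompletionResult. Below is a minimal caller sketch based only on these typings; the import path, the constructor options, and the modelRequirements fields used are assumptions that this diff does not confirm.

import { OpenAiExecutionTools } from '@promptbook/openai'; // assumed entry point, not part of this diff

async function embedExample() {
    // Hypothetical setup; only the embed() signature comes from OpenAiExecutionTools.d.ts above
    const tools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY! });

    // embed() accepts the same Pick<Prompt, 'content' | 'modelRequirements'> shape as gptComplete()
    const result = await tools.embed({
        content: 'Text to turn into an embedding vector',
        modelRequirements: { modelVariant: 'EMBEDDING' }, // assumes ModelRequirements exposes a modelVariant field
    });

    return result; // PromptCompletionResult per the declaration
}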

package/esm/typings/src/llm-providers/openai/openai-models.d.ts
CHANGED

@@ -15,6 +15,7 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
     };
 }>;
 /**
+ * TODO: !!!! Add embedding models
  * TODO: [🧠] Some mechanism to propagate unsureness
  * TODO: [🕚][👮♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
  * TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...

package/esm/typings/src/types/ModelRequirements.d.ts
CHANGED

@@ -1,5 +1,5 @@
 import type { string_model_name } from './typeAliases';
-export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT"];
+export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
 /**
  * Model variant describes the very general type of the model
  *
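Because MODEL_VARIANTS is declared as a readonly tuple, the matching union type can be derived with an indexed access. Whether the package itself derives its ModelVariant type this way is not visible in this diff, so treat the following as a sketch; the import path is assumed.

import { MODEL_VARIANTS } from '@promptbook/types'; // assumed entry point, not shown in this diff

// Derive the union from the readonly tuple declared above;
// after this change it widens from 'COMPLETION' | 'CHAT' to also include 'EMBEDDING'.
type ModelVariant = (typeof MODEL_VARIANTS)[number]; // 'COMPLETION' | 'CHAT' | 'EMBEDDING'

const variant: ModelVariant = 'EMBEDDING'; // now type-checks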

package/esm/typings/src/types/PromptbookJson/MaterialKnowledgePieceJson.d.ts
CHANGED

@@ -1,4 +1,3 @@
-import type { IVectorData } from 'xyzt';
 import type { string_keyword } from '../../utils/normalization/IKeywords';
 import type { string_href } from '../typeAliases';
 import type { string_markdown } from '../typeAliases';
@@ -12,7 +11,7 @@ export type MaterialKnowledgePieceJson = {
     readonly keywords: Array<string_keyword>;
     readonly index: Array<{
         modelName: string_model_name;
-        position:
+        position: Array<number>;
     }>;
     readonly sources: Array<{
         title: string_markdown_text;
@@ -20,6 +19,7 @@ export type MaterialKnowledgePieceJson = {
     }>;
 };
 /**
+ * TODO: !!! Use or uninstall xyzt
  * !!!! Annotate
  * TODO: [🧠][🦪] Maybe allow internal linkes between (Material)KnowledgePieces withing the KnowledgeJson and maybe require to explicitelly reference the source of the knowledge
  * TODO: [🧠] Make some non-material sources like external search engine or dialog to user
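With the IVectorData import from xyzt removed, an embedding's position in the per-model index is now a plain number array. A hedged sketch of one index entry, covering only the fields visible in the hunks above; the concrete model name and values are illustrative and not taken from this diff.

// One entry of MaterialKnowledgePieceJson['index'] after this change;
// string_model_name is approximated here by a plain string.
const indexEntry: { modelName: string; position: Array<number> } = {
    modelName: 'text-embedding-3-small', // illustrative model name only
    position: [0.0123, -0.0456, 0.0789], // plain number[] replaces IVectorData from 'xyzt'
};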
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@promptbook/remote-client",
-  "version": "0.59.0-
+  "version": "0.59.0-30",
   "description": "Library to supercharge your use of large language models",
   "private": false,
   "sideEffects": false,
@@ -47,7 +47,7 @@
     }
   ],
   "peerDependencies": {
-    "@promptbook/core": "0.59.0-
+    "@promptbook/core": "0.59.0-30"
   },
   "main": "./umd/index.umd.js",
   "module": "./esm/index.es.js",
package/umd/index.umd.js
CHANGED
@@ -160,7 +160,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.59.0-
+var PROMPTBOOK_VERSION = '0.59.0-29';

 exports.PROMPTBOOK_VERSION = PROMPTBOOK_VERSION;
 exports.RemoteLlmExecutionTools = RemoteLlmExecutionTools;

package/umd/typings/src/config.d.ts
CHANGED

@@ -9,4 +9,4 @@ export declare const CHARACTER_LOOP_LIMIT = 100000;
 /**
  * The name of the builded promptbook library made by CLI `promptbook make` and for lookup in `createLibraryFromDirectory`
  */
-export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "
+export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "index";

package/umd/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts
CHANGED

@@ -14,6 +14,7 @@ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
     };
 }>;
 /**
+ * TODO: !!!! Add embedding models OR Anthropic has only chat+completion models?
  * TODO: [🧠] Some mechanism to propagate unsureness
  * TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
  * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing

package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts
CHANGED

@@ -27,6 +27,10 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
      * Calls OpenAI API to use a complete model.
      */
     gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
+    /**
+     * !!!!
+     */
+    embed(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
     /**
      * Default model for chat variant.
      */
@@ -35,6 +39,10 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
      * Default model for completion variant.
      */
     private getDefaultCompletionModel;
+    /**
+     * Default model for completion variant.
+     */
+    private getDefaultEmbeddingModel;
     /**
      * List all available OpenAI models that can be used
      */

package/umd/typings/src/llm-providers/openai/openai-models.d.ts
CHANGED

@@ -15,6 +15,7 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
     };
 }>;
 /**
+ * TODO: !!!! Add embedding models
  * TODO: [🧠] Some mechanism to propagate unsureness
  * TODO: [🕚][👮♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
  * TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...

package/umd/typings/src/types/ModelRequirements.d.ts
CHANGED

@@ -1,5 +1,5 @@
 import type { string_model_name } from './typeAliases';
-export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT"];
+export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
 /**
  * Model variant describes the very general type of the model
  *

package/umd/typings/src/types/PromptbookJson/MaterialKnowledgePieceJson.d.ts
CHANGED

@@ -1,4 +1,3 @@
-import type { IVectorData } from 'xyzt';
 import type { string_keyword } from '../../utils/normalization/IKeywords';
 import type { string_href } from '../typeAliases';
 import type { string_markdown } from '../typeAliases';
@@ -12,7 +11,7 @@ export type MaterialKnowledgePieceJson = {
     readonly keywords: Array<string_keyword>;
     readonly index: Array<{
         modelName: string_model_name;
-        position:
+        position: Array<number>;
     }>;
     readonly sources: Array<{
         title: string_markdown_text;
@@ -20,6 +19,7 @@ export type MaterialKnowledgePieceJson = {
     }>;
 };
 /**
+ * TODO: !!! Use or uninstall xyzt
  * !!!! Annotate
  * TODO: [🧠][🦪] Maybe allow internal linkes between (Material)KnowledgePieces withing the KnowledgeJson and maybe require to explicitelly reference the source of the knowledge
  * TODO: [🧠] Make some non-material sources like external search engine or dialog to user

/package/esm/typings/promptbook-library/{promptbook-library.d.ts → index.d.ts}
File without changes

/package/umd/typings/promptbook-library/{promptbook-library.d.ts → index.d.ts}
File without changes