@promptbook/node 0.88.0 → 0.89.0-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -0
- package/esm/index.es.js +11 -4
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +2 -2
- package/esm/typings/src/_packages/types.index.d.ts +10 -0
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +7 -0
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
- package/esm/typings/src/playground/BrjappConnector.d.ts +64 -0
- package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
- package/esm/typings/src/playground/playground.d.ts +5 -0
- package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
- package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
- package/esm/typings/src/types/typeAliases.d.ts +2 -2
- package/package.json +2 -2
- package/umd/index.umd.js +11 -4
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts
CHANGED
|
@@ -37,7 +37,8 @@ export type PromptbookServer_AnonymousIdentification = {
|
|
|
37
37
|
/**
|
|
38
38
|
* Identifier of the end user
|
|
39
39
|
*
|
|
40
|
-
* Note:
|
|
40
|
+
* Note: This can be either some id or email or any other identifier
|
|
41
|
+
* Note: In anonymous mode, this is passed to the certain model providers to identify misuse
|
|
41
42
|
* Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
|
|
42
43
|
*/
|
|
43
44
|
readonly userId?: string_user_id;
|
|
@@ -58,15 +58,27 @@ export type ApplicationRemoteServerOptions<TCustomOptions> = {
|
|
|
58
58
|
};
|
|
59
59
|
export type ApplicationRemoteServerClientOptions<TCustomOptions> = {
|
|
60
60
|
/**
|
|
61
|
-
*
|
|
61
|
+
* Identifier of the application
|
|
62
|
+
*
|
|
63
|
+
* Note: This is usefull when you use Promptbook remote server for multiple apps/frontends, if its used just for single app, use here just "app" or "your-app-name"
|
|
64
|
+
* Note: This can be some id or some semantic name like "email-agent"
|
|
62
65
|
*/
|
|
63
66
|
readonly appId: string_app_id | null;
|
|
64
67
|
/**
|
|
65
|
-
*
|
|
68
|
+
* Identifier of the end user
|
|
69
|
+
*
|
|
70
|
+
* Note: This can be either some id or email or any other identifier
|
|
71
|
+
* Note: This is also passed to the certain model providers to identify misuse
|
|
66
72
|
*/
|
|
67
73
|
readonly userId?: string_user_id;
|
|
68
74
|
/**
|
|
69
|
-
*
|
|
75
|
+
* Token of the user to verify its identity
|
|
76
|
+
*
|
|
77
|
+
* Note: This is passed for example to `createLlmExecutionTools`
|
|
78
|
+
*/
|
|
79
|
+
readonly userToken?: string_user_id;
|
|
80
|
+
/**
|
|
81
|
+
* Additional arbitrary options to identify the client or to pass custom metadata
|
|
70
82
|
*/
|
|
71
83
|
readonly customOptions?: TCustomOptions;
|
|
72
84
|
};
|
|
@@ -433,13 +433,13 @@ export type string_uuid = string & {
|
|
|
433
433
|
*
|
|
434
434
|
* @@@
|
|
435
435
|
*/
|
|
436
|
-
export type string_app_id = id;
|
|
436
|
+
export type string_app_id = id | 'app';
|
|
437
437
|
/**
|
|
438
438
|
* End user identifier
|
|
439
439
|
*
|
|
440
440
|
* @@@
|
|
441
441
|
*/
|
|
442
|
-
export type string_user_id = id;
|
|
442
|
+
export type string_user_id = id | string_email;
|
|
443
443
|
/**
|
|
444
444
|
* Semantic helper
|
|
445
445
|
*
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@promptbook/node",
|
|
3
|
-
"version": "0.88.0",
|
|
3
|
+
"version": "0.89.0-1",
|
|
4
4
|
"description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
|
|
5
5
|
"private": false,
|
|
6
6
|
"sideEffects": false,
|
|
@@ -47,7 +47,7 @@
|
|
|
47
47
|
"module": "./esm/index.es.js",
|
|
48
48
|
"typings": "./esm/typings/src/_packages/node.index.d.ts",
|
|
49
49
|
"peerDependencies": {
|
|
50
|
-
"@promptbook/core": "0.88.0"
|
|
50
|
+
"@promptbook/core": "0.89.0-1"
|
|
51
51
|
},
|
|
52
52
|
"dependencies": {
|
|
53
53
|
"colors": "1.4.0",
|
package/umd/index.umd.js
CHANGED
|
@@ -46,7 +46,7 @@
|
|
|
46
46
|
* @generated
|
|
47
47
|
* @see https://github.com/webgptorg/promptbook
|
|
48
48
|
*/
|
|
49
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
|
|
49
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.89.0-1';
|
|
50
50
|
/**
|
|
51
51
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
52
52
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -4492,8 +4492,9 @@
|
|
|
4492
4492
|
* @returns LLM tools with same functionality with added total cost counting
|
|
4493
4493
|
* @public exported from `@promptbook/core`
|
|
4494
4494
|
*/
|
|
4495
|
-
function countTotalUsage(llmTools) {
|
|
4495
|
+
function countUsage(llmTools) {
|
|
4496
4496
|
let totalUsage = ZERO_USAGE;
|
|
4497
|
+
const spending = new rxjs.Subject();
|
|
4497
4498
|
const proxyTools = {
|
|
4498
4499
|
get title() {
|
|
4499
4500
|
// TODO: [🧠] Maybe put here some suffix
|
|
@@ -4503,12 +4504,15 @@
|
|
|
4503
4504
|
// TODO: [🧠] Maybe put here some suffix
|
|
4504
4505
|
return llmTools.description;
|
|
4505
4506
|
},
|
|
4506
|
-
|
|
4507
|
+
checkConfiguration() {
|
|
4507
4508
|
return /* not await */ llmTools.checkConfiguration();
|
|
4508
4509
|
},
|
|
4509
4510
|
listModels() {
|
|
4510
4511
|
return /* not await */ llmTools.listModels();
|
|
4511
4512
|
},
|
|
4513
|
+
spending() {
|
|
4514
|
+
return spending.asObservable();
|
|
4515
|
+
},
|
|
4512
4516
|
getTotalUsage() {
|
|
4513
4517
|
// <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
|
|
4514
4518
|
return totalUsage;
|
|
@@ -4519,6 +4523,7 @@
|
|
|
4519
4523
|
// console.info('[🚕] callChatModel through countTotalUsage');
|
|
4520
4524
|
const promptResult = await llmTools.callChatModel(prompt);
|
|
4521
4525
|
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
4526
|
+
spending.next(promptResult.usage);
|
|
4522
4527
|
return promptResult;
|
|
4523
4528
|
};
|
|
4524
4529
|
}
|
|
@@ -4527,6 +4532,7 @@
|
|
|
4527
4532
|
// console.info('[🚕] callCompletionModel through countTotalUsage');
|
|
4528
4533
|
const promptResult = await llmTools.callCompletionModel(prompt);
|
|
4529
4534
|
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
4535
|
+
spending.next(promptResult.usage);
|
|
4530
4536
|
return promptResult;
|
|
4531
4537
|
};
|
|
4532
4538
|
}
|
|
@@ -4535,6 +4541,7 @@
|
|
|
4535
4541
|
// console.info('[🚕] callEmbeddingModel through countTotalUsage');
|
|
4536
4542
|
const promptResult = await llmTools.callEmbeddingModel(prompt);
|
|
4537
4543
|
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
4544
|
+
spending.next(promptResult.usage);
|
|
4538
4545
|
return promptResult;
|
|
4539
4546
|
};
|
|
4540
4547
|
}
|
|
@@ -5347,7 +5354,7 @@
|
|
|
5347
5354
|
// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
|
|
5348
5355
|
const _llms = arrayableToArray(tools.llm);
|
|
5349
5356
|
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
5350
|
-
const llmToolsWithUsage = countTotalUsage(llmTools);
|
|
5357
|
+
const llmToolsWithUsage = countUsage(llmTools);
|
|
5351
5358
|
// <- TODO: [🌯]
|
|
5352
5359
|
/*
|
|
5353
5360
|
TODO: [🧠][🪑][🔃] Should this be done or not
|