@promptbook/remote-server 0.88.0 → 0.89.0-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -0
- package/esm/index.es.js +11 -4
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +2 -2
- package/esm/typings/src/_packages/types.index.d.ts +10 -0
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +7 -0
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
- package/esm/typings/src/playground/BrjappConnector.d.ts +64 -0
- package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
- package/esm/typings/src/playground/playground.d.ts +5 -0
- package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
- package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
- package/esm/typings/src/types/typeAliases.d.ts +2 -2
- package/package.json +2 -2
- package/umd/index.umd.js +11 -4
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts
CHANGED
@@ -37,7 +37,8 @@ export type PromptbookServer_AnonymousIdentification = {
     /**
      * Identifier of the end user
      *
-     * Note:
+     * Note: This can be either some id or email or any other identifier
+     * Note: In anonymous mode, this is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
      */
     readonly userId?: string_user_id;
package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts
CHANGED

@@ -58,15 +58,27 @@ export type ApplicationRemoteServerOptions<TCustomOptions> = {
 };
 export type ApplicationRemoteServerClientOptions<TCustomOptions> = {
     /**
-     *
+     * Identifier of the application
+     *
+     * Note: This is usefull when you use Promptbook remote server for multiple apps/frontends, if its used just for single app, use here just "app" or "your-app-name"
+     * Note: This can be some id or some semantic name like "email-agent"
      */
     readonly appId: string_app_id | null;
     /**
-     *
+     * Identifier of the end user
+     *
+     * Note: This can be either some id or email or any other identifier
+     * Note: This is also passed to the certain model providers to identify misuse
      */
     readonly userId?: string_user_id;
     /**
-     *
+     * Token of the user to verify its identity
+     *
+     * Note: This is passed for example to `createLlmExecutionTools`
+     */
+    readonly userToken?: string_user_id;
+    /**
+     * Additional arbitrary options to identify the client or to pass custom metadata
      */
     readonly customOptions?: TCustomOptions;
 };
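To make the new client-identification fields concrete, here is a minimal sketch (not part of the package diff) of how a client application might fill in the extended ApplicationRemoteServerClientOptions shape. The import path and the MyCustomOptions type are assumptions; only the field names and their documented meaning come from the hunk above.

// Illustrative sketch only; the import path is an assumption (these types appear
// to be added to `types.index.d.ts` in 0.89.0-1).
import type { ApplicationRemoteServerClientOptions } from '@promptbook/types';

type MyCustomOptions = { tenant: string }; // hypothetical custom-metadata shape

const clientOptions: ApplicationRemoteServerClientOptions<MyCustomOptions> = {
    appId: 'email-agent', // some id or a semantic name; use 'app' when the server serves a single app
    userId: 'alice@example.com', // string_user_id now also accepts an email
    userToken: 'opaque-user-token', // new field: verifies the user's identity, passed e.g. to `createLlmExecutionTools`
    customOptions: { tenant: 'acme' }, // additional arbitrary client metadata
};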
package/esm/typings/src/types/typeAliases.d.ts
CHANGED

@@ -433,13 +433,13 @@ export type string_uuid = string & {
  *
  * @@@
  */
-export type string_app_id = id;
+export type string_app_id = id | 'app';
 /**
  * End user identifier
  *
  * @@@
  */
-export type string_user_id = id;
+export type string_user_id = id | string_email;
 /**
  * Semantic helper
  *
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/remote-server",
-    "version": "0.88.0",
+    "version": "0.89.0-1",
     "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
     "private": false,
     "sideEffects": false,
@@ -47,7 +47,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.88.0"
+        "@promptbook/core": "0.89.0-1"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -28,7 +28,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
+const PROMPTBOOK_ENGINE_VERSION = '0.89.0-1';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2527,8 +2527,9 @@
  * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
-function countTotalUsage(llmTools) {
+function countUsage(llmTools) {
     let totalUsage = ZERO_USAGE;
+    const spending = new rxjs.Subject();
     const proxyTools = {
         get title() {
             // TODO: [🧠] Maybe put here some suffix
@@ -2538,12 +2539,15 @@
             // TODO: [🧠] Maybe put here some suffix
             return llmTools.description;
         },
-
+        checkConfiguration() {
             return /* not await */ llmTools.checkConfiguration();
         },
         listModels() {
             return /* not await */ llmTools.listModels();
         },
+        spending() {
+            return spending.asObservable();
+        },
         getTotalUsage() {
             // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
             return totalUsage;
@@ -2554,6 +2558,7 @@
             // console.info('[🚕] callChatModel through countTotalUsage');
             const promptResult = await llmTools.callChatModel(prompt);
             totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
             return promptResult;
         };
     }
@@ -2562,6 +2567,7 @@
             // console.info('[🚕] callCompletionModel through countTotalUsage');
             const promptResult = await llmTools.callCompletionModel(prompt);
             totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
             return promptResult;
         };
     }
@@ -2570,6 +2576,7 @@
             // console.info('[🚕] callEmbeddingModel through countTotalUsage');
             const promptResult = await llmTools.callEmbeddingModel(prompt);
             totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
             return promptResult;
         };
     }
@@ -3852,7 +3859,7 @@
     // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    const llmToolsWithUsage = countTotalUsage(llmTools);
+    const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
     TODO: [🧠][🪑][🔃] Should this be done or not