@promptbook/remote-server 0.75.10 → 0.77.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -23,6 +23,10 @@
 
 
 
+<blockquote style="color: #ff8811">
+    <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
+</blockquote>
+
 ## 📦 Package `@promptbook/remote-server`
 
 - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -236,6 +240,8 @@ Or you can install them separately:
 - **[@promptbook/execute-javascript](https://www.npmjs.com/package/@promptbook/execute-javascript)** - Execution tools for javascript inside promptbooks
 - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
 - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
+- **[@promptbook/vercel](https://www.npmjs.com/package/@promptbook/vercel)** - Adapter for Vercel functionalities
+- **[@promptbook/gemini](https://www.npmjs.com/package/@promptbook/gemini)** - Integration with Google's Gemini API
 - **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
 - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK
 - **[@promptbook/fake-llm](https://www.npmjs.com/package/@promptbook/fake-llm)** - Mocked execution tools for testing the library and saving the tokens
@@ -255,11 +261,6 @@ Or you can install them separately:
 
 ## 📚 Dictionary
 
-
-
-
-
-
 ### 📚 Dictionary
 
 The following glossary is used to clarify certain concepts:
@@ -275,8 +276,6 @@ The following glossary is used to clarify certain concepts:
 - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
 - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
 
-
-
 _Note: Thos section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
 #### Promptbook core
@@ -337,8 +336,6 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
 - [👮 Agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39)
 - [view more](https://github.com/webgptorg/promptbook/discussions/categories/concepts)
 
-
-
 ### Terms specific to Promptbook TypeScript implementation
 
 - Anonymous mode
package/esm/index.es.js CHANGED
@@ -15,7 +15,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  *
  * @see https://github.com/webgptorg/promptbook
  */
-var PROMPTBOOK_ENGINE_VERSION = '0.75.9';
+var PROMPTBOOK_ENGINE_VERSION = '0.76.0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -0,0 +1,2 @@
+import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
+export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
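The new barrel module above re-exports the version constants. A minimal usage sketch, assuming the constants are also re-exported from the package root as this barrel suggests:

```typescript
// Minimal sketch, assuming BOOK_LANGUAGE_VERSION and PROMPTBOOK_ENGINE_VERSION
// are re-exported from the package root as the barrel above suggests.
import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '@promptbook/remote-server';

console.info(`Book language: ${BOOK_LANGUAGE_VERSION}, engine: ${PROMPTBOOK_ENGINE_VERSION}`);
// In this build the engine constant is '0.76.0' (see the version bump above)
```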
@@ -8,7 +8,7 @@ import { deserializeError } from '../errors/utils/deserializeError';
 import { serializeError } from '../errors/utils/serializeError';
 import { forEachAsync } from '../execution/utils/forEachAsync';
 import { isValidJsonString } from '../formats/json/utils/isValidJsonString';
-import { $currentDate } from '../utils/$currentDate';
+import { $getCurrentDate } from '../utils/$getCurrentDate';
 import { $isRunningInBrowser } from '../utils/environment/$isRunningInBrowser';
 import { $isRunningInNode } from '../utils/environment/$isRunningInNode';
 import { $isRunningInWebWorker } from '../utils/environment/$isRunningInWebWorker';
@@ -82,7 +82,7 @@ export { deserializeError };
 export { serializeError };
 export { forEachAsync };
 export { isValidJsonString };
-export { $currentDate };
+export { $getCurrentDate };
 export { $isRunningInBrowser };
 export { $isRunningInNode };
 export { $isRunningInWebWorker };
@@ -0,0 +1,4 @@
+import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
+import { createExecutionToolsFromVercelProvider } from '../llm-providers/vercel/createExecutionToolsFromVercelProvider';
+export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
+export { createExecutionToolsFromVercelProvider };
@@ -1,15 +1,24 @@
+import type { string_user_id } from '../types/typeAliases';
 /**
  * @@@
  *
  * Note: Keep it public to allow people to make their own execution tools
  */
 export type CommonToolsOptions = {
+    /**
+     * A unique identifier representing your end-user
+     *
+     * Note: For example it can help to detect abuse
+     * For example for OpenAi @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+     */
+    readonly userId?: string_user_id;
     /**
      * If true, the internal executions will be logged
      */
     readonly isVerbose?: boolean;
 };
 /**
+ * TODO: [🧠][🤺] Maybe allow overriding of `userId` for each prompt
  * TODO: [🈁] Maybe add here `isDeterministic`
  * TODO: [🧠][💙] Distinct between options passed into ExecutionTools and to ExecutionTools.execute
  */
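With this hunk, `userId` is declared once on `CommonToolsOptions` and inherited by every provider's options type, instead of being redeclared per provider. A minimal sketch of the shared shape (the import path is an assumption; in the source tree the type lives at `src/execution/CommonToolsOptions.ts`):

```typescript
// Sketch only: the package that publicly exports CommonToolsOptions is assumed here.
import type { CommonToolsOptions } from '@promptbook/types';

const options: CommonToolsOptions = {
    userId: 'user-1234', // optional end-user identifier, e.g. for abuse detection
    isVerbose: true,     // log internal executions
};
```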
@@ -18,7 +18,7 @@ export type CreateLlmToolsFromConfigurationOptions = {
      *
      * Note: This is passed to the LLM tools providers to identify misuse
      */
-    readonly userId?: string_user_id | null;
+    readonly userId?: string_user_id;
 };
 /**
  * @@@
@@ -33,5 +33,5 @@ export type AzureOpenAiExecutionToolsOptions = CommonToolsOptions & {
      *
      * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
 };
@@ -1,6 +1,5 @@
 import type { ClientOptions } from 'openai';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
-import type { string_user_id } from '../../types/typeAliases';
 /**
  * Options for `OpenAiExecutionTools`
  *
@@ -9,12 +8,4 @@ import type { string_user_id } from '../../types/typeAliases';
  *
  * @public exported from `@promptbook/openai`
  */
-export type OpenAiExecutionToolsOptions = CommonToolsOptions & ClientOptions & {
-    /**
-     * A unique identifier representing your end-user, which can help OpenAI to monitor
-     * and detect abuse.
-     *
-     * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
-     */
-    userId: string_user_id | null;
-};
+export type OpenAiExecutionToolsOptions = CommonToolsOptions & ClientOptions;
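For OpenAI this is a small breaking change: `userId` used to be a required nullable field, so callers had to write `userId: null` explicitly; now it is inherited from `CommonToolsOptions` and may simply be omitted. A migration sketch (the constructor usage is an assumption; `apiKey` comes from OpenAI's `ClientOptions`):

```typescript
import { OpenAiExecutionTools } from '@promptbook/openai';

// Before (0.75.x): userId was required, so `userId: null` had to be passed.
// After (0.76+): userId is optional; omit it or pass a real end-user id.
const tools = new OpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // from OpenAI's ClientOptions
    userId: 'user-1234', // optional; helps OpenAI monitor and detect abuse
});
```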
@@ -24,7 +24,7 @@ export type PromptbookServer_ListModels_AnonymousRequest = {
      * Note: this is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
     /**
      * Configuration for the LLM tools
      */
@@ -29,7 +29,7 @@ export type PromptbookServer_Prompt_AnonymousRequest = {
      * Note: this is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
     /**
      * Configuration for the LLM tools
      */
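The same loosening applies to both anonymous request types: clients that previously had to send `userId: null` can now leave the field out entirely. A type-level sketch (the import location of the request type is an assumption):

```typescript
// Assumed export location of the published typings.
import type { PromptbookServer_Prompt_AnonymousRequest } from '@promptbook/types';

// Only the changed field is shown; llmToolsConfiguration etc. are elided.
const request: Partial<PromptbookServer_Prompt_AnonymousRequest> = {
    userId: 'user-1234', // now optional: may be omitted instead of passing null
};
```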
@@ -41,7 +41,7 @@ export type RemoteLlmExecutionToolsOptions<TCustomOptions> = CommonToolsOptions
      * Note: This is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode).
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
 } | ({
     /**
      * Use anonymous server with client identification and fixed collection
@@ -62,7 +62,7 @@ export type CollectionRemoteServerClientOptions<TCustomOptions> = {
     /**
      * @@@
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
     /**
      * @@@
      */
@@ -0,0 +1,11 @@
+import type { createOpenAI } from '@ai-sdk/openai';
+import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+type ProviderV1 = ReturnType<typeof createOpenAI>;
+/**
+ * !!!!!!
+ *
+ * @public exported from `@promptbook/vercel`
+ */
+export declare function createExecutionToolsFromVercelProvider(vercelProvider: ProviderV1, options?: CommonToolsOptions): LlmExecutionTools;
+export {};
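This new declaration is the core of the `@promptbook/vercel` adapter: it takes a Vercel AI SDK provider and wraps it as Promptbook `LlmExecutionTools`. A usage sketch (the `apiKey` wiring is an assumption; any provider matching the shape of `createOpenAI`'s return value fits the declared signature):

```typescript
import { createOpenAI } from '@ai-sdk/openai';
import { createExecutionToolsFromVercelProvider } from '@promptbook/vercel';

// Wrap a Vercel AI SDK provider as Promptbook LlmExecutionTools
const vercelProvider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
const llmTools = createExecutionToolsFromVercelProvider(vercelProvider, {
    userId: 'user-1234', // optional, inherited from CommonToolsOptions
    isVerbose: true,
});
```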
@@ -0,0 +1,6 @@
+#!/usr/bin/env ts-node
+export {};
+/**
+ * TODO: [main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
+ * Note: [⚫] Code in this file should never be published in any package
+ */
@@ -7,4 +7,4 @@ import type { string_date_iso8601 } from '../types/typeAliases';
  * @returns string_date branded type
  * @public exported from `@promptbook/utils`
  */
-export declare function $currentDate(): string_date_iso8601;
+export declare function $getCurrentDate(): string_date_iso8601;
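The `$currentDate` utility is renamed to `$getCurrentDate` (the related private helper `getCurrentIsoDate` is removed, see the deleted file at the end of this diff). Callers only need to update the imported name; a migration sketch, using the `@promptbook/utils` export named in the `@public` tag above:

```typescript
// Before (0.75.x):
// import { $currentDate } from '@promptbook/utils';

// After (0.76+):
import { $getCurrentDate } from '@promptbook/utils';

const now = $getCurrentDate(); // current date as an ISO 8601 branded string
```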
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/remote-server",
-    "version": "0.75.10",
+    "version": "0.77.0-0",
     "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
     "--note-0": " <- [🐊]",
     "private": false,
@@ -54,7 +54,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.75.10"
+        "@promptbook/core": "0.77.0-0"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -22,7 +22,7 @@
  *
  * @see https://github.com/webgptorg/promptbook
  */
-var PROMPTBOOK_ENGINE_VERSION = '0.75.9';
+var PROMPTBOOK_ENGINE_VERSION = '0.76.0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1,7 +0,0 @@
-import type { string_date_iso8601 } from '../types/typeAliases';
-/**
- * Get current date in ISO 8601 format
- *
- * @private internal utility
- */
-export declare function getCurrentIsoDate(): string_date_iso8601;