@promptbook/openai 0.76.0 → 0.77.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/README.md +5 -1
  2. package/esm/index.es.js +15 -12
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +10 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +6 -0
  8. package/esm/typings/src/_packages/utils.index.d.ts +2 -2
  9. package/esm/typings/src/_packages/vercel.index.d.ts +4 -0
  10. package/esm/typings/src/execution/AvailableModel.d.ts +5 -1
  11. package/esm/typings/src/execution/CommonToolsOptions.d.ts +9 -0
  12. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +12 -0
  15. package/esm/typings/src/llm-providers/google/createGoogleExecutionTools.d.ts +18 -0
  16. package/esm/typings/src/llm-providers/google/register-configuration.d.ts +13 -0
  17. package/esm/typings/src/llm-providers/google/register-constructor.d.ts +14 -0
  18. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -10
  19. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +1 -1
  20. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Request.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -1
  22. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/vercel/VercelExecutionToolsOptions.d.ts +22 -0
  24. package/esm/typings/src/llm-providers/vercel/VercelProvider.d.ts +13 -0
  25. package/esm/typings/src/llm-providers/vercel/createExecutionToolsFromVercelProvider.d.ts +8 -0
  26. package/esm/typings/src/llm-providers/vercel/playground/playground.d.ts +6 -0
  27. package/esm/typings/src/utils/{$currentDate.d.ts → $getCurrentDate.d.ts} +1 -1
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +15 -12
  30. package/umd/index.umd.js.map +1 -1
  31. package/esm/typings/src/_packages/gemini.index.d.ts +0 -2
  32. package/esm/typings/src/utils/getCurrentIsoDate.d.ts +0 -7
package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts CHANGED
@@ -33,5 +33,5 @@ export type AzureOpenAiExecutionToolsOptions = CommonToolsOptions & {
      *
      * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
 };
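
For orientation (not part of the diff): with `userId` now optional, Azure callers can simply omit it instead of passing `null`. A minimal sketch, assuming the usual `AzureOpenAiExecutionTools` constructor and that the remaining fields (`resourceName`, `deploymentName`, `apiKey`) keep their existing shape — none of that is shown in this diff:

```ts
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

const azureTools = new AzureOpenAiExecutionTools({
    resourceName: 'my-azure-resource', // <- hypothetical resource name
    deploymentName: 'gpt-4', // <- hypothetical deployment name
    apiKey: process.env.AZURE_OPENAI_API_KEY!,
    // `userId` is simply omitted now; previously it had to be passed explicitly (possibly as `null`)
});
```
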
package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts ADDED
@@ -0,0 +1,12 @@
+import type { createGoogleGenerativeAI } from '@ai-sdk/google';
+import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
+/**
+ * Options for `GoogleExecutionTools`
+ *
+ * !!!!!! This extends Google's `ClientOptions` with are directly passed to the Google generative AI client.
+ * @public exported from `@promptbook/google`
+ */
+export type GoogleExecutionToolsOptions = CommonToolsOptions & Parameters<typeof createGoogleGenerativeAI>[0];
+/**
+ * TODO: [🧠][🤺] Pass `userId`
+ */
package/esm/typings/src/llm-providers/google/createGoogleExecutionTools.d.ts ADDED
@@ -0,0 +1,18 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { GoogleExecutionToolsOptions } from './GoogleExecutionToolsOptions';
+/**
+ * Execution Tools for calling Google Gemini API.
+ *
+ * @public exported from `@promptbook/google`
+ */
+export declare const createGoogleExecutionTools: ((options: GoogleExecutionToolsOptions) => LlmExecutionTools) & {
+    packageName: string;
+    className: string;
+};
+/**
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new GoogleExecutionTools` -> `createGoogleExecutionTools` in manual
+ * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
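
For orientation (not part of the diff): combined with `GoogleExecutionToolsOptions` above, the new factory is presumably used roughly like this — a sketch assuming `apiKey` is accepted because the options pass through `createGoogleGenerativeAI`'s parameters, and that `isVerbose` comes from `CommonToolsOptions`:

```ts
import { createGoogleExecutionTools } from '@promptbook/google';

const googleTools = createGoogleExecutionTools({
    apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY, // <- forwarded to @ai-sdk/google's createGoogleGenerativeAI
    isVerbose: true, // <- assumed to come from CommonToolsOptions
});

// The registration metadata is attached directly to the factory function:
console.info(createGoogleExecutionTools.packageName, createGoogleExecutionTools.className);
```
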
package/esm/typings/src/llm-providers/google/register-configuration.d.ts ADDED
@@ -0,0 +1,13 @@
+import type { Registration } from '../../utils/$Register';
+/**
+ * Registration of LLM provider metadata
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+export declare const _GoogleMetadataRegistration: Registration;
+/**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
package/esm/typings/src/llm-providers/google/register-constructor.d.ts ADDED
@@ -0,0 +1,14 @@
+import type { Registration } from '../../utils/$Register';
+/**
+ * Registration of LLM provider
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
+ *
+ * @public exported from `@promptbook/google`
+ * @public exported from `@promptbook/cli`
+ */
+export declare const _GoogleRegistration: Registration;
+/**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts CHANGED
@@ -1,6 +1,5 @@
 import type { ClientOptions } from 'openai';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
-import type { string_user_id } from '../../types/typeAliases';
 /**
  * Options for `OpenAiExecutionTools`
  *
@@ -9,12 +8,4 @@ import type { string_user_id } from '../../types/typeAliases';
  *
  * @public exported from `@promptbook/openai`
  */
-export type OpenAiExecutionToolsOptions = CommonToolsOptions & ClientOptions & {
-    /**
-     * A unique identifier representing your end-user, which can help OpenAI to monitor
-     * and detect abuse.
-     *
-     * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
-     */
-    userId: string_user_id | null;
-};
+export type OpenAiExecutionToolsOptions = CommonToolsOptions & ClientOptions;
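
For orientation (not part of the diff): `OpenAiExecutionToolsOptions` is now just `CommonToolsOptions & ClientOptions`, so any OpenAI client option is passed straight through. A sketch of the narrowed shape; the `new OpenAiExecutionTools(...)` call and the assumption that `userId` now lives on `CommonToolsOptions` (which also changed in this release, and the bundled code below still reads `options.userId`) are inferred, not shown here:

```ts
import { OpenAiExecutionTools } from '@promptbook/openai';

const openAiTools = new OpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // <- any OpenAI ClientOptions field is accepted directly
    isVerbose: true,
    userId: 'user-1234', // <- assumed to be declared on CommonToolsOptions after this change
});
```
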
package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts CHANGED
@@ -24,7 +24,7 @@ export type PromptbookServer_ListModels_AnonymousRequest = {
      * Note: this is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
     /**
      * Configuration for the LLM tools
      */
package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Request.d.ts CHANGED
@@ -29,7 +29,7 @@ export type PromptbookServer_Prompt_AnonymousRequest = {
      * Note: this is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
     /**
      * Configuration for the LLM tools
      */
package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts CHANGED
@@ -41,7 +41,7 @@ export type RemoteLlmExecutionToolsOptions<TCustomOptions> = CommonToolsOptions
      * Note: This is passed to the certain model providers to identify misuse
      * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode).
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
 } | ({
     /**
      * Use anonymous server with client identification and fixed collection
package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts CHANGED
@@ -62,7 +62,7 @@ export type CollectionRemoteServerClientOptions<TCustomOptions> = {
     /**
      * @@@
      */
-    readonly userId: string_user_id | null;
+    readonly userId?: string_user_id;
     /**
      * @@@
      */
package/esm/typings/src/llm-providers/vercel/VercelExecutionToolsOptions.d.ts ADDED
@@ -0,0 +1,22 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
+import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
+import type { VercelProvider } from './VercelProvider';
+/**
+ * Options for `createExecutionToolsFromVercelProvider`
+ *
+ * @public exported from `@promptbook/google`
+ */
+export type VercelExecutionToolsOptions = CommonToolsOptions & {
+    /**
+     * Vercel provider for the execution tools
+     */
+    readonly vercelProvider: VercelProvider;
+    /**
+     * List of available models for given Vercel provider
+     */
+    readonly availableModels: ReadonlyArray<AvailableModel>;
+    /**
+     * Additional settings for chat models when calling `vercelProvider.chat('model-name', settings)`
+     */
+    readonly additionalChatSettings?: Partial<Parameters<VercelProvider['chat']>[1]>;
+};
package/esm/typings/src/llm-providers/vercel/VercelProvider.d.ts ADDED
@@ -0,0 +1,13 @@
+import type { createGoogleGenerativeAI } from '@ai-sdk/google';
+import type { createOpenAI } from '@ai-sdk/openai';
+/**
+ * This is common interface for all v1 Vercel providers
+ *
+ * @public exported from `@promptbook/vercel`
+ */
+export type VercelProvider = ReturnType<typeof createOpenAI> | ReturnType<typeof createGoogleGenerativeAI>;
+/**
+ * ^^^^
+ * TODO: Is there some way to get the type of the provider directly,
+ *       NOT this stupid way via inferring the return type from a specific vercel provider⁉
+ */
package/esm/typings/src/llm-providers/vercel/createExecutionToolsFromVercelProvider.d.ts ADDED
@@ -0,0 +1,8 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { VercelExecutionToolsOptions } from './VercelExecutionToolsOptions';
+/**
+ * !!!!!!
+ *
+ * @public exported from `@promptbook/vercel`
+ */
+export declare function createExecutionToolsFromVercelProvider(options: VercelExecutionToolsOptions): LlmExecutionTools;
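
For orientation (not part of the diff): a sketch of how the Vercel adapter and its options type fit together. `createOpenAI` is one member of the `VercelProvider` union above; the `AvailableModel` field names (`modelName`, `modelTitle`, `modelVariant`) are assumed from the rest of the codebase, not shown in this diff:

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { createExecutionToolsFromVercelProvider } from '@promptbook/vercel';

// Any provider matching the VercelProvider union works here (createOpenAI, createGoogleGenerativeAI, ...)
const vercelProvider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const vercelTools = createExecutionToolsFromVercelProvider({
    vercelProvider,
    availableModels: [
        { modelName: 'gpt-4o', modelTitle: 'GPT-4o', modelVariant: 'CHAT' }, // <- assumed field names
    ],
    additionalChatSettings: {}, // <- optional; forwarded as the second argument of vercelProvider.chat(model, settings)
});
```
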
package/esm/typings/src/llm-providers/vercel/playground/playground.d.ts ADDED
@@ -0,0 +1,6 @@
+#!/usr/bin/env ts-node
+export {};
+/**
+ * TODO: [main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
+ * Note: [⚫] Code in this file should never be published in any package
+ */
package/esm/typings/src/utils/{$currentDate.d.ts → $getCurrentDate.d.ts} RENAMED
@@ -7,4 +7,4 @@ import type { string_date_iso8601 } from '../types/typeAliases';
  * @returns string_date branded type
  * @public exported from `@promptbook/utils`
  */
-export declare function $currentDate(): string_date_iso8601;
+export declare function $getCurrentDate(): string_date_iso8601;
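
For orientation (not part of the diff): the renamed helper is now a public utility; a minimal usage sketch (the `$` prefix marks it as impure because it reads the system clock):

```ts
import { $getCurrentDate } from '@promptbook/utils';

const startedAt = $getCurrentDate();
// -> an ISO 8601 string such as '2025-01-01T12:00:00.000Z' (branded as string_date_iso8601)
```
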
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/openai",
-    "version": "0.76.0",
+    "version": "0.77.0-3",
     "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
     "--note-0": " <- [🐊]",
     "private": false,
@@ -54,7 +54,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/openai.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.76.0"
+        "@promptbook/core": "0.77.0-3"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -22,7 +22,7 @@
  *
  * @see https://github.com/webgptorg/promptbook
  */
-var PROMPTBOOK_ENGINE_VERSION = '0.75.10';
+var PROMPTBOOK_ENGINE_VERSION = '0.77.0-2';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -288,11 +288,14 @@
  */
 
 /**
- * Get current date in ISO 8601 format
+ * Simple wrapper `new Date().toISOString()`
  *
- * @private internal utility
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
+ *
+ * @returns string_date branded type
+ * @public exported from `@promptbook/utils`
  */
-function getCurrentIsoDate() {
+function $getCurrentDate() {
     return new Date().toISOString();
 }
 
@@ -1577,7 +1580,7 @@
 content: rawPromptContent,
 },
 ], false), user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
-start = getCurrentIsoDate();
+start = $getCurrentDate();
 if (this.options.isVerbose) {
 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
 }
@@ -1601,7 +1604,7 @@
 }
 resultContent = rawResponse.choices[0].message.content;
 // eslint-disable-next-line prefer-const
-complete = getCurrentIsoDate();
+complete = $getCurrentDate();
 usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
 if (resultContent === null) {
 throw new PipelineExecutionError('No response message from OpenAI');
@@ -1656,7 +1659,7 @@
 };
 rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
 rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
-start = getCurrentIsoDate();
+start = $getCurrentDate();
 if (this.options.isVerbose) {
 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
 }
@@ -1680,7 +1683,7 @@
 }
 resultContent = rawResponse.choices[0].text;
 // eslint-disable-next-line prefer-const
-complete = getCurrentIsoDate();
+complete = $getCurrentDate();
 usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
 return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools CompletionPromptResult', {
 content: resultContent,
@@ -1726,7 +1729,7 @@
 input: rawPromptContent,
 model: modelName,
 };
-start = getCurrentIsoDate();
+start = $getCurrentDate();
 if (this.options.isVerbose) {
 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
 }
@@ -1746,7 +1749,7 @@
 }
 resultContent = rawResponse.data[0].embedding;
 // eslint-disable-next-line prefer-const
-complete = getCurrentIsoDate();
+complete = $getCurrentDate();
 usage = computeOpenAiUsage(content || '', '',
 // <- Note: Embedding does not have result content
 rawResponse);
@@ -1899,7 +1902,7 @@
 },
 // <- TODO: Add user identification here> user: this.options.user,
 };
-start = getCurrentIsoDate();
+start = $getCurrentDate();
 if (this.options.isVerbose) {
 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
 }
@@ -1950,7 +1953,7 @@
 resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
 // <- TODO: [🧠] There are also annotations, maybe use them
 // eslint-disable-next-line prefer-const
-complete = getCurrentIsoDate();
+complete = $getCurrentDate();
 usage = UNCERTAIN_USAGE;
 // <- TODO: [🥘] Compute real usage for assistant
 // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);