@promptbook/openai 0.103.0-47 → 0.103.0-49

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/esm/index.es.js +165 -34
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +1 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +17 -3
  8. package/esm/typings/src/book-2.0/agent-source/AgentSourceParseResult.d.ts +2 -1
  9. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.d.ts +8 -0
  10. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.test.d.ts +1 -0
  11. package/esm/typings/src/book-2.0/agent-source/createDefaultAgentName.d.ts +8 -0
  12. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.d.ts +9 -0
  13. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.test.d.ts +1 -0
  14. package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.d.ts +1 -1
  15. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +14 -8
  16. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabaseOptions.d.ts +10 -0
  17. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +57 -32
  18. package/esm/typings/src/commitments/MESSAGE/InitialMessageCommitmentDefinition.d.ts +28 -0
  19. package/esm/typings/src/commitments/index.d.ts +2 -1
  20. package/esm/typings/src/config.d.ts +1 -0
  21. package/esm/typings/src/errors/DatabaseError.d.ts +2 -2
  22. package/esm/typings/src/errors/WrappedError.d.ts +2 -2
  23. package/esm/typings/src/execution/ExecutionTask.d.ts +2 -2
  24. package/esm/typings/src/execution/LlmExecutionTools.d.ts +6 -1
  25. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +2 -2
  26. package/esm/typings/src/llm-providers/_common/utils/assertUniqueModels.d.ts +12 -0
  27. package/esm/typings/src/llm-providers/agent/Agent.d.ts +17 -4
  28. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +10 -1
  29. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +6 -2
  30. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +30 -4
  31. package/esm/typings/src/llm-providers/openai/openai-models.test.d.ts +4 -0
  32. package/esm/typings/src/remote-server/startAgentServer.d.ts +2 -2
  33. package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
  34. package/esm/typings/src/transpilers/openai-sdk/register.d.ts +1 -1
  35. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  36. package/esm/typings/src/utils/color/Color.d.ts +7 -0
  37. package/esm/typings/src/utils/color/Color.test.d.ts +1 -0
  38. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -2
  39. package/esm/typings/src/utils/misc/computeHash.d.ts +11 -0
  40. package/esm/typings/src/utils/misc/computeHash.test.d.ts +1 -0
  41. package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +2 -0
  42. package/esm/typings/src/utils/normalization/normalizeTo_PascalCase.d.ts +3 -0
  43. package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +2 -0
  44. package/esm/typings/src/utils/normalization/titleToName.d.ts +2 -0
  45. package/esm/typings/src/utils/organization/$sideEffect.d.ts +2 -2
  46. package/esm/typings/src/utils/organization/$side_effect.d.ts +2 -2
  47. package/esm/typings/src/utils/organization/TODO_USE.d.ts +2 -2
  48. package/esm/typings/src/utils/organization/keepUnused.d.ts +2 -2
  49. package/esm/typings/src/utils/organization/preserve.d.ts +3 -3
  50. package/esm/typings/src/utils/organization/really_any.d.ts +7 -0
  51. package/esm/typings/src/utils/serialization/asSerializable.d.ts +2 -2
  52. package/esm/typings/src/version.d.ts +1 -1
  53. package/package.json +3 -4
  54. package/umd/index.umd.js +170 -38
  55. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -1,5 +1,4 @@
  import colors from 'colors';
- import { forEver } from 'waitasecond';
  import spaceTrim$1, { spaceTrim } from 'spacetrim';
  import { randomBytes } from 'crypto';
  import Bottleneck from 'bottleneck';
@@ -20,7 +19,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-47';
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-49';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -575,6 +574,9 @@ class Color {
  if (hex.length === 3) {
  return Color.fromHex3(hex);
  }
+ if (hex.length === 4) {
+ return Color.fromHex4(hex);
+ }
  if (hex.length === 6) {
  return Color.fromHex6(hex);
  }
@@ -595,6 +597,19 @@
  const b = parseInt(hex.substr(2, 1), 16) * 16;
  return take(new Color(r, g, b));
  }
+ /**
+ * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
+ *
+ * @param color in hex for example `09df`
+ * @returns Color object
+ */
+ static fromHex4(hex) {
+ const r = parseInt(hex.substr(0, 1), 16) * 16;
+ const g = parseInt(hex.substr(1, 1), 16) * 16;
+ const b = parseInt(hex.substr(2, 1), 16) * 16;
+ const a = parseInt(hex.substr(3, 1), 16) * 16;
+ return take(new Color(r, g, b, a));
+ }
  /**
  * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
  *
@@ -785,7 +800,8 @@
  * @returns true if the value is a valid hex color string (e.g., `#009edd`, `#fff`, etc.)
  */
  static isHexColorString(value) {
- return typeof value === 'string' && /^#(?:[0-9a-fA-F]{3}){1,2}$/.test(value);
+ return (typeof value === 'string' &&
+ /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$/.test(value));
  }
  /**
  * Creates new Color object
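The two hunks above add the 4-digit `#rgba` shorthand to `Color.fromHex` (via the new `fromHex4`) and widen `isHexColorString` to accept 3-, 4-, 6-, and 8-digit forms. A minimal usage sketch follows; the `@promptbook/utils` import path is an assumption for illustration, not confirmed by this diff:

```ts
// Sketch only: exercises the new 4-digit branch shown above.
// Assumption: `Color` is importable from '@promptbook/utils'.
import { Color } from '@promptbook/utils';

Color.isHexColorString('#09df'); // true with the widened regex (4-digit form)
Color.isHexColorString('#0099ddff'); // true (8-digit form)

const accent = Color.fromHex('#09df');
// Per fromHex4 above, each nibble is scaled by 16: r=0, g=144, b=208, a=240
```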
@@ -1092,6 +1108,7 @@ const PROMPTBOOK_COLOR = Color.fromHex('#79EAFD');
  ({
  TITLE: Color.fromHex('#244EA8'),
  LINE: Color.fromHex('#eeeeee'),
+ SEPARATOR: Color.fromHex('#cccccc'),
  COMMITMENT: Color.fromHex('#DA0F78'),
  PARAMETER: Color.fromHex('#8e44ad'),
  });
@@ -1471,7 +1488,7 @@ function deepClone(objectValue) {
  TODO: [🧠] Is there a better implementation?
  > const propertyNames = Object.getOwnPropertyNames(objectValue);
  > for (const propertyName of propertyNames) {
- > const value = (objectValue as really_any)[propertyName];
+ > const value = (objectValue as chococake)[propertyName];
  > if (value && typeof value === 'object') {
  > deepClone(value);
  > }
@@ -2361,17 +2378,17 @@ const OPENAI_MODELS = exportJson({
  },
  /**/
  /*/
- {
- modelTitle: 'tts-1-hd-1106',
- modelName: 'tts-1-hd-1106',
- },
- /**/
+ {
+ modelTitle: 'tts-1-hd-1106',
+ modelName: 'tts-1-hd-1106',
+ },
+ /**/
  /*/
- {
- modelTitle: 'tts-1-hd',
- modelName: 'tts-1-hd',
- },
- /**/
+ {
+ modelTitle: 'tts-1-hd',
+ modelName: 'tts-1-hd',
+ },
+ /**/
  /**/
  {
  modelVariant: 'CHAT',
@@ -3557,11 +3574,12 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
  *
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
  *
- * !!! Note: [🦖] There are several different things in Promptbook:
+ * Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/openai`
  */
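For orientation, a hedged sketch of how the lowest layer in that list is typically constructed; apart from `assistantId` and `isVerbose`, which appear elsewhere in this diff, the option names are assumptions, not a complete API reference:

```ts
// Assumed shape, for illustration only: one OpenAiAssistantExecutionTools
// instance is pinned to a single OpenAI assistant and can then be wrapped
// by an Agent or AgentLlmExecutionTools, as the comment above describes.
const tools = new OpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!, // assumed option name
    assistantId: 'asst_XXXXXXXXXXXX', // placeholder id
    isVerbose: true,
});
```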
@@ -3596,6 +3614,12 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  * Calls OpenAI API to use a chat model.
  */
  async callChatModel(prompt) {
+ return this.callChatModelStream(prompt, () => { });
+ }
+ /**
+ * Calls OpenAI API to use a chat model with streaming.
+ */
+ async callChatModelStream(prompt, onProgress) {
  var _a, _b, _c;
  if (this.options.isVerbose) {
  console.info('💬 OpenAI callChatModel call', { prompt });
@@ -3663,21 +3687,24 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  console.info('connect', stream.currentEvent);
  }
  });
- /*
- stream.on('messageDelta', (messageDelta) => {
- if (
- this.options.isVerbose &&
- messageDelta &&
- messageDelta.content &&
- messageDelta.content[0] &&
- messageDelta.content[0].type === 'text'
- ) {
- console.info('messageDelta', messageDelta.content[0].text?.value);
+ stream.on('textDelta', (textDelta, snapshot) => {
+ if (this.options.isVerbose && textDelta.value) {
+ console.info('textDelta', textDelta.value);
  }
-
- // <- TODO: [🐚] Make streaming and running tasks working
+ const chunk = {
+ content: textDelta.value || '',
+ modelName: 'assistant',
+ timing: {
+ start,
+ complete: $getCurrentDate(),
+ },
+ usage: UNCERTAIN_USAGE,
+ rawPromptContent,
+ rawRequest,
+ rawResponse: snapshot,
+ };
+ onProgress(chunk);
  });
- */
  stream.on('messageCreated', (message) => {
  if (this.options.isVerbose) {
  console.info('messageCreated', message);
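The hunk above turns `callChatModel` into a thin wrapper over the new `callChatModelStream(prompt, onProgress)`, which forwards every `textDelta` event as a partial result chunk. A hedged consumption sketch, relying only on fields visible in the diff (`content` on the chunk); `tools` and `prompt` are assumed to exist:

```ts
// Sketch: print partial text while the assistant run is still in progress.
const result = await tools.callChatModelStream(prompt, (chunk) => {
    // Each chunk is built in the stream.on('textDelta', ...) handler above
    // and carries the latest delta text plus timing/usage placeholders.
    process.stdout.write(chunk.content);
});
// callChatModel(prompt) now delegates here with a no-op onProgress callback.
```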
@@ -3713,7 +3740,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  }
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
+ message: `Result of \`OpenAiAssistantExecutionTools.callChatModelStream\``,
  order: [],
  value: {
  content: resultContent,
@@ -3732,15 +3759,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  },
  });
  }
- async playground() {
+ /*
+ public async playground() {
  const client = await this.getClient();
+
  // List all assistants
  const assistants = await client.beta.assistants.list();
  console.log('!!! Assistants:', assistants);
+
  // Get details of a specific assistant
  const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
  const assistant = await client.beta.assistants.retrieve(assistantId);
  console.log('!!! Assistant Details:', assistant);
+
  // Update an assistant
  const updatedAssistant = await client.beta.assistants.update(assistantId, {
  name: assistant.name + '(M)',
@@ -3750,8 +3781,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  },
  });
  console.log('!!! Updated Assistant:', updatedAssistant);
+
  await forEver();
  }
+ */
+ /**
+ * Get an existing assistant tool wrapper
+ */
+ getAssistant(assistantId) {
+ return new OpenAiAssistantExecutionTools({
+ ...this.options,
+ assistantId,
+ });
+ }
  async createNewAssistant(options) {
  if (!this.isCreatingNewAssistantsAllowed) {
  throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
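The new `getAssistant(assistantId)` helper above returns a copy of the tools bound to an already existing assistant, and unlike `createNewAssistant` it does not require `isCreatingNewAssistantsAllowed`. A short sketch; the id is a placeholder and `tools`/`prompt` are assumed to exist:

```ts
// Reconnect to an assistant that already exists in the OpenAI account.
const existingAssistantTools = tools.getAssistant('asst_XXXXXXXXXXXX'); // placeholder id
const reply = await existingAssistantTools.callChatModel(prompt);
```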
@@ -3837,9 +3879,98 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  }
  const assistant = await client.beta.assistants.create(assistantConfig);
  console.log(`✅ Assistant created: ${assistant.id}`);
- // TODO: !!!! Try listing existing assistants
- // TODO: !!!! Try marking existing assistants by DISCRIMINANT
- // TODO: !!!! Allow to update and reconnect to existing assistants
+ // TODO: [🐱‍🚀] Try listing existing assistants
+ // TODO: [🐱‍🚀] Try marking existing assistants by DISCRIMINANT
+ // TODO: [🐱‍🚀] Allow to update and reconnect to existing assistants
+ return new OpenAiAssistantExecutionTools({
+ ...this.options,
+ isCreatingNewAssistantsAllowed: false,
+ assistantId: assistant.id,
+ });
+ }
+ async updateAssistant(options) {
+ if (!this.isCreatingNewAssistantsAllowed) {
+ throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
+ }
+ const { assistantId, name, instructions, knowledgeSources } = options;
+ const client = await this.getClient();
+ let vectorStoreId;
+ // If knowledge sources are provided, create a vector store with them
+ // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
+ if (knowledgeSources && knowledgeSources.length > 0) {
+ if (this.options.isVerbose) {
+ console.info(`📚 Creating vector store for update with ${knowledgeSources.length} knowledge sources...`);
+ }
+ // Create a vector store
+ const vectorStore = await client.beta.vectorStores.create({
+ name: `${name} Knowledge Base`,
+ });
+ vectorStoreId = vectorStore.id;
+ if (this.options.isVerbose) {
+ console.info(`✅ Vector store created: ${vectorStoreId}`);
+ }
+ // Upload files from knowledge sources to the vector store
+ const fileStreams = [];
+ for (const source of knowledgeSources) {
+ try {
+ // Check if it's a URL
+ if (source.startsWith('http://') || source.startsWith('https://')) {
+ // Download the file
+ const response = await fetch(source);
+ if (!response.ok) {
+ console.error(`Failed to download ${source}: ${response.statusText}`);
+ continue;
+ }
+ const buffer = await response.arrayBuffer();
+ const filename = source.split('/').pop() || 'downloaded-file';
+ const blob = new Blob([buffer]);
+ const file = new File([blob], filename);
+ fileStreams.push(file);
+ }
+ else {
+ // Assume it's a local file path
+ // Note: This will work in Node.js environment
+ // For browser environments, this would need different handling
+ const fs = await import('fs');
+ const fileStream = fs.createReadStream(source);
+ fileStreams.push(fileStream);
+ }
+ }
+ catch (error) {
+ console.error(`Error processing knowledge source ${source}:`, error);
+ }
+ }
+ // Batch upload files to the vector store
+ if (fileStreams.length > 0) {
+ try {
+ await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
+ files: fileStreams,
+ });
+ if (this.options.isVerbose) {
+ console.info(`✅ Uploaded ${fileStreams.length} files to vector store`);
+ }
+ }
+ catch (error) {
+ console.error('Error uploading files to vector store:', error);
+ }
+ }
+ }
+ const assistantUpdate = {
+ name,
+ instructions,
+ tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
+ };
+ if (vectorStoreId) {
+ assistantUpdate.tool_resources = {
+ file_search: {
+ vector_store_ids: [vectorStoreId],
+ },
+ };
+ }
+ const assistant = await client.beta.assistants.update(assistantId, assistantUpdate);
+ if (this.options.isVerbose) {
+ console.log(`✅ Assistant updated: ${assistant.id}`);
+ }
  return new OpenAiAssistantExecutionTools({
  ...this.options,
  isCreatingNewAssistantsAllowed: false,
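`updateAssistant` above mirrors `createNewAssistant`: it optionally builds a fresh vector store from `knowledgeSources` (HTTP(S) URLs are fetched, other strings are read as local file paths via `fs.createReadStream`), attaches it through the `file_search` tool, and returns a new tools instance pinned to the updated assistant. A hedged call sketch using only the option names destructured above; all values are placeholders:

```ts
// Sketch: update an existing assistant's name, instructions and knowledge base.
// Per the guard above, this requires isCreatingNewAssistantsAllowed: true.
const updatedTools = await tools.updateAssistant({
    assistantId: 'asst_XXXXXXXXXXXX', // placeholder id
    name: 'Support Agent',
    instructions: 'Answer strictly from the attached knowledge base.',
    knowledgeSources: [
        'https://example.com/handbook.pdf', // downloaded via fetch
        './docs/faq.md', // read with fs.createReadStream in Node.js
    ],
});
```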
@@ -3963,7 +4094,7 @@ class DatabaseError extends Error {
  }
  }
  /**
- * TODO: !!!! Explain that NotFoundError (!!! and other specific errors) has priority over DatabaseError in some contexts
+ * TODO: [🐱‍🚀] Explain that NotFoundError ([🐱‍🚀] and other specific errors) has priority over DatabaseError in some contexts
  */

  /**