@promptbook/openai 0.110.0-8 → 0.110.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +0 -4
  2. package/esm/index.es.js +266 -8
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -2
  6. package/esm/typings/src/_packages/types.index.d.ts +10 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  8. package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
  9. package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
  11. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
  13. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +42 -0
  14. package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +0 -2
  15. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  16. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  17. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  18. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  19. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
  20. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +6 -0
  21. package/esm/typings/src/book-components/Chat/hooks/useChatRatings.d.ts +24 -2
  22. package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +2 -10
  23. package/esm/typings/src/book-components/Chat/utils/parseCitationMarker.d.ts +75 -0
  24. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +3 -1
  25. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.test.d.ts +1 -0
  26. package/esm/typings/src/book-components/icons/ArrowIcon.d.ts +17 -4
  27. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  28. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  29. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +39 -0
  30. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  31. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  32. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  33. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  34. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  35. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  36. package/esm/typings/src/version.d.ts +1 -1
  37. package/package.json +2 -2
  38. package/umd/index.umd.js +266 -8
  39. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -27,10 +27,6 @@ Turn your company's scattered knowledge into AI ready Books
27
27
 
28
28
 
29
29
 
30
- <blockquote style="color: #ff8811">
31
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
32
- </blockquote>
33
-
34
30
  ## 📦 Package `@promptbook/openai`
35
31
 
36
32
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -23,7 +23,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
23
23
  * @generated
24
24
  * @see https://github.com/webgptorg/promptbook
25
25
  */
26
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
26
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0';
27
27
  /**
28
28
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
29
29
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2725,6 +2725,66 @@ const OPENAI_MODELS = exportJson({
2725
2725
  },
2726
2726
  /**/
2727
2727
  /**/
2728
+ {
2729
+ modelVariant: 'CHAT',
2730
+ modelTitle: 'gpt-5.2-codex',
2731
+ modelName: 'gpt-5.2-codex',
2732
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
2733
+ pricing: {
2734
+ prompt: pricing(`$1.75 / 1M tokens`),
2735
+ output: pricing(`$14.00 / 1M tokens`),
2736
+ },
2737
+ },
2738
+ /**/
2739
+ /**/
2740
+ {
2741
+ modelVariant: 'CHAT',
2742
+ modelTitle: 'gpt-5.1-codex-max',
2743
+ modelName: 'gpt-5.1-codex-max',
2744
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
2745
+ pricing: {
2746
+ prompt: pricing(`$1.25 / 1M tokens`),
2747
+ output: pricing(`$10.00 / 1M tokens`),
2748
+ },
2749
+ },
2750
+ /**/
2751
+ /**/
2752
+ {
2753
+ modelVariant: 'CHAT',
2754
+ modelTitle: 'gpt-5.1-codex',
2755
+ modelName: 'gpt-5.1-codex',
2756
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
2757
+ pricing: {
2758
+ prompt: pricing(`$1.25 / 1M tokens`),
2759
+ output: pricing(`$10.00 / 1M tokens`),
2760
+ },
2761
+ },
2762
+ /**/
2763
+ /**/
2764
+ {
2765
+ modelVariant: 'CHAT',
2766
+ modelTitle: 'gpt-5.1-codex-mini',
2767
+ modelName: 'gpt-5.1-codex-mini',
2768
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
2769
+ pricing: {
2770
+ prompt: pricing(`$0.25 / 1M tokens`),
2771
+ output: pricing(`$2.00 / 1M tokens`),
2772
+ },
2773
+ },
2774
+ /**/
2775
+ /**/
2776
+ {
2777
+ modelVariant: 'CHAT',
2778
+ modelTitle: 'gpt-5-codex',
2779
+ modelName: 'gpt-5-codex',
2780
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
2781
+ pricing: {
2782
+ prompt: pricing(`$1.25 / 1M tokens`),
2783
+ output: pricing(`$10.00 / 1M tokens`),
2784
+ },
2785
+ },
2786
+ /**/
2787
+ /**/
2728
2788
  {
2729
2789
  modelVariant: 'CHAT',
2730
2790
  modelTitle: 'gpt-5-mini',
@@ -3476,6 +3536,32 @@ function isUnsupportedParameterError(error) {
3476
3536
  errorMessage.includes('does not support'));
3477
3537
  }
3478
3538
 
3539
/**
 * Provides access to the structured clone implementation when available.
 *
 * Note: `structuredClone` is a global in modern runtimes (Node >= 17, browsers);
 * older runtimes return `undefined` here and callers must use a fallback.
 */
function getStructuredCloneFunction() {
    return globalThis.structuredClone;
}
/**
 * Checks whether the prompt is a chat prompt that carries file attachments.
 */
function hasChatPromptFiles(prompt) {
    return 'files' in prompt && Array.isArray(prompt.files);
}
/**
 * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
 *
 * The JSON fallback cannot serialize `File`/`Blob` attachments (they would round-trip as `{}`),
 * so the `files` entries are restored from the original prompt. The array itself is copied so
 * that later mutations of the clone's `files` list cannot leak back into the original prompt;
 * the file objects themselves are intentionally shared by reference.
 */
function clonePromptPreservingFiles(prompt) {
    const structuredCloneFn = getStructuredCloneFunction();
    if (typeof structuredCloneFn === 'function') {
        return structuredCloneFn(prompt);
    }
    // Fallback: JSON round-trip (drops `undefined`, functions and other non-JSON values).
    const clonedPrompt = JSON.parse(JSON.stringify(prompt));
    if (hasChatPromptFiles(prompt)) {
        // Copy the array (not just the reference) so pushes/splices on the clone
        // do not mutate the original prompt's attachment list.
        clonedPrompt.files = [...prompt.files];
    }
    return clonedPrompt;
}
3479
3565
  /**
3480
3566
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
3481
3567
  *
@@ -3560,7 +3646,7 @@ class OpenAiCompatibleExecutionTools {
3560
3646
  */
3561
3647
  async callChatModelStream(prompt, onProgress) {
3562
3648
  // Deep clone prompt and modelRequirements to avoid mutation across calls
3563
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
3649
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
3564
3650
  // Use local Set for retried parameters to ensure independence and thread safety
3565
3651
  const retriedUnsupportedParameters = new Set();
3566
3652
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -3587,7 +3673,10 @@ class OpenAiCompatibleExecutionTools {
3587
3673
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
3588
3674
  // <- Note: [🧆]
3589
3675
  }; // <- TODO: [💩] Guard here types better
3590
- if (format === 'JSON') {
3676
+ if (currentModelRequirements.responseFormat !== undefined) {
3677
+ modelSettings.response_format = currentModelRequirements.responseFormat;
3678
+ }
3679
+ else if (format === 'JSON') {
3591
3680
  modelSettings.response_format = {
3592
3681
  type: 'json_object',
3593
3682
  };
@@ -4432,6 +4521,89 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
4432
4521
  }
4433
4522
  }
4434
4523
 
4524
/**
 * Base filename (without extension) used when a data URL knowledge source does not carry its own name.
 *
 * @private thing of inline knowledge
 */
const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
/**
 * File extension appended to the default inline knowledge filename.
 *
 * @private thing of inline knowledge
 */
const INLINE_KNOWLEDGE_EXTENSION = '.txt';
/**
 * Scheme prefix that identifies a data URL.
 *
 * @private thing of inline knowledge
 */
const DATA_URL_PREFIX = 'data:';
/**
 * Checks whether the provided source string is a data URL that can be decoded.
 *
 * @private thing of inline knowledge
 */
function isDataUrlKnowledgeSource(source) {
    return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
}
/**
 * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
 *
 * Only base64-encoded data URLs are supported; percent-encoded (non-base64) payloads yield `null`.
 * The non-standard `name=<value>` header parameter (percent-encoded) is honoured as the filename;
 * when absent, a default `inline-knowledge.txt` name is used.
 *
 * @param source - The candidate knowledge source string.
 * @returns `{ buffer, filename, mimeType }` on success, or `null` when the source is not a decodable data URL.
 * @private thing of inline knowledge
 */
function parseDataUrlKnowledgeSource(source) {
    if (!isDataUrlKnowledgeSource(source)) {
        return null;
    }
    const commaIndex = source.indexOf(',');
    if (commaIndex === -1) {
        // A data URL must separate its header from its payload with a comma
        return null;
    }
    const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
    const payload = source.slice(commaIndex + 1);
    const tokens = header.split(';');
    const mediaType = tokens[0] || 'text/plain'; // <- Empty media type defaults to text/plain per RFC 2397
    let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
    let isBase64 = false;
    for (let i = 1; i < tokens.length; i++) {
        const token = tokens[i];
        if (!token) {
            continue;
        }
        if (token.toLowerCase() === 'base64') {
            isBase64 = true;
            continue;
        }
        // Split only on the FIRST '=' so values that themselves contain '=' are preserved intact
        const separatorIndex = token.indexOf('=');
        if (separatorIndex === -1) {
            continue;
        }
        const key = token.slice(0, separatorIndex);
        const value = token.slice(separatorIndex + 1);
        if (key === 'name') {
            try {
                filename = decodeURIComponent(value);
            }
            catch (_a) {
                // Keep the raw value when it is not valid percent-encoding
                filename = value;
            }
        }
    }
    if (!isBase64) {
        // Percent-encoded (non-base64) payloads are not supported here
        return null;
    }
    try {
        const buffer = Buffer.from(payload, 'base64');
        return {
            buffer,
            filename,
            mimeType: mediaType,
        };
    }
    catch (_b) {
        return null;
    }
}
/**
 * Note: [💞] Ignore a discrepancy between file name and entity name
 */
4603
+ /**
4604
+ * Note: [💞] Ignore a discrepancy between file name and entity name
4605
+ */
4606
+
4435
4607
  const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
4436
4608
  const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
4437
4609
  const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
@@ -5068,7 +5240,9 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
5068
5240
  const processingStartedAtMs = Date.now();
5069
5241
  for (const [index, source] of knowledgeSources.entries()) {
5070
5242
  try {
5071
- const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
5243
+ const isDataUrl = isDataUrlKnowledgeSource(source);
5244
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
5245
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
5072
5246
  if (this.options.isVerbose) {
5073
5247
  console.info('[🤰]', 'Processing knowledge source', {
5074
5248
  index: index + 1,
@@ -5078,8 +5252,27 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
5078
5252
  logLabel,
5079
5253
  });
5080
5254
  }
5081
- // Check if it's a URL
5082
- if (source.startsWith('http://') || source.startsWith('https://')) {
5255
+ if (isDataUrl) {
5256
+ const parsed = parseDataUrlKnowledgeSource(source);
5257
+ if (!parsed) {
5258
+ skippedSources.push({ source, reason: 'invalid_data_url' });
5259
+ if (this.options.isVerbose) {
5260
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
5261
+ source,
5262
+ sourceType,
5263
+ logLabel,
5264
+ });
5265
+ }
5266
+ continue;
5267
+ }
5268
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
5269
+ type: parsed.mimeType,
5270
+ });
5271
+ fileStreams.push(dataUrlFile);
5272
+ totalBytes += parsed.buffer.length;
5273
+ continue;
5274
+ }
5275
+ if (isHttp) {
5083
5276
  const downloadResult = await this.downloadKnowledgeSourceFile({
5084
5277
  source,
5085
5278
  timeoutMs: downloadTimeoutMs,
@@ -6146,6 +6339,64 @@ const createOpenAiExecutionTools = Object.assign((options) => {
6146
6339
  */
6147
6340
 
6148
6341
  const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
6342
/**
 * Fallback schema name reported to OpenAI when a JSON schema definition does not provide one.
 */
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
/*
TODO: Use or remove
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
    type: 'object',
    properties: {},
    required: [],
    additionalProperties: true,
};
*/
/**
 * Normalizes an optional JSON schema payload into the AgentKit `json_schema` output type shape,
 * filling any missing pieces with permissive defaults (empty properties, no required keys,
 * `additionalProperties: true`).
 */
function buildJsonSchemaDefinition(jsonSchema) {
    const hasDefinition = jsonSchema !== undefined && jsonSchema !== null;
    const innerSchema = hasDefinition && jsonSchema.schema !== undefined && jsonSchema.schema !== null ? jsonSchema.schema : {};
    const schemaName = hasDefinition && jsonSchema.name !== undefined && jsonSchema.name !== null ? jsonSchema.name : DEFAULT_JSON_SCHEMA_NAME;
    const properties = innerSchema.properties === undefined || innerSchema.properties === null ? {} : innerSchema.properties;
    const required = Array.isArray(innerSchema.required) ? innerSchema.required : [];
    const additionalProperties = innerSchema.additionalProperties === undefined ? true : Boolean(innerSchema.additionalProperties);
    return {
        type: 'json_schema',
        name: schemaName,
        strict: hasDefinition && Boolean(jsonSchema.strict),
        schema: {
            type: 'object',
            properties,
            required,
            additionalProperties,
            description: innerSchema.description,
        },
    };
}
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request.
 * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
 * @private utility of Open AI
 */
function mapResponseFormatToAgentOutputType(responseFormat) {
    if (!responseFormat) {
        return undefined;
    }
    if (typeof responseFormat === 'string') {
        // String shorthands: any JSON-ish request gets a permissive default schema,
        // everything else (including unknown strings) degrades to plain text
        return responseFormat === 'json_schema' || responseFormat === 'json_object'
            ? buildJsonSchemaDefinition()
            : 'text';
    }
    if (responseFormat.type === 'text') {
        return 'text';
    }
    if (responseFormat.type === 'json_schema') {
        return buildJsonSchemaDefinition(responseFormat.json_schema);
    }
    if (responseFormat.type === 'json_object') {
        return buildJsonSchemaDefinition();
    }
    // Unknown object shapes do not constrain the agent output
    return undefined;
}
6149
6400
  /**
6150
6401
  * Execution tools for OpenAI AgentKit (Agents SDK).
6151
6402
  *
@@ -6193,6 +6444,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
6193
6444
  ...parameters,
6194
6445
  modelName: this.agentKitModelName,
6195
6446
  });
6447
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
6196
6448
  const preparedAgentKitAgent = await this.prepareAgentKitAgent({
6197
6449
  name: (prompt.title || 'Agent'),
6198
6450
  instructions: modelRequirements.systemMessage || '',
@@ -6204,6 +6456,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
6204
6456
  prompt,
6205
6457
  rawPromptContent,
6206
6458
  onProgress,
6459
+ responseFormatOutputType,
6207
6460
  });
6208
6461
  }
6209
6462
  /**
@@ -6385,16 +6638,21 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
6385
6638
  ...prompt.parameters,
6386
6639
  modelName: this.agentKitModelName,
6387
6640
  });
6641
+ const agentForRun = options.responseFormatOutputType !== undefined
6642
+ ? openAiAgentKitAgent.clone({
6643
+ outputType: options.responseFormatOutputType,
6644
+ })
6645
+ : openAiAgentKitAgent;
6388
6646
  const start = $getCurrentDate();
6389
6647
  let latestContent = '';
6390
6648
  const toolCalls = [];
6391
6649
  const toolCallIndexById = new Map();
6392
6650
  const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
6393
6651
  const rawRequest = {
6394
- agentName: openAiAgentKitAgent.name,
6652
+ agentName: agentForRun.name,
6395
6653
  input: inputItems,
6396
6654
  };
6397
- const streamResult = await run(openAiAgentKitAgent, inputItems, {
6655
+ const streamResult = await run(agentForRun, inputItems, {
6398
6656
  stream: true,
6399
6657
  context: { parameters: prompt.parameters },
6400
6658
  });