@promptbook/azure-openai 0.100.0-4 → 0.100.0-41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +1 -0
  2. package/esm/index.es.js +135 -5
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +26 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +34 -0
  7. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  11. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  12. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/FrontendRAGService.d.ts +48 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +51 -0
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/RAGService.d.ts +54 -0
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/BaseKnowledgeProcessor.d.ts +45 -0
  16. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/PdfProcessor.d.ts +31 -0
  17. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/ProcessorFactory.d.ts +23 -0
  18. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/TextProcessor.d.ts +18 -0
  19. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/types.d.ts +56 -0
  20. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/utils/ragHelper.d.ts +34 -0
  21. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  22. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  23. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  24. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  25. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  26. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  27. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  28. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  29. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  30. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  31. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  32. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  33. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  35. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  36. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  37. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +61 -0
  38. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +35 -0
  39. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  40. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  41. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  42. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  43. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  44. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
  45. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
  46. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
  47. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +35 -0
  48. package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
  49. package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
  50. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  51. package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
  52. package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
  53. package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
  54. package/esm/typings/src/config.d.ts +6 -0
  55. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  56. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -0
  57. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  58. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +0 -5
  59. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  60. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  61. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  62. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  63. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  64. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  65. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  66. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  67. package/esm/typings/src/version.d.ts +1 -1
  68. package/package.json +2 -2
  69. package/umd/index.umd.js +135 -5
  70. package/umd/index.umd.js.map +1 -1
@@ -0,0 +1,35 @@
1
+ import type { AgentBasicInformation } from '../../../book-2.0/agent-source/parseAgentSource';
2
+ import type { string_css_class } from '../../../types/typeAliases';
3
+ /**
4
+ * Props of `AvatarChip`
5
+ *
6
+ * @public exported from `@promptbook/components`
7
+ */
8
+ export type AvatarChipProps = {
9
+ /**
10
+ * Avatar to be shown
11
+ */
12
+ readonly avatarBasicInformation: AgentBasicInformation;
13
+ /**
14
+ * Whether this chip is a template avatar
15
+ */
16
+ readonly isTemplate: boolean;
17
+ /**
18
+ * Optional CSS class name which will be added to root <div> element
19
+ */
20
+ readonly className?: string_css_class;
21
+ /**
22
+ * Called when chip is clicked
23
+ */
24
+ readonly onSelect?: (avatar: AgentBasicInformation) => void;
25
+ /**
26
+ * Whether this chip is selected
27
+ */
28
+ readonly isSelected?: boolean;
29
+ };
30
+ /**
31
+ * Shows a chip with avatar's image and name
32
+ *
33
+ * @public exported from `@promptbook/components`
34
+ */
35
+ export declare function AvatarChip(props: AvatarChipProps): import("react/jsx-runtime").JSX.Element;
@@ -0,0 +1,21 @@
1
+ import type { string_book } from '../../../book-2.0/agent-source/string_book';
2
+ import type { AvatarChipProps } from './AvatarChip';
3
+ /**
4
+ * Props of `AvatarChipFromSource`
5
+ *
6
+ * @public exported from `@promptbook/components`
7
+ */
8
+ export type AvatarChipFromSource = Omit<AvatarChipProps, 'avatarBasicInformation'> & {
9
+ /**
10
+ * Avatar to be shown
11
+ */
12
+ readonly source: string_book;
13
+ };
14
+ /**
15
+ * Shows a chip with avatar's image and name based on the avatar source string
16
+ *
17
+ * This component is wrapped around the `<AvatarChip/>`, it just parses the avatar source string into `AvatarBasicInformation` and passes it to the `<AvatarChip/>` component.
18
+ *
19
+ * @public exported from `@promptbook/components`
20
+ */
21
+ export declare function AvatarChipFromSource(props: AvatarChipFromSource): import("react/jsx-runtime").JSX.Element;
@@ -0,0 +1,2 @@
1
+ export * from './AvatarChip';
2
+ export * from './AvatarChipFromSource';
@@ -0,0 +1,35 @@
1
+ import type { string_book } from '../../book-2.0/agent-source/string_book';
2
+ /**
3
+ * Props of `BookEditor`
4
+ *
5
+ * @public exported from `@promptbook/components`
6
+ */
7
+ export type BookEditorProps = {
8
+ /**
9
+ * Additional CSS classes to apply to the editor container.
10
+ */
11
+ readonly className?: string;
12
+ /**
13
+ * CSS className for a font (e.g. from next/font) to style the editor text.
14
+ * If omitted, defaults to system serif fonts.
15
+ */
16
+ readonly fontClassName?: string;
17
+ /**
18
+ * The book which is being edited.
19
+ */
20
+ readonly value?: string_book;
21
+ /**
22
+ * Callback function to handle changes in the book content.
23
+ */
24
+ onChange?(value: string_book): void;
25
+ /**
26
+ * If true, logs verbose debug info to the console and shows additional visual cues
27
+ */
28
+ readonly isVerbose?: boolean;
29
+ };
30
+ /**
31
+ * Renders a book editor
32
+ *
33
+ * @public exported from `@promptbook/components`
34
+ */
35
+ export declare function BookEditor(props: BookEditorProps): import("react/jsx-runtime").JSX.Element;
@@ -0,0 +1,10 @@
1
+ /**
2
+ * Default font class name for the BookEditor component
3
+ * In Next.js environments, you can override this by importing the font directly
4
+ *
5
+ * @public exported from `@promptbook/components`
6
+ */
7
+ export declare const DEFAULT_BOOK_FONT_CLASS: any;
8
+ /**
9
+ * Note: [💞] Ignore a discrepancy between file name and entity name
10
+ */
@@ -0,0 +1,11 @@
1
+ /**
2
+ * Inject the CSS module rules (derived from imported `styles`) into the provided shadow root.
3
+ * This allows CSS modules (which are normally emitted into the document head) to be
4
+ * available inside the component's shadow DOM.
5
+ *
6
+ * @private within the promptbook components <- TODO: Maybe make promptbook util from this
7
+ */
8
+ export declare function injectCssModuleIntoShadowRoot(shadowRoot: ShadowRoot): void;
9
+ /**
10
+ * TODO: Make some utility functions for working with CSS modules in shadow DOM independent of `BookEditor.module.css`
11
+ */
@@ -0,0 +1,7 @@
1
+ import type { string_css_class } from '../../../types/typeAliases';
2
+ /**
3
+ * Utility function for joining multiple truthy class names into one string
4
+ *
5
+ * @private within the `@promptbook/components`
6
+ */
7
+ export declare function classNames(...classes: Array<string_css_class | undefined | false | null>): string_css_class;
@@ -0,0 +1,7 @@
1
+ /**
2
+ * Collect matching CSS texts from document stylesheets for a given class.
3
+ * This will skip cross-origin stylesheets (they throw when accessed).
4
+ *
5
+ * @private within the promptbook components <- TODO: Maybe make promptbook util from this
6
+ */
7
+ export declare function collectCssTextsForClass(className: string): string[];
@@ -0,0 +1,6 @@
1
+ /**
2
+ * Escape HTML to safely render user text inside a <pre> with dangerouslySetInnerHTML.
3
+ *
4
+ * @private within the promptbook components <- TODO: Maybe make promptbook util from this
5
+ */
6
+ export declare function escapeHtml(input: string): string;
@@ -0,0 +1,6 @@
1
+ /**
2
+ * Escape text for safe use inside a RegExp pattern.
3
+ *
4
+ * @private within the promptbook components <- TODO: Maybe make promptbook util from this
5
+ */
6
+ export declare function escapeRegex(input: string): string;
@@ -281,6 +281,12 @@ export declare function SET_IS_VERBOSE(isVerbose: boolean): void;
281
281
  * @public exported from `@promptbook/core`
282
282
  */
283
283
  export declare const DEFAULT_IS_AUTO_INSTALLED = false;
284
+ /**
285
+ * Default simulated duration for a task in milliseconds (used for progress reporting)
286
+ *
287
+ * @public exported from `@promptbook/core`
288
+ */
289
+ export declare const DEFAULT_TASK_SIMULATED_DURATION_MS: number;
284
290
  /**
285
291
  * Function name for generated function via `ptbk make` to get the pipeline collection
286
292
  *
@@ -40,6 +40,10 @@ export type AvailableModel = {
40
40
  readonly prompt: number_usd;
41
41
  readonly output: number_usd;
42
42
  };
43
+ /**
44
+ * If the model is deprecated, it should not be used for new tasks
45
+ */
46
+ readonly isDeprecated?: boolean;
43
47
  };
44
48
  /**
45
49
  * TODO: [🧠] Maybe rename to something else - like `ModelInformation` or `ModelMetadata`
@@ -1,5 +1,6 @@
1
1
  import type { Observable } from 'rxjs';
2
2
  import { PartialDeep } from 'type-fest';
3
+ import type { number_percent } from '../types/typeAliases';
3
4
  import type { task_id } from '../types/typeAliases';
4
5
  import type { string_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
5
6
  import type { string_promptbook_version } from '../version';
@@ -28,6 +29,19 @@ type CreateTaskOptions<TTaskResult extends AbstractTaskResult> = {
28
29
  */
29
30
  readonly title?: AbstractTask<TTaskResult>['title'];
30
31
  }) => void): Promise<TTaskResult>;
32
+ /**
33
+ * Optional callback to provide custom tldr information
34
+ * @param createdAt When the task was created
35
+ * @param status Current task status
36
+ * @param currentValue Current partial result
37
+ * @param errors Current errors
38
+ * @param warnings Current warnings
39
+ * @returns Custom tldr information
40
+ */
41
+ tldrProvider?(createdAt: Date, status: task_status, currentValue: PartialDeep<TTaskResult>, errors: Array<Error>, warnings: Array<Error>): {
42
+ readonly percent: number_percent;
43
+ readonly message: string;
44
+ };
31
45
  };
32
46
  /**
33
47
  * Helper to create a new task
@@ -80,6 +94,19 @@ export type AbstractTask<TTaskResult extends AbstractTaskResult> = {
80
94
  * Status of the task
81
95
  */
82
96
  readonly status: task_status;
97
+ /**
98
+ * Short summary of the task status for quick overview in the UI
99
+ */
100
+ readonly tldr: {
101
+ /**
102
+ * Progress in percentage from 0 to 1 (100%) that can be used to display a progress bar
103
+ */
104
+ readonly percent: number_percent;
105
+ /**
106
+ * Short summary message of the task status that can be displayed in the UI
107
+ */
108
+ readonly message: string;
109
+ };
83
110
  /**
84
111
  * Date when the task was created
85
112
  */
@@ -1,10 +1,11 @@
1
- import type { ReadonlyDeep, WritableDeep } from 'type-fest';
1
+ import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
4
  import type { Parameters } from '../../types/typeAliases';
5
5
  import type { string_parameter_name } from '../../types/typeAliases';
6
6
  import type { TODO_string } from '../../utils/organization/TODO_string';
7
7
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
8
+ import type { PipelineExecutorResult } from '../PipelineExecutorResult';
8
9
  import type { CreatePipelineExecutorOptions } from './00-CreatePipelineExecutorOptions';
9
10
  /**
10
11
  * Options for executing attempts of a pipeline task, including configuration for jokers, priority,
@@ -46,6 +47,10 @@ export type ExecuteAttemptsOptions = Required<Omit<CreatePipelineExecutorOptions
46
47
  * The pipeline structure prepared for execution, as a deeply immutable PipelineJson object.
47
48
  */
48
49
  readonly preparedPipeline: ReadonlyDeep<PipelineJson>;
50
+ /**
51
+ * Callback invoked with partial results as the execution progresses.
52
+ */
53
+ onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
49
54
  /**
50
55
  * The execution report object, which is updated during execution.
51
56
  */
@@ -2,7 +2,6 @@ import Anthropic from '@anthropic-ai/sdk';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
6
5
  import type { Prompt } from '../../types/Prompt';
7
6
  import type { string_markdown } from '../../types/typeAliases';
8
7
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -42,10 +41,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
42
41
  * Calls Anthropic Claude API to use a chat model.
43
42
  */
44
43
  callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
45
- /**
46
- * Calls Anthropic Claude API to use a completion model.
47
- */
48
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
49
44
  /**
50
45
  * Get the model that should be used as default
51
46
  */
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Anthropic Claude models with pricing
5
5
  *
6
- * Note: Done at 2025-05-06
6
+ * Note: Synced with official API docs at 2025-08-20
7
7
  *
8
8
  * @see https://docs.anthropic.com/en/docs/models-overview
9
9
  * @public exported from `@promptbook/anthropic-claude`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Deepseek models with descriptions
5
5
  *
6
- * Note: Done at 2025-05-06
6
+ * Note: Synced with official API docs at 2025-08-20
7
7
  *
8
8
  * @see https://www.deepseek.com/models
9
9
  * @public exported from `@promptbook/deepseek`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Google models with descriptions
5
5
  *
6
- * Note: Done at 2025-05-06
6
+ * Note: Synced with official API docs at 2025-08-20
7
7
  *
8
8
  * @see https://ai.google.dev/models/gemini
9
9
  * @public exported from `@promptbook/google`
@@ -2,7 +2,7 @@ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  /**
3
3
  * List of available models in Ollama library
4
4
  *
5
- * Note: Done at 2025-05-19
5
+ * Note: Synced with official API docs at 2025-08-20
6
6
  *
7
7
  * @see https://ollama.com/library
8
8
  * @public exported from `@promptbook/ollama`
@@ -2,7 +2,7 @@ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  /**
3
3
  * List of available OpenAI models with pricing
4
4
  *
5
- * Note: Done at 2025-05-06
5
+ * Note: Synced with official API docs at 2025-08-20
6
6
  *
7
7
  * @see https://platform.openai.com/docs/models/
8
8
  * @see https://openai.com/api/pricing/
@@ -1,3 +1,4 @@
1
+ import type { string_book } from '../book-2.0/agent-source/string_book';
1
2
  import type { PipelineString } from './PipelineString';
2
3
  /**
3
4
  * Tag function for notating a pipeline with a book\`...\` notation as template literal
@@ -12,7 +13,7 @@ import type { PipelineString } from './PipelineString';
12
13
  * @returns the pipeline string
13
14
  * @public exported from `@promptbook/core`
14
15
  */
15
- export declare function book(strings: TemplateStringsArray, ...values: Array<string>): PipelineString;
16
+ export declare function book(strings: TemplateStringsArray, ...values: Array<string>): string_book & PipelineString;
16
17
  /**
17
18
  * TODO: [🧠][🈴] Where is the best location for this file
18
19
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -87,8 +87,6 @@ export type CommonModelRequirements = {
87
87
  readonly seed?: number_seed;
88
88
  /**
89
89
  * Maximum number of tokens that can be generated by the model
90
- *
91
- * Note: [🌾]
92
90
  */
93
91
  readonly maxTokens?: number;
94
92
  };
@@ -136,6 +136,12 @@ export type ReservedParameters = Record<string_reserved_parameter_name, string_p
136
136
  * For example `"Ai*nautes"`
137
137
  */
138
138
  export type string_title = string;
139
+ /**
140
+ * Semantic helper
141
+ *
142
+ * For example `"My AI Assistant"`
143
+ */
144
+ export type string_agent_name = string;
139
145
  /**
140
146
  * Unstructured description of the persona
141
147
  *
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.100.0-3`).
18
+ * It follows semantic versioning (e.g., `0.100.0-40`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/azure-openai",
3
- "version": "0.100.0-4",
3
+ "version": "0.100.0-41",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -105,7 +105,7 @@
105
105
  "module": "./esm/index.es.js",
106
106
  "typings": "./esm/typings/src/_packages/azure-openai.index.d.ts",
107
107
  "peerDependencies": {
108
- "@promptbook/core": "0.100.0-4"
108
+ "@promptbook/core": "0.100.0-41"
109
109
  },
110
110
  "dependencies": {
111
111
  "@azure/openai": "1.0.0-beta.12",
package/umd/index.umd.js CHANGED
@@ -24,7 +24,7 @@
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-4';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-41';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1212,7 +1212,7 @@
1212
1212
  /**
1213
1213
  * List of available OpenAI models with pricing
1214
1214
  *
1215
- * Note: Done at 2025-05-06
1215
+ * Note: Synced with official API docs at 2025-08-20
1216
1216
  *
1217
1217
  * @see https://platform.openai.com/docs/models/
1218
1218
  * @see https://openai.com/api/pricing/
@@ -1221,6 +1221,138 @@
1221
1221
  const OPENAI_MODELS = exportJson({
1222
1222
  name: 'OPENAI_MODELS',
1223
1223
  value: [
1224
+ /**/
1225
+ {
1226
+ modelVariant: 'CHAT',
1227
+ modelTitle: 'gpt-5',
1228
+ modelName: 'gpt-5',
1229
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
1230
+ pricing: {
1231
+ prompt: pricing(`$1.25 / 1M tokens`),
1232
+ output: pricing(`$10.00 / 1M tokens`),
1233
+ },
1234
+ },
1235
+ /**/
1236
+ /**/
1237
+ {
1238
+ modelVariant: 'CHAT',
1239
+ modelTitle: 'gpt-5-mini',
1240
+ modelName: 'gpt-5-mini',
1241
+ modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
1242
+ pricing: {
1243
+ prompt: pricing(`$0.25 / 1M tokens`),
1244
+ output: pricing(`$2.00 / 1M tokens`),
1245
+ },
1246
+ },
1247
+ /**/
1248
+ /**/
1249
+ {
1250
+ modelVariant: 'CHAT',
1251
+ modelTitle: 'gpt-5-nano',
1252
+ modelName: 'gpt-5-nano',
1253
+ modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
1254
+ pricing: {
1255
+ prompt: pricing(`$0.05 / 1M tokens`),
1256
+ output: pricing(`$0.40 / 1M tokens`),
1257
+ },
1258
+ },
1259
+ /**/
1260
+ /**/
1261
+ {
1262
+ modelVariant: 'CHAT',
1263
+ modelTitle: 'gpt-4.1',
1264
+ modelName: 'gpt-4.1',
1265
+ modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
1266
+ pricing: {
1267
+ prompt: pricing(`$3.00 / 1M tokens`),
1268
+ output: pricing(`$12.00 / 1M tokens`),
1269
+ },
1270
+ },
1271
+ /**/
1272
+ /**/
1273
+ {
1274
+ modelVariant: 'CHAT',
1275
+ modelTitle: 'gpt-4.1-mini',
1276
+ modelName: 'gpt-4.1-mini',
1277
+ modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
1278
+ pricing: {
1279
+ prompt: pricing(`$0.80 / 1M tokens`),
1280
+ output: pricing(`$3.20 / 1M tokens`),
1281
+ },
1282
+ },
1283
+ /**/
1284
+ /**/
1285
+ {
1286
+ modelVariant: 'CHAT',
1287
+ modelTitle: 'gpt-4.1-nano',
1288
+ modelName: 'gpt-4.1-nano',
1289
+ modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
1290
+ pricing: {
1291
+ prompt: pricing(`$0.20 / 1M tokens`),
1292
+ output: pricing(`$0.80 / 1M tokens`),
1293
+ },
1294
+ },
1295
+ /**/
1296
+ /**/
1297
+ {
1298
+ modelVariant: 'CHAT',
1299
+ modelTitle: 'o3',
1300
+ modelName: 'o3',
1301
+ modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
1302
+ pricing: {
1303
+ prompt: pricing(`$15.00 / 1M tokens`),
1304
+ output: pricing(`$60.00 / 1M tokens`),
1305
+ },
1306
+ },
1307
+ /**/
1308
+ /**/
1309
+ {
1310
+ modelVariant: 'CHAT',
1311
+ modelTitle: 'o3-pro',
1312
+ modelName: 'o3-pro',
1313
+ modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
1314
+ pricing: {
1315
+ prompt: pricing(`$30.00 / 1M tokens`),
1316
+ output: pricing(`$120.00 / 1M tokens`),
1317
+ },
1318
+ },
1319
+ /**/
1320
+ /**/
1321
+ {
1322
+ modelVariant: 'CHAT',
1323
+ modelTitle: 'o4-mini',
1324
+ modelName: 'o4-mini',
1325
+ modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
1326
+ pricing: {
1327
+ prompt: pricing(`$4.00 / 1M tokens`),
1328
+ output: pricing(`$16.00 / 1M tokens`),
1329
+ },
1330
+ },
1331
+ /**/
1332
+ /**/
1333
+ {
1334
+ modelVariant: 'CHAT',
1335
+ modelTitle: 'o3-deep-research',
1336
+ modelName: 'o3-deep-research',
1337
+ modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
1338
+ pricing: {
1339
+ prompt: pricing(`$25.00 / 1M tokens`),
1340
+ output: pricing(`$100.00 / 1M tokens`),
1341
+ },
1342
+ },
1343
+ /**/
1344
+ /**/
1345
+ {
1346
+ modelVariant: 'CHAT',
1347
+ modelTitle: 'o4-mini-deep-research',
1348
+ modelName: 'o4-mini-deep-research',
1349
+ modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
1350
+ pricing: {
1351
+ prompt: pricing(`$12.00 / 1M tokens`),
1352
+ output: pricing(`$48.00 / 1M tokens`),
1353
+ },
1354
+ },
1355
+ /**/
1224
1356
  /*/
1225
1357
  {
1226
1358
  modelTitle: 'dall-e-3',
@@ -1741,7 +1873,6 @@
1741
1873
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1742
1874
  const modelSettings = {
1743
1875
  maxTokens: modelRequirements.maxTokens,
1744
- // <- TODO: [🌾] Make some global max cap for maxTokens
1745
1876
  temperature: modelRequirements.temperature,
1746
1877
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1747
1878
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -1847,8 +1978,7 @@
1847
1978
  try {
1848
1979
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1849
1980
  const modelSettings = {
1850
- maxTokens: modelRequirements.maxTokens || 2000,
1851
- // <- TODO: [🌾] Make some global max cap for maxTokens
1981
+ maxTokens: modelRequirements.maxTokens,
1852
1982
  temperature: modelRequirements.temperature,
1853
1983
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1854
1984
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools