@promptbook/ollama 0.100.0-3 → 0.100.0-31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +1 -0
  2. package/esm/index.es.js +154 -6
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +24 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +30 -0
  7. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  11. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  12. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/FrontendRAGService.d.ts +48 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +51 -0
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/RAGService.d.ts +54 -0
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/BaseKnowledgeProcessor.d.ts +45 -0
  16. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/PdfProcessor.d.ts +31 -0
  17. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/ProcessorFactory.d.ts +23 -0
  18. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/TextProcessor.d.ts +18 -0
  19. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/types.d.ts +56 -0
  20. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/utils/ragHelper.d.ts +34 -0
  21. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  22. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  23. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  24. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  25. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  26. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  27. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  28. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  29. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  30. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  31. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  32. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  33. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  35. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  36. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  37. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +61 -0
  38. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +35 -0
  39. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  40. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  41. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  42. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  43. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  44. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +26 -0
  45. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  46. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  47. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +0 -5
  48. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  52. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  53. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  54. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  55. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  56. package/esm/typings/src/version.d.ts +1 -1
  57. package/package.json +2 -2
  58. package/umd/index.umd.js +154 -6
  59. package/umd/index.umd.js.map +1 -1
@@ -0,0 +1,26 @@
1
+ import type { string_book } from '../../book-2.0/agent-source/string_book';
2
+ export interface BookEditorProps {
3
+ /**
4
+ * Additional CSS classes to apply to the editor container.
5
+ */
6
+ className?: string;
7
+ /**
8
+ * CSS className for a font (e.g. from next/font) to style the editor text.
9
+ * If omitted, defaults to system serif fonts.
10
+ */
11
+ fontClassName?: string;
12
+ /**
13
+ * The book which is being edited.
14
+ */
15
+ value?: string_book;
16
+ /**
17
+ * Callback function to handle changes in the book content.
18
+ */
19
+ onChange?: (value: string_book) => void;
20
+ }
21
+ /**
22
+ * Renders a book editor
23
+ *
24
+ * @public exported from `@promptbook/components`
25
+ */
26
+ export declare function BookEditor(props: BookEditorProps): import("react/jsx-runtime").JSX.Element;
@@ -40,6 +40,10 @@ export type AvailableModel = {
40
40
  readonly prompt: number_usd;
41
41
  readonly output: number_usd;
42
42
  };
43
+ /**
44
+ * If the model is deprecated, it should not be used for new tasks
45
+ */
46
+ readonly isDeprecated?: boolean;
43
47
  };
44
48
  /**
45
49
  * TODO: [🧠] Maybe rename to something else - like `ModelInformation` or `ModelMetadata`
@@ -1,10 +1,11 @@
1
- import type { ReadonlyDeep, WritableDeep } from 'type-fest';
1
+ import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
4
  import type { Parameters } from '../../types/typeAliases';
5
5
  import type { string_parameter_name } from '../../types/typeAliases';
6
6
  import type { TODO_string } from '../../utils/organization/TODO_string';
7
7
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
8
+ import type { PipelineExecutorResult } from '../PipelineExecutorResult';
8
9
  import type { CreatePipelineExecutorOptions } from './00-CreatePipelineExecutorOptions';
9
10
  /**
10
11
  * Options for executing attempts of a pipeline task, including configuration for jokers, priority,
@@ -46,6 +47,10 @@ export type ExecuteAttemptsOptions = Required<Omit<CreatePipelineExecutorOptions
46
47
  * The pipeline structure prepared for execution, as a deeply immutable PipelineJson object.
47
48
  */
48
49
  readonly preparedPipeline: ReadonlyDeep<PipelineJson>;
50
+ /**
51
+ * Callback invoked with partial results as the execution progresses.
52
+ */
53
+ onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
49
54
  /**
50
55
  * The execution report object, which is updated during execution.
51
56
  */
@@ -2,7 +2,6 @@ import Anthropic from '@anthropic-ai/sdk';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
6
5
  import type { Prompt } from '../../types/Prompt';
7
6
  import type { string_markdown } from '../../types/typeAliases';
8
7
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -42,10 +41,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
42
41
  * Calls Anthropic Claude API to use a chat model.
43
42
  */
44
43
  callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
45
- /**
46
- * Calls Anthropic Claude API to use a completion model.
47
- */
48
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
49
44
  /**
50
45
  * Get the model that should be used as default
51
46
  */
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Anthropic Claude models with pricing
5
5
  *
6
- * Note: Done at 2025-05-06
6
+ * Note: Synced with official API docs at 2025-08-20
7
7
  *
8
8
  * @see https://docs.anthropic.com/en/docs/models-overview
9
9
  * @public exported from `@promptbook/anthropic-claude`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Deepseek models with descriptions
5
5
  *
6
- * Note: Done at 2025-05-06
6
+ * Note: Synced with official API docs at 2025-08-20
7
7
  *
8
8
  * @see https://www.deepseek.com/models
9
9
  * @public exported from `@promptbook/deepseek`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Google models with descriptions
5
5
  *
6
- * Note: Done at 2025-05-06
6
+ * Note: Synced with official API docs at 2025-08-20
7
7
  *
8
8
  * @see https://ai.google.dev/models/gemini
9
9
  * @public exported from `@promptbook/google`
@@ -2,7 +2,7 @@ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  /**
3
3
  * List of available models in Ollama library
4
4
  *
5
- * Note: Done at 2025-05-19
5
+ * Note: Synced with official API docs at 2025-08-20
6
6
  *
7
7
  * @see https://ollama.com/library
8
8
  * @public exported from `@promptbook/ollama`
@@ -2,7 +2,7 @@ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  /**
3
3
  * List of available OpenAI models with pricing
4
4
  *
5
- * Note: Done at 2025-05-06
5
+ * Note: Synced with official API docs at 2025-08-20
6
6
  *
7
7
  * @see https://platform.openai.com/docs/models/
8
8
  * @see https://openai.com/api/pricing/
@@ -1,3 +1,4 @@
1
+ import type { string_book } from '../book-2.0/agent-source/string_book';
1
2
  import type { PipelineString } from './PipelineString';
2
3
  /**
3
4
  * Tag function for notating a pipeline with a book\`...\ notation as template literal
@@ -12,7 +13,7 @@ import type { PipelineString } from './PipelineString';
12
13
  * @returns the pipeline string
13
14
  * @public exported from `@promptbook/core`
14
15
  */
15
- export declare function book(strings: TemplateStringsArray, ...values: Array<string>): PipelineString;
16
+ export declare function book(strings: TemplateStringsArray, ...values: Array<string>): string_book & PipelineString;
16
17
  /**
17
18
  * TODO: [🧠][🈴] Where is the best location for this file
18
19
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -87,8 +87,6 @@ export type CommonModelRequirements = {
87
87
  readonly seed?: number_seed;
88
88
  /**
89
89
  * Maximum number of tokens that can be generated by the model
90
- *
91
- * Note: [🌾]
92
90
  */
93
91
  readonly maxTokens?: number;
94
92
  };
@@ -136,6 +136,12 @@ export type ReservedParameters = Record<string_reserved_parameter_name, string_p
136
136
  * For example `"Ai*nautes"`
137
137
  */
138
138
  export type string_title = string;
139
+ /**
140
+ * Semantic helper
141
+ *
142
+ * For example `"My AI Assistant"`
143
+ */
144
+ export type string_agent_name = string;
139
145
  /**
140
146
  * Unstructured description of the persona
141
147
  *
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.100.0-2`).
18
+ * It follows semantic versioning (e.g., `0.100.0-30`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/ollama",
3
- "version": "0.100.0-3",
3
+ "version": "0.100.0-31",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -94,7 +94,7 @@
94
94
  "module": "./esm/index.es.js",
95
95
  "typings": "./esm/typings/src/_packages/ollama.index.d.ts",
96
96
  "peerDependencies": {
97
- "@promptbook/core": "0.100.0-3"
97
+ "@promptbook/core": "0.100.0-31"
98
98
  },
99
99
  "dependencies": {
100
100
  "bottleneck": "^2.19.5",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-3';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-31';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1076,7 +1076,7 @@
1076
1076
  /**
1077
1077
  * List of available OpenAI models with pricing
1078
1078
  *
1079
- * Note: Done at 2025-05-06
1079
+ * Note: Synced with official API docs at 2025-08-20
1080
1080
  *
1081
1081
  * @see https://platform.openai.com/docs/models/
1082
1082
  * @see https://openai.com/api/pricing/
@@ -1085,6 +1085,138 @@
1085
1085
  const OPENAI_MODELS = exportJson({
1086
1086
  name: 'OPENAI_MODELS',
1087
1087
  value: [
1088
+ /**/
1089
+ {
1090
+ modelVariant: 'CHAT',
1091
+ modelTitle: 'gpt-5',
1092
+ modelName: 'gpt-5',
1093
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
1094
+ pricing: {
1095
+ prompt: pricing(`$1.25 / 1M tokens`),
1096
+ output: pricing(`$10.00 / 1M tokens`),
1097
+ },
1098
+ },
1099
+ /**/
1100
+ /**/
1101
+ {
1102
+ modelVariant: 'CHAT',
1103
+ modelTitle: 'gpt-5-mini',
1104
+ modelName: 'gpt-5-mini',
1105
+ modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
1106
+ pricing: {
1107
+ prompt: pricing(`$0.25 / 1M tokens`),
1108
+ output: pricing(`$2.00 / 1M tokens`),
1109
+ },
1110
+ },
1111
+ /**/
1112
+ /**/
1113
+ {
1114
+ modelVariant: 'CHAT',
1115
+ modelTitle: 'gpt-5-nano',
1116
+ modelName: 'gpt-5-nano',
1117
+ modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
1118
+ pricing: {
1119
+ prompt: pricing(`$0.05 / 1M tokens`),
1120
+ output: pricing(`$0.40 / 1M tokens`),
1121
+ },
1122
+ },
1123
+ /**/
1124
+ /**/
1125
+ {
1126
+ modelVariant: 'CHAT',
1127
+ modelTitle: 'gpt-4.1',
1128
+ modelName: 'gpt-4.1',
1129
+ modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
1130
+ pricing: {
1131
+ prompt: pricing(`$3.00 / 1M tokens`),
1132
+ output: pricing(`$12.00 / 1M tokens`),
1133
+ },
1134
+ },
1135
+ /**/
1136
+ /**/
1137
+ {
1138
+ modelVariant: 'CHAT',
1139
+ modelTitle: 'gpt-4.1-mini',
1140
+ modelName: 'gpt-4.1-mini',
1141
+ modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
1142
+ pricing: {
1143
+ prompt: pricing(`$0.80 / 1M tokens`),
1144
+ output: pricing(`$3.20 / 1M tokens`),
1145
+ },
1146
+ },
1147
+ /**/
1148
+ /**/
1149
+ {
1150
+ modelVariant: 'CHAT',
1151
+ modelTitle: 'gpt-4.1-nano',
1152
+ modelName: 'gpt-4.1-nano',
1153
+ modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
1154
+ pricing: {
1155
+ prompt: pricing(`$0.20 / 1M tokens`),
1156
+ output: pricing(`$0.80 / 1M tokens`),
1157
+ },
1158
+ },
1159
+ /**/
1160
+ /**/
1161
+ {
1162
+ modelVariant: 'CHAT',
1163
+ modelTitle: 'o3',
1164
+ modelName: 'o3',
1165
+ modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
1166
+ pricing: {
1167
+ prompt: pricing(`$15.00 / 1M tokens`),
1168
+ output: pricing(`$60.00 / 1M tokens`),
1169
+ },
1170
+ },
1171
+ /**/
1172
+ /**/
1173
+ {
1174
+ modelVariant: 'CHAT',
1175
+ modelTitle: 'o3-pro',
1176
+ modelName: 'o3-pro',
1177
+ modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
1178
+ pricing: {
1179
+ prompt: pricing(`$30.00 / 1M tokens`),
1180
+ output: pricing(`$120.00 / 1M tokens`),
1181
+ },
1182
+ },
1183
+ /**/
1184
+ /**/
1185
+ {
1186
+ modelVariant: 'CHAT',
1187
+ modelTitle: 'o4-mini',
1188
+ modelName: 'o4-mini',
1189
+ modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
1190
+ pricing: {
1191
+ prompt: pricing(`$4.00 / 1M tokens`),
1192
+ output: pricing(`$16.00 / 1M tokens`),
1193
+ },
1194
+ },
1195
+ /**/
1196
+ /**/
1197
+ {
1198
+ modelVariant: 'CHAT',
1199
+ modelTitle: 'o3-deep-research',
1200
+ modelName: 'o3-deep-research',
1201
+ modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
1202
+ pricing: {
1203
+ prompt: pricing(`$25.00 / 1M tokens`),
1204
+ output: pricing(`$100.00 / 1M tokens`),
1205
+ },
1206
+ },
1207
+ /**/
1208
+ /**/
1209
+ {
1210
+ modelVariant: 'CHAT',
1211
+ modelTitle: 'o4-mini-deep-research',
1212
+ modelName: 'o4-mini-deep-research',
1213
+ modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
1214
+ pricing: {
1215
+ prompt: pricing(`$12.00 / 1M tokens`),
1216
+ output: pricing(`$48.00 / 1M tokens`),
1217
+ },
1218
+ },
1219
+ /**/
1088
1220
  /*/
1089
1221
  {
1090
1222
  modelTitle: 'dall-e-3',
@@ -1844,7 +1976,6 @@
1844
1976
  const modelSettings = {
1845
1977
  model: modelName,
1846
1978
  max_tokens: modelRequirements.maxTokens,
1847
- // <- TODO: [🌾] Make some global max cap for maxTokens
1848
1979
  temperature: modelRequirements.temperature,
1849
1980
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1850
1981
  // <- Note: [🧆]
@@ -1940,8 +2071,7 @@
1940
2071
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1941
2072
  const modelSettings = {
1942
2073
  model: modelName,
1943
- max_tokens: modelRequirements.maxTokens || 2000,
1944
- // <- TODO: [🌾] Make some global max cap for maxTokens
2074
+ max_tokens: modelRequirements.maxTokens,
1945
2075
  temperature: modelRequirements.temperature,
1946
2076
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1947
2077
  // <- Note: [🧆]
@@ -2090,7 +2220,7 @@
2090
2220
  /**
2091
2221
  * List of available models in Ollama library
2092
2222
  *
2093
- * Note: Done at 2025-05-19
2223
+ * Note: Synced with official API docs at 2025-08-20
2094
2224
  *
2095
2225
  * @see https://ollama.com/library
2096
2226
  * @public exported from `@promptbook/ollama`
@@ -2098,6 +2228,24 @@
2098
2228
  const OLLAMA_MODELS = exportJson({
2099
2229
  name: 'OLLAMA_MODELS',
2100
2230
  value: [
2231
+ {
2232
+ modelVariant: 'CHAT',
2233
+ modelTitle: 'llama3.3',
2234
+ modelName: 'llama3.3',
2235
+ modelDescription: 'Meta Llama 3.3 (70B parameters) with 128K context window. Latest generation foundation model with significantly enhanced reasoning, instruction following, and multilingual capabilities. Features improved performance on complex tasks and better factual accuracy compared to Llama 3.1.',
2236
+ },
2237
+ {
2238
+ modelVariant: 'CHAT',
2239
+ modelTitle: 'llama3.2',
2240
+ modelName: 'llama3.2',
2241
+ modelDescription: 'Meta Llama 3.2 (1B-90B parameters) with 128K context window. Enhanced model with improved reasoning capabilities, better instruction following, and multimodal support in larger variants. Features significant performance improvements over Llama 3.1 across diverse tasks.',
2242
+ },
2243
+ {
2244
+ modelVariant: 'CHAT',
2245
+ modelTitle: 'llama3.1',
2246
+ modelName: 'llama3.1',
2247
+ modelDescription: 'Meta Llama 3.1 (8B-405B parameters) with 128K context window. Advanced foundation model with enhanced reasoning, improved multilingual capabilities, and better performance on complex tasks. Features significant improvements in code generation and mathematical reasoning.',
2248
+ },
2101
2249
  {
2102
2250
  modelVariant: 'CHAT',
2103
2251
  modelTitle: 'llama3',