@promptbook/cli 0.89.0 → 0.92.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -23,6 +23,10 @@
23
23
 
24
24
 
25
25
 
26
+ <blockquote style="color: #ff8811">
27
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please use the <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a> instead.
28
+ </blockquote>
29
+
26
30
  ## 📦 Package `@promptbook/cli`
27
31
 
28
32
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -46,7 +46,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
46
46
  * @generated
47
47
  * @see https://github.com/webgptorg/promptbook
48
48
  */
49
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
49
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-3';
50
50
  /**
51
51
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
52
52
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -14167,6 +14167,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14167
14167
  modelVariant: 'CHAT',
14168
14168
  modelTitle: 'Claude 3.5 Sonnet',
14169
14169
  modelName: 'claude-3-5-sonnet-20240620',
14170
+ modelDescription: 'Claude 3.5 generation model with great reasoning, coding, and language understanding capabilities. 200K context window. Optimized balance of intelligence and speed.',
14170
14171
  pricing: {
14171
14172
  prompt: computeUsage(`$3.00 / 1M tokens`),
14172
14173
  output: computeUsage(`$15.00 / 1M tokens`),
@@ -14176,6 +14177,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14176
14177
  modelVariant: 'CHAT',
14177
14178
  modelTitle: 'Claude 3 Opus',
14178
14179
  modelName: 'claude-3-opus-20240229',
14180
+ modelDescription: 'Most capable Claude model excelling at complex reasoning, coding, and detailed instruction following. 200K context window. Best for sophisticated tasks requiring nuanced understanding.',
14179
14181
  pricing: {
14180
14182
  prompt: computeUsage(`$15.00 / 1M tokens`),
14181
14183
  output: computeUsage(`$75.00 / 1M tokens`),
@@ -14185,6 +14187,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14185
14187
  modelVariant: 'CHAT',
14186
14188
  modelTitle: 'Claude 3 Sonnet',
14187
14189
  modelName: 'claude-3-sonnet-20240229',
14190
+ modelDescription: 'Strong general-purpose model with excellent performance across reasoning, conversation, and coding tasks. 200K context window. Good balance of intelligence and cost-efficiency.',
14188
14191
  pricing: {
14189
14192
  prompt: computeUsage(`$3.00 / 1M tokens`),
14190
14193
  output: computeUsage(`$15.00 / 1M tokens`),
@@ -14194,6 +14197,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14194
14197
  modelVariant: 'CHAT',
14195
14198
  modelTitle: 'Claude 3 Haiku',
14196
14199
 modelName: 'claude-3-haiku-20240307',
14200
+ modelDescription: 'Fastest and most compact Claude model optimized for responsiveness in interactive applications. 200K context window. Excellent for quick responses and lightweight applications.',
14197
14201
  pricing: {
14198
14202
  prompt: computeUsage(`$0.25 / 1M tokens`),
14199
14203
  output: computeUsage(`$1.25 / 1M tokens`),
@@ -14203,6 +14207,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14203
14207
  modelVariant: 'CHAT',
14204
14208
  modelTitle: 'Claude 2.1',
14205
14209
  modelName: 'claude-2.1',
14210
+ modelDescription: 'Improved version of Claude 2 with better performance across reasoning and truthfulness. 100K context window. Legacy model with strong reliability.',
14206
14211
  pricing: {
14207
14212
  prompt: computeUsage(`$8.00 / 1M tokens`),
14208
14213
  output: computeUsage(`$24.00 / 1M tokens`),
@@ -14212,6 +14217,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14212
14217
  modelVariant: 'CHAT',
14213
14218
  modelTitle: 'Claude 2',
14214
14219
  modelName: 'claude-2.0',
14220
+ modelDescription: 'Legacy model with strong general reasoning and language capabilities. 100K context window. Superseded by newer Claude 3 models.',
14215
14221
  pricing: {
14216
14222
  prompt: computeUsage(`$8.00 / 1M tokens`),
14217
14223
  output: computeUsage(`$24.00 / 1M tokens`),
@@ -14221,6 +14227,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14221
14227
  modelVariant: 'CHAT',
14222
14228
 modelTitle: 'Claude Instant 1.2',
14223
14229
  modelName: 'claude-instant-1.2',
14230
+ modelDescription: 'Older, faster Claude model optimized for high throughput applications. Lower cost but less capable than newer models. 100K context window.',
14224
14231
  pricing: {
14225
14232
  prompt: computeUsage(`$0.80 / 1M tokens`),
14226
14233
  output: computeUsage(`$2.40 / 1M tokens`),
@@ -14230,6 +14237,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14230
14237
  modelVariant: 'CHAT',
14231
14238
  modelTitle: 'Claude 3.7 Sonnet',
14232
14239
  modelName: 'claude-3-7-sonnet-20250219',
14240
+ modelDescription: 'Latest generation Claude model with advanced reasoning and language understanding. Enhanced capabilities over 3.5 with improved domain knowledge. 200K context window.',
14233
14241
  pricing: {
14234
14242
  prompt: computeUsage(`$3.00 / 1M tokens`),
14235
14243
  output: computeUsage(`$15.00 / 1M tokens`),
@@ -14239,6 +14247,7 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
14239
14247
  modelVariant: 'CHAT',
14240
14248
  modelTitle: 'Claude 3.5 Haiku',
14241
14249
  modelName: 'claude-3-5-haiku-20241022',
14250
+ modelDescription: 'Fast and efficient Claude 3.5 variant optimized for speed and cost-effectiveness. Great for interactive applications requiring quick responses. 200K context window.',
14242
14251
  pricing: {
14243
14252
  prompt: computeUsage(`$0.25 / 1M tokens`),
14244
14253
  output: computeUsage(`$1.25 / 1M tokens`),
@@ -14723,6 +14732,7 @@ const OPENAI_MODELS = exportJson({
14723
14732
  modelVariant: 'COMPLETION',
14724
14733
  modelTitle: 'davinci-002',
14725
14734
  modelName: 'davinci-002',
14735
+ modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
14726
14736
  pricing: {
14727
14737
  prompt: computeUsage(`$2.00 / 1M tokens`),
14728
14738
  output: computeUsage(`$2.00 / 1M tokens`), // <- not sure
@@ -14740,6 +14750,7 @@ const OPENAI_MODELS = exportJson({
14740
14750
  modelVariant: 'CHAT',
14741
14751
  modelTitle: 'gpt-3.5-turbo-16k',
14742
14752
  modelName: 'gpt-3.5-turbo-16k',
14753
+ modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
14743
14754
  pricing: {
14744
14755
  prompt: computeUsage(`$3.00 / 1M tokens`),
14745
14756
  output: computeUsage(`$4.00 / 1M tokens`),
@@ -14763,6 +14774,7 @@ const OPENAI_MODELS = exportJson({
14763
14774
  modelVariant: 'CHAT',
14764
14775
  modelTitle: 'gpt-4',
14765
14776
  modelName: 'gpt-4',
14777
+ modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
14766
14778
  pricing: {
14767
14779
  prompt: computeUsage(`$30.00 / 1M tokens`),
14768
14780
  output: computeUsage(`$60.00 / 1M tokens`),
@@ -14774,6 +14786,7 @@ const OPENAI_MODELS = exportJson({
14774
14786
  modelVariant: 'CHAT',
14775
14787
  modelTitle: 'gpt-4-32k',
14776
14788
  modelName: 'gpt-4-32k',
14789
+ modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
14777
14790
  pricing: {
14778
14791
  prompt: computeUsage(`$60.00 / 1M tokens`),
14779
14792
  output: computeUsage(`$120.00 / 1M tokens`),
@@ -14796,6 +14809,7 @@ const OPENAI_MODELS = exportJson({
14796
14809
  modelVariant: 'CHAT',
14797
14810
  modelTitle: 'gpt-4-turbo-2024-04-09',
14798
14811
  modelName: 'gpt-4-turbo-2024-04-09',
14812
+ modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
14799
14813
  pricing: {
14800
14814
  prompt: computeUsage(`$10.00 / 1M tokens`),
14801
14815
  output: computeUsage(`$30.00 / 1M tokens`),
@@ -14807,6 +14821,7 @@ const OPENAI_MODELS = exportJson({
14807
14821
  modelVariant: 'CHAT',
14808
14822
  modelTitle: 'gpt-3.5-turbo-1106',
14809
14823
  modelName: 'gpt-3.5-turbo-1106',
14824
+ modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
14810
14825
  pricing: {
14811
14826
  prompt: computeUsage(`$1.00 / 1M tokens`),
14812
14827
  output: computeUsage(`$2.00 / 1M tokens`),
@@ -14818,6 +14833,7 @@ const OPENAI_MODELS = exportJson({
14818
14833
  modelVariant: 'CHAT',
14819
14834
  modelTitle: 'gpt-4-turbo',
14820
14835
  modelName: 'gpt-4-turbo',
14836
+ modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
14821
14837
  pricing: {
14822
14838
  prompt: computeUsage(`$10.00 / 1M tokens`),
14823
14839
  output: computeUsage(`$30.00 / 1M tokens`),
@@ -14829,6 +14845,7 @@ const OPENAI_MODELS = exportJson({
14829
14845
  modelVariant: 'COMPLETION',
14830
14846
  modelTitle: 'gpt-3.5-turbo-instruct-0914',
14831
14847
  modelName: 'gpt-3.5-turbo-instruct-0914',
14848
+ modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
14832
14849
  pricing: {
14833
14850
  prompt: computeUsage(`$1.50 / 1M tokens`),
14834
14851
  output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
@@ -14840,6 +14857,7 @@ const OPENAI_MODELS = exportJson({
14840
14857
  modelVariant: 'COMPLETION',
14841
14858
  modelTitle: 'gpt-3.5-turbo-instruct',
14842
14859
  modelName: 'gpt-3.5-turbo-instruct',
14860
+ modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
14843
14861
  pricing: {
14844
14862
  prompt: computeUsage(`$1.50 / 1M tokens`),
14845
14863
  output: computeUsage(`$2.00 / 1M tokens`),
@@ -14857,6 +14875,7 @@ const OPENAI_MODELS = exportJson({
14857
14875
  modelVariant: 'CHAT',
14858
14876
  modelTitle: 'gpt-3.5-turbo',
14859
14877
  modelName: 'gpt-3.5-turbo',
14878
+ modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
14860
14879
  pricing: {
14861
14880
  prompt: computeUsage(`$3.00 / 1M tokens`),
14862
14881
  output: computeUsage(`$6.00 / 1M tokens`), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
@@ -14868,6 +14887,7 @@ const OPENAI_MODELS = exportJson({
14868
14887
  modelVariant: 'CHAT',
14869
14888
  modelTitle: 'gpt-3.5-turbo-0301',
14870
14889
  modelName: 'gpt-3.5-turbo-0301',
14890
+ modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
14871
14891
  pricing: {
14872
14892
  prompt: computeUsage(`$1.50 / 1M tokens`),
14873
14893
  output: computeUsage(`$2.00 / 1M tokens`),
@@ -14879,6 +14899,7 @@ const OPENAI_MODELS = exportJson({
14879
14899
  modelVariant: 'COMPLETION',
14880
14900
  modelTitle: 'babbage-002',
14881
14901
  modelName: 'babbage-002',
14902
+ modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
14882
14903
  pricing: {
14883
14904
  prompt: computeUsage(`$0.40 / 1M tokens`),
14884
14905
  output: computeUsage(`$0.40 / 1M tokens`), // <- Not sure
@@ -14890,6 +14911,7 @@ const OPENAI_MODELS = exportJson({
14890
14911
  modelVariant: 'CHAT',
14891
14912
  modelTitle: 'gpt-4-1106-preview',
14892
14913
  modelName: 'gpt-4-1106-preview',
14914
+ modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
14893
14915
  pricing: {
14894
14916
  prompt: computeUsage(`$10.00 / 1M tokens`),
14895
14917
  output: computeUsage(`$30.00 / 1M tokens`),
@@ -14901,6 +14923,7 @@ const OPENAI_MODELS = exportJson({
14901
14923
  modelVariant: 'CHAT',
14902
14924
  modelTitle: 'gpt-4-0125-preview',
14903
14925
  modelName: 'gpt-4-0125-preview',
14926
+ modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
14904
14927
  pricing: {
14905
14928
  prompt: computeUsage(`$10.00 / 1M tokens`),
14906
14929
  output: computeUsage(`$30.00 / 1M tokens`),
@@ -14918,6 +14941,7 @@ const OPENAI_MODELS = exportJson({
14918
14941
  modelVariant: 'CHAT',
14919
14942
  modelTitle: 'gpt-3.5-turbo-0125',
14920
14943
  modelName: 'gpt-3.5-turbo-0125',
14944
+ modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
14921
14945
  pricing: {
14922
14946
  prompt: computeUsage(`$0.50 / 1M tokens`),
14923
14947
  output: computeUsage(`$1.50 / 1M tokens`),
@@ -14929,6 +14953,7 @@ const OPENAI_MODELS = exportJson({
14929
14953
  modelVariant: 'CHAT',
14930
14954
  modelTitle: 'gpt-4-turbo-preview',
14931
14955
  modelName: 'gpt-4-turbo-preview',
14956
+ modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
14932
14957
  pricing: {
14933
14958
  prompt: computeUsage(`$10.00 / 1M tokens`),
14934
14959
  output: computeUsage(`$30.00 / 1M tokens`), // <- Not sure, just for gpt-4-turbo
@@ -14940,6 +14965,7 @@ const OPENAI_MODELS = exportJson({
14940
14965
  modelVariant: 'EMBEDDING',
14941
14966
  modelTitle: 'text-embedding-3-large',
14942
14967
  modelName: 'text-embedding-3-large',
14968
+ modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
14943
14969
  pricing: {
14944
14970
  prompt: computeUsage(`$0.13 / 1M tokens`),
14945
14971
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
@@ -14952,6 +14978,7 @@ const OPENAI_MODELS = exportJson({
14952
14978
  modelVariant: 'EMBEDDING',
14953
14979
  modelTitle: 'text-embedding-3-small',
14954
14980
  modelName: 'text-embedding-3-small',
14981
+ modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
14955
14982
  pricing: {
14956
14983
  prompt: computeUsage(`$0.02 / 1M tokens`),
14957
14984
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
@@ -14964,6 +14991,7 @@ const OPENAI_MODELS = exportJson({
14964
14991
  modelVariant: 'CHAT',
14965
14992
  modelTitle: 'gpt-3.5-turbo-0613',
14966
14993
  modelName: 'gpt-3.5-turbo-0613',
14994
+ modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
14967
14995
  pricing: {
14968
14996
  prompt: computeUsage(`$1.50 / 1M tokens`),
14969
14997
  output: computeUsage(`$2.00 / 1M tokens`),
@@ -14975,6 +15003,7 @@ const OPENAI_MODELS = exportJson({
14975
15003
  modelVariant: 'EMBEDDING',
14976
15004
  modelTitle: 'text-embedding-ada-002',
14977
15005
  modelName: 'text-embedding-ada-002',
15006
+ modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
14978
15007
  pricing: {
14979
15008
  prompt: computeUsage(`$0.1 / 1M tokens`),
14980
15009
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
@@ -15005,6 +15034,7 @@ const OPENAI_MODELS = exportJson({
15005
15034
  modelVariant: 'CHAT',
15006
15035
  modelTitle: 'gpt-4o-2024-05-13',
15007
15036
  modelName: 'gpt-4o-2024-05-13',
15037
+ modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
15008
15038
  pricing: {
15009
15039
  prompt: computeUsage(`$5.00 / 1M tokens`),
15010
15040
  output: computeUsage(`$15.00 / 1M tokens`),
@@ -15016,6 +15046,7 @@ const OPENAI_MODELS = exportJson({
15016
15046
  modelVariant: 'CHAT',
15017
15047
  modelTitle: 'gpt-4o',
15018
15048
  modelName: 'gpt-4o',
15049
+ modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
15019
15050
  pricing: {
15020
15051
  prompt: computeUsage(`$5.00 / 1M tokens`),
15021
15052
  output: computeUsage(`$15.00 / 1M tokens`),
@@ -15027,6 +15058,7 @@ const OPENAI_MODELS = exportJson({
15027
15058
  modelVariant: 'CHAT',
15028
15059
  modelTitle: 'gpt-4o-mini',
15029
15060
  modelName: 'gpt-4o-mini',
15061
+ modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
15030
15062
  pricing: {
15031
15063
  prompt: computeUsage(`$3.00 / 1M tokens`),
15032
15064
  output: computeUsage(`$9.00 / 1M tokens`),
@@ -15038,6 +15070,7 @@ const OPENAI_MODELS = exportJson({
15038
15070
  modelVariant: 'CHAT',
15039
15071
  modelTitle: 'o1-preview',
15040
15072
  modelName: 'o1-preview',
15073
+ modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
15041
15074
  pricing: {
15042
15075
  prompt: computeUsage(`$15.00 / 1M tokens`),
15043
15076
  output: computeUsage(`$60.00 / 1M tokens`),
@@ -15049,6 +15082,7 @@ const OPENAI_MODELS = exportJson({
15049
15082
  modelVariant: 'CHAT',
15050
15083
  modelTitle: 'o1-preview-2024-09-12',
15051
15084
  modelName: 'o1-preview-2024-09-12',
15085
+ modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
15052
15086
  // <- TODO: [💩] Some better system to organize theese date suffixes and versions
15053
15087
  pricing: {
15054
15088
  prompt: computeUsage(`$15.00 / 1M tokens`),
@@ -15061,6 +15095,7 @@ const OPENAI_MODELS = exportJson({
15061
15095
  modelVariant: 'CHAT',
15062
15096
  modelTitle: 'o1-mini',
15063
15097
  modelName: 'o1-mini',
15098
+ modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
15064
15099
  pricing: {
15065
15100
  prompt: computeUsage(`$3.00 / 1M tokens`),
15066
15101
  output: computeUsage(`$12.00 / 1M tokens`),
@@ -15084,6 +15119,7 @@ const OPENAI_MODELS = exportJson({
15084
15119
  modelVariant: 'CHAT',
15085
15120
  modelTitle: 'o3-mini',
15086
15121
  modelName: 'o3-mini',
15122
+ modelDescription: 'Compact and efficient reasoning model specializing in problem-solving with a focus on research and analysis tasks.',
15087
15123
  pricing: {
15088
15124
  prompt: computeUsage(`$3.00 / 1M tokens`),
15089
15125
  output: computeUsage(`$12.00 / 1M tokens`),
@@ -15096,6 +15132,7 @@ const OPENAI_MODELS = exportJson({
15096
15132
  modelVariant: 'CHAT',
15097
15133
  modelTitle: 'o1-mini-2024-09-12',
15098
15134
  modelName: 'o1-mini-2024-09-12',
15135
+ modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
15099
15136
  pricing: {
15100
15137
  prompt: computeUsage(`$3.00 / 1M tokens`),
15101
15138
  output: computeUsage(`$12.00 / 1M tokens`),
@@ -15107,6 +15144,7 @@ const OPENAI_MODELS = exportJson({
15107
15144
  modelVariant: 'CHAT',
15108
15145
  modelTitle: 'gpt-3.5-turbo-16k-0613',
15109
15146
  modelName: 'gpt-3.5-turbo-16k-0613',
15147
+ modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
15110
15148
  pricing: {
15111
15149
  prompt: computeUsage(`$3.00 / 1M tokens`),
15112
15150
  output: computeUsage(`$4.00 / 1M tokens`),