modelfusion 0.30.1 → 0.32.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +14 -20
  2. package/model-function/Model.d.ts +1 -1
  3. package/model-function/SuccessfulModelCall.cjs +2 -9
  4. package/model-function/SuccessfulModelCall.d.ts +10 -7
  5. package/model-function/SuccessfulModelCall.js +2 -9
  6. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +2 -2
  7. package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +5 -9
  8. package/model-function/generate-structure/StructureFromTextGenerationModel.js +2 -2
  9. package/model-function/generate-structure/StructureGenerationModel.d.ts +3 -2
  10. package/model-function/generate-structure/StructureOrTextGenerationModel.d.ts +4 -5
  11. package/model-function/generate-structure/generateStructure.cjs +5 -2
  12. package/model-function/generate-structure/generateStructure.d.ts +1 -1
  13. package/model-function/generate-structure/generateStructure.js +5 -2
  14. package/model-function/generate-structure/generateStructureOrText.cjs +8 -5
  15. package/model-function/generate-structure/generateStructureOrText.d.ts +2 -2
  16. package/model-function/generate-structure/generateStructureOrText.js +8 -5
  17. package/model-function/index.cjs +2 -3
  18. package/model-function/index.d.ts +2 -3
  19. package/model-function/index.js +2 -3
  20. package/model-provider/openai/OpenAICostCalculator.cjs +6 -5
  21. package/model-provider/openai/OpenAICostCalculator.js +6 -5
  22. package/model-provider/openai/OpenAITextGenerationModel.cjs +41 -17
  23. package/model-provider/openai/OpenAITextGenerationModel.d.ts +32 -14
  24. package/model-provider/openai/OpenAITextGenerationModel.js +41 -17
  25. package/model-provider/openai/TikTokenTokenizer.cjs +3 -2
  26. package/model-provider/openai/TikTokenTokenizer.js +3 -2
  27. package/model-provider/openai/chat/OpenAIChatModel.cjs +47 -9
  28. package/model-provider/openai/chat/OpenAIChatModel.d.ts +15 -5
  29. package/model-provider/openai/chat/OpenAIChatModel.js +47 -9
  30. package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +78 -0
  31. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +12 -0
  32. package/model-provider/openai/chat/OpenAIChatPromptFormat.js +73 -0
  33. package/model-provider/openai/index.cjs +2 -3
  34. package/model-provider/openai/index.d.ts +1 -1
  35. package/model-provider/openai/index.js +1 -1
  36. package/model-provider/stability/StabilityImageGenerationModel.cjs +6 -0
  37. package/model-provider/stability/StabilityImageGenerationModel.d.ts +4 -2
  38. package/model-provider/stability/StabilityImageGenerationModel.js +6 -0
  39. package/package.json +2 -2
  40. package/prompt/AlpacaPromptFormat.cjs +23 -21
  41. package/prompt/AlpacaPromptFormat.d.ts +1 -1
  42. package/prompt/AlpacaPromptFormat.js +21 -19
  43. package/prompt/InstructionPrompt.d.ts +9 -0
  44. package/prompt/Llama2PromptFormat.cjs +44 -40
  45. package/prompt/Llama2PromptFormat.d.ts +2 -2
  46. package/prompt/Llama2PromptFormat.js +41 -37
  47. package/prompt/TextPromptFormat.cjs +5 -5
  48. package/prompt/TextPromptFormat.d.ts +2 -2
  49. package/prompt/TextPromptFormat.js +2 -2
  50. package/prompt/VicunaPromptFormat.cjs +39 -37
  51. package/prompt/VicunaPromptFormat.d.ts +1 -1
  52. package/prompt/VicunaPromptFormat.js +37 -35
  53. package/prompt/index.cjs +0 -1
  54. package/prompt/index.d.ts +0 -1
  55. package/prompt/index.js +0 -1
  56. package/tool/useTool.cjs +5 -1
  57. package/tool/useTool.d.ts +1 -1
  58. package/tool/useTool.js +5 -1
  59. package/tool/useToolOrGenerateText.cjs +5 -2
  60. package/tool/useToolOrGenerateText.d.ts +2 -2
  61. package/tool/useToolOrGenerateText.js +5 -2
  62. package/model-function/generate-structure/InstructionWithStructurePrompt.cjs +0 -17
  63. package/model-function/generate-structure/InstructionWithStructurePrompt.d.ts +0 -17
  64. package/model-function/generate-structure/InstructionWithStructurePrompt.js +0 -14
  65. package/model-provider/openai/chat/OpenAIChatPrompt.cjs +0 -135
  66. package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +0 -96
  67. package/model-provider/openai/chat/OpenAIChatPrompt.js +0 -127
  68. package/prompt/OpenAIChatPromptFormat.cjs +0 -74
  69. package/prompt/OpenAIChatPromptFormat.d.ts +0 -12
  70. package/prompt/OpenAIChatPromptFormat.js +0 -69
package/model-provider/openai/OpenAITextGenerationModel.cjs

@@ -21,55 +21,72 @@ const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
  * @see https://openai.com/pricing
  */
 exports.OPENAI_TEXT_GENERATION_MODELS = {
+    "gpt-3.5-turbo-instruct": {
+        contextWindowSize: 4097,
+        promptTokenCostInMillicents: 0.15,
+        completionTokenCostInMillicents: 0.2,
+    },
     "davinci-002": {
         contextWindowSize: 16384,
-        tokenCostInMillicents: 0.2,
+        promptTokenCostInMillicents: 0.2,
+        completionTokenCostInMillicents: 0.2,
         fineTunedTokenCostInMillicents: 1.2,
     },
     "babbage-002": {
         contextWindowSize: 16384,
-        tokenCostInMillicents: 0.04,
+        promptTokenCostInMillicents: 0.04,
+        completionTokenCostInMillicents: 0.04,
         fineTunedTokenCostInMillicents: 0.16,
     },
     "text-davinci-003": {
         contextWindowSize: 4096,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     "text-davinci-002": {
         contextWindowSize: 4096,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     "code-davinci-002": {
         contextWindowSize: 8000,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     davinci: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     "text-curie-001": {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.2,
+        promptTokenCostInMillicents: 0.2,
+        completionTokenCostInMillicents: 0.2,
     },
     curie: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.2,
+        promptTokenCostInMillicents: 0.2,
+        completionTokenCostInMillicents: 0.2,
     },
     "text-babbage-001": {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.05,
+        promptTokenCostInMillicents: 0.05,
+        completionTokenCostInMillicents: 0.05,
     },
     babbage: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.05,
+        promptTokenCostInMillicents: 0.05,
+        completionTokenCostInMillicents: 0.05,
     },
     "text-ada-001": {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.04,
+        promptTokenCostInMillicents: 0.04,
+        completionTokenCostInMillicents: 0.04,
     },
     ada: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.04,
+        promptTokenCostInMillicents: 0.04,
+        completionTokenCostInMillicents: 0.04,
     },
 };
 function getOpenAITextGenerationModelInformation(model) {
@@ -80,7 +97,8 @@ function getOpenAITextGenerationModelInformation(model) {
             baseModel: model,
             isFineTuned: false,
             contextWindowSize: baseModelInformation.contextWindowSize,
-            tokenCostInMillicents: baseModelInformation.tokenCostInMillicents,
+            promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
+            completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents,
         };
     }
     // Extract the base model from the fine-tuned model:
@@ -92,7 +110,8 @@ function getOpenAITextGenerationModelInformation(model) {
             baseModel: baseModel,
             isFineTuned: true,
             contextWindowSize: baseModelInformation.contextWindowSize,
-            tokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+            promptTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+            completionTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
         };
     }
     throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
@@ -102,8 +121,13 @@ const isOpenAITextGenerationModel = (model) => model in exports.OPENAI_TEXT_GENE
     model.startsWith("ft:davinci-002:") ||
     model.startsWith("ft:babbage-002:");
 exports.isOpenAITextGenerationModel = isOpenAITextGenerationModel;
-const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => response.usage.total_tokens *
-    getOpenAITextGenerationModelInformation(model).tokenCostInMillicents;
+const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => {
+    const modelInformation = getOpenAITextGenerationModelInformation(model);
+    return (response.usage.prompt_tokens *
+        modelInformation.promptTokenCostInMillicents +
+        response.usage.completion_tokens *
+            modelInformation.completionTokenCostInMillicents);
+};
 exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGenerationCostInMillicents;
 /**
  * Create a text generation model that calls the OpenAI text completion API.
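The rewritten calculateOpenAITextGenerationCostInMillicents prices prompt and completion tokens separately instead of multiplying response.usage.total_tokens by a single rate. A minimal worked sketch using the gpt-3.5-turbo-instruct rates from the table above (the token counts are hypothetical):

// Hypothetical usage numbers; the real values come from response.usage.
const usage = { prompt_tokens: 1000, completion_tokens: 500 };

const costInMillicents =
    usage.prompt_tokens * 0.15 +    // 1000 * 0.15 = 150 millicents
    usage.completion_tokens * 0.2;  //  500 * 0.2  = 100 millicents

// 250 millicents = 0.25 cents = $0.0025 for the whole call.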
@@ -112,7 +136,7 @@ exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGener
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
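The @example now uses gpt-3.5-turbo-instruct in place of the deprecated text-davinci-003. A full call might look like the sketch below, assuming modelfusion's generateText helper resolves to the generated string and OPENAI_API_KEY is set in the environment (the prompt is made up):

import { OpenAITextGenerationModel, generateText } from "modelfusion";

const model = new OpenAITextGenerationModel({
    model: "gpt-3.5-turbo-instruct",
    temperature: 0.7,
    maxCompletionTokens: 500,
});

// The completion API takes a plain string prompt.
const text = await generateText(model, "Write a one-line release note:");
console.log(text);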
package/model-provider/openai/OpenAITextGenerationModel.d.ts

@@ -14,62 +14,80 @@ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  * @see https://openai.com/pricing
  */
 export declare const OPENAI_TEXT_GENERATION_MODELS: {
+    "gpt-3.5-turbo-instruct": {
+        contextWindowSize: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
+    };
     "davinci-002": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
         fineTunedTokenCostInMillicents: number;
     };
     "babbage-002": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
         fineTunedTokenCostInMillicents: number;
     };
     "text-davinci-003": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     "text-davinci-002": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     "code-davinci-002": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     davinci: {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     "text-curie-001": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     curie: {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     "text-babbage-001": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     babbage: {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     "text-ada-001": {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
     ada: {
         contextWindowSize: number;
-        tokenCostInMillicents: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
     };
 };
 export declare function getOpenAITextGenerationModelInformation(model: OpenAITextGenerationModelType): {
     baseModel: OpenAITextGenerationBaseModelType;
     isFineTuned: boolean;
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
 };
 type FineTuneableOpenAITextGenerationModelType = "davinci-002" | "babbage-002";
 type FineTunedOpenAITextGenerationModelType = `ft:${FineTuneableOpenAITextGenerationModelType}:${string}:${string}:${string}`;
@@ -106,7 +124,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
package/model-provider/openai/OpenAITextGenerationModel.js

@@ -15,55 +15,72 @@ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  * @see https://openai.com/pricing
  */
 export const OPENAI_TEXT_GENERATION_MODELS = {
+    "gpt-3.5-turbo-instruct": {
+        contextWindowSize: 4097,
+        promptTokenCostInMillicents: 0.15,
+        completionTokenCostInMillicents: 0.2,
+    },
     "davinci-002": {
         contextWindowSize: 16384,
-        tokenCostInMillicents: 0.2,
+        promptTokenCostInMillicents: 0.2,
+        completionTokenCostInMillicents: 0.2,
         fineTunedTokenCostInMillicents: 1.2,
     },
     "babbage-002": {
         contextWindowSize: 16384,
-        tokenCostInMillicents: 0.04,
+        promptTokenCostInMillicents: 0.04,
+        completionTokenCostInMillicents: 0.04,
         fineTunedTokenCostInMillicents: 0.16,
     },
     "text-davinci-003": {
         contextWindowSize: 4096,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     "text-davinci-002": {
         contextWindowSize: 4096,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     "code-davinci-002": {
         contextWindowSize: 8000,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     davinci: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 2,
+        promptTokenCostInMillicents: 2,
+        completionTokenCostInMillicents: 2,
     },
     "text-curie-001": {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.2,
+        promptTokenCostInMillicents: 0.2,
+        completionTokenCostInMillicents: 0.2,
     },
     curie: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.2,
+        promptTokenCostInMillicents: 0.2,
+        completionTokenCostInMillicents: 0.2,
     },
     "text-babbage-001": {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.05,
+        promptTokenCostInMillicents: 0.05,
+        completionTokenCostInMillicents: 0.05,
     },
     babbage: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.05,
+        promptTokenCostInMillicents: 0.05,
+        completionTokenCostInMillicents: 0.05,
     },
     "text-ada-001": {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.04,
+        promptTokenCostInMillicents: 0.04,
+        completionTokenCostInMillicents: 0.04,
     },
     ada: {
         contextWindowSize: 2048,
-        tokenCostInMillicents: 0.04,
+        promptTokenCostInMillicents: 0.04,
+        completionTokenCostInMillicents: 0.04,
     },
 };
 export function getOpenAITextGenerationModelInformation(model) {
@@ -74,7 +91,8 @@ export function getOpenAITextGenerationModelInformation(model) {
             baseModel: model,
             isFineTuned: false,
             contextWindowSize: baseModelInformation.contextWindowSize,
-            tokenCostInMillicents: baseModelInformation.tokenCostInMillicents,
+            promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
+            completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents,
         };
     }
     // Extract the base model from the fine-tuned model:
@@ -86,7 +104,8 @@ export function getOpenAITextGenerationModelInformation(model) {
             baseModel: baseModel,
             isFineTuned: true,
             contextWindowSize: baseModelInformation.contextWindowSize,
-            tokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+            promptTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+            completionTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
         };
     }
     throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
@@ -94,8 +113,13 @@ export function getOpenAITextGenerationModelInformation(model) {
 export const isOpenAITextGenerationModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS ||
     model.startsWith("ft:davinci-002:") ||
     model.startsWith("ft:babbage-002:");
-export const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => response.usage.total_tokens *
-    getOpenAITextGenerationModelInformation(model).tokenCostInMillicents;
+export const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => {
+    const modelInformation = getOpenAITextGenerationModelInformation(model);
+    return (response.usage.prompt_tokens *
+        modelInformation.promptTokenCostInMillicents +
+        response.usage.completion_tokens *
+            modelInformation.completionTokenCostInMillicents);
+};
 /**
  * Create a text generation model that calls the OpenAI text completion API.
  *
@@ -103,7 +127,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
package/model-provider/openai/TikTokenTokenizer.cjs

@@ -57,8 +57,6 @@ function getEncodingNameForModel(model) {
         case "text-davinci-003": {
             return "p50k_base";
         }
-        case "babbage-002":
-        case "davinci-002":
         case "ada":
         case "babbage":
         case "curie":
@@ -68,11 +66,14 @@ function getEncodingNameForModel(model) {
         case "text-curie-001": {
             return "r50k_base";
         }
+        case "babbage-002":
+        case "davinci-002":
         case "gpt-3.5-turbo":
         case "gpt-3.5-turbo-0301":
         case "gpt-3.5-turbo-0613":
         case "gpt-3.5-turbo-16k":
         case "gpt-3.5-turbo-16k-0613":
+        case "gpt-3.5-turbo-instruct":
         case "gpt-4":
         case "gpt-4-0314":
         case "gpt-4-0613":
package/model-provider/openai/TikTokenTokenizer.js

@@ -53,8 +53,6 @@ function getEncodingNameForModel(model) {
         case "text-davinci-003": {
             return "p50k_base";
         }
-        case "babbage-002":
-        case "davinci-002":
         case "ada":
         case "babbage":
         case "curie":
@@ -64,11 +62,14 @@ function getEncodingNameForModel(model) {
         case "text-curie-001": {
             return "r50k_base";
         }
+        case "babbage-002":
+        case "davinci-002":
         case "gpt-3.5-turbo":
         case "gpt-3.5-turbo-0301":
         case "gpt-3.5-turbo-0613":
         case "gpt-3.5-turbo-16k":
         case "gpt-3.5-turbo-16k-0613":
+        case "gpt-3.5-turbo-instruct":
         case "gpt-4":
         case "gpt-4-0314":
         case "gpt-4-0613":
package/model-provider/openai/chat/OpenAIChatModel.cjs

@@ -6,10 +6,10 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.getOpenAIChatModelInformation = exports.OPENAI_CHAT_MODELS = void 0;
 const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
 const zod_1 = __importDefault(require("zod"));
-const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
-const PromptFormatTextGenerationModel_js_1 = require("../../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../../core/api/postToApi.cjs");
+const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../../prompt/PromptFormatTextGenerationModel.cjs");
 const OpenAIApiConfiguration_js_1 = require("../OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("../OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
@@ -245,15 +245,21 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
      *
      * @see https://platform.openai.com/docs/guides/gpt/function-calling
      */
-    generateStructureResponse(prompt, options) {
-        const settingsWithFunctionCall = Object.assign({}, options, {
-            functionCall: prompt.functionCall,
-            functions: prompt.functions,
-        });
-        return this.callAPI(prompt.messages, {
+    generateStructureResponse(structureDefinition, prompt, options) {
+        return this.callAPI(prompt, {
             responseFormat: exports.OpenAIChatResponseFormat.json,
             functionId: options?.functionId,
-            settings: settingsWithFunctionCall,
+            settings: {
+                ...options,
+                functionCall: { name: structureDefinition.name },
+                functions: [
+                    {
+                        name: structureDefinition.name,
+                        description: structureDefinition.description,
+                        parameters: structureDefinition.schema.getJsonSchema(),
+                    },
+                ],
+            },
             run: options?.run,
         });
     }
@@ -261,6 +267,38 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return secure_json_parse_1.default.parse(jsonText);
     }
+    generateStructureOrTextResponse(structureDefinitions, prompt, options) {
+        return this.callAPI(prompt, {
+            responseFormat: exports.OpenAIChatResponseFormat.json,
+            functionId: options?.functionId,
+            settings: {
+                ...options,
+                functionCall: "auto",
+                functions: structureDefinitions.map((structureDefinition) => ({
+                    name: structureDefinition.name,
+                    description: structureDefinition.description,
+                    parameters: structureDefinition.schema.getJsonSchema(),
+                })),
+            },
+            run: options?.run,
+        });
+    }
+    extractStructureAndText(response) {
+        const message = response.choices[0].message;
+        const content = message.content;
+        const functionCall = message.function_call;
+        return functionCall == null
+            ? {
+                structure: null,
+                value: null,
+                text: content ?? "",
+            }
+            : {
+                structure: functionCall.name,
+                value: secure_json_parse_1.default.parse(functionCall.arguments),
+                text: content,
+            };
+    }
     extractUsage(response) {
         return {
             promptTokens: response.usage.prompt_tokens,
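generateStructureResponse now takes the structure definition and the chat messages as separate arguments and builds the OpenAI function-calling parameters itself; generateStructureOrTextResponse does the same for several definitions with functionCall: "auto", so the model may answer in plain text instead. A sketch of the minimal shape these methods consume (the weather example is hypothetical; the code above only reads name, description, and schema.getJsonSchema()):

// Hypothetical structure definition matching what callAPI receives above.
const getWeather = {
    name: "getWeather",
    description: "Look up the current weather for a city.",
    schema: {
        getJsonSchema: () => ({
            type: "object",
            properties: { city: { type: "string" } },
            required: ["city"],
        }),
    },
};

// Forced call:    generateStructureResponse(getWeather, messages)
//                 -> functionCall: { name: "getWeather" }
// Model's choice: generateStructureOrTextResponse([getWeather, ...], messages)
//                 -> functionCall: "auto"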
package/model-provider/openai/chat/OpenAIChatModel.d.ts

@@ -1,17 +1,17 @@
 import z from "zod";
+import { ApiConfiguration } from "../../../core/api/ApiConfiguration.js";
+import { ResponseHandler } from "../../../core/api/postToApi.js";
+import { StructureDefinition } from "../../../core/structure/StructureDefinition.js";
 import { AbstractModel } from "../../../model-function/AbstractModel.js";
 import { ModelFunctionOptions } from "../../../model-function/ModelFunctionOptions.js";
-import { ApiConfiguration } from "../../../core/api/ApiConfiguration.js";
 import { StructureGenerationModel } from "../../../model-function/generate-structure/StructureGenerationModel.js";
 import { StructureOrTextGenerationModel } from "../../../model-function/generate-structure/StructureOrTextGenerationModel.js";
 import { DeltaEvent } from "../../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
 import { PromptFormat } from "../../../prompt/PromptFormat.js";
 import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
-import { ResponseHandler } from "../../../core/api/postToApi.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
-import { OpenAIChatAutoFunctionPrompt, OpenAIChatSingleFunctionPrompt, OpenAIFunctionDescription } from "./OpenAIChatPrompt.js";
 import { OpenAIChatDelta } from "./OpenAIChatStreamIterable.js";
 export declare const OPENAI_CHAT_MODELS: {
     "gpt-4": {
@@ -132,7 +132,7 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, Omit<Op
  * ),
  * ]);
  */
-export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> implements TextGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings>, StructureGenerationModel<OpenAIChatSingleFunctionPrompt<unknown>, OpenAIChatResponse, OpenAIChatSettings>, StructureOrTextGenerationModel<OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, OpenAIChatResponse, OpenAIChatSettings> {
+export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> implements TextGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings>, StructureGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatSettings>, StructureOrTextGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatSettings> {
     constructor(settings: OpenAIChatSettings);
     readonly provider: "openai";
     get modelName(): OpenAIChatModelType;
@@ -183,8 +183,18 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
      *
      * @see https://platform.openai.com/docs/guides/gpt/function-calling
      */
-    generateStructureResponse(prompt: OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, options?: ModelFunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
+    generateStructureResponse(structureDefinition: StructureDefinition<string, unknown>, prompt: OpenAIChatMessage[], options?: ModelFunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
     extractStructure(response: OpenAIChatResponse): unknown;
+    generateStructureOrTextResponse(structureDefinitions: Array<StructureDefinition<string, unknown>>, prompt: OpenAIChatMessage[], options?: ModelFunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
+    extractStructureAndText(response: OpenAIChatResponse): {
+        structure: null;
+        value: null;
+        text: string;
+    } | {
+        structure: string;
+        value: any;
+        text: string | null;
+    };
     extractUsage(response: OpenAIChatResponse): {
         promptTokens: number;
         completionTokens: number;
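The declared return type of extractStructureAndText is a discriminated union: structure: null signals a plain-text answer, while a string names the function the model chose to call. A sketch of how a caller can narrow it:

// Narrowing the union declared above; result is assumed to come from
// extractStructureAndText.
declare const result:
    | { structure: null; value: null; text: string }
    | { structure: string; value: any; text: string | null };

if (result.structure === null) {
    console.log("text answer:", result.text);
} else {
    console.log(`function ${result.structure} called with`, result.value);
}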
package/model-provider/openai/chat/OpenAIChatModel.js

@@ -1,9 +1,9 @@
 import SecureJSON from "secure-json-parse";
 import z from "zod";
-import { AbstractModel } from "../../../model-function/AbstractModel.js";
-import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../../core/api/postToApi.js";
+import { AbstractModel } from "../../../model-function/AbstractModel.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
@@ -236,15 +236,21 @@ export class OpenAIChatModel extends AbstractModel {
      *
      * @see https://platform.openai.com/docs/guides/gpt/function-calling
      */
-    generateStructureResponse(prompt, options) {
-        const settingsWithFunctionCall = Object.assign({}, options, {
-            functionCall: prompt.functionCall,
-            functions: prompt.functions,
-        });
-        return this.callAPI(prompt.messages, {
+    generateStructureResponse(structureDefinition, prompt, options) {
+        return this.callAPI(prompt, {
             responseFormat: OpenAIChatResponseFormat.json,
             functionId: options?.functionId,
-            settings: settingsWithFunctionCall,
+            settings: {
+                ...options,
+                functionCall: { name: structureDefinition.name },
+                functions: [
+                    {
+                        name: structureDefinition.name,
+                        description: structureDefinition.description,
+                        parameters: structureDefinition.schema.getJsonSchema(),
+                    },
+                ],
+            },
             run: options?.run,
         });
     }
@@ -252,6 +258,38 @@ export class OpenAIChatModel extends AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return SecureJSON.parse(jsonText);
     }
+    generateStructureOrTextResponse(structureDefinitions, prompt, options) {
+        return this.callAPI(prompt, {
+            responseFormat: OpenAIChatResponseFormat.json,
+            functionId: options?.functionId,
+            settings: {
+                ...options,
+                functionCall: "auto",
+                functions: structureDefinitions.map((structureDefinition) => ({
+                    name: structureDefinition.name,
+                    description: structureDefinition.description,
+                    parameters: structureDefinition.schema.getJsonSchema(),
+                })),
+            },
+            run: options?.run,
+        });
+    }
+    extractStructureAndText(response) {
+        const message = response.choices[0].message;
+        const content = message.content;
+        const functionCall = message.function_call;
+        return functionCall == null
+            ? {
+                structure: null,
+                value: null,
+                text: content ?? "",
+            }
+            : {
+                structure: functionCall.name,
+                value: SecureJSON.parse(functionCall.arguments),
+                text: content,
+            };
+    }
     extractUsage(response) {
         return {
             promptTokens: response.usage.prompt_tokens,