modelfusion 0.74.1 → 0.75.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/README.md +40 -32
  2. package/guard/fixStructure.cjs +1 -1
  3. package/guard/fixStructure.d.ts +1 -1
  4. package/guard/fixStructure.js +1 -1
  5. package/model-function/embed/EmbeddingModel.d.ts +1 -1
  6. package/model-function/embed/embed.cjs +1 -1
  7. package/model-function/embed/embed.d.ts +2 -2
  8. package/model-function/embed/embed.js +1 -1
  9. package/model-function/generate-image/generateImage.d.ts +1 -1
  10. package/model-function/generate-speech/generateSpeech.d.ts +1 -1
  11. package/model-function/generate-speech/streamSpeech.d.ts +1 -1
  12. package/model-function/generate-structure/generateStructure.d.ts +1 -1
  13. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  14. package/model-function/generate-text/generateText.d.ts +1 -1
  15. package/model-function/generate-text/streamText.d.ts +1 -1
  16. package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
  17. package/model-provider/anthropic/AnthropicFacade.cjs +15 -0
  18. package/model-provider/anthropic/AnthropicFacade.d.ts +9 -0
  19. package/model-provider/anthropic/AnthropicFacade.js +11 -0
  20. package/model-provider/anthropic/index.cjs +2 -1
  21. package/model-provider/anthropic/index.d.ts +1 -0
  22. package/model-provider/anthropic/index.js +1 -0
  23. package/model-provider/automatic1111/Automatic1111Facade.cjs +15 -0
  24. package/model-provider/automatic1111/Automatic1111Facade.d.ts +9 -0
  25. package/model-provider/automatic1111/Automatic1111Facade.js +11 -0
  26. package/model-provider/automatic1111/index.cjs +14 -1
  27. package/model-provider/automatic1111/index.d.ts +1 -0
  28. package/model-provider/automatic1111/index.js +1 -0
  29. package/model-provider/cohere/CohereFacade.cjs +71 -0
  30. package/model-provider/cohere/CohereFacade.d.ts +59 -0
  31. package/model-provider/cohere/CohereFacade.js +65 -0
  32. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
  33. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
  34. package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
  35. package/model-provider/cohere/index.cjs +14 -1
  36. package/model-provider/cohere/index.d.ts +1 -0
  37. package/model-provider/cohere/index.js +1 -0
  38. package/model-provider/elevenlabs/ElevenLabsFacade.cjs +18 -0
  39. package/model-provider/elevenlabs/ElevenLabsFacade.d.ts +12 -0
  40. package/model-provider/elevenlabs/ElevenLabsFacade.js +14 -0
  41. package/model-provider/elevenlabs/index.cjs +14 -0
  42. package/model-provider/elevenlabs/index.d.ts +1 -0
  43. package/model-provider/elevenlabs/index.js +1 -0
  44. package/model-provider/huggingface/HuggingFaceFacade.cjs +55 -0
  45. package/model-provider/huggingface/HuggingFaceFacade.d.ts +46 -0
  46. package/model-provider/huggingface/HuggingFaceFacade.js +50 -0
  47. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
  48. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
  49. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
  50. package/model-provider/huggingface/index.cjs +14 -2
  51. package/model-provider/huggingface/index.d.ts +1 -1
  52. package/model-provider/huggingface/index.js +1 -1
  53. package/model-provider/llamacpp/LlamaCppFacade.cjs +19 -0
  54. package/model-provider/llamacpp/LlamaCppFacade.d.ts +7 -0
  55. package/model-provider/llamacpp/LlamaCppFacade.js +13 -0
  56. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +2 -2
  57. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +2 -2
  58. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +2 -2
  59. package/model-provider/llamacpp/index.cjs +2 -1
  60. package/model-provider/llamacpp/index.d.ts +1 -0
  61. package/model-provider/llamacpp/index.js +1 -0
  62. package/model-provider/lmnt/LmntFacade.cjs +15 -0
  63. package/model-provider/lmnt/LmntFacade.d.ts +9 -0
  64. package/model-provider/lmnt/LmntFacade.js +11 -0
  65. package/model-provider/lmnt/index.cjs +14 -0
  66. package/model-provider/lmnt/index.d.ts +1 -0
  67. package/model-provider/lmnt/index.js +1 -0
  68. package/model-provider/ollama/OllamaFacade.cjs +13 -0
  69. package/model-provider/ollama/OllamaFacade.d.ts +4 -0
  70. package/model-provider/ollama/OllamaFacade.js +8 -0
  71. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +2 -2
  72. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +2 -2
  73. package/model-provider/ollama/OllamaTextEmbeddingModel.js +2 -2
  74. package/model-provider/ollama/index.cjs +14 -1
  75. package/model-provider/ollama/index.d.ts +1 -0
  76. package/model-provider/ollama/index.js +1 -0
  77. package/model-provider/openai/OpenAIFacade.cjs +148 -0
  78. package/model-provider/openai/OpenAIFacade.d.ts +124 -0
  79. package/model-provider/openai/OpenAIFacade.js +138 -0
  80. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
  81. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
  82. package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
  83. package/model-provider/openai/TikTokenTokenizer.cjs +2 -2
  84. package/model-provider/openai/TikTokenTokenizer.d.ts +4 -3
  85. package/model-provider/openai/TikTokenTokenizer.js +2 -2
  86. package/model-provider/openai/index.cjs +2 -1
  87. package/model-provider/openai/index.d.ts +1 -0
  88. package/model-provider/openai/index.js +1 -0
  89. package/model-provider/stability/StabilityFacade.cjs +32 -0
  90. package/model-provider/stability/StabilityFacade.d.ts +26 -0
  91. package/model-provider/stability/StabilityFacade.js +28 -0
  92. package/model-provider/stability/index.cjs +14 -1
  93. package/model-provider/stability/index.d.ts +1 -0
  94. package/model-provider/stability/index.js +1 -0
  95. package/package.json +1 -1
  96. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +0 -94
  97. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +0 -44
  98. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +0 -90
@@ -0,0 +1,15 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.Speech = void 0;
4
+ const LmntSpeechModel_js_1 = require("./LmntSpeechModel.cjs");
5
+ /**
6
+ * Synthesize speech using the LMNT API.
7
+ *
8
+ * @see https://www.lmnt.com/docs/rest/#synthesize-speech
9
+ *
10
+ * @returns A new instance of {@link LmntSpeechModel}.
11
+ */
12
+ function Speech(settings) {
13
+ return new LmntSpeechModel_js_1.LmntSpeechModel(settings);
14
+ }
15
+ exports.Speech = Speech;
@@ -0,0 +1,9 @@
1
+ import { LmntSpeechModel, LmntSpeechModelSettings } from "./LmntSpeechModel.js";
2
+ /**
3
+ * Synthesize speech using the LMNT API.
4
+ *
5
+ * @see https://www.lmnt.com/docs/rest/#synthesize-speech
6
+ *
7
+ * @returns A new instance of {@link LmntSpeechModel}.
8
+ */
9
+ export declare function Speech(settings: LmntSpeechModelSettings): LmntSpeechModel;
@@ -0,0 +1,11 @@
1
+ import { LmntSpeechModel } from "./LmntSpeechModel.js";
2
+ /**
3
+ * Synthesize speech using the LMNT API.
4
+ *
5
+ * @see https://www.lmnt.com/docs/rest/#synthesize-speech
6
+ *
7
+ * @returns A new instance of {@link LmntSpeechModel}.
8
+ */
9
+ export function Speech(settings) {
10
+ return new LmntSpeechModel(settings);
11
+ }
@@ -10,9 +10,23 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
10
10
  if (k2 === undefined) k2 = k;
11
11
  o[k2] = m[k];
12
12
  }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
13
18
  var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
19
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
20
  };
21
+ var __importStar = (this && this.__importStar) || function (mod) {
22
+ if (mod && mod.__esModule) return mod;
23
+ var result = {};
24
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
25
+ __setModuleDefault(result, mod);
26
+ return result;
27
+ };
16
28
  Object.defineProperty(exports, "__esModule", { value: true });
29
+ exports.lmnt = void 0;
17
30
  __exportStar(require("./LmntApiConfiguration.cjs"), exports);
31
+ exports.lmnt = __importStar(require("./LmntFacade.cjs"));
18
32
  __exportStar(require("./LmntSpeechModel.cjs"), exports);
@@ -1,2 +1,3 @@
1
1
  export * from "./LmntApiConfiguration.js";
2
+ export * as lmnt from "./LmntFacade.js";
2
3
  export * from "./LmntSpeechModel.js";
@@ -1,2 +1,3 @@
1
1
  export * from "./LmntApiConfiguration.js";
2
+ export * as lmnt from "./LmntFacade.js";
2
3
  export * from "./LmntSpeechModel.js";
@@ -0,0 +1,13 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.TextEmbedder = exports.TextGenerator = void 0;
4
+ const OllamaTextEmbeddingModel_js_1 = require("./OllamaTextEmbeddingModel.cjs");
5
+ const OllamaTextGenerationModel_js_1 = require("./OllamaTextGenerationModel.cjs");
6
+ function TextGenerator(settings) {
7
+ return new OllamaTextGenerationModel_js_1.OllamaTextGenerationModel(settings);
8
+ }
9
+ exports.TextGenerator = TextGenerator;
10
+ function TextEmbedder(settings) {
11
+ return new OllamaTextEmbeddingModel_js_1.OllamaTextEmbeddingModel(settings);
12
+ }
13
+ exports.TextEmbedder = TextEmbedder;
@@ -0,0 +1,4 @@
1
+ import { OllamaTextEmbeddingModel, OllamaTextEmbeddingModelSettings } from "./OllamaTextEmbeddingModel.js";
2
+ import { OllamaTextGenerationModel, OllamaTextGenerationModelSettings } from "./OllamaTextGenerationModel.js";
3
+ export declare function TextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings: OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>): OllamaTextGenerationModel<CONTEXT_WINDOW_SIZE>;
4
+ export declare function TextEmbedder(settings: OllamaTextEmbeddingModelSettings): OllamaTextEmbeddingModel;
@@ -0,0 +1,8 @@
1
+ import { OllamaTextEmbeddingModel, } from "./OllamaTextEmbeddingModel.js";
2
+ import { OllamaTextGenerationModel, } from "./OllamaTextGenerationModel.js";
3
+ export function TextGenerator(settings) {
4
+ return new OllamaTextGenerationModel(settings);
5
+ }
6
+ export function TextEmbedder(settings) {
7
+ return new OllamaTextEmbeddingModel(settings);
8
+ }
@@ -32,8 +32,8 @@ class OllamaTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
32
32
  get modelName() {
33
33
  return null;
34
34
  }
35
- get isParallizable() {
36
- return this.settings.isParallizable ?? false;
35
+ get isParallelizable() {
36
+ return this.settings.isParallelizable ?? false;
37
37
  }
38
38
  get embeddingDimensions() {
39
39
  return this.settings.embeddingDimensions;
@@ -7,14 +7,14 @@ export interface OllamaTextEmbeddingModelSettings extends EmbeddingModelSettings
7
7
  api?: ApiConfiguration;
8
8
  model: string;
9
9
  embeddingDimensions?: number;
10
- isParallizable?: boolean;
10
+ isParallelizable?: boolean;
11
11
  }
12
12
  export declare class OllamaTextEmbeddingModel extends AbstractModel<OllamaTextEmbeddingModelSettings> implements EmbeddingModel<string, OllamaTextEmbeddingModelSettings> {
13
13
  constructor(settings: OllamaTextEmbeddingModelSettings);
14
14
  readonly provider: "ollama";
15
15
  get modelName(): null;
16
16
  readonly maxValuesPerCall = 1;
17
- get isParallizable(): boolean;
17
+ get isParallelizable(): boolean;
18
18
  readonly contextWindowSize: undefined;
19
19
  get embeddingDimensions(): number | undefined;
20
20
  callAPI(texts: Array<string>, options?: FunctionOptions): Promise<OllamaTextEmbeddingResponse>;
@@ -29,8 +29,8 @@ export class OllamaTextEmbeddingModel extends AbstractModel {
29
29
  get modelName() {
30
30
  return null;
31
31
  }
32
- get isParallizable() {
33
- return this.settings.isParallizable ?? false;
32
+ get isParallelizable() {
33
+ return this.settings.isParallelizable ?? false;
34
34
  }
35
35
  get embeddingDimensions() {
36
36
  return this.settings.embeddingDimensions;
@@ -10,13 +10,26 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
10
10
  if (k2 === undefined) k2 = k;
11
11
  o[k2] = m[k];
12
12
  }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
13
18
  var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
19
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
20
  };
21
+ var __importStar = (this && this.__importStar) || function (mod) {
22
+ if (mod && mod.__esModule) return mod;
23
+ var result = {};
24
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
25
+ __setModuleDefault(result, mod);
26
+ return result;
27
+ };
16
28
  Object.defineProperty(exports, "__esModule", { value: true });
17
- exports.OllamaError = void 0;
29
+ exports.ollama = exports.OllamaError = void 0;
18
30
  __exportStar(require("./OllamaApiConfiguration.cjs"), exports);
19
31
  var OllamaError_js_1 = require("./OllamaError.cjs");
20
32
  Object.defineProperty(exports, "OllamaError", { enumerable: true, get: function () { return OllamaError_js_1.OllamaError; } });
33
+ exports.ollama = __importStar(require("./OllamaFacade.cjs"));
21
34
  __exportStar(require("./OllamaTextEmbeddingModel.cjs"), exports);
22
35
  __exportStar(require("./OllamaTextGenerationModel.cjs"), exports);
@@ -1,4 +1,5 @@
1
1
  export * from "./OllamaApiConfiguration.js";
2
2
  export { OllamaError } from "./OllamaError.js";
3
+ export * as ollama from "./OllamaFacade.js";
3
4
  export * from "./OllamaTextEmbeddingModel.js";
4
5
  export * from "./OllamaTextGenerationModel.js";
@@ -1,4 +1,5 @@
1
1
  export * from "./OllamaApiConfiguration.js";
2
2
  export { OllamaError } from "./OllamaError.js";
3
+ export * as ollama from "./OllamaFacade.js";
3
4
  export * from "./OllamaTextEmbeddingModel.js";
4
5
  export * from "./OllamaTextGenerationModel.js";
@@ -0,0 +1,148 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.Tokenizer = exports.ImageGenerator = exports.Transcription = exports.Speech = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
4
+ const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
5
+ const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
6
+ const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
7
+ const OpenAITextEmbeddingModel_js_1 = require("./OpenAITextEmbeddingModel.cjs");
8
+ const OpenAITranscriptionModel_js_1 = require("./OpenAITranscriptionModel.cjs");
9
+ const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
10
+ const OpenAIChatModel_js_1 = require("./chat/OpenAIChatModel.cjs");
11
+ /**
12
+ * Create a text generation model that calls the OpenAI text completion API.
13
+ *
14
+ * @see https://platform.openai.com/docs/api-reference/completions/create
15
+ *
16
+ * @example
17
+ * const model = openai.CompletionTextGenerator({
18
+ * model: "gpt-3.5-turbo-instruct",
19
+ * temperature: 0.7,
20
+ * maxCompletionTokens: 500,
21
+ * retry: retryWithExponentialBackoff({ maxTries: 5 }),
22
+ * });
23
+ *
24
+ * const text = await generateText(
25
+ * model,
26
+ * "Write a short story about a robot learning to love:\n\n"
27
+ * );
28
+ *
29
+ * @return A new instance of {@link OpenAICompletionModel}.
30
+ */
31
+ function CompletionTextGenerator(settings) {
32
+ return new OpenAICompletionModel_js_1.OpenAICompletionModel(settings);
33
+ }
34
+ exports.CompletionTextGenerator = CompletionTextGenerator;
35
+ /**
36
+ * Create a text generation model that calls the OpenAI chat completion API.
37
+ *
38
+ * @see https://platform.openai.com/docs/api-reference/chat/create
39
+ *
40
+ * @example
41
+ * const model = openai.ChatTextGenerator({
42
+ * model: "gpt-3.5-turbo",
43
+ * temperature: 0.7,
44
+ * maxCompletionTokens: 500,
45
+ * });
46
+ *
47
+ * const text = await generateText([
48
+ * model,
49
+ * OpenAIChatMessage.system(
50
+ * "Write a short story about a robot learning to love:"
51
+ * ),
52
+ * ]);
53
+ */
54
+ function ChatTextGenerator(settings) {
55
+ return new OpenAIChatModel_js_1.OpenAIChatModel(settings);
56
+ }
57
+ exports.ChatTextGenerator = ChatTextGenerator;
58
+ /**
59
+ * Create a text embedding model that calls the OpenAI embedding API.
60
+ *
61
+ * @see https://platform.openai.com/docs/api-reference/embeddings
62
+ *
63
+ * @example
64
+ * const embeddings = await embedMany(
65
+ * openai.TextEmbedder({ model: "text-embedding-ada-002" }),
66
+ * [
67
+ * "At first, Nox didn't know what to do with the pup.",
68
+ * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
69
+ * ]
70
+ * );
71
+ *
72
+ * @returns A new instance of {@link OpenAITextEmbeddingModel}.
73
+ */
74
+ function TextEmbedder(settings) {
75
+ return new OpenAITextEmbeddingModel_js_1.OpenAITextEmbeddingModel(settings);
76
+ }
77
+ exports.TextEmbedder = TextEmbedder;
78
+ /**
79
+ * Synthesize speech using the OpenAI API.
80
+ *
81
+ * @see https://platform.openai.com/docs/api-reference/audio/createSpeech
82
+ *
83
+ * @returns A new instance of {@link OpenAISpeechModel}.
84
+ */
85
+ function Speech(settings) {
86
+ return new OpenAISpeechModel_js_1.OpenAISpeechModel(settings);
87
+ }
88
+ exports.Speech = Speech;
89
+ /**
90
+ * Create a transcription model that calls the OpenAI transcription API.
91
+ *
92
+ * @see https://platform.openai.com/docs/api-reference/audio/create
93
+ *
94
+ * @example
95
+ * const data = await fs.promises.readFile("data/test.mp3");
96
+ *
97
+ * const transcription = await transcribe(
98
+ * openai.Transcription({ model: "whisper-1" }),
99
+ * {
100
+ * type: "mp3",
101
+ * data,
102
+ * }
103
+ * );
104
+ *
105
+ * @returns A new instance of {@link OpenAITranscriptionModel}.
106
+ */
107
+ function Transcription(settings) {
108
+ return new OpenAITranscriptionModel_js_1.OpenAITranscriptionModel(settings);
109
+ }
110
+ exports.Transcription = Transcription;
111
+ /**
112
+ * Create an image generation model that calls the OpenAI AI image creation API.
113
+ *
114
+ * @see https://platform.openai.com/docs/api-reference/images/create
115
+ *
116
+ * @example
117
+ * const image = await generateImage(
118
+ * new OpenAIImageGenerationModel({ size: "512x512" }),
119
+ * "the wicked witch of the west in the style of early 19th century painting"
120
+ * );
121
+ *
122
+ * @returns A new instance of {@link OpenAIImageGenerationModel}.
123
+ */
124
+ function ImageGenerator(settings) {
125
+ return new OpenAIImageGenerationModel_js_1.OpenAIImageGenerationModel(settings);
126
+ }
127
+ exports.ImageGenerator = ImageGenerator;
128
+ /**
129
+ * Creates a TikToken tokenizer for OpenAI language models.
130
+ *
131
+ * @see https://github.com/openai/tiktoken
132
+ *
133
+ * @example
134
+ * const tokenizer = openai.Tokenizer({ model: "gpt-4" });
135
+ *
136
+ * const text = "At first, Nox didn't know what to do with the pup.";
137
+ *
138
+ * const tokenCount = await countTokens(tokenizer, text);
139
+ * const tokens = await tokenizer.tokenize(text);
140
+ * const tokensAndTokenTexts = await tokenizer.tokenizeWithTexts(text);
141
+ * const reconstructedText = await tokenizer.detokenize(tokens);
142
+ *
143
+ * @returns A new instance of {@link TikTokenTokenizer}.
144
+ */
145
+ function Tokenizer(settings) {
146
+ return new TikTokenTokenizer_js_1.TikTokenTokenizer(settings);
147
+ }
148
+ exports.Tokenizer = Tokenizer;
@@ -0,0 +1,124 @@
1
+ import { OpenAICompletionModel, OpenAICompletionModelSettings } from "./OpenAICompletionModel.js";
2
+ import { OpenAIImageGenerationModel, OpenAIImageGenerationSettings } from "./OpenAIImageGenerationModel.js";
3
+ import { OpenAISpeechModel, OpenAISpeechModelSettings } from "./OpenAISpeechModel.js";
4
+ import { OpenAITextEmbeddingModel, OpenAITextEmbeddingModelSettings } from "./OpenAITextEmbeddingModel.js";
5
+ import { OpenAITranscriptionModel, OpenAITranscriptionModelSettings } from "./OpenAITranscriptionModel.js";
6
+ import { TikTokenTokenizer, TikTokenTokenizerSettings } from "./TikTokenTokenizer.js";
7
+ import { OpenAIChatModel, OpenAIChatSettings } from "./chat/OpenAIChatModel.js";
8
+ /**
9
+ * Create a text generation model that calls the OpenAI text completion API.
10
+ *
11
+ * @see https://platform.openai.com/docs/api-reference/completions/create
12
+ *
13
+ * @example
14
+ * const model = openai.CompletionTextGenerator({
15
+ * model: "gpt-3.5-turbo-instruct",
16
+ * temperature: 0.7,
17
+ * maxCompletionTokens: 500,
18
+ * retry: retryWithExponentialBackoff({ maxTries: 5 }),
19
+ * });
20
+ *
21
+ * const text = await generateText(
22
+ * model,
23
+ * "Write a short story about a robot learning to love:\n\n"
24
+ * );
25
+ *
26
+ * @return A new instance of {@link OpenAICompletionModel}.
27
+ */
28
+ export declare function CompletionTextGenerator(settings: OpenAICompletionModelSettings): OpenAICompletionModel;
29
+ /**
30
+ * Create a text generation model that calls the OpenAI chat completion API.
31
+ *
32
+ * @see https://platform.openai.com/docs/api-reference/chat/create
33
+ *
34
+ * @example
35
+ * const model = openai.ChatTextGenerator({
36
+ * model: "gpt-3.5-turbo",
37
+ * temperature: 0.7,
38
+ * maxCompletionTokens: 500,
39
+ * });
40
+ *
41
+ * const text = await generateText([
42
+ * model,
43
+ * OpenAIChatMessage.system(
44
+ * "Write a short story about a robot learning to love:"
45
+ * ),
46
+ * ]);
47
+ */
48
+ export declare function ChatTextGenerator(settings: OpenAIChatSettings): OpenAIChatModel;
49
+ /**
50
+ * Create a text embedding model that calls the OpenAI embedding API.
51
+ *
52
+ * @see https://platform.openai.com/docs/api-reference/embeddings
53
+ *
54
+ * @example
55
+ * const embeddings = await embedMany(
56
+ * openai.TextEmbedder({ model: "text-embedding-ada-002" }),
57
+ * [
58
+ * "At first, Nox didn't know what to do with the pup.",
59
+ * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
60
+ * ]
61
+ * );
62
+ *
63
+ * @returns A new instance of {@link OpenAITextEmbeddingModel}.
64
+ */
65
+ export declare function TextEmbedder(settings: OpenAITextEmbeddingModelSettings): OpenAITextEmbeddingModel;
66
+ /**
67
+ * Synthesize speech using the OpenAI API.
68
+ *
69
+ * @see https://platform.openai.com/docs/api-reference/audio/createSpeech
70
+ *
71
+ * @returns A new instance of {@link OpenAISpeechModel}.
72
+ */
73
+ export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeechModel;
74
+ /**
75
+ * Create a transcription model that calls the OpenAI transcription API.
76
+ *
77
+ * @see https://platform.openai.com/docs/api-reference/audio/create
78
+ *
79
+ * @example
80
+ * const data = await fs.promises.readFile("data/test.mp3");
81
+ *
82
+ * const transcription = await transcribe(
83
+ * openai.Transcription({ model: "whisper-1" }),
84
+ * {
85
+ * type: "mp3",
86
+ * data,
87
+ * }
88
+ * );
89
+ *
90
+ * @returns A new instance of {@link OpenAITranscriptionModel}.
91
+ */
92
+ export declare function Transcription(settings: OpenAITranscriptionModelSettings): OpenAITranscriptionModel;
93
+ /**
94
+ * Create an image generation model that calls the OpenAI AI image creation API.
95
+ *
96
+ * @see https://platform.openai.com/docs/api-reference/images/create
97
+ *
98
+ * @example
99
+ * const image = await generateImage(
100
+ * new OpenAIImageGenerationModel({ size: "512x512" }),
101
+ * "the wicked witch of the west in the style of early 19th century painting"
102
+ * );
103
+ *
104
+ * @returns A new instance of {@link OpenAIImageGenerationModel}.
105
+ */
106
+ export declare function ImageGenerator(settings: OpenAIImageGenerationSettings): OpenAIImageGenerationModel;
107
+ /**
108
+ * Creates a TikToken tokenizer for OpenAI language models.
109
+ *
110
+ * @see https://github.com/openai/tiktoken
111
+ *
112
+ * @example
113
+ * const tokenizer = openai.Tokenizer({ model: "gpt-4" });
114
+ *
115
+ * const text = "At first, Nox didn't know what to do with the pup.";
116
+ *
117
+ * const tokenCount = await countTokens(tokenizer, text);
118
+ * const tokens = await tokenizer.tokenize(text);
119
+ * const tokensAndTokenTexts = await tokenizer.tokenizeWithTexts(text);
120
+ * const reconstructedText = await tokenizer.detokenize(tokens);
121
+ *
122
+ * @returns A new instance of {@link TikTokenTokenizer}.
123
+ */
124
+ export declare function Tokenizer(settings: TikTokenTokenizerSettings): TikTokenTokenizer;
@@ -0,0 +1,138 @@
1
+ import { OpenAICompletionModel, } from "./OpenAICompletionModel.js";
2
+ import { OpenAIImageGenerationModel, } from "./OpenAIImageGenerationModel.js";
3
+ import { OpenAISpeechModel, } from "./OpenAISpeechModel.js";
4
+ import { OpenAITextEmbeddingModel, } from "./OpenAITextEmbeddingModel.js";
5
+ import { OpenAITranscriptionModel, } from "./OpenAITranscriptionModel.js";
6
+ import { TikTokenTokenizer, } from "./TikTokenTokenizer.js";
7
+ import { OpenAIChatModel } from "./chat/OpenAIChatModel.js";
8
+ /**
9
+ * Create a text generation model that calls the OpenAI text completion API.
10
+ *
11
+ * @see https://platform.openai.com/docs/api-reference/completions/create
12
+ *
13
+ * @example
14
+ * const model = openai.CompletionTextGenerator({
15
+ * model: "gpt-3.5-turbo-instruct",
16
+ * temperature: 0.7,
17
+ * maxCompletionTokens: 500,
18
+ * retry: retryWithExponentialBackoff({ maxTries: 5 }),
19
+ * });
20
+ *
21
+ * const text = await generateText(
22
+ * model,
23
+ * "Write a short story about a robot learning to love:\n\n"
24
+ * );
25
+ *
26
+ * @return A new instance of {@link OpenAICompletionModel}.
27
+ */
28
+ export function CompletionTextGenerator(settings) {
29
+ return new OpenAICompletionModel(settings);
30
+ }
31
+ /**
32
+ * Create a text generation model that calls the OpenAI chat completion API.
33
+ *
34
+ * @see https://platform.openai.com/docs/api-reference/chat/create
35
+ *
36
+ * @example
37
+ * const model = openai.ChatTextGenerator({
38
+ * model: "gpt-3.5-turbo",
39
+ * temperature: 0.7,
40
+ * maxCompletionTokens: 500,
41
+ * });
42
+ *
43
+ * const text = await generateText([
44
+ * model,
45
+ * OpenAIChatMessage.system(
46
+ * "Write a short story about a robot learning to love:"
47
+ * ),
48
+ * ]);
49
+ */
50
+ export function ChatTextGenerator(settings) {
51
+ return new OpenAIChatModel(settings);
52
+ }
53
+ /**
54
+ * Create a text embedding model that calls the OpenAI embedding API.
55
+ *
56
+ * @see https://platform.openai.com/docs/api-reference/embeddings
57
+ *
58
+ * @example
59
+ * const embeddings = await embedMany(
60
+ * openai.TextEmbedder({ model: "text-embedding-ada-002" }),
61
+ * [
62
+ * "At first, Nox didn't know what to do with the pup.",
63
+ * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
64
+ * ]
65
+ * );
66
+ *
67
+ * @returns A new instance of {@link OpenAITextEmbeddingModel}.
68
+ */
69
+ export function TextEmbedder(settings) {
70
+ return new OpenAITextEmbeddingModel(settings);
71
+ }
72
+ /**
73
+ * Synthesize speech using the OpenAI API.
74
+ *
75
+ * @see https://platform.openai.com/docs/api-reference/audio/createSpeech
76
+ *
77
+ * @returns A new instance of {@link OpenAISpeechModel}.
78
+ */
79
+ export function Speech(settings) {
80
+ return new OpenAISpeechModel(settings);
81
+ }
82
+ /**
83
+ * Create a transcription model that calls the OpenAI transcription API.
84
+ *
85
+ * @see https://platform.openai.com/docs/api-reference/audio/create
86
+ *
87
+ * @example
88
+ * const data = await fs.promises.readFile("data/test.mp3");
89
+ *
90
+ * const transcription = await transcribe(
91
+ * openai.Transcription({ model: "whisper-1" }),
92
+ * {
93
+ * type: "mp3",
94
+ * data,
95
+ * }
96
+ * );
97
+ *
98
+ * @returns A new instance of {@link OpenAITranscriptionModel}.
99
+ */
100
+ export function Transcription(settings) {
101
+ return new OpenAITranscriptionModel(settings);
102
+ }
103
+ /**
104
+ * Create an image generation model that calls the OpenAI AI image creation API.
105
+ *
106
+ * @see https://platform.openai.com/docs/api-reference/images/create
107
+ *
108
+ * @example
109
+ * const image = await generateImage(
110
+ * new OpenAIImageGenerationModel({ size: "512x512" }),
111
+ * "the wicked witch of the west in the style of early 19th century painting"
112
+ * );
113
+ *
114
+ * @returns A new instance of {@link OpenAIImageGenerationModel}.
115
+ */
116
+ export function ImageGenerator(settings) {
117
+ return new OpenAIImageGenerationModel(settings);
118
+ }
119
+ /**
120
+ * Creates a TikToken tokenizer for OpenAI language models.
121
+ *
122
+ * @see https://github.com/openai/tiktoken
123
+ *
124
+ * @example
125
+ * const tokenizer = openai.Tokenizer({ model: "gpt-4" });
126
+ *
127
+ * const text = "At first, Nox didn't know what to do with the pup.";
128
+ *
129
+ * const tokenCount = await countTokens(tokenizer, text);
130
+ * const tokens = await tokenizer.tokenize(text);
131
+ * const tokensAndTokenTexts = await tokenizer.tokenizeWithTexts(text);
132
+ * const reconstructedText = await tokenizer.detokenize(tokens);
133
+ *
134
+ * @returns A new instance of {@link TikTokenTokenizer}.
135
+ */
136
+ export function Tokenizer(settings) {
137
+ return new TikTokenTokenizer(settings);
138
+ }
@@ -57,7 +57,7 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
57
57
  writable: true,
58
58
  value: 2048
59
59
  });
60
- Object.defineProperty(this, "isParallizable", {
60
+ Object.defineProperty(this, "isParallelizable", {
61
61
  enumerable: true,
62
62
  configurable: true,
63
63
  writable: true,
@@ -41,7 +41,7 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
41
41
  readonly provider: "openai";
42
42
  get modelName(): "text-embedding-ada-002";
43
43
  readonly maxValuesPerCall = 2048;
44
- readonly isParallizable = true;
44
+ readonly isParallelizable = true;
45
45
  readonly embeddingDimensions: number;
46
46
  readonly tokenizer: TikTokenTokenizer;
47
47
  readonly contextWindowSize: number;
@@ -52,7 +52,7 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
52
52
  writable: true,
53
53
  value: 2048
54
54
  });
55
- Object.defineProperty(this, "isParallizable", {
55
+ Object.defineProperty(this, "isParallelizable", {
56
56
  enumerable: true,
57
57
  configurable: true,
58
58
  writable: true,
@@ -28,14 +28,14 @@ class TikTokenTokenizer {
28
28
  /**
29
29
  * Get a TikToken tokenizer for a specific model or encoding.
30
30
  */
31
- constructor(options) {
31
+ constructor(settings) {
32
32
  Object.defineProperty(this, "tiktoken", {
33
33
  enumerable: true,
34
34
  configurable: true,
35
35
  writable: true,
36
36
  value: void 0
37
37
  });
38
- this.tiktoken = new lite_1.Tiktoken(getTiktokenBPE(options.model));
38
+ this.tiktoken = new lite_1.Tiktoken(getTiktokenBPE(settings.model));
39
39
  }
40
40
  async tokenize(text) {
41
41
  return this.tiktoken.encode(text);
@@ -2,6 +2,9 @@ import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
2
2
  import { OpenAITextEmbeddingModelType } from "./OpenAITextEmbeddingModel.js";
3
3
  import { OpenAICompletionBaseModelType } from "./OpenAICompletionModel.js";
4
4
  import { OpenAIChatBaseModelType } from "./chat/OpenAIChatModel.js";
5
+ export type TikTokenTokenizerSettings = {
6
+ model: OpenAIChatBaseModelType | OpenAICompletionBaseModelType | OpenAITextEmbeddingModelType;
7
+ };
5
8
  /**
6
9
  * TikToken tokenizer for OpenAI language models.
7
10
  *
@@ -21,9 +24,7 @@ export declare class TikTokenTokenizer implements FullTokenizer {
21
24
  /**
22
25
  * Get a TikToken tokenizer for a specific model or encoding.
23
26
  */
24
- constructor(options: {
25
- model: OpenAIChatBaseModelType | OpenAICompletionBaseModelType | OpenAITextEmbeddingModelType;
26
- });
27
+ constructor(settings: TikTokenTokenizerSettings);
27
28
  private readonly tiktoken;
28
29
  tokenize(text: string): Promise<number[]>;
29
30
  tokenizeWithTexts(text: string): Promise<{