@ai-sdk/togetherai 0.0.0-1c33ba03-20260114162300

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,13 @@
+ Copyright 2023 Vercel, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
package/README.md ADDED
@@ -0,0 +1,35 @@
+ # AI SDK - Together.ai Provider
+
+ The **[Together.ai provider](https://ai-sdk.dev/providers/ai-sdk-providers/togetherai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Together.ai](https://together.ai) platform.
+
+ ## Setup
+
+ The Together.ai provider is available in the `@ai-sdk/togetherai` module. You can install it with
+
+ ```bash
+ npm i @ai-sdk/togetherai
+ ```
+
+ ## Provider Instance
+
+ You can import the default provider instance `togetherai` from `@ai-sdk/togetherai`:
+
+ ```ts
+ import { togetherai } from '@ai-sdk/togetherai';
+ ```
+
+ ## Example
+
+ ```ts
+ import { togetherai } from '@ai-sdk/togetherai';
+ import { generateText } from 'ai';
+
+ const { text } = await generateText({
+ model: togetherai('meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'),
+ prompt: 'Write a Python function that sorts a list:',
+ });
+ ```
+
+ ## Documentation
+
+ Please check out the **[Together.ai provider](https://ai-sdk.dev/providers/ai-sdk-providers/togetherai)** for more information.
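The README example uses the default `togetherai` instance. For custom configuration the package also exports `createTogetherAI`, whose settings are declared in the type definitions below. A minimal sketch, assuming the `TOGETHER_AI_API_KEY` environment variable and the default base URL that appear in `dist/index.js`; the extra header is purely illustrative:

```ts
import { createTogetherAI } from '@ai-sdk/togetherai';
import { generateText } from 'ai';

// All settings are optional; apiKey falls back to the TOGETHER_AI_API_KEY
// environment variable and baseURL to https://api.together.xyz/v1/.
const together = createTogetherAI({
  apiKey: process.env.TOGETHER_AI_API_KEY,
  baseURL: 'https://api.together.xyz/v1/',
  headers: { 'x-example-header': 'diff-review' }, // hypothetical custom header
});

const { text } = await generateText({
  model: together('meta-llama/Llama-3.3-70B-Instruct-Turbo'),
  prompt: 'Explain what a reranking model does in one paragraph.',
});
```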
@@ -0,0 +1,103 @@
+ export { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';
+ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, RerankingModelV3 } from '@ai-sdk/provider';
+ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
+ import { FetchFunction, InferSchema } from '@ai-sdk/provider-utils';
+
+ type TogetherAIRerankingModelId = 'Salesforce/Llama-Rank-v1' | 'mixedbread-ai/Mxbai-Rerank-Large-V2' | (string & {});
+ type TogetherAIRerankingOptions = {
+ /**
+ * List of keys in the JSON Object document to rank by.
+ * Defaults to use all supplied keys for ranking.
+ *
+ * @example ["title", "text"]
+ */
+ rankFields?: string[];
+ };
+
+ type TogetherAIChatModelId = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo' | 'meta-llama/Llama-3.2-3B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite' | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite' | 'meta-llama/Llama-3-8b-chat-hf' | 'meta-llama/Llama-3-70b-chat-hf' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'microsoft/WizardLM-2-8x22B' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'databricks/dbrx-instruct' | 'deepseek-ai/deepseek-llm-67b-chat' | 'deepseek-ai/DeepSeek-V3' | 'google/gemma-2b-it' | 'Gryphe/MythoMax-L2-13b' | 'meta-llama/Llama-2-13b-chat-hf' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO' | 'Qwen/Qwen2.5-7B-Instruct-Turbo' | 'Qwen/Qwen2.5-72B-Instruct-Turbo' | 'Qwen/Qwen2-72B-Instruct' | 'upstage/SOLAR-10.7B-Instruct-v1.0' | (string & {});
+
+ type TogetherAICompletionModelId = 'meta-llama/Llama-2-70b-hf' | 'mistralai/Mistral-7B-v0.1' | 'mistralai/Mixtral-8x7B-v0.1' | 'Meta-Llama/Llama-Guard-7b' | 'codellama/CodeLlama-34b-Instruct-hf' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | (string & {});
+
+ type TogetherAIEmbeddingModelId = 'togethercomputer/m2-bert-80M-2k-retrieval' | 'togethercomputer/m2-bert-80M-32k-retrieval' | 'togethercomputer/m2-bert-80M-8k-retrieval' | 'WhereIsAI/UAE-Large-V1' | 'BAAI/bge-large-en-v1.5' | 'BAAI/bge-base-en-v1.5' | 'sentence-transformers/msmarco-bert-base-dot-v5' | 'bert-base-uncased' | (string & {});
+
+ type TogetherAIImageModelId = 'stabilityai/stable-diffusion-xl-base-1.0' | 'black-forest-labs/FLUX.1-dev' | 'black-forest-labs/FLUX.1-dev-lora' | 'black-forest-labs/FLUX.1-schnell' | 'black-forest-labs/FLUX.1-canny' | 'black-forest-labs/FLUX.1-depth' | 'black-forest-labs/FLUX.1-redux' | 'black-forest-labs/FLUX.1.1-pro' | 'black-forest-labs/FLUX.1-pro' | 'black-forest-labs/FLUX.1-schnell-Free' | 'black-forest-labs/FLUX.1-kontext-pro' | 'black-forest-labs/FLUX.1-kontext-max' | 'black-forest-labs/FLUX.1-kontext-dev' | (string & {});
+
+ interface TogetherAIProviderSettings {
+ /**
+ TogetherAI API key.
+ */
+ apiKey?: string;
+ /**
+ Base URL for the API calls.
+ */
+ baseURL?: string;
+ /**
+ Custom headers to include in the requests.
+ */
+ headers?: Record<string, string>;
+ /**
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
+ or to provide a custom fetch implementation for e.g. testing.
+ */
+ fetch?: FetchFunction;
+ }
+ interface TogetherAIProvider extends ProviderV3 {
+ /**
+ Creates a model for text generation.
+ */
+ (modelId: TogetherAIChatModelId): LanguageModelV3;
+ /**
+ Creates a chat model for text generation.
+ */
+ chatModel(modelId: TogetherAIChatModelId): LanguageModelV3;
+ /**
+ Creates a chat model for text generation.
+ */
+ languageModel(modelId: TogetherAIChatModelId): LanguageModelV3;
+ /**
+ Creates a completion model for text generation.
+ */
+ completionModel(modelId: TogetherAICompletionModelId): LanguageModelV3;
+ /**
+ Creates a text embedding model for text generation.
+ */
+ embeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ * @deprecated Use `embeddingModel` instead.
+ */
+ textEmbeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ Creates a model for image generation.
+ */
+ image(modelId: TogetherAIImageModelId): ImageModelV3;
+ /**
+ Creates a model for image generation.
+ */
+ imageModel(modelId: TogetherAIImageModelId): ImageModelV3;
+ /**
+ * Creates a model for reranking.
+ */
+ reranking(modelId: TogetherAIRerankingModelId): RerankingModelV3;
+ /**
+ * Creates a model for reranking.
+ */
+ rerankingModel(modelId: TogetherAIRerankingModelId): RerankingModelV3;
+ }
+ declare function createTogetherAI(options?: TogetherAIProviderSettings): TogetherAIProvider;
+ declare const togetherai: TogetherAIProvider;
+
+ /**
+ * Provider options schema for Together AI image generation.
+ */
+ declare const togetheraiImageProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
+ [x: string]: unknown;
+ steps?: number | null | undefined;
+ guidance?: number | null | undefined;
+ negative_prompt?: string | null | undefined;
+ disable_safety_checker?: boolean | null | undefined;
+ }>;
+ type TogetherAIImageProviderOptions = InferSchema<typeof togetheraiImageProviderOptionsSchema>;
+
+ declare const VERSION: string;
+
+ export { type TogetherAIImageProviderOptions, type TogetherAIProvider, type TogetherAIProviderSettings, type TogetherAIRerankingOptions, VERSION, createTogetherAI, togetherai };
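The declarations above expose `TogetherAIImageProviderOptions` (`steps`, `guidance`, `negative_prompt`, `disable_safety_checker`), which the image model implementation further down parses from `providerOptions.togetherai`. A sketch of how these options could be passed, assuming the `experimental_generateImage` helper from the `ai` package (the exact export name may differ in the AI SDK version this build targets):

```ts
import { togetherai } from '@ai-sdk/togetherai';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: togetherai.image('black-forest-labs/FLUX.1-schnell'),
  prompt: 'A watercolor painting of a lighthouse at dusk',
  size: '1024x1024', // split into width/height by the image model (see dist/index.js)
  providerOptions: {
    togetherai: {
      steps: 4, // number of generation steps
      guidance: 3.5, // guidance scale
      negative_prompt: 'text, watermark',
      // disable_safety_checker is not available for Flux Schnell Free / Flux Pro models
    },
  },
});
```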
package/dist/index.js ADDED
@@ -0,0 +1,342 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+ for (var name in all)
+ __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+ if (from && typeof from === "object" || typeof from === "function") {
+ for (let key of __getOwnPropNames(from))
+ if (!__hasOwnProp.call(to, key) && key !== except)
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+ }
+ return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var src_exports = {};
+ __export(src_exports, {
+ VERSION: () => VERSION,
+ createTogetherAI: () => createTogetherAI,
+ togetherai: () => togetherai
+ });
+ module.exports = __toCommonJS(src_exports);
+
+ // src/togetherai-provider.ts
+ var import_openai_compatible = require("@ai-sdk/openai-compatible");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+
+ // src/reranking/togetherai-reranking-model.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+
+ // src/reranking/togetherai-reranking-api.ts
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+ var import_v4 = require("zod/v4");
+ var togetheraiErrorSchema = (0, import_provider_utils.lazySchema)(
+ () => (0, import_provider_utils.zodSchema)(
+ import_v4.z.object({
+ error: import_v4.z.object({
+ message: import_v4.z.string()
+ })
+ })
+ )
+ );
+ var togetheraiRerankingResponseSchema = (0, import_provider_utils.lazySchema)(
+ () => (0, import_provider_utils.zodSchema)(
+ import_v4.z.object({
+ id: import_v4.z.string().nullish(),
+ model: import_v4.z.string().nullish(),
+ results: import_v4.z.array(
+ import_v4.z.object({
+ index: import_v4.z.number(),
+ relevance_score: import_v4.z.number()
+ })
+ ),
+ usage: import_v4.z.object({
+ prompt_tokens: import_v4.z.number(),
+ completion_tokens: import_v4.z.number(),
+ total_tokens: import_v4.z.number()
+ })
+ })
+ )
+ );
+
+ // src/reranking/togetherai-reranking-options.ts
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var import_v42 = require("zod/v4");
+ var togetheraiRerankingOptionsSchema = (0, import_provider_utils2.lazySchema)(
+ () => (0, import_provider_utils2.zodSchema)(
+ import_v42.z.object({
+ rankFields: import_v42.z.array(import_v42.z.string()).optional()
+ })
+ )
+ );
+
+ // src/reranking/togetherai-reranking-model.ts
+ var TogetherAIRerankingModel = class {
+ constructor(modelId, config) {
+ this.specificationVersion = "v3";
+ this.modelId = modelId;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ // see https://docs.together.ai/reference/rerank-1
+ async doRerank({
+ documents,
+ headers,
+ query,
+ topN,
+ abortSignal,
+ providerOptions
+ }) {
+ var _a, _b;
+ const rerankingOptions = await (0, import_provider_utils3.parseProviderOptions)({
+ provider: "togetherai",
+ providerOptions,
+ schema: togetheraiRerankingOptionsSchema
+ });
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils3.postJsonToApi)({
+ url: `${this.config.baseURL}/rerank`,
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), headers),
+ body: {
+ model: this.modelId,
+ documents: documents.values,
+ query,
+ top_n: topN,
+ rank_fields: rerankingOptions == null ? void 0 : rerankingOptions.rankFields,
+ return_documents: false
+ // reduce response size
+ },
+ failedResponseHandler: (0, import_provider_utils3.createJsonErrorResponseHandler)({
+ errorSchema: togetheraiErrorSchema,
+ errorToMessage: (data) => data.error.message
+ }),
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ togetheraiRerankingResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ ranking: response.results.map((result) => ({
+ index: result.index,
+ relevanceScore: result.relevance_score
+ })),
+ response: {
+ id: (_a = response.id) != null ? _a : void 0,
+ modelId: (_b = response.model) != null ? _b : void 0,
+ headers: responseHeaders,
+ body: rawValue
+ }
+ };
+ }
+ };
+
+ // src/togetherai-image-model.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_v43 = require("zod/v4");
+ var TogetherAIImageModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v3";
+ this.maxImagesPerCall = 1;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async doGenerate({
+ prompt,
+ n,
+ size,
+ seed,
+ providerOptions,
+ headers,
+ abortSignal,
+ files,
+ mask
+ }) {
+ var _a, _b, _c;
+ const warnings = [];
+ if (mask != null) {
+ throw new Error(
+ "Together AI does not support mask-based image editing. Use FLUX Kontext models (e.g., black-forest-labs/FLUX.1-kontext-pro) with a reference image and descriptive prompt instead."
+ );
+ }
+ if (size != null) {
+ warnings.push({
+ type: "unsupported",
+ feature: "aspectRatio",
+ details: "This model does not support the `aspectRatio` option. Use `size` instead."
+ });
+ }
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const togetheraiOptions = await (0, import_provider_utils4.parseProviderOptions)({
+ provider: "togetherai",
+ providerOptions,
+ schema: togetheraiImageProviderOptionsSchema
+ });
+ let imageUrl;
+ if (files != null && files.length > 0) {
+ imageUrl = (0, import_provider_utils4.convertImageModelFileToDataUri)(files[0]);
+ if (files.length > 1) {
+ warnings.push({
+ type: "other",
+ message: "Together AI only supports a single input image. Additional images are ignored."
+ });
+ }
+ }
+ const splitSize = size == null ? void 0 : size.split("x");
+ const { value: response, responseHeaders } = await (0, import_provider_utils4.postJsonToApi)({
+ url: `${this.config.baseURL}/images/generations`,
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
+ body: {
+ model: this.modelId,
+ prompt,
+ seed,
+ n,
+ ...splitSize && {
+ width: parseInt(splitSize[0]),
+ height: parseInt(splitSize[1])
+ },
+ ...imageUrl != null ? { image_url: imageUrl } : {},
+ response_format: "base64",
+ ...togetheraiOptions != null ? togetheraiOptions : {}
+ },
+ failedResponseHandler: (0, import_provider_utils4.createJsonErrorResponseHandler)({
+ errorSchema: togetheraiErrorSchema2,
+ errorToMessage: (data) => data.error.message
+ }),
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ togetheraiImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response.data.map((item) => item.b64_json),
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders
+ }
+ };
+ }
+ };
+ var togetheraiImageResponseSchema = import_v43.z.object({
+ data: import_v43.z.array(
+ import_v43.z.object({
+ b64_json: import_v43.z.string()
+ })
+ )
+ });
+ var togetheraiErrorSchema2 = import_v43.z.object({
+ error: import_v43.z.object({
+ message: import_v43.z.string()
+ })
+ });
+ var togetheraiImageProviderOptionsSchema = (0, import_provider_utils4.lazySchema)(
+ () => (0, import_provider_utils4.zodSchema)(
+ import_v43.z.object({
+ /**
+ * Number of generation steps. Higher values can improve quality.
+ */
+ steps: import_v43.z.number().nullish(),
+ /**
+ * Guidance scale for image generation.
+ */
+ guidance: import_v43.z.number().nullish(),
+ /**
+ * Negative prompt to guide what to avoid.
+ */
+ negative_prompt: import_v43.z.string().nullish(),
+ /**
+ * Disable the safety checker for image generation.
+ * When true, the API will not reject images flagged as potentially NSFW.
+ * Not available for Flux Schnell Free and Flux Pro models.
+ */
+ disable_safety_checker: import_v43.z.boolean().nullish()
+ }).passthrough()
+ )
+ );
+
+ // src/version.ts
+ var VERSION = true ? "0.0.0-1c33ba03-20260114162300" : "0.0.0-test";
+
+ // src/togetherai-provider.ts
+ function createTogetherAI(options = {}) {
+ var _a;
+ const baseURL = (0, import_provider_utils5.withoutTrailingSlash)(
+ (_a = options.baseURL) != null ? _a : "https://api.together.xyz/v1/"
+ );
+ const getHeaders = () => (0, import_provider_utils5.withUserAgentSuffix)(
+ {
+ Authorization: `Bearer ${(0, import_provider_utils5.loadApiKey)({
+ apiKey: options.apiKey,
+ environmentVariableName: "TOGETHER_AI_API_KEY",
+ description: "TogetherAI"
+ })}`,
+ ...options.headers
+ },
+ `ai-sdk/togetherai/${VERSION}`
+ );
+ const getCommonModelConfig = (modelType) => ({
+ provider: `togetherai.${modelType}`,
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
+ const createChatModel = (modelId) => {
+ return new import_openai_compatible.OpenAICompatibleChatLanguageModel(
+ modelId,
+ getCommonModelConfig("chat")
+ );
+ };
+ const createCompletionModel = (modelId) => new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(
+ modelId,
+ getCommonModelConfig("completion")
+ );
+ const createEmbeddingModel = (modelId) => new import_openai_compatible.OpenAICompatibleEmbeddingModel(
+ modelId,
+ getCommonModelConfig("embedding")
+ );
+ const createImageModel = (modelId) => new TogetherAIImageModel(modelId, {
+ ...getCommonModelConfig("image"),
+ baseURL: baseURL != null ? baseURL : "https://api.together.xyz/v1/"
+ });
+ const createRerankingModel = (modelId) => new TogetherAIRerankingModel(modelId, {
+ ...getCommonModelConfig("reranking"),
+ baseURL: baseURL != null ? baseURL : "https://api.together.xyz/v1/"
+ });
+ const provider = (modelId) => createChatModel(modelId);
+ provider.specificationVersion = "v3";
+ provider.completionModel = createCompletionModel;
+ provider.languageModel = createChatModel;
+ provider.chatModel = createChatModel;
+ provider.embeddingModel = createEmbeddingModel;
+ provider.textEmbeddingModel = createEmbeddingModel;
+ provider.image = createImageModel;
+ provider.imageModel = createImageModel;
+ provider.reranking = createRerankingModel;
+ provider.rerankingModel = createRerankingModel;
+ return provider;
+ }
+ var togetherai = createTogetherAI();
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+ VERSION,
+ createTogetherAI,
+ togetherai
+ });
+ //# sourceMappingURL=index.js.map
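`createTogetherAI` above threads `options.fetch` into every model through `getCommonModelConfig`, so a wrapped fetch can act as middleware to inspect or modify outgoing requests, as the `fetch` setting's docblock suggests. A minimal sketch; the logging is illustrative and not part of the package:

```ts
import { createTogetherAI } from '@ai-sdk/togetherai';

// Wrap the global fetch to observe requests sent to the Together AI API.
const together = createTogetherAI({
  fetch: async (url, init) => {
    console.log('togetherai request:', url.toString());
    return fetch(url, init);
  },
});

// The same wrapper is used by every model kind the provider creates:
// chat, completion, embedding, image, and reranking.
const model = together.chatModel('Qwen/Qwen2.5-72B-Instruct-Turbo');
```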
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/index.ts","../src/togetherai-provider.ts","../src/reranking/togetherai-reranking-model.ts","../src/reranking/togetherai-reranking-api.ts","../src/reranking/togetherai-reranking-options.ts","../src/togetherai-image-model.ts","../src/version.ts"],"sourcesContent":["export type { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';\nexport type { TogetherAIRerankingOptions } from './reranking/togetherai-reranking-options';\nexport { createTogetherAI, togetherai } from './togetherai-provider';\nexport type {\n TogetherAIProvider,\n TogetherAIProviderSettings,\n} from './togetherai-provider';\nexport type { TogetherAIImageProviderOptions } from './togetherai-image-model';\nexport { VERSION } from './version';\n","import {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n EmbeddingModelV3,\n ImageModelV3,\n LanguageModelV3,\n ProviderV3,\n RerankingModelV3,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIRerankingModel } from './reranking/togetherai-reranking-model';\nimport { TogetherAIRerankingModelId } from './reranking/togetherai-reranking-options';\nimport { TogetherAIChatModelId } from './togetherai-chat-options';\nimport { TogetherAICompletionModelId } from './togetherai-completion-options';\nimport { TogetherAIEmbeddingModelId } from './togetherai-embedding-options';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\nimport { VERSION } from './version';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. 
testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV3 {\n /**\nCreates a model for text generation.\n*/\n (modelId: TogetherAIChatModelId): LanguageModelV3;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: TogetherAIChatModelId): LanguageModelV3;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: TogetherAIChatModelId): LanguageModelV3;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: TogetherAICompletionModelId): LanguageModelV3;\n\n /**\nCreates a text embedding model for text generation.\n*/\n embeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV3;\n\n /**\n * @deprecated Use `embeddingModel` instead.\n */\n textEmbeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV3;\n\n /**\nCreates a model for image generation.\n*/\n image(modelId: TogetherAIImageModelId): ImageModelV3;\n\n /**\nCreates a model for image generation.\n*/\n imageModel(modelId: TogetherAIImageModelId): ImageModelV3;\n\n /**\n * Creates a model for reranking.\n */\n reranking(modelId: TogetherAIRerankingModelId): RerankingModelV3;\n\n /**\n * Creates a model for reranking.\n */\n rerankingModel(modelId: TogetherAIRerankingModelId): RerankingModelV3;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 'https://api.together.xyz/v1/',\n );\n const getHeaders = () =>\n withUserAgentSuffix(\n {\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n },\n `ai-sdk/togetherai/${VERSION}`,\n );\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId: TogetherAIChatModelId) => {\n return new OpenAICompatibleChatLanguageModel(\n modelId,\n getCommonModelConfig('chat'),\n );\n };\n\n const createCompletionModel = (modelId: TogetherAICompletionModelId) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n getCommonModelConfig('completion'),\n );\n\n const createEmbeddingModel = (modelId: TogetherAIEmbeddingModelId) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (modelId: TogetherAIImageModelId) =>\n new TogetherAIImageModel(modelId, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const createRerankingModel = (modelId: TogetherAIRerankingModelId) =>\n new TogetherAIRerankingModel(modelId, {\n ...getCommonModelConfig('reranking'),\n baseURL: baseURL ?? 
'https://api.together.xyz/v1/',\n });\n\n const provider = (modelId: TogetherAIChatModelId) => createChatModel(modelId);\n\n provider.specificationVersion = 'v3' as const;\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.embeddingModel = createEmbeddingModel;\n provider.textEmbeddingModel = createEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n provider.reranking = createRerankingModel;\n provider.rerankingModel = createRerankingModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { RerankingModelV3 } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonErrorResponseHandler,\n createJsonResponseHandler,\n FetchFunction,\n parseProviderOptions,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport {\n togetheraiErrorSchema,\n TogetherAIRerankingInput,\n togetheraiRerankingResponseSchema,\n} from './togetherai-reranking-api';\nimport {\n TogetherAIRerankingModelId,\n togetheraiRerankingOptionsSchema,\n} from './togetherai-reranking-options';\n\ntype TogetherAIRerankingConfig = {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string | undefined>;\n fetch?: FetchFunction;\n};\n\nexport class TogetherAIRerankingModel implements RerankingModelV3 {\n readonly specificationVersion = 'v3';\n readonly modelId: TogetherAIRerankingModelId;\n\n private readonly config: TogetherAIRerankingConfig;\n\n constructor(\n modelId: TogetherAIRerankingModelId,\n config: TogetherAIRerankingConfig,\n ) {\n this.modelId = modelId;\n this.config = config;\n }\n\n get provider(): string {\n return this.config.provider;\n }\n\n // see https://docs.together.ai/reference/rerank-1\n async doRerank({\n documents,\n headers,\n query,\n topN,\n abortSignal,\n providerOptions,\n }: Parameters<RerankingModelV3['doRerank']>[0]): Promise<\n Awaited<ReturnType<RerankingModelV3['doRerank']>>\n > {\n const rerankingOptions = await parseProviderOptions({\n provider: 'togetherai',\n providerOptions,\n schema: togetheraiRerankingOptionsSchema,\n });\n\n const {\n responseHeaders,\n value: response,\n rawValue,\n } = await postJsonToApi({\n url: `${this.config.baseURL}/rerank`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n documents: documents.values,\n query,\n top_n: topN,\n rank_fields: rerankingOptions?.rankFields,\n return_documents: false, // reduce response size\n } satisfies TogetherAIRerankingInput,\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiRerankingResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n ranking: response.results.map(result => ({\n index: result.index,\n relevanceScore: result.relevance_score,\n })),\n response: {\n id: response.id ?? undefined,\n modelId: response.model ?? 
undefined,\n headers: responseHeaders,\n body: rawValue,\n },\n };\n }\n}\n","import { JSONObject } from '@ai-sdk/provider';\nimport { lazySchema, zodSchema } from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\n// https://docs.together.ai/reference/rerank-1\nexport type TogetherAIRerankingInput = {\n model: string;\n query: string;\n documents: JSONObject[] | string[];\n top_n: number | undefined;\n return_documents: boolean | undefined;\n rank_fields: string[] | undefined;\n};\n\nexport const togetheraiErrorSchema = lazySchema(() =>\n zodSchema(\n z.object({\n error: z.object({\n message: z.string(),\n }),\n }),\n ),\n);\n\nexport const togetheraiRerankingResponseSchema = lazySchema(() =>\n zodSchema(\n z.object({\n id: z.string().nullish(),\n model: z.string().nullish(),\n results: z.array(\n z.object({\n index: z.number(),\n relevance_score: z.number(),\n }),\n ),\n usage: z.object({\n prompt_tokens: z.number(),\n completion_tokens: z.number(),\n total_tokens: z.number(),\n }),\n }),\n ),\n);\n","import { FlexibleSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\n\n// see https://docs.together.ai/docs/serverless-models#rerank-models\nexport type TogetherAIRerankingModelId =\n | 'Salesforce/Llama-Rank-v1'\n | 'mixedbread-ai/Mxbai-Rerank-Large-V2'\n | (string & {});\n\nexport type TogetherAIRerankingOptions = {\n /**\n * List of keys in the JSON Object document to rank by.\n * Defaults to use all supplied keys for ranking.\n *\n * @example [\"title\", \"text\"]\n */\n rankFields?: string[];\n};\n\nexport const togetheraiRerankingOptionsSchema: FlexibleSchema<TogetherAIRerankingOptions> =\n lazySchema(() =>\n zodSchema(\n z.object({\n rankFields: z.array(z.string()).optional(),\n }),\n ),\n );\n","import { ImageModelV3, SharedV3Warning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n convertImageModelFileToDataUri,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n InferSchema,\n lazySchema,\n parseProviderOptions,\n postJsonToApi,\n zodSchema,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\nimport { z } from 'zod/v4';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV3 {\n readonly specificationVersion = 'v3';\n readonly maxImagesPerCall = 1;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n files,\n mask,\n }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV3['doGenerate']>>\n > {\n const warnings: Array<SharedV3Warning> = [];\n\n if (mask != null) {\n throw new Error(\n 'Together AI does not support mask-based image editing. ' +\n 'Use FLUX Kontext models (e.g., black-forest-labs/FLUX.1-kontext-pro) ' +\n 'with a reference image and descriptive prompt instead.',\n );\n }\n\n if (size != null) {\n warnings.push({\n type: 'unsupported',\n feature: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n\n const togetheraiOptions = await parseProviderOptions({\n provider: 'togetherai',\n providerOptions,\n schema: togetheraiImageProviderOptionsSchema,\n });\n\n // Handle image input from files\n let imageUrl: string | undefined;\n if (files != null && files.length > 0) {\n imageUrl = convertImageModelFileToDataUri(files[0]);\n\n if (files.length > 1) {\n warnings.push({\n type: 'other',\n message:\n 'Together AI only supports a single input image. Additional images are ignored.',\n });\n }\n }\n\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n ...(imageUrl != null ? { image_url: imageUrl } : {}),\n response_format: 'base64',\n ...(togetheraiOptions ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n\n/**\n * Provider options schema for Together AI image generation.\n */\nexport const togetheraiImageProviderOptionsSchema = lazySchema(() =>\n zodSchema(\n z\n .object({\n /**\n * Number of generation steps. Higher values can improve quality.\n */\n steps: z.number().nullish(),\n\n /**\n * Guidance scale for image generation.\n */\n guidance: z.number().nullish(),\n\n /**\n * Negative prompt to guide what to avoid.\n */\n negative_prompt: z.string().nullish(),\n\n /**\n * Disable the safety checker for image generation.\n * When true, the API will not reject images flagged as potentially NSFW.\n * Not available for Flux Schnell Free and Flux Pro models.\n */\n disable_safety_checker: z.boolean().nullish(),\n })\n .passthrough(),\n ),\n);\n\nexport type TogetherAIImageProviderOptions = InferSchema<\n typeof togetheraiImageProviderOptionsSchema\n>;\n","// Version string of this package injected at build time.\ndeclare const __PACKAGE_VERSION__: string | undefined;\nexport const VERSION: string =\n typeof __PACKAGE_VERSION__ !== 'undefined'\n ? 
__PACKAGE_VERSION__\n : '0.0.0-test';\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,+BAIO;AAQP,IAAAA,yBAKO;;;AChBP,IAAAC,yBAOO;;;ACPP,4BAAsC;AACtC,gBAAkB;AAYX,IAAM,4BAAwB;AAAA,EAAW,UAC9C;AAAA,IACE,YAAE,OAAO;AAAA,MACP,OAAO,YAAE,OAAO;AAAA,QACd,SAAS,YAAE,OAAO;AAAA,MACpB,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AACF;AAEO,IAAM,wCAAoC;AAAA,EAAW,UAC1D;AAAA,IACE,YAAE,OAAO;AAAA,MACP,IAAI,YAAE,OAAO,EAAE,QAAQ;AAAA,MACvB,OAAO,YAAE,OAAO,EAAE,QAAQ;AAAA,MAC1B,SAAS,YAAE;AAAA,QACT,YAAE,OAAO;AAAA,UACP,OAAO,YAAE,OAAO;AAAA,UAChB,iBAAiB,YAAE,OAAO;AAAA,QAC5B,CAAC;AAAA,MACH;AAAA,MACA,OAAO,YAAE,OAAO;AAAA,QACd,eAAe,YAAE,OAAO;AAAA,QACxB,mBAAmB,YAAE,OAAO;AAAA,QAC5B,cAAc,YAAE,OAAO;AAAA,MACzB,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AACF;;;AC1CA,IAAAC,yBAAsD;AACtD,IAAAC,aAAkB;AAkBX,IAAM,uCACX;AAAA,EAAW,UACT;AAAA,IACE,aAAE,OAAO;AAAA,MACP,YAAY,aAAE,MAAM,aAAE,OAAO,CAAC,EAAE,SAAS;AAAA,IAC3C,CAAC;AAAA,EACH;AACF;;;AFAK,IAAM,2BAAN,MAA2D;AAAA,EAMhE,YACE,SACA,QACA;AARF,SAAS,uBAAuB;AAS9B,SAAK,UAAU;AACf,SAAK,SAAS;AAAA,EAChB;AAAA,EAEA,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA;AAAA,EAGA,MAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAtDJ;AAuDI,UAAM,mBAAmB,UAAM,6CAAqB;AAAA,MAClD,UAAU;AAAA,MACV;AAAA,MACA,QAAQ;AAAA,IACV,CAAC;AAED,UAAM;AAAA,MACJ;AAAA,MACA,OAAO;AAAA,MACP;AAAA,IACF,IAAI,UAAM,sCAAc;AAAA,MACtB,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,aAAS,uCAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ,WAAW,UAAU;AAAA,QACrB;AAAA,QACA,OAAO;AAAA,QACP,aAAa,qDAAkB;AAAA,QAC/B,kBAAkB;AAAA;AAAA,MACpB;AAAA,MACA,2BAAuB,uDAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,+BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,SAAS,SAAS,QAAQ,IAAI,aAAW;AAAA,QACvC,OAAO,OAAO;AAAA,QACd,gBAAgB,OAAO;AAAA,MACzB,EAAE;AAAA,MACF,UAAU;AAAA,QACR,KAAI,cAAS,OAAT,YAAe;AAAA,QACnB,UAAS,cAAS,UAAT,YAAkB;AAAA,QAC3B,SAAS;AAAA,QACT,MAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACF;;;AGnGA,IAAAC,yBAWO;AAEP,IAAAC,aAAkB;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAQxD,YACW,SACD,QACR;AAFS;AACD;AATV,SAAS,uBAAuB;AAChC,SAAS,mBAAmB;AAAA,EASzB;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAnDJ;AAoDI,UAAM,WAAmC,CAAC;AAE1C,QAAI,QAAQ,MAAM;AAChB,YAAM,IAAI;AAAA,QACR;AAAA,MAGF;AAAA,IACF;AAEA,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AAEvE,UAAM,oBAAoB,UAAM,6CAAqB;AAAA,MACnD,UAAU;AAAA,MACV;AAAA,MACA,QAAQ;AAAA,IACV,CAAC;AAGD,QAAI;AACJ,QAAI,SAAS,QAAQ,MAAM,SAAS,GAAG;AACrC,qBAAW,uDAA+B,MAAM,CAAC,CAAC;AAElD,UAAI,MAAM,SAAS,GAAG;AACpB,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,SACE;AAAA,QACJ,CAAC;AAAA,MACH;AAAA,IACF;AAEA,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,UAAM,sCAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,aAAS,uCAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,GAAI,YAAY,OAAO,EAAE,WAAW,SAAS,IAAI,CAAC;AAAA,QAClD,iBAAiB;AAAA,QACjB,GAAI,gDAAqB,CAAC;AAAA,MAC5B;AAAA,MACA,2BAAuB,uDAA+B;AAAA,QACpD,aAAaC;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,+BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;A
AAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,aAAE,OAAO;AAAA,EAC7C,MAAM,aAAE;AAAA,IACN,aAAE,OAAO;AAAA,MACP,UAAU,aAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAMA,yBAAwB,aAAE,OAAO;AAAA,EACrC,OAAO,aAAE,OAAO;AAAA,IACd,SAAS,aAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;AAKM,IAAM,2CAAuC;AAAA,EAAW,UAC7D;AAAA,IACE,aACG,OAAO;AAAA;AAAA;AAAA;AAAA,MAIN,OAAO,aAAE,OAAO,EAAE,QAAQ;AAAA;AAAA;AAAA;AAAA,MAK1B,UAAU,aAAE,OAAO,EAAE,QAAQ;AAAA;AAAA;AAAA;AAAA,MAK7B,iBAAiB,aAAE,OAAO,EAAE,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAOpC,wBAAwB,aAAE,QAAQ,EAAE,QAAQ;AAAA,IAC9C,CAAC,EACA,YAAY;AAAA,EACjB;AACF;;;ACrLO,IAAM,UACX,OACI,kCACA;;;AL8FC,SAAS,iBACd,UAAsC,CAAC,GACnB;AArGtB;AAsGE,QAAM,cAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,UACjB;AAAA,IACE;AAAA,MACE,eAAe,cAAU,mCAAW;AAAA,QAClC,QAAQ,QAAQ;AAAA,QAChB,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,MACF,GAAG,QAAQ;AAAA,IACb;AAAA,IACA,qBAAqB,OAAO;AAAA,EAC9B;AASF,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAmC;AAC1D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,qBAAqB,MAAM;AAAA,IAC7B;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAC7B,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,uBAAuB,CAAC,YAC5B,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CAAC,YACxB,IAAI,qBAAqB,SAAS;AAAA,IAChC,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,uBAAuB,CAAC,YAC5B,IAAI,yBAAyB,SAAS;AAAA,IACpC,GAAG,qBAAqB,WAAW;AAAA,IACnC,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CAAC,YAAmC,gBAAgB,OAAO;AAE5E,WAAS,uBAAuB;AAChC,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,iBAAiB;AAC1B,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AACtB,WAAS,YAAY;AACrB,WAAS,iBAAiB;AAE1B,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":["import_provider_utils","import_provider_utils","import_provider_utils","import_v4","import_provider_utils","import_v4","togetheraiErrorSchema"]}
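Embeddings go through `OpenAICompatibleEmbeddingModel` via `createEmbeddingModel` in the bundle above. A short usage sketch with the `embed` helper from the `ai` package and one of the embedding model ids listed in the type declarations:

```ts
import { togetherai } from '@ai-sdk/togetherai';
import { embed } from 'ai';

const { embedding } = await embed({
  model: togetherai.embeddingModel('BAAI/bge-base-en-v1.5'),
  value: 'sunny day at the beach',
});

console.log(embedding.length); // dimensionality of the returned vector
```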