open-model-selector 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,304 @@
1
/** Union of all supported model type identifiers. */
type ModelType = 'text' | 'image' | 'video' | 'inpaint' | 'embedding' | 'tts' | 'asr' | 'upscale';
/** Lifecycle metadata for a model that is scheduled for, or past, retirement. */
interface Deprecation {
    /** ISO 8601 date string indicating when the model is/was deprecated */
    date: string;
}
/** Fields shared by every normalized model, regardless of its concrete type. */
interface BaseModel {
    /** Model identifier; may be provider-prefixed (e.g. "provider/model-name"). */
    id: string;
    /** Display name; normalizers fall back to the id when no name is provided. */
    name: string;
    /** Provider name, derived from the id prefix before "/" or from `owned_by`. */
    provider: string;
    /** Unix timestamp in seconds; normalizers default to "now" when the API omits it. */
    created: number;
    /** Discriminant used to narrow AnyModel to a concrete model interface. */
    type: ModelType;
    description?: string;
    privacy?: 'private' | 'anonymized';
    offline?: boolean;
    betaModel?: boolean;
    /** Whether the user has marked this model as a favorite.
     * CLIENT-SIDE OVERLAY — this field is NEVER returned by any API.
     * It is a UI-layer concern managed via localStorage in the ModelSelector component.
     * Normalizers MUST default this to `false`; the component hydrates the real value
     * from localStorage at render time. */
    is_favorite?: boolean;
    modelSource?: string;
    traits?: string[];
    /** Deprecation info if this model is scheduled for or has been deprecated. */
    deprecation?: Deprecation;
}
28
+
29
/** Per-token USD prices for a text model (OpenAI/OpenRouter per-token convention). */
interface TextPricing {
    prompt?: number;
    completion?: number;
    cache_input?: number;
    cache_write?: number;
}
/** Feature flags indicating what a text model supports (vision, reasoning, function calling, etc.). */
interface TextCapabilities {
    optimizedForCode?: boolean;
    supportsVision?: boolean;
    supportsReasoning?: boolean;
    supportsFunctionCalling?: boolean;
    supportsResponseSchema?: boolean;
    supportsLogProbs?: boolean;
    supportsAudioInput?: boolean;
    supportsVideoInput?: boolean;
    supportsWebSearch?: boolean;
    quantization?: string;
}
/** Default sampling parameter values for a text model. */
interface TextConstraints {
    temperature?: {
        default: number;
    };
    top_p?: {
        default: number;
    };
}
/** A text/chat completion model with context length, pricing, and optional capabilities. */
interface TextModel extends BaseModel {
    type: 'text';
    /** Maximum context window in tokens; normalizers use 0 when the provider reports none. */
    context_length: number;
    pricing: TextPricing;
    capabilities?: TextCapabilities;
    constraints?: TextConstraints;
}
65
+
66
/** USD prices for image generation and built-in upscaling. */
interface ImagePricing {
    generation?: number;
    upscale_2x?: number;
    upscale_4x?: number;
    /** Per-resolution USD prices keyed by resolution label. */
    resolutions?: Record<string, number>;
}
/** Generation constraints for an image model (prompt limits, aspect ratios, resolutions, etc.). */
interface ImageConstraints {
    promptCharacterLimit?: number;
    steps?: {
        default: number;
        max: number;
    };
    widthHeightDivisor?: number;
    aspectRatios?: string[];
    defaultAspectRatio?: string;
    resolutions?: string[];
    defaultResolution?: string;
}
/** An image generation model with pricing and generation constraints. */
interface ImageModel extends BaseModel {
    type: 'image';
    pricing: ImagePricing;
    constraints?: ImageConstraints;
    /** Whether web search is supported for this image model.
     * In the Venice API, this field lives at `model_spec.supportsWebSearch`
     * (a sibling of `model_spec.constraints`), NOT inside `model_spec.constraints`. */
    supportsWebSearch?: boolean;
}
94
+
95
/** Generation constraints for a video model (aspect ratios, durations, audio support, etc.). */
interface VideoConstraints {
    /** Input modality of the model; distinguishes text-to-video from image-to-video. */
    model_type?: 'text-to-video' | 'image-to-video' | 'video';
    aspect_ratios?: string[];
    resolutions?: string[];
    /** Supported clip durations as strings (may be bare numbers or include units). */
    durations?: string[];
    audio?: boolean;
    audio_configurable?: boolean;
    audio_input?: boolean;
    video_input?: boolean;
}
/** A video generation model. Note: no pricing data — the Venice API omits it for video models. */
interface VideoModel extends BaseModel {
    type: 'video';
    constraints?: VideoConstraints;
    model_sets?: string[];
}
112
+
113
/** Flat USD price per inpaint generation. */
interface InpaintPricing {
    generation?: number;
}
/** Generation constraints for an inpainting model (aspect ratios, image combining). */
interface InpaintConstraints {
    aspectRatios?: string[];
    combineImages?: boolean;
}
/** An image inpainting/editing model with pricing and generation constraints. */
interface InpaintModel extends BaseModel {
    type: 'inpaint';
    pricing: InpaintPricing;
    constraints?: InpaintConstraints;
}

/** Per-token USD prices for an embedding model. */
interface EmbeddingPricing {
    input?: number;
    output?: number;
}
/** A text embedding model with per-token pricing. */
interface EmbeddingModel extends BaseModel {
    type: 'embedding';
    pricing: EmbeddingPricing;
}

/** Per-token USD price for TTS input text. */
interface TtsPricing {
    input?: number;
}
/** A text-to-speech model with pricing and available voice options. */
interface TtsModel extends BaseModel {
    type: 'tts';
    pricing: TtsPricing;
    voices?: string[];
}

/** USD price per second of input audio. */
interface AsrPricing {
    per_audio_second?: number;
}
/** An automatic speech recognition (speech-to-text) model with per-second pricing. */
interface AsrModel extends BaseModel {
    type: 'asr';
    pricing: AsrPricing;
}

/** Flat USD price per upscale generation. */
interface UpscalePricing {
    generation?: number;
}
/** An image upscaling model with flat per-generation pricing. */
interface UpscaleModel extends BaseModel {
    type: 'upscale';
    pricing: UpscalePricing;
}

/** Discriminated union of all supported model types. Use `model.type` to narrow. */
type AnyModel = TextModel | ImageModel | VideoModel | InpaintModel | EmbeddingModel | TtsModel | AsrModel | UpscaleModel;
168
+
169
/** Safely coerce an unknown value to a number. Returns undefined for non-numeric/NaN
 * input, and for null, undefined, or the empty string. */
declare function toNum(v: unknown): number | undefined;
/** Extract shared BaseModel fields from a raw API response object.
 * Venice nests most metadata under model_spec; other providers use top-level fields.
 * This helper checks both locations with model_spec taking priority for Venice-specific fields.
 * @throws Error when the raw object has neither an `id` nor a `model_id` field. */
declare function extractBaseFields(raw: Record<string, unknown>): Omit<BaseModel, 'type'>;

/** Known model ID patterns for heuristic type inference.
 * Ordered — the first matching pattern wins.
 * Used when the API response lacks an explicit `type` field (non-Venice providers).
 * Exported so consumers can inspect or extend. */
declare const MODEL_ID_TYPE_PATTERNS: Array<[RegExp, ModelType]>;
/** Infer model type from its ID using naming conventions.
 * Returns undefined if no pattern matches (caller should fall back to 'text'). */
declare function inferTypeFromId(id: string): ModelType | undefined;

/** Normalize a raw API response object into a TextModel. */
declare function normalizeTextModel(raw: Record<string, unknown>): TextModel;

/** Normalize a raw API response object into an ImageModel. */
declare function normalizeImageModel(raw: Record<string, unknown>): ImageModel;

/** Normalize a raw API response object into a VideoModel.
 * Note: Video models have NO pricing data from the Venice API. */
declare function normalizeVideoModel(raw: Record<string, unknown>): VideoModel;

/** Normalize a raw API response object into an InpaintModel. */
declare function normalizeInpaintModel(raw: Record<string, unknown>): InpaintModel;

/** Normalize a raw API response object into an EmbeddingModel.
 * Note: Venice embedding models have BOTH input and output pricing. */
declare function normalizeEmbeddingModel(raw: Record<string, unknown>): EmbeddingModel;

/** Normalize a raw API response object into a TtsModel. */
declare function normalizeTtsModel(raw: Record<string, unknown>): TtsModel;

/** Normalize a raw API response object into an AsrModel. */
declare function normalizeAsrModel(raw: Record<string, unknown>): AsrModel;

/** Normalize a raw API response object into an UpscaleModel. */
declare function normalizeUpscaleModel(raw: Record<string, unknown>): UpscaleModel;

/** Function that extracts the array of raw model objects from an API response body. */
type ResponseExtractor = (body: Record<string, unknown> | unknown[]) => Record<string, unknown>[];
/** Function that normalizes a single raw model object into an AnyModel. */
type ModelNormalizer = (raw: Record<string, unknown>) => AnyModel;
/** Default response extractor handling common API response shapes:
 * - Top-level array → return as-is
 * - `{ data: [...] }` (OpenAI standard) → return data
 * - `{ models: [...] }` → return models
 * - Fallback → empty array */
declare function defaultResponseExtractor(body: Record<string, unknown> | unknown[]): Record<string, unknown>[];
/** Default dispatching model normalizer.
 * Uses three-tier type resolution:
 * 1. Explicit `raw.type` field (Venice, custom providers)
 * 2. Heuristic inference from model ID patterns (OpenAI, OpenRouter, etc.)
 * 3. Fallback to 'text' — safe default for most providers */
declare function defaultModelNormalizer(raw: Record<string, unknown>): AnyModel;
226
+
227
/** Check whether a deprecation date is in the past.
 * Date-only ISO 8601 strings ("2025-01-15") are parsed as UTC midnight per the
 * ES spec, but we normalize explicitly to avoid any cross-engine ambiguity.
 * Returns false for invalid / unparseable date strings. */
declare function isDeprecated(dateStr: string): boolean;

/**
 * Formats a per-token price into a human-readable per-million-tokens string.
 *
 * Accepts the price as a per-token value (matching the OpenAI / OpenRouter
 * convention) and multiplies by 1,000,000 to display the per-million rate.
 *
 * @param value - Price per token as a number or numeric string.
 *                Accepts `undefined`, `null`, empty string, or `NaN` — all return `"—"`.
 * @returns A formatted dollar string (e.g. `"$30.00"`).
 * - Returns `"—"` for missing, empty, non-numeric, or negative input.
 * - Returns `"Free"` for exactly 0.
 * - Uses 2 decimal places when the per-million value is ≥ $0.01.
 * - Uses 6 decimal places when the per-million value is < $0.01
 *   (to preserve precision for very cheap models).
 *
 * @example
 * ```ts
 * formatPrice(0.00003)    // "$30.00" (per-token → per-million)
 * formatPrice("0.000015") // "$15.00"
 * formatPrice(1e-12)      // "$0.000001"
 * formatPrice(0)          // "Free"
 * formatPrice(undefined)  // "—"
 * ```
 *
 */
declare function formatPrice(value: string | number | undefined | null): string;
/**
 * Formats a token count into a compact human-readable string.
 *
 * @param tokens - Raw token count (e.g. `128000`, `1_000_000`).
 * @returns A compact string like `"128k"` or `"1M"`.
 * - Returns `"N/A"` for 0, negative, or NaN input.
 * - Values ≥ 1,000,000 are shown in millions (e.g. `"1M"`, `"1.5M"`).
 * - Values < 1,000 are shown as exact numbers (e.g. `500` → `"500"`).
 * - Values ≥ 1,000 and < 1,000,000 are divided by 1,000 and rounded to the nearest
 *   integer (e.g. `8192` → `"8k"`, `1000` → `"1k"`).
 *
 * @example
 * ```ts
 * formatContextLength(128000)    // "128k"
 * formatContextLength(1_000_000) // "1M"
 * formatContextLength(1_500_000) // "1.5M"
 * ```
 *
 */
declare function formatContextLength(tokens: number): string;
/**
 * Format a flat USD price (per-generation, per-inpaint, per-upscale).
 * Unlike formatPrice which handles per-token values, this formats absolute USD amounts.
 * Returns "—" for undefined, null, or NaN input.
 * Examples: 0.04 → "$0.04", 0.18 → "$0.18", 0 → "$0.00"
 */
declare function formatFlatPrice(usd: number | undefined | null): string;
/**
 * Format a per-audio-second price.
 * Values below one cent are shown with 4 decimal places to preserve precision.
 * Examples: 0.01 → "$0.01 / sec", 0.006 → "$0.0060 / sec"
 */
declare function formatAudioPrice(perSecondUsd: number | undefined | null): string;
/**
 * Format an array of duration strings into a range.
 * Examples: ["5", "10", "30"] → "5s – 30s", ["10"] → "10s", [] → "—"
 * Handles both numeric strings and strings that may already have units.
 */
declare function formatDuration(durations: string[] | undefined | null): string;
/**
 * Format an array of resolution strings into a comma-separated list.
 * Examples: ["720p", "1080p", "4K"] → "720p, 1080p, 4K", [] → "—"
 */
declare function formatResolutions(resolutions: string[] | undefined | null): string;
/**
 * Format an array of aspect ratio strings into a comma-separated list.
 * Examples: ["16:9", "9:16", "1:1"] → "16:9, 9:16, 1:1", [] → "—"
 */
declare function formatAspectRatios(ratios: string[] | undefined | null): string;
303
+
304
+ export { type AnyModel, type AsrModel, type AsrPricing, type BaseModel, type Deprecation, type EmbeddingModel, type EmbeddingPricing, type ImageConstraints, type ImageModel, type ImagePricing, type InpaintConstraints, type InpaintModel, type InpaintPricing, MODEL_ID_TYPE_PATTERNS, type ModelNormalizer, type ModelType, type ResponseExtractor, type TextCapabilities, type TextConstraints, type TextModel, type TextPricing, type TtsModel, type TtsPricing, type UpscaleModel, type UpscalePricing, type VideoConstraints, type VideoModel, defaultModelNormalizer, defaultResponseExtractor, extractBaseFields, formatAspectRatios, formatAudioPrice, formatContextLength, formatDuration, formatFlatPrice, formatPrice, formatResolutions, inferTypeFromId, isDeprecated, normalizeAsrModel, normalizeEmbeddingModel, normalizeImageModel, normalizeInpaintModel, normalizeTextModel, normalizeTtsModel, normalizeUpscaleModel, normalizeVideoModel, toNum };
package/dist/utils.js ADDED
@@ -0,0 +1,371 @@
1
// src/utils/normalizers/type-inference.ts
// Ordered (pattern, type) pairs for heuristic type inference from model IDs.
// The first matching pattern wins; patterns are case-insensitive and anchored
// to word boundaries so e.g. "whisper-large" matches but "swhisperx" does not.
var MODEL_ID_TYPE_PATTERNS = [
  [/\b(embed|embedding)\b/i, "embedding"],
  [/\b(dall-e|stable-diffusion|sdxl|midjourney|flux)\b/i, "image"],
  [/\b(tts)\b/i, "tts"],
  [/\b(whisper|asr)\b/i, "asr"],
  [/\b(sora|video|wan)\b/i, "video"],
  [/\b(inpaint)\b/i, "inpaint"],
  [/\b(upscale|esrgan)\b/i, "upscale"]
];
11
/** Infer a model type from its ID using the ordered naming-convention patterns.
 * Returns undefined when no pattern matches (caller falls back to "text"). */
function inferTypeFromId(id) {
  const hit = MODEL_ID_TYPE_PATTERNS.find(([pattern]) => pattern.test(id));
  return hit ? hit[1] : void 0;
}
17
+
18
+ // src/utils/normalizers/base.ts
19
/** Safely coerce an unknown value to a number.
 * null, undefined, and "" yield undefined (they would otherwise coerce to 0/NaN);
 * anything else is passed through Number(), with NaN mapped to undefined. */
function toNum(v) {
  if (v == null || v === "") return void 0;
  const parsed = Number(v);
  return Number.isNaN(parsed) ? void 0 : parsed;
}
24
/** Extract shared BaseModel fields from a raw API response object.
 * Venice nests most metadata under model_spec; other providers use top-level
 * fields. model_spec values take priority, with a top-level fallback.
 * Throws when the raw object has neither `id` nor `model_id`. */
function extractBaseFields(raw) {
  const spec = raw.model_spec;
  const id = raw.id || raw.model_id || "";
  if (!id) throw new Error("Model missing required id field");
  // Prefix before "/" (OpenRouter-style ids), else owned_by, else "Unknown".
  const provider = id.includes("/") ? id.split("/")[0] : raw.owned_by || "Unknown";
  return {
    id,
    name: spec?.name || raw.name || id,
    provider,
    // Unix seconds; default to "now" when the API omits the field.
    created: toNum(raw.created) ?? Math.floor(Date.now() / 1e3),
    description: spec?.description || raw.description || void 0,
    // Venice-style fields: read from model_spec first, but fall back to
    // top-level values for providers that flatten them onto the raw object.
    betaModel: spec?.betaModel ?? raw.betaModel ?? void 0,
    privacy: spec?.privacy ?? raw.privacy ?? void 0,
    offline: spec?.offline ?? raw.offline ?? void 0,
    modelSource: spec?.modelSource ?? raw.modelSource ?? void 0,
    traits: spec?.traits ?? raw.traits ?? void 0,
    // Deprecation info — lifecycle field, can appear on any model type.
    deprecation: spec?.deprecation ?? void 0,
    // CLIENT-SIDE OVERLAY: is_favorite is never returned by any API;
    // the UI hydrates the real value from localStorage at render time.
    is_favorite: false
  };
}
47
+
48
+ // src/utils/normalizers/text.ts
49
/** Normalize a raw API response object into a TextModel.
 * Pricing is resolved from the first present source, in priority order:
 *   1. raw.pricing (per-token numbers/strings)
 *   2. raw.metadata.pricing (prompt/completion only)
 *   3. raw.cost (prompt/completion only)
 *   4. raw.model_spec.pricing (Venice: USD per million tokens, converted here)
 * The order is significant — an earlier source suppresses all later ones. */
function normalizeTextModel(raw) {
  const base = extractBaseFields(raw);
  const spec = raw.model_spec;
  const specPricing = spec?.pricing;
  // Context window: context_length (OpenRouter-style), context_window, or
  // Venice's model_spec.availableContextTokens; 0 when none is present.
  const context_length = toNum(raw.context_length) ?? toNum(raw.context_window) ?? toNum(spec?.availableContextTokens) ?? 0;
  const rawPricing = raw.pricing;
  const metaPricing = raw.metadata?.pricing;
  const costPricing = raw.cost;
  let prompt;
  let completion;
  let cache_input;
  let cache_write;
  if (rawPricing) {
    prompt = toNum(rawPricing.prompt);
    completion = toNum(rawPricing.completion);
    cache_input = toNum(rawPricing.cache_input);
    cache_write = toNum(rawPricing.cache_write);
  } else if (metaPricing) {
    // metadata.pricing carries only prompt/completion; cache fields stay undefined.
    prompt = toNum(metaPricing.prompt);
    completion = toNum(metaPricing.completion);
  } else if (costPricing) {
    prompt = toNum(costPricing.prompt);
    completion = toNum(costPricing.completion);
  } else if (specPricing) {
    const input = specPricing.input;
    const output = specPricing.output;
    const cacheInput = specPricing.cache_input;
    const cacheWrite = specPricing.cache_write;
    // Venice publishes { usd } per MILLION tokens; divide to get per-token USD.
    prompt = input?.usd !== void 0 ? input.usd / 1e6 : void 0;
    completion = output?.usd !== void 0 ? output.usd / 1e6 : void 0;
    cache_input = cacheInput?.usd !== void 0 ? cacheInput.usd / 1e6 : void 0;
    cache_write = cacheWrite?.usd !== void 0 ? cacheWrite.usd / 1e6 : void 0;
  }
  const caps = spec?.capabilities;
  const constraints = spec?.constraints;
  return {
    ...base,
    type: "text",
    context_length,
    pricing: {
      prompt,
      completion,
      cache_input,
      cache_write
    },
    // Capabilities/constraints exist only for Venice-style model_spec payloads.
    capabilities: caps ? {
      optimizedForCode: caps.optimizedForCode,
      supportsVision: caps.supportsVision,
      supportsReasoning: caps.supportsReasoning,
      supportsFunctionCalling: caps.supportsFunctionCalling,
      supportsResponseSchema: caps.supportsResponseSchema,
      supportsLogProbs: caps.supportsLogProbs,
      supportsAudioInput: caps.supportsAudioInput,
      supportsVideoInput: caps.supportsVideoInput,
      supportsWebSearch: caps.supportsWebSearch,
      quantization: caps.quantization
    } : void 0,
    constraints: constraints ? {
      temperature: constraints.temperature,
      top_p: constraints.top_p
    } : void 0
  };
}
112
+
113
+ // src/utils/normalizers/image.ts
114
/** Normalize a raw API response object into an ImageModel.
 * Pricing entries arrive as { usd } wrappers under model_spec.pricing. */
function normalizeImageModel(raw) {
  const base = extractBaseFields(raw);
  const spec = raw.model_spec;
  const specPricing = spec?.pricing;
  const specConstraints = spec?.constraints;
  const generationEntry = specPricing?.generation;
  const upscaleEntry = specPricing?.upscale;
  const rawResolutions = specPricing?.resolutions;
  // Flatten { label: { usd } } into { label: usd }, keeping only well-formed entries.
  let priceByResolution;
  if (rawResolutions && typeof rawResolutions === "object") {
    priceByResolution = Object.fromEntries(
      Object.entries(rawResolutions)
        .filter(([, entry]) => entry && typeof entry === "object" && "usd" in entry)
        .map(([label, entry]) => [label, entry.usd ?? 0])
    );
  }
  return {
    ...base,
    type: "image",
    pricing: {
      generation: generationEntry?.usd,
      upscale_2x: upscaleEntry?.["2x"]?.usd,
      upscale_4x: upscaleEntry?.["4x"]?.usd,
      resolutions: priceByResolution
    },
    constraints: specConstraints ? {
      promptCharacterLimit: specConstraints.promptCharacterLimit,
      steps: specConstraints.steps,
      widthHeightDivisor: specConstraints.widthHeightDivisor,
      aspectRatios: specConstraints.aspectRatios,
      defaultAspectRatio: specConstraints.defaultAspectRatio,
      resolutions: specConstraints.resolutions,
      defaultResolution: specConstraints.defaultResolution
    } : void 0,
    // supportsWebSearch lives at model_spec.supportsWebSearch (sibling of
    // constraints), NOT inside model_spec.constraints.
    supportsWebSearch: spec?.supportsWebSearch ?? void 0
  };
}
154
+
155
+ // src/utils/normalizers/video.ts
156
/** Normalize a raw API response object into a VideoModel.
 * Video models carry no pricing data from the Venice API. */
function normalizeVideoModel(raw) {
  const base = extractBaseFields(raw);
  const spec = raw.model_spec;
  const specConstraints = spec?.constraints;
  let mappedConstraints;
  if (specConstraints) {
    mappedConstraints = {
      model_type: specConstraints.model_type,
      aspect_ratios: specConstraints.aspect_ratios,
      resolutions: specConstraints.resolutions,
      durations: specConstraints.durations,
      audio: specConstraints.audio,
      audio_configurable: specConstraints.audio_configurable,
      audio_input: specConstraints.audio_input,
      video_input: specConstraints.video_input
    };
  }
  return {
    ...base,
    type: "video",
    constraints: mappedConstraints,
    model_sets: spec?.model_sets ?? raw.model_sets ?? void 0
  };
}
176
+
177
+ // src/utils/normalizers/inpaint.ts
178
/** Normalize a raw API response object into an InpaintModel. */
function normalizeInpaintModel(raw) {
  const base = extractBaseFields(raw);
  const spec = raw.model_spec;
  const specConstraints = spec?.constraints;
  let mappedConstraints;
  if (specConstraints) {
    mappedConstraints = {
      aspectRatios: specConstraints.aspectRatios,
      combineImages: specConstraints.combineImages
    };
  }
  return {
    ...base,
    type: "inpaint",
    // Flat per-generation USD amount, unwrapped from the { usd } envelope.
    pricing: {
      generation: spec?.pricing?.generation?.usd
    },
    constraints: mappedConstraints
  };
}
196
+
197
+ // src/utils/normalizers/embedding.ts
198
/** Normalize a raw API response object into an EmbeddingModel.
 * Venice embedding models carry BOTH input and output pricing. */
function normalizeEmbeddingModel(raw) {
  const base = extractBaseFields(raw);
  const specPricing = raw.model_spec?.pricing;
  // Venice publishes { usd } per MILLION tokens; convert to per-token USD.
  const perToken = (entry) => entry?.usd !== void 0 ? entry.usd / 1e6 : void 0;
  return {
    ...base,
    type: "embedding",
    pricing: {
      input: perToken(specPricing?.input),
      output: perToken(specPricing?.output)
    }
  };
}
213
+
214
+ // src/utils/normalizers/tts.ts
215
/** Normalize a raw API response object into a TtsModel. */
function normalizeTtsModel(raw) {
  const base = extractBaseFields(raw);
  const spec = raw.model_spec;
  const inputEntry = spec?.pricing?.input;
  // Venice publishes { usd } per MILLION tokens; convert to per-token USD.
  const inputPerToken = inputEntry?.usd !== void 0 ? inputEntry.usd / 1e6 : void 0;
  return {
    ...base,
    type: "tts",
    pricing: {
      input: inputPerToken
    },
    voices: spec?.voices ?? void 0
  };
}
229
+
230
+ // src/utils/normalizers/asr.ts
231
/** Normalize a raw API response object into an AsrModel. */
function normalizeAsrModel(raw) {
  const base = extractBaseFields(raw);
  // Per-second USD amount, unwrapped from the { usd } envelope.
  const perSecondEntry = raw.model_spec?.pricing?.per_audio_second;
  return {
    ...base,
    type: "asr",
    pricing: {
      per_audio_second: perSecondEntry?.usd
    }
  };
}
244
+
245
+ // src/utils/normalizers/upscale.ts
246
/** Normalize a raw API response object into an UpscaleModel. */
function normalizeUpscaleModel(raw) {
  const base = extractBaseFields(raw);
  // Flat per-generation USD amount, unwrapped from the { usd } envelope.
  const generationEntry = raw.model_spec?.pricing?.generation;
  return {
    ...base,
    type: "upscale",
    pricing: {
      generation: generationEntry?.usd
    }
  };
}
259
+
260
+ // src/utils/normalizers/index.ts
261
/** Extract the array of raw model objects from a response body.
 * Handles: a top-level array, { data: [...] } (OpenAI standard), and
 * { models: [...] }; anything else yields an empty array. */
function defaultResponseExtractor(body) {
  if (Array.isArray(body)) return body;
  if (body && typeof body === "object") {
    const { data, models } = body;
    if (Array.isArray(data)) return data;
    if (Array.isArray(models)) return models;
  }
  return [];
}
269
var VALID_TYPES = /* @__PURE__ */ new Set(["text", "image", "video", "inpaint", "embedding", "tts", "asr", "upscale"]);
/** Dispatching normalizer with three-tier type resolution:
 * 1. explicit raw.type (when it is a known type),
 * 2. heuristic inference from the model ID,
 * 3. fallback to "text". */
function defaultModelNormalizer(raw) {
  const id = raw.id ?? raw.model_id ?? "";
  const explicitType = raw.type;
  const resolvedType = (explicitType && VALID_TYPES.has(explicitType) ? explicitType : void 0) ?? inferTypeFromId(id) ?? "text";
  const normalizersByType = {
    text: normalizeTextModel,
    image: normalizeImageModel,
    video: normalizeVideoModel,
    inpaint: normalizeInpaintModel,
    embedding: normalizeEmbeddingModel,
    tts: normalizeTtsModel,
    asr: normalizeAsrModel,
    upscale: normalizeUpscaleModel
  };
  const normalize = normalizersByType[resolvedType] ?? normalizeTextModel;
  return normalize(raw);
}
295
+
296
+ // src/utils/helpers.ts
297
/** True when the deprecation date is strictly in the past.
 * Date-only strings are pinned to UTC midnight to avoid cross-engine
 * parsing ambiguity; unparseable strings yield false. */
function isDeprecated(dateStr) {
  const DATE_ONLY = /^\d{4}-\d{2}-\d{2}$/;
  const iso = DATE_ONLY.test(dateStr) ? `${dateStr}T00:00:00Z` : dateStr;
  const ts = Date.parse(iso);
  if (Number.isNaN(ts)) return false;
  return ts < Date.now();
}
302
+
303
+ // src/utils/format.ts
304
/** Format a per-token price as a per-million-tokens dollar string.
 * Missing/empty/non-numeric/negative input → em dash; exactly 0 → "Free".
 * Values under one cent per million keep 6 decimals for precision. */
function formatPrice(value) {
  const EM_DASH = "\u2014";
  if (value === void 0 || value === null || value === "") return EM_DASH;
  const perToken = typeof value === "string" ? parseFloat(value) : value;
  if (isNaN(perToken) || perToken < 0) return EM_DASH;
  if (perToken === 0) return "Free";
  const perMillion = perToken * 1e6;
  const decimals = perMillion < 0.01 ? 6 : 2;
  return "$" + perMillion.toFixed(decimals);
}
314
/** Compactly format a token count: millions as "NM"/"N.NM", thousands as
 * "Nk" (rounded), small values verbatim, and "N/A" for non-positive/NaN. */
function formatContextLength(tokens) {
  if (!tokens || tokens <= 0 || isNaN(tokens)) return "N/A";
  if (tokens >= 1e6) {
    const millions = tokens / 1e6;
    return Number.isInteger(millions) ? `${millions}M` : `${millions.toFixed(1)}M`;
  }
  if (tokens >= 1e3) {
    return `${Math.round(tokens / 1e3)}k`;
  }
  return `${Math.round(tokens)}`;
}
324
/** Format an absolute USD amount with two decimals; em dash when missing/NaN. */
function formatFlatPrice(usd) {
  const missing = usd === void 0 || usd === null || Number.isNaN(usd);
  return missing ? "\u2014" : "$" + usd.toFixed(2);
}
328
/** Format a per-audio-second USD price. Values below one cent use four
 * decimals to preserve precision; em dash when missing/NaN. */
function formatAudioPrice(perSecondUsd) {
  if (perSecondUsd === void 0 || perSecondUsd === null || Number.isNaN(perSecondUsd)) return "\u2014";
  const digits = perSecondUsd >= 0.01 ? 2 : 4;
  return "$" + perSecondUsd.toFixed(digits) + " / sec";
}
333
/** Render duration strings as a "min – max" seconds range.
 * Non-numeric entries are dropped (parseFloat tolerates trailing units);
 * a single value renders alone; no usable values → em dash. */
function formatDuration(durations) {
  if (!durations || durations.length === 0) return "\u2014";
  const seconds = [];
  for (const entry of durations) {
    const parsed = parseFloat(entry);
    if (!Number.isNaN(parsed)) seconds.push(parsed);
  }
  if (seconds.length === 0) return "\u2014";
  seconds.sort((a, b) => a - b);
  if (seconds.length === 1) return `${seconds[0]}s`;
  return `${seconds[0]}s \u2013 ${seconds[seconds.length - 1]}s`;
}
340
/** Join resolution labels with commas; em dash when missing or empty. */
function formatResolutions(resolutions) {
  const empty = !resolutions || resolutions.length === 0;
  return empty ? "\u2014" : resolutions.join(", ");
}
344
/** Join aspect-ratio labels with commas; em dash when missing or empty. */
function formatAspectRatios(ratios) {
  const empty = !ratios || ratios.length === 0;
  return empty ? "\u2014" : ratios.join(", ");
}
348
+ export {
349
+ MODEL_ID_TYPE_PATTERNS,
350
+ defaultModelNormalizer,
351
+ defaultResponseExtractor,
352
+ extractBaseFields,
353
+ formatAspectRatios,
354
+ formatAudioPrice,
355
+ formatContextLength,
356
+ formatDuration,
357
+ formatFlatPrice,
358
+ formatPrice,
359
+ formatResolutions,
360
+ inferTypeFromId,
361
+ isDeprecated,
362
+ normalizeAsrModel,
363
+ normalizeEmbeddingModel,
364
+ normalizeImageModel,
365
+ normalizeInpaintModel,
366
+ normalizeTextModel,
367
+ normalizeTtsModel,
368
+ normalizeUpscaleModel,
369
+ normalizeVideoModel,
370
+ toNum
371
+ };