kimi-vercel-ai-sdk-provider 0.3.0 → 0.4.0

@@ -205,11 +205,30 @@ export class KimiChatLanguageModel implements LanguageModelV3 {
     messages.unshift({ role: 'system', content: toolChoiceSystemMessage });
   }
 
+  // Apply model-specific defaults and constraints
+  const caps = this.capabilities;
+
+  // Resolve temperature: thinking models require locked temperature
+  let resolvedTemperature = temperature;
+  if (caps.temperatureLocked && caps.defaultTemperature !== undefined) {
+    if (temperature !== undefined && temperature !== caps.defaultTemperature) {
+      warnings.push({
+        type: 'compatibility',
+        feature: 'temperature',
+        details: `Thinking models require temperature=${caps.defaultTemperature}. Your value (${temperature}) will be overridden.`
+      });
+    }
+    resolvedTemperature = caps.defaultTemperature;
+  }
+
+  // Resolve max_tokens: use model default if not specified
+  const resolvedMaxTokens = maxOutputTokens ?? caps.defaultMaxOutputTokens;
+
   const body = removeUndefinedEntries({
     model: this.modelId,
     messages,
-    max_tokens: maxOutputTokens,
-    temperature,
+    max_tokens: resolvedMaxTokens,
+    temperature: resolvedTemperature,
     top_p: topP,
     frequency_penalty: frequencyPenalty,
     presence_penalty: presencePenalty,
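The same defaulting rules can be previewed outside the request path with the exported `inferModelCapabilities` helper. A minimal sketch (the `resolveParams` helper is illustrative, not part of the package, and the root-level import path assumes the core exports are re-exported from the package entry point):

```ts
import { inferModelCapabilities } from 'kimi-vercel-ai-sdk-provider';

// Illustrative re-implementation of the 0.4.0 resolution rules shown above.
function resolveParams(modelId: string, temperature?: number, maxOutputTokens?: number) {
  const caps = inferModelCapabilities(modelId);
  const warnings: string[] = [];

  // Thinking models lock temperature to their default and warn on conflicts
  let resolvedTemperature = temperature;
  if (caps.temperatureLocked && caps.defaultTemperature !== undefined) {
    if (temperature !== undefined && temperature !== caps.defaultTemperature) {
      warnings.push(`temperature ${temperature} overridden to ${caps.defaultTemperature}`);
    }
    resolvedTemperature = caps.defaultTemperature;
  }

  return {
    temperature: resolvedTemperature,
    // Fall back to the model's default when the caller sets no limit
    maxTokens: maxOutputTokens ?? caps.defaultMaxOutputTokens,
    warnings
  };
}

resolveParams('kimi-k2.5-thinking', 0.3);
// => { temperature: 1, maxTokens: 32768, warnings: ['temperature 0.3 overridden to 1'] }
```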
package/src/core/index.ts CHANGED
@@ -13,7 +13,7 @@ export type {
   KimiTokenUsage
 } from './types';
 // Utilities
-export type { KimiExtendedUsage } from './utils';
+export type { KimiExtendedUsage, ReasoningAnalysis } from './utils';
 // Errors
 export {
   KimiAuthenticationError,
@@ -26,11 +26,18 @@ export {
   kimiErrorSchema,
   kimiFailedResponseHandler
 } from './errors';
-export { inferModelCapabilities } from './types';
 export {
+  STANDARD_MODEL_DEFAULT_MAX_TOKENS,
+  THINKING_MODEL_DEFAULT_MAX_TOKENS,
+  THINKING_MODEL_TEMPERATURE,
+  inferModelCapabilities
+} from './types';
+export {
+  analyzeReasoningPreservation,
   convertKimiUsage,
   extractMessageContent,
   getKimiRequestId,
   getResponseMetadata,
-  mapKimiFinishReason
+  mapKimiFinishReason,
+  recommendThinkingModel
 } from './utils';
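For consumers, the new constants and helpers land alongside the existing utilities. A short import sketch (assuming the core barrel is re-exported from the package root):

```ts
import {
  STANDARD_MODEL_DEFAULT_MAX_TOKENS, // 4096
  THINKING_MODEL_DEFAULT_MAX_TOKENS, // 32768
  THINKING_MODEL_TEMPERATURE,        // 1.0
  analyzeReasoningPreservation,
  inferModelCapabilities,
  recommendThinkingModel,
  type ReasoningAnalysis
} from 'kimi-vercel-ai-sdk-provider';
```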
package/src/core/types.ts CHANGED
@@ -70,18 +70,68 @@ export interface KimiModelCapabilities {
    * Whether the model supports structured outputs.
    */
   structuredOutputs?: boolean;
+
+  /**
+   * Default temperature for the model.
+   * Thinking models require temperature=1.0 for optimal reasoning.
+   */
+  defaultTemperature?: number;
+
+  /**
+   * Whether temperature is locked (cannot be changed).
+   * Thinking models have this set to true.
+   */
+  temperatureLocked?: boolean;
+
+  /**
+   * Default max output tokens for the model.
+   * Thinking models need higher limits to avoid truncated reasoning.
+   */
+  defaultMaxOutputTokens?: number;
 }
 
+/**
+ * Default temperature for thinking models.
+ * Kimi thinking models require temperature=1.0 for optimal reasoning quality.
+ */
+export const THINKING_MODEL_TEMPERATURE = 1.0;
+
+/**
+ * Default max output tokens for thinking models.
+ * Higher limit ensures reasoning traces aren't truncated.
+ */
+export const THINKING_MODEL_DEFAULT_MAX_TOKENS = 32768;
+
+/**
+ * Default max output tokens for standard models.
+ */
+export const STANDARD_MODEL_DEFAULT_MAX_TOKENS = 4096;
+
 /**
  * Infer model capabilities from the model ID.
  *
  * @param modelId - The model identifier
  * @returns Inferred capabilities based on model name patterns
  *
+ * @remarks
+ * This function automatically detects model capabilities and sets
+ * appropriate defaults:
+ * - Thinking models (`-thinking` suffix) get temperature=1.0 locked
+ * - Thinking models get 32k default max_tokens to avoid truncation
+ * - K2.5 models get video input support
+ *
  * @example
  * ```ts
  * const caps = inferModelCapabilities('kimi-k2.5-thinking');
- * // { thinking: true, alwaysThinking: true, videoInput: true, ... }
+ * // {
+ * //   thinking: true,
+ * //   alwaysThinking: true,
+ * //   videoInput: true,
+ * //   temperatureLocked: true,
+ * //   defaultTemperature: 1.0,
+ * //   defaultMaxOutputTokens: 32768,
+ * //   ...
+ * // }
  * ```
  */
 export function inferModelCapabilities(modelId: string): KimiModelCapabilities {
@@ -96,7 +146,12 @@ export function inferModelCapabilities(modelId: string): KimiModelCapabilities {
     maxContextSize: 256_000, // 256k context window
     toolCalling: true,
     jsonMode: true,
-    structuredOutputs: true
+    structuredOutputs: true,
+    // Thinking models require temperature=1.0 for optimal reasoning
+    defaultTemperature: isThinkingModel ? THINKING_MODEL_TEMPERATURE : undefined,
+    temperatureLocked: isThinkingModel,
+    // Thinking models need higher token limits to avoid truncated reasoning
+    defaultMaxOutputTokens: isThinkingModel ? THINKING_MODEL_DEFAULT_MAX_TOKENS : STANDARD_MODEL_DEFAULT_MAX_TOKENS
   };
 }
 
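Taken together, capability inference now diverges between thinking and standard models. A sketch of the expected outputs (`kimi-k2-instruct` is a hypothetical non-thinking model ID used only to contrast the two branches; detection keys on the `-thinking` suffix):

```ts
import { inferModelCapabilities } from 'kimi-vercel-ai-sdk-provider';

const thinking = inferModelCapabilities('kimi-k2.5-thinking');
thinking.temperatureLocked;      // true
thinking.defaultTemperature;     // 1.0
thinking.defaultMaxOutputTokens; // 32768

const standard = inferModelCapabilities('kimi-k2-instruct'); // hypothetical model ID
standard.temperatureLocked;      // false
standard.defaultTemperature;     // undefined
standard.defaultMaxOutputTokens; // 4096
```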
package/src/core/utils.ts CHANGED
@@ -208,3 +208,141 @@ export function extractMessageContent(message: {
 
   return { text, reasoning };
 }
+
+// ============================================================================
+// Multi-turn Reasoning Utilities
+// ============================================================================
+
+/**
+ * Information about reasoning content in a conversation.
+ */
+export interface ReasoningAnalysis {
+  /** Total number of messages with reasoning content */
+  messagesWithReasoning: number;
+  /** Total reasoning tokens (estimated by character count / 4) */
+  estimatedReasoningTokens: number;
+  /** Whether reasoning is properly preserved in the conversation */
+  isPreserved: boolean;
+  /** Messages that are missing expected reasoning content */
+  missingReasoningIndices: number[];
+}
+
+/**
+ * Analyze reasoning content preservation in a conversation.
+ *
+ * This utility helps verify that reasoning content is being properly
+ * preserved across multi-turn conversations with thinking models.
+ * Kimi requires reasoning content to be maintained in the message
+ * history for logical continuity in agentic/tool-calling scenarios.
+ *
+ * @param messages - Array of messages to analyze
+ * @returns Analysis of reasoning preservation
+ *
+ * @example
+ * ```ts
+ * const analysis = analyzeReasoningPreservation(messages);
+ * if (!analysis.isPreserved) {
+ *   console.warn('Reasoning content missing from messages:', analysis.missingReasoningIndices);
+ * }
+ * ```
+ */
+export function analyzeReasoningPreservation(
+  messages: Array<{
+    role: string;
+    content?: unknown;
+    reasoning_content?: string | null;
+    reasoning?: string | null;
+  }>
+): ReasoningAnalysis {
+  let messagesWithReasoning = 0;
+  let totalReasoningChars = 0;
+  const missingReasoningIndices: number[] = [];
+
+  // Track whether we've seen a tool call that should have reasoning preserved
+  let expectReasoningAfterToolCall = false;
+
+  for (let i = 0; i < messages.length; i++) {
+    const message = messages[i];
+
+    if (message.role === 'assistant') {
+      const { reasoning } = extractMessageContent(message);
+
+      if (reasoning.length > 0) {
+        messagesWithReasoning++;
+        totalReasoningChars += reasoning.length;
+        expectReasoningAfterToolCall = false;
+      } else if (expectReasoningAfterToolCall) {
+        // This assistant message should have reasoning from the previous turn
+        missingReasoningIndices.push(i);
+      }
+
+      // Check if this message has tool calls
+      if ('tool_calls' in message && Array.isArray(message.tool_calls) && message.tool_calls.length > 0) {
+        expectReasoningAfterToolCall = true;
+      }
+    } else if (message.role === 'tool') {
+      // After a tool response, we expect the next assistant message to potentially have reasoning
+      expectReasoningAfterToolCall = true;
+    }
+  }
+
+  return {
+    messagesWithReasoning,
+    estimatedReasoningTokens: Math.ceil(totalReasoningChars / 4),
+    isPreserved: missingReasoningIndices.length === 0,
+    missingReasoningIndices
+  };
+}
+
+/**
+ * Check if a conversation is suitable for thinking models.
+ *
+ * Thinking models work best with:
+ * - Complex reasoning tasks
+ * - Multi-step problem solving
+ * - Tasks requiring chain-of-thought
+ *
+ * This helper provides guidance on whether a thinking model would benefit
+ * the conversation.
+ *
+ * @param messageCount - Number of messages in the conversation
+ * @param hasToolCalls - Whether the conversation includes tool calls
+ * @param estimatedComplexity - Estimated task complexity (0-1)
+ * @returns Recommendation on using thinking models
+ */
+export function recommendThinkingModel(
+  messageCount: number,
+  hasToolCalls: boolean,
+  estimatedComplexity: number
+): { recommended: boolean; reason: string } {
+  // Thinking models are recommended for:
+  // 1. Complex tasks (complexity > 0.5)
+  // 2. Agentic scenarios with tool calls
+  // 3. Multi-turn conversations where reasoning continuity matters
+
+  if (estimatedComplexity > 0.7) {
+    return {
+      recommended: true,
+      reason: 'High complexity task benefits from extended reasoning'
+    };
+  }
+
+  if (hasToolCalls && messageCount > 2) {
+    return {
+      recommended: true,
+      reason: 'Multi-turn tool usage benefits from reasoning preservation'
+    };
+  }
+
+  if (estimatedComplexity > 0.5) {
+    return {
+      recommended: true,
+      reason: 'Moderate complexity may benefit from reasoning'
+    };
+  }
+
+  return {
+    recommended: false,
+    reason: 'Standard model sufficient for this task'
+  };
+}
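A worked example of both utilities on a short agentic exchange. A minimal sketch, assuming `reasoning_content` is the field `extractMessageContent` reads for assistant reasoning (the message objects below are illustrative):

```ts
import {
  analyzeReasoningPreservation,
  recommendThinkingModel
} from 'kimi-vercel-ai-sdk-provider';

const messages = [
  { role: 'user', content: 'Plan a 3-step data migration.' },
  {
    role: 'assistant',
    content: '',
    reasoning_content: 'First inventory the schemas, then...',
    tool_calls: [{ id: 'call_1', type: 'function', function: { name: 'list_tables', arguments: '{}' } }]
  },
  { role: 'tool', content: '["users", "orders"]' },
  // The follow-up assistant turn dropped its reasoning_content
  { role: 'assistant', content: 'Found 2 tables; migrating users first.' }
];

const analysis = analyzeReasoningPreservation(messages);
// analysis.isPreserved === false
// analysis.missingReasoningIndices === [3]

recommendThinkingModel(messages.length, true, 0.4);
// => { recommended: true, reason: 'Multi-turn tool usage benefits from reasoning preservation' }
```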
@@ -4,6 +4,7 @@
  * @module
  */
 
+import { type FileCache, type FileCacheEntry, generateCacheKey, getDefaultFileCache } from './file-cache';
 import {
   getExtensionFromPath,
   getMediaTypeFromExtension,
@@ -64,6 +65,13 @@ export interface ProcessAttachmentsOptions {
   uploadImages?: boolean;
   /** Whether to delete files after extraction (cleanup) */
   cleanupAfterExtract?: boolean;
+  /**
+   * Enable caching of uploaded files.
+   * When true, uses the default global cache.
+   * When a FileCache instance, uses that cache.
+   * @default false
+   */
+  cache?: boolean | FileCache;
 }
 
 // ============================================================================
@@ -101,9 +109,13 @@ export async function processAttachments(options: ProcessAttachmentsOptions): Pr
     clientConfig,
     autoUploadDocuments = true,
     uploadImages = false,
-    cleanupAfterExtract = false
+    cleanupAfterExtract = false,
+    cache = false
   } = options;
 
+  // Resolve cache instance
+  const cacheInstance = cache === true ? getDefaultFileCache() : cache === false ? null : cache;
+
   const results: ProcessedAttachment[] = [];
   const client = new KimiFileClient(clientConfig);
 
@@ -112,7 +124,8 @@ export async function processAttachments(options: ProcessAttachmentsOptions): Pr
       const processed = await processAttachment(attachment, client, {
         autoUploadDocuments,
         uploadImages,
-        cleanupAfterExtract
+        cleanupAfterExtract,
+        cache: cacheInstance
       });
       results.push(processed);
     } catch (error) {
@@ -134,7 +147,12 @@ export async function processAttachments(options: ProcessAttachmentsOptions): Pr
 async function processAttachment(
   attachment: Attachment,
   client: KimiFileClient,
-  options: { autoUploadDocuments: boolean; uploadImages: boolean; cleanupAfterExtract: boolean }
+  options: {
+    autoUploadDocuments: boolean;
+    uploadImages: boolean;
+    cleanupAfterExtract: boolean;
+    cache: FileCache | null;
+  }
 ): Promise<ProcessedAttachment> {
   // Determine content type
   const contentType = resolveContentType(attachment);
@@ -196,14 +214,43 @@ async function processAttachment(
     };
   }
 
+  const filename = attachment.name ?? guessFilename(attachment, contentType);
+
+  // Check cache if enabled
+  if (options.cache) {
+    const cacheKey = generateCacheKey(data, filename);
+    const cached = options.cache.get(cacheKey);
+
+    if (cached) {
+      return {
+        original: attachment,
+        type: 'text-inject',
+        textContent: cached.content,
+        fileId: cached.fileId
+      };
+    }
+  }
+
   // Upload and extract content
   const result = await client.uploadAndExtract({
     data,
-    filename: attachment.name ?? guessFilename(attachment, contentType),
+    filename,
     mediaType: contentType,
     purpose: 'file-extract'
   });
 
+  // Store in cache if enabled (before cleanup)
+  if (options.cache && result.content) {
+    const cacheKey = generateCacheKey(data, filename);
+    const cacheEntry: FileCacheEntry = {
+      fileId: result.file.id,
+      content: result.content,
+      createdAt: Date.now(),
+      purpose: 'file-extract'
+    };
+    options.cache.set(cacheKey, cacheEntry);
+  }
+
   // Cleanup if requested
   if (options.cleanupAfterExtract && result.file.id) {
     try {
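Enabling the cache from the outside is a one-option change to `processAttachments`. A minimal sketch (the attachment shape, `clientConfig` fields, and root import path are assumptions for illustration; passing `cache: true` would use the global default cache instead):

```ts
import { FileCache, processAttachments } from 'kimi-vercel-ai-sdk-provider';

declare const pdfBytes: Uint8Array; // file contents loaded elsewhere

// A bounded cache shared across requests, instead of the global default
const cache = new FileCache({ maxSize: 50, ttlMs: 30 * 60 * 1000 });

const results = await processAttachments({
  attachments: [{ name: 'report.pdf', data: pdfBytes }], // illustrative shape
  clientConfig: { apiKey: process.env.KIMI_API_KEY },     // illustrative config
  cache // re-runs with the same bytes + filename skip the upload entirely
});
```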
@@ -0,0 +1,260 @@
+/**
+ * File content caching for efficient re-use of uploaded files.
+ * @module
+ */
+
+// ============================================================================
+// Types
+// ============================================================================
+
+/**
+ * Entry in the file cache.
+ */
+export interface FileCacheEntry {
+  /** The Kimi file ID */
+  fileId: string;
+  /** Extracted text content (for documents) */
+  content?: string;
+  /** Unix timestamp of creation */
+  createdAt: number;
+  /** File purpose */
+  purpose: 'file-extract' | 'image' | 'video';
+}
+
+/**
+ * Options for configuring the file cache.
+ */
+export interface FileCacheOptions {
+  /**
+   * Maximum number of entries in the cache.
+   * When exceeded, least recently used entries are evicted.
+   * @default 100
+   */
+  maxSize?: number;
+
+  /**
+   * Time-to-live for cache entries in milliseconds.
+   * Entries older than this are considered stale.
+   * @default 3600000 (1 hour)
+   */
+  ttlMs?: number;
+}
+
+// ============================================================================
+// LRU Cache Implementation
+// ============================================================================
+
+/**
+ * A simple LRU (Least Recently Used) cache for file content.
+ *
+ * This cache helps avoid re-uploading the same files multiple times
+ * by storing the mapping between content hashes and Kimi file IDs.
+ *
+ * @example
+ * ```ts
+ * const cache = new FileCache({ maxSize: 50, ttlMs: 30 * 60 * 1000 });
+ *
+ * // Check if we have this file cached
+ * const cached = cache.get(contentHash);
+ * if (cached) {
+ *   console.log('Using cached file:', cached.fileId);
+ * }
+ *
+ * // Store a new file
+ * cache.set(contentHash, {
+ *   fileId: 'file_abc123',
+ *   content: 'extracted text...',
+ *   purpose: 'file-extract',
+ *   createdAt: Date.now()
+ * });
+ * ```
+ */
+export class FileCache {
+  private readonly maxSize: number;
+  private readonly ttlMs: number;
+  private readonly cache: Map<string, FileCacheEntry>;
+
+  constructor(options: FileCacheOptions = {}) {
+    this.maxSize = options.maxSize ?? 100;
+    this.ttlMs = options.ttlMs ?? 3600000; // 1 hour
+    this.cache = new Map();
+  }
+
+  /**
+   * Get a cached entry by content hash.
+   * Returns undefined if not found or expired.
+   * Moves the entry to the end (most recently used).
+   */
+  get(contentHash: string): FileCacheEntry | undefined {
+    const entry = this.cache.get(contentHash);
+
+    if (!entry) {
+      return undefined;
+    }
+
+    // Check if entry has expired
+    if (this.isExpired(entry)) {
+      this.cache.delete(contentHash);
+      return undefined;
+    }
+
+    // Move to end (most recently used)
+    this.cache.delete(contentHash);
+    this.cache.set(contentHash, entry);
+
+    return entry;
+  }
+
+  /**
+   * Set a cache entry.
+   * Evicts the least recently used entry if cache is full.
+   */
+  set(contentHash: string, entry: FileCacheEntry): void {
+    // Delete existing entry to update position
+    this.cache.delete(contentHash);
+
+    // Evict oldest entries if at capacity
+    while (this.cache.size >= this.maxSize) {
+      const oldestKey = this.cache.keys().next().value;
+      if (oldestKey !== undefined) {
+        this.cache.delete(oldestKey);
+      } else {
+        break;
+      }
+    }
+
+    this.cache.set(contentHash, entry);
+  }
+
+  /**
+   * Check if an entry exists and is not expired.
+   */
+  has(contentHash: string): boolean {
+    return this.get(contentHash) !== undefined;
+  }
+
+  /**
+   * Delete a specific entry.
+   */
+  delete(contentHash: string): boolean {
+    return this.cache.delete(contentHash);
+  }
+
+  /**
+   * Clear all entries.
+   */
+  clear(): void {
+    this.cache.clear();
+  }
+
+  /**
+   * Get the current cache size.
+   */
+  get size(): number {
+    return this.cache.size;
+  }
+
+  /**
+   * Remove all expired entries.
+   */
+  prune(): number {
+    let pruned = 0;
+    for (const [key, entry] of this.cache) {
+      if (this.isExpired(entry)) {
+        this.cache.delete(key);
+        pruned++;
+      }
+    }
+    return pruned;
+  }
+
+  /**
+   * Check if an entry is expired.
+   */
+  private isExpired(entry: FileCacheEntry): boolean {
+    return Date.now() - entry.createdAt > this.ttlMs;
+  }
+}
+
+// ============================================================================
+// Hash Utilities
+// ============================================================================
+
+/**
+ * Generate a hash from file content for cache lookups.
+ * Uses a simple but fast hash algorithm suitable for deduplication.
+ *
+ * @param data - The file content as Uint8Array or string
+ * @returns A hex string hash
+ */
+export function generateContentHash(data: Uint8Array | string): string {
+  const bytes = typeof data === 'string' ? new TextEncoder().encode(data) : data;
+
+  // Simple FNV-1a hash (fast and good distribution for deduplication)
+  let hash = 2166136261; // FNV offset basis
+
+  for (let i = 0; i < bytes.length; i++) {
+    hash ^= bytes[i];
+    hash = Math.imul(hash, 16777619); // FNV prime
+  }
+
+  // Include length to differentiate files with same content hash but different lengths
+  hash ^= bytes.length;
+
+  // Convert to hex string
+  return (hash >>> 0).toString(16).padStart(8, '0');
+}
+
+/**
+ * Generate a more unique cache key that includes filename and size.
+ * This helps differentiate files that might have similar beginnings.
+ *
+ * @param data - The file content
+ * @param filename - The filename
+ * @returns A cache key string
+ */
+export function generateCacheKey(data: Uint8Array | string, filename: string): string {
+  const bytes = typeof data === 'string' ? new TextEncoder().encode(data) : data;
+  const contentHash = generateContentHash(data);
+  const normalizedFilename = filename.toLowerCase().replace(/[^a-z0-9.]/g, '_');
+
+  return `${contentHash}_${bytes.length}_${normalizedFilename}`;
+}
+
+// ============================================================================
+// Global Cache Instance
+// ============================================================================
+
+/**
+ * Default global file cache instance.
+ * This is used by the attachment processor when caching is enabled.
+ */
+let defaultCache: FileCache | null = null;
+
+/**
+ * Get the default global file cache.
+ * Creates one if it doesn't exist.
+ */
+export function getDefaultFileCache(): FileCache {
+  if (!defaultCache) {
+    defaultCache = new FileCache();
+  }
+  return defaultCache;
+}
+
+/**
+ * Set a custom default file cache.
+ * Useful for testing or custom configurations.
+ */
+export function setDefaultFileCache(cache: FileCache | null): void {
+  defaultCache = cache;
+}
+
+/**
+ * Clear the default file cache.
+ */
+export function clearDefaultFileCache(): void {
+  if (defaultCache) {
+    defaultCache.clear();
+  }
+}
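The cache primitives are exported directly, which makes key derivation inspectable and test isolation straightforward. A short sketch (the hash value shown is illustrative, and the root import path is an assumption):

```ts
import {
  FileCache,
  generateCacheKey,
  generateContentHash,
  getDefaultFileCache,
  setDefaultFileCache
} from 'kimi-vercel-ai-sdk-provider';

const data = new TextEncoder().encode('quarterly numbers...');
generateContentHash(data);               // e.g. '3f2a9c1b' (8-char hex FNV-1a digest)
generateCacheKey(data, 'Q3 Report.pdf'); // '<contentHash>_<byteLength>_q3_report.pdf'

// Tests can swap in an isolated cache and restore the default afterwards
setDefaultFileCache(new FileCache({ maxSize: 10, ttlMs: 1000 }));
// ...exercise code paths that pass `cache: true`...
setDefaultFileCache(null); // next getDefaultFileCache() lazily creates a fresh one

getDefaultFileCache().prune(); // evict expired entries, returns the count removed
```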
@@ -4,7 +4,22 @@
  * @module
  */
 
-export { type Attachment, type ProcessedAttachment, processAttachments } from './attachment-processor';
+export {
+  type Attachment,
+  type ProcessAttachmentsOptions,
+  type ProcessedAttachment,
+  processAttachments
+} from './attachment-processor';
+export {
+  FileCache,
+  type FileCacheEntry,
+  type FileCacheOptions,
+  clearDefaultFileCache,
+  generateCacheKey,
+  generateContentHash,
+  getDefaultFileCache,
+  setDefaultFileCache
+} from './file-cache';
 export {
   SUPPORTED_FILE_EXTENSIONS,
   SUPPORTED_MIME_TYPES,