@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,16 +11,6 @@ declare const openaiProviderOptions: z.ZodObject<{
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- */
- logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
@@ -53,7 +43,6 @@ declare const openaiProviderOptions: z.ZodObject<{
  }, "strip", z.ZodTypeAny, {
  user?: string | undefined;
  logitBias?: Record<number, number> | undefined;
- logprobs?: number | boolean | undefined;
  parallelToolCalls?: boolean | undefined;
  reasoningEffort?: "low" | "medium" | "high" | undefined;
  maxCompletionTokens?: number | undefined;
@@ -63,7 +52,6 @@ declare const openaiProviderOptions: z.ZodObject<{
  }, {
  user?: string | undefined;
  logitBias?: Record<number, number> | undefined;
- logprobs?: number | boolean | undefined;
  parallelToolCalls?: boolean | undefined;
  reasoningEffort?: "low" | "medium" | "high" | undefined;
  maxCompletionTokens?: number | undefined;
@@ -126,18 +114,6 @@ interface OpenAICompletionSettings {
  */
  logitBias?: Record<number, number>;
  /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
  The suffix that comes after a completion of inserted text.
  */
  suffix?: string;
@@ -366,6 +342,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
@@ -375,6 +352,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  instructions?: string | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
@@ -384,6 +362,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  instructions?: string | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
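Net effect of the typings changes above: the logprobs option is removed from the chat provider options, and the responses provider options gain a reasoningSummary string. A minimal sketch of the new surface from an AI SDK call site (assuming the generateText helper and providerOptions plumbing from the v5 canary of the ai package; the model id and option values are illustrative, and the schema accepts any string for reasoningSummary):

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('o3-mini'), // illustrative reasoning model id
  prompt: 'Why is the sky blue?',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      // added in canary.11: request a summary of the model's reasoning
      reasoningSummary: 'auto',
      // removed in canary.11 for chat/responses provider options:
      // logprobs: 5,
    },
  },
});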
@@ -216,19 +216,6 @@ function getResponseMetadata({
  };
  }
 
- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
- }
-
  // src/map-openai-finish-reason.ts
  function mapOpenAIFinishReason(finishReason) {
  switch (finishReason) {
@@ -256,16 +243,6 @@ var openaiProviderOptions = import_zod.z.object({
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- */
- logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
@@ -436,8 +413,6 @@ var OpenAIChatLanguageModel = class {
  model: this.modelId,
  // model specific settings:
  logit_bias: openaiOptions.logitBias,
- logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
- top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
  user: openaiOptions.user,
  parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
@@ -510,20 +485,6 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = void 0;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
- if (baseArgs.top_logprobs != null) {
- baseArgs.top_logprobs = void 0;
- warnings.push({
- type: "other",
- message: "topLogprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -623,7 +584,6 @@ var OpenAIChatLanguageModel = class {
  body: rawResponse
  },
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
@@ -656,7 +616,6 @@ var OpenAIChatLanguageModel = class {
  inputTokens: void 0,
  outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  const providerMetadata = { openai: {} };
  return {
@@ -721,13 +680,6 @@ var OpenAIChatLanguageModel = class {
  text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
@@ -814,7 +766,6 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
@@ -859,20 +810,6 @@ var openaiChatResponseSchema = import_zod3.z.object({
  ).nullish()
  }),
  index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
  finish_reason: import_zod3.z.string().nullish()
  })
  ),
@@ -900,20 +837,6 @@ var openaiChatChunkSchema = import_zod3.z.union([
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
  finish_reason: import_zod3.z.string().nullable().optional(),
  index: import_zod3.z.number()
  })
@@ -1036,20 +959,6 @@ ${user}:`]
  };
  }
 
- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
-
  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
@@ -1107,7 +1016,6 @@ var OpenAICompletionLanguageModel = class {
  // model specific settings:
  echo: this.settings.echo,
  logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
@@ -1153,7 +1061,6 @@ var OpenAICompletionLanguageModel = class {
  outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  request: { body: args },
  response: {
  ...getResponseMetadata(response),
@@ -1190,7 +1097,6 @@ var OpenAICompletionLanguageModel = class {
  inputTokens: void 0,
  outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1231,19 +1137,11 @@ var OpenAICompletionLanguageModel = class {
  text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage
  });
  }
@@ -1261,12 +1159,7 @@ var openaiCompletionResponseSchema = import_zod4.z.object({
  choices: import_zod4.z.array(
  import_zod4.z.object({
  text: import_zod4.z.string(),
- finish_reason: import_zod4.z.string(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
- }).nullish()
+ finish_reason: import_zod4.z.string()
  })
  ),
  usage: import_zod4.z.object({
@@ -1283,12 +1176,7 @@ var openaiCompletionChunkSchema = import_zod4.z.union([
  import_zod4.z.object({
  text: import_zod4.z.string(),
  finish_reason: import_zod4.z.string().nullish(),
- index: import_zod4.z.number(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
- }).nullish()
+ index: import_zod4.z.number()
  })
  ),
  usage: import_zod4.z.object({
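For the completion model, logprobs was a constructor setting (OpenAICompletionSettings) rather than a provider option, and the hunks above remove it from the request body, the result mapping, and both response schemas. A sketch of the affected call site (model id illustrative; this assumes the settings-argument style of the completion factory that this bundle's constructor(modelId, settings, config) implies):

import { openai } from '@ai-sdk/openai';

const model = openai.completion('gpt-3.5-turbo-instruct', {
  logitBias: { 50256: -100 }, // still supported
  // logprobs: 3,             // removed from OpenAICompletionSettings in canary.11
});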
@@ -2043,8 +1931,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -2140,7 +2035,13 @@ var OpenAIResponsesLanguageModel = class {
  type: import_zod10.z.literal("computer_call")
  }),
  import_zod10.z.object({
- type: import_zod10.z.literal("reasoning")
+ type: import_zod10.z.literal("reasoning"),
+ summary: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("summary_text"),
+ text: import_zod10.z.string()
+ })
+ )
  })
  ])
  ),
@@ -2154,6 +2055,14 @@ var OpenAIResponsesLanguageModel = class {
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
@@ -2294,6 +2203,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "text",
  text: value.delta
  });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ reasoningType: "text",
+ text: value.delta
+ });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
  hasToolCalls = true;
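With the new branch above, reasoning summary deltas stream out as parts of the shape { type: 'reasoning', reasoningType: 'text', text: ... }. A consumption sketch, assuming the streamText helper from the v5 canary of the ai package surfaces these provider parts on fullStream with that shape (model id and option value illustrative):

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('o3-mini'), // illustrative reasoning model id
  prompt: 'Plan a 3-day trip to Kyoto.',
  providerOptions: {
    openai: { reasoningSummary: 'auto' }, // opt in to summaries
  },
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') {
    process.stdout.write(part.text); // summary text, streamed as deltas
  }
}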
@@ -2418,6 +2333,13 @@ var responseAnnotationAddedSchema = import_zod10.z.object({
  title: import_zod10.z.string()
  })
  });
+ var responseReasoningSummaryTextDeltaSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod10.z.string(),
+ output_index: import_zod10.z.number(),
+ summary_index: import_zod10.z.number(),
+ delta: import_zod10.z.string()
+ });
  var openaiResponsesChunkSchema = import_zod10.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
@@ -2426,6 +2348,7 @@ var openaiResponsesChunkSchema = import_zod10.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryTextDeltaSchema,
  import_zod10.z.object({ type: import_zod10.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
@@ -2450,6 +2373,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2479,7 +2405,8 @@ var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
  user: import_zod10.z.string().nullish(),
  reasoningEffort: import_zod10.z.string().nullish(),
  strictSchemas: import_zod10.z.boolean().nullish(),
- instructions: import_zod10.z.string().nullish()
+ instructions: import_zod10.z.string().nullish(),
+ reasoningSummary: import_zod10.z.string().nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {