@leikeduntech/leiai-js 4.0.2 → 4.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.d.ts +294 -26
- package/build/index.js +36 -101
- package/package.json +1 -1
package/build/index.d.ts
CHANGED
@@ -202,17 +202,29 @@ declare namespace openai {
      */
     role?: ChatCompletionRequestMessageRoleEnum;
     /**
-     * The contents of the message
-     * @type {string}
+     * The contents of the message. Can be a string or an array of content parts (text, images, etc.)
+     * @type {string | Array<any>}
      * @memberof ChatCompletionRequestMessage
      */
-    content?: string
+    content?: string | Array<any>;
     /**
      * The name of the user in a multi-user chat
      * @type {string}
      * @memberof ChatCompletionRequestMessage
      */
     name?: string;
+    /**
+     * Tool call ID for tool role messages
+     * @type {string}
+     * @memberof ChatCompletionRequestMessage
+     */
+    tool_call_id?: string;
+    /**
+     * Tool calls made by the assistant
+     * @type {Array<any>}
+     * @memberof ChatCompletionRequestMessage
+     */
+    tool_calls?: Array<any>;
     /**
      * Alibaba Cloud parameter: input = {prompt: 'Which park is closer to me?', history: [
      *
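As a usage sketch of the widened message type (the tool name, call id, and role cast below are invented for illustration and are not part of the package):

  const toolRound: openai.ChatCompletionRequestMessage[] = [
    { role: 'user', content: 'What is the weather in Paris?' },
    {
      role: 'assistant',
      content: '',
      // hypothetical tool call shape; the package types this loosely as Array<any>
      tool_calls: [{ id: 'call_1', type: 'function',
                     function: { name: 'get_weather', arguments: '{"city":"Paris"}' } }],
    },
    // the tool reply links back to the call via the newly typed tool_call_id
    { role: 'tool' as any, tool_call_id: 'call_1', content: '{"tempC":18}' },
  ];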
@@ -235,7 +247,6 @@ declare namespace openai {
      * ]}
      */
     input?: object;
-    tool_call_id?: string;
   }
   const ChatCompletionRequestMessageRoleEnum: {
     readonly System: 'system';
@@ -261,7 +272,31 @@ declare namespace openai {
      * @type {string}
      * @memberof ChatCompletionResponseMessage
      */
-    content: string;
+    content: string | null;
+    /**
+     * Tool calls made by the assistant
+     * @type {Array<any>}
+     * @memberof ChatCompletionResponseMessage
+     */
+    tool_calls?: Array<any> | null;
+    /**
+     * Deprecated: Use tool_calls instead
+     * @type {object}
+     * @memberof ChatCompletionResponseMessage
+     */
+    function_call?: object | null;
+    /**
+     * Refusal information
+     * @type {string | null}
+     * @memberof ChatCompletionResponseMessage
+     */
+    refusal?: string | null;
+    /**
+     * Annotations
+     * @type {Array<any>}
+     * @memberof ChatCompletionResponseMessage
+     */
+    annotations?: Array<any> | null;
   }
   const ChatCompletionResponseMessageRoleEnum: {
     readonly System: 'system';
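A minimal sketch of consuming the now-nullable response message (the function name is mine; only the fields typed above are assumed):

  function renderMessage(msg: openai.ChatCompletionResponseMessage): string {
    if (msg.refusal) return `refused: ${msg.refusal}`;                          // new field
    if (msg.tool_calls?.length) return `${msg.tool_calls.length} tool call(s)`; // new field
    return msg.content ?? ''; // content can now be null when tool calls are returned
  }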
@@ -276,7 +311,7 @@ declare namespace openai {
    */
   interface CreateChatCompletionRequest {
     /**
-     * ID of the model to use.
+     * ID of the model to use.
      * @type {string}
      * @memberof CreateChatCompletionRequest
      */
@@ -312,17 +347,29 @@ declare namespace openai {
      */
     stream?: boolean | null;
     /**
-     *
+     * Options for streaming response. Only set this when you set `stream: true`.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    stream_options?: object | null;
+    /**
+     * Up to 4 sequences where the API will stop generating further tokens.
      * @type {CreateChatCompletionRequestStop}
      * @memberof CreateChatCompletionRequest
      */
     stop?: CreateChatCompletionRequestStop;
     /**
-     * The maximum number of tokens allowed for the generated answer.
+     * The maximum number of tokens allowed for the generated answer. Deprecated in favor of `max_completion_tokens`.
      * @type {number}
      * @memberof CreateChatCompletionRequest
      */
-    max_tokens?: number;
+    max_tokens?: number | null;
+    /**
+     * An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    max_completion_tokens?: number | null;
     /**
      * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
      * @type {number}
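A hedged request sketch using the two new fields; the model name and the include_usage option are illustrative (stream_options is only typed as object here), and a messages field is assumed as in the OpenAI schema:

  const req: openai.CreateChatCompletionRequest = {
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Summarize this diff.' }],
    max_completion_tokens: 512,              // preferred over the deprecated max_tokens
    stream: true,
    stream_options: { include_usage: true }, // only set alongside stream: true
  };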
@@ -342,13 +389,148 @@ declare namespace openai {
      */
     logit_bias?: object | null;
     /**
-     *
+     * Whether to return log probabilities of the output tokens or not.
+     * @type {boolean}
+     * @memberof CreateChatCompletionRequest
+     */
+    logprobs?: boolean | null;
+    /**
+     * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    top_logprobs?: number | null;
+    /**
+     * A list of tools the model may call. You can provide either custom tools or function tools.
+     * @type {Array<any>}
+     * @memberof CreateChatCompletionRequest
+     */
+    tools?: Array<any> | null;
+    /**
+     * Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools.
+     * @type {string | object}
+     * @memberof CreateChatCompletionRequest
+     */
+    tool_choice?: string | object | null;
+    /**
+     * Whether to enable parallel function calling during tool use.
+     * @type {boolean}
+     * @memberof CreateChatCompletionRequest
+     */
+    parallel_tool_calls?: boolean | null;
+    /**
+     * Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    audio?: object | null;
+    /**
+     * Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `["text"]`. The `gpt-4o-audio-preview` model can also be used to generate audio.
+     * @type {Array<string>}
+     * @memberof CreateChatCompletionRequest
+     */
+    modalities?: Array<string> | null;
+    /**
+     * An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    response_format?: object | null;
+    /**
+     * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    metadata?: object | null;
+    /**
+     * A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    safety_identifier?: string | null;
+    /**
+     * Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    prompt_cache_key?: string | null;
+    /**
+     * The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    prompt_cache_retention?: string | null;
+    /**
+     * Constrains effort on reasoning for reasoning models. Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
      * @type {string}
      * @memberof CreateChatCompletionRequest
      */
-
-
+    reasoning_effort?: string | null;
+    /**
+     * Specifies the processing type used for serving the request. Options: 'auto', 'default', 'flex', 'priority'.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    service_tier?: string | null;
+    /**
+     * Whether or not to store the output of this chat completion request for use in model distillation or evals products.
+     * @type {boolean}
+     * @memberof CreateChatCompletionRequest
+     */
+    store?: boolean | null;
+    /**
+     * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. Currently supported values are `low`, `medium`, and `high`.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    verbosity?: string | null;
+    /**
+     * Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    prediction?: object | null;
+    /**
+     * This tool searches the web for relevant results to use in a response.
+     * @type {object}
+     * @memberof CreateChatCompletionRequest
+     */
+    web_search_options?: object | null;
+    /**
+     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Deprecated in favor of `safety_identifier` and `prompt_cache_key`.
+     * @type {string}
+     * @memberof CreateChatCompletionRequest
+     */
+    user?: string | null;
+    /**
+     * Deprecated: Use `tools` instead. A list of functions the model may generate JSON inputs for.
+     * @type {Array<any>}
+     * @memberof CreateChatCompletionRequest
+     */
+    functions?: Array<any> | null;
+    /**
+     * Deprecated: Use `tool_choice` instead. Controls which (if any) function is called by the model.
+     * @type {string | object}
+     * @memberof CreateChatCompletionRequest
+     */
+    function_call?: string | object | null;
+    /**
+     * Deprecated: Use `seed` instead. This feature is in Beta.
+     * @type {number}
+     * @memberof CreateChatCompletionRequest
+     */
+    seed?: number | null;
+    /**
+     * Custom file list (non-standard parameter)
+     * @type {Array<any>}
+     * @memberof CreateChatCompletionRequest
+     */
     fileList?: Array<any>;
+    /**
+     * Custom extended parameters (non-standard parameter)
+     * @type {any}
+     * @memberof CreateChatCompletionRequest
+     */
     extParams?: any;
   }
   /**
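To ground the new tool-calling surface, a sketch of a request that declares one function tool and forces it (tool and parameter names are invented; the package types tools and tool_choice loosely, so this is only one valid shape):

  const toolReq: openai.CreateChatCompletionRequest = {
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Weather in Paris?' }],
    tools: [{
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Look up current weather',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    }],
    tool_choice: { type: 'function', function: { name: 'get_weather' } },
    parallel_tool_calls: false,
  };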
@@ -364,45 +546,100 @@ declare namespace openai {
    */
   interface CreateChatCompletionResponse {
     /**
-     *
+     * A unique identifier for the chat completion.
      * @type {string}
      * @memberof CreateChatCompletionResponse
      */
     id?: string;
     /**
-     *
+     * The object type, which is always `chat.completion`.
      * @type {string}
      * @memberof CreateChatCompletionResponse
      */
     object?: string;
     /**
-     *
+     * The Unix timestamp (in seconds) of when the chat completion was created.
      * @type {number}
      * @memberof CreateChatCompletionResponse
      */
     created?: number;
     /**
-     *
+     * The model used for the chat completion.
      * @type {string}
      * @memberof CreateChatCompletionResponse
      */
     model?: string;
     /**
-     *
+     * A list of chat completion choices.
      * @type {Array<CreateChatCompletionResponseChoicesInner>}
      * @memberof CreateChatCompletionResponse
      */
     choices?: Array<CreateChatCompletionResponseChoicesInner>;
     /**
-     *
+     * Usage statistics for the completion request.
      * @type {CreateCompletionResponseUsage}
      * @memberof CreateChatCompletionResponse
      */
     usage?: CreateCompletionResponseUsage;
+    /**
+     * Specifies the processing type used for serving the request.
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
+    service_tier?: string | null;
+    /**
+     * System fingerprint representing the backend configuration.
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
+    system_fingerprint?: string | null;
+    /**
+     * The tools used in this completion.
+     * @type {Array<any>}
+     * @memberof CreateChatCompletionResponse
+     */
+    tools?: Array<any> | null;
+    /**
+     * The tool choice used in this completion.
+     * @type {string | object}
+     * @memberof CreateChatCompletionResponse
+     */
+    tool_choice?: string | object | null;
+    /**
+     * Metadata attached to the completion.
+     * @type {object}
+     * @memberof CreateChatCompletionResponse
+     */
+    metadata?: object | null;
+    /**
+     * Custom result field (non-standard)
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
     result?: string;
+    /**
+     * Custom is_truncated field (non-standard)
+     * @type {boolean}
+     * @memberof CreateChatCompletionResponse
+     */
     is_truncated?: boolean;
+    /**
+     * Custom need_clear_history field (non-standard)
+     * @type {boolean}
+     * @memberof CreateChatCompletionResponse
+     */
     need_clear_history?: boolean;
+    /**
+     * Custom output field (non-standard)
+     * @type {any}
+     * @memberof CreateChatCompletionResponse
+     */
     output?: any;
+    /**
+     * Custom request_id field (non-standard)
+     * @type {string}
+     * @memberof CreateChatCompletionResponse
+     */
     request_id?: string;
   }
   /**
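A short sketch of reading the new response metadata; all of these fields are optional, so each access is guarded:

  function logCompletionMeta(res: openai.CreateChatCompletionResponse): void {
    console.log(res.id, res.model,
                res.service_tier ?? 'default',
                res.system_fingerprint ?? 'n/a');
  }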
@@ -412,23 +649,29 @@ declare namespace openai {
    */
   interface CreateChatCompletionResponseChoicesInner {
     /**
-     *
+     * The index of the choice in the choices array.
      * @type {number}
      * @memberof CreateChatCompletionResponseChoicesInner
      */
     index?: number;
     /**
-     *
+     * The message generated by the model.
      * @type {ChatCompletionResponseMessage}
      * @memberof CreateChatCompletionResponseChoicesInner
      */
     message?: ChatCompletionResponseMessage;
     /**
-     *
+     * The reason the model stopped generating tokens.
      * @type {string}
      * @memberof CreateChatCompletionResponseChoicesInner
      */
-    finish_reason?: string;
+    finish_reason?: string | null;
+    /**
+     * Log probability information for the choice.
+     * @type {object}
+     * @memberof CreateChatCompletionResponseChoicesInner
+     */
+    logprobs?: object | null;
   }
   /**
    *
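Since finish_reason is now nullable, a sketch of detecting a tool-call turn; the 'tool_calls' literal follows the OpenAI convention and is not something this package enforces:

  function isToolTurn(c: openai.CreateChatCompletionResponseChoicesInner): boolean {
    return c.finish_reason === 'tool_calls' && !!c.message?.tool_calls?.length;
  }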
@@ -437,23 +680,49 @@ declare namespace openai {
    */
   interface CreateCompletionResponseUsage {
     /**
-     *
+     * Number of tokens in the prompt.
      * @type {number}
      * @memberof CreateCompletionResponseUsage
      */
     prompt_tokens: number;
     /**
-     *
+     * Number of tokens in the completion.
      * @type {number}
      * @memberof CreateCompletionResponseUsage
      */
     completion_tokens: number;
     /**
-     *
+     * Total number of tokens used.
      * @type {number}
      * @memberof CreateCompletionResponseUsage
      */
     total_tokens: number;
+    /**
+     * Details about prompt tokens (cached tokens, audio tokens, etc.)
+     * @type {object}
+     * @memberof CreateCompletionResponseUsage
+     */
+    prompt_tokens_details?: {
+      cached_tokens?: number;
+      audio_tokens?: number;
+    };
+    /**
+     * Details about completion tokens (reasoning tokens, audio tokens, etc.)
+     * @type {object}
+     * @memberof CreateCompletionResponseUsage
+     */
+    completion_tokens_details?: {
+      reasoning_tokens?: number;
+      audio_tokens?: number;
+      accepted_prediction_tokens?: number;
+      rejected_prediction_tokens?: number;
+    };
+    /**
+     * Whether the usage is estimated.
+     * @type {boolean}
+     * @memberof CreateCompletionResponseUsage
+     */
+    estimated?: boolean;
   }
 }
 
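A sketch of reading the new nested token details, all of which may be absent:

  function summarizeUsage(u: openai.CreateCompletionResponseUsage): string {
    const cached = u.prompt_tokens_details?.cached_tokens ?? 0;
    const reasoning = u.completion_tokens_details?.reasoning_tokens ?? 0;
    return `${u.total_tokens} total (${cached} cached prompt, ${reasoning} reasoning)`;
  }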
@@ -487,7 +756,6 @@ declare class ChatGPTAPI {
     * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
     */
    constructor(opts: ChatGPTAPIOptions);
-   pluginListMap(pluginList: any): any;
    /**
     * Sends a message to the OpenAI chat completions endpoint, waits for the response
     * to resolve, and returns the response.
package/build/index.js
CHANGED
@@ -225,19 +225,6 @@ var ChatGPTAPI = class {
       throw new Error('Invalid "fetch" is not a function');
     }
   }
-  pluginListMap(pluginList) {
-    const list = {
-      "dall-e-3": {
-        name: "DallE3Fun",
-        description: "\u4F7F\u7528DALL-E3\u6A21\u578B\u6839\u636E\u7528\u6237\u63CF\u8FF0\u63D0\u793A\u521B\u4F5C\u4E00\u5F20\u65B0\u7684\u56FE\u7247"
-      },
-      "dall-e-2": {
-        name: "DallE2Fun",
-        description: "\u4F7F\u7528DALL-E2\u6A21\u578B\u628A\u7528\u6237\u63D0\u4F9B\u7684\u56FE\u7247\u548C\u6839\u636E\u63D0\u793A\u8BCD\u91CD\u65B0\u753B\u56FE\u4FEE\u6539\u56FE\u7247\u7F16\u8F91\u56FE\u7247\u66FF\u6362\u56FE\u7247\u90E8\u4EFD\u5185\u5BB9\u6362\u6539\u91CD\u753B\u56FE\u7247"
-      }
-    };
-    return list[pluginList];
-  }
   /**
    * Sends a message to the OpenAI chat completions endpoint, waits for the response
    * to resolve, and returns the response.
@@ -345,7 +332,7 @@ var ChatGPTAPI = class {
     };
     const responseP = new Promise(
       async (resolve, reject) => {
-        var _a, _b, _c, _d, _e, _f, _g, _h
+        var _a, _b, _c, _d, _e, _f, _g, _h;
         let url = `${this._apiBaseUrl}/chat/completions`;
         const headers = {
           "Content-Type": "application/json",
@@ -373,12 +360,6 @@ var ChatGPTAPI = class {
         } else if (this._manufacturer.toLocaleLowerCase() === "anthropic") {
           url = this._apiBaseUrl;
         }
-        let pluginList;
-        if (completionParams.pluginList) {
-          pluginList = completionParams.pluginList;
-        }
-        if (typeof completionParams.pluginList !== "undefined")
-          delete completionParams.pluginList;
         if (typeof completionParams.fileList !== "undefined")
           delete completionParams.fileList;
         let body = {
@@ -391,73 +372,36 @@ var ChatGPTAPI = class {
         if (!isPositiveNumber(maxTokens)) {
           delete body.max_tokens;
         }
-        if (
-          if (body.model) delete body.model;
-          if (body.max_tokens) delete body.max_tokens;
-          if (body.temperature) delete body.temperature;
-        } else {
-          if (typeof body.plugins !== void 0) delete body.plugins;
-        }
+        if (typeof body.plugins !== void 0) delete body.plugins;
         if (["openai", "azure"].indexOf(this._manufacturer.toLowerCase()) > -1) {
-
-
-
-
-
-          funArr = [
-            {
-              type: "function",
-              function: {
-                name: "DallE2Fun",
-                description: this.pluginListMap(pluginList).description,
-                parameters: {
-                  type: "object",
-                  properties: {
-                    prompt: {
-                      type: "string",
-                      description: "\u7528\u6237\u5BF9\u9700\u8981\u7F16\u8F91\u7684\u56FE\u7247\u4FEE\u6539\u7684\u63D0\u793A\u5185\u5BB9"
-                    },
-                    image_url: {
-                      type: "string",
-                      description: "\u4ECE\u5BF9\u8BDD\u5386\u53F2\u8BB0\u5F55\u548C\u7528\u6237\u63D0\u793A\u8BCD\u91CC\u5339\u914D\u6700\u8FD1\u4E00\u6B21\u51FA\u73B0\u7684\u4EE5http\u5F00\u5934\u7684\u56FE\u7247\u7684\u94FE\u63A5\u5730\u5740"
-                    }
-                  },
-                  required: ["prompt", "image_url"]
-                }
-              }
-            },
-            {
-              type: "function",
-              function: {
-                name: "DallE3Fun",
-                description: this.pluginListMap(pluginList).description,
-                parameters: {
-                  type: "object",
-                  properties: {
-                    prompt: {
-                      type: "string",
-                      description: "\u7ED8\u56FE\u7684\u63D0\u793A\u8BCD"
-                    }
-                  },
-                  required: ["prompt"]
-                }
-              }
-            }
+          if (body.tools && Array.isArray(body.tools) && body.tools.length > 0) {
+            if (this._manufacturer.toLowerCase() === "azure") {
+              body.functions = body.tools.map((tool) => {
+                if (tool.type === "function" && tool.function) {
+                  return tool.function;
                 }
-
-          if (this._manufacturer.toLowerCase() === "azure") {
-            funArr = funArr.map((item) => {
-              return item.function;
-            });
-          }
-          body = Object.assign(body, {
-            [tools_key]: funArr,
-            [tool_choice_key]: "auto"
+                return tool;
               });
-
-
-
+              delete body.tools;
+              if (body.tool_choice) {
+                if (typeof body.tool_choice === "string") {
+                  body.function_call = body.tool_choice === "none" ? "none" : "auto";
+                } else if (body.tool_choice && typeof body.tool_choice === "object" && body.tool_choice.function) {
+                  body.function_call = { name: body.tool_choice.function.name };
+                }
+                delete body.tool_choice;
+              }
+            }
           }
-        }
+        }
+        if (["openai", "azure"].indexOf(this._manufacturer.toLowerCase()) === -1) {
+          if (body.tools) delete body.tools;
+          if (body.tool_choice) delete body.tool_choice;
+          if (body.functions) delete body.functions;
+          if (body.function_call) delete body.function_call;
+          if (body.parallel_tool_calls) delete body.parallel_tool_calls;
+        }
+        if (this._manufacturer.toLowerCase() === "aliyun") {
           body = Object.assign(body, {
             parameters: { result_format: "message" },
             input: { messages }
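The hunk above replaces the removed hard-coded DALL-E funArr with a generic conversion: for Azure, OpenAI-style tools collapse into the legacy functions field and tool_choice into function_call. A standalone sketch of that mapping (the types and sample values are mine):

  type ToolChoice = string | { type: string; function: { name: string } };
  const tools = [{ type: 'function', function: { name: 'get_weather', parameters: {} } }];
  const toolChoice: ToolChoice = { type: 'function', function: { name: 'get_weather' } };
  // tools -> functions: unwrap each function-type tool
  const functions = tools.map((t) => (t.type === 'function' && t.function ? t.function : t));
  // tool_choice -> function_call: strings map to "none"/"auto", objects keep the name
  const function_call = typeof toolChoice === 'string'
    ? (toolChoice === 'none' ? 'none' : 'auto')
    : { name: toolChoice.function.name };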
@@ -483,20 +427,11 @@ var ChatGPTAPI = class {
           delete body.model;
           delete body.max_tokens;
           headers["Authorization"] = signTencentHunyuan(body, url, keyList);
-        } else if (this._manufacturer.toLowerCase() === "baidu") {
-          if (pluginList && pluginList.indexOf("zhishiku") > -1) {
-            let query = messages.splice(messages.length - 1, 1);
-            body = Object.assign(body, {
-              query: (_a = query[0]) == null ? void 0 : _a.content,
-              history: messages
-            });
-            delete body.messages;
-          }
         } else if (this._manufacturer.toLowerCase() === "chatdoc") {
           let query = messages.splice(messages.length - 1, 1);
           body = Object.assign(body, {
-            upload_id: (
-            question: (
+            upload_id: (_a = completionParams == null ? void 0 : completionParams.extParams) == null ? void 0 : _a.upload_id,
+            question: (_b = query[0]) == null ? void 0 : _b.content,
             history: messages
           });
           if (body.extParams) delete body.extParams;
@@ -569,7 +504,7 @@ var ChatGPTAPI = class {
           body: JSON.stringify(body),
           signal: abortSignal,
           onMessage: (data) => {
-            var _a2, _b2, _c2, _d2, _e2, _f2, _g2, _h2,
+            var _a2, _b2, _c2, _d2, _e2, _f2, _g2, _h2, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
             if (data === "[DONE]") {
               result.text = result.text.trim();
               return resolve(result);
@@ -615,7 +550,7 @@ var ChatGPTAPI = class {
               }
             }
             } else if (this._manufacturer.toLowerCase() === "tencent") {
-              if (((
+              if (((_i = response.choices[0]) == null ? void 0 : _i.finish_reason) === "stop") {
                result.text += (_k = (_j = response == null ? void 0 : response.choices[0]) == null ? void 0 : _j.delta) == null ? void 0 : _k.content.trim();
                return resolve(result);
              }
@@ -747,7 +682,7 @@ var ChatGPTAPI = class {
         console.log(
           `row data ${typeof response} : `,
           response,
-          (response == null ? void 0 : response.choices) && ((
+          (response == null ? void 0 : response.choices) && ((_c = response == null ? void 0 : response.choices[0]) == null ? void 0 : _c.message)
         );
       }
       if (this._manufacturer.toLowerCase() === "aliyun") {
@@ -759,7 +694,7 @@ var ChatGPTAPI = class {
           result.id = response.id;
         }
       }
-      if (((
+      if (((_d = response == null ? void 0 : response.choices) == null ? void 0 : _d.length) && ["openai", "azure"].indexOf(this._manufacturer.toLowerCase()) > -1) {
        const message2 = response.choices[0].message;
        result.text = message2.content;
        if (message2.role) {
@@ -768,14 +703,14 @@ var ChatGPTAPI = class {
      } else if ((response == null ? void 0 : response.result) && this._manufacturer.toLowerCase() === "baidu") {
        result.text = response.result;
        result.role = "assistant";
-      } else if (((
-        result.text = (
+      } else if (((_e = response == null ? void 0 : response.output) == null ? void 0 : _e.text) && this._manufacturer.toLowerCase() === "aliyun") {
+        result.text = (_f = response == null ? void 0 : response.output) == null ? void 0 : _f.text;
        result.role = "assistant";
      } else {
        const res2 = response;
        return reject(
          new Error(
-            `${this._manufacturer} error: ${((
+            `${this._manufacturer} error: ${((_g = res2 == null ? void 0 : res2.detail) == null ? void 0 : _g.message) || ((_h = res2 == null ? void 0 : res2.detail) == null ? void 0 : _h.error_msg) || (res2 == null ? void 0 : res2.detail) || "unknown"}`
          )
        );
      }