@fugood/bricks-project 2.21.0-beta.21 → 2.21.0-beta.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/compile/action-name-map.ts +14 -1
- package/package.json +2 -2
- package/types/data.ts +1 -1
- package/types/generators.ts +136 -63
|
@@ -441,11 +441,18 @@ export const templateActionNameMap = {
|
|
|
441
441
|
},
|
|
442
442
|
GENERATOR_SPEECH_INFERENCE: {
|
|
443
443
|
GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_FILE: {
|
|
444
|
+
fileUrl: 'GENERATOR_SPEECH_INFERENCE_FILE_URL',
|
|
445
|
+
prompt: 'GENERATOR_SPEECH_INFERENCE_PROMPT',
|
|
446
|
+
beamSize: 'GENERATOR_SPEECH_INFERENCE_BEAM_SIZE',
|
|
447
|
+
language: 'GENERATOR_SPEECH_INFERENCE_LANGUAGE',
|
|
448
|
+
translate: 'GENERATOR_SPEECH_INFERENCE_TRANSLATE',
|
|
449
|
+
},
|
|
450
|
+
GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_DATA: {
|
|
451
|
+
data: 'GENERATOR_SPEECH_INFERENCE_DATA',
|
|
444
452
|
prompt: 'GENERATOR_SPEECH_INFERENCE_PROMPT',
|
|
445
453
|
beamSize: 'GENERATOR_SPEECH_INFERENCE_BEAM_SIZE',
|
|
446
454
|
language: 'GENERATOR_SPEECH_INFERENCE_LANGUAGE',
|
|
447
455
|
translate: 'GENERATOR_SPEECH_INFERENCE_TRANSLATE',
|
|
448
|
-
fileUrl: 'GENERATOR_SPEECH_INFERENCE_FILE_URL',
|
|
449
456
|
},
|
|
450
457
|
GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_REALTIME: {
|
|
451
458
|
prompt: 'GENERATOR_SPEECH_INFERENCE_PROMPT',
|
|
@@ -464,6 +471,7 @@ export const templateActionNameMap = {
|
|
|
464
471
|
},
|
|
465
472
|
GENERATOR_LLM: {
|
|
466
473
|
GENERATOR_LLM_PROCESS_PROMPT: {
|
|
474
|
+
sessionKey: 'GENERATOR_LLM_SESSION_KEY',
|
|
467
475
|
mode: 'GENERATOR_LLM_MODE',
|
|
468
476
|
messages: 'GENERATOR_LLM_MESSAGES',
|
|
469
477
|
prompt: 'GENERATOR_LLM_PROMPT',
|
|
@@ -471,6 +479,7 @@ export const templateActionNameMap = {
|
|
|
471
479
|
promptTemplateType: 'GENERATOR_LLM_PROMPT_TEMPLATE_TYPE',
|
|
472
480
|
},
|
|
473
481
|
GENERATOR_LLM_COMPLETION: {
|
|
482
|
+
sessionKey: 'GENERATOR_LLM_SESSION_KEY',
|
|
474
483
|
mode: 'GENERATOR_LLM_MODE',
|
|
475
484
|
messages: 'GENERATOR_LLM_MESSAGES',
|
|
476
485
|
prompt: 'GENERATOR_LLM_PROMPT',
|
|
@@ -499,5 +508,9 @@ export const templateActionNameMap = {
|
|
|
499
508
|
functionCallEnabled: 'GENERATOR_LLM_FUNCTION_CALL_ENABLED',
|
|
500
509
|
functionCallSchema: 'GENERATOR_LLM_FUNCTION_CALL_SCHEMA',
|
|
501
510
|
},
|
|
511
|
+
GENERATOR_LLM_CLEAR_SESSION: {
|
|
512
|
+
sessionId: 'GENERATOR_LLM_SESSION_ID',
|
|
513
|
+
sessionCustomKey: 'GENERATOR_LLM_SESSION_CUSTOM_KEY',
|
|
514
|
+
},
|
|
502
515
|
},
|
|
503
516
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@fugood/bricks-project",
|
|
3
|
-
"version": "2.21.0-beta.21",
|
|
3
|
+
"version": "2.21.0-beta.23",
|
|
4
4
|
"main": "index.ts",
|
|
5
5
|
"scripts": {
|
|
6
6
|
"build": "node scripts/build.js"
|
|
@@ -13,5 +13,5 @@
|
|
|
13
13
|
"lodash": "^4.17.4",
|
|
14
14
|
"uuid": "^8.3.1"
|
|
15
15
|
},
|
|
16
|
-
"gitHead": "
|
|
16
|
+
"gitHead": "d7d83d4425e118960bcbde76a7e66a2c2f4e7bdb"
|
|
17
17
|
}
|
package/types/data.ts
CHANGED
package/types/generators.ts
CHANGED
|
@@ -438,7 +438,7 @@ Default property:
|
|
|
438
438
|
/* Hash value */
|
|
439
439
|
hash?: string | DataLink
|
|
440
440
|
/* Hash type */
|
|
441
|
-
hashType?: 'md5' | 'sha256' | DataLink
|
|
441
|
+
hashType?: 'md5' | 'sha256' | 'sha1' | DataLink
|
|
442
442
|
/* Request method */
|
|
443
443
|
method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH' | DataLink
|
|
444
444
|
/* Header */
|
|
@@ -455,7 +455,7 @@ Default property:
|
|
|
455
455
|
encoding?: 'utf8' | 'base64' | DataLink
|
|
456
456
|
url?: string | DataLink
|
|
457
457
|
hash?: string | DataLink
|
|
458
|
-
hashType?: 'sha256' | 'md5' | DataLink
|
|
458
|
+
hashType?: 'sha256' | 'sha1' | 'md5' | DataLink
|
|
459
459
|
method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH' | DataLink
|
|
460
460
|
header?: {} | DataLink
|
|
461
461
|
fields?: {} | DataLink
|
|
@@ -4260,6 +4260,7 @@ interface GeneratorQuestionDef {
|
|
|
4260
4260
|
/*
|
|
4261
4261
|
Default property:
|
|
4262
4262
|
{
|
|
4263
|
+
"modalMode": "root",
|
|
4263
4264
|
"showClose": true,
|
|
4264
4265
|
"showBack": true,
|
|
4265
4266
|
"cameraType": "back",
|
|
@@ -4271,6 +4272,8 @@ Default property:
|
|
|
4271
4272
|
}
|
|
4272
4273
|
*/
|
|
4273
4274
|
property?: {
|
|
4275
|
+
/* Modal mode */
|
|
4276
|
+
modalMode?: 'root' | 'in-subspace' | DataLink
|
|
4274
4277
|
/* Inquirer schema
|
|
4275
4278
|
`key`: Field key (unique, required)
|
|
4276
4279
|
`kind`: Field type (required)
|
|
@@ -5711,6 +5714,11 @@ export type GeneratorSpeechInferenceActionLoadModel = Action & {
|
|
|
5711
5714
|
export type GeneratorSpeechInferenceActionTranscribeFile = ActionWithParams & {
|
|
5712
5715
|
__actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_FILE'
|
|
5713
5716
|
params?: Array<
|
|
5717
|
+
| {
|
|
5718
|
+
input: 'fileUrl'
|
|
5719
|
+
value?: string | DataLink
|
|
5720
|
+
mapping?: string
|
|
5721
|
+
}
|
|
5714
5722
|
| {
|
|
5715
5723
|
input: 'prompt'
|
|
5716
5724
|
value?: string | DataLink
|
|
@@ -5731,11 +5739,38 @@ export type GeneratorSpeechInferenceActionTranscribeFile = ActionWithParams & {
|
|
|
5731
5739
|
value?: boolean | DataLink
|
|
5732
5740
|
mapping?: string
|
|
5733
5741
|
}
|
|
5742
|
+
>
|
|
5743
|
+
}
|
|
5744
|
+
|
|
5745
|
+
/* Transcribe audio data. Currently only support base64 encoded audio data (16-bit PCM, mono, 16kHz) */
|
|
5746
|
+
export type GeneratorSpeechInferenceActionTranscribeData = ActionWithParams & {
|
|
5747
|
+
__actionName: 'GENERATOR_SPEECH_INFERENCE_TRANSCRIBE_DATA'
|
|
5748
|
+
params?: Array<
|
|
5734
5749
|
| {
|
|
5735
|
-
input: 'fileUrl'
|
|
5750
|
+
input: 'data'
|
|
5751
|
+
value?: any
|
|
5752
|
+
mapping?: string
|
|
5753
|
+
}
|
|
5754
|
+
| {
|
|
5755
|
+
input: 'prompt'
|
|
5736
5756
|
value?: string | DataLink
|
|
5737
5757
|
mapping?: string
|
|
5738
5758
|
}
|
|
5759
|
+
| {
|
|
5760
|
+
input: 'beamSize'
|
|
5761
|
+
value?: number | DataLink
|
|
5762
|
+
mapping?: string
|
|
5763
|
+
}
|
|
5764
|
+
| {
|
|
5765
|
+
input: 'language'
|
|
5766
|
+
value?: string | DataLink
|
|
5767
|
+
mapping?: string
|
|
5768
|
+
}
|
|
5769
|
+
| {
|
|
5770
|
+
input: 'translate'
|
|
5771
|
+
value?: boolean | DataLink
|
|
5772
|
+
mapping?: string
|
|
5773
|
+
}
|
|
5739
5774
|
>
|
|
5740
5775
|
}
|
|
5741
5776
|
|
|
@@ -5826,10 +5861,10 @@ interface GeneratorSpeechInferenceDef {
|
|
|
5826
5861
|
Default property:
|
|
5827
5862
|
{
|
|
5828
5863
|
"init": false,
|
|
5829
|
-
"
|
|
5830
|
-
"modelQuantizedType": "none",
|
|
5864
|
+
"modelName": "base-q8_0",
|
|
5831
5865
|
"modelUseCoreML": false,
|
|
5832
5866
|
"modelUseGPU": true,
|
|
5867
|
+
"modelUseFlashAttn": false,
|
|
5833
5868
|
"inferLanguage": "Auto",
|
|
5834
5869
|
"inferRealtimeAudioSeconds": 30,
|
|
5835
5870
|
"inferRealtimeAudioSliceSeconds": 30,
|
|
@@ -5844,35 +5879,71 @@ Default property:
|
|
|
5844
5879
|
/* Initialize the Whisper context on generator initialization
|
|
5845
5880
|
Please note that it will take some RAM depending on the model size */
|
|
5846
5881
|
init?: boolean | DataLink
|
|
5847
|
-
/* Use model
|
|
5848
|
-
We used `ggml` format model, please refer to https://
|
|
5849
|
-
You can also choose `
|
|
5850
|
-
|
|
5851
|
-
| '
|
|
5852
|
-
| '
|
|
5853
|
-
| '
|
|
5854
|
-
| '
|
|
5855
|
-
| '
|
|
5856
|
-
| '
|
|
5857
|
-
| '
|
|
5858
|
-
| '
|
|
5859
|
-
| '
|
|
5860
|
-
| '
|
|
5861
|
-
| '
|
|
5862
|
-
| '
|
|
5863
|
-
| '
|
|
5882
|
+
/* Use model name, the model download progress will be done in preload stage or the generator initialization stage.
|
|
5883
|
+
We used `ggml` format model, please refer to https://huggingface.co/BricksDisplay/whisper-ggml
|
|
5884
|
+
You can also choose `custom` option and set `Model URL` and `Model MD5` to use your own model */
|
|
5885
|
+
modelName?:
|
|
5886
|
+
| 'custom'
|
|
5887
|
+
| 'tiny'
|
|
5888
|
+
| 'tiny-q5_1'
|
|
5889
|
+
| 'tiny-q8_0'
|
|
5890
|
+
| 'tiny.en'
|
|
5891
|
+
| 'tiny.en-q5_1'
|
|
5892
|
+
| 'tiny.en-q8_0'
|
|
5893
|
+
| 'base'
|
|
5894
|
+
| 'base-q5_1'
|
|
5895
|
+
| 'base-q8_0'
|
|
5896
|
+
| 'base.en'
|
|
5897
|
+
| 'base.en-q5_1'
|
|
5898
|
+
| 'base.en-q8_0'
|
|
5899
|
+
| 'small'
|
|
5900
|
+
| 'small-q5_1'
|
|
5901
|
+
| 'small-q8_0'
|
|
5902
|
+
| 'small.en'
|
|
5903
|
+
| 'small.en-q5_1'
|
|
5904
|
+
| 'small.en-q8_0'
|
|
5905
|
+
| 'medium'
|
|
5906
|
+
| 'medium-q5_0'
|
|
5907
|
+
| 'medium-q8_0'
|
|
5908
|
+
| 'medium.en'
|
|
5909
|
+
| 'medium.en-q5_1'
|
|
5910
|
+
| 'medium.en-q8_0'
|
|
5911
|
+
| 'large-v1'
|
|
5912
|
+
| 'large-v2'
|
|
5913
|
+
| 'large-v2-q5_0'
|
|
5914
|
+
| 'large-v2-q8_0'
|
|
5915
|
+
| 'large-v3'
|
|
5916
|
+
| 'large-v3-q5_0'
|
|
5917
|
+
| 'large-v3-q8_0'
|
|
5918
|
+
| 'large-v3-turbo'
|
|
5919
|
+
| 'large-v3-turbo-q5_0'
|
|
5920
|
+
| 'large-v3-turbo-q8_0'
|
|
5921
|
+
| 'small.en-tdrz'
|
|
5922
|
+
| 'small.en-tdrz-q5_0'
|
|
5923
|
+
| 'small.en-tdrz-q8_0'
|
|
5924
|
+
| 'distil-small.en'
|
|
5925
|
+
| 'distil-small.en-q5_0'
|
|
5926
|
+
| 'distil-small.en-q8_0'
|
|
5927
|
+
| 'distil-medium.en'
|
|
5928
|
+
| 'distil-medium.en-q5_0'
|
|
5929
|
+
| 'distil-medium.en-q8_0'
|
|
5930
|
+
| 'distil-large-v3'
|
|
5931
|
+
| 'distil-large-v3-q5_0'
|
|
5932
|
+
| 'distil-large-v3-q8_0'
|
|
5864
5933
|
| DataLink
|
|
5865
|
-
/* Use quantized model */
|
|
5866
|
-
modelQuantizedType?: 'none' | 'q4_0' | 'q4_1' | 'q5_0' | 'q5_1' | 'q8_0' | DataLink
|
|
5867
5934
|
/* The URL or path of model
|
|
5868
5935
|
We used `ggml` format model, please refer to https://github.com/ggerganov/whisper.cpp/tree/master/models */
|
|
5869
5936
|
modelUrl?: string | DataLink
|
|
5870
|
-
/*
|
|
5871
|
-
|
|
5937
|
+
/* Hash type of model */
|
|
5938
|
+
modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
|
|
5939
|
+
/* Hash of model */
|
|
5940
|
+
modelHash?: string | DataLink
|
|
5872
5941
|
/* [Unstable] iOS: Use CoreML model for inference */
|
|
5873
5942
|
modelUseCoreML?: boolean | DataLink
|
|
5874
5943
|
/* Use GPU Acceleration for inference. Currently iOS only, if it's enabled, Core ML option will be ignored. */
|
|
5875
5944
|
modelUseGPU?: boolean | DataLink
|
|
5945
|
+
/* Use Flash Attention for inference (Recommended with GPU enabled) */
|
|
5946
|
+
modelUseFlashAttn?: boolean | DataLink
|
|
5876
5947
|
/* The language of the file to be inferred */
|
|
5877
5948
|
inferLanguage?:
|
|
5878
5949
|
| 'Auto'
|
|
@@ -5986,6 +6057,8 @@ Default property:
|
|
|
5986
6057
|
inferMaxThreads?: number | DataLink
|
|
5987
6058
|
/* Output token-level timestamps in details outlet */
|
|
5988
6059
|
inferTokenTimestamps?: boolean | DataLink
|
|
6060
|
+
/* Speaker diarization (Please use small.en-tdrz model) */
|
|
6061
|
+
inferTdrz?: boolean | DataLink
|
|
5989
6062
|
/* Maximum segment length in characters */
|
|
5990
6063
|
inferMaxLength?: number | DataLink
|
|
5991
6064
|
/* Audio time offset in milliseconds */
|
|
@@ -6076,7 +6149,7 @@ Default property:
|
|
|
6076
6149
|
/* Is transcribing */
|
|
6077
6150
|
isTranscribing?: () => Data
|
|
6078
6151
|
/* Progress of transcribe audio (0-100) */
|
|
6079
|
-
|
|
6152
|
+
transcribeProgress?: () => Data
|
|
6080
6153
|
/* Inference result */
|
|
6081
6154
|
transcribeResult?: () => Data
|
|
6082
6155
|
/* Inference result details */
|
|
@@ -6104,7 +6177,7 @@ export type GeneratorSpeechInference = Generator &
|
|
|
6104
6177
|
| 'contextState'
|
|
6105
6178
|
| 'contextDetails'
|
|
6106
6179
|
| 'isTranscribing'
|
|
6107
|
-
| '
|
|
6180
|
+
| 'transcribeProgress'
|
|
6108
6181
|
| 'transcribeResult'
|
|
6109
6182
|
| 'transcribeDetails'
|
|
6110
6183
|
| 'recordedPath'
|
|
@@ -6124,6 +6197,11 @@ export type GeneratorLLMActionLoadModel = Action & {
|
|
|
6124
6197
|
export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
|
|
6125
6198
|
__actionName: 'GENERATOR_LLM_PROCESS_PROMPT'
|
|
6126
6199
|
params?: Array<
|
|
6200
|
+
| {
|
|
6201
|
+
input: 'sessionKey'
|
|
6202
|
+
value?: string | DataLink
|
|
6203
|
+
mapping?: string
|
|
6204
|
+
}
|
|
6127
6205
|
| {
|
|
6128
6206
|
input: 'mode'
|
|
6129
6207
|
value?: string | DataLink
|
|
@@ -6156,6 +6234,11 @@ export type GeneratorLLMActionProcessPrompt = ActionWithParams & {
|
|
|
6156
6234
|
export type GeneratorLLMActionCompletion = ActionWithParams & {
|
|
6157
6235
|
__actionName: 'GENERATOR_LLM_COMPLETION'
|
|
6158
6236
|
params?: Array<
|
|
6237
|
+
| {
|
|
6238
|
+
input: 'sessionKey'
|
|
6239
|
+
value?: string | DataLink
|
|
6240
|
+
mapping?: string
|
|
6241
|
+
}
|
|
6159
6242
|
| {
|
|
6160
6243
|
input: 'mode'
|
|
6161
6244
|
value?: string | DataLink
|
|
@@ -6294,6 +6377,23 @@ export type GeneratorLLMActionCompletion = ActionWithParams & {
|
|
|
6294
6377
|
>
|
|
6295
6378
|
}
|
|
6296
6379
|
|
|
6380
|
+
/* Clear session with session key or session ID */
|
|
6381
|
+
export type GeneratorLLMActionClearSession = ActionWithParams & {
|
|
6382
|
+
__actionName: 'GENERATOR_LLM_CLEAR_SESSION'
|
|
6383
|
+
params?: Array<
|
|
6384
|
+
| {
|
|
6385
|
+
input: 'sessionId'
|
|
6386
|
+
value?: string | DataLink
|
|
6387
|
+
mapping?: string
|
|
6388
|
+
}
|
|
6389
|
+
| {
|
|
6390
|
+
input: 'sessionCustomKey'
|
|
6391
|
+
value?: string | DataLink
|
|
6392
|
+
mapping?: string
|
|
6393
|
+
}
|
|
6394
|
+
>
|
|
6395
|
+
}
|
|
6396
|
+
|
|
6297
6397
|
/* Stop text completion */
|
|
6298
6398
|
export type GeneratorLLMActionStopCompletion = Action & {
|
|
6299
6399
|
__actionName: 'GENERATOR_LLM_STOP_COMPLETION'
|
|
@@ -6314,8 +6414,6 @@ interface GeneratorLLMDef {
|
|
|
6314
6414
|
Default property:
|
|
6315
6415
|
{
|
|
6316
6416
|
"init": false,
|
|
6317
|
-
"modelType": "Custom",
|
|
6318
|
-
"modelQuantizedType": "q2_k",
|
|
6319
6417
|
"contextSize": 512,
|
|
6320
6418
|
"batchSize": 512,
|
|
6321
6419
|
"accelVariant": "default",
|
|
@@ -6363,38 +6461,13 @@ Default property:
|
|
|
6363
6461
|
/* Initialize the Llama context on generator initialization
|
|
6364
6462
|
Please note that it will take some RAM depending on the model size */
|
|
6365
6463
|
init?: boolean | DataLink
|
|
6366
|
-
/* Use model type, the model download progress will be done in preload stage or the generator initialization stage.
|
|
6367
|
-
We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description
|
|
6368
|
-
You can also choose `Custom` option and set `Model URL` and `Model MD5` to use your own model.
|
|
6369
|
-
Please also read model license for fair use. */
|
|
6370
|
-
modelType?:
|
|
6371
|
-
| 'Custom'
|
|
6372
|
-
| '[Local] Llama 3 8B'
|
|
6373
|
-
| '[Local] Llama 3 8B Instruct'
|
|
6374
|
-
| '[Local] Llama 2 7B Chat'
|
|
6375
|
-
| '[Local] Llama 2 7B'
|
|
6376
|
-
| '[Local] Mistral 7B Instruct v0.2'
|
|
6377
|
-
| '[Local] Mistral 7B v0.1'
|
|
6378
|
-
| '[Local] Breeze 7B Instruct 64k v0.1'
|
|
6379
|
-
| '[Local] Falcon 7B'
|
|
6380
|
-
| '[Local] Gemma 2B Instruct'
|
|
6381
|
-
| '[Local] Gemma 2B'
|
|
6382
|
-
| '[Local] Phi-2 3B'
|
|
6383
|
-
| '[Local] OpenLLaMA 2 3B'
|
|
6384
|
-
| '[Local] TinyLlama 1.1B intermediate-step-1195k-token-2.5T'
|
|
6385
|
-
| DataLink
|
|
6386
|
-
/* Select quantized model, currently only provided `q2_k` / `q3_k` / `q4_0` (Default to `q2_k`, will use `q4_0` instead if K-Quants not supported) */
|
|
6387
|
-
modelQuantizedType?: 'q2_k' | 'q3_k' | 'q4_0' | DataLink
|
|
6388
6464
|
/* The URL or path of model
|
|
6389
6465
|
We used GGUF format model, please refer to https://github.com/ggerganov/llama.cpp/tree/master#description */
|
|
6390
6466
|
modelUrl?: string | DataLink
|
|
6391
|
-
/* Hash of model
|
|
6392
|
-
|
|
6393
|
-
/* Hash of model
|
|
6394
|
-
|
|
6395
|
-
/* Pre-process the prompt after model loaded, this can speed up the completion action.
|
|
6396
|
-
This doing the same thing as PROCESS_PROMPT action. */
|
|
6397
|
-
modelPreprocessPrompt?: boolean | DataLink
|
|
6467
|
+
/* Hash type of model */
|
|
6468
|
+
modelHashType?: 'md5' | 'sha256' | 'sha1' | DataLink
|
|
6469
|
+
/* Hash of model */
|
|
6470
|
+
modelHash?: string | DataLink
|
|
6398
6471
|
/* Context size (0 ~ 4096) (Default to 512) */
|
|
6399
6472
|
contextSize?: number | DataLink
|
|
6400
6473
|
/* Batch size */
|
|
@@ -6451,12 +6524,14 @@ Default property:
|
|
|
6451
6524
|
completionPromptTemplateData?: {} | DataLink
|
|
6452
6525
|
/* The prompt template type */
|
|
6453
6526
|
completionPromptTemplateType?: '${}' | '{{}}' | DataLink
|
|
6454
|
-
/* Grammar */
|
|
6455
|
-
completionGrammar?: string | DataLink
|
|
6456
6527
|
/* Stop words */
|
|
6457
6528
|
completionStopWords?: Array<string | DataLink> | DataLink
|
|
6458
6529
|
/* Number of tokens to predict */
|
|
6459
6530
|
completionPredict?: number | DataLink
|
|
6531
|
+
/* Throttle time for completion result (in milliseconds) */
|
|
6532
|
+
completionResultThrottle?: number | DataLink
|
|
6533
|
+
/* Grammar (GBNF: Please refer to https://github.com/ggerganov/llama.cpp/tree/master/grammars) */
|
|
6534
|
+
completionGrammar?: string | DataLink
|
|
6460
6535
|
/* Temperature */
|
|
6461
6536
|
completionTemperature?: number | DataLink
|
|
6462
6537
|
/* Number of probablites to show for each token in the completion details */
|
|
@@ -6493,8 +6568,6 @@ Default property:
|
|
|
6493
6568
|
completionTypicalP?: number | DataLink
|
|
6494
6569
|
/* Repeat alpha frequency penalty (default: 0.1, 0.0 = disabled) */
|
|
6495
6570
|
completionIgnoreEOS?: boolean | DataLink
|
|
6496
|
-
/* Throttle time for completion result (in milliseconds) */
|
|
6497
|
-
completionResultThrottle?: number | DataLink
|
|
6498
6571
|
}
|
|
6499
6572
|
events?: {
|
|
6500
6573
|
/* Event triggered when load is done */
|