@cloudflare/workers-types 4.20250913.0 → 4.20250918.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/latest/index.d.ts CHANGED
@@ -296,13 +296,6 @@ interface ServiceWorkerGlobalScope extends WorkerGlobalScope {
   FixedLengthStream: typeof FixedLengthStream;
   IdentityTransformStream: typeof IdentityTransformStream;
   HTMLRewriter: typeof HTMLRewriter;
-  Performance: typeof Performance;
-  PerformanceEntry: typeof PerformanceEntry;
-  PerformanceMark: typeof PerformanceMark;
-  PerformanceMeasure: typeof PerformanceMeasure;
-  PerformanceResourceTiming: typeof PerformanceResourceTiming;
-  PerformanceObserver: typeof PerformanceObserver;
-  PerformanceObserverEntryList: typeof PerformanceObserverEntryList;
 }
 declare function addEventListener<Type extends keyof WorkerGlobalScopeEventMap>(
   type: Type,
@@ -477,6 +470,18 @@ declare abstract class Navigator {
   readonly language: string;
   readonly languages: string[];
 }
+/**
+ * The Workers runtime supports a subset of the Performance API, used to measure timing and performance,
+ * as well as timing of subrequests and other operations.
+ *
+ * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/)
+ */
+interface Performance {
+  /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */
+  readonly timeOrigin: number;
+  /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */
+  now(): number;
+}
 interface AlarmInvocationInfo {
   readonly isRetry: boolean;
   readonly retryCount: number;
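With this hunk, `Performance` becomes a plain interface exposing only the members the Workers runtime actually implements: `timeOrigin` and `now()` (the mark/measure/observer surface is dropped from the globals in the next hunk). A minimal sketch of using what remains to time a subrequest; the handler and URL are illustrative, not from the package:

```ts
// Sketch: timing a subrequest with the reduced Performance surface.
// Only performance.now() and performance.timeOrigin are typed after this release.
export default {
  async fetch(request: Request): Promise<Response> {
    const start = performance.now(); // ms elapsed since performance.timeOrigin
    const upstream = await fetch("https://example.com/");
    const elapsed = performance.now() - start;
    return new Response(`upstream ${upstream.status} in ${elapsed.toFixed(1)} ms`);
  },
} satisfies ExportedHandler;
```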
@@ -3190,171 +3195,6 @@ interface WorkerLoaderWorkerCode {
   tails?: Fetcher[];
   streamingTails?: Fetcher[];
 }
-/**
- * The Workers runtime supports a subset of the Performance API, used to measure timing and performance,
- * as well as timing of subrequests and other operations.
- *
- * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/)
- */
-declare abstract class Performance extends EventTarget {
-  /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */
-  get timeOrigin(): number;
-  /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */
-  now(): number;
-  get eventCounts(): EventCounts;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/clearMarks) */
-  clearMarks(name?: string): void;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/clearMeasures) */
-  clearMeasures(name?: string): void;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/clearResourceTimings) */
-  clearResourceTimings(): void;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/getEntries) */
-  getEntries(): PerformanceEntry[];
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/getEntriesByName) */
-  getEntriesByName(name: string, type?: string): PerformanceEntry[];
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/getEntriesByType) */
-  getEntriesByType(type: string): PerformanceEntry[];
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/mark) */
-  mark(name: string, options?: PerformanceMarkOptions): PerformanceMark;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/measure) */
-  measure(
-    measureName: string,
-    measureOptionsOrStartMark: PerformanceMeasureOptions | string,
-    maybeEndMark?: string,
-  ): PerformanceMeasure;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/setResourceTimingBufferSize) */
-  setResourceTimingBufferSize(size: number): void;
-}
-/**
- * PerformanceMark is an abstract interface for PerformanceEntry objects with an entryType of "mark". Entries of this type are created by calling performance.mark() to add a named DOMHighResTimeStamp (the mark) to the browser's performance timeline.
- *
- * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMark)
- */
-declare class PerformanceMark extends PerformanceEntry {
-  constructor(name: string, maybeOptions?: PerformanceMarkOptions);
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMark/detail) */
-  get detail(): any | undefined;
-  toJSON(): any;
-}
-/**
- * PerformanceMeasure is an abstract interface for PerformanceEntry objects with an entryType of "measure". Entries of this type are created by calling performance.measure() to add a named DOMHighResTimeStamp (the measure) between two marks to the browser's performance timeline.
- *
- * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMeasure)
- */
-declare abstract class PerformanceMeasure extends PerformanceEntry {
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMeasure/detail) */
-  get detail(): any | undefined;
-  toJSON(): any;
-}
-interface PerformanceMarkOptions {
-  detail?: any;
-  startTime?: number;
-}
-interface PerformanceMeasureOptions {
-  detail?: any;
-  start?: number;
-  duration?: number;
-  end?: number;
-}
-/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList) */
-declare abstract class PerformanceObserverEntryList {
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList/getEntries) */
-  getEntries(): PerformanceEntry[];
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList/getEntriesByType) */
-  getEntriesByType(type: string): PerformanceEntry[];
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList/getEntriesByName) */
-  getEntriesByName(name: string, type?: string): PerformanceEntry[];
-}
-/**
- * Encapsulates a single performance metric that is part of the performance timeline. A performance entry can be directly created by making a performance mark or measure (for example by calling the mark() method) at an explicit point in an application. Performance entries are also created in indirect ways such as loading a resource (such as an image).
- *
- * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry)
- */
-declare abstract class PerformanceEntry {
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/name) */
-  get name(): string;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/entryType) */
-  get entryType(): string;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/startTime) */
-  get startTime(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/duration) */
-  get duration(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/toJSON) */
-  toJSON(): any;
-}
-/**
- * Enables retrieval and analysis of detailed network timing data regarding the loading of an application's resources. An application can use the timing metrics to determine, for example, the length of time it takes to fetch a specific resource, such as an XMLHttpRequest, <SVG>, image, or script.
- *
- * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming)
- */
-declare abstract class PerformanceResourceTiming extends PerformanceEntry {
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/connectEnd) */
-  get connectEnd(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/connectStart) */
-  get connectStart(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/decodedBodySize) */
-  get decodedBodySize(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/domainLookupEnd) */
-  get domainLookupEnd(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/domainLookupStart) */
-  get domainLookupStart(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/encodedBodySize) */
-  get encodedBodySize(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/fetchStart) */
-  get fetchStart(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/initiatorType) */
-  get initiatorType(): string;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/nextHopProtocol) */
-  get nextHopProtocol(): string;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/redirectEnd) */
-  get redirectEnd(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/redirectStart) */
-  get redirectStart(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/requestStart) */
-  get requestStart(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/responseEnd) */
-  get responseEnd(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/responseStart) */
-  get responseStart(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/responseStatus) */
-  get responseStatus(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/secureConnectionStart) */
-  get secureConnectionStart(): number | undefined;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/transferSize) */
-  get transferSize(): number;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/workerStart) */
-  get workerStart(): number;
-}
-/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver) */
-declare class PerformanceObserver {
-  constructor(callback: any);
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver/disconnect) */
-  disconnect(): void;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver/observe) */
-  observe(options?: PerformanceObserverObserveOptions): void;
-  /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver/takeRecords) */
-  takeRecords(): PerformanceEntry[];
-  readonly supportedEntryTypes: string[];
-}
-interface PerformanceObserverObserveOptions {
-  buffered?: boolean;
-  durationThreshold?: number;
-  entryTypes?: string[];
-  type?: string;
-}
-interface EventCounts {
-  get size(): number;
-  get(eventType: string): number | undefined;
-  has(eventType: string): boolean;
-  entries(): IterableIterator<string[]>;
-  keys(): IterableIterator<string>;
-  values(): IterableIterator<number>;
-  forEach(
-    param1: (param0: number, param1: string, param2: EventCounts) => void,
-    param2?: any,
-  ): void;
-  [Symbol.iterator](): IterableIterator<string[]>;
-}
 type AiImageClassificationInput = {
   image: number[];
 };
@@ -3409,6 +3249,18 @@ declare abstract class BaseAiImageTextToText {
   inputs: AiImageTextToTextInput;
   postProcessedOutputs: AiImageTextToTextOutput;
 }
+type AiMultimodalEmbeddingsInput = {
+  image: string;
+  text: string[];
+};
+type AiIMultimodalEmbeddingsOutput = {
+  data: number[][];
+  shape: number[];
+};
+declare abstract class BaseAiMultimodalEmbeddings {
+  inputs: AiImageTextToTextInput;
+  postProcessedOutputs: AiImageTextToTextOutput;
+}
 type AiObjectDetectionInput = {
   image: number[];
 };
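Note that, as published, `BaseAiMultimodalEmbeddings` still wires its `inputs`/`postProcessedOutputs` to the image-text-to-text types, and the output alias carries a stray `I` in its name (`AiIMultimodalEmbeddingsOutput`). Taken at face value, the new aliases describe shapes like the following; no entry in this release's `AiModels` map binds to this task yet, so this is a type-level sketch with invented values:

```ts
// Type-level sketch of the new multimodal embeddings aliases (values invented).
const input: AiMultimodalEmbeddingsInput = {
  image: "data:image/jpeg;base64,...", // the `image` field is a plain string
  text: ["a red bicycle", "a blue car"], // texts to embed alongside the image
};
const output: AiIMultimodalEmbeddingsOutput = {
  data: [[0.12, -0.03], [0.08, 0.41]], // one embedding vector per input
  shape: [2, 2], // [number of vectors, dimensions] of the `data` matrix
};
```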
@@ -3547,12 +3399,28 @@ type AiTextGenerationInput = {
     | (object & NonNullable<unknown>);
   functions?: AiTextGenerationFunctionsInput[];
 };
+type AiTextGenerationToolLegacyOutput = {
+  name: string;
+  arguments: unknown;
+};
+type AiTextGenerationToolOutput = {
+  id: string;
+  type: "function";
+  function: {
+    name: string;
+    arguments: string;
+  };
+};
+type UsageTags = {
+  prompt_tokens: number;
+  completion_tokens: number;
+  total_tokens: number;
+};
 type AiTextGenerationOutput = {
   response?: string;
-  tool_calls?: {
-    name: string;
-    arguments: unknown;
-  }[];
+  tool_calls?: AiTextGenerationToolLegacyOutput[] &
+    AiTextGenerationToolOutput[];
+  usage?: UsageTags;
 };
 declare abstract class BaseAiTextGeneration {
   inputs: AiTextGenerationInput;
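`tool_calls` now carries both the legacy `{ name, arguments }` entries and OpenAI-style `{ id, type, function }` entries, declared as an intersection of the two array types so each element type-checks as both shapes, and a `usage` block is added. A sketch of consuming the widened output; the `AI` binding name and model choice are assumptions:

```ts
interface Env {
  AI: Ai; // Workers AI binding (name assumed)
}

export default {
  async fetch(_req: Request, env: Env): Promise<Response> {
    const out = await env.AI.run("@cf/meta/llama-3.1-8b-instruct-fp8", {
      prompt: "What tools would you call to get the weather in Austin?",
    });
    console.log(out.usage?.total_tokens); // the new UsageTags block
    for (const call of out.tool_calls ?? []) {
      // The intersection means both the legacy and the new shape type-check;
      // at runtime a given model emits one or the other.
      console.log(call.name, call.function?.arguments);
    }
    return Response.json(out);
  },
} satisfies ExportedHandler<Env>;
```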
@@ -4641,6 +4509,7 @@ type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output =
       name?: string;
     }[];
   }
+  | string
   | AsyncResponse;
 declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast {
   inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input;
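Because the output union now admits a bare `string`, result handling needs an extra narrowing branch. A sketch, continuing the `env.AI` binding assumption from above:

```ts
// Sketch: the fp8-fast output may now be a plain string.
const result = await env.AI.run("@cf/meta/llama-3.3-70b-instruct-fp8-fast", {
  prompt: "Summarize this changeset in one sentence.",
});
if (typeof result === "string") {
  console.log(result); // new: bare string responses
} else if ("response" in result) {
  console.log(result.response); // structured branch with usage/tool_calls
} // remaining branch: AsyncResponse (queued requests)
```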
@@ -4717,7 +4586,6 @@ interface Ai_Cf_Baai_Bge_Reranker_Base_Input {
   /**
    * A query you wish to perform against the provided contexts.
    */
-  query: string;
   /**
    * Number of returned results starting with the best score.
    */
@@ -5810,7 +5678,8 @@ declare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It {
 }
 type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input =
   | Ai_Cf_Meta_Llama_4_Prompt
-  | Ai_Cf_Meta_Llama_4_Messages;
+  | Ai_Cf_Meta_Llama_4_Messages
+  | Ai_Cf_Meta_Llama_4_Async_Batch;
 interface Ai_Cf_Meta_Llama_4_Prompt {
   /**
    * The input text prompt for the model to generate a response.
@@ -6044,130 +5913,812 @@ interface Ai_Cf_Meta_Llama_4_Messages {
    */
   presence_penalty?: number;
 }
-type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = {
+interface Ai_Cf_Meta_Llama_4_Async_Batch {
+  requests: (
+    | Ai_Cf_Meta_Llama_4_Prompt_Inner
+    | Ai_Cf_Meta_Llama_4_Messages_Inner
+  )[];
+}
+interface Ai_Cf_Meta_Llama_4_Prompt_Inner {
   /**
-   * The generated text response from the model
+   * The input text prompt for the model to generate a response.
    */
-  response: string;
+  prompt: string;
   /**
-   * Usage statistics for the inference request
+   * JSON schema that should be fulfilled for the response.
    */
-  usage?: {
-    /**
-     * Total number of tokens in input
-     */
-    prompt_tokens?: number;
-    /**
-     * Total number of tokens in output
-     */
-    completion_tokens?: number;
-    /**
-     * Total number of input and output tokens
-     */
-    total_tokens?: number;
-  };
+  guided_json?: object;
+  response_format?: JSONMode;
   /**
-   * An array of tool calls requests made during the response generation
+   * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.
    */
-  tool_calls?: {
-    /**
-     * The tool call id.
-     */
-    id?: string;
+  raw?: boolean;
+  /**
+   * If true, the response will be streamed back incrementally using SSE, Server Sent Events.
+   */
+  stream?: boolean;
+  /**
+   * The maximum number of tokens to generate in the response.
+   */
+  max_tokens?: number;
+  /**
+   * Controls the randomness of the output; higher values produce more random results.
+   */
+  temperature?: number;
+  /**
+   * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.
+   */
+  top_p?: number;
+  /**
+   * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.
+   */
+  top_k?: number;
+  /**
+   * Random seed for reproducibility of the generation.
+   */
+  seed?: number;
+  /**
+   * Penalty for repeated tokens; higher values discourage repetition.
+   */
+  repetition_penalty?: number;
+  /**
+   * Decreases the likelihood of the model repeating the same lines verbatim.
+   */
+  frequency_penalty?: number;
+  /**
+   * Increases the likelihood of the model introducing new topics.
+   */
+  presence_penalty?: number;
+}
+interface Ai_Cf_Meta_Llama_4_Messages_Inner {
+  /**
+   * An array of message objects representing the conversation history.
+   */
+  messages: {
     /**
-     * Specifies the type of tool (e.g., 'function').
+     * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').
      */
-    type?: string;
+    role?: string;
     /**
-     * Details of the function tool.
+     * The tool call id. If you don't know what to put here you can fall back to 000000001
      */
-    function?: {
-      /**
-       * The name of the tool to be called
-       */
-      name?: string;
-      /**
-       * The arguments passed to be passed to the tool call request
-       */
-      arguments?: object;
-    };
+    tool_call_id?: string;
+    content?:
+      | string
+      | {
+          /**
+           * Type of the content provided
+           */
+          type?: string;
+          text?: string;
+          image_url?: {
+            /**
+             * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted
+             */
+            url?: string;
+          };
+        }[]
+      | {
+          /**
+           * Type of the content provided
+           */
+          type?: string;
+          text?: string;
+          image_url?: {
+            /**
+             * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted
+             */
+            url?: string;
+          };
+        };
   }[];
-};
-declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct {
-  inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input;
-  postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output;
-}
-interface AiModels {
-  "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification;
-  "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage;
-  "@cf/runwayml/stable-diffusion-v1-5-inpainting": BaseAiTextToImage;
-  "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage;
-  "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage;
-  "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage;
-  "@cf/myshell-ai/melotts": BaseAiTextToSpeech;
-  "@cf/microsoft/resnet-50": BaseAiImageClassification;
-  "@cf/facebook/detr-resnet-50": BaseAiObjectDetection;
-  "@cf/meta/llama-2-7b-chat-int8": BaseAiTextGeneration;
-  "@cf/mistral/mistral-7b-instruct-v0.1": BaseAiTextGeneration;
-  "@cf/meta/llama-2-7b-chat-fp16": BaseAiTextGeneration;
-  "@hf/thebloke/llama-2-13b-chat-awq": BaseAiTextGeneration;
-  "@hf/thebloke/mistral-7b-instruct-v0.1-awq": BaseAiTextGeneration;
-  "@hf/thebloke/zephyr-7b-beta-awq": BaseAiTextGeneration;
-  "@hf/thebloke/openhermes-2.5-mistral-7b-awq": BaseAiTextGeneration;
-  "@hf/thebloke/neural-chat-7b-v3-1-awq": BaseAiTextGeneration;
-  "@hf/thebloke/llamaguard-7b-awq": BaseAiTextGeneration;
-  "@hf/thebloke/deepseek-coder-6.7b-base-awq": BaseAiTextGeneration;
-  "@hf/thebloke/deepseek-coder-6.7b-instruct-awq": BaseAiTextGeneration;
-  "@cf/deepseek-ai/deepseek-math-7b-instruct": BaseAiTextGeneration;
-  "@cf/defog/sqlcoder-7b-2": BaseAiTextGeneration;
-  "@cf/openchat/openchat-3.5-0106": BaseAiTextGeneration;
-  "@cf/tiiuae/falcon-7b-instruct": BaseAiTextGeneration;
-  "@cf/thebloke/discolm-german-7b-v1-awq": BaseAiTextGeneration;
-  "@cf/qwen/qwen1.5-0.5b-chat": BaseAiTextGeneration;
-  "@cf/qwen/qwen1.5-7b-chat-awq": BaseAiTextGeneration;
-  "@cf/qwen/qwen1.5-14b-chat-awq": BaseAiTextGeneration;
-  "@cf/tinyllama/tinyllama-1.1b-chat-v1.0": BaseAiTextGeneration;
-  "@cf/microsoft/phi-2": BaseAiTextGeneration;
-  "@cf/qwen/qwen1.5-1.8b-chat": BaseAiTextGeneration;
-  "@cf/mistral/mistral-7b-instruct-v0.2-lora": BaseAiTextGeneration;
-  "@hf/nousresearch/hermes-2-pro-mistral-7b": BaseAiTextGeneration;
-  "@hf/nexusflow/starling-lm-7b-beta": BaseAiTextGeneration;
-  "@hf/google/gemma-7b-it": BaseAiTextGeneration;
-  "@cf/meta-llama/llama-2-7b-chat-hf-lora": BaseAiTextGeneration;
-  "@cf/google/gemma-2b-it-lora": BaseAiTextGeneration;
-  "@cf/google/gemma-7b-it-lora": BaseAiTextGeneration;
-  "@hf/mistral/mistral-7b-instruct-v0.2": BaseAiTextGeneration;
-  "@cf/meta/llama-3-8b-instruct": BaseAiTextGeneration;
-  "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration;
-  "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration;
-  "@hf/meta-llama/meta-llama-3-8b-instruct": BaseAiTextGeneration;
-  "@cf/meta/llama-3.1-8b-instruct": BaseAiTextGeneration;
-  "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration;
-  "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration;
-  "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration;
-  "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration;
-  "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration;
-  "@cf/facebook/bart-large-cnn": BaseAiSummarization;
-  "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText;
-  "@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5;
-  "@cf/openai/whisper": Base_Ai_Cf_Openai_Whisper;
-  "@cf/meta/m2m100-1.2b": Base_Ai_Cf_Meta_M2M100_1_2B;
-  "@cf/baai/bge-small-en-v1.5": Base_Ai_Cf_Baai_Bge_Small_En_V1_5;
-  "@cf/baai/bge-large-en-v1.5": Base_Ai_Cf_Baai_Bge_Large_En_V1_5;
-  "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;
-  "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En;
-  "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;
-  "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3;
-  "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;
-  "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;
-  "@cf/meta/llama-3.3-70b-instruct-fp8-fast": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast;
-  "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B;
-  "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base;
-  "@cf/qwen/qwen2.5-coder-32b-instruct": Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct;
+  functions?: {
+    name: string;
+    code: string;
+  }[];
+  /**
+   * A list of tools available for the assistant to use.
+   */
+  tools?: (
+    | {
+        /**
+         * The name of the tool. More descriptive the better.
+         */
+        name: string;
+        /**
+         * A brief description of what the tool does.
+         */
+        description: string;
+        /**
+         * Schema defining the parameters accepted by the tool.
+         */
+        parameters: {
+          /**
+           * The type of the parameters object (usually 'object').
+           */
+          type: string;
+          /**
+           * List of required parameter names.
+           */
+          required?: string[];
+          /**
+           * Definitions of each parameter.
+           */
+          properties: {
+            [k: string]: {
+              /**
+               * The data type of the parameter.
+               */
+              type: string;
+              /**
+               * A description of the expected parameter.
+               */
+              description: string;
+            };
+          };
+        };
+      }
+    | {
+        /**
+         * Specifies the type of tool (e.g., 'function').
+         */
+        type: string;
+        /**
+         * Details of the function tool.
+         */
+        function: {
+          /**
+           * The name of the function.
+           */
+          name: string;
+          /**
+           * A brief description of what the function does.
+           */
+          description: string;
+          /**
+           * Schema defining the parameters accepted by the function.
+           */
+          parameters: {
+            /**
+             * The type of the parameters object (usually 'object').
+             */
+            type: string;
+            /**
+             * List of required parameter names.
+             */
+            required?: string[];
+            /**
+             * Definitions of each parameter.
+             */
+            properties: {
+              [k: string]: {
+                /**
+                 * The data type of the parameter.
+                 */
+                type: string;
+                /**
+                 * A description of the expected parameter.
+                 */
+                description: string;
+              };
+            };
+          };
+        };
+      }
+  )[];
+  response_format?: JSONMode;
+  /**
+   * JSON schema that should be fufilled for the response.
+   */
+  guided_json?: object;
+  /**
+   * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.
+   */
+  raw?: boolean;
+  /**
+   * If true, the response will be streamed back incrementally using SSE, Server Sent Events.
+   */
+  stream?: boolean;
+  /**
+   * The maximum number of tokens to generate in the response.
+   */
+  max_tokens?: number;
+  /**
+   * Controls the randomness of the output; higher values produce more random results.
+   */
+  temperature?: number;
+  /**
+   * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.
+   */
+  top_p?: number;
+  /**
+   * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.
+   */
+  top_k?: number;
+  /**
+   * Random seed for reproducibility of the generation.
+   */
+  seed?: number;
+  /**
+   * Penalty for repeated tokens; higher values discourage repetition.
+   */
+  repetition_penalty?: number;
+  /**
+   * Decreases the likelihood of the model repeating the same lines verbatim.
+   */
+  frequency_penalty?: number;
+  /**
+   * Increases the likelihood of the model introducing new topics.
+   */
+  presence_penalty?: number;
+}
+type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = {
+  /**
+   * The generated text response from the model
+   */
+  response: string;
+  /**
+   * Usage statistics for the inference request
+   */
+  usage?: {
+    /**
+     * Total number of tokens in input
+     */
+    prompt_tokens?: number;
+    /**
+     * Total number of tokens in output
+     */
+    completion_tokens?: number;
+    /**
+     * Total number of input and output tokens
+     */
+    total_tokens?: number;
+  };
+  /**
+   * An array of tool calls requests made during the response generation
+   */
+  tool_calls?: {
+    /**
+     * The tool call id.
+     */
+    id?: string;
+    /**
+     * Specifies the type of tool (e.g., 'function').
+     */
+    type?: string;
+    /**
+     * Details of the function tool.
+     */
+    function?: {
+      /**
+       * The name of the tool to be called
+       */
+      name?: string;
+      /**
+       * The arguments passed to be passed to the tool call request
+       */
+      arguments?: object;
+    };
+  }[];
+};
+declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct {
+  inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input;
+  postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output;
+}
+interface Ai_Cf_Deepgram_Nova_3_Input {
+  audio: {
+    body: object;
+    contentType: string;
+  };
+  /**
+   * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param.
+   */
+  custom_topic_mode?: "extended" | "strict";
+  /**
+   * Custom topics you want the model to detect within your input audio or text if present Submit up to 100
+   */
+  custom_topic?: string;
+  /**
+   * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition those submitted using the custom_intents param
+   */
+  custom_intent_mode?: "extended" | "strict";
+  /**
+   * Custom intents you want the model to detect within your input audio if present
+   */
+  custom_intent?: string;
+  /**
+   * Identifies and extracts key entities from content in submitted audio
+   */
+  detect_entities?: boolean;
+  /**
+   * Identifies the dominant language spoken in submitted audio
+   */
+  detect_language?: boolean;
+  /**
+   * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
+   */
+  diarize?: boolean;
+  /**
+   * Identify and extract key entities from content in submitted audio
+   */
+  dictation?: boolean;
+  /**
+   * Specify the expected encoding of your submitted audio
+   */
+  encoding?:
+    | "linear16"
+    | "flac"
+    | "mulaw"
+    | "amr-nb"
+    | "amr-wb"
+    | "opus"
+    | "speex"
+    | "g729";
+  /**
+   * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing
+   */
+  extra?: string;
+  /**
+   * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um'
+   */
+  filler_words?: boolean;
+  /**
+   * Key term prompting can boost or suppress specialized terminology and brands.
+   */
+  keyterm?: string;
+  /**
+   * Keywords can boost or suppress specialized terminology and brands.
+   */
+  keywords?: string;
+  /**
+   * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available.
+   */
+  language?: string;
+  /**
+   * Spoken measurements will be converted to their corresponding abbreviations.
+   */
+  measurements?: boolean;
+  /**
+   * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip.
+   */
+  mip_opt_out?: boolean;
+  /**
+   * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio
+   */
+  mode?: "general" | "medical" | "finance";
+  /**
+   * Transcribe each audio channel independently.
+   */
+  multichannel?: boolean;
+  /**
+   * Numerals converts numbers from written format to numerical format.
+   */
+  numerals?: boolean;
+  /**
+   * Splits audio into paragraphs to improve transcript readability.
+   */
+  paragraphs?: boolean;
+  /**
+   * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely.
+   */
+  profanity_filter?: boolean;
+  /**
+   * Add punctuation and capitalization to the transcript.
+   */
+  punctuate?: boolean;
+  /**
+   * Redaction removes sensitive information from your transcripts.
+   */
+  redact?: string;
+  /**
+   * Search for terms or phrases in submitted audio and replaces them.
+   */
+  replace?: string;
+  /**
+   * Search for terms or phrases in submitted audio.
+   */
+  search?: string;
+  /**
+   * Recognizes the sentiment throughout a transcript or text.
+   */
+  sentiment?: boolean;
+  /**
+   * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability.
+   */
+  smart_format?: boolean;
+  /**
+   * Detect topics throughout a transcript or text.
+   */
+  topics?: boolean;
+  /**
+   * Segments speech into meaningful semantic units.
+   */
+  utterances?: boolean;
+  /**
+   * Seconds to wait before detecting a pause between words in submitted audio.
+   */
+  utt_split?: number;
+  /**
+   * The number of channels in the submitted audio
+   */
+  channels?: number;
+  /**
+   * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets.
+   */
+  interim_results?: boolean;
+  /**
+   * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing
+   */
+  endpointing?: string;
+  /**
+   * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets.
+   */
+  vad_events?: boolean;
+  /**
+   * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for webosockets.
+   */
+  utterance_end_ms?: boolean;
+}
+interface Ai_Cf_Deepgram_Nova_3_Output {
+  results?: {
+    channels?: {
+      alternatives?: {
+        confidence?: number;
+        transcript?: string;
+        words?: {
+          confidence?: number;
+          end?: number;
+          start?: number;
+          word?: string;
+        }[];
+      }[];
+    }[];
+    summary?: {
+      result?: string;
+      short?: string;
+    };
+    sentiments?: {
+      segments?: {
+        text?: string;
+        start_word?: number;
+        end_word?: number;
+        sentiment?: string;
+        sentiment_score?: number;
+      }[];
+      average?: {
+        sentiment?: string;
+        sentiment_score?: number;
+      };
+    };
+  };
+}
+declare abstract class Base_Ai_Cf_Deepgram_Nova_3 {
+  inputs: Ai_Cf_Deepgram_Nova_3_Input;
+  postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output;
+}
+type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input =
+  | {
+      /**
+       * readable stream with audio data and content-type specified for that data
+       */
+      audio: {
+        body: object;
+        contentType: string;
+      };
+      /**
+       * type of data PCM data that's sent to the inference server as raw array
+       */
+      dtype?: "uint8" | "float32" | "float64";
+    }
+  | {
+      /**
+       * base64 encoded audio data
+       */
+      audio: string;
+      /**
+       * type of data PCM data that's sent to the inference server as raw array
+       */
+      dtype?: "uint8" | "float32" | "float64";
+    };
+interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output {
+  /**
+   * if true, end-of-turn was detected
+   */
+  is_complete?: boolean;
+  /**
+   * probability of the end-of-turn detection
+   */
+  probability?: number;
+}
+declare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 {
+  inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input;
+  postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output;
+}
+type Ai_Cf_Openai_Gpt_Oss_120B_Input =
+  | GPT_OSS_120B_Responses
+  | GPT_OSS_120B_Responses_Async;
+interface GPT_OSS_120B_Responses {
+  /**
+   * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+   */
+  input: string | unknown[];
+  reasoning?: {
+    /**
+     * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+     */
+    effort?: "low" | "medium" | "high";
+    /**
+     * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+     */
+    summary?: "auto" | "concise" | "detailed";
+  };
+}
+interface GPT_OSS_120B_Responses_Async {
+  requests: {
+    /**
+     * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+     */
+    input: string | unknown[];
+    reasoning?: {
+      /**
+       * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+       */
+      effort?: "low" | "medium" | "high";
+      /**
+       * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+       */
+      summary?: "auto" | "concise" | "detailed";
+    };
+  }[];
+}
+type Ai_Cf_Openai_Gpt_Oss_120B_Output = {} | (string & NonNullable<unknown>);
+declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B {
+  inputs: Ai_Cf_Openai_Gpt_Oss_120B_Input;
+  postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_120B_Output;
+}
+type Ai_Cf_Openai_Gpt_Oss_20B_Input =
+  | GPT_OSS_20B_Responses
+  | GPT_OSS_20B_Responses_Async;
+interface GPT_OSS_20B_Responses {
+  /**
+   * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+   */
+  input: string | unknown[];
+  reasoning?: {
+    /**
+     * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+     */
+    effort?: "low" | "medium" | "high";
+    /**
+     * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+     */
+    summary?: "auto" | "concise" | "detailed";
+  };
+}
+interface GPT_OSS_20B_Responses_Async {
+  requests: {
+    /**
+     * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+     */
+    input: string | unknown[];
+    reasoning?: {
+      /**
+       * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+       */
+      effort?: "low" | "medium" | "high";
+      /**
+       * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+       */
+      summary?: "auto" | "concise" | "detailed";
+    };
+  }[];
+}
+type Ai_Cf_Openai_Gpt_Oss_20B_Output = {} | (string & NonNullable<unknown>);
+declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B {
+  inputs: Ai_Cf_Openai_Gpt_Oss_20B_Input;
+  postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_20B_Output;
+}
+interface Ai_Cf_Leonardo_Phoenix_1_0_Input {
+  /**
+   * A text description of the image you want to generate.
+   */
+  prompt: string;
+  /**
+   * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt
+   */
+  guidance?: number;
+  /**
+   * Random seed for reproducibility of the image generation
+   */
+  seed?: number;
+  /**
+   * The height of the generated image in pixels
+   */
+  height?: number;
+  /**
+   * The width of the generated image in pixels
+   */
+  width?: number;
+  /**
+   * The number of diffusion steps; higher values can improve quality but take longer
+   */
+  num_steps?: number;
+  /**
+   * Specify what to exclude from the generated images
+   */
+  negative_prompt?: string;
+}
+/**
+ * The generated image in JPEG format
+ */
+type Ai_Cf_Leonardo_Phoenix_1_0_Output = string;
+declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 {
+  inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input;
+  postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output;
+}
+interface Ai_Cf_Leonardo_Lucid_Origin_Input {
+  /**
+   * A text description of the image you want to generate.
+   */
+  prompt: string;
+  /**
+   * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt
+   */
+  guidance?: number;
+  /**
+   * Random seed for reproducibility of the image generation
+   */
+  seed?: number;
+  /**
+   * The height of the generated image in pixels
+   */
+  height?: number;
+  /**
+   * The width of the generated image in pixels
+   */
+  width?: number;
+  /**
+   * The number of diffusion steps; higher values can improve quality but take longer
+   */
+  num_steps?: number;
+  /**
+   * The number of diffusion steps; higher values can improve quality but take longer
+   */
+  steps?: number;
+}
+interface Ai_Cf_Leonardo_Lucid_Origin_Output {
+  /**
+   * The generated image in Base64 format.
+   */
+  image?: string;
+}
+declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin {
+  inputs: Ai_Cf_Leonardo_Lucid_Origin_Input;
+  postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output;
+}
+interface Ai_Cf_Deepgram_Aura_1_Input {
+  /**
+   * Speaker used to produce the audio.
+   */
+  speaker?:
+    | "angus"
+    | "asteria"
+    | "arcas"
+    | "orion"
+    | "orpheus"
+    | "athena"
+    | "luna"
+    | "zeus"
+    | "perseus"
+    | "helios"
+    | "hera"
+    | "stella";
+  /**
+   * Encoding of the output audio.
+   */
+  encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac";
+  /**
+   * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type..
+   */
+  container?: "none" | "wav" | "ogg";
+  /**
+   * The text content to be converted to speech
+   */
+  text: string;
+  /**
+   * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable
+   */
+  sample_rate?: number;
+  /**
+   * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type.
+   */
+  bit_rate?: number;
+}
+/**
+ * The generated audio in MP3 format
+ */
+type Ai_Cf_Deepgram_Aura_1_Output = string;
+declare abstract class Base_Ai_Cf_Deepgram_Aura_1 {
+  inputs: Ai_Cf_Deepgram_Aura_1_Input;
+  postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output;
+}
+interface AiModels {
+  "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification;
+  "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage;
+  "@cf/runwayml/stable-diffusion-v1-5-inpainting": BaseAiTextToImage;
+  "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage;
+  "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage;
+  "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage;
+  "@cf/myshell-ai/melotts": BaseAiTextToSpeech;
+  "@cf/google/embeddinggemma-300m": BaseAiTextEmbeddings;
+  "@cf/microsoft/resnet-50": BaseAiImageClassification;
+  "@cf/meta/llama-2-7b-chat-int8": BaseAiTextGeneration;
+  "@cf/mistral/mistral-7b-instruct-v0.1": BaseAiTextGeneration;
+  "@cf/meta/llama-2-7b-chat-fp16": BaseAiTextGeneration;
+  "@hf/thebloke/llama-2-13b-chat-awq": BaseAiTextGeneration;
+  "@hf/thebloke/mistral-7b-instruct-v0.1-awq": BaseAiTextGeneration;
+  "@hf/thebloke/zephyr-7b-beta-awq": BaseAiTextGeneration;
+  "@hf/thebloke/openhermes-2.5-mistral-7b-awq": BaseAiTextGeneration;
+  "@hf/thebloke/neural-chat-7b-v3-1-awq": BaseAiTextGeneration;
+  "@hf/thebloke/llamaguard-7b-awq": BaseAiTextGeneration;
+  "@hf/thebloke/deepseek-coder-6.7b-base-awq": BaseAiTextGeneration;
+  "@hf/thebloke/deepseek-coder-6.7b-instruct-awq": BaseAiTextGeneration;
+  "@cf/deepseek-ai/deepseek-math-7b-instruct": BaseAiTextGeneration;
+  "@cf/defog/sqlcoder-7b-2": BaseAiTextGeneration;
+  "@cf/openchat/openchat-3.5-0106": BaseAiTextGeneration;
+  "@cf/tiiuae/falcon-7b-instruct": BaseAiTextGeneration;
+  "@cf/thebloke/discolm-german-7b-v1-awq": BaseAiTextGeneration;
+  "@cf/qwen/qwen1.5-0.5b-chat": BaseAiTextGeneration;
+  "@cf/qwen/qwen1.5-7b-chat-awq": BaseAiTextGeneration;
+  "@cf/qwen/qwen1.5-14b-chat-awq": BaseAiTextGeneration;
+  "@cf/tinyllama/tinyllama-1.1b-chat-v1.0": BaseAiTextGeneration;
+  "@cf/microsoft/phi-2": BaseAiTextGeneration;
+  "@cf/qwen/qwen1.5-1.8b-chat": BaseAiTextGeneration;
+  "@cf/mistral/mistral-7b-instruct-v0.2-lora": BaseAiTextGeneration;
+  "@hf/nousresearch/hermes-2-pro-mistral-7b": BaseAiTextGeneration;
+  "@hf/nexusflow/starling-lm-7b-beta": BaseAiTextGeneration;
+  "@hf/google/gemma-7b-it": BaseAiTextGeneration;
+  "@cf/meta-llama/llama-2-7b-chat-hf-lora": BaseAiTextGeneration;
+  "@cf/google/gemma-2b-it-lora": BaseAiTextGeneration;
+  "@cf/google/gemma-7b-it-lora": BaseAiTextGeneration;
+  "@hf/mistral/mistral-7b-instruct-v0.2": BaseAiTextGeneration;
+  "@cf/meta/llama-3-8b-instruct": BaseAiTextGeneration;
+  "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration;
+  "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration;
+  "@hf/meta-llama/meta-llama-3-8b-instruct": BaseAiTextGeneration;
+  "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration;
+  "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration;
+  "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration;
+  "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration;
+  "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration;
+  "@cf/facebook/bart-large-cnn": BaseAiSummarization;
+  "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText;
+  "@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5;
+  "@cf/openai/whisper": Base_Ai_Cf_Openai_Whisper;
+  "@cf/meta/m2m100-1.2b": Base_Ai_Cf_Meta_M2M100_1_2B;
+  "@cf/baai/bge-small-en-v1.5": Base_Ai_Cf_Baai_Bge_Small_En_V1_5;
+  "@cf/baai/bge-large-en-v1.5": Base_Ai_Cf_Baai_Bge_Large_En_V1_5;
+  "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;
+  "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En;
+  "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;
+  "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3;
+  "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;
+  "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;
+  "@cf/meta/llama-3.3-70b-instruct-fp8-fast": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast;
+  "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B;
+  "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base;
+  "@cf/qwen/qwen2.5-coder-32b-instruct": Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct;
   "@cf/qwen/qwq-32b": Base_Ai_Cf_Qwen_Qwq_32B;
   "@cf/mistralai/mistral-small-3.1-24b-instruct": Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct;
   "@cf/google/gemma-3-12b-it": Base_Ai_Cf_Google_Gemma_3_12B_It;
   "@cf/meta/llama-4-scout-17b-16e-instruct": Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct;
+  "@cf/deepgram/nova-3": Base_Ai_Cf_Deepgram_Nova_3;
+  "@cf/pipecat-ai/smart-turn-v2": Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2;
+  "@cf/openai/gpt-oss-120b": Base_Ai_Cf_Openai_Gpt_Oss_120B;
+  "@cf/openai/gpt-oss-20b": Base_Ai_Cf_Openai_Gpt_Oss_20B;
+  "@cf/leonardo/phoenix-1.0": Base_Ai_Cf_Leonardo_Phoenix_1_0;
+  "@cf/leonardo/lucid-origin": Base_Ai_Cf_Leonardo_Lucid_Origin;
+  "@cf/deepgram/aura-1": Base_Ai_Cf_Deepgram_Aura_1;
 }
 type AiOptions = {
   /**
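The seven new catalog entries above are what make the new model types in this hunk reachable through `env.AI.run()`. A sketch chaining the two Deepgram models, speech-to-text into text-to-speech; the binding name, audio source, and content-type choices are assumptions:

```ts
interface Env {
  AI: Ai; // Workers AI binding (name assumed)
}

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    // @cf/deepgram/nova-3 takes a { body, contentType } audio wrapper;
    // the declared `body: object` is loose, so the request stream is cast here.
    const transcript = await env.AI.run("@cf/deepgram/nova-3", {
      audio: { body: request.body as object, contentType: "audio/wav" },
      punctuate: true,
      smart_format: true,
    });
    const text =
      transcript.results?.channels?.[0]?.alternatives?.[0]?.transcript ?? "";

    // @cf/deepgram/aura-1 returns the generated audio (typed as a string).
    const speech = await env.AI.run("@cf/deepgram/aura-1", {
      text,
      speaker: "asteria",
      encoding: "mp3",
    });
    return new Response(speech, { headers: { "content-type": "audio/mpeg" } });
  },
} satisfies ExportedHandler<Env>;
```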
@@ -6175,6 +6726,10 @@ type AiOptions = {
    * https://developers.cloudflare.com/workers-ai/features/batch-api
    */
   queueRequest?: boolean;
+  /**
+   * Establish websocket connections, only works for supported models
+   */
+  websocket?: boolean;
   gateway?: GatewayOptions;
   returnRawResponse?: boolean;
   prefix?: string;
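The new flag pairs with the widened `run()` signature at the end of this diff: when `websocket: true` is passed, the promise resolves to a raw `Response` rather than the model's output type. A type-level sketch; which models actually accept an upgrade is not expressed in the types:

```ts
// Sketch: with { websocket: true }, run() is typed to return Response.
const res = await env.AI.run(
  "@cf/deepgram/nova-3", // streaming-capable model; support is model-specific
  { audio: { body: {}, contentType: "audio/l16" } }, // placeholder input
  { websocket: true },
);
res.webSocket?.accept(); // on an accepted upgrade the socket hangs off the Response
```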
@@ -6218,7 +6773,7 @@ type AiModelListType = Record<string, any>;
 declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
   aiGatewayLogId: string | null;
   gateway(gatewayId: string): AiGateway;
-  autorag(autoragId?: string): AutoRAG;
+  autorag(autoragId: string): AutoRAG;
   run<
     Name extends keyof AiModelList,
     Options extends AiOptions,
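`autorag()` no longer accepts a missing instance name, so bare `env.AI.autorag()` calls stop compiling against these types. The migration is a one-liner, using the existing AutoRAG surface; the instance name below is a placeholder:

```ts
// Before (4.20250913.0): env.AI.autorag() type-checked with no argument.
// From this release the AutoRAG instance name is required:
const rag = env.AI.autorag("my-autorag");
const answer = await rag.aiSearch({ query: "How do I configure caching?" });
```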
@@ -6228,9 +6783,13 @@ declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
     inputs: InputOptions,
     options?: Options,
   ): Promise<
-    Options extends {
-      returnRawResponse: true;
-    }
+    Options extends
+      | {
+          returnRawResponse: true;
+        }
+      | {
+          websocket: true;
+        }
       ? Response
       : InputOptions extends {
           stream: true;