@cloudflare/workers-types 4.20250917.0 → 4.20250919.0

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -301,6 +301,13 @@ interface ServiceWorkerGlobalScope extends WorkerGlobalScope {
    FixedLengthStream: typeof FixedLengthStream;
    IdentityTransformStream: typeof IdentityTransformStream;
    HTMLRewriter: typeof HTMLRewriter;
+   Performance: typeof Performance;
+   PerformanceEntry: typeof PerformanceEntry;
+   PerformanceMark: typeof PerformanceMark;
+   PerformanceMeasure: typeof PerformanceMeasure;
+   PerformanceResourceTiming: typeof PerformanceResourceTiming;
+   PerformanceObserver: typeof PerformanceObserver;
+   PerformanceObserverEntryList: typeof PerformanceObserverEntryList;
  }
  declare function addEventListener<Type extends keyof WorkerGlobalScopeEventMap>(
    type: Type,
@@ -478,18 +485,6 @@ declare abstract class Navigator {
    readonly languages: string[];
    readonly storage: StorageManager;
  }
- /**
-  * The Workers runtime supports a subset of the Performance API, used to measure timing and performance,
-  * as well as timing of subrequests and other operations.
-  *
-  * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/)
-  */
- interface Performance {
-   /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */
-   readonly timeOrigin: number;
-   /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */
-   now(): number;
- }
  interface AlarmInvocationInfo {
    readonly isRetry: boolean;
    readonly retryCount: number;
@@ -3426,6 +3421,174 @@ interface WorkerLoaderWorkerCode {
    tails?: Fetcher[];
    streamingTails?: Fetcher[];
  }
+ /**
+  * The Workers runtime supports a subset of the Performance API, used to measure timing and performance,
+  * as well as timing of subrequests and other operations.
+  *
+  * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/)
+  */
+ declare abstract class Performance extends EventTarget {
+   /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */
+   get timeOrigin(): number;
+   /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */
+   now(): number;
+   get eventCounts(): EventCounts;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/clearMarks) */
+   clearMarks(name?: string): void;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/clearMeasures) */
+   clearMeasures(name?: string): void;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/clearResourceTimings) */
+   clearResourceTimings(): void;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/getEntries) */
+   getEntries(): PerformanceEntry[];
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/getEntriesByName) */
+   getEntriesByName(name: string, type?: string): PerformanceEntry[];
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/getEntriesByType) */
+   getEntriesByType(type: string): PerformanceEntry[];
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/mark) */
+   mark(name: string, options?: PerformanceMarkOptions): PerformanceMark;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/measure) */
+   measure(
+     measureName: string,
+     measureOptionsOrStartMark: PerformanceMeasureOptions | string,
+     maybeEndMark?: string,
+   ): PerformanceMeasure;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/setResourceTimingBufferSize) */
+   setResourceTimingBufferSize(size: number): void;
+   eventLoopUtilization(): void;
+   markResourceTiming(): void;
+   timerify(fn: () => void): () => void;
+ }
+ /**
+  * PerformanceMark is an abstract interface for PerformanceEntry objects with an entryType of "mark". Entries of this type are created by calling performance.mark() to add a named DOMHighResTimeStamp (the mark) to the browser's performance timeline.
+  *
+  * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMark)
+  */
+ declare class PerformanceMark extends PerformanceEntry {
+   constructor(name: string, maybeOptions?: PerformanceMarkOptions);
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMark/detail) */
+   get detail(): any | undefined;
+   toJSON(): any;
+ }
+ /**
+  * PerformanceMeasure is an abstract interface for PerformanceEntry objects with an entryType of "measure". Entries of this type are created by calling performance.measure() to add a named DOMHighResTimeStamp (the measure) between two marks to the browser's performance timeline.
+  *
+  * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMeasure)
+  */
+ declare abstract class PerformanceMeasure extends PerformanceEntry {
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceMeasure/detail) */
+   get detail(): any | undefined;
+   toJSON(): any;
+ }
+ interface PerformanceMarkOptions {
+   detail?: any;
+   startTime?: number;
+ }
+ interface PerformanceMeasureOptions {
+   detail?: any;
+   start?: number;
+   duration?: number;
+   end?: number;
+ }
+ /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList) */
+ declare abstract class PerformanceObserverEntryList {
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList/getEntries) */
+   getEntries(): PerformanceEntry[];
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList/getEntriesByType) */
+   getEntriesByType(type: string): PerformanceEntry[];
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserverEntryList/getEntriesByName) */
+   getEntriesByName(name: string, type?: string): PerformanceEntry[];
+ }
+ /**
+  * Encapsulates a single performance metric that is part of the performance timeline. A performance entry can be directly created by making a performance mark or measure (for example by calling the mark() method) at an explicit point in an application. Performance entries are also created in indirect ways such as loading a resource (such as an image).
+  *
+  * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry)
+  */
+ declare abstract class PerformanceEntry {
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/name) */
+   get name(): string;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/entryType) */
+   get entryType(): string;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/startTime) */
+   get startTime(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/duration) */
+   get duration(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceEntry/toJSON) */
+   toJSON(): any;
+ }
+ /**
+  * Enables retrieval and analysis of detailed network timing data regarding the loading of an application's resources. An application can use the timing metrics to determine, for example, the length of time it takes to fetch a specific resource, such as an XMLHttpRequest, <SVG>, image, or script.
+  *
+  * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming)
+  */
+ declare abstract class PerformanceResourceTiming extends PerformanceEntry {
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/connectEnd) */
+   get connectEnd(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/connectStart) */
+   get connectStart(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/decodedBodySize) */
+   get decodedBodySize(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/domainLookupEnd) */
+   get domainLookupEnd(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/domainLookupStart) */
+   get domainLookupStart(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/encodedBodySize) */
+   get encodedBodySize(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/fetchStart) */
+   get fetchStart(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/initiatorType) */
+   get initiatorType(): string;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/nextHopProtocol) */
+   get nextHopProtocol(): string;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/redirectEnd) */
+   get redirectEnd(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/redirectStart) */
+   get redirectStart(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/requestStart) */
+   get requestStart(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/responseEnd) */
+   get responseEnd(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/responseStart) */
+   get responseStart(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/responseStatus) */
+   get responseStatus(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/secureConnectionStart) */
+   get secureConnectionStart(): number | undefined;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/transferSize) */
+   get transferSize(): number;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceResourceTiming/workerStart) */
+   get workerStart(): number;
+ }
+ /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver) */
+ declare class PerformanceObserver {
+   constructor(callback: any);
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver/disconnect) */
+   disconnect(): void;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver/observe) */
+   observe(options?: PerformanceObserverObserveOptions): void;
+   /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PerformanceObserver/takeRecords) */
+   takeRecords(): PerformanceEntry[];
+   readonly supportedEntryTypes: string[];
+ }
+ interface PerformanceObserverObserveOptions {
+   buffered?: boolean;
+   durationThreshold?: number;
+   entryTypes?: string[];
+   type?: string;
+ }
+ interface EventCounts {
+   get size(): number;
+   get(eventType: string): number | undefined;
+   has(eventType: string): boolean;
+   entries(): IterableIterator<string[]>;
+   keys(): IterableIterator<string>;
+   values(): IterableIterator<number>;
+   forEach(
+     param1: (param0: number, param1: string, param2: EventCounts) => void,
+     param2?: any,
+   ): void;
+   [Symbol.iterator](): IterableIterator<string[]>;
+ }
  type AiImageClassificationInput = {
    image: number[];
  };
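
For orientation, a minimal sketch of how the globals declared above might be used from a Worker. The handler shape is standard; treat the exact set of entry types the runtime records as an assumption rather than something this diff guarantees:

    // Hypothetical Worker that times a subrequest with the Performance API
    // surface declared above; `performance` is the runtime-provided global.
    export default {
      async fetch(request: Request): Promise<Response> {
        performance.mark("fetch-start");
        const upstream = await fetch("https://example.com/");
        performance.mark("fetch-end");
        // measure() records and returns a PerformanceMeasure spanning the two marks.
        const measure = performance.measure("upstream-fetch", "fetch-start", "fetch-end");
        // Recorded entries can be read back by type or name via getEntries*.
        const markCount = performance.getEntriesByType("mark").length;
        return new Response(
          JSON.stringify({
            upstreamStatus: upstream.status,
            durationMs: measure.duration,
            markCount,
          }),
          { headers: { "content-type": "application/json" } },
        );
      },
    };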
@@ -3480,6 +3643,18 @@ declare abstract class BaseAiImageTextToText {
    inputs: AiImageTextToTextInput;
    postProcessedOutputs: AiImageTextToTextOutput;
  }
+ type AiMultimodalEmbeddingsInput = {
+   image: string;
+   text: string[];
+ };
+ type AiIMultimodalEmbeddingsOutput = {
+   data: number[][];
+   shape: number[];
+ };
+ declare abstract class BaseAiMultimodalEmbeddings {
+   inputs: AiImageTextToTextInput;
+   postProcessedOutputs: AiImageTextToTextOutput;
+ }
  type AiObjectDetectionInput = {
    image: number[];
  };
@@ -3618,12 +3793,28 @@ type AiTextGenerationInput = {
      | (object & NonNullable<unknown>);
    functions?: AiTextGenerationFunctionsInput[];
  };
+ type AiTextGenerationToolLegacyOutput = {
+   name: string;
+   arguments: unknown;
+ };
+ type AiTextGenerationToolOutput = {
+   id: string;
+   type: "function";
+   function: {
+     name: string;
+     arguments: string;
+   };
+ };
+ type UsageTags = {
+   prompt_tokens: number;
+   completion_tokens: number;
+   total_tokens: number;
+ };
  type AiTextGenerationOutput = {
    response?: string;
-   tool_calls?: {
-     name: string;
-     arguments: unknown;
-   }[];
+   tool_calls?: AiTextGenerationToolLegacyOutput[] &
+     AiTextGenerationToolOutput[];
+   usage?: UsageTags;
  };
  declare abstract class BaseAiTextGeneration {
    inputs: AiTextGenerationInput;
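
A sketch of what the reshaped output means for callers. The `env.AI` binding and model id are illustrative assumptions, and the intersection-typed `tool_calls` is widened to a plain union before iterating:

    declare const env: { AI: Ai }; // assumed Workers env binding
    const out = await env.AI.run("@cf/meta/llama-3.1-8b-instruct-fp8", {
      prompt: "What is 2 + 2?",
    });
    console.log(out.response, "total tokens:", out.usage?.total_tokens);
    const calls = (out.tool_calls ?? []) as Array<
      AiTextGenerationToolLegacyOutput | AiTextGenerationToolOutput
    >;
    for (const call of calls) {
      // Entries may follow the legacy { name, arguments } shape or the
      // OpenAI-style { id, type: "function", function } shape; handle both.
      const name = "function" in call ? call.function.name : call.name;
      console.log("tool requested:", name);
    }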
@@ -4712,6 +4903,7 @@ type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output =
        name?: string;
      }[];
    }
+   | string
    | AsyncResponse;
  declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast {
    inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input;
@@ -4788,7 +4980,6 @@ interface Ai_Cf_Baai_Bge_Reranker_Base_Input {
    /**
     * A query you wish to perform against the provided contexts.
     */
-   query: string;
    /**
     * Number of returned results starting with the best score.
     */
@@ -5881,7 +6072,8 @@ declare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It {
  }
  type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input =
    | Ai_Cf_Meta_Llama_4_Prompt
-   | Ai_Cf_Meta_Llama_4_Messages;
+   | Ai_Cf_Meta_Llama_4_Messages
+   | Ai_Cf_Meta_Llama_4_Async_Batch;
  interface Ai_Cf_Meta_Llama_4_Prompt {
    /**
     * The input text prompt for the model to generate a response.
@@ -6115,130 +6307,812 @@ interface Ai_Cf_Meta_Llama_4_Messages {
     */
    presence_penalty?: number;
  }
- type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = {
+ interface Ai_Cf_Meta_Llama_4_Async_Batch {
+   requests: (
+     | Ai_Cf_Meta_Llama_4_Prompt_Inner
+     | Ai_Cf_Meta_Llama_4_Messages_Inner
+   )[];
+ }
+ interface Ai_Cf_Meta_Llama_4_Prompt_Inner {
    /**
-    * The generated text response from the model
+    * The input text prompt for the model to generate a response.
     */
-   response: string;
+   prompt: string;
    /**
-    * Usage statistics for the inference request
+    * JSON schema that should be fulfilled for the response.
     */
-   usage?: {
-     /**
-      * Total number of tokens in input
-      */
-     prompt_tokens?: number;
-     /**
-      * Total number of tokens in output
-      */
-     completion_tokens?: number;
-     /**
-      * Total number of input and output tokens
-      */
-     total_tokens?: number;
-   };
+   guided_json?: object;
+   response_format?: JSONMode;
    /**
-    * An array of tool calls requests made during the response generation
+    * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.
     */
-   tool_calls?: {
-     /**
-      * The tool call id.
-      */
-     id?: string;
+   raw?: boolean;
+   /**
+    * If true, the response will be streamed back incrementally using SSE, Server Sent Events.
+    */
+   stream?: boolean;
+   /**
+    * The maximum number of tokens to generate in the response.
+    */
+   max_tokens?: number;
+   /**
+    * Controls the randomness of the output; higher values produce more random results.
+    */
+   temperature?: number;
+   /**
+    * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.
+    */
+   top_p?: number;
+   /**
+    * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.
+    */
+   top_k?: number;
+   /**
+    * Random seed for reproducibility of the generation.
+    */
+   seed?: number;
+   /**
+    * Penalty for repeated tokens; higher values discourage repetition.
+    */
+   repetition_penalty?: number;
+   /**
+    * Decreases the likelihood of the model repeating the same lines verbatim.
+    */
+   frequency_penalty?: number;
+   /**
+    * Increases the likelihood of the model introducing new topics.
+    */
+   presence_penalty?: number;
+ }
+ interface Ai_Cf_Meta_Llama_4_Messages_Inner {
+   /**
+    * An array of message objects representing the conversation history.
+    */
+   messages: {
      /**
-      * Specifies the type of tool (e.g., 'function').
+      * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').
       */
-     type?: string;
+     role?: string;
      /**
-      * Details of the function tool.
+      * The tool call id. If you don't know what to put here you can fall back to 000000001
       */
-     function?: {
-       /**
-        * The name of the tool to be called
-        */
-       name?: string;
-       /**
-        * The arguments passed to be passed to the tool call request
-        */
-       arguments?: object;
-     };
+     tool_call_id?: string;
+     content?:
+       | string
+       | {
+           /**
+            * Type of the content provided
+            */
+           type?: string;
+           text?: string;
+           image_url?: {
+             /**
+              * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted
+              */
+             url?: string;
+           };
+         }[]
+       | {
+           /**
+            * Type of the content provided
+            */
+           type?: string;
+           text?: string;
+           image_url?: {
+             /**
+              * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted
+              */
+             url?: string;
+           };
+         };
    }[];
- };
- declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct {
-   inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input;
-   postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output;
- }
- interface AiModels {
-   "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification;
-   "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage;
-   "@cf/runwayml/stable-diffusion-v1-5-inpainting": BaseAiTextToImage;
-   "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage;
-   "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage;
-   "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage;
-   "@cf/myshell-ai/melotts": BaseAiTextToSpeech;
-   "@cf/microsoft/resnet-50": BaseAiImageClassification;
-   "@cf/facebook/detr-resnet-50": BaseAiObjectDetection;
-   "@cf/meta/llama-2-7b-chat-int8": BaseAiTextGeneration;
-   "@cf/mistral/mistral-7b-instruct-v0.1": BaseAiTextGeneration;
-   "@cf/meta/llama-2-7b-chat-fp16": BaseAiTextGeneration;
-   "@hf/thebloke/llama-2-13b-chat-awq": BaseAiTextGeneration;
-   "@hf/thebloke/mistral-7b-instruct-v0.1-awq": BaseAiTextGeneration;
-   "@hf/thebloke/zephyr-7b-beta-awq": BaseAiTextGeneration;
-   "@hf/thebloke/openhermes-2.5-mistral-7b-awq": BaseAiTextGeneration;
-   "@hf/thebloke/neural-chat-7b-v3-1-awq": BaseAiTextGeneration;
-   "@hf/thebloke/llamaguard-7b-awq": BaseAiTextGeneration;
-   "@hf/thebloke/deepseek-coder-6.7b-base-awq": BaseAiTextGeneration;
-   "@hf/thebloke/deepseek-coder-6.7b-instruct-awq": BaseAiTextGeneration;
-   "@cf/deepseek-ai/deepseek-math-7b-instruct": BaseAiTextGeneration;
-   "@cf/defog/sqlcoder-7b-2": BaseAiTextGeneration;
-   "@cf/openchat/openchat-3.5-0106": BaseAiTextGeneration;
-   "@cf/tiiuae/falcon-7b-instruct": BaseAiTextGeneration;
-   "@cf/thebloke/discolm-german-7b-v1-awq": BaseAiTextGeneration;
-   "@cf/qwen/qwen1.5-0.5b-chat": BaseAiTextGeneration;
-   "@cf/qwen/qwen1.5-7b-chat-awq": BaseAiTextGeneration;
-   "@cf/qwen/qwen1.5-14b-chat-awq": BaseAiTextGeneration;
-   "@cf/tinyllama/tinyllama-1.1b-chat-v1.0": BaseAiTextGeneration;
-   "@cf/microsoft/phi-2": BaseAiTextGeneration;
-   "@cf/qwen/qwen1.5-1.8b-chat": BaseAiTextGeneration;
-   "@cf/mistral/mistral-7b-instruct-v0.2-lora": BaseAiTextGeneration;
-   "@hf/nousresearch/hermes-2-pro-mistral-7b": BaseAiTextGeneration;
-   "@hf/nexusflow/starling-lm-7b-beta": BaseAiTextGeneration;
-   "@hf/google/gemma-7b-it": BaseAiTextGeneration;
-   "@cf/meta-llama/llama-2-7b-chat-hf-lora": BaseAiTextGeneration;
-   "@cf/google/gemma-2b-it-lora": BaseAiTextGeneration;
-   "@cf/google/gemma-7b-it-lora": BaseAiTextGeneration;
-   "@hf/mistral/mistral-7b-instruct-v0.2": BaseAiTextGeneration;
-   "@cf/meta/llama-3-8b-instruct": BaseAiTextGeneration;
-   "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration;
-   "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration;
-   "@hf/meta-llama/meta-llama-3-8b-instruct": BaseAiTextGeneration;
-   "@cf/meta/llama-3.1-8b-instruct": BaseAiTextGeneration;
-   "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration;
-   "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration;
-   "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration;
-   "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration;
-   "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration;
-   "@cf/facebook/bart-large-cnn": BaseAiSummarization;
-   "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText;
-   "@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5;
-   "@cf/openai/whisper": Base_Ai_Cf_Openai_Whisper;
-   "@cf/meta/m2m100-1.2b": Base_Ai_Cf_Meta_M2M100_1_2B;
-   "@cf/baai/bge-small-en-v1.5": Base_Ai_Cf_Baai_Bge_Small_En_V1_5;
-   "@cf/baai/bge-large-en-v1.5": Base_Ai_Cf_Baai_Bge_Large_En_V1_5;
-   "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;
-   "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En;
-   "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;
-   "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3;
-   "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;
-   "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;
-   "@cf/meta/llama-3.3-70b-instruct-fp8-fast": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast;
-   "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B;
-   "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base;
+   functions?: {
+     name: string;
+     code: string;
+   }[];
+   /**
+    * A list of tools available for the assistant to use.
+    */
+   tools?: (
+     | {
+         /**
+          * The name of the tool. More descriptive the better.
+          */
+         name: string;
+         /**
+          * A brief description of what the tool does.
+          */
+         description: string;
+         /**
+          * Schema defining the parameters accepted by the tool.
+          */
+         parameters: {
+           /**
+            * The type of the parameters object (usually 'object').
+            */
+           type: string;
+           /**
+            * List of required parameter names.
+            */
+           required?: string[];
+           /**
+            * Definitions of each parameter.
+            */
+           properties: {
+             [k: string]: {
+               /**
+                * The data type of the parameter.
+                */
+               type: string;
+               /**
+                * A description of the expected parameter.
+                */
+               description: string;
+             };
+           };
+         };
+       }
+     | {
+         /**
+          * Specifies the type of tool (e.g., 'function').
+          */
+         type: string;
+         /**
+          * Details of the function tool.
+          */
+         function: {
+           /**
+            * The name of the function.
+            */
+           name: string;
+           /**
+            * A brief description of what the function does.
+            */
+           description: string;
+           /**
+            * Schema defining the parameters accepted by the function.
+            */
+           parameters: {
+             /**
+              * The type of the parameters object (usually 'object').
+              */
+             type: string;
+             /**
+              * List of required parameter names.
+              */
+             required?: string[];
+             /**
+              * Definitions of each parameter.
+              */
+             properties: {
+               [k: string]: {
+                 /**
+                  * The data type of the parameter.
+                  */
+                 type: string;
+                 /**
+                  * A description of the expected parameter.
+                  */
+                 description: string;
+               };
+             };
+           };
+         };
+       }
+   )[];
+   response_format?: JSONMode;
+   /**
+    * JSON schema that should be fufilled for the response.
+    */
+   guided_json?: object;
+   /**
+    * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.
+    */
+   raw?: boolean;
+   /**
+    * If true, the response will be streamed back incrementally using SSE, Server Sent Events.
+    */
+   stream?: boolean;
+   /**
+    * The maximum number of tokens to generate in the response.
+    */
+   max_tokens?: number;
+   /**
+    * Controls the randomness of the output; higher values produce more random results.
+    */
+   temperature?: number;
+   /**
+    * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.
+    */
+   top_p?: number;
+   /**
+    * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.
+    */
+   top_k?: number;
+   /**
+    * Random seed for reproducibility of the generation.
+    */
+   seed?: number;
+   /**
+    * Penalty for repeated tokens; higher values discourage repetition.
+    */
+   repetition_penalty?: number;
+   /**
+    * Decreases the likelihood of the model repeating the same lines verbatim.
+    */
+   frequency_penalty?: number;
+   /**
+    * Increases the likelihood of the model introducing new topics.
+    */
+   presence_penalty?: number;
+ }
+ type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = {
+   /**
+    * The generated text response from the model
+    */
+   response: string;
+   /**
+    * Usage statistics for the inference request
+    */
+   usage?: {
+     /**
+      * Total number of tokens in input
+      */
+     prompt_tokens?: number;
+     /**
+      * Total number of tokens in output
+      */
+     completion_tokens?: number;
+     /**
+      * Total number of input and output tokens
+      */
+     total_tokens?: number;
+   };
+   /**
+    * An array of tool calls requests made during the response generation
+    */
+   tool_calls?: {
+     /**
+      * The tool call id.
+      */
+     id?: string;
+     /**
+      * Specifies the type of tool (e.g., 'function').
+      */
+     type?: string;
+     /**
+      * Details of the function tool.
+      */
+     function?: {
+       /**
+        * The name of the tool to be called
+        */
+       name?: string;
+       /**
+        * The arguments passed to be passed to the tool call request
+        */
+       arguments?: object;
+     };
+   }[];
+ };
+ declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct {
+   inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input;
+   postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output;
+ }
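
The new `Ai_Cf_Meta_Llama_4_Async_Batch` arm means a single `run()` call can carry an array of prompt-style or messages-style requests. A sketch under assumptions (the `env.AI` binding, and that batch inputs pair with the existing `queueRequest` option; the diff itself does not say so):

    declare const env: { AI: Ai }; // assumed Workers env binding
    const batch = await env.AI.run(
      "@cf/meta/llama-4-scout-17b-16e-instruct",
      {
        requests: [
          { prompt: "Translate 'hello' into French.", max_tokens: 32 },
          { messages: [{ role: "user", content: "Name three Workers APIs." }] },
        ],
      },
      { queueRequest: true }, // assumption: batched inputs are queued
    );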
+ interface Ai_Cf_Deepgram_Nova_3_Input {
+   audio: {
+     body: object;
+     contentType: string;
+   };
+   /**
+    * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param.
+    */
+   custom_topic_mode?: "extended" | "strict";
+   /**
+    * Custom topics you want the model to detect within your input audio or text if present Submit up to 100
+    */
+   custom_topic?: string;
+   /**
+    * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition those submitted using the custom_intents param
+    */
+   custom_intent_mode?: "extended" | "strict";
+   /**
+    * Custom intents you want the model to detect within your input audio if present
+    */
+   custom_intent?: string;
+   /**
+    * Identifies and extracts key entities from content in submitted audio
+    */
+   detect_entities?: boolean;
+   /**
+    * Identifies the dominant language spoken in submitted audio
+    */
+   detect_language?: boolean;
+   /**
+    * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
+    */
+   diarize?: boolean;
+   /**
+    * Identify and extract key entities from content in submitted audio
+    */
+   dictation?: boolean;
+   /**
+    * Specify the expected encoding of your submitted audio
+    */
+   encoding?:
+     | "linear16"
+     | "flac"
+     | "mulaw"
+     | "amr-nb"
+     | "amr-wb"
+     | "opus"
+     | "speex"
+     | "g729";
+   /**
+    * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing
+    */
+   extra?: string;
+   /**
+    * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um'
+    */
+   filler_words?: boolean;
+   /**
+    * Key term prompting can boost or suppress specialized terminology and brands.
+    */
+   keyterm?: string;
+   /**
+    * Keywords can boost or suppress specialized terminology and brands.
+    */
+   keywords?: string;
+   /**
+    * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available.
+    */
+   language?: string;
+   /**
+    * Spoken measurements will be converted to their corresponding abbreviations.
+    */
+   measurements?: boolean;
+   /**
+    * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip.
+    */
+   mip_opt_out?: boolean;
+   /**
+    * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio
+    */
+   mode?: "general" | "medical" | "finance";
+   /**
+    * Transcribe each audio channel independently.
+    */
+   multichannel?: boolean;
+   /**
+    * Numerals converts numbers from written format to numerical format.
+    */
+   numerals?: boolean;
+   /**
+    * Splits audio into paragraphs to improve transcript readability.
+    */
+   paragraphs?: boolean;
+   /**
+    * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely.
+    */
+   profanity_filter?: boolean;
+   /**
+    * Add punctuation and capitalization to the transcript.
+    */
+   punctuate?: boolean;
+   /**
+    * Redaction removes sensitive information from your transcripts.
+    */
+   redact?: string;
+   /**
+    * Search for terms or phrases in submitted audio and replaces them.
+    */
+   replace?: string;
+   /**
+    * Search for terms or phrases in submitted audio.
+    */
+   search?: string;
+   /**
+    * Recognizes the sentiment throughout a transcript or text.
+    */
+   sentiment?: boolean;
+   /**
+    * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability.
+    */
+   smart_format?: boolean;
+   /**
+    * Detect topics throughout a transcript or text.
+    */
+   topics?: boolean;
+   /**
+    * Segments speech into meaningful semantic units.
+    */
+   utterances?: boolean;
+   /**
+    * Seconds to wait before detecting a pause between words in submitted audio.
+    */
+   utt_split?: number;
+   /**
+    * The number of channels in the submitted audio
+    */
+   channels?: number;
+   /**
+    * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets.
+    */
+   interim_results?: boolean;
+   /**
+    * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing
+    */
+   endpointing?: string;
+   /**
+    * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets.
+    */
+   vad_events?: boolean;
+   /**
+    * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for webosockets.
+    */
+   utterance_end_ms?: boolean;
+ }
+ interface Ai_Cf_Deepgram_Nova_3_Output {
+   results?: {
+     channels?: {
+       alternatives?: {
+         confidence?: number;
+         transcript?: string;
+         words?: {
+           confidence?: number;
+           end?: number;
+           start?: number;
+           word?: string;
+         }[];
+       }[];
+     }[];
+     summary?: {
+       result?: string;
+       short?: string;
+     };
+     sentiments?: {
+       segments?: {
+         text?: string;
+         start_word?: number;
+         end_word?: number;
+         sentiment?: string;
+         sentiment_score?: number;
+       }[];
+       average?: {
+         sentiment?: string;
+         sentiment_score?: number;
+       };
+     };
+   };
+ }
+ declare abstract class Base_Ai_Cf_Deepgram_Nova_3 {
+   inputs: Ai_Cf_Deepgram_Nova_3_Input;
+   postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output;
+ }
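
A sketch of transcription against the Nova 3 entry; the binding, audio source, and option choices are illustrative assumptions:

    declare const env: { AI: Ai }; // assumed Workers env binding
    const audio = await fetch("https://example.com/sample.wav");
    const result = await env.AI.run("@cf/deepgram/nova-3", {
      audio: { body: audio.body!, contentType: "audio/wav" },
      punctuate: true,
      smart_format: true,
      diarize: true,
    });
    // Every level of the output is optional, so drill down defensively.
    const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;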
+ type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input =
+   | {
+       /**
+        * readable stream with audio data and content-type specified for that data
+        */
+       audio: {
+         body: object;
+         contentType: string;
+       };
+       /**
+        * type of data PCM data that's sent to the inference server as raw array
+        */
+       dtype?: "uint8" | "float32" | "float64";
+     }
+   | {
+       /**
+        * base64 encoded audio data
+        */
+       audio: string;
+       /**
+        * type of data PCM data that's sent to the inference server as raw array
+        */
+       dtype?: "uint8" | "float32" | "float64";
+     };
+ interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output {
+   /**
+    * if true, end-of-turn was detected
+    */
+   is_complete?: boolean;
+   /**
+    * probability of the end-of-turn detection
+    */
+   probability?: number;
+ }
+ declare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 {
+   inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input;
+   postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output;
+ }
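
A sketch of end-of-turn detection using the base64 arm of the input union; the audio variable and its encoding are assumptions:

    declare const env: { AI: Ai }; // assumed Workers env binding
    declare const base64Pcm: string; // assumed: PCM samples, base64-encoded
    const turn = await env.AI.run("@cf/pipecat-ai/smart-turn-v2", {
      audio: base64Pcm,
      dtype: "float32",
    });
    if (turn.is_complete) {
      console.log("speaker finished, p =", turn.probability);
    }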
+ type Ai_Cf_Openai_Gpt_Oss_120B_Input =
+   | GPT_OSS_120B_Responses
+   | GPT_OSS_120B_Responses_Async;
+ interface GPT_OSS_120B_Responses {
+   /**
+    * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+    */
+   input: string | unknown[];
+   reasoning?: {
+     /**
+      * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+      */
+     effort?: "low" | "medium" | "high";
+     /**
+      * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+      */
+     summary?: "auto" | "concise" | "detailed";
+   };
+ }
+ interface GPT_OSS_120B_Responses_Async {
+   requests: {
+     /**
+      * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+      */
+     input: string | unknown[];
+     reasoning?: {
+       /**
+        * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+        */
+       effort?: "low" | "medium" | "high";
+       /**
+        * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+        */
+       summary?: "auto" | "concise" | "detailed";
+     };
+   }[];
+ }
+ type Ai_Cf_Openai_Gpt_Oss_120B_Output = {} | (string & NonNullable<unknown>);
+ declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B {
+   inputs: Ai_Cf_Openai_Gpt_Oss_120B_Input;
+   postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_120B_Output;
+ }
+ type Ai_Cf_Openai_Gpt_Oss_20B_Input =
+   | GPT_OSS_20B_Responses
+   | GPT_OSS_20B_Responses_Async;
+ interface GPT_OSS_20B_Responses {
+   /**
+    * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+    */
+   input: string | unknown[];
+   reasoning?: {
+     /**
+      * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+      */
+     effort?: "low" | "medium" | "high";
+     /**
+      * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+      */
+     summary?: "auto" | "concise" | "detailed";
+   };
+ }
+ interface GPT_OSS_20B_Responses_Async {
+   requests: {
+     /**
+      * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types
+      */
+     input: string | unknown[];
+     reasoning?: {
+       /**
+        * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+        */
+       effort?: "low" | "medium" | "high";
+       /**
+        * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.
+        */
+       summary?: "auto" | "concise" | "detailed";
+     };
+   }[];
+ }
+ type Ai_Cf_Openai_Gpt_Oss_20B_Output = {} | (string & NonNullable<unknown>);
+ declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B {
+   inputs: Ai_Cf_Openai_Gpt_Oss_20B_Input;
+   postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_20B_Output;
+ }
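
A sketch of a Responses-style call. Note the deliberately loose output type (`{}` or a string), so callers should inspect the result rather than assume a shape:

    declare const env: { AI: Ai }; // assumed Workers env binding
    const res = await env.AI.run("@cf/openai/gpt-oss-120b", {
      input: "Summarize the Performance API additions in one sentence.",
      reasoning: { effort: "low", summary: "auto" },
    });
    // Output is typed as {} | string; narrow before use.
    console.log(typeof res === "string" ? res : JSON.stringify(res));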
+ interface Ai_Cf_Leonardo_Phoenix_1_0_Input {
+   /**
+    * A text description of the image you want to generate.
+    */
+   prompt: string;
+   /**
+    * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt
+    */
+   guidance?: number;
+   /**
+    * Random seed for reproducibility of the image generation
+    */
+   seed?: number;
+   /**
+    * The height of the generated image in pixels
+    */
+   height?: number;
+   /**
+    * The width of the generated image in pixels
+    */
+   width?: number;
+   /**
+    * The number of diffusion steps; higher values can improve quality but take longer
+    */
+   num_steps?: number;
+   /**
+    * Specify what to exclude from the generated images
+    */
+   negative_prompt?: string;
+ }
+ /**
+  * The generated image in JPEG format
+  */
+ type Ai_Cf_Leonardo_Phoenix_1_0_Output = string;
+ declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 {
+   inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input;
+   postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output;
+ }
+ interface Ai_Cf_Leonardo_Lucid_Origin_Input {
+   /**
+    * A text description of the image you want to generate.
+    */
+   prompt: string;
+   /**
+    * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt
+    */
+   guidance?: number;
+   /**
+    * Random seed for reproducibility of the image generation
+    */
+   seed?: number;
+   /**
+    * The height of the generated image in pixels
+    */
+   height?: number;
+   /**
+    * The width of the generated image in pixels
+    */
+   width?: number;
+   /**
+    * The number of diffusion steps; higher values can improve quality but take longer
+    */
+   num_steps?: number;
+   /**
+    * The number of diffusion steps; higher values can improve quality but take longer
+    */
+   steps?: number;
+ }
+ interface Ai_Cf_Leonardo_Lucid_Origin_Output {
+   /**
+    * The generated image in Base64 format.
+    */
+   image?: string;
+ }
+ declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin {
+   inputs: Ai_Cf_Leonardo_Lucid_Origin_Input;
+   postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output;
+ }
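
The two Leonardo entries differ in output: Phoenix is typed as a bare string documented as JPEG, Lucid Origin as an object with a base64 `image` field. A sketch for the latter; decoding details are assumptions:

    declare const env: { AI: Ai }; // assumed Workers env binding
    const img = await env.AI.run("@cf/leonardo/lucid-origin", {
      prompt: "a lighthouse at dawn, watercolor",
      width: 1024,
      height: 1024,
    });
    // Decode the base64 payload into bytes before storing or serving it.
    const bytes = img.image
      ? Uint8Array.from(atob(img.image), (c) => c.charCodeAt(0))
      : null;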
+ interface Ai_Cf_Deepgram_Aura_1_Input {
+   /**
+    * Speaker used to produce the audio.
+    */
+   speaker?:
+     | "angus"
+     | "asteria"
+     | "arcas"
+     | "orion"
+     | "orpheus"
+     | "athena"
+     | "luna"
+     | "zeus"
+     | "perseus"
+     | "helios"
+     | "hera"
+     | "stella";
+   /**
+    * Encoding of the output audio.
+    */
+   encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac";
+   /**
+    * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type..
+    */
+   container?: "none" | "wav" | "ogg";
+   /**
+    * The text content to be converted to speech
+    */
+   text: string;
+   /**
+    * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable
+    */
+   sample_rate?: number;
+   /**
+    * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type.
+    */
+   bit_rate?: number;
+ }
+ /**
+  * The generated audio in MP3 format
+  */
+ type Ai_Cf_Deepgram_Aura_1_Output = string;
+ declare abstract class Base_Ai_Cf_Deepgram_Aura_1 {
+   inputs: Ai_Cf_Deepgram_Aura_1_Input;
+   postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output;
+ }
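
A sketch of text-to-speech with the Aura 1 entry; the output is typed simply as a string documented as "MP3 format", so how you decode or serve the payload is an assumption:

    declare const env: { AI: Ai }; // assumed Workers env binding
    const speech = await env.AI.run("@cf/deepgram/aura-1", {
      text: "Deploy complete.",
      speaker: "asteria",
      encoding: "mp3",
    });
    // speech: string; inspect it (e.g. base64-decode) before returning audio.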
+ interface AiModels {
+   "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification;
+   "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage;
+   "@cf/runwayml/stable-diffusion-v1-5-inpainting": BaseAiTextToImage;
+   "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage;
+   "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage;
+   "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage;
+   "@cf/myshell-ai/melotts": BaseAiTextToSpeech;
+   "@cf/google/embeddinggemma-300m": BaseAiTextEmbeddings;
+   "@cf/microsoft/resnet-50": BaseAiImageClassification;
+   "@cf/meta/llama-2-7b-chat-int8": BaseAiTextGeneration;
+   "@cf/mistral/mistral-7b-instruct-v0.1": BaseAiTextGeneration;
+   "@cf/meta/llama-2-7b-chat-fp16": BaseAiTextGeneration;
+   "@hf/thebloke/llama-2-13b-chat-awq": BaseAiTextGeneration;
+   "@hf/thebloke/mistral-7b-instruct-v0.1-awq": BaseAiTextGeneration;
+   "@hf/thebloke/zephyr-7b-beta-awq": BaseAiTextGeneration;
+   "@hf/thebloke/openhermes-2.5-mistral-7b-awq": BaseAiTextGeneration;
+   "@hf/thebloke/neural-chat-7b-v3-1-awq": BaseAiTextGeneration;
+   "@hf/thebloke/llamaguard-7b-awq": BaseAiTextGeneration;
+   "@hf/thebloke/deepseek-coder-6.7b-base-awq": BaseAiTextGeneration;
+   "@hf/thebloke/deepseek-coder-6.7b-instruct-awq": BaseAiTextGeneration;
+   "@cf/deepseek-ai/deepseek-math-7b-instruct": BaseAiTextGeneration;
+   "@cf/defog/sqlcoder-7b-2": BaseAiTextGeneration;
+   "@cf/openchat/openchat-3.5-0106": BaseAiTextGeneration;
+   "@cf/tiiuae/falcon-7b-instruct": BaseAiTextGeneration;
+   "@cf/thebloke/discolm-german-7b-v1-awq": BaseAiTextGeneration;
+   "@cf/qwen/qwen1.5-0.5b-chat": BaseAiTextGeneration;
+   "@cf/qwen/qwen1.5-7b-chat-awq": BaseAiTextGeneration;
+   "@cf/qwen/qwen1.5-14b-chat-awq": BaseAiTextGeneration;
+   "@cf/tinyllama/tinyllama-1.1b-chat-v1.0": BaseAiTextGeneration;
+   "@cf/microsoft/phi-2": BaseAiTextGeneration;
+   "@cf/qwen/qwen1.5-1.8b-chat": BaseAiTextGeneration;
+   "@cf/mistral/mistral-7b-instruct-v0.2-lora": BaseAiTextGeneration;
+   "@hf/nousresearch/hermes-2-pro-mistral-7b": BaseAiTextGeneration;
+   "@hf/nexusflow/starling-lm-7b-beta": BaseAiTextGeneration;
+   "@hf/google/gemma-7b-it": BaseAiTextGeneration;
+   "@cf/meta-llama/llama-2-7b-chat-hf-lora": BaseAiTextGeneration;
+   "@cf/google/gemma-2b-it-lora": BaseAiTextGeneration;
+   "@cf/google/gemma-7b-it-lora": BaseAiTextGeneration;
+   "@hf/mistral/mistral-7b-instruct-v0.2": BaseAiTextGeneration;
+   "@cf/meta/llama-3-8b-instruct": BaseAiTextGeneration;
+   "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration;
+   "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration;
+   "@hf/meta-llama/meta-llama-3-8b-instruct": BaseAiTextGeneration;
+   "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration;
+   "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration;
+   "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration;
+   "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration;
+   "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration;
+   "@cf/facebook/bart-large-cnn": BaseAiSummarization;
+   "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText;
+   "@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5;
+   "@cf/openai/whisper": Base_Ai_Cf_Openai_Whisper;
+   "@cf/meta/m2m100-1.2b": Base_Ai_Cf_Meta_M2M100_1_2B;
+   "@cf/baai/bge-small-en-v1.5": Base_Ai_Cf_Baai_Bge_Small_En_V1_5;
+   "@cf/baai/bge-large-en-v1.5": Base_Ai_Cf_Baai_Bge_Large_En_V1_5;
+   "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;
+   "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En;
+   "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;
+   "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3;
+   "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;
+   "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;
+   "@cf/meta/llama-3.3-70b-instruct-fp8-fast": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast;
+   "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B;
+   "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base;
    "@cf/qwen/qwen2.5-coder-32b-instruct": Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct;
    "@cf/qwen/qwq-32b": Base_Ai_Cf_Qwen_Qwq_32B;
    "@cf/mistralai/mistral-small-3.1-24b-instruct": Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct;
    "@cf/google/gemma-3-12b-it": Base_Ai_Cf_Google_Gemma_3_12B_It;
    "@cf/meta/llama-4-scout-17b-16e-instruct": Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct;
+   "@cf/deepgram/nova-3": Base_Ai_Cf_Deepgram_Nova_3;
+   "@cf/pipecat-ai/smart-turn-v2": Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2;
+   "@cf/openai/gpt-oss-120b": Base_Ai_Cf_Openai_Gpt_Oss_120B;
+   "@cf/openai/gpt-oss-20b": Base_Ai_Cf_Openai_Gpt_Oss_20B;
+   "@cf/leonardo/phoenix-1.0": Base_Ai_Cf_Leonardo_Phoenix_1_0;
+   "@cf/leonardo/lucid-origin": Base_Ai_Cf_Leonardo_Lucid_Origin;
+   "@cf/deepgram/aura-1": Base_Ai_Cf_Deepgram_Aura_1;
  }
  type AiOptions = {
    /**
@@ -6246,6 +7120,10 @@ type AiOptions = {
     * https://developers.cloudflare.com/workers-ai/features/batch-api
     */
    queueRequest?: boolean;
+   /**
+    * Establish websocket connections, only works for supported models
+    */
+   websocket?: boolean;
    gateway?: GatewayOptions;
    returnRawResponse?: boolean;
    prefix?: string;
@@ -6289,7 +7167,7 @@ type AiModelListType = Record<string, any>;
  declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
    aiGatewayLogId: string | null;
    gateway(gatewayId: string): AiGateway;
-   autorag(autoragId?: string): AutoRAG;
+   autorag(autoragId: string): AutoRAG;
    run<
      Name extends keyof AiModelList,
      Options extends AiOptions,
@@ -6299,9 +7177,13 @@ declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
      inputs: InputOptions,
      options?: Options,
    ): Promise<
-     Options extends {
-       returnRawResponse: true;
-     }
+     Options extends
+       | {
+           returnRawResponse: true;
+         }
+       | {
+           websocket: true;
+         }
        ? Response
        : InputOptions extends {
            stream: true;
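
The widened conditional above means `websocket: true` now behaves like `returnRawResponse: true` at the type level: `run()` resolves to a raw `Response`. A sketch (binding, model choice, and upgrade handling are assumptions):

    declare const env: { AI: Ai }; // assumed Workers env binding
    declare const liveAudio: ReadableStream; // assumed audio source
    const resp = await env.AI.run(
      "@cf/deepgram/nova-3",
      { audio: { body: liveAudio, contentType: "audio/l16" } },
      { websocket: true },
    );
    // resp is typed as Response; a websocket-capable model is expected to
    // surface the upgraded connection on resp.webSocket in the Workers runtime.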
@@ -8545,7 +9427,6 @@ declare namespace TailStream {
  }
  interface JsRpcEventInfo {
    readonly type: "jsrpc";
-   readonly methodName: string;
  }
  interface ScheduledEventInfo {
    readonly type: "scheduled";