@workers-community/workers-types 4.20250402.0 → 4.20250403.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/index.d.ts +128 -3
  2. package/index.ts +130 -3
  3. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -3554,6 +3554,28 @@ declare abstract class BaseAiImageToText {
3554
3554
  inputs: AiImageToTextInput;
3555
3555
  postProcessedOutputs: AiImageToTextOutput;
3556
3556
  }
3557
+ type AiImageTextToTextInput = {
3558
+ image: string;
3559
+ prompt?: string;
3560
+ max_tokens?: number;
3561
+ temperature?: number;
3562
+ ignore_eos?: boolean;
3563
+ top_p?: number;
3564
+ top_k?: number;
3565
+ seed?: number;
3566
+ repetition_penalty?: number;
3567
+ frequency_penalty?: number;
3568
+ presence_penalty?: number;
3569
+ raw?: boolean;
3570
+ messages?: RoleScopedChatInput[];
3571
+ };
3572
+ type AiImageTextToTextOutput = {
3573
+ description: string;
3574
+ };
3575
+ declare abstract class BaseAiImageTextToText {
3576
+ inputs: AiImageTextToTextInput;
3577
+ postProcessedOutputs: AiImageTextToTextOutput;
3578
+ }
3557
3579
  type AiObjectDetectionInput = {
3558
3580
  image: number[];
3559
3581
  };
@@ -3964,6 +3986,72 @@ declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo {
3964
3986
  inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input;
3965
3987
  postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output;
3966
3988
  }
3989
+ type Ai_Cf_Baai_Bge_M3_Input = BGEM3InputQueryAndContexts | BGEM3InputEmbedding;
3990
+ interface BGEM3InputQueryAndContexts {
3991
+ /**
3992
+ * A query you wish to perform against the provided contexts. If no query is provided the model will respond with embeddings for contexts
3993
+ */
3994
+ query?: string;
3995
+ /**
3996
+ * List of provided contexts. Note that the index in this array is important, as the response will refer to it.
3997
+ */
3998
+ contexts: {
3999
+ /**
4000
+ * One of the provided context content
4001
+ */
4002
+ text?: string;
4003
+ }[];
4004
+ /**
4005
+ * When provided with too long context should the model error out or truncate the context to fit?
4006
+ */
4007
+ truncate_inputs?: boolean;
4008
+ }
4009
+ interface BGEM3InputEmbedding {
4010
+ text: string | string[];
4011
+ /**
4012
+ * When provided with too long context should the model error out or truncate the context to fit?
4013
+ */
4014
+ truncate_inputs?: boolean;
4015
+ }
4016
+ type Ai_Cf_Baai_Bge_M3_Output =
4017
+ | BGEM3OuputQuery
4018
+ | BGEM3OutputEmbeddingForContexts
4019
+ | BGEM3OuputEmbedding;
4020
+ interface BGEM3OuputQuery {
4021
+ response?: {
4022
+ /**
4023
+ * Index of the context in the request
4024
+ */
4025
+ id?: number;
4026
+ /**
4027
+ * Score of the context under the index.
4028
+ */
4029
+ score?: number;
4030
+ }[];
4031
+ }
4032
+ interface BGEM3OutputEmbeddingForContexts {
4033
+ response?: number[][];
4034
+ shape?: number[];
4035
+ /**
4036
+ * The pooling method used in the embedding process.
4037
+ */
4038
+ pooling?: "mean" | "cls";
4039
+ }
4040
+ interface BGEM3OuputEmbedding {
4041
+ shape?: number[];
4042
+ /**
4043
+ * Embeddings of the requested text values
4044
+ */
4045
+ data?: number[][];
4046
+ /**
4047
+ * The pooling method used in the embedding process.
4048
+ */
4049
+ pooling?: "mean" | "cls";
4050
+ }
4051
+ declare abstract class Base_Ai_Cf_Baai_Bge_M3 {
4052
+ inputs: Ai_Cf_Baai_Bge_M3_Input;
4053
+ postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output;
4054
+ }
3967
4055
  interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input {
3968
4056
  /**
3969
4057
  * A text description of the image you want to generate.
@@ -4274,6 +4362,40 @@ declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B {
4274
4362
  inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input;
4275
4363
  postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output;
4276
4364
  }
4365
+ interface Ai_Cf_Baai_Bge_Reranker_Base_Input {
4366
+ /**
4367
+ * A query you wish to perform against the provided contexts.
4368
+ */
4369
+ /**
4370
+ * Number of returned results starting with the best score.
4371
+ */
4372
+ top_k?: number;
4373
+ /**
4374
+ * List of provided contexts. Note that the index in this array is important, as the response will refer to it.
4375
+ */
4376
+ contexts: {
4377
+ /**
4378
+ * One of the provided context content
4379
+ */
4380
+ text?: string;
4381
+ }[];
4382
+ }
4383
+ interface Ai_Cf_Baai_Bge_Reranker_Base_Output {
4384
+ response?: {
4385
+ /**
4386
+ * Index of the context in the request
4387
+ */
4388
+ id?: number;
4389
+ /**
4390
+ * Score of the context under the index.
4391
+ */
4392
+ score?: number;
4393
+ }[];
4394
+ }
4395
+ declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base {
4396
+ inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input;
4397
+ postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output;
4398
+ }
4277
4399
  interface AiModels {
4278
4400
  "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification;
4279
4401
  "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage;
@@ -4281,6 +4403,7 @@ interface AiModels {
4281
4403
  "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage;
4282
4404
  "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage;
4283
4405
  "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage;
4406
+ "@cf/myshell-ai/melotts": BaseAiTextToSpeech;
4284
4407
  "@cf/baai/bge-base-en-v1.5": BaseAiTextEmbeddings;
4285
4408
  "@cf/baai/bge-small-en-v1.5": BaseAiTextEmbeddings;
4286
4409
  "@cf/baai/bge-large-en-v1.5": BaseAiTextEmbeddings;
@@ -4334,9 +4457,11 @@ interface AiModels {
4334
4457
  "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;
4335
4458
  "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En;
4336
4459
  "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;
4460
+ "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3;
4337
4461
  "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;
4338
4462
  "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;
4339
4463
  "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B;
4464
+ "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base;
4340
4465
  }
4341
4466
  type AiOptions = {
4342
4467
  gateway?: GatewayOptions;
@@ -4394,8 +4519,8 @@ declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
4394
4519
  ? Response
4395
4520
  : AiModelList[Name]["postProcessedOutputs"]
4396
4521
  >;
4397
- public models(params?: AiModelsSearchParams): Promise<AiModelsSearchObject[]>;
4398
- public toMarkdown(
4522
+ models(params?: AiModelsSearchParams): Promise<AiModelsSearchObject[]>;
4523
+ toMarkdown(
4399
4524
  files: {
4400
4525
  name: string;
4401
4526
  blob: Blob;
@@ -4405,7 +4530,7 @@ declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
4405
4530
  extraHeaders?: object;
4406
4531
  },
4407
4532
  ): Promise<ConversionResponse[]>;
4408
- public toMarkdown(
4533
+ toMarkdown(
4409
4534
  files: {
4410
4535
  name: string;
4411
4536
  blob: Blob;
package/index.ts CHANGED
@@ -3566,6 +3566,28 @@ export declare abstract class BaseAiImageToText {
3566
3566
  inputs: AiImageToTextInput;
3567
3567
  postProcessedOutputs: AiImageToTextOutput;
3568
3568
  }
3569
+ export type AiImageTextToTextInput = {
3570
+ image: string;
3571
+ prompt?: string;
3572
+ max_tokens?: number;
3573
+ temperature?: number;
3574
+ ignore_eos?: boolean;
3575
+ top_p?: number;
3576
+ top_k?: number;
3577
+ seed?: number;
3578
+ repetition_penalty?: number;
3579
+ frequency_penalty?: number;
3580
+ presence_penalty?: number;
3581
+ raw?: boolean;
3582
+ messages?: RoleScopedChatInput[];
3583
+ };
3584
+ export type AiImageTextToTextOutput = {
3585
+ description: string;
3586
+ };
3587
+ export declare abstract class BaseAiImageTextToText {
3588
+ inputs: AiImageTextToTextInput;
3589
+ postProcessedOutputs: AiImageTextToTextOutput;
3590
+ }
3569
3591
  export type AiObjectDetectionInput = {
3570
3592
  image: number[];
3571
3593
  };
@@ -3976,6 +3998,74 @@ export declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo {
3976
3998
  inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input;
3977
3999
  postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output;
3978
4000
  }
4001
+ export type Ai_Cf_Baai_Bge_M3_Input =
4002
+ | BGEM3InputQueryAndContexts
4003
+ | BGEM3InputEmbedding;
4004
+ export interface BGEM3InputQueryAndContexts {
4005
+ /**
4006
+ * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts
4007
+ */
4008
+ query?: string;
4009
+ /**
4010
+ * List of provided contexts. Note that the index in this array is important, as the response will refer to it.
4011
+ */
4012
+ contexts: {
4013
+ /**
4014
+ * One of the provided context content
4015
+ */
4016
+ text?: string;
4017
+ }[];
4018
+ /**
4019
+ * When provided with too long context should the model error out or truncate the context to fit?
4020
+ */
4021
+ truncate_inputs?: boolean;
4022
+ }
4023
+ export interface BGEM3InputEmbedding {
4024
+ text: string | string[];
4025
+ /**
4026
+ * When provided with too long context should the model error out or truncate the context to fit?
4027
+ */
4028
+ truncate_inputs?: boolean;
4029
+ }
4030
+ export type Ai_Cf_Baai_Bge_M3_Output =
4031
+ | BGEM3OuputQuery
4032
+ | BGEM3OutputEmbeddingForContexts
4033
+ | BGEM3OuputEmbedding;
4034
+ export interface BGEM3OuputQuery {
4035
+ response?: {
4036
+ /**
4037
+ * Index of the context in the request
4038
+ */
4039
+ id?: number;
4040
+ /**
4041
+ * Score of the context under the index.
4042
+ */
4043
+ score?: number;
4044
+ }[];
4045
+ }
4046
+ export interface BGEM3OutputEmbeddingForContexts {
4047
+ response?: number[][];
4048
+ shape?: number[];
4049
+ /**
4050
+ * The pooling method used in the embedding process.
4051
+ */
4052
+ pooling?: "mean" | "cls";
4053
+ }
4054
+ export interface BGEM3OuputEmbedding {
4055
+ shape?: number[];
4056
+ /**
4057
+ * Embeddings of the requested text values
4058
+ */
4059
+ data?: number[][];
4060
+ /**
4061
+ * The pooling method used in the embedding process.
4062
+ */
4063
+ pooling?: "mean" | "cls";
4064
+ }
4065
+ export declare abstract class Base_Ai_Cf_Baai_Bge_M3 {
4066
+ inputs: Ai_Cf_Baai_Bge_M3_Input;
4067
+ postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output;
4068
+ }
3979
4069
  export interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input {
3980
4070
  /**
3981
4071
  * A text description of the image you want to generate.
@@ -4286,6 +4376,40 @@ export declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B {
4286
4376
  inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input;
4287
4377
  postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output;
4288
4378
  }
4379
+ export interface Ai_Cf_Baai_Bge_Reranker_Base_Input {
4380
+ /**
4381
+ * A query you wish to perform against the provided contexts.
4382
+ */
4383
+ /**
4384
+ * Number of returned results starting with the best score.
4385
+ */
4386
+ top_k?: number;
4387
+ /**
4388
+ * List of provided contexts. Note that the index in this array is important, as the response will refer to it.
4389
+ */
4390
+ contexts: {
4391
+ /**
4392
+ * One of the provided context content
4393
+ */
4394
+ text?: string;
4395
+ }[];
4396
+ }
4397
+ export interface Ai_Cf_Baai_Bge_Reranker_Base_Output {
4398
+ response?: {
4399
+ /**
4400
+ * Index of the context in the request
4401
+ */
4402
+ id?: number;
4403
+ /**
4404
+ * Score of the context under the index.
4405
+ */
4406
+ score?: number;
4407
+ }[];
4408
+ }
4409
+ export declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base {
4410
+ inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input;
4411
+ postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output;
4412
+ }
4289
4413
  export interface AiModels {
4290
4414
  "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification;
4291
4415
  "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage;
@@ -4293,6 +4417,7 @@ export interface AiModels {
4293
4417
  "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage;
4294
4418
  "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage;
4295
4419
  "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage;
4420
+ "@cf/myshell-ai/melotts": BaseAiTextToSpeech;
4296
4421
  "@cf/baai/bge-base-en-v1.5": BaseAiTextEmbeddings;
4297
4422
  "@cf/baai/bge-small-en-v1.5": BaseAiTextEmbeddings;
4298
4423
  "@cf/baai/bge-large-en-v1.5": BaseAiTextEmbeddings;
@@ -4346,9 +4471,11 @@ export interface AiModels {
4346
4471
  "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;
4347
4472
  "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En;
4348
4473
  "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;
4474
+ "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3;
4349
4475
  "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;
4350
4476
  "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;
4351
4477
  "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B;
4478
+ "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base;
4352
4479
  }
4353
4480
  export type AiOptions = {
4354
4481
  gateway?: GatewayOptions;
@@ -4408,8 +4535,8 @@ export declare abstract class Ai<
4408
4535
  ? Response
4409
4536
  : AiModelList[Name]["postProcessedOutputs"]
4410
4537
  >;
4411
- public models(params?: AiModelsSearchParams): Promise<AiModelsSearchObject[]>;
4412
- public toMarkdown(
4538
+ models(params?: AiModelsSearchParams): Promise<AiModelsSearchObject[]>;
4539
+ toMarkdown(
4413
4540
  files: {
4414
4541
  name: string;
4415
4542
  blob: Blob;
@@ -4419,7 +4546,7 @@ export declare abstract class Ai<
4419
4546
  extraHeaders?: object;
4420
4547
  },
4421
4548
  ): Promise<ConversionResponse[]>;
4422
- public toMarkdown(
4549
+ toMarkdown(
4423
4550
  files: {
4424
4551
  name: string;
4425
4552
  blob: Blob;
package/package.json CHANGED
@@ -7,7 +7,7 @@
7
7
  },
8
8
  "author": "Workers Community",
9
9
  "license": "MIT OR Apache-2.0",
10
- "version": "4.20250402.0",
10
+ "version": "4.20250403.0",
11
11
  "exports": {
12
12
  ".": {
13
13
  "types": "./index.d.ts",