graphlit-client 1.0.20241007002 → 1.0.20241021001

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -304,6 +304,8 @@ export type AmazonFeedPropertiesUpdateInput = {
304
304
  /** Represents Anthropic model properties. */
305
305
  export type AnthropicModelProperties = {
306
306
  __typename?: 'AnthropicModelProperties';
307
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
308
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
307
309
  /** The limit of tokens generated by prompt completion. */
308
310
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
309
311
  /** The Anthropic API key, if using developer's own account. */
@@ -499,6 +501,8 @@ export type AudioMetadataInput = {
499
501
  /** Represents Azure AI model properties. */
500
502
  export type AzureAiModelProperties = {
501
503
  __typename?: 'AzureAIModelProperties';
504
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
505
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
502
506
  /** The limit of tokens generated by prompt completion. */
503
507
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
504
508
  /** The Azure AI API endpoint. */
@@ -514,6 +518,8 @@ export type AzureAiModelProperties = {
514
518
  };
515
519
  /** Represents Azure AI model properties. */
516
520
  export type AzureAiModelPropertiesInput = {
521
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
522
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
517
523
  /** The limit of tokens generated by prompt completion. */
518
524
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
519
525
  /** The Azure AI API endpoint. */
@@ -529,6 +535,8 @@ export type AzureAiModelPropertiesInput = {
529
535
  };
530
536
  /** Represents Azure AI model properties. */
531
537
  export type AzureAiModelPropertiesUpdateInput = {
538
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
539
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
532
540
  /** The limit of tokens generated by prompt completion. */
533
541
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
534
542
  /** The Azure AI API endpoint. */
@@ -692,6 +700,8 @@ export type AzureImageExtractionPropertiesInput = {
692
700
  /** Represents Azure OpenAI model properties. */
693
701
  export type AzureOpenAiModelProperties = {
694
702
  __typename?: 'AzureOpenAIModelProperties';
703
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
704
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
695
705
  /** The limit of tokens generated by prompt completion. */
696
706
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
697
707
  /** The Azure OpenAI deployment name, if using developer's own account. */
@@ -711,6 +721,8 @@ export type AzureOpenAiModelProperties = {
711
721
  };
712
722
  /** Represents Azure OpenAI model properties. */
713
723
  export type AzureOpenAiModelPropertiesInput = {
724
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
725
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
714
726
  /** The limit of tokens generated by prompt completion. */
715
727
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
716
728
  /** The Azure OpenAI deployment name, if using developer's own account. */
@@ -730,6 +742,8 @@ export type AzureOpenAiModelPropertiesInput = {
730
742
  };
731
743
  /** Represents Azure OpenAI model properties. */
732
744
  export type AzureOpenAiModelPropertiesUpdateInput = {
745
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
746
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
733
747
  /** The limit of tokens generated by prompt completion. */
734
748
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
735
749
  /** The Azure OpenAI deployment name, if using developer's own account. */
@@ -751,11 +765,11 @@ export type AzureOpenAiModelPropertiesUpdateInput = {
751
765
  export declare enum AzureOpenAiModels {
752
766
  /** Developer-specified deployment */
753
767
  Custom = "CUSTOM",
754
- /** GPT-4 (Latest version) */
768
+ /** GPT-4 (Latest) */
755
769
  Gpt4 = "GPT4",
756
- /** GPT-4 Turbo 128k (Latest version) */
770
+ /** GPT-4 Turbo 128k (Latest) */
757
771
  Gpt4Turbo_128K = "GPT4_TURBO_128K",
758
- /** GPT-3.5 Turbo 16k (Latest version) */
772
+ /** GPT-3.5 Turbo 16k (Latest) */
759
773
  Gpt35Turbo_16K = "GPT35_TURBO_16K"
760
774
  }
761
775
  /** Represents an Azure Cognitive Services text entity extraction connector. */
@@ -973,6 +987,8 @@ export declare enum CerebrasModels {
973
987
  /** Represents Cohere model properties. */
974
988
  export type CohereModelProperties = {
975
989
  __typename?: 'CohereModelProperties';
990
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
991
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
976
992
  /** The limit of tokens generated by prompt completion. */
977
993
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
978
994
  /** The Cohere API key, if using developer's own account. */
@@ -990,6 +1006,8 @@ export type CohereModelProperties = {
990
1006
  };
991
1007
  /** Represents Cohere model properties. */
992
1008
  export type CohereModelPropertiesInput = {
1009
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
1010
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
993
1011
  /** The limit of tokens generated by prompt completion. */
994
1012
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
995
1013
  /** The Cohere API key, if using developer's own account. */
@@ -1007,6 +1025,8 @@ export type CohereModelPropertiesInput = {
1007
1025
  };
1008
1026
  /** Represents Cohere model properties. */
1009
1027
  export type CohereModelPropertiesUpdateInput = {
1028
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
1029
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
1010
1030
  /** The limit of tokens generated by prompt completion. */
1011
1031
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
1012
1032
  /** The Cohere API key, if using developer's own account. */
@@ -1037,7 +1057,11 @@ export declare enum CohereModels {
1037
1057
  /** Command R+ (2024-08 version) */
1038
1058
  CommandRPlus_202408 = "COMMAND_R_PLUS_202408",
1039
1059
  /** Developer-specified model */
1040
- Custom = "CUSTOM"
1060
+ Custom = "CUSTOM",
1061
+ /** Embed English 3.0 */
1062
+ EmbedEnglish_3_0 = "EMBED_ENGLISH_3_0",
1063
+ /** Embed Multilingual 3.0 */
1064
+ EmbedMultilingual_3_0 = "EMBED_MULTILINGUAL_3_0"
1041
1065
  }
1042
1066
  /** Represents a collection. */
1043
1067
  export type Collection = {
@@ -1837,14 +1861,10 @@ export declare enum ConversationSearchTypes {
1837
1861
  /** Represents a conversation strategy. */
1838
1862
  export type ConversationStrategy = {
1839
1863
  __typename?: 'ConversationStrategy';
1840
- /** @deprecated Moved to retrieval strategy. */
1841
- contentLimit?: Maybe<Scalars['Int']['output']>;
1842
1864
  /** The weight of contents within prompt context, in range [0.0 - 1.0]. */
1843
1865
  contentsWeight?: Maybe<Scalars['Float']['output']>;
1844
1866
  /** Embed content citations into completed conversation messages. */
1845
1867
  embedCitations?: Maybe<Scalars['Boolean']['output']>;
1846
- /** @deprecated Moved to retrieval strategy. */
1847
- enableExpandedRetrieval?: Maybe<Scalars['Boolean']['output']>;
1848
1868
  /** Provide content facets with completed conversation. */
1849
1869
  enableFacets?: Maybe<Scalars['Boolean']['output']>;
1850
1870
  /** The maximum number of retrieval user messages to provide with prompt context. Defaults to 5. */
@@ -2437,13 +2457,19 @@ export type EmailPreparationPropertiesInput = {
2437
2457
  /** Represents the embeddings strategy. */
2438
2458
  export type EmbeddingsStrategy = {
2439
2459
  __typename?: 'EmbeddingsStrategy';
2440
- /** The limit of tokens per embedded text chunk, defaults to 600. */
2460
+ /** @deprecated The limit of tokens per embedded text chunk has been removed from embeddings strategy. Assign in text embeddings specification instead. */
2441
2461
  chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
2462
+ /** The LLM specification used for image embeddings. */
2463
+ imageSpecification?: Maybe<EntityReference>;
2464
+ /** The LLM specification used for text embeddings. */
2465
+ textSpecification?: Maybe<EntityReference>;
2442
2466
  };
2443
2467
  /** Represents the embeddings strategy. */
2444
2468
  export type EmbeddingsStrategyInput = {
2445
- /** The limit of tokens per embedded text chunk, defaults to 600. */
2446
- chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
2469
+ /** The LLM specification used for image embeddings. */
2470
+ imageSpecification?: InputMaybe<EntityReferenceInput>;
2471
+ /** The LLM specification used for text embeddings. */
2472
+ textSpecification?: InputMaybe<EntityReferenceInput>;
2447
2473
  };
2448
2474
  /** Represents an enrichment workflow job. */
2449
2475
  export type EnrichmentWorkflowJob = {
@@ -2520,10 +2546,7 @@ export type EntityExtractionConnector = {
2520
2546
  modelImage?: Maybe<ModelImageExtractionProperties>;
2521
2547
  /** The specific properties for LLM text entity extraction. */
2522
2548
  modelText?: Maybe<ModelTextExtractionProperties>;
2523
- /**
2524
- * The specific properties for OpenAI image entity extraction.
2525
- * @deprecated Use MODEL_IMAGE instead.
2526
- */
2549
+ /** @deprecated The specific properties for OpenAI image entity extraction have been removed. Use LLM image entity extraction instead. */
2527
2550
  openAIImage?: Maybe<OpenAiImageExtractionProperties>;
2528
2551
  /** The entity extraction connector service type. */
2529
2552
  type: EntityExtractionServiceTypes;
@@ -3187,6 +3210,8 @@ export declare enum FeedServiceTypes {
3187
3210
  AzureBlob = "AZURE_BLOB",
3188
3211
  /** Azure File feed service */
3189
3212
  AzureFile = "AZURE_FILE",
3213
+ /** GitHub feed service */
3214
+ GitHub = "GIT_HUB",
3190
3215
  /** GitHub Issues feed service */
3191
3216
  GitHubIssues = "GIT_HUB_ISSUES",
3192
3217
  /** Google Cloud Blob feed service */
@@ -3303,7 +3328,7 @@ export type FilePreparationConnectorInput = {
3303
3328
  /** The file types to be prepared. */
3304
3329
  fileTypes?: InputMaybe<Array<FileTypes>>;
3305
3330
  /** The specific properties for LLM document preparation. */
3306
- modelDocument?: InputMaybe<ModelDocumentPreparationInputProperties>;
3331
+ modelDocument?: InputMaybe<ModelDocumentPreparationPropertiesInput>;
3307
3332
  /** The file preparation service type. */
3308
3333
  type: FilePreparationServiceTypes;
3309
3334
  };
@@ -3374,6 +3399,46 @@ export type GeometryMetadataInput = {
3374
3399
  /** The geometry vertex count. */
3375
3400
  vertexCount?: InputMaybe<Scalars['Long']['input']>;
3376
3401
  };
3402
+ /** Represents GitHub properties. */
3403
+ export type GitHubFeedProperties = {
3404
+ __typename?: 'GitHubFeedProperties';
3405
+ /** GitHub personal access token. Either refresh token or personal access token is required to avoid GitHub rate-limiting. */
3406
+ personalAccessToken?: Maybe<Scalars['String']['output']>;
3407
+ /** GitHub refresh token. Either refresh token or personal access token is required to avoid GitHub rate-limiting. */
3408
+ refreshToken?: Maybe<Scalars['String']['output']>;
3409
+ /** GitHub repository name. */
3410
+ repositoryName: Scalars['String']['output'];
3411
+ /** GitHub repository owner. */
3412
+ repositoryOwner: Scalars['String']['output'];
3413
+ /** GitHub Enterprise URI, optional. */
3414
+ uri?: Maybe<Scalars['URL']['output']>;
3415
+ };
3416
+ /** Represents GitHub properties. */
3417
+ export type GitHubFeedPropertiesInput = {
3418
+ /** GitHub personal access token. Either refresh token or personal access token is required to avoid GitHub rate-limiting. */
3419
+ personalAccessToken?: InputMaybe<Scalars['String']['input']>;
3420
+ /** GitHub refresh token. Either refresh token or personal access token is required to avoid GitHub rate-limiting. */
3421
+ refreshToken?: InputMaybe<Scalars['String']['input']>;
3422
+ /** GitHub repository name. */
3423
+ repositoryName: Scalars['String']['input'];
3424
+ /** GitHub repository owner. */
3425
+ repositoryOwner: Scalars['String']['input'];
3426
+ /** GitHub Enterprise URI, optional. */
3427
+ uri?: InputMaybe<Scalars['URL']['input']>;
3428
+ };
3429
+ /** Represents GitHub properties. */
3430
+ export type GitHubFeedPropertiesUpdateInput = {
3431
+ /** GitHub personal access token. Either refresh token or personal access token is required to avoid GitHub rate-limiting. */
3432
+ personalAccessToken?: InputMaybe<Scalars['String']['input']>;
3433
+ /** GitHub refresh token. Either refresh token or personal access token is required to avoid GitHub rate-limiting. */
3434
+ refreshToken?: InputMaybe<Scalars['String']['input']>;
3435
+ /** GitHub repository name. */
3436
+ repositoryName?: InputMaybe<Scalars['String']['input']>;
3437
+ /** GitHub repository owner. */
3438
+ repositoryOwner?: InputMaybe<Scalars['String']['input']>;
3439
+ /** GitHub Enterprise URI, optional. */
3440
+ uri?: InputMaybe<Scalars['URL']['input']>;
3441
+ };
3377
3442
  /** Represents GitHub Issues feed properties. */
3378
3443
  export type GitHubIssuesFeedProperties = {
3379
3444
  __typename?: 'GitHubIssuesFeedProperties';
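
GitHub joins the feed services in this release: a repository can now be crawled as a site feed, alongside the existing GitHub Issues feed. Below is a minimal sketch of the new feed properties; the repository coordinates and token are placeholders, and the import path is an assumption about how the package re-exports its generated types. These properties attach to the github field of SiteFeedPropertiesInput, which is added later in this diff.

import { GitHubFeedPropertiesInput } from 'graphlit-client'; // assumed re-export of the generated input types

const github: GitHubFeedPropertiesInput = {
  repositoryOwner: 'example-org',   // placeholder owner
  repositoryName: 'example-repo',   // placeholder repository
  // Either a refresh token or a personal access token avoids GitHub rate-limiting.
  personalAccessToken: '<personal access token>', // placeholder value
};
// Attached as the github field of a SiteFeedPropertiesInput when creating a site feed
// with the new FeedServiceTypes.GitHub service type; the full feed input shape is not shown in this hunk.
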
@@ -3513,6 +3578,8 @@ export type GoogleFeedPropertiesUpdateInput = {
3513
3578
  /** Represents Google model properties. */
3514
3579
  export type GoogleModelProperties = {
3515
3580
  __typename?: 'GoogleModelProperties';
3581
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
3582
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
3516
3583
  /** The limit of tokens generated by prompt completion. */
3517
3584
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
3518
3585
  /** The Google API key, if using developer's own account. */
@@ -3530,6 +3597,8 @@ export type GoogleModelProperties = {
3530
3597
  };
3531
3598
  /** Represents Google model properties. */
3532
3599
  export type GoogleModelPropertiesInput = {
3600
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
3601
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
3533
3602
  /** The limit of tokens generated by prompt completion. */
3534
3603
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
3535
3604
  /** The Google API key, if using developer's own account. */
@@ -3547,6 +3616,8 @@ export type GoogleModelPropertiesInput = {
3547
3616
  };
3548
3617
  /** Represents Google model properties. */
3549
3618
  export type GoogleModelPropertiesUpdateInput = {
3619
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
3620
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
3550
3621
  /** The limit of tokens generated by prompt completion. */
3551
3622
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
3552
3623
  /** The Google API key, if using developer's own account. */
@@ -3566,6 +3637,8 @@ export type GoogleModelPropertiesUpdateInput = {
3566
3637
  export declare enum GoogleModels {
3567
3638
  /** Developer-specified model */
3568
3639
  Custom = "CUSTOM",
3640
+ /** Embed (004 version) */
3641
+ Embedding_004 = "EMBEDDING_004",
3569
3642
  /** Gemini 1.5 Flash (Latest) */
3570
3643
  Gemini_1_5Flash = "GEMINI_1_5_FLASH",
3571
3644
  /** Gemini 1.5 Flash (001 version) */
@@ -4151,6 +4224,51 @@ export type IssueMetadataInput = {
4151
4224
  /** The issue type, i.e. epic, story, task. */
4152
4225
  type?: InputMaybe<Scalars['String']['input']>;
4153
4226
  };
4227
+ /** Represents Jina model properties. */
4228
+ export type JinaModelProperties = {
4229
+ __typename?: 'JinaModelProperties';
4230
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
4231
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
4232
+ /** The Jina API key, if using developer's own account. */
4233
+ key?: Maybe<Scalars['String']['output']>;
4234
+ /** The Jina model, or custom, when using developer's own account. */
4235
+ model: JinaModels;
4236
+ /** The Jina model name, if using developer's own account. */
4237
+ modelName?: Maybe<Scalars['String']['output']>;
4238
+ };
4239
+ /** Represents Jina model properties. */
4240
+ export type JinaModelPropertiesInput = {
4241
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
4242
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
4243
+ /** The Jina API key, if using developer's own account. */
4244
+ key?: InputMaybe<Scalars['String']['input']>;
4245
+ /** The Jina model, or custom, when using developer's own account. */
4246
+ model: JinaModels;
4247
+ /** The Jina model name, if using developer's own account. */
4248
+ modelName?: InputMaybe<Scalars['String']['input']>;
4249
+ };
4250
+ /** Represents Jina model properties. */
4251
+ export type JinaModelPropertiesUpdateInput = {
4252
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
4253
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
4254
+ /** The Jina API key, if using developer's own account. */
4255
+ key?: InputMaybe<Scalars['String']['input']>;
4256
+ /** The Jina model, or custom, when using developer's own account. */
4257
+ model?: InputMaybe<JinaModels>;
4258
+ /** The Jina model name, if using developer's own account. */
4259
+ modelName?: InputMaybe<Scalars['String']['input']>;
4260
+ };
4261
+ /** Jina model type */
4262
+ export declare enum JinaModels {
4263
+ /** CLIP Image */
4264
+ ClipImage = "CLIP_IMAGE",
4265
+ /** Developer-specified model */
4266
+ Custom = "CUSTOM",
4267
+ /** Embed (Latest) */
4268
+ Embed = "EMBED",
4269
+ /** Embed 3.0 */
4270
+ Embed_3_0 = "EMBED_3_0"
4271
+ }
4154
4272
  /** Represents a label. */
4155
4273
  export type Label = {
4156
4274
  __typename?: 'Label';
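
Jina is added as a model service, primarily for embeddings (including CLIP image embeddings). A minimal sketch of the new model properties follows; the import path is assumed and the token limit is illustrative.

import { JinaModelPropertiesInput, JinaModels } from 'graphlit-client'; // assumed re-export of the generated types

// Supplied as the jina field of a SpecificationInput (see the specification changes later in this diff).
const jina: JinaModelPropertiesInput = {
  model: JinaModels.Embed,
  chunkTokenLimit: 600, // matches the documented default
};
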
@@ -6212,6 +6330,8 @@ export type MicrosoftTeamsTeamsInput = {
6212
6330
  /** Represents Mistral model properties. */
6213
6331
  export type MistralModelProperties = {
6214
6332
  __typename?: 'MistralModelProperties';
6333
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
6334
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
6215
6335
  /** The limit of tokens generated by prompt completion. */
6216
6336
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
6217
6337
  /** The Mistral API endpoint, if using developer's own account. */
@@ -6231,6 +6351,8 @@ export type MistralModelProperties = {
6231
6351
  };
6232
6352
  /** Represents Mistral model properties. */
6233
6353
  export type MistralModelPropertiesInput = {
6354
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
6355
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
6234
6356
  /** The limit of tokens generated by prompt completion. */
6235
6357
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
6236
6358
  /** The Mistral API endpoint, if using developer's own account. */
@@ -6250,6 +6372,8 @@ export type MistralModelPropertiesInput = {
6250
6372
  };
6251
6373
  /** Represents Mistral model properties. */
6252
6374
  export type MistralModelPropertiesUpdateInput = {
6375
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
6376
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
6253
6377
  /** The limit of tokens generated by prompt completion. */
6254
6378
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
6255
6379
  /** The Mistral API endpoint, if using developer's own account. */
@@ -6271,6 +6395,8 @@ export type MistralModelPropertiesUpdateInput = {
6271
6395
  export declare enum MistralModels {
6272
6396
  /** Developer-specified model */
6273
6397
  Custom = "CUSTOM",
6398
+ /** Mistral Embed */
6399
+ MistralEmbed = "MISTRAL_EMBED",
6274
6400
  /** Mistral Large */
6275
6401
  MistralLarge = "MISTRAL_LARGE",
6276
6402
  /** Mistral Medium */
@@ -6285,16 +6411,16 @@ export declare enum MistralModels {
6285
6411
  Pixtral_12B_2409 = "PIXTRAL_12B_2409"
6286
6412
  }
6287
6413
  /** Represents the LLM document preparation properties. */
6288
- export type ModelDocumentPreparationInputProperties = {
6289
- /** The LLM specification, optional. */
6290
- specification?: InputMaybe<EntityReferenceInput>;
6291
- };
6292
- /** Represents the LLM document preparation properties. */
6293
6414
  export type ModelDocumentPreparationProperties = {
6294
6415
  __typename?: 'ModelDocumentPreparationProperties';
6295
6416
  /** The LLM specification, optional. */
6296
6417
  specification?: Maybe<EntityReference>;
6297
6418
  };
6419
+ /** Represents the LLM document preparation properties. */
6420
+ export type ModelDocumentPreparationPropertiesInput = {
6421
+ /** The LLM specification, optional. */
6422
+ specification?: InputMaybe<EntityReferenceInput>;
6423
+ };
6298
6424
  /** Represents an LLM image entity extraction connector. */
6299
6425
  export type ModelImageExtractionProperties = {
6300
6426
  __typename?: 'ModelImageExtractionProperties';
@@ -6324,12 +6450,16 @@ export declare enum ModelServiceTypes {
6324
6450
  Google = "GOOGLE",
6325
6451
  /** Groq */
6326
6452
  Groq = "GROQ",
6453
+ /** Jina */
6454
+ Jina = "JINA",
6327
6455
  /** Mistral */
6328
6456
  Mistral = "MISTRAL",
6329
6457
  /** OpenAI */
6330
6458
  OpenAi = "OPEN_AI",
6331
6459
  /** Replicate */
6332
- Replicate = "REPLICATE"
6460
+ Replicate = "REPLICATE",
6461
+ /** Voyage */
6462
+ Voyage = "VOYAGE"
6333
6463
  }
6334
6464
  /** Represents an LLM text entity extraction connector. */
6335
6465
  export type ModelTextExtractionProperties = {
@@ -6603,6 +6733,7 @@ export type Mutation = {
6603
6733
  ingestPage?: Maybe<Content>;
6604
6734
  /** Ingests text. */
6605
6735
  ingestText?: Maybe<Content>;
6736
+ ingestTextBatch?: Maybe<Array<Maybe<Content>>>;
6606
6737
  /** Ingests content by URI. Supports files and webpages. */
6607
6738
  ingestUri?: Maybe<Content>;
6608
6739
  /** Opens an existing collection. */
@@ -7192,6 +7323,13 @@ export type MutationIngestTextArgs = {
7192
7323
  uri?: InputMaybe<Scalars['URL']['input']>;
7193
7324
  workflow?: InputMaybe<EntityReferenceInput>;
7194
7325
  };
7326
+ export type MutationIngestTextBatchArgs = {
7327
+ batch: Array<TextContentInput>;
7328
+ collections?: InputMaybe<Array<EntityReferenceInput>>;
7329
+ correlationId?: InputMaybe<Scalars['String']['input']>;
7330
+ textType?: InputMaybe<TextTypes>;
7331
+ workflow?: InputMaybe<EntityReferenceInput>;
7332
+ };
7195
7333
  export type MutationIngestUriArgs = {
7196
7334
  collections?: InputMaybe<Array<EntityReferenceInput>>;
7197
7335
  correlationId?: InputMaybe<Scalars['String']['input']>;
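
ingestTextBatch is a new mutation that ingests several text items in one round trip; each batch entry is a TextContentInput (name plus text, declared later in this diff). The sketch below assumes the Graphlit client exposes a matching ingestTextBatch helper; the method signature is an assumption based on the mutation arguments above.

import { Graphlit } from 'graphlit-client';

async function ingestReleaseNotes() {
  const client = new Graphlit(); // credentials read from environment variables, per the client's defaults

  // Each entry maps to a TextContentInput { name, text }.
  const batch = [
    { name: 'release-notes.md', text: 'Adds GitHub feeds and embedding specifications.' },
    { name: 'migration.md', text: 'Embeddings are now configured on the project, not the workflow storage stage.' },
  ];

  // Assumed helper mirroring MutationIngestTextBatchArgs (batch, textType, workflow, collections, correlationId).
  return await client.ingestTextBatch(batch);
}
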
@@ -7655,21 +7793,14 @@ export type OpenAiImageExtractionProperties = {
7655
7793
  /** The OpenAI vision detail mode. */
7656
7794
  detailLevel?: Maybe<OpenAiVisionDetailLevels>;
7657
7795
  };
7658
- /** Represents an OpenAI image entity extraction connector. */
7659
- export type OpenAiImageExtractionPropertiesInput = {
7660
- /** The confidence threshold for entity extraction. */
7661
- confidenceThreshold?: InputMaybe<Scalars['Float']['input']>;
7662
- /** Custom instructions which are injected into the LLM prompt. */
7663
- customInstructions?: InputMaybe<Scalars['String']['input']>;
7664
- /** The OpenAI vision detail mode. */
7665
- detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
7666
- };
7667
7796
  /** Represents OpenAI model properties. */
7668
7797
  export type OpenAiModelProperties = {
7669
7798
  __typename?: 'OpenAIModelProperties';
7799
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
7800
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
7670
7801
  /** The limit of tokens generated by prompt completion. */
7671
7802
  completionTokenLimit?: Maybe<Scalars['Int']['output']>;
7672
- /** The OpenAI vision detail mode. Only applies when using OpenAI for image analysis. */
7803
+ /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
7673
7804
  detailLevel?: Maybe<OpenAiVisionDetailLevels>;
7674
7805
  /** The OpenAI API key, if using developer's own account. */
7675
7806
  key?: Maybe<Scalars['String']['output']>;
@@ -7686,9 +7817,11 @@ export type OpenAiModelProperties = {
7686
7817
  };
7687
7818
  /** Represents OpenAI model properties. */
7688
7819
  export type OpenAiModelPropertiesInput = {
7820
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
7821
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
7689
7822
  /** The limit of tokens generated by prompt completion. */
7690
7823
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
7691
- /** The OpenAI vision detail mode. Only applies when using OpenAI for image analysis. */
7824
+ /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
7692
7825
  detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
7693
7826
  /** The OpenAI API key, if using developer's own account. */
7694
7827
  key?: InputMaybe<Scalars['String']['input']>;
@@ -7705,9 +7838,11 @@ export type OpenAiModelPropertiesInput = {
7705
7838
  };
7706
7839
  /** Represents OpenAI model properties. */
7707
7840
  export type OpenAiModelPropertiesUpdateInput = {
7841
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
7842
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
7708
7843
  /** The limit of tokens generated by prompt completion. */
7709
7844
  completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
7710
- /** The OpenAI vision detail mode. Only applies when using OpenAI for image analysis. */
7845
+ /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
7711
7846
  detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
7712
7847
  /** The OpenAI API key, if using developer's own account. */
7713
7848
  key?: InputMaybe<Scalars['String']['input']>;
@@ -7724,8 +7859,14 @@ export type OpenAiModelPropertiesUpdateInput = {
7724
7859
  };
7725
7860
  /** OpenAI model type */
7726
7861
  export declare enum OpenAiModels {
7862
+ /** Embedding Ada-002 */
7863
+ Ada_002 = "ADA_002",
7727
7864
  /** Developer-specified model */
7728
7865
  Custom = "CUSTOM",
7866
+ /** Embedding 3 Large */
7867
+ Embedding_3Large = "EMBEDDING_3_LARGE",
7868
+ /** Embedding 3 Small */
7869
+ Embedding_3Small = "EMBEDDING_3_SMALL",
7729
7870
  /**
7730
7871
  * GPT-4 (Latest)
7731
7872
  * @deprecated OpenAI has deprecated this model. Use the GPT-4o model instead.
@@ -8749,6 +8890,8 @@ export type Project = {
8749
8890
  creationDate: Scalars['DateTime']['output'];
8750
8891
  /** The project credit usage. */
8751
8892
  credits?: Maybe<Scalars['Long']['output']>;
8893
+ /** The project vector storage embeddings strategy. */
8894
+ embeddings?: Maybe<EmbeddingsStrategy>;
8752
8895
  /** The project environment type. */
8753
8896
  environmentType?: Maybe<EnvironmentTypes>;
8754
8897
  /** The ID of the project. */
@@ -8995,6 +9138,8 @@ export type ProjectStorageFileFacet = {
8995
9138
  export type ProjectUpdateInput = {
8996
9139
  /** The project callback URI, optional. The platform will callback to this webhook upon credit charges. */
8997
9140
  callbackUri?: InputMaybe<Scalars['URL']['input']>;
9141
+ /** The project vector storage embeddings strategy. */
9142
+ embeddings?: InputMaybe<EmbeddingsStrategyInput>;
8998
9143
  /** The default LLM specification for conversations. */
8999
9144
  specification?: InputMaybe<EntityReferenceInput>;
9000
9145
  /** The default content workflow. */
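
The project now owns the vector storage embeddings strategy, which previously lived on the workflow storage stage. A sketch of the new input follows, with placeholder IDs for previously created TEXT_EMBEDDING and IMAGE_EMBEDDING specifications; the import path and the client call in the trailing comment are assumptions.

import { EmbeddingsStrategyInput } from 'graphlit-client'; // assumed re-export of the generated input types

const embeddings: EmbeddingsStrategyInput = {
  textSpecification: { id: 'TEXT-EMBEDDING-SPECIFICATION-ID' },   // placeholder specification ID
  imageSpecification: { id: 'IMAGE-EMBEDDING-SPECIFICATION-ID' }, // placeholder specification ID
};
// e.g. await client.updateProject({ embeddings }); — exact ProjectUpdateInput requirements may differ.
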
@@ -10040,8 +10185,8 @@ export declare enum RerankingModelServiceTypes {
10040
10185
  Cohere = "COHERE",
10041
10186
  /** Jina */
10042
10187
  Jina = "JINA",
10043
- /** Pongo */
10044
- Pongo = "PONGO"
10188
+ /** Voyage */
10189
+ Voyage = "VOYAGE"
10045
10190
  }
10046
10191
  /** Represents a reranking strategy. */
10047
10192
  export type RerankingStrategy = {
@@ -10331,6 +10476,8 @@ export type SiteFeedProperties = {
10331
10476
  azureFile?: Maybe<AzureFileFeedProperties>;
10332
10477
  /** Feed connector type. */
10333
10478
  connectorType: FeedConnectorTypes;
10479
+ /** GitHub properties. */
10480
+ github?: Maybe<GitHubFeedProperties>;
10334
10481
  /** Google Cloud blob properties. */
10335
10482
  google?: Maybe<GoogleFeedProperties>;
10336
10483
  /** Google Drive properties. */
@@ -10356,6 +10503,8 @@ export type SiteFeedPropertiesInput = {
10356
10503
  azureBlob?: InputMaybe<AzureBlobFeedPropertiesInput>;
10357
10504
  /** Microsoft Azure file share properties. */
10358
10505
  azureFile?: InputMaybe<AzureFileFeedPropertiesInput>;
10506
+ /** GitHub properties. */
10507
+ github?: InputMaybe<GitHubFeedPropertiesInput>;
10359
10508
  /** Google Cloud blob properties. */
10360
10509
  google?: InputMaybe<GoogleFeedPropertiesInput>;
10361
10510
  /** Google Drive properties. */
@@ -10379,6 +10528,8 @@ export type SiteFeedPropertiesUpdateInput = {
10379
10528
  azureBlob?: InputMaybe<AzureBlobFeedPropertiesUpdateInput>;
10380
10529
  /** Microsoft Azure file share properties. */
10381
10530
  azureFile?: InputMaybe<AzureFileFeedPropertiesUpdateInput>;
10531
+ /** GitHub properties. */
10532
+ github?: InputMaybe<GitHubFeedPropertiesUpdateInput>;
10382
10533
  /** Google Cloud blob properties. */
10383
10534
  google?: InputMaybe<GoogleFeedPropertiesUpdateInput>;
10384
10535
  /** Google Drive properties. */
@@ -10627,6 +10778,8 @@ export type Specification = {
10627
10778
  groq?: Maybe<GroqModelProperties>;
10628
10779
  /** The ID of the specification. */
10629
10780
  id: Scalars['ID']['output'];
10781
+ /** The Jina model properties. */
10782
+ jina?: Maybe<JinaModelProperties>;
10630
10783
  /** The Mistral model properties. */
10631
10784
  mistral?: Maybe<MistralModelProperties>;
10632
10785
  /** The modified date of the specification. */
@@ -10661,13 +10814,12 @@ export type Specification = {
10661
10814
  strategy?: Maybe<ConversationStrategy>;
10662
10815
  /** The LLM system prompt. */
10663
10816
  systemPrompt?: Maybe<Scalars['String']['output']>;
10664
- /**
10665
- * The tool definitions.
10666
- * @deprecated Tool definitions have been moved to prompt conversation mutation.
10667
- */
10817
+ /** @deprecated The tool definitions have been removed. Tools are now provided to the promptConversation or extractContents mutations. */
10668
10818
  tools?: Maybe<Array<ToolDefinition>>;
10669
10819
  /** The specification type. */
10670
10820
  type?: Maybe<SpecificationTypes>;
10821
+ /** The Voyage model properties. */
10822
+ voyage?: Maybe<VoyageModelProperties>;
10671
10823
  };
10672
10824
  /** Represents a filter for LLM specifications. */
10673
10825
  export type SpecificationFilter = {
@@ -10718,6 +10870,8 @@ export type SpecificationInput = {
10718
10870
  graphStrategy?: InputMaybe<GraphStrategyInput>;
10719
10871
  /** The Groq model properties. */
10720
10872
  groq?: InputMaybe<GroqModelPropertiesInput>;
10873
+ /** The Jina model properties. */
10874
+ jina?: InputMaybe<JinaModelPropertiesInput>;
10721
10875
  /** The Mistral model properties. */
10722
10876
  mistral?: InputMaybe<MistralModelPropertiesInput>;
10723
10877
  /** The name of the specification. */
@@ -10746,6 +10900,8 @@ export type SpecificationInput = {
10746
10900
  systemPrompt?: InputMaybe<Scalars['String']['input']>;
10747
10901
  /** The specification type. */
10748
10902
  type?: InputMaybe<SpecificationTypes>;
10903
+ /** The Voyage model properties. */
10904
+ voyage?: InputMaybe<VoyageModelPropertiesInput>;
10749
10905
  };
10750
10906
  /** Represents LLM specification query results. */
10751
10907
  export type SpecificationResults = {
@@ -10759,8 +10915,12 @@ export declare enum SpecificationTypes {
10759
10915
  Completion = "COMPLETION",
10760
10916
  /** Data extraction */
10761
10917
  Extraction = "EXTRACTION",
10918
+ /** Image embedding */
10919
+ ImageEmbedding = "IMAGE_EMBEDDING",
10762
10920
  /** Document preparation */
10763
- Preparation = "PREPARATION"
10921
+ Preparation = "PREPARATION",
10922
+ /** Text embedding */
10923
+ TextEmbedding = "TEXT_EMBEDDING"
10764
10924
  }
10765
10925
  /** Represents an LLM specification. */
10766
10926
  export type SpecificationUpdateInput = {
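
TEXT_EMBEDDING and IMAGE_EMBEDDING join the specification types, and the new embedding models (OpenAI Ada-002 / Embedding 3, Cohere Embed, Google Embedding 004, Mistral Embed, Jina, Voyage) are selected through a specification rather than the old embeddings strategy. A sketch of a text-embedding specification follows; it assumes name and serviceType are the only other required SpecificationInput fields, and the import path is an assumption.

import {
  ModelServiceTypes,
  OpenAiModels,
  SpecificationInput,
  SpecificationTypes,
} from 'graphlit-client'; // assumed re-export of the generated types

const textEmbedding: SpecificationInput = {
  name: 'OpenAI Text Embedding (example)',
  type: SpecificationTypes.TextEmbedding,
  serviceType: ModelServiceTypes.OpenAi,
  openAI: {
    model: OpenAiModels.Embedding_3Small,
    chunkTokenLimit: 600, // the per-chunk token limit now lives on the model properties
  },
};
// e.g. await client.createSpecification(textEmbedding); — exact client call may differ.
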
@@ -10788,6 +10948,8 @@ export type SpecificationUpdateInput = {
10788
10948
  groq?: InputMaybe<GroqModelPropertiesUpdateInput>;
10789
10949
  /** The ID of the specification to update. */
10790
10950
  id: Scalars['ID']['input'];
10951
+ /** The Jina model properties. */
10952
+ jina?: InputMaybe<JinaModelPropertiesUpdateInput>;
10791
10953
  /** The Mistral model properties. */
10792
10954
  mistral?: InputMaybe<MistralModelPropertiesUpdateInput>;
10793
10955
  /** The name of the specification. */
@@ -10816,18 +10978,15 @@ export type SpecificationUpdateInput = {
10816
10978
  systemPrompt?: InputMaybe<Scalars['String']['input']>;
10817
10979
  /** The specification type. */
10818
10980
  type?: InputMaybe<SpecificationTypes>;
10981
+ /** The Voyage model properties. */
10982
+ voyage?: InputMaybe<VoyageModelPropertiesUpdateInput>;
10819
10983
  };
10820
10984
  /** Represents the storage workflow stage. */
10821
10985
  export type StorageWorkflowStage = {
10822
10986
  __typename?: 'StorageWorkflowStage';
10823
- /** The vector storage embeddings strategy. */
10987
+ /** @deprecated The vector storage embeddings strategy has been removed. Embeddings can be configured through the project embeddings strategy. */
10824
10988
  embeddings?: Maybe<EmbeddingsStrategy>;
10825
10989
  };
10826
- /** Represents the storage workflow stage. */
10827
- export type StorageWorkflowStageInput = {
10828
- /** The vector storage embeddings strategy. */
10829
- embeddings?: InputMaybe<EmbeddingsStrategyInput>;
10830
- };
10831
10990
  /** Represents a range of string values. */
10832
10991
  export type StringRange = {
10833
10992
  __typename?: 'StringRange';
@@ -10918,6 +11077,13 @@ export type TextChunk = {
10918
11077
  /** The text chunk. */
10919
11078
  text?: Maybe<Scalars['String']['output']>;
10920
11079
  };
11080
+ /** Represents text content. */
11081
+ export type TextContentInput = {
11082
+ /** The content name. */
11083
+ name: Scalars['String']['input'];
11084
+ /** The content text. */
11085
+ text: Scalars['String']['input'];
11086
+ };
10921
11087
  /** Represents a frame of image or video. */
10922
11088
  export type TextFrame = {
10923
11089
  __typename?: 'TextFrame';
@@ -11069,15 +11235,6 @@ export type ToolDefinitionInput = {
11069
11235
  /** The tool schema. */
11070
11236
  schema: Scalars['String']['input'];
11071
11237
  };
11072
- /** Represents a tool definition. */
11073
- export type ToolDefinitionUpdateInput = {
11074
- /** The tool description. */
11075
- description?: InputMaybe<Scalars['String']['input']>;
11076
- /** The tool name. */
11077
- name?: InputMaybe<Scalars['String']['input']>;
11078
- /** The tool schema. */
11079
- schema?: InputMaybe<Scalars['String']['input']>;
11080
- };
11081
11238
  /** Unit types */
11082
11239
  export declare enum UnitTypes {
11083
11240
  /** Angstrom */
@@ -11176,6 +11333,59 @@ export type VideoMetadataInput = {
11176
11333
  /** The video width. */
11177
11334
  width?: InputMaybe<Scalars['Int']['input']>;
11178
11335
  };
11336
+ /** Represents Voyage model properties. */
11337
+ export type VoyageModelProperties = {
11338
+ __typename?: 'VoyageModelProperties';
11339
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
11340
+ chunkTokenLimit?: Maybe<Scalars['Int']['output']>;
11341
+ /** The Voyage API key, if using developer's own account. */
11342
+ key?: Maybe<Scalars['String']['output']>;
11343
+ /** The Voyage model, or custom, when using developer's own account. */
11344
+ model: VoyageModels;
11345
+ /** The Voyage model name, if using developer's own account. */
11346
+ modelName?: Maybe<Scalars['String']['output']>;
11347
+ };
11348
+ /** Represents Voyage model properties. */
11349
+ export type VoyageModelPropertiesInput = {
11350
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
11351
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
11352
+ /** The Voyage API key, if using developer's own account. */
11353
+ key?: InputMaybe<Scalars['String']['input']>;
11354
+ /** The Voyage model, or custom, when using developer's own account. */
11355
+ model: VoyageModels;
11356
+ /** The Voyage model name, if using developer's own account. */
11357
+ modelName?: InputMaybe<Scalars['String']['input']>;
11358
+ };
11359
+ /** Represents Voyage model properties. */
11360
+ export type VoyageModelPropertiesUpdateInput = {
11361
+ /** The limit of tokens per embedded text chunk, defaults to 600. */
11362
+ chunkTokenLimit?: InputMaybe<Scalars['Int']['input']>;
11363
+ /** The Voyage API key, if using developer's own account. */
11364
+ key?: InputMaybe<Scalars['String']['input']>;
11365
+ /** The Voyage model, or custom, when using developer's own account. */
11366
+ model?: InputMaybe<VoyageModels>;
11367
+ /** The Voyage model name, if using developer's own account. */
11368
+ modelName?: InputMaybe<Scalars['String']['input']>;
11369
+ };
11370
+ /** Voyage model type */
11371
+ export declare enum VoyageModels {
11372
+ /** Developer-specified model */
11373
+ Custom = "CUSTOM",
11374
+ /** Voyage (Latest) */
11375
+ Voyage = "VOYAGE",
11376
+ /** Voyage 3.0 */
11377
+ Voyage_3_0 = "VOYAGE_3_0",
11378
+ /** Voyage Code 2.0 */
11379
+ VoyageCode_2_0 = "VOYAGE_CODE_2_0",
11380
+ /** Voyage Finance 2.0 */
11381
+ VoyageFinance_2_0 = "VOYAGE_FINANCE_2_0",
11382
+ /** Voyage Law 2.0 */
11383
+ VoyageLaw_2_0 = "VOYAGE_LAW_2_0",
11384
+ /** Voyage Lite 3.0 */
11385
+ VoyageLite_3_0 = "VOYAGE_LITE_3_0",
11386
+ /** Voyage Multilingual 2.0 */
11387
+ VoyageMultilingual_2_0 = "VOYAGE_MULTILINGUAL_2_0"
11388
+ }
11179
11389
  /** Represents web feed properties. */
11180
11390
  export type WebFeedProperties = {
11181
11391
  __typename?: 'WebFeedProperties';
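
Voyage is the other new model service, with general, code, finance, law, and multilingual embedding variants. A minimal sketch of the Voyage model properties follows; values are illustrative and the import path is assumed.

import { VoyageModelPropertiesInput, VoyageModels } from 'graphlit-client'; // assumed re-export of the generated types

// Domain-tuned text embeddings; chunkTokenLimit defaults to 600 when omitted.
const voyage: VoyageModelPropertiesInput = {
  model: VoyageModels.Voyage_3_0,
  chunkTokenLimit: 512,
};
// Supplied as the voyage field of a SpecificationInput with type TEXT_EMBEDDING,
// analogous to the OpenAI example earlier. Voyage also replaces Pongo among the reranking services (see above).
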
@@ -11245,7 +11455,7 @@ export type Workflow = {
11245
11455
  relevance?: Maybe<Scalars['Float']['output']>;
11246
11456
  /** The state of the workflow (i.e. created, finished). */
11247
11457
  state: EntityState;
11248
- /** The storage stage of the content workflow. */
11458
+ /** @deprecated The storage stage of the content workflow has been removed. Embeddings can be configured through the project embeddings strategy. */
11249
11459
  storage?: Maybe<StorageWorkflowStage>;
11250
11460
  };
11251
11461
  /** Represents the workflow action. */
@@ -11296,8 +11506,6 @@ export type WorkflowInput = {
11296
11506
  name: Scalars['String']['input'];
11297
11507
  /** The preparation stage of the content workflow. */
11298
11508
  preparation?: InputMaybe<PreparationWorkflowStageInput>;
11299
- /** The storage stage of the content workflow. */
11300
- storage?: InputMaybe<StorageWorkflowStageInput>;
11301
11509
  };
11302
11510
  /** Represents workflow query results. */
11303
11511
  export type WorkflowResults = {
@@ -11323,8 +11531,6 @@ export type WorkflowUpdateInput = {
11323
11531
  name?: InputMaybe<Scalars['String']['input']>;
11324
11532
  /** The preparation stage of the content workflow. */
11325
11533
  preparation?: InputMaybe<PreparationWorkflowStageInput>;
11326
- /** The storage stage of the content workflow. */
11327
- storage?: InputMaybe<StorageWorkflowStageInput>;
11328
11534
  };
11329
11535
  /** Represents YouTube feed properties. */
11330
11536
  export type YouTubeFeedProperties = {
@@ -12442,6 +12648,31 @@ export type IngestTextMutation = {
12442
12648
  } | null> | null;
12443
12649
  } | null;
12444
12650
  };
12651
+ export type IngestTextBatchMutationVariables = Exact<{
12652
+ batch: Array<TextContentInput> | TextContentInput;
12653
+ textType?: InputMaybe<TextTypes>;
12654
+ workflow?: InputMaybe<EntityReferenceInput>;
12655
+ collections?: InputMaybe<Array<EntityReferenceInput> | EntityReferenceInput>;
12656
+ correlationId?: InputMaybe<Scalars['String']['input']>;
12657
+ }>;
12658
+ export type IngestTextBatchMutation = {
12659
+ __typename?: 'Mutation';
12660
+ ingestTextBatch?: Array<{
12661
+ __typename?: 'Content';
12662
+ id: string;
12663
+ name: string;
12664
+ state: EntityState;
12665
+ type?: ContentTypes | null;
12666
+ fileType?: FileTypes | null;
12667
+ mimeType?: string | null;
12668
+ uri?: any | null;
12669
+ collections?: Array<{
12670
+ __typename?: 'Collection';
12671
+ id: string;
12672
+ name: string;
12673
+ } | null> | null;
12674
+ } | null> | null;
12675
+ };
12445
12676
  export type IngestUriMutationVariables = Exact<{
12446
12677
  name?: InputMaybe<Scalars['String']['input']>;
12447
12678
  uri: Scalars['URL']['input'];
@@ -14656,6 +14887,14 @@ export type GetFeedQuery = {
14656
14887
  clientId: string;
14657
14888
  clientSecret: string;
14658
14889
  } | null;
14890
+ github?: {
14891
+ __typename?: 'GitHubFeedProperties';
14892
+ uri?: any | null;
14893
+ repositoryOwner: string;
14894
+ repositoryName: string;
14895
+ refreshToken?: string | null;
14896
+ personalAccessToken?: string | null;
14897
+ } | null;
14659
14898
  } | null;
14660
14899
  email?: {
14661
14900
  __typename?: 'EmailFeedProperties';
@@ -14869,6 +15108,14 @@ export type QueryFeedsQuery = {
14869
15108
  clientId: string;
14870
15109
  clientSecret: string;
14871
15110
  } | null;
15111
+ github?: {
15112
+ __typename?: 'GitHubFeedProperties';
15113
+ uri?: any | null;
15114
+ repositoryOwner: string;
15115
+ repositoryName: string;
15116
+ refreshToken?: string | null;
15117
+ personalAccessToken?: string | null;
15118
+ } | null;
14872
15119
  } | null;
14873
15120
  email?: {
14874
15121
  __typename?: 'EmailFeedProperties';
@@ -16872,6 +17119,17 @@ export type GetProjectQuery = {
16872
17119
  id: string;
16873
17120
  name: string;
16874
17121
  } | null;
17122
+ embeddings?: {
17123
+ __typename?: 'EmbeddingsStrategy';
17124
+ textSpecification?: {
17125
+ __typename?: 'EntityReference';
17126
+ id: string;
17127
+ } | null;
17128
+ imageSpecification?: {
17129
+ __typename?: 'EntityReference';
17130
+ id: string;
17131
+ } | null;
17132
+ } | null;
16875
17133
  quota?: {
16876
17134
  __typename?: 'ProjectQuota';
16877
17135
  storage?: any | null;
@@ -17357,6 +17615,7 @@ export type GetSpecificationQuery = {
17357
17615
  endpoint: any;
17358
17616
  temperature?: number | null;
17359
17617
  probability?: number | null;
17618
+ chunkTokenLimit?: number | null;
17360
17619
  } | null;
17361
17620
  openAI?: {
17362
17621
  __typename?: 'OpenAIModelProperties';
@@ -17367,6 +17626,7 @@ export type GetSpecificationQuery = {
17367
17626
  modelName?: string | null;
17368
17627
  temperature?: number | null;
17369
17628
  probability?: number | null;
17629
+ chunkTokenLimit?: number | null;
17370
17630
  } | null;
17371
17631
  azureOpenAI?: {
17372
17632
  __typename?: 'AzureOpenAIModelProperties';
@@ -17378,6 +17638,7 @@ export type GetSpecificationQuery = {
17378
17638
  deploymentName?: string | null;
17379
17639
  temperature?: number | null;
17380
17640
  probability?: number | null;
17641
+ chunkTokenLimit?: number | null;
17381
17642
  } | null;
17382
17643
  cohere?: {
17383
17644
  __typename?: 'CohereModelProperties';
@@ -17388,6 +17649,7 @@ export type GetSpecificationQuery = {
17388
17649
  modelName?: string | null;
17389
17650
  temperature?: number | null;
17390
17651
  probability?: number | null;
17652
+ chunkTokenLimit?: number | null;
17391
17653
  } | null;
17392
17654
  anthropic?: {
17393
17655
  __typename?: 'AnthropicModelProperties';
@@ -17408,6 +17670,7 @@ export type GetSpecificationQuery = {
17408
17670
  modelName?: string | null;
17409
17671
  temperature?: number | null;
17410
17672
  probability?: number | null;
17673
+ chunkTokenLimit?: number | null;
17411
17674
  } | null;
17412
17675
  replicate?: {
17413
17676
  __typename?: 'ReplicateModelProperties';
@@ -17429,6 +17692,7 @@ export type GetSpecificationQuery = {
17429
17692
  endpoint?: any | null;
17430
17693
  temperature?: number | null;
17431
17694
  probability?: number | null;
17695
+ chunkTokenLimit?: number | null;
17432
17696
  } | null;
17433
17697
  groq?: {
17434
17698
  __typename?: 'GroqModelProperties';
@@ -17462,12 +17726,20 @@ export type GetSpecificationQuery = {
17462
17726
  temperature?: number | null;
17463
17727
  probability?: number | null;
17464
17728
  } | null;
17465
- tools?: Array<{
17466
- __typename?: 'ToolDefinition';
17467
- name: string;
17468
- description?: string | null;
17469
- schema: string;
17470
- }> | null;
17729
+ jina?: {
17730
+ __typename?: 'JinaModelProperties';
17731
+ model: JinaModels;
17732
+ key?: string | null;
17733
+ modelName?: string | null;
17734
+ chunkTokenLimit?: number | null;
17735
+ } | null;
17736
+ voyage?: {
17737
+ __typename?: 'VoyageModelProperties';
17738
+ model: VoyageModels;
17739
+ key?: string | null;
17740
+ modelName?: string | null;
17741
+ chunkTokenLimit?: number | null;
17742
+ } | null;
17471
17743
  } | null;
17472
17744
  };
17473
17745
  export type PromptSpecificationsMutationVariables = Exact<{
@@ -17682,6 +17954,7 @@ export type QuerySpecificationsQuery = {
17682
17954
  endpoint: any;
17683
17955
  temperature?: number | null;
17684
17956
  probability?: number | null;
17957
+ chunkTokenLimit?: number | null;
17685
17958
  } | null;
17686
17959
  openAI?: {
17687
17960
  __typename?: 'OpenAIModelProperties';
@@ -17692,6 +17965,7 @@ export type QuerySpecificationsQuery = {
17692
17965
  modelName?: string | null;
17693
17966
  temperature?: number | null;
17694
17967
  probability?: number | null;
17968
+ chunkTokenLimit?: number | null;
17695
17969
  } | null;
17696
17970
  azureOpenAI?: {
17697
17971
  __typename?: 'AzureOpenAIModelProperties';
@@ -17703,6 +17977,7 @@ export type QuerySpecificationsQuery = {
17703
17977
  deploymentName?: string | null;
17704
17978
  temperature?: number | null;
17705
17979
  probability?: number | null;
17980
+ chunkTokenLimit?: number | null;
17706
17981
  } | null;
17707
17982
  cohere?: {
17708
17983
  __typename?: 'CohereModelProperties';
@@ -17713,6 +17988,7 @@ export type QuerySpecificationsQuery = {
17713
17988
  modelName?: string | null;
17714
17989
  temperature?: number | null;
17715
17990
  probability?: number | null;
17991
+ chunkTokenLimit?: number | null;
17716
17992
  } | null;
17717
17993
  anthropic?: {
17718
17994
  __typename?: 'AnthropicModelProperties';
@@ -17733,6 +18009,7 @@ export type QuerySpecificationsQuery = {
17733
18009
  modelName?: string | null;
17734
18010
  temperature?: number | null;
17735
18011
  probability?: number | null;
18012
+ chunkTokenLimit?: number | null;
17736
18013
  } | null;
17737
18014
  replicate?: {
17738
18015
  __typename?: 'ReplicateModelProperties';
@@ -17754,6 +18031,7 @@ export type QuerySpecificationsQuery = {
17754
18031
  endpoint?: any | null;
17755
18032
  temperature?: number | null;
17756
18033
  probability?: number | null;
18034
+ chunkTokenLimit?: number | null;
17757
18035
  } | null;
17758
18036
  groq?: {
17759
18037
  __typename?: 'GroqModelProperties';
@@ -17787,12 +18065,20 @@ export type QuerySpecificationsQuery = {
17787
18065
  temperature?: number | null;
17788
18066
  probability?: number | null;
17789
18067
  } | null;
17790
- tools?: Array<{
17791
- __typename?: 'ToolDefinition';
17792
- name: string;
17793
- description?: string | null;
17794
- schema: string;
17795
- }> | null;
18068
+ jina?: {
18069
+ __typename?: 'JinaModelProperties';
18070
+ model: JinaModels;
18071
+ key?: string | null;
18072
+ modelName?: string | null;
18073
+ chunkTokenLimit?: number | null;
18074
+ } | null;
18075
+ voyage?: {
18076
+ __typename?: 'VoyageModelProperties';
18077
+ model: VoyageModels;
18078
+ key?: string | null;
18079
+ modelName?: string | null;
18080
+ chunkTokenLimit?: number | null;
18081
+ } | null;
17796
18082
  } | null> | null;
17797
18083
  } | null;
17798
18084
  };
@@ -17928,12 +18214,6 @@ export type CreateWorkflowMutation = {
17928
18214
  __typename?: 'AzureImageExtractionProperties';
17929
18215
  confidenceThreshold?: number | null;
17930
18216
  } | null;
17931
- openAIImage?: {
17932
- __typename?: 'OpenAIImageExtractionProperties';
17933
- confidenceThreshold?: number | null;
17934
- detailLevel?: OpenAiVisionDetailLevels | null;
17935
- customInstructions?: string | null;
17936
- } | null;
17937
18217
  modelImage?: {
17938
18218
  __typename?: 'ModelImageExtractionProperties';
17939
18219
  specification?: {
@@ -17980,13 +18260,6 @@ export type CreateWorkflowMutation = {
17980
18260
  } | null;
17981
18261
  } | null> | null;
17982
18262
  } | null;
17983
- storage?: {
17984
- __typename?: 'StorageWorkflowStage';
17985
- embeddings?: {
17986
- __typename?: 'EmbeddingsStrategy';
17987
- chunkTokenLimit?: number | null;
17988
- } | null;
17989
- } | null;
17990
18263
  actions?: Array<{
17991
18264
  __typename?: 'WorkflowAction';
17992
18265
  connector?: {
@@ -18152,12 +18425,6 @@ export type GetWorkflowQuery = {
18152
18425
  __typename?: 'AzureImageExtractionProperties';
18153
18426
  confidenceThreshold?: number | null;
18154
18427
  } | null;
18155
- openAIImage?: {
18156
- __typename?: 'OpenAIImageExtractionProperties';
18157
- confidenceThreshold?: number | null;
18158
- detailLevel?: OpenAiVisionDetailLevels | null;
18159
- customInstructions?: string | null;
18160
- } | null;
18161
18428
  modelImage?: {
18162
18429
  __typename?: 'ModelImageExtractionProperties';
18163
18430
  specification?: {
@@ -18204,13 +18471,6 @@ export type GetWorkflowQuery = {
18204
18471
  } | null;
18205
18472
  } | null> | null;
18206
18473
  } | null;
18207
- storage?: {
18208
- __typename?: 'StorageWorkflowStage';
18209
- embeddings?: {
18210
- __typename?: 'EmbeddingsStrategy';
18211
- chunkTokenLimit?: number | null;
18212
- } | null;
18213
- } | null;
18214
18474
  actions?: Array<{
18215
18475
  __typename?: 'WorkflowAction';
18216
18476
  connector?: {
@@ -18342,12 +18602,6 @@ export type QueryWorkflowsQuery = {
18342
18602
  __typename?: 'AzureImageExtractionProperties';
18343
18603
  confidenceThreshold?: number | null;
18344
18604
  } | null;
18345
- openAIImage?: {
18346
- __typename?: 'OpenAIImageExtractionProperties';
18347
- confidenceThreshold?: number | null;
18348
- detailLevel?: OpenAiVisionDetailLevels | null;
18349
- customInstructions?: string | null;
18350
- } | null;
18351
18605
  modelImage?: {
18352
18606
  __typename?: 'ModelImageExtractionProperties';
18353
18607
  specification?: {
@@ -18394,13 +18648,6 @@ export type QueryWorkflowsQuery = {
18394
18648
  } | null;
18395
18649
  } | null> | null;
18396
18650
  } | null;
18397
- storage?: {
18398
- __typename?: 'StorageWorkflowStage';
18399
- embeddings?: {
18400
- __typename?: 'EmbeddingsStrategy';
18401
- chunkTokenLimit?: number | null;
18402
- } | null;
18403
- } | null;
18404
18651
  actions?: Array<{
18405
18652
  __typename?: 'WorkflowAction';
18406
18653
  connector?: {
@@ -18525,12 +18772,6 @@ export type UpdateWorkflowMutation = {
18525
18772
  __typename?: 'AzureImageExtractionProperties';
18526
18773
  confidenceThreshold?: number | null;
18527
18774
  } | null;
18528
- openAIImage?: {
18529
- __typename?: 'OpenAIImageExtractionProperties';
18530
- confidenceThreshold?: number | null;
18531
- detailLevel?: OpenAiVisionDetailLevels | null;
18532
- customInstructions?: string | null;
18533
- } | null;
18534
18775
  modelImage?: {
18535
18776
  __typename?: 'ModelImageExtractionProperties';
18536
18777
  specification?: {
@@ -18577,13 +18818,6 @@ export type UpdateWorkflowMutation = {
18577
18818
  } | null;
18578
18819
  } | null> | null;
18579
18820
  } | null;
18580
- storage?: {
18581
- __typename?: 'StorageWorkflowStage';
18582
- embeddings?: {
18583
- __typename?: 'EmbeddingsStrategy';
18584
- chunkTokenLimit?: number | null;
18585
- } | null;
18586
- } | null;
18587
18821
  actions?: Array<{
18588
18822
  __typename?: 'WorkflowAction';
18589
18823
  connector?: {