groundx 2.6.0__py3-none-any.whl → 2.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of groundx might be problematic.

Files changed (38)
  1. groundx/__init__.py +14 -18
  2. groundx/client.py +3 -3
  3. groundx/core/client_wrapper.py +2 -2
  4. groundx/extract/classes/document.py +33 -16
  5. groundx/extract/classes/groundx.py +37 -18
  6. groundx/extract/services/logging_cfg.py +0 -2
  7. groundx/extract/services/upload.py +1 -6
  8. groundx/extract/services/upload_s3.py +10 -3
  9. groundx/extract/settings/settings.py +51 -9
  10. groundx/extract/settings/test_settings.py +0 -3
  11. groundx/ingest.py +100 -37
  12. groundx/types/__init__.py +10 -14
  13. groundx/types/workflow_detail.py +4 -0
  14. groundx/types/workflow_detail_chunk_strategy.py +5 -0
  15. groundx/types/workflow_prompt.py +1 -3
  16. groundx/types/workflow_prompt_role.py +1 -1
  17. groundx/types/{workflow_steps_doc_summary.py → workflow_request.py} +12 -4
  18. groundx/types/workflow_request_chunk_strategy.py +5 -0
  19. groundx/types/workflow_step.py +11 -4
  20. groundx/types/workflow_step_config.py +33 -0
  21. groundx/types/workflow_step_config_field.py +8 -0
  22. groundx/types/workflow_steps.py +12 -24
  23. groundx/{workflow → workflows}/__init__.py +2 -2
  24. groundx/{workflow → workflows}/client.py +67 -74
  25. groundx/{workflow → workflows}/raw_client.py +30 -23
  26. groundx/workflows/types/__init__.py +7 -0
  27. groundx/{workflow/types/workflow_get_request_id.py → workflows/types/workflows_get_request_id.py} +1 -1
  28. {groundx-2.6.0.dist-info → groundx-2.7.8.dist-info}/METADATA +1 -1
  29. {groundx-2.6.0.dist-info → groundx-2.7.8.dist-info}/RECORD +31 -33
  30. groundx/types/workflow_steps_chunk_instruct.py +0 -24
  31. groundx/types/workflow_steps_chunk_summary.py +0 -26
  32. groundx/types/workflow_steps_doc_keys.py +0 -22
  33. groundx/types/workflow_steps_search_query.py +0 -22
  34. groundx/types/workflow_steps_sect_instruct.py +0 -20
  35. groundx/types/workflow_steps_sect_summary.py +0 -23
  36. groundx/workflow/types/__init__.py +0 -7
  37. {groundx-2.6.0.dist-info → groundx-2.7.8.dist-info}/LICENSE +0 -0
  38. {groundx-2.6.0.dist-info → groundx-2.7.8.dist-info}/WHEEL +0 -0
groundx/ingest.py CHANGED
@@ -57,6 +57,7 @@ MAX_BATCH_SIZE = 50
 MIN_BATCH_SIZE = 1
 MAX_BATCH_SIZE_BYTES = 50 * 1024 * 1024
 
+
 def get_presigned_url(
     endpoint: str,
     file_name: str,
@@ -68,6 +69,7 @@ def get_presigned_url(
 
     return response.json()
 
+
 def strip_query_params(
     url: str,
 ) -> str:
@@ -76,6 +78,7 @@ def strip_query_params(
 
     return clean_url
 
+
 def prep_documents(
     documents: typing.Sequence[Document],
 ) -> typing.Tuple[
@@ -127,16 +130,16 @@ def prep_documents(
 
 def split_doc(file: Path) -> typing.List[Path]:
     if file.is_file() and (
-        file.suffix.lower() in ALLOWED_SUFFIXES
-        or file.suffix.lower() in SUFFIX_ALIASES
+        file.suffix.lower() in ALLOWED_SUFFIXES or file.suffix.lower() in SUFFIX_ALIASES
     ):
         if file.suffix.lower() in CSV_SPLITS:
             return CSVSplitter(filepath=file).split()
         elif file.suffix.lower() in TSV_SPLITS:
-            return CSVSplitter(filepath=file, delimiter='\t').split()
+            return CSVSplitter(filepath=file, delimiter="\t").split()
        return [file]
    return []
 
+
 class GroundX(GroundXBase):
     def ingest(
         self,
@@ -207,11 +210,19 @@ class GroundX(GroundXBase):
            raise ValueError("No valid documents were provided")
 
        if wait_for_complete:
-            with tqdm(total=len(remote_documents) + len(local_documents), desc="Ingesting Files", unit="file") as pbar:
-                n = max(MIN_BATCH_SIZE, min(batch_size or MIN_BATCH_SIZE, MAX_BATCH_SIZE))
+            with tqdm(
+                total=len(remote_documents) + len(local_documents),
+                desc="Ingesting Files",
+                unit="file",
+            ) as pbar:
+                n = max(
+                    MIN_BATCH_SIZE, min(batch_size or MIN_BATCH_SIZE, MAX_BATCH_SIZE)
+                )
 
                remote_batch: typing.List[IngestRemoteDocument] = []
-                ingest = IngestResponse(ingest=IngestStatus(process_id="",status="queued"))
+                ingest = IngestResponse(
+                    ingest=IngestStatus(process_id="", status="queued")
+                )
 
                progress = float(len(remote_documents))
                for rd in remote_documents:
@@ -239,7 +250,6 @@ class GroundX(GroundXBase):
                        )
                        ingest, progress = self._monitor_batch(ingest, progress, pbar)
 
-
                if progress > 0:
                    pbar.update(progress)
 
@@ -251,8 +261,12 @@ class GroundX(GroundXBase):
                    fp = Path(os.path.expanduser(ld.file_path))
                    file_size = fp.stat().st_size
 
-                    if (current_batch_size + file_size > MAX_BATCH_SIZE_BYTES) or (len(local_batch) >= n):
-                        up_docs, progress = self._process_local(local_batch, upload_api, progress, pbar)
+                    if (current_batch_size + file_size > MAX_BATCH_SIZE_BYTES) or (
+                        len(local_batch) >= n
+                    ):
+                        up_docs, progress = self._process_local(
+                            local_batch, upload_api, progress, pbar
+                        )
 
                        ingest = self.documents.ingest_remote(
                            documents=up_docs,
@@ -269,7 +283,9 @@ class GroundX(GroundXBase):
                    current_batch_size += file_size
 
                if local_batch:
-                    up_docs, progress = self._process_local(local_batch, upload_api, progress, pbar)
+                    up_docs, progress = self._process_local(
+                        local_batch, upload_api, progress, pbar
+                    )
 
                    ingest = self.documents.ingest_remote(
                        documents=up_docs,
@@ -286,7 +302,6 @@ class GroundX(GroundXBase):
        elif len(remote_documents) + len(local_documents) > MAX_BATCH_SIZE:
            raise ValueError("You have sent too many documents in this request")
 
-
        up_docs, _ = self._process_local(local_documents, upload_api, 0, None)
        remote_documents.extend(up_docs)
 
@@ -360,9 +375,9 @@ class GroundX(GroundXBase):
            matched_files: typing.List[Path] = []
            for file in dir_path.rglob("*"):
                for sd in split_doc(file):
-                    matched_files.append(sd)
+                    matched_files.append(sd)
 
-            return matched_files
+            return matched_files
 
        if bucket_id < 1:
            raise ValueError(f"Invalid bucket_id: {bucket_id}")
@@ -384,8 +399,18 @@ class GroundX(GroundXBase):
        for file in files:
            file_size = file.stat().st_size
 
-            if (current_batch_size + file_size > MAX_BATCH_SIZE_BYTES) or (len(current_batch) >= n):
-                self._upload_file_batch(bucket_id, current_batch, upload_api, callback_url, callback_data, request_options, pbar)
+            if (current_batch_size + file_size > MAX_BATCH_SIZE_BYTES) or (
+                len(current_batch) >= n
+            ):
+                self._upload_file_batch(
+                    bucket_id,
+                    current_batch,
+                    upload_api,
+                    callback_url,
+                    callback_data,
+                    request_options,
+                    pbar,
+                )
                current_batch = []
                current_batch_size = 0
 
@@ -393,7 +418,15 @@ class GroundX(GroundXBase):
            current_batch_size += file_size
 
        if current_batch:
-            self._upload_file_batch(bucket_id, current_batch, upload_api, callback_url, callback_data, request_options, pbar)
+            self._upload_file_batch(
+                bucket_id,
+                current_batch,
+                upload_api,
+                callback_url,
+                callback_data,
+                request_options,
+                pbar,
+            )
 
    def _upload_file(
        self,
@@ -408,12 +441,13 @@ class GroundX(GroundXBase):
        presigned_info = get_presigned_url(endpoint, file_name, file_extension)
 
        upload_url = presigned_info["URL"]
-        headers = presigned_info.get("Header", {})
+        hd = presigned_info.get("Header", {})
        method = presigned_info.get("Method", "PUT").upper()
 
-        for key, value in headers.items():
+        headers: typing.Dict[str, typing.Any] = {}
+        for key, value in hd.items():
            if isinstance(value, list):
-                headers[key] = value[0]
+                headers[key.upper()] = value[0]
 
        try:
            with open(file_path, "rb") as f:
@@ -431,6 +465,9 @@ class GroundX(GroundXBase):
                f"Upload failed: {upload_response.status_code} - {upload_response.text}"
            )
 
+        if "GX-HOSTED-URL" in headers:
+            return headers["GX-HOSTED-URL"]
+
        return strip_query_params(upload_url)
 
    def _process_local(
@@ -481,39 +518,62 @@ class GroundX(GroundXBase):
    ) -> typing.Tuple[IngestResponse, float]:
        completed_files: typing.Set[str] = set()
 
-        while (
-            ingest.ingest.status not in ["complete", "error", "cancelled"]
-        ):
+        while ingest.ingest.status not in ["complete", "error", "cancelled"]:
            time.sleep(3)
-            ingest = self.documents.get_processing_status_by_id(ingest.ingest.process_id)
+            ingest = self.documents.get_processing_status_by_id(
+                ingest.ingest.process_id
+            )
 
            if ingest.ingest.progress:
-                if ingest.ingest.progress.processing and ingest.ingest.progress.processing.documents:
+                if (
+                    ingest.ingest.progress.processing
+                    and ingest.ingest.progress.processing.documents
+                ):
                    for doc in ingest.ingest.progress.processing.documents:
-                        if doc.status in ["complete", "error", "cancelled"] and doc.document_id not in completed_files:
+                        if (
+                            doc.status in ["complete", "error", "cancelled"]
+                            and doc.document_id not in completed_files
+                        ):
                            pbar.update(0.75)
                            progress -= 0.75
                            completed_files.add(doc.document_id)
-                if ingest.ingest.progress.complete and ingest.ingest.progress.complete.documents:
+                if (
+                    ingest.ingest.progress.complete
+                    and ingest.ingest.progress.complete.documents
+                ):
                    for doc in ingest.ingest.progress.complete.documents:
-                        if doc.status in ["complete", "error", "cancelled"] and doc.document_id not in completed_files:
+                        if (
+                            doc.status in ["complete", "error", "cancelled"]
+                            and doc.document_id not in completed_files
+                        ):
                            pbar.update(0.75)
                            progress -= 0.75
                            completed_files.add(doc.document_id)
-                if ingest.ingest.progress.cancelled and ingest.ingest.progress.cancelled.documents:
+                if (
+                    ingest.ingest.progress.cancelled
+                    and ingest.ingest.progress.cancelled.documents
+                ):
                    for doc in ingest.ingest.progress.cancelled.documents:
-                        if doc.status in ["complete", "error", "cancelled"] and doc.document_id not in completed_files:
+                        if (
+                            doc.status in ["complete", "error", "cancelled"]
+                            and doc.document_id not in completed_files
+                        ):
                            pbar.update(0.75)
                            progress -= 0.75
                            completed_files.add(doc.document_id)
-                if ingest.ingest.progress.errors and ingest.ingest.progress.errors.documents:
+                if (
+                    ingest.ingest.progress.errors
+                    and ingest.ingest.progress.errors.documents
+                ):
                    for doc in ingest.ingest.progress.errors.documents:
-                        if doc.status in ["complete", "error", "cancelled"] and doc.document_id not in completed_files:
+                        if (
+                            doc.status in ["complete", "error", "cancelled"]
+                            and doc.document_id not in completed_files
+                        ):
                            pbar.update(0.75)
                            progress -= 0.75
                            completed_files.add(doc.document_id)
 
-
        if ingest.ingest.status in ["error", "cancelled"]:
            raise ValueError(f"Ingest failed with status: {ingest.ingest.status}")
 
@@ -531,7 +591,7 @@ class GroundX(GroundXBase):
    ) -> None:
        docs: typing.List[Document] = []
 
-        progress = float(len(batch))
+        progress = float(len(batch))
        for file in batch:
            url = self._upload_file(upload_api, file)
            if file.suffix.lower() in SUFFIX_ALIASES:
@@ -567,7 +627,6 @@ class GroundX(GroundXBase):
            pbar.update(progress)
 
 
-
 class AsyncGroundX(AsyncGroundXBase):
    async def ingest(
        self,
@@ -682,12 +741,13 @@ class AsyncGroundX(AsyncGroundXBase):
        presigned_info = get_presigned_url(endpoint, file_name, file_extension)
 
        upload_url = presigned_info["URL"]
-        headers = presigned_info.get("Header", {})
+        hd = presigned_info.get("Header", {})
        method = presigned_info.get("Method", "PUT").upper()
 
-        for key, value in headers.items():
+        headers: typing.Dict[str, typing.Any] = {}
+        for key, value in hd.items():
            if isinstance(value, list):
-                headers[key] = value[0]
+                headers[key.upper()] = value[0]
 
        try:
            with open(file_path, "rb") as f:
@@ -705,4 +765,7 @@ class AsyncGroundX(AsyncGroundXBase):
                f"Upload failed: {upload_response.status_code} - {upload_response.text}"
            )
 
+        if "GX-HOSTED-URL" in headers:
+            return headers["GX-HOSTED-URL"]
+
        return strip_query_params(upload_url)
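Both the sync and async _upload_file now rebuild the header map with upper-cased keys and short-circuit on a GX-HOSTED-URL header before falling back to the stripped presigned URL. A minimal sketch of that resolution logic, factored out for illustration (the helper name and sample payload are hypothetical, and strip_query_params is approximated by splitting on "?"):

import typing


def resolve_hosted_url(
    presigned_info: typing.Dict[str, typing.Any],
    upload_url: str,
) -> str:
    # Mirror the new loop in _upload_file: rebuild the header map with
    # upper-cased keys, unwrapping single-element list values.
    headers: typing.Dict[str, typing.Any] = {}
    for key, value in presigned_info.get("Header", {}).items():
        if isinstance(value, list):
            headers[key.upper()] = value[0]

    # 2.7.8 prefers a server-provided hosted URL over the upload URL.
    if "GX-HOSTED-URL" in headers:
        return headers["GX-HOSTED-URL"]
    return upload_url.split("?", 1)[0]  # approximates strip_query_params


# A lower-cased key from the server still matches after normalization:
print(resolve_hosted_url(
    {"Header": {"gx-hosted-url": ["https://cdn.example.com/doc.pdf"]}},
    "https://s3.example.com/doc.pdf?X-Amz-Signature=abc",
))  # -> https://cdn.example.com/doc.pdf

The 2.6.0 code overwrote values in the original header dict and kept the server's key casing; the rewrite builds a fresh upper-cased map, which is what makes the GX-HOSTED-URL lookup reliable.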
groundx/types/__init__.py CHANGED
@@ -51,6 +51,7 @@ from .subscription_detail_meters import SubscriptionDetailMeters
 from .website_source import WebsiteSource
 from .workflow_apply_request import WorkflowApplyRequest
 from .workflow_detail import WorkflowDetail
+from .workflow_detail_chunk_strategy import WorkflowDetailChunkStrategy
 from .workflow_detail_relationships import WorkflowDetailRelationships
 from .workflow_engine import WorkflowEngine
 from .workflow_engine_reasoning_effort import WorkflowEngineReasoningEffort
@@ -58,16 +59,13 @@ from .workflow_engine_service import WorkflowEngineService
 from .workflow_prompt import WorkflowPrompt
 from .workflow_prompt_group import WorkflowPromptGroup
 from .workflow_prompt_role import WorkflowPromptRole
+from .workflow_request import WorkflowRequest
+from .workflow_request_chunk_strategy import WorkflowRequestChunkStrategy
 from .workflow_response import WorkflowResponse
 from .workflow_step import WorkflowStep
+from .workflow_step_config import WorkflowStepConfig
+from .workflow_step_config_field import WorkflowStepConfigField
 from .workflow_steps import WorkflowSteps
-from .workflow_steps_chunk_instruct import WorkflowStepsChunkInstruct
-from .workflow_steps_chunk_summary import WorkflowStepsChunkSummary
-from .workflow_steps_doc_keys import WorkflowStepsDocKeys
-from .workflow_steps_doc_summary import WorkflowStepsDocSummary
-from .workflow_steps_search_query import WorkflowStepsSearchQuery
-from .workflow_steps_sect_instruct import WorkflowStepsSectInstruct
-from .workflow_steps_sect_summary import WorkflowStepsSectSummary
 from .workflows_response import WorkflowsResponse
 
 __all__ = [
@@ -120,6 +118,7 @@ __all__ = [
     "WebsiteSource",
     "WorkflowApplyRequest",
     "WorkflowDetail",
+    "WorkflowDetailChunkStrategy",
    "WorkflowDetailRelationships",
    "WorkflowEngine",
    "WorkflowEngineReasoningEffort",
@@ -127,15 +126,12 @@ __all__ = [
    "WorkflowPrompt",
    "WorkflowPromptGroup",
    "WorkflowPromptRole",
+    "WorkflowRequest",
+    "WorkflowRequestChunkStrategy",
    "WorkflowResponse",
    "WorkflowStep",
+    "WorkflowStepConfig",
+    "WorkflowStepConfigField",
    "WorkflowSteps",
-    "WorkflowStepsChunkInstruct",
-    "WorkflowStepsChunkSummary",
-    "WorkflowStepsDocKeys",
-    "WorkflowStepsDocSummary",
-    "WorkflowStepsSearchQuery",
-    "WorkflowStepsSectInstruct",
-    "WorkflowStepsSectSummary",
    "WorkflowsResponse",
 ]
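Downstream imports of the removed per-step wrapper types will break on upgrade. A sketch of the migration, assuming only the 2.7.8 export list above (old names shown commented out):

# 2.6.0 exports that no longer exist in 2.7.8:
# from groundx.types import WorkflowStepsDocSummary, WorkflowStepsChunkSummary

# 2.7.8: the per-step wrappers collapse into WorkflowStep, configured per
# element type via WorkflowStepConfig.
from groundx.types import (
    WorkflowRequest,
    WorkflowStep,
    WorkflowStepConfig,
    WorkflowSteps,
)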
groundx/types/workflow_detail.py CHANGED
@@ -6,6 +6,7 @@ import pydantic
 import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 from ..core.serialization import FieldMetadata
+from .workflow_detail_chunk_strategy import WorkflowDetailChunkStrategy
 from .workflow_detail_relationships import WorkflowDetailRelationships
 from .workflow_steps import WorkflowSteps
 
@@ -15,6 +16,9 @@ class WorkflowDetail(UniversalBaseModel):
     Workflow information
     """
 
+    chunk_strategy: typing_extensions.Annotated[
+        typing.Optional[WorkflowDetailChunkStrategy], FieldMetadata(alias="chunkStrategy")
+    ] = None
    document_id: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="documentId")] = pydantic.Field(
        default=None
    )
groundx/types/workflow_detail_chunk_strategy.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WorkflowDetailChunkStrategy = typing.Union[typing.Literal["element", "size"], typing.Any]
groundx/types/workflow_prompt.py CHANGED
@@ -3,9 +3,7 @@
 import typing
 
 import pydantic
-import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from ..core.serialization import FieldMetadata
 from .workflow_prompt_role import WorkflowPromptRole
 
 
@@ -19,7 +17,7 @@ class WorkflowPrompt(UniversalBaseModel):
     A short version of the prompt that is included in historical chat transcripts as part of the prompt context
     """
 
-    long_: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="long")] = pydantic.Field(default=None)
+    prompt: typing.Optional[str] = pydantic.Field(default=None)
    """
    The prompt that is sent to the LLM
    """
groundx/types/workflow_prompt_role.py CHANGED
@@ -2,4 +2,4 @@
 
 import typing
 
-WorkflowPromptRole = typing.Union[typing.Literal["assistant", "system", "user"], typing.Any]
+WorkflowPromptRole = typing.Union[typing.Literal["assistant", "developer", "system", "user"], typing.Any]
groundx/types/{workflow_steps_doc_summary.py → workflow_request.py} RENAMED
@@ -6,12 +6,20 @@ import pydantic
 import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 from ..core.serialization import FieldMetadata
-from .workflow_step import WorkflowStep
+from .workflow_request_chunk_strategy import WorkflowRequestChunkStrategy
+from .workflow_steps import WorkflowSteps
 
 
-class WorkflowStepsDocSummary(UniversalBaseModel):
-    all_: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="all")] = None
-    json_: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="json")] = None
+class WorkflowRequest(UniversalBaseModel):
+    chunk_strategy: typing_extensions.Annotated[
+        typing.Optional[WorkflowRequestChunkStrategy], FieldMetadata(alias="chunkStrategy")
+    ] = None
+    name: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The name of the workflow being created.
+    """
+
+    steps: typing.Optional[WorkflowSteps] = None
 
    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
groundx/types/workflow_request_chunk_strategy.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WorkflowRequestChunkStrategy = typing.Union[typing.Literal["element", "size"], typing.Any]
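Together with the WorkflowRequest model above, building a workflow-creation payload might look like this sketch (values are illustrative; dict(by_alias=True) assumes the usual pydantic/UniversalBaseModel serialization shim):

from groundx.types import WorkflowRequest

# chunk_strategy accepts "element" or "size" per the literal union above
# and serializes as "chunkStrategy" via its FieldMetadata alias.
req = WorkflowRequest(chunk_strategy="element", name="invoice-extraction")

print(req.dict(by_alias=True, exclude_none=True))
# {'chunkStrategy': 'element', 'name': 'invoice-extraction'}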
groundx/types/workflow_step.py CHANGED
@@ -3,9 +3,10 @@
 import typing
 
 import pydantic
+import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .workflow_engine import WorkflowEngine
-from .workflow_prompt_group import WorkflowPromptGroup
+from ..core.serialization import FieldMetadata
+from .workflow_step_config import WorkflowStepConfig
 
 
 class WorkflowStep(UniversalBaseModel):
@@ -13,8 +14,14 @@ class WorkflowStep(UniversalBaseModel):
     Configurations for an agent, including LLM information and prompts
     """
 
-    engine: typing.Optional[WorkflowEngine] = None
-    prompt: typing.Optional[WorkflowPromptGroup] = None
+    all_: typing_extensions.Annotated[typing.Optional[WorkflowStepConfig], FieldMetadata(alias="all")] = None
+    figure: typing.Optional[WorkflowStepConfig] = None
+    json_: typing_extensions.Annotated[typing.Optional[WorkflowStepConfig], FieldMetadata(alias="json")] = None
+    paragraph: typing.Optional[WorkflowStepConfig] = None
+    table: typing.Optional[WorkflowStepConfig] = None
+    table_figure: typing_extensions.Annotated[
+        typing.Optional[WorkflowStepConfig], FieldMetadata(alias="table-figure")
+    ] = None
 
    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
groundx/types/workflow_step_config.py ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .workflow_engine import WorkflowEngine
+from .workflow_prompt_group import WorkflowPromptGroup
+from .workflow_step_config_field import WorkflowStepConfigField
+
+
+class WorkflowStepConfig(UniversalBaseModel):
+    """
+    Configurations for an agent, including LLM information and prompts
+    """
+
+    engine: typing.Optional[WorkflowEngine] = None
+    field: typing.Optional[WorkflowStepConfigField] = pydantic.Field(default=None)
+    """
+    The field where agent output will be saved
+    """
+
+    includes: typing.Optional[typing.Dict[str, bool]] = None
+    prompt: typing.Optional[WorkflowPromptGroup] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
groundx/types/workflow_step_config_field.py ADDED
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WorkflowStepConfigField = typing.Union[
+    typing.Literal["doc-sum", "doc-keys", "sect-sum", "sect-keys", "chunk-sum", "chunk-keys", "chunk-instruct", "text"],
+    typing.Any,
+]
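With these two additions, a WorkflowStep no longer holds a single engine/prompt pair; it maps element types ("all", "figure", "json", "paragraph", "table", "table-figure") to per-element WorkflowStepConfig values. A sketch under those definitions (the includes key name is hypothetical):

from groundx.types import WorkflowStep, WorkflowStepConfig

# Unset element types are simply omitted from the serialized payload.
step = WorkflowStep(
    all_=WorkflowStepConfig(field="doc-sum"),
    table=WorkflowStepConfig(
        field="chunk-sum",
        includes={"figures": True},  # hypothetical includes flag
    ),
)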
groundx/types/workflow_steps.py CHANGED
@@ -6,13 +6,7 @@ import pydantic
 import typing_extensions
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 from ..core.serialization import FieldMetadata
-from .workflow_steps_chunk_instruct import WorkflowStepsChunkInstruct
-from .workflow_steps_chunk_summary import WorkflowStepsChunkSummary
-from .workflow_steps_doc_keys import WorkflowStepsDocKeys
-from .workflow_steps_doc_summary import WorkflowStepsDocSummary
-from .workflow_steps_search_query import WorkflowStepsSearchQuery
-from .workflow_steps_sect_instruct import WorkflowStepsSectInstruct
-from .workflow_steps_sect_summary import WorkflowStepsSectSummary
+from .workflow_step import WorkflowStep
 
 
 class WorkflowSteps(UniversalBaseModel):
@@ -21,24 +15,18 @@ class WorkflowSteps(UniversalBaseModel):
     """
 
     chunk_instruct: typing_extensions.Annotated[
-        typing.Optional[WorkflowStepsChunkInstruct], FieldMetadata(alias="chunk-instruct")
-    ] = None
-    chunk_summary: typing_extensions.Annotated[
-        typing.Optional[WorkflowStepsChunkSummary], FieldMetadata(alias="chunk-summary")
-    ] = None
-    doc_keys: typing_extensions.Annotated[typing.Optional[WorkflowStepsDocKeys], FieldMetadata(alias="doc-keys")] = None
-    doc_summary: typing_extensions.Annotated[
-        typing.Optional[WorkflowStepsDocSummary], FieldMetadata(alias="doc-summary")
-    ] = None
-    search_query: typing_extensions.Annotated[
-        typing.Optional[WorkflowStepsSearchQuery], FieldMetadata(alias="search-query")
-    ] = None
-    sect_instruct: typing_extensions.Annotated[
-        typing.Optional[WorkflowStepsSectInstruct], FieldMetadata(alias="sect-instruct")
-    ] = None
-    sect_summary: typing_extensions.Annotated[
-        typing.Optional[WorkflowStepsSectSummary], FieldMetadata(alias="sect-summary")
+        typing.Optional[WorkflowStep], FieldMetadata(alias="chunk-instruct")
    ] = None
+    chunk_summary: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="chunk-summary")] = (
+        None
+    )
+    doc_keys: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="doc-keys")] = None
+    doc_summary: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="doc-summary")] = None
+    search_query: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="search-query")] = None
+    sect_instruct: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="sect-instruct")] = (
+        None
+    )
+    sect_summary: typing_extensions.Annotated[typing.Optional[WorkflowStep], FieldMetadata(alias="sect-summary")] = None
 
    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
groundx/{workflow → workflows}/__init__.py RENAMED
@@ -2,6 +2,6 @@
 
 # isort: skip_file
 
-from .types import WorkflowGetRequestId
+from .types import WorkflowsGetRequestId
 
-__all__ = ["WorkflowGetRequestId"]
+__all__ = ["WorkflowsGetRequestId"]
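Code importing from the old singular subpackage needs a one-line change on upgrade:

# 2.6.0
# from groundx.workflow.types import WorkflowGetRequestId

# 2.7.8: the subpackage and the request-id type both take the plural form.
from groundx.workflows.types import WorkflowsGetRequestId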