groundx 2.0.15__py3-none-any.whl → 2.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. groundx/__init__.py +73 -21
  2. groundx/buckets/__init__.py +2 -0
  3. groundx/buckets/client.py +55 -388
  4. groundx/buckets/raw_client.py +628 -0
  5. groundx/client.py +22 -21
  6. groundx/core/__init__.py +5 -0
  7. groundx/core/api_error.py +13 -5
  8. groundx/core/client_wrapper.py +4 -3
  9. groundx/core/force_multipart.py +16 -0
  10. groundx/core/http_client.py +76 -32
  11. groundx/core/http_response.py +55 -0
  12. groundx/core/jsonable_encoder.py +0 -1
  13. groundx/core/pydantic_utilities.py +71 -112
  14. groundx/core/serialization.py +7 -3
  15. groundx/csv_splitter.py +64 -0
  16. groundx/customer/__init__.py +2 -0
  17. groundx/customer/client.py +31 -43
  18. groundx/customer/raw_client.py +91 -0
  19. groundx/documents/__init__.py +1 -2
  20. groundx/documents/client.py +455 -953
  21. groundx/documents/raw_client.py +1450 -0
  22. groundx/errors/__init__.py +2 -0
  23. groundx/errors/bad_request_error.py +4 -3
  24. groundx/errors/unauthorized_error.py +4 -3
  25. groundx/extract/__init__.py +48 -0
  26. groundx/extract/agents/__init__.py +7 -0
  27. groundx/extract/agents/agent.py +202 -0
  28. groundx/extract/classes/__init__.py +24 -0
  29. groundx/extract/classes/agent.py +23 -0
  30. groundx/extract/classes/api.py +15 -0
  31. groundx/extract/classes/document.py +338 -0
  32. groundx/extract/classes/field.py +88 -0
  33. groundx/extract/classes/groundx.py +147 -0
  34. groundx/extract/classes/prompt.py +36 -0
  35. groundx/extract/classes/test_document.py +109 -0
  36. groundx/extract/classes/test_field.py +43 -0
  37. groundx/extract/classes/test_groundx.py +223 -0
  38. groundx/extract/classes/test_prompt.py +68 -0
  39. groundx/extract/post_process/__init__.py +7 -0
  40. groundx/extract/post_process/post_process.py +33 -0
  41. groundx/extract/services/.DS_Store +0 -0
  42. groundx/extract/services/__init__.py +14 -0
  43. groundx/extract/services/csv.py +76 -0
  44. groundx/extract/services/logger.py +126 -0
  45. groundx/extract/services/logging_cfg.py +53 -0
  46. groundx/extract/services/ratelimit.py +104 -0
  47. groundx/extract/services/sheets_client.py +160 -0
  48. groundx/extract/services/status.py +197 -0
  49. groundx/extract/services/upload.py +68 -0
  50. groundx/extract/services/upload_minio.py +122 -0
  51. groundx/extract/services/upload_s3.py +91 -0
  52. groundx/extract/services/utility.py +52 -0
  53. groundx/extract/settings/__init__.py +15 -0
  54. groundx/extract/settings/settings.py +212 -0
  55. groundx/extract/settings/test_settings.py +512 -0
  56. groundx/extract/tasks/__init__.py +6 -0
  57. groundx/extract/tasks/utility.py +27 -0
  58. groundx/extract/utility/__init__.py +15 -0
  59. groundx/extract/utility/classes.py +193 -0
  60. groundx/extract/utility/test_utility.py +81 -0
  61. groundx/groups/__init__.py +2 -0
  62. groundx/groups/client.py +63 -550
  63. groundx/groups/raw_client.py +901 -0
  64. groundx/health/__init__.py +2 -0
  65. groundx/health/client.py +35 -101
  66. groundx/health/raw_client.py +193 -0
  67. groundx/ingest.py +771 -0
  68. groundx/search/__init__.py +2 -0
  69. groundx/search/client.py +94 -227
  70. groundx/search/raw_client.py +442 -0
  71. groundx/search/types/__init__.py +2 -0
  72. groundx/types/__init__.py +68 -16
  73. groundx/types/bounding_box_detail.py +4 -4
  74. groundx/types/bucket_detail.py +5 -5
  75. groundx/types/bucket_list_response.py +17 -3
  76. groundx/types/bucket_response.py +3 -3
  77. groundx/types/bucket_update_detail.py +4 -4
  78. groundx/types/bucket_update_response.py +3 -3
  79. groundx/types/customer_detail.py +2 -2
  80. groundx/types/customer_response.py +3 -3
  81. groundx/types/document.py +54 -0
  82. groundx/types/document_detail.py +16 -4
  83. groundx/types/document_list_response.py +4 -4
  84. groundx/types/document_local_ingest_request.py +7 -0
  85. groundx/types/document_lookup_response.py +8 -3
  86. groundx/types/document_response.py +3 -3
  87. groundx/types/document_type.py +21 -1
  88. groundx/types/group_detail.py +4 -4
  89. groundx/types/group_list_response.py +17 -3
  90. groundx/types/group_response.py +3 -3
  91. groundx/types/health_response.py +3 -3
  92. groundx/types/health_response_health.py +3 -3
  93. groundx/types/health_service.py +5 -5
  94. groundx/types/ingest_local_document.py +25 -0
  95. groundx/types/ingest_local_document_metadata.py +51 -0
  96. groundx/types/ingest_remote_document.py +15 -6
  97. groundx/types/ingest_response.py +4 -4
  98. groundx/types/{process_status_response_ingest.py → ingest_status.py} +8 -7
  99. groundx/types/{ingest_response_ingest.py → ingest_status_light.py} +7 -5
  100. groundx/types/ingest_status_progress.py +26 -0
  101. groundx/types/{process_status_response_ingest_progress_errors.py → ingest_status_progress_cancelled.py} +4 -4
  102. groundx/types/{process_status_response_ingest_progress_complete.py → ingest_status_progress_complete.py} +4 -4
  103. groundx/types/{process_status_response_ingest_progress_cancelled.py → ingest_status_progress_errors.py} +4 -4
  104. groundx/types/{process_status_response_ingest_progress_processing.py → ingest_status_progress_processing.py} +4 -4
  105. groundx/types/message_response.py +2 -2
  106. groundx/types/meter_detail.py +2 -2
  107. groundx/types/process_level.py +5 -0
  108. groundx/types/{process_status_response.py → processes_status_response.py} +8 -5
  109. groundx/types/processing_status.py +3 -1
  110. groundx/types/search_response.py +3 -3
  111. groundx/types/search_response_search.py +3 -3
  112. groundx/types/search_result_item.py +7 -5
  113. groundx/types/search_result_item_pages_item.py +41 -0
  114. groundx/types/subscription_detail.py +3 -3
  115. groundx/types/subscription_detail_meters.py +5 -5
  116. groundx/{documents/types/website_crawl_request_websites_item.py → types/website_source.py} +7 -7
  117. groundx/types/workflow_apply_request.py +24 -0
  118. groundx/types/workflow_detail.py +59 -0
  119. groundx/types/workflow_detail_chunk_strategy.py +5 -0
  120. groundx/types/workflow_detail_relationships.py +36 -0
  121. groundx/types/workflow_engine.py +58 -0
  122. groundx/types/workflow_engine_reasoning_effort.py +5 -0
  123. groundx/types/workflow_engine_service.py +7 -0
  124. groundx/types/workflow_prompt.py +37 -0
  125. groundx/types/workflow_prompt_group.py +25 -0
  126. groundx/types/workflow_prompt_role.py +5 -0
  127. groundx/types/workflow_request.py +31 -0
  128. groundx/types/workflow_request_chunk_strategy.py +5 -0
  129. groundx/types/workflow_response.py +20 -0
  130. groundx/types/workflow_step.py +33 -0
  131. groundx/types/workflow_step_config.py +33 -0
  132. groundx/types/workflow_step_config_field.py +8 -0
  133. groundx/types/workflow_steps.py +38 -0
  134. groundx/types/workflows_response.py +20 -0
  135. groundx/workflows/__init__.py +7 -0
  136. groundx/workflows/client.py +736 -0
  137. groundx/workflows/raw_client.py +841 -0
  138. groundx/workflows/types/__init__.py +7 -0
  139. groundx/workflows/types/workflows_get_request_id.py +5 -0
  140. {groundx-2.0.15.dist-info → groundx-2.7.7.dist-info}/LICENSE +1 -1
  141. {groundx-2.0.15.dist-info → groundx-2.7.7.dist-info}/METADATA +39 -22
  142. groundx-2.7.7.dist-info/RECORD +155 -0
  143. groundx/documents/types/__init__.py +0 -6
  144. groundx/documents/types/documents_ingest_local_request_files_item.py +0 -43
  145. groundx/types/process_status_response_ingest_progress.py +0 -26
  146. groundx-2.0.15.dist-info/RECORD +0 -82
  147. {groundx-2.0.15.dist-info → groundx-2.7.7.dist-info}/WHEEL +0 -0
groundx/search/raw_client.py ADDED
@@ -0,0 +1,442 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+ from json.decoder import JSONDecodeError
+
+ from ..core.api_error import ApiError
+ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+ from ..core.http_response import AsyncHttpResponse, HttpResponse
+ from ..core.jsonable_encoder import jsonable_encoder
+ from ..core.pydantic_utilities import parse_obj_as
+ from ..core.request_options import RequestOptions
+ from ..errors.bad_request_error import BadRequestError
+ from ..errors.unauthorized_error import UnauthorizedError
+ from ..types.search_response import SearchResponse
+ from .types.search_content_request_id import SearchContentRequestId
+
+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+
+
+ class RawSearchClient:
+     def __init__(self, *, client_wrapper: SyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     def content(
+         self,
+         id: SearchContentRequestId,
+         *,
+         query: str,
+         n: typing.Optional[int] = None,
+         next_token: typing.Optional[str] = None,
+         verbosity: typing.Optional[int] = None,
+         filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+         relevance: typing.Optional[float] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> HttpResponse[SearchResponse]:
+         """
+         Search documents on GroundX for the most relevant information to a given query.
+         The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+         Parameters
+         ----------
+         id : SearchContentRequestId
+             The bucketId, groupId, or documentId to be searched. The document or documents within the specified container will be compared to the query, and relevant information will be extracted.
+
+         query : str
+             The search query to be used to find relevant documentation.
+
+         n : typing.Optional[int]
+             The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+         next_token : typing.Optional[str]
+             A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+         verbosity : typing.Optional[int]
+             The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+         filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+             A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+         relevance : typing.Optional[float]
+             The minimum search relevance score required to include the result. By default, this is 10.0.
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         HttpResponse[SearchResponse]
+             Search query success
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             f"v1/search/{jsonable_encoder(id)}",
+             method="POST",
+             params={
+                 "n": n,
+                 "nextToken": next_token,
+                 "verbosity": verbosity,
+             },
+             json={
+                 "query": query,
+                 "filter": filter,
+                 "relevance": relevance,
+             },
+             headers={
+                 "content-type": "application/json",
+             },
+             request_options=request_options,
+             omit=OMIT,
+         )
+         try:
+             if 200 <= _response.status_code < 300:
+                 _data = typing.cast(
+                     SearchResponse,
+                     parse_obj_as(
+                         type_=SearchResponse, # type: ignore
+                         object_=_response.json(),
+                     ),
+                 )
+                 return HttpResponse(response=_response, data=_data)
+             if _response.status_code == 400:
+                 raise BadRequestError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             if _response.status_code == 401:
+                 raise UnauthorizedError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+     def documents(
+         self,
+         *,
+         query: str,
+         document_ids: typing.Sequence[str],
+         n: typing.Optional[int] = None,
+         next_token: typing.Optional[str] = None,
+         verbosity: typing.Optional[int] = None,
+         filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+         relevance: typing.Optional[float] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> HttpResponse[SearchResponse]:
+         """
+         Search documents on GroundX for the most relevant information to a given query by documentId(s).
+         The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+         Parameters
+         ----------
+         query : str
+             The search query to be used to find relevant documentation.
+
+         document_ids : typing.Sequence[str]
+             An array of unique documentIds to be searched.
+
+         n : typing.Optional[int]
+             The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+         next_token : typing.Optional[str]
+             A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+         verbosity : typing.Optional[int]
+             The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+         filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+             A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+         relevance : typing.Optional[float]
+             The minimum search relevance score required to include the result. By default, this is 10.0.
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         HttpResponse[SearchResponse]
+             Search query success
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "v1/search/documents",
+             method="POST",
+             params={
+                 "n": n,
+                 "nextToken": next_token,
+                 "verbosity": verbosity,
+             },
+             json={
+                 "query": query,
+                 "documentIds": document_ids,
+                 "filter": filter,
+                 "relevance": relevance,
+             },
+             headers={
+                 "content-type": "application/json",
+             },
+             request_options=request_options,
+             omit=OMIT,
+         )
+         try:
+             if 200 <= _response.status_code < 300:
+                 _data = typing.cast(
+                     SearchResponse,
+                     parse_obj_as(
+                         type_=SearchResponse, # type: ignore
+                         object_=_response.json(),
+                     ),
+                 )
+                 return HttpResponse(response=_response, data=_data)
+             if _response.status_code == 400:
+                 raise BadRequestError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             if _response.status_code == 401:
+                 raise UnauthorizedError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+ class AsyncRawSearchClient:
+     def __init__(self, *, client_wrapper: AsyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     async def content(
+         self,
+         id: SearchContentRequestId,
+         *,
+         query: str,
+         n: typing.Optional[int] = None,
+         next_token: typing.Optional[str] = None,
+         verbosity: typing.Optional[int] = None,
+         filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+         relevance: typing.Optional[float] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> AsyncHttpResponse[SearchResponse]:
+         """
+         Search documents on GroundX for the most relevant information to a given query.
+         The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+         Parameters
+         ----------
+         id : SearchContentRequestId
+             The bucketId, groupId, or documentId to be searched. The document or documents within the specified container will be compared to the query, and relevant information will be extracted.
+
+         query : str
+             The search query to be used to find relevant documentation.
+
+         n : typing.Optional[int]
+             The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+         next_token : typing.Optional[str]
+             A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+         verbosity : typing.Optional[int]
+             The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+         filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+             A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+         relevance : typing.Optional[float]
+             The minimum search relevance score required to include the result. By default, this is 10.0.
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         AsyncHttpResponse[SearchResponse]
+             Search query success
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             f"v1/search/{jsonable_encoder(id)}",
+             method="POST",
+             params={
+                 "n": n,
+                 "nextToken": next_token,
+                 "verbosity": verbosity,
+             },
+             json={
+                 "query": query,
+                 "filter": filter,
+                 "relevance": relevance,
+             },
+             headers={
+                 "content-type": "application/json",
+             },
+             request_options=request_options,
+             omit=OMIT,
+         )
+         try:
+             if 200 <= _response.status_code < 300:
+                 _data = typing.cast(
+                     SearchResponse,
+                     parse_obj_as(
+                         type_=SearchResponse, # type: ignore
+                         object_=_response.json(),
+                     ),
+                 )
+                 return AsyncHttpResponse(response=_response, data=_data)
+             if _response.status_code == 400:
+                 raise BadRequestError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             if _response.status_code == 401:
+                 raise UnauthorizedError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+     async def documents(
+         self,
+         *,
+         query: str,
+         document_ids: typing.Sequence[str],
+         n: typing.Optional[int] = None,
+         next_token: typing.Optional[str] = None,
+         verbosity: typing.Optional[int] = None,
+         filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+         relevance: typing.Optional[float] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> AsyncHttpResponse[SearchResponse]:
+         """
+         Search documents on GroundX for the most relevant information to a given query by documentId(s).
+         The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+         Parameters
+         ----------
+         query : str
+             The search query to be used to find relevant documentation.
+
+         document_ids : typing.Sequence[str]
+             An array of unique documentIds to be searched.
+
+         n : typing.Optional[int]
+             The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+         next_token : typing.Optional[str]
+             A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+         verbosity : typing.Optional[int]
+             The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+         filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+             A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+         relevance : typing.Optional[float]
+             The minimum search relevance score required to include the result. By default, this is 10.0.
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         AsyncHttpResponse[SearchResponse]
+             Search query success
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "v1/search/documents",
+             method="POST",
+             params={
+                 "n": n,
+                 "nextToken": next_token,
+                 "verbosity": verbosity,
+             },
+             json={
+                 "query": query,
+                 "documentIds": document_ids,
+                 "filter": filter,
+                 "relevance": relevance,
+             },
+             headers={
+                 "content-type": "application/json",
+             },
+             request_options=request_options,
+             omit=OMIT,
+         )
+         try:
+             if 200 <= _response.status_code < 300:
+                 _data = typing.cast(
+                     SearchResponse,
+                     parse_obj_as(
+                         type_=SearchResponse, # type: ignore
+                         object_=_response.json(),
+                     ),
+                 )
+                 return AsyncHttpResponse(response=_response, data=_data)
+             if _response.status_code == 400:
+                 raise BadRequestError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             if _response.status_code == 401:
+                 raise UnauthorizedError(
+                     headers=dict(_response.headers),
+                     body=typing.cast(
+                         typing.Optional[typing.Any],
+                         parse_obj_as(
+                             type_=typing.Optional[typing.Any], # type: ignore
+                             object_=_response.json(),
+                         ),
+                     ),
+                 )
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+         raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
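Both raw clients above are thin wrappers around POST v1/search/{id} and POST v1/search/documents. As a point of reference, the minimal sketch below shows how the same content search is usually reached through the top-level GroundX client rather than the raw client; the API key, bucket ID, and query are placeholder values, and the attribute access mirrors the result.search.text / result.search.results pattern described in the docstrings.

# Minimal usage sketch (placeholder API key and bucket ID).
from groundx import GroundX

client = GroundX(api_key="YOUR_API_KEY")

# POSTs to v1/search/{id}; n and verbosity travel as query parameters,
# while query/filter/relevance go in the JSON body, as in RawSearchClient.content above.
result = client.search.content(
    id=1234,  # a bucketId, groupId, or documentId
    query="residential solar panel warranty coverage",
    n=20,  # 1-100, default 20
    verbosity=1,  # 0: context only, 1: results without searchData, 2: full searchData
)

print(result.search.text)  # recommended context for RAG
for chunk in result.search.results or []:
    print(chunk)  # individual scored chunks, useful for citation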
groundx/search/types/__init__.py CHANGED
@@ -1,5 +1,7 @@
  # This file was auto-generated by Fern from our API Definition.

+ # isort: skip_file
+
  from .search_content_request_id import SearchContentRequestId

  __all__ = ["SearchContentRequestId"]
groundx/types/__init__.py CHANGED
@@ -1,5 +1,7 @@
  # This file was auto-generated by Fern from our API Definition.

+ # isort: skip_file
+
  from .bounding_box_detail import BoundingBoxDetail
  from .bucket_detail import BucketDetail
  from .bucket_list_response import BucketListResponse
@@ -8,8 +10,10 @@ from .bucket_update_detail import BucketUpdateDetail
  from .bucket_update_response import BucketUpdateResponse
  from .customer_detail import CustomerDetail
  from .customer_response import CustomerResponse
+ from .document import Document
  from .document_detail import DocumentDetail
  from .document_list_response import DocumentListResponse
+ from .document_local_ingest_request import DocumentLocalIngestRequest
  from .document_lookup_response import DocumentLookupResponse
  from .document_response import DocumentResponse
  from .document_type import DocumentType
@@ -20,26 +24,49 @@ from .health_response import HealthResponse
  from .health_response_health import HealthResponseHealth
  from .health_service import HealthService
  from .health_service_status import HealthServiceStatus
+ from .ingest_local_document import IngestLocalDocument
+ from .ingest_local_document_metadata import IngestLocalDocumentMetadata
  from .ingest_remote_document import IngestRemoteDocument
  from .ingest_response import IngestResponse
- from .ingest_response_ingest import IngestResponseIngest
+ from .ingest_status import IngestStatus
+ from .ingest_status_light import IngestStatusLight
+ from .ingest_status_progress import IngestStatusProgress
+ from .ingest_status_progress_cancelled import IngestStatusProgressCancelled
+ from .ingest_status_progress_complete import IngestStatusProgressComplete
+ from .ingest_status_progress_errors import IngestStatusProgressErrors
+ from .ingest_status_progress_processing import IngestStatusProgressProcessing
  from .message_response import MessageResponse
  from .meter_detail import MeterDetail
- from .process_status_response import ProcessStatusResponse
- from .process_status_response_ingest import ProcessStatusResponseIngest
- from .process_status_response_ingest_progress import ProcessStatusResponseIngestProgress
- from .process_status_response_ingest_progress_cancelled import ProcessStatusResponseIngestProgressCancelled
- from .process_status_response_ingest_progress_complete import ProcessStatusResponseIngestProgressComplete
- from .process_status_response_ingest_progress_errors import ProcessStatusResponseIngestProgressErrors
- from .process_status_response_ingest_progress_processing import ProcessStatusResponseIngestProgressProcessing
+ from .process_level import ProcessLevel
+ from .processes_status_response import ProcessesStatusResponse
  from .processing_status import ProcessingStatus
  from .search_response import SearchResponse
  from .search_response_search import SearchResponseSearch
  from .search_result_item import SearchResultItem
+ from .search_result_item_pages_item import SearchResultItemPagesItem
  from .sort import Sort
  from .sort_order import SortOrder
  from .subscription_detail import SubscriptionDetail
  from .subscription_detail_meters import SubscriptionDetailMeters
+ from .website_source import WebsiteSource
+ from .workflow_apply_request import WorkflowApplyRequest
+ from .workflow_detail import WorkflowDetail
+ from .workflow_detail_chunk_strategy import WorkflowDetailChunkStrategy
+ from .workflow_detail_relationships import WorkflowDetailRelationships
+ from .workflow_engine import WorkflowEngine
+ from .workflow_engine_reasoning_effort import WorkflowEngineReasoningEffort
+ from .workflow_engine_service import WorkflowEngineService
+ from .workflow_prompt import WorkflowPrompt
+ from .workflow_prompt_group import WorkflowPromptGroup
+ from .workflow_prompt_role import WorkflowPromptRole
+ from .workflow_request import WorkflowRequest
+ from .workflow_request_chunk_strategy import WorkflowRequestChunkStrategy
+ from .workflow_response import WorkflowResponse
+ from .workflow_step import WorkflowStep
+ from .workflow_step_config import WorkflowStepConfig
+ from .workflow_step_config_field import WorkflowStepConfigField
+ from .workflow_steps import WorkflowSteps
+ from .workflows_response import WorkflowsResponse

  __all__ = [
      "BoundingBoxDetail",
@@ -50,8 +77,10 @@ __all__ = [
      "BucketUpdateResponse",
      "CustomerDetail",
      "CustomerResponse",
+     "Document",
      "DocumentDetail",
      "DocumentListResponse",
+     "DocumentLocalIngestRequest",
      "DocumentLookupResponse",
      "DocumentResponse",
      "DocumentType",
@@ -62,24 +91,47 @@ __all__ = [
      "HealthResponseHealth",
      "HealthService",
      "HealthServiceStatus",
+     "IngestLocalDocument",
+     "IngestLocalDocumentMetadata",
      "IngestRemoteDocument",
      "IngestResponse",
-     "IngestResponseIngest",
+     "IngestStatus",
+     "IngestStatusLight",
+     "IngestStatusProgress",
+     "IngestStatusProgressCancelled",
+     "IngestStatusProgressComplete",
+     "IngestStatusProgressErrors",
+     "IngestStatusProgressProcessing",
      "MessageResponse",
      "MeterDetail",
-     "ProcessStatusResponse",
-     "ProcessStatusResponseIngest",
-     "ProcessStatusResponseIngestProgress",
-     "ProcessStatusResponseIngestProgressCancelled",
-     "ProcessStatusResponseIngestProgressComplete",
-     "ProcessStatusResponseIngestProgressErrors",
-     "ProcessStatusResponseIngestProgressProcessing",
+     "ProcessLevel",
+     "ProcessesStatusResponse",
      "ProcessingStatus",
      "SearchResponse",
      "SearchResponseSearch",
      "SearchResultItem",
+     "SearchResultItemPagesItem",
      "Sort",
      "SortOrder",
      "SubscriptionDetail",
      "SubscriptionDetailMeters",
+     "WebsiteSource",
+     "WorkflowApplyRequest",
+     "WorkflowDetail",
+     "WorkflowDetailChunkStrategy",
+     "WorkflowDetailRelationships",
+     "WorkflowEngine",
+     "WorkflowEngineReasoningEffort",
+     "WorkflowEngineService",
+     "WorkflowPrompt",
+     "WorkflowPromptGroup",
+     "WorkflowPromptRole",
+     "WorkflowRequest",
+     "WorkflowRequestChunkStrategy",
+     "WorkflowResponse",
+     "WorkflowStep",
+     "WorkflowStepConfig",
+     "WorkflowStepConfigField",
+     "WorkflowSteps",
+     "WorkflowsResponse",
  ]
groundx/types/bounding_box_detail.py CHANGED
@@ -1,11 +1,11 @@
  # This file was auto-generated by Fern from our API Definition.

- from ..core.pydantic_utilities import UniversalBaseModel
- import typing_extensions
  import typing
- from ..core.serialization import FieldMetadata
+
  import pydantic
- from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import typing_extensions
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from ..core.serialization import FieldMetadata


  class BoundingBoxDetail(UniversalBaseModel):
groundx/types/bucket_detail.py CHANGED
@@ -1,12 +1,12 @@
  # This file was auto-generated by Fern from our API Definition.

- from ..core.pydantic_utilities import UniversalBaseModel
- import typing_extensions
- from ..core.serialization import FieldMetadata
- import typing
  import datetime as dt
+ import typing
+
  import pydantic
- from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import typing_extensions
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from ..core.serialization import FieldMetadata


  class BucketDetail(UniversalBaseModel):
groundx/types/bucket_list_response.py CHANGED
@@ -1,14 +1,28 @@
  # This file was auto-generated by Fern from our API Definition.

- from ..core.pydantic_utilities import UniversalBaseModel
  import typing
- from .bucket_detail import BucketDetail
- from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
  import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .bucket_detail import BucketDetail


  class BucketListResponse(UniversalBaseModel):
      buckets: typing.Optional[typing.List[BucketDetail]] = None
+     count: typing.Optional[int] = pydantic.Field(default=None)
+     """
+     The number of buckets returned in the current response
+     """
+
+     remaining: typing.Optional[int] = pydantic.Field(default=None)
+     """
+     The number of buckets that have not been returned yet, will be null if there are no remaining buckets
+     """
+
+     total: typing.Optional[int] = pydantic.Field(default=None)
+     """
+     The total number of buckets found
+     """

      if IS_PYDANTIC_V2:
          model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
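The three new counters on BucketListResponse describe how much of a bucket listing was returned. A small sketch of reading them is below; it assumes the listing is fetched through client.buckets.list(), which is how the buckets client exposes this response in the SDK, and uses a placeholder API key.

# Hedged sketch: inspecting the new pagination counters on BucketListResponse.
# Assumes client.buckets.list() returns a BucketListResponse.
from groundx import GroundX

client = GroundX(api_key="YOUR_API_KEY")

listing = client.buckets.list()
print(f"received {listing.count} of {listing.total} buckets")
if listing.remaining:  # null/None when no buckets are left to fetch
    print(f"{listing.remaining} buckets have not been returned yet")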
groundx/types/bucket_response.py CHANGED
@@ -1,10 +1,10 @@
  # This file was auto-generated by Fern from our API Definition.

- from ..core.pydantic_utilities import UniversalBaseModel
- from .bucket_detail import BucketDetail
- from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing
+
  import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .bucket_detail import BucketDetail


  class BucketResponse(UniversalBaseModel):