groundx 2.3.0__py3-none-any.whl → 2.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. groundx/__init__.py +16 -16
  2. groundx/buckets/__init__.py +2 -0
  3. groundx/buckets/client.py +47 -366
  4. groundx/buckets/raw_client.py +628 -0
  5. groundx/client.py +15 -17
  6. groundx/core/__init__.py +5 -0
  7. groundx/core/api_error.py +13 -5
  8. groundx/core/client_wrapper.py +4 -3
  9. groundx/core/force_multipart.py +16 -0
  10. groundx/core/http_client.py +70 -26
  11. groundx/core/http_response.py +55 -0
  12. groundx/core/jsonable_encoder.py +0 -1
  13. groundx/core/pydantic_utilities.py +69 -110
  14. groundx/core/serialization.py +7 -3
  15. groundx/customer/__init__.py +2 -0
  16. groundx/customer/client.py +31 -43
  17. groundx/customer/raw_client.py +91 -0
  18. groundx/documents/__init__.py +2 -0
  19. groundx/documents/client.py +122 -789
  20. groundx/documents/raw_client.py +1404 -0
  21. groundx/errors/__init__.py +2 -0
  22. groundx/errors/bad_request_error.py +4 -3
  23. groundx/errors/unauthorized_error.py +4 -3
  24. groundx/groups/__init__.py +2 -0
  25. groundx/groups/client.py +55 -520
  26. groundx/groups/raw_client.py +901 -0
  27. groundx/health/__init__.py +2 -0
  28. groundx/health/client.py +35 -101
  29. groundx/health/raw_client.py +193 -0
  30. groundx/ingest.py +2 -2
  31. groundx/search/__init__.py +2 -0
  32. groundx/search/client.py +82 -211
  33. groundx/search/raw_client.py +442 -0
  34. groundx/search/types/__init__.py +2 -0
  35. groundx/types/__init__.py +16 -16
  36. groundx/types/bounding_box_detail.py +4 -4
  37. groundx/types/bucket_detail.py +5 -5
  38. groundx/types/bucket_list_response.py +17 -3
  39. groundx/types/bucket_response.py +3 -3
  40. groundx/types/bucket_update_detail.py +4 -4
  41. groundx/types/bucket_update_response.py +3 -3
  42. groundx/types/customer_detail.py +2 -2
  43. groundx/types/customer_response.py +3 -3
  44. groundx/types/document.py +4 -4
  45. groundx/types/document_detail.py +9 -4
  46. groundx/types/document_list_response.py +4 -4
  47. groundx/types/document_local_ingest_request.py +1 -0
  48. groundx/types/document_lookup_response.py +8 -3
  49. groundx/types/document_response.py +3 -3
  50. groundx/types/group_detail.py +4 -4
  51. groundx/types/group_list_response.py +17 -3
  52. groundx/types/group_response.py +3 -3
  53. groundx/types/health_response.py +3 -3
  54. groundx/types/health_response_health.py +3 -3
  55. groundx/types/health_service.py +5 -5
  56. groundx/types/ingest_local_document.py +3 -3
  57. groundx/types/ingest_local_document_metadata.py +9 -4
  58. groundx/types/ingest_remote_document.py +10 -5
  59. groundx/types/ingest_response.py +4 -4
  60. groundx/types/{process_status_response_ingest.py → ingest_status.py} +8 -7
  61. groundx/types/{ingest_response_ingest.py → ingest_status_light.py} +7 -5
  62. groundx/types/ingest_status_progress.py +26 -0
  63. groundx/types/{process_status_response_ingest_progress_errors.py → ingest_status_progress_cancelled.py} +4 -4
  64. groundx/types/{process_status_response_ingest_progress_complete.py → ingest_status_progress_complete.py} +4 -4
  65. groundx/types/{process_status_response_ingest_progress_cancelled.py → ingest_status_progress_errors.py} +4 -4
  66. groundx/types/{process_status_response_ingest_progress_processing.py → ingest_status_progress_processing.py} +4 -4
  67. groundx/types/message_response.py +2 -2
  68. groundx/types/meter_detail.py +2 -2
  69. groundx/types/processes_status_response.py +19 -2
  70. groundx/types/search_response.py +3 -3
  71. groundx/types/search_response_search.py +3 -3
  72. groundx/types/search_result_item.py +5 -5
  73. groundx/types/subscription_detail.py +3 -3
  74. groundx/types/subscription_detail_meters.py +5 -5
  75. groundx/types/website_source.py +4 -4
  76. {groundx-2.3.0.dist-info → groundx-2.3.5.dist-info}/METADATA +1 -1
  77. groundx-2.3.5.dist-info/RECORD +95 -0
  78. groundx/types/process_status_response.py +0 -20
  79. groundx/types/process_status_response_ingest_progress.py +0 -26
  80. groundx-2.3.0.dist-info/RECORD +0 -88
  81. {groundx-2.3.0.dist-info → groundx-2.3.5.dist-info}/LICENSE +0 -0
  82. {groundx-2.3.0.dist-info → groundx-2.3.5.dist-info}/WHEEL +0 -0
groundx/search/raw_client.py ADDED
@@ -0,0 +1,442 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pydantic_utilities import parse_obj_as
+from ..core.request_options import RequestOptions
+from ..errors.bad_request_error import BadRequestError
+from ..errors.unauthorized_error import UnauthorizedError
+from ..types.search_response import SearchResponse
+from .types.search_content_request_id import SearchContentRequestId
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawSearchClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def content(
+        self,
+        id: SearchContentRequestId,
+        *,
+        query: str,
+        n: typing.Optional[int] = None,
+        next_token: typing.Optional[str] = None,
+        verbosity: typing.Optional[int] = None,
+        filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        relevance: typing.Optional[float] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[SearchResponse]:
+        """
+        Search documents on GroundX for the most relevant information to a given query.
+        The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+        Parameters
+        ----------
+        id : SearchContentRequestId
+            The bucketId, groupId, or documentId to be searched. The document or documents within the specified container will be compared to the query, and relevant information will be extracted.
+
+        query : str
+            The search query to be used to find relevant documentation.
+
+        n : typing.Optional[int]
+            The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+        next_token : typing.Optional[str]
+            A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+        verbosity : typing.Optional[int]
+            The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+        filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+        relevance : typing.Optional[float]
+            The minimum search relevance score required to include the result. By default, this is 10.0.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[SearchResponse]
+            Search query success
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"v1/search/{jsonable_encoder(id)}",
+            method="POST",
+            params={
+                "n": n,
+                "nextToken": next_token,
+                "verbosity": verbosity,
+            },
+            json={
+                "query": query,
+                "filter": filter,
+                "relevance": relevance,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    SearchResponse,
+                    parse_obj_as(
+                        type_=SearchResponse, # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            if _response.status_code == 401:
+                raise UnauthorizedError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    def documents(
+        self,
+        *,
+        query: str,
+        document_ids: typing.Sequence[str],
+        n: typing.Optional[int] = None,
+        next_token: typing.Optional[str] = None,
+        verbosity: typing.Optional[int] = None,
+        filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        relevance: typing.Optional[float] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[SearchResponse]:
+        """
+        Search documents on GroundX for the most relevant information to a given query by documentId(s).
+        The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+        Parameters
+        ----------
+        query : str
+            The search query to be used to find relevant documentation.
+
+        document_ids : typing.Sequence[str]
+            An array of unique documentIds to be searched.
+
+        n : typing.Optional[int]
+            The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+        next_token : typing.Optional[str]
+            A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+        verbosity : typing.Optional[int]
+            The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+        filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+        relevance : typing.Optional[float]
+            The minimum search relevance score required to include the result. By default, this is 10.0.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[SearchResponse]
+            Search query success
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/search/documents",
+            method="POST",
+            params={
+                "n": n,
+                "nextToken": next_token,
+                "verbosity": verbosity,
+            },
+            json={
+                "query": query,
+                "documentIds": document_ids,
+                "filter": filter,
+                "relevance": relevance,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    SearchResponse,
+                    parse_obj_as(
+                        type_=SearchResponse, # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            if _response.status_code == 401:
+                raise UnauthorizedError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+class AsyncRawSearchClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def content(
+        self,
+        id: SearchContentRequestId,
+        *,
+        query: str,
+        n: typing.Optional[int] = None,
+        next_token: typing.Optional[str] = None,
+        verbosity: typing.Optional[int] = None,
+        filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        relevance: typing.Optional[float] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[SearchResponse]:
+        """
+        Search documents on GroundX for the most relevant information to a given query.
+        The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+        Parameters
+        ----------
+        id : SearchContentRequestId
+            The bucketId, groupId, or documentId to be searched. The document or documents within the specified container will be compared to the query, and relevant information will be extracted.
+
+        query : str
+            The search query to be used to find relevant documentation.
+
+        n : typing.Optional[int]
+            The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+        next_token : typing.Optional[str]
+            A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+        verbosity : typing.Optional[int]
+            The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+        filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+        relevance : typing.Optional[float]
+            The minimum search relevance score required to include the result. By default, this is 10.0.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[SearchResponse]
+            Search query success
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"v1/search/{jsonable_encoder(id)}",
+            method="POST",
+            params={
+                "n": n,
+                "nextToken": next_token,
+                "verbosity": verbosity,
+            },
+            json={
+                "query": query,
+                "filter": filter,
+                "relevance": relevance,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    SearchResponse,
+                    parse_obj_as(
+                        type_=SearchResponse, # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            if _response.status_code == 401:
+                raise UnauthorizedError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def documents(
+        self,
+        *,
+        query: str,
+        document_ids: typing.Sequence[str],
+        n: typing.Optional[int] = None,
+        next_token: typing.Optional[str] = None,
+        verbosity: typing.Optional[int] = None,
+        filter: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        relevance: typing.Optional[float] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[SearchResponse]:
+        """
+        Search documents on GroundX for the most relevant information to a given query by documentId(s).
+        The result of this query is typically used in one of two ways; `result.search.text` can be used to provide context to a language model, facilitating RAG, or `result.search.results` can be used to observe chunks of text which are relevant to the query, facilitating citation.
+
+        Parameters
+        ----------
+        query : str
+            The search query to be used to find relevant documentation.
+
+        document_ids : typing.Sequence[str]
+            An array of unique documentIds to be searched.
+
+        n : typing.Optional[int]
+            The maximum number of returned search results. Accepts 1-100 with a default of 20.
+
+        next_token : typing.Optional[str]
+            A token for pagination. If the number of search results for a given query is larger than n, the response will include a "nextToken" value. That token can be included in this field to retrieve the next batch of n search results.
+
+        verbosity : typing.Optional[int]
+            The amount of data returned with each search result. 0 == no search results, only the recommended context. 1 == search results but no searchData. 2 == search results and searchData.
+
+        filter : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            A dictionary of key-value pairs that can be used to pre-filter documents prior to a search.
+
+        relevance : typing.Optional[float]
+            The minimum search relevance score required to include the result. By default, this is 10.0.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[SearchResponse]
+            Search query success
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/search/documents",
+            method="POST",
+            params={
+                "n": n,
+                "nextToken": next_token,
+                "verbosity": verbosity,
+            },
+            json={
+                "query": query,
+                "documentIds": document_ids,
+                "filter": filter,
+                "relevance": relevance,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    SearchResponse,
+                    parse_obj_as(
+                        type_=SearchResponse, # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            if _response.status_code == 401:
+                raise UnauthorizedError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any], # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
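The raw clients above return an HttpResponse wrapper whose .data field carries the parsed SearchResponse, rather than returning the model directly. A minimal usage sketch, assuming the public GroundX client entry point and the existing client.search.content wrapper; the with_raw_response accessor is the usual Fern convention for reaching RawSearchClient and is an assumption, since the wrapper change is not part of this hunk:

from groundx import GroundX

client = GroundX(api_key="YOUR_API_KEY")

# High-level call: returns a parsed SearchResponse, as in 2.3.0.
result = client.search.content(id=12345, query="quarterly revenue", n=20, verbosity=2)
print(result.search.text)  # concatenated context, e.g. for a RAG prompt

# Raw-response variant (assumption: the regenerated SearchClient exposes the raw
# client via with_raw_response); .data is the same parsed SearchResponse.
raw = client.search.with_raw_response.content(id=12345, query="quarterly revenue")
print(raw.data.search.text)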
groundx/search/types/__init__.py CHANGED
@@ -1,5 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.
 
+# isort: skip_file
+
 from .search_content_request_id import SearchContentRequestId
 
 __all__ = ["SearchContentRequestId"]
groundx/types/__init__.py CHANGED
@@ -1,5 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.
 
+# isort: skip_file
+
 from .bounding_box_detail import BoundingBoxDetail
 from .bucket_detail import BucketDetail
 from .bucket_list_response import BucketListResponse
@@ -26,17 +28,16 @@ from .ingest_local_document import IngestLocalDocument
 from .ingest_local_document_metadata import IngestLocalDocumentMetadata
 from .ingest_remote_document import IngestRemoteDocument
 from .ingest_response import IngestResponse
-from .ingest_response_ingest import IngestResponseIngest
+from .ingest_status import IngestStatus
+from .ingest_status_light import IngestStatusLight
+from .ingest_status_progress import IngestStatusProgress
+from .ingest_status_progress_cancelled import IngestStatusProgressCancelled
+from .ingest_status_progress_complete import IngestStatusProgressComplete
+from .ingest_status_progress_errors import IngestStatusProgressErrors
+from .ingest_status_progress_processing import IngestStatusProgressProcessing
 from .message_response import MessageResponse
 from .meter_detail import MeterDetail
 from .process_level import ProcessLevel
-from .process_status_response import ProcessStatusResponse
-from .process_status_response_ingest import ProcessStatusResponseIngest
-from .process_status_response_ingest_progress import ProcessStatusResponseIngestProgress
-from .process_status_response_ingest_progress_cancelled import ProcessStatusResponseIngestProgressCancelled
-from .process_status_response_ingest_progress_complete import ProcessStatusResponseIngestProgressComplete
-from .process_status_response_ingest_progress_errors import ProcessStatusResponseIngestProgressErrors
-from .process_status_response_ingest_progress_processing import ProcessStatusResponseIngestProgressProcessing
 from .processes_status_response import ProcessesStatusResponse
 from .processing_status import ProcessingStatus
 from .search_response import SearchResponse
@@ -75,17 +76,16 @@ __all__ = [
     "IngestLocalDocumentMetadata",
     "IngestRemoteDocument",
     "IngestResponse",
-    "IngestResponseIngest",
+    "IngestStatus",
+    "IngestStatusLight",
+    "IngestStatusProgress",
+    "IngestStatusProgressCancelled",
+    "IngestStatusProgressComplete",
+    "IngestStatusProgressErrors",
+    "IngestStatusProgressProcessing",
     "MessageResponse",
     "MeterDetail",
     "ProcessLevel",
-    "ProcessStatusResponse",
-    "ProcessStatusResponseIngest",
-    "ProcessStatusResponseIngestProgress",
-    "ProcessStatusResponseIngestProgressCancelled",
-    "ProcessStatusResponseIngestProgressComplete",
-    "ProcessStatusResponseIngestProgressErrors",
-    "ProcessStatusResponseIngestProgressProcessing",
     "ProcessesStatusResponse",
     "ProcessingStatus",
     "SearchResponse",
groundx/types/bounding_box_detail.py CHANGED
@@ -1,11 +1,11 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing_extensions
 import typing
-from ..core.serialization import FieldMetadata
+
 import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from ..core.serialization import FieldMetadata
 
 
 class BoundingBoxDetail(UniversalBaseModel):
groundx/types/bucket_detail.py CHANGED
@@ -1,12 +1,12 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing_extensions
-from ..core.serialization import FieldMetadata
-import typing
 import datetime as dt
+import typing
+
 import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from ..core.serialization import FieldMetadata
 
 
 class BucketDetail(UniversalBaseModel):
groundx/types/bucket_list_response.py CHANGED
@@ -1,14 +1,28 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
 import typing
-from .bucket_detail import BucketDetail
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
 import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .bucket_detail import BucketDetail
 
 
 class BucketListResponse(UniversalBaseModel):
     buckets: typing.Optional[typing.List[BucketDetail]] = None
+    count: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The number of buckets returned in the current response
+    """
+
+    remaining: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The number of buckets that have not been returned yet, will be null if there are no remaining buckets
+    """
+
+    total: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The total number of buckets found
+    """
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
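A short sketch of reading the new pagination fields on BucketListResponse; the buckets.list call is assumed from the existing buckets client and is not shown in this hunk:

from groundx import GroundX

client = GroundX(api_key="YOUR_API_KEY")
resp = client.buckets.list()  # assumed existing endpoint, returns a BucketListResponse
print(resp.count, resp.total)  # buckets returned in this page / total buckets found
if resp.remaining:  # None when there is nothing left to page through
    print(f"{resp.remaining} buckets not yet returned")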
groundx/types/bucket_response.py CHANGED
@@ -1,10 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
-from .bucket_detail import BucketDetail
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
+
 import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .bucket_detail import BucketDetail
 
 
 class BucketResponse(UniversalBaseModel):
groundx/types/bucket_update_detail.py CHANGED
@@ -1,11 +1,11 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing_extensions
-from ..core.serialization import FieldMetadata
 import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
 import pydantic
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from ..core.serialization import FieldMetadata
 
 
 class BucketUpdateDetail(UniversalBaseModel):
groundx/types/bucket_update_response.py CHANGED
@@ -1,10 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
-from .bucket_update_detail import BucketUpdateDetail
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
+
 import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .bucket_update_detail import BucketUpdateDetail
 
 
 class BucketUpdateResponse(UniversalBaseModel):
groundx/types/customer_detail.py CHANGED
@@ -1,10 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
 import typing
+
 import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 from .subscription_detail import SubscriptionDetail
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
 
 class CustomerDetail(UniversalBaseModel):
groundx/types/customer_response.py CHANGED
@@ -1,10 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
-from .customer_detail import CustomerDetail
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
+
 import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .customer_detail import CustomerDetail
 
 
 class CustomerResponse(UniversalBaseModel):
groundx/types/document.py CHANGED
@@ -1,13 +1,13 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+
+import pydantic
 import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 from ..core.serialization import FieldMetadata
-import pydantic
-import typing
 from .document_type import DocumentType
 from .process_level import ProcessLevel
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
 
 class Document(UniversalBaseModel):