cartography-client 0.0.1 (cartography_client-0.0.1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (70)
  1. cartography/__init__.py +100 -0
  2. cartography/_base_client.py +1995 -0
  3. cartography/_client.py +444 -0
  4. cartography/_compat.py +219 -0
  5. cartography/_constants.py +14 -0
  6. cartography/_exceptions.py +108 -0
  7. cartography/_files.py +123 -0
  8. cartography/_models.py +829 -0
  9. cartography/_qs.py +150 -0
  10. cartography/_resource.py +43 -0
  11. cartography/_response.py +832 -0
  12. cartography/_streaming.py +333 -0
  13. cartography/_types.py +219 -0
  14. cartography/_utils/__init__.py +57 -0
  15. cartography/_utils/_logs.py +25 -0
  16. cartography/_utils/_proxy.py +65 -0
  17. cartography/_utils/_reflection.py +42 -0
  18. cartography/_utils/_resources_proxy.py +24 -0
  19. cartography/_utils/_streams.py +12 -0
  20. cartography/_utils/_sync.py +86 -0
  21. cartography/_utils/_transform.py +447 -0
  22. cartography/_utils/_typing.py +151 -0
  23. cartography/_utils/_utils.py +422 -0
  24. cartography/_version.py +4 -0
  25. cartography/lib/.keep +4 -0
  26. cartography/py.typed +0 -0
  27. cartography/resources/__init__.py +89 -0
  28. cartography/resources/api_info.py +135 -0
  29. cartography/resources/crawl.py +279 -0
  30. cartography/resources/download.py +376 -0
  31. cartography/resources/health.py +143 -0
  32. cartography/resources/scrape.py +331 -0
  33. cartography/resources/workflows/__init__.py +33 -0
  34. cartography/resources/workflows/request/__init__.py +33 -0
  35. cartography/resources/workflows/request/crawl.py +295 -0
  36. cartography/resources/workflows/request/request.py +221 -0
  37. cartography/resources/workflows/workflows.py +274 -0
  38. cartography/types/__init__.py +23 -0
  39. cartography/types/api_info_retrieve_response.py +8 -0
  40. cartography/types/bulk_download_result.py +23 -0
  41. cartography/types/bulk_scrape_result.py +19 -0
  42. cartography/types/crawl_create_graph_params.py +46 -0
  43. cartography/types/crawl_create_graph_response.py +37 -0
  44. cartography/types/download_create_bulk_params.py +37 -0
  45. cartography/types/download_create_bulk_response.py +41 -0
  46. cartography/types/download_create_single_params.py +32 -0
  47. cartography/types/download_create_single_response.py +21 -0
  48. cartography/types/downloader_type.py +7 -0
  49. cartography/types/health_check_response.py +8 -0
  50. cartography/types/scrape_engine_param.py +28 -0
  51. cartography/types/scrape_scrape_bulk_params.py +33 -0
  52. cartography/types/scrape_scrape_bulk_response.py +41 -0
  53. cartography/types/scrape_scrape_single_params.py +17 -0
  54. cartography/types/scrape_scrape_single_response.py +23 -0
  55. cartography/types/wait_until.py +7 -0
  56. cartography/types/workflow_describe_response.py +8 -0
  57. cartography/types/workflow_results_response.py +8 -0
  58. cartography/types/workflows/__init__.py +6 -0
  59. cartography/types/workflows/request/__init__.py +9 -0
  60. cartography/types/workflows/request/crawl_create_bulk_params.py +14 -0
  61. cartography/types/workflows/request/crawl_create_bulk_response.py +22 -0
  62. cartography/types/workflows/request/crawl_create_params.py +32 -0
  63. cartography/types/workflows/request/crawl_request_param.py +32 -0
  64. cartography/types/workflows/request/workflow_result.py +11 -0
  65. cartography/types/workflows/request_create_download_params.py +18 -0
  66. cartography/types/workflows/request_create_download_response.py +8 -0
  67. cartography_client-0.0.1.dist-info/METADATA +399 -0
  68. cartography_client-0.0.1.dist-info/RECORD +70 -0
  69. cartography_client-0.0.1.dist-info/WHEEL +4 -0
  70. cartography_client-0.0.1.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,331 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+
+import httpx
+
+from ..types import scrape_scrape_bulk_params, scrape_scrape_single_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.scrape_engine_param import ScrapeEngineParam
+from ..types.scrape_scrape_bulk_response import ScrapeScrapeBulkResponse
+from ..types.scrape_scrape_single_response import ScrapeScrapeSingleResponse
+
+__all__ = ["ScrapeResource", "AsyncScrapeResource"]
+
+
+class ScrapeResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ScrapeResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#accessing-raw-response-data-eg-headers
+        """
+        return ScrapeResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ScrapeResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#with_streaming_response
+        """
+        return ScrapeResourceWithStreamingResponse(self)
+
+    def scrape_bulk(
+        self,
+        *,
+        crawl_id: str,
+        engines: Iterable[ScrapeEngineParam],
+        s3_bucket: str,
+        urls: List[str],
+        batch_size: int | NotGiven = NOT_GIVEN,
+        debug: bool | NotGiven = NOT_GIVEN,
+        max_workers: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ScrapeScrapeBulkResponse:
+        """
+        Bulk scrape multiple URLs with checkpointing to S3
+
+        Requires permission: scrape:write
+
+        Args:
+          crawl_id: Unique identifier for this crawl
+
+          engines: List of engines to use
+
+          s3_bucket: S3 bucket for checkpointing
+
+          urls: List of URLs to scrape
+
+          batch_size: URLs per batch
+
+          debug: Enable debug information
+
+          max_workers: Maximum concurrent workers
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/scrape/bulk",
+            body=maybe_transform(
+                {
+                    "crawl_id": crawl_id,
+                    "engines": engines,
+                    "s3_bucket": s3_bucket,
+                    "urls": urls,
+                    "batch_size": batch_size,
+                    "debug": debug,
+                    "max_workers": max_workers,
+                },
+                scrape_scrape_bulk_params.ScrapeScrapeBulkParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScrapeScrapeBulkResponse,
+        )
+
+    def scrape_single(
+        self,
+        *,
+        engines: Iterable[ScrapeEngineParam],
+        url: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ScrapeScrapeSingleResponse:
+        """
+        Scrape a single URL using the specified engines
+
+        Requires permission: scrape:read
+
+        Args:
+          engines: List of engines to use
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/scrape/single",
+            body=maybe_transform(
+                {
+                    "engines": engines,
+                    "url": url,
+                },
+                scrape_scrape_single_params.ScrapeScrapeSingleParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScrapeScrapeSingleResponse,
+        )
+
+
+class AsyncScrapeResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncScrapeResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#accessing-raw-response-data-eg-headers
+        """
+        return AsyncScrapeResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncScrapeResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#with_streaming_response
+        """
+        return AsyncScrapeResourceWithStreamingResponse(self)
+
+    async def scrape_bulk(
+        self,
+        *,
+        crawl_id: str,
+        engines: Iterable[ScrapeEngineParam],
+        s3_bucket: str,
+        urls: List[str],
+        batch_size: int | NotGiven = NOT_GIVEN,
+        debug: bool | NotGiven = NOT_GIVEN,
+        max_workers: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ScrapeScrapeBulkResponse:
+        """
+        Bulk scrape multiple URLs with checkpointing to S3
+
+        Requires permission: scrape:write
+
+        Args:
+          crawl_id: Unique identifier for this crawl
+
+          engines: List of engines to use
+
+          s3_bucket: S3 bucket for checkpointing
+
+          urls: List of URLs to scrape
+
+          batch_size: URLs per batch
+
+          debug: Enable debug information
+
+          max_workers: Maximum concurrent workers
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/scrape/bulk",
+            body=await async_maybe_transform(
+                {
+                    "crawl_id": crawl_id,
+                    "engines": engines,
+                    "s3_bucket": s3_bucket,
+                    "urls": urls,
+                    "batch_size": batch_size,
+                    "debug": debug,
+                    "max_workers": max_workers,
+                },
+                scrape_scrape_bulk_params.ScrapeScrapeBulkParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScrapeScrapeBulkResponse,
+        )
+
+    async def scrape_single(
+        self,
+        *,
+        engines: Iterable[ScrapeEngineParam],
+        url: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ScrapeScrapeSingleResponse:
+        """
+        Scrape a single URL using the specified engines
+
+        Requires permission: scrape:read
+
+        Args:
+          engines: List of engines to use
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/scrape/single",
+            body=await async_maybe_transform(
+                {
+                    "engines": engines,
+                    "url": url,
+                },
+                scrape_scrape_single_params.ScrapeScrapeSingleParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScrapeScrapeSingleResponse,
+        )
+
+
+class ScrapeResourceWithRawResponse:
+    def __init__(self, scrape: ScrapeResource) -> None:
+        self._scrape = scrape
+
+        self.scrape_bulk = to_raw_response_wrapper(
+            scrape.scrape_bulk,
+        )
+        self.scrape_single = to_raw_response_wrapper(
+            scrape.scrape_single,
+        )
+
+
+class AsyncScrapeResourceWithRawResponse:
+    def __init__(self, scrape: AsyncScrapeResource) -> None:
+        self._scrape = scrape
+
+        self.scrape_bulk = async_to_raw_response_wrapper(
+            scrape.scrape_bulk,
+        )
+        self.scrape_single = async_to_raw_response_wrapper(
+            scrape.scrape_single,
+        )
+
+
+class ScrapeResourceWithStreamingResponse:
+    def __init__(self, scrape: ScrapeResource) -> None:
+        self._scrape = scrape
+
+        self.scrape_bulk = to_streamed_response_wrapper(
+            scrape.scrape_bulk,
+        )
+        self.scrape_single = to_streamed_response_wrapper(
+            scrape.scrape_single,
+        )
+
+
+class AsyncScrapeResourceWithStreamingResponse:
+    def __init__(self, scrape: AsyncScrapeResource) -> None:
+        self._scrape = scrape
+
+        self.scrape_bulk = async_to_streamed_response_wrapper(
+            scrape.scrape_bulk,
+        )
+        self.scrape_single = async_to_streamed_response_wrapper(
+            scrape.scrape_single,
+        )
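
For orientation, a minimal usage sketch for the resource above. Only the method names, routes, parameters, and response types come from this hunk; the client class name ("Cartography"), its constructor, and the ScrapeEngineParam payload shape are assumptions based on common Stainless SDK conventions.

# Hypothetical sketch, assuming the package exports a `Cartography` client
# (the actual client class lives in cartography/_client.py, not shown here).
from cartography import Cartography

client = Cartography()  # auth/configuration options omitted

# POST /scrape/single -> ScrapeScrapeSingleResponse (parsed model)
page = client.scrape.scrape_single(
    url="https://example.com",
    engines=[{"engine": "http"}],  # assumed ScrapeEngineParam shape
)

# The `.with_raw_response` prefix wraps the same call and returns the raw
# HTTP response object; in Stainless-generated clients, `.parse()` then
# yields the typed model without re-requesting.
raw = client.scrape.with_raw_response.scrape_single(
    url="https://example.com",
    engines=[{"engine": "http"}],
)
print(raw.headers)
page = raw.parse()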
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .request import (
+    RequestResource,
+    AsyncRequestResource,
+    RequestResourceWithRawResponse,
+    AsyncRequestResourceWithRawResponse,
+    RequestResourceWithStreamingResponse,
+    AsyncRequestResourceWithStreamingResponse,
+)
+from .workflows import (
+    WorkflowsResource,
+    AsyncWorkflowsResource,
+    WorkflowsResourceWithRawResponse,
+    AsyncWorkflowsResourceWithRawResponse,
+    WorkflowsResourceWithStreamingResponse,
+    AsyncWorkflowsResourceWithStreamingResponse,
+)
+
+__all__ = [
+    "RequestResource",
+    "AsyncRequestResource",
+    "RequestResourceWithRawResponse",
+    "AsyncRequestResourceWithRawResponse",
+    "RequestResourceWithStreamingResponse",
+    "AsyncRequestResourceWithStreamingResponse",
+    "WorkflowsResource",
+    "AsyncWorkflowsResource",
+    "WorkflowsResourceWithRawResponse",
+    "AsyncWorkflowsResourceWithRawResponse",
+    "WorkflowsResourceWithStreamingResponse",
+    "AsyncWorkflowsResourceWithStreamingResponse",
+]
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .crawl import (
+    CrawlResource,
+    AsyncCrawlResource,
+    CrawlResourceWithRawResponse,
+    AsyncCrawlResourceWithRawResponse,
+    CrawlResourceWithStreamingResponse,
+    AsyncCrawlResourceWithStreamingResponse,
+)
+from .request import (
+    RequestResource,
+    AsyncRequestResource,
+    RequestResourceWithRawResponse,
+    AsyncRequestResourceWithRawResponse,
+    RequestResourceWithStreamingResponse,
+    AsyncRequestResourceWithStreamingResponse,
+)
+
+__all__ = [
+    "CrawlResource",
+    "AsyncCrawlResource",
+    "CrawlResourceWithRawResponse",
+    "AsyncCrawlResourceWithRawResponse",
+    "CrawlResourceWithStreamingResponse",
+    "AsyncCrawlResourceWithStreamingResponse",
+    "RequestResource",
+    "AsyncRequestResource",
+    "RequestResourceWithRawResponse",
+    "AsyncRequestResourceWithRawResponse",
+    "RequestResourceWithStreamingResponse",
+    "AsyncRequestResourceWithStreamingResponse",
+]
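
The two `__init__.py` hunks above carry no logic; they re-export the resource classes so downstream code can import them from the subpackage roots rather than the defining modules:

# These imports are grounded in the __all__ lists above and resolve via the
# re-exports; the defining modules (crawl.py, request.py, workflows.py) need
# not be referenced directly.
from cartography.resources.workflows import RequestResource, WorkflowsResource
from cartography.resources.workflows.request import CrawlResource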
@@ -0,0 +1,295 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.workflows.request import crawl_create_params, crawl_create_bulk_params
+from ....types.workflows.request.workflow_result import WorkflowResult
+from ....types.workflows.request.crawl_request_param import CrawlRequestParam
+from ....types.workflows.request.crawl_create_bulk_response import CrawlCreateBulkResponse
+
+__all__ = ["CrawlResource", "AsyncCrawlResource"]
+
+
+class CrawlResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CrawlResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#accessing-raw-response-data-eg-headers
+        """
+        return CrawlResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CrawlResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#with_streaming_response
+        """
+        return CrawlResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        bucket_name: str,
+        crawl_id: str,
+        engines: Iterable[Dict[str, object]],
+        url: str,
+        absolute_only: bool | NotGiven = NOT_GIVEN,
+        batch_size: int | NotGiven = NOT_GIVEN,
+        depth: int | NotGiven = NOT_GIVEN,
+        keep_external: bool | NotGiven = NOT_GIVEN,
+        max_urls: int | NotGiven = NOT_GIVEN,
+        max_workers: int | NotGiven = NOT_GIVEN,
+        visit_external: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkflowResult:
+        """
+        Make a request to temporal worker.
+
+        :param request: crawl request data
+        :param token_data: api token
+        :return: response
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/workflows/request/crawl",
+            body=maybe_transform(
+                {
+                    "bucket_name": bucket_name,
+                    "crawl_id": crawl_id,
+                    "engines": engines,
+                    "url": url,
+                    "absolute_only": absolute_only,
+                    "batch_size": batch_size,
+                    "depth": depth,
+                    "keep_external": keep_external,
+                    "max_urls": max_urls,
+                    "max_workers": max_workers,
+                    "visit_external": visit_external,
+                },
+                crawl_create_params.CrawlCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkflowResult,
+        )
+
+    def create_bulk(
+        self,
+        *,
+        jobs: Iterable[CrawlRequestParam],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> CrawlCreateBulkResponse:
+        """
+        Make bulk requests to temporal worker based on the number of URLs
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/workflows/request/crawl/bulk",
+            body=maybe_transform({"jobs": jobs}, crawl_create_bulk_params.CrawlCreateBulkParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=CrawlCreateBulkResponse,
+        )
+
+
+class AsyncCrawlResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCrawlResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCrawlResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCrawlResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/evrimai/cartography-client#with_streaming_response
+        """
+        return AsyncCrawlResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        bucket_name: str,
+        crawl_id: str,
+        engines: Iterable[Dict[str, object]],
+        url: str,
+        absolute_only: bool | NotGiven = NOT_GIVEN,
+        batch_size: int | NotGiven = NOT_GIVEN,
+        depth: int | NotGiven = NOT_GIVEN,
+        keep_external: bool | NotGiven = NOT_GIVEN,
+        max_urls: int | NotGiven = NOT_GIVEN,
+        max_workers: int | NotGiven = NOT_GIVEN,
+        visit_external: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkflowResult:
+        """
+        Make a request to temporal worker.
+
+        :param request: crawl request data
+        :param token_data: api token
+        :return: response
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/workflows/request/crawl",
+            body=await async_maybe_transform(
+                {
+                    "bucket_name": bucket_name,
+                    "crawl_id": crawl_id,
+                    "engines": engines,
+                    "url": url,
+                    "absolute_only": absolute_only,
+                    "batch_size": batch_size,
+                    "depth": depth,
+                    "keep_external": keep_external,
+                    "max_urls": max_urls,
+                    "max_workers": max_workers,
+                    "visit_external": visit_external,
+                },
+                crawl_create_params.CrawlCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkflowResult,
+        )
+
+    async def create_bulk(
+        self,
+        *,
+        jobs: Iterable[CrawlRequestParam],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> CrawlCreateBulkResponse:
+        """
+        Make bulk requests to temporal worker based on the number of URLs
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/workflows/request/crawl/bulk",
+            body=await async_maybe_transform({"jobs": jobs}, crawl_create_bulk_params.CrawlCreateBulkParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=CrawlCreateBulkResponse,
+        )
+
+
+class CrawlResourceWithRawResponse:
+    def __init__(self, crawl: CrawlResource) -> None:
+        self._crawl = crawl
+
+        self.create = to_raw_response_wrapper(
+            crawl.create,
+        )
+        self.create_bulk = to_raw_response_wrapper(
+            crawl.create_bulk,
+        )
+
+
+class AsyncCrawlResourceWithRawResponse:
+    def __init__(self, crawl: AsyncCrawlResource) -> None:
+        self._crawl = crawl
+
+        self.create = async_to_raw_response_wrapper(
+            crawl.create,
+        )
+        self.create_bulk = async_to_raw_response_wrapper(
+            crawl.create_bulk,
+        )
+
+
+class CrawlResourceWithStreamingResponse:
+    def __init__(self, crawl: CrawlResource) -> None:
+        self._crawl = crawl
+
+        self.create = to_streamed_response_wrapper(
+            crawl.create,
+        )
+        self.create_bulk = to_streamed_response_wrapper(
+            crawl.create_bulk,
+        )
+
+
+class AsyncCrawlResourceWithStreamingResponse:
+    def __init__(self, crawl: AsyncCrawlResource) -> None:
+        self._crawl = crawl
+
+        self.create = async_to_streamed_response_wrapper(
+            crawl.create,
+        )
+        self.create_bulk = async_to_streamed_response_wrapper(
+            crawl.create_bulk,
+        )
+ )