lockss-pyclient 0.1.0.dev1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. lockss/pyclient/__init__.py +67 -0
  2. lockss/pyclient/config/__init__.py +42 -0
  3. lockss/pyclient/config/api/__init__.py +12 -0
  4. lockss/pyclient/config/api/aus_api.py +2195 -0
  5. lockss/pyclient/config/api/config_api.py +718 -0
  6. lockss/pyclient/config/api/plugins_api.py +128 -0
  7. lockss/pyclient/config/api/status_api.py +120 -0
  8. lockss/pyclient/config/api/tdb_api.py +318 -0
  9. lockss/pyclient/config/api/users_api.py +516 -0
  10. lockss/pyclient/config/api/utils_api.py +128 -0
  11. lockss/pyclient/config/api_client.py +632 -0
  12. lockss/pyclient/config/configuration.py +254 -0
  13. lockss/pyclient/config/models/__init__.py +30 -0
  14. lockss/pyclient/config/models/api_status.py +344 -0
  15. lockss/pyclient/config/models/au_configuration.py +142 -0
  16. lockss/pyclient/config/models/au_status.py +113 -0
  17. lockss/pyclient/config/models/au_ws_result.py +113 -0
  18. lockss/pyclient/config/models/auids_body.py +168 -0
  19. lockss/pyclient/config/models/check_substance_result.py +212 -0
  20. lockss/pyclient/config/models/content_configuration_result.py +200 -0
  21. lockss/pyclient/config/models/file_section_name_body.py +113 -0
  22. lockss/pyclient/config/models/platform_configuration_ws_result.py +113 -0
  23. lockss/pyclient/config/models/plugin_ws_result.py +345 -0
  24. lockss/pyclient/config/models/request_au_control_result.py +171 -0
  25. lockss/pyclient/config/models/tdb_au_ws_result.py +360 -0
  26. lockss/pyclient/config/models/tdb_publisher_ws_result.py +113 -0
  27. lockss/pyclient/config/models/tdb_title_ws_result.py +390 -0
  28. lockss/pyclient/config/rest.py +317 -0
  29. lockss/pyclient/crawler/__init__.py +45 -0
  30. lockss/pyclient/crawler/api/__init__.py +10 -0
  31. lockss/pyclient/crawler/api/crawlers_api.py +215 -0
  32. lockss/pyclient/crawler/api/crawls_api.py +952 -0
  33. lockss/pyclient/crawler/api/jobs_api.py +504 -0
  34. lockss/pyclient/crawler/api/status_api.py +120 -0
  35. lockss/pyclient/crawler/api/ws_api.py +128 -0
  36. lockss/pyclient/crawler/api_client.py +632 -0
  37. lockss/pyclient/crawler/configuration.py +254 -0
  38. lockss/pyclient/crawler/models/__init__.py +35 -0
  39. lockss/pyclient/crawler/models/api_status.py +344 -0
  40. lockss/pyclient/crawler/models/counter.py +142 -0
  41. lockss/pyclient/crawler/models/crawl_desc.py +344 -0
  42. lockss/pyclient/crawler/models/crawl_job.py +280 -0
  43. lockss/pyclient/crawler/models/crawl_pager.py +140 -0
  44. lockss/pyclient/crawler/models/crawl_status.py +780 -0
  45. lockss/pyclient/crawler/models/crawl_ws_result.py +814 -0
  46. lockss/pyclient/crawler/models/crawl_ws_result_pages_with_errors.py +162 -0
  47. lockss/pyclient/crawler/models/crawler_config.py +142 -0
  48. lockss/pyclient/crawler/models/crawler_status.py +279 -0
  49. lockss/pyclient/crawler/models/crawler_statuses.py +112 -0
  50. lockss/pyclient/crawler/models/error_result.py +164 -0
  51. lockss/pyclient/crawler/models/job_pager.py +140 -0
  52. lockss/pyclient/crawler/models/job_status.py +147 -0
  53. lockss/pyclient/crawler/models/mime_counter.py +169 -0
  54. lockss/pyclient/crawler/models/page_info.py +228 -0
  55. lockss/pyclient/crawler/models/url_error.py +148 -0
  56. lockss/pyclient/crawler/models/url_info.py +167 -0
  57. lockss/pyclient/crawler/models/url_pager.py +140 -0
  58. lockss/pyclient/crawler/rest.py +317 -0
  59. lockss/pyclient/md/__init__.py +36 -0
  60. lockss/pyclient/md/api/__init__.py +9 -0
  61. lockss/pyclient/md/api/mdupdates_api.py +508 -0
  62. lockss/pyclient/md/api/metadata_api.py +136 -0
  63. lockss/pyclient/md/api/status_api.py +120 -0
  64. lockss/pyclient/md/api/urls_api.py +224 -0
  65. lockss/pyclient/md/api_client.py +632 -0
  66. lockss/pyclient/md/configuration.py +254 -0
  67. lockss/pyclient/md/models/__init__.py +27 -0
  68. lockss/pyclient/md/models/api_status.py +344 -0
  69. lockss/pyclient/md/models/au.py +169 -0
  70. lockss/pyclient/md/models/au_metadata_page_info.py +140 -0
  71. lockss/pyclient/md/models/error_result.py +164 -0
  72. lockss/pyclient/md/models/item_metadata.py +196 -0
  73. lockss/pyclient/md/models/job.py +280 -0
  74. lockss/pyclient/md/models/job_page_info.py +140 -0
  75. lockss/pyclient/md/models/metadata_update_spec.py +142 -0
  76. lockss/pyclient/md/models/page_info.py +228 -0
  77. lockss/pyclient/md/models/status.py +142 -0
  78. lockss/pyclient/md/models/url_info.py +142 -0
  79. lockss/pyclient/md/rest.py +317 -0
  80. lockss/pyclient/poller/__init__.py +54 -0
  81. lockss/pyclient/poller/api/__init__.py +13 -0
  82. lockss/pyclient/poller/api/export_api.py +156 -0
  83. lockss/pyclient/poller/api/hash_api.py +413 -0
  84. lockss/pyclient/poller/api/import_api.py +157 -0
  85. lockss/pyclient/poller/api/poll_detail_api.py +374 -0
  86. lockss/pyclient/poller/api/poller_polls_api.py +223 -0
  87. lockss/pyclient/poller/api/repo_api.py +223 -0
  88. lockss/pyclient/poller/api/service_api.py +694 -0
  89. lockss/pyclient/poller/api/voter_polls_api.py +223 -0
  90. lockss/pyclient/poller/api_client.py +632 -0
  91. lockss/pyclient/poller/configuration.py +254 -0
  92. lockss/pyclient/poller/models/__init__.py +41 -0
  93. lockss/pyclient/poller/models/api_status.py +344 -0
  94. lockss/pyclient/poller/models/aus_import_body.py +199 -0
  95. lockss/pyclient/poller/models/cached_uri_set_spec.py +169 -0
  96. lockss/pyclient/poller/models/error_result.py +164 -0
  97. lockss/pyclient/poller/models/hasher_ws_params.py +432 -0
  98. lockss/pyclient/poller/models/link_desc.py +141 -0
  99. lockss/pyclient/poller/models/page_desc.py +227 -0
  100. lockss/pyclient/poller/models/peer_data.py +638 -0
  101. lockss/pyclient/poller/models/peer_ws_result.py +113 -0
  102. lockss/pyclient/poller/models/poll_desc.py +285 -0
  103. lockss/pyclient/poller/models/poll_ws_result.py +142 -0
  104. lockss/pyclient/poller/models/poller_detail.py +613 -0
  105. lockss/pyclient/poller/models/poller_pager.py +139 -0
  106. lockss/pyclient/poller/models/poller_summary.py +452 -0
  107. lockss/pyclient/poller/models/repair_data.py +176 -0
  108. lockss/pyclient/poller/models/repair_pager.py +139 -0
  109. lockss/pyclient/poller/models/repair_queue.py +249 -0
  110. lockss/pyclient/poller/models/repository_space_ws_result.py +113 -0
  111. lockss/pyclient/poller/models/repository_ws_result.py +113 -0
  112. lockss/pyclient/poller/models/tally_data.py +471 -0
  113. lockss/pyclient/poller/models/url_pager.py +139 -0
  114. lockss/pyclient/poller/models/vote_ws_result.py +142 -0
  115. lockss/pyclient/poller/models/voter_detail.py +701 -0
  116. lockss/pyclient/poller/models/voter_pager.py +139 -0
  117. lockss/pyclient/poller/models/voter_summary.py +284 -0
  118. lockss/pyclient/poller/rest.py +317 -0
  119. lockss/pyclient/rs/__init__.py +41 -0
  120. lockss/pyclient/rs/api/__init__.py +10 -0
  121. lockss/pyclient/rs/api/artifacts_api.py +988 -0
  122. lockss/pyclient/rs/api/aus_api.py +334 -0
  123. lockss/pyclient/rs/api/repo_api.py +379 -0
  124. lockss/pyclient/rs/api/status_api.py +120 -0
  125. lockss/pyclient/rs/api/wayback_api.py +386 -0
  126. lockss/pyclient/rs/api_client.py +632 -0
  127. lockss/pyclient/rs/configuration.py +247 -0
  128. lockss/pyclient/rs/models/__init__.py +31 -0
  129. lockss/pyclient/rs/models/api_status.py +344 -0
  130. lockss/pyclient/rs/models/archives_body.py +142 -0
  131. lockss/pyclient/rs/models/artifact.py +344 -0
  132. lockss/pyclient/rs/models/artifact_page_info.py +140 -0
  133. lockss/pyclient/rs/models/artifact_properties.py +344 -0
  134. lockss/pyclient/rs/models/artifacts_body.py +170 -0
  135. lockss/pyclient/rs/models/au_size.py +162 -0
  136. lockss/pyclient/rs/models/auid_page_info.py +140 -0
  137. lockss/pyclient/rs/models/error_result.py +164 -0
  138. lockss/pyclient/rs/models/import_status.py +298 -0
  139. lockss/pyclient/rs/models/page_info.py +229 -0
  140. lockss/pyclient/rs/models/repository_info.py +164 -0
  141. lockss/pyclient/rs/models/repository_statistics.py +112 -0
  142. lockss/pyclient/rs/models/storage_info.py +287 -0
  143. lockss/pyclient/rs/models/streaming_response_body.py +84 -0
  144. lockss/pyclient/rs/rest.py +317 -0
  145. lockss_pyclient-0.1.0.dev1.dist-info/LICENSE +27 -0
  146. lockss_pyclient-0.1.0.dev1.dist-info/METADATA +29 -0
  147. lockss_pyclient-0.1.0.dev1.dist-info/RECORD +148 -0
  148. lockss_pyclient-0.1.0.dev1.dist-info/WHEEL +4 -0
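The wheel bundles one swagger-codegen-generated sub-package per LOCKSS REST service (config, crawler, md, poller, rs), each with its own configuration.py, api_client.py, rest.py, and api/ and models/ modules. As a rough orientation before the excerpt below, this is how such a client is typically wired up; the host URL and credentials are placeholders, and the Configuration attribute names (host, username, password) follow the usual swagger-codegen layout rather than anything verified against this particular package.

    # Minimal wiring sketch for the generated crawler client (placeholder values).
    from lockss.pyclient.crawler.configuration import Configuration
    from lockss.pyclient.crawler.api_client import ApiClient
    from lockss.pyclient.crawler.api.crawls_api import CrawlsApi

    config = Configuration()
    config.host = 'http://localhost:24660'  # hypothetical crawler service URL
    config.username = 'lockss-u'            # basicAuth credentials (placeholders)
    config.password = 'lockss-p'

    crawls_api = CrawlsApi(ApiClient(configuration=config))

The diff below shows the crawler service's CrawlsApi module in full.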
lockss/pyclient/crawler/api/crawls_api.py (new file)
@@ -0,0 +1,952 @@
# coding: utf-8

"""
    LOCKSS Crawler Service REST API

    REST API of the LOCKSS Crawler Service  # noqa: E501

    OpenAPI spec version: 2.0.0
    Contact: lockss-support@lockss.org
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from lockss.pyclient.crawler.api_client import ApiClient


class CrawlsApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_crawl_by_id(self, job_id, **kwargs):  # noqa: E501
        """Get the crawl status of this job  # noqa: E501

        Get the job represented by this crawl id  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_by_id(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :return: CrawlStatus
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_by_id_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_by_id_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_by_id_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """Get the crawl status of this job  # noqa: E501

        Get the job represented by this crawl id  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_by_id_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :return: CrawlStatus
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_by_id`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CrawlStatus',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_by_mime_type(self, job_id, type, **kwargs):  # noqa: E501
        """A pageable list of urls of mimetype.  # noqa: E501

        Get a list of urls of mimetype.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_by_mime_type(job_id, type, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param str type: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_by_mime_type_with_http_info(job_id, type, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_by_mime_type_with_http_info(job_id, type, **kwargs)  # noqa: E501
            return data

    def get_crawl_by_mime_type_with_http_info(self, job_id, type, **kwargs):  # noqa: E501
        """A pageable list of urls of mimetype.  # noqa: E501

        Get a list of urls of mimetype.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_by_mime_type_with_http_info(job_id, type, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param str type: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'type', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_by_mime_type" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_by_mime_type`")  # noqa: E501
        # verify the required parameter 'type' is set
        if ('type' not in params or
                params['type'] is None):
            raise ValueError("Missing the required parameter `type` when calling `get_crawl_by_mime_type`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501
        if 'type' in params:
            path_params['type'] = params['type']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/mimeType/{type}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_errors(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of urls with errors.  # noqa: E501

        Get a list of urls with errors.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_errors(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_errors_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_errors_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_errors_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of urls with errors.  # noqa: E501

        Get a list of urls with errors.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_errors_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_errors" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_errors`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/errors', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_excluded(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of excluded urls.  # noqa: E501

        Get a list of excluded urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_excluded(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: identifier used to identify a specific crawl. (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_excluded_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_excluded_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_excluded_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of excluded urls.  # noqa: E501

        Get a list of excluded urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_excluded_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: identifier used to identify a specific crawl. (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_excluded" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_excluded`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/excluded', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_fetched(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of fetched urls.  # noqa: E501

        Get a list of fetched urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_fetched(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of jobs to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_fetched_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_fetched_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_fetched_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of fetched urls.  # noqa: E501

        Get a list of fetched urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_fetched_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of jobs to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_fetched" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_fetched`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/fetched', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_not_modified(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of not modified urls.  # noqa: E501

        Get a list of not modified urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_not_modified(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_not_modified_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_not_modified_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_not_modified_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of not modified urls.  # noqa: E501

        Get a list of not modified urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_not_modified_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_not_modified" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_not_modified`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/notModified', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_parsed(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of parsed urls.  # noqa: E501

        Get a list of parsed urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_parsed(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_parsed_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_parsed_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_parsed_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of parsed urls.  # noqa: E501

        Get a list of parsed urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_parsed_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_parsed" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_parsed`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/parsed', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawl_pending(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of pending urls.  # noqa: E501

        Get a list of pending urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_pending(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawl_pending_with_http_info(job_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_crawl_pending_with_http_info(job_id, **kwargs)  # noqa: E501
            return data

    def get_crawl_pending_with_http_info(self, job_id, **kwargs):  # noqa: E501
        """A pageable list of pending urls.  # noqa: E501

        Get a list of pending urls.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawl_pending_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str job_id: (required)
        :param int limit: The number of jobs per page.
        :param str continuation_token: The continuation token of the next page of urls to be returned.
        :return: UrlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['job_id', 'limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawl_pending" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'job_id' is set
        if ('job_id' not in params or
                params['job_id'] is None):
            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_pending`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'job_id' in params:
            path_params['jobId'] = params['job_id']  # noqa: E501

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls/{jobId}/pending', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UrlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_crawls(self, **kwargs):  # noqa: E501
        """Get the list of crawls.  # noqa: E501

        Get a list of crawls a pageful at a time as defined by limit.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawls(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int limit: The number of jobs per page
        :param str continuation_token: The continuation token of the next page of crawl status data to be returned.
        :return: CrawlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_crawls_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_crawls_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_crawls_with_http_info(self, **kwargs):  # noqa: E501
        """Get the list of crawls.  # noqa: E501

        Get a list of crawls a pageful at a time as defined by limit.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_crawls_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int limit: The number of jobs per page
        :param str continuation_token: The continuation token of the next page of crawl status data to be returned.
        :return: CrawlPager
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['limit', 'continuation_token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_crawls" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'continuation_token' in params:
            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/crawls', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CrawlPager',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
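
As the generated docstrings describe, each method has a synchronous form that returns the declared response type directly and an async form (async_req=True) that returns a thread whose get() yields the same value. A hedged usage sketch follows; 'my-crawl-job' is a placeholder job identifier and the client wiring repeats the sketch shown before the diff.

    from lockss.pyclient.crawler.api.crawls_api import CrawlsApi

    api = CrawlsApi()  # default ApiClient(); point it at a real service via Configuration as sketched earlier

    # Synchronous calls return the declared response type directly.
    status = api.get_crawl_by_id('my-crawl-job')              # CrawlStatus
    errors = api.get_crawl_errors('my-crawl-job', limit=50)   # UrlPager (first page of error urls)

    # With async_req=True the call returns a thread; .get() yields the result.
    thread = api.get_crawls(limit=20, async_req=True)
    pager = thread.get()                                      # CrawlPager

Subsequent pages are requested by passing the continuation token from the returned pager back in via the continuation_token parameter; the exact attribute that carries it is defined by the CrawlPager, UrlPager, and PageInfo models listed above rather than by this module.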