lockss_pyclient-0.1.0.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. lockss/pyclient/__init__.py +67 -0
  2. lockss/pyclient/config/__init__.py +42 -0
  3. lockss/pyclient/config/api/__init__.py +12 -0
  4. lockss/pyclient/config/api/aus_api.py +2195 -0
  5. lockss/pyclient/config/api/config_api.py +718 -0
  6. lockss/pyclient/config/api/plugins_api.py +128 -0
  7. lockss/pyclient/config/api/status_api.py +120 -0
  8. lockss/pyclient/config/api/tdb_api.py +318 -0
  9. lockss/pyclient/config/api/users_api.py +516 -0
  10. lockss/pyclient/config/api/utils_api.py +128 -0
  11. lockss/pyclient/config/api_client.py +632 -0
  12. lockss/pyclient/config/configuration.py +254 -0
  13. lockss/pyclient/config/models/__init__.py +30 -0
  14. lockss/pyclient/config/models/api_status.py +344 -0
  15. lockss/pyclient/config/models/au_configuration.py +142 -0
  16. lockss/pyclient/config/models/au_status.py +113 -0
  17. lockss/pyclient/config/models/au_ws_result.py +113 -0
  18. lockss/pyclient/config/models/auids_body.py +168 -0
  19. lockss/pyclient/config/models/check_substance_result.py +212 -0
  20. lockss/pyclient/config/models/content_configuration_result.py +200 -0
  21. lockss/pyclient/config/models/file_section_name_body.py +113 -0
  22. lockss/pyclient/config/models/platform_configuration_ws_result.py +113 -0
  23. lockss/pyclient/config/models/plugin_ws_result.py +345 -0
  24. lockss/pyclient/config/models/request_au_control_result.py +171 -0
  25. lockss/pyclient/config/models/tdb_au_ws_result.py +360 -0
  26. lockss/pyclient/config/models/tdb_publisher_ws_result.py +113 -0
  27. lockss/pyclient/config/models/tdb_title_ws_result.py +390 -0
  28. lockss/pyclient/config/rest.py +317 -0
  29. lockss/pyclient/crawler/__init__.py +45 -0
  30. lockss/pyclient/crawler/api/__init__.py +10 -0
  31. lockss/pyclient/crawler/api/crawlers_api.py +215 -0
  32. lockss/pyclient/crawler/api/crawls_api.py +952 -0
  33. lockss/pyclient/crawler/api/jobs_api.py +504 -0
  34. lockss/pyclient/crawler/api/status_api.py +120 -0
  35. lockss/pyclient/crawler/api/ws_api.py +128 -0
  36. lockss/pyclient/crawler/api_client.py +632 -0
  37. lockss/pyclient/crawler/configuration.py +254 -0
  38. lockss/pyclient/crawler/models/__init__.py +35 -0
  39. lockss/pyclient/crawler/models/api_status.py +344 -0
  40. lockss/pyclient/crawler/models/counter.py +142 -0
  41. lockss/pyclient/crawler/models/crawl_desc.py +344 -0
  42. lockss/pyclient/crawler/models/crawl_job.py +280 -0
  43. lockss/pyclient/crawler/models/crawl_pager.py +140 -0
  44. lockss/pyclient/crawler/models/crawl_status.py +780 -0
  45. lockss/pyclient/crawler/models/crawl_ws_result.py +814 -0
  46. lockss/pyclient/crawler/models/crawl_ws_result_pages_with_errors.py +162 -0
  47. lockss/pyclient/crawler/models/crawler_config.py +142 -0
  48. lockss/pyclient/crawler/models/crawler_status.py +279 -0
  49. lockss/pyclient/crawler/models/crawler_statuses.py +112 -0
  50. lockss/pyclient/crawler/models/error_result.py +164 -0
  51. lockss/pyclient/crawler/models/job_pager.py +140 -0
  52. lockss/pyclient/crawler/models/job_status.py +147 -0
  53. lockss/pyclient/crawler/models/mime_counter.py +169 -0
  54. lockss/pyclient/crawler/models/page_info.py +228 -0
  55. lockss/pyclient/crawler/models/url_error.py +148 -0
  56. lockss/pyclient/crawler/models/url_info.py +167 -0
  57. lockss/pyclient/crawler/models/url_pager.py +140 -0
  58. lockss/pyclient/crawler/rest.py +317 -0
  59. lockss/pyclient/md/__init__.py +36 -0
  60. lockss/pyclient/md/api/__init__.py +9 -0
  61. lockss/pyclient/md/api/mdupdates_api.py +508 -0
  62. lockss/pyclient/md/api/metadata_api.py +136 -0
  63. lockss/pyclient/md/api/status_api.py +120 -0
  64. lockss/pyclient/md/api/urls_api.py +224 -0
  65. lockss/pyclient/md/api_client.py +632 -0
  66. lockss/pyclient/md/configuration.py +254 -0
  67. lockss/pyclient/md/models/__init__.py +27 -0
  68. lockss/pyclient/md/models/api_status.py +344 -0
  69. lockss/pyclient/md/models/au.py +169 -0
  70. lockss/pyclient/md/models/au_metadata_page_info.py +140 -0
  71. lockss/pyclient/md/models/error_result.py +164 -0
  72. lockss/pyclient/md/models/item_metadata.py +196 -0
  73. lockss/pyclient/md/models/job.py +280 -0
  74. lockss/pyclient/md/models/job_page_info.py +140 -0
  75. lockss/pyclient/md/models/metadata_update_spec.py +142 -0
  76. lockss/pyclient/md/models/page_info.py +228 -0
  77. lockss/pyclient/md/models/status.py +142 -0
  78. lockss/pyclient/md/models/url_info.py +142 -0
  79. lockss/pyclient/md/rest.py +317 -0
  80. lockss/pyclient/poller/__init__.py +54 -0
  81. lockss/pyclient/poller/api/__init__.py +13 -0
  82. lockss/pyclient/poller/api/export_api.py +156 -0
  83. lockss/pyclient/poller/api/hash_api.py +413 -0
  84. lockss/pyclient/poller/api/import_api.py +157 -0
  85. lockss/pyclient/poller/api/poll_detail_api.py +374 -0
  86. lockss/pyclient/poller/api/poller_polls_api.py +223 -0
  87. lockss/pyclient/poller/api/repo_api.py +223 -0
  88. lockss/pyclient/poller/api/service_api.py +694 -0
  89. lockss/pyclient/poller/api/voter_polls_api.py +223 -0
  90. lockss/pyclient/poller/api_client.py +632 -0
  91. lockss/pyclient/poller/configuration.py +254 -0
  92. lockss/pyclient/poller/models/__init__.py +41 -0
  93. lockss/pyclient/poller/models/api_status.py +344 -0
  94. lockss/pyclient/poller/models/aus_import_body.py +199 -0
  95. lockss/pyclient/poller/models/cached_uri_set_spec.py +169 -0
  96. lockss/pyclient/poller/models/error_result.py +164 -0
  97. lockss/pyclient/poller/models/hasher_ws_params.py +432 -0
  98. lockss/pyclient/poller/models/link_desc.py +141 -0
  99. lockss/pyclient/poller/models/page_desc.py +227 -0
  100. lockss/pyclient/poller/models/peer_data.py +638 -0
  101. lockss/pyclient/poller/models/peer_ws_result.py +113 -0
  102. lockss/pyclient/poller/models/poll_desc.py +285 -0
  103. lockss/pyclient/poller/models/poll_ws_result.py +142 -0
  104. lockss/pyclient/poller/models/poller_detail.py +613 -0
  105. lockss/pyclient/poller/models/poller_pager.py +139 -0
  106. lockss/pyclient/poller/models/poller_summary.py +452 -0
  107. lockss/pyclient/poller/models/repair_data.py +176 -0
  108. lockss/pyclient/poller/models/repair_pager.py +139 -0
  109. lockss/pyclient/poller/models/repair_queue.py +249 -0
  110. lockss/pyclient/poller/models/repository_space_ws_result.py +113 -0
  111. lockss/pyclient/poller/models/repository_ws_result.py +113 -0
  112. lockss/pyclient/poller/models/tally_data.py +471 -0
  113. lockss/pyclient/poller/models/url_pager.py +139 -0
  114. lockss/pyclient/poller/models/vote_ws_result.py +142 -0
  115. lockss/pyclient/poller/models/voter_detail.py +701 -0
  116. lockss/pyclient/poller/models/voter_pager.py +139 -0
  117. lockss/pyclient/poller/models/voter_summary.py +284 -0
  118. lockss/pyclient/poller/rest.py +317 -0
  119. lockss/pyclient/rs/__init__.py +41 -0
  120. lockss/pyclient/rs/api/__init__.py +10 -0
  121. lockss/pyclient/rs/api/artifacts_api.py +988 -0
  122. lockss/pyclient/rs/api/aus_api.py +334 -0
  123. lockss/pyclient/rs/api/repo_api.py +379 -0
  124. lockss/pyclient/rs/api/status_api.py +120 -0
  125. lockss/pyclient/rs/api/wayback_api.py +386 -0
  126. lockss/pyclient/rs/api_client.py +632 -0
  127. lockss/pyclient/rs/configuration.py +247 -0
  128. lockss/pyclient/rs/models/__init__.py +31 -0
  129. lockss/pyclient/rs/models/api_status.py +344 -0
  130. lockss/pyclient/rs/models/archives_body.py +142 -0
  131. lockss/pyclient/rs/models/artifact.py +344 -0
  132. lockss/pyclient/rs/models/artifact_page_info.py +140 -0
  133. lockss/pyclient/rs/models/artifact_properties.py +344 -0
  134. lockss/pyclient/rs/models/artifacts_body.py +170 -0
  135. lockss/pyclient/rs/models/au_size.py +162 -0
  136. lockss/pyclient/rs/models/auid_page_info.py +140 -0
  137. lockss/pyclient/rs/models/error_result.py +164 -0
  138. lockss/pyclient/rs/models/import_status.py +298 -0
  139. lockss/pyclient/rs/models/page_info.py +229 -0
  140. lockss/pyclient/rs/models/repository_info.py +164 -0
  141. lockss/pyclient/rs/models/repository_statistics.py +112 -0
  142. lockss/pyclient/rs/models/storage_info.py +287 -0
  143. lockss/pyclient/rs/models/streaming_response_body.py +84 -0
  144. lockss/pyclient/rs/rest.py +317 -0
  145. lockss_pyclient-0.1.0.dev1.dist-info/LICENSE +27 -0
  146. lockss_pyclient-0.1.0.dev1.dist-info/METADATA +29 -0
  147. lockss_pyclient-0.1.0.dev1.dist-info/RECORD +148 -0
  148. lockss_pyclient-0.1.0.dev1.dist-info/WHEEL +4 -0
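The wheel bundles five independently generated Swagger clients, one per LOCKSS service: config, crawler, md (metadata), poller, and rs (repository service). Each sub-package carries its own api_client, configuration, models, and rest modules, so each service is configured and called separately. A minimal connection sketch for the crawler client follows; the Configuration attributes and the service URL are assumptions based on standard swagger-codegen output, not on anything shown in this diff.

# Sketch only: the Configuration attributes and the URL below are assumed
# from standard swagger-codegen output; they are not shown in this diff.
from lockss.pyclient.crawler.api_client import ApiClient
from lockss.pyclient.crawler.configuration import Configuration

config = Configuration()
config.host = 'http://localhost:24660/v2'  # hypothetical crawler service endpoint
config.username = 'lockss-u'               # the generated APIs declare basicAuth
config.password = 'lockss-p'

client = ApiClient(configuration=config)   # shared by the API classes below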
lockss/pyclient/crawler/api/jobs_api.py
@@ -0,0 +1,504 @@
+# coding: utf-8
+
+"""
+    LOCKSS Crawler Service REST API
+
+    REST API of the LOCKSS Crawler Service  # noqa: E501
+
+    OpenAPI spec version: 2.0.0
+    Contact: lockss-support@lockss.org
+    Generated by: https://github.com/swagger-api/swagger-codegen.git
+"""
+
+from __future__ import absolute_import
+
+import re  # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from lockss.pyclient.crawler.api_client import ApiClient
+
+
+class JobsApi(object):
+    """NOTE: This class is auto generated by the swagger code generator program.
+
+    Do not edit the class manually.
+    Ref: https://github.com/swagger-api/swagger-codegen
+    """
+
+    def __init__(self, api_client=None):
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def delete_crawl_job(self, job_id, **kwargs):  # noqa: E501
+        """Remove or stop a crawl job.  # noqa: E501
+
+        Delete a crawl job with the given job id, stopping any current processing, if necessary.  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_crawl_job(job_id, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param str job_id: The identifier used to identify a specific crawl job. (required)
+        :return: CrawlJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.delete_crawl_job_with_http_info(job_id, **kwargs)  # noqa: E501
+        else:
+            (data) = self.delete_crawl_job_with_http_info(job_id, **kwargs)  # noqa: E501
+            return data
+
+    def delete_crawl_job_with_http_info(self, job_id, **kwargs):  # noqa: E501
+        """Remove or stop a crawl job.  # noqa: E501
+
+        Delete a crawl job with the given job id, stopping any current processing, if necessary.  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_crawl_job_with_http_info(job_id, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param str job_id: The identifier used to identify a specific crawl job. (required)
+        :return: CrawlJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = ['job_id']  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_crawl_job" % key
+                )
+            params[key] = val
+        del params['kwargs']
+        # verify the required parameter 'job_id' is set
+        if ('job_id' not in params or
+                params['job_id'] is None):
+            raise ValueError("Missing the required parameter `job_id` when calling `delete_crawl_job`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'job_id' in params:
+            path_params['jobId'] = params['job_id']  # noqa: E501
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['basicAuth']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/jobs/{jobId}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='CrawlJob',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_jobs(self, **kwargs):  # noqa: E501
+        """Delete all of the currently queued and active jobs  # noqa: E501
+
+        Halt and delete all of the currently queued and active crawl jobs  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_jobs(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :return: None
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.delete_jobs_with_http_info(**kwargs)  # noqa: E501
+        else:
+            (data) = self.delete_jobs_with_http_info(**kwargs)  # noqa: E501
+            return data
+
+    def delete_jobs_with_http_info(self, **kwargs):  # noqa: E501
+        """Delete all of the currently queued and active jobs  # noqa: E501
+
+        Halt and delete all of the currently queued and active crawl jobs  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_jobs_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :return: None
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = []  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_jobs" % key
+                )
+            params[key] = val
+        del params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['basicAuth']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/jobs', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type=None,  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def get_crawl_job(self, job_id, **kwargs):  # noqa: E501
+        """Get the crawl status of this job  # noqa: E501
+
+        Get the crawl job with a given crawl id  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_crawl_job(job_id, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param str job_id: (required)
+        :return: CrawlJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.get_crawl_job_with_http_info(job_id, **kwargs)  # noqa: E501
+        else:
+            (data) = self.get_crawl_job_with_http_info(job_id, **kwargs)  # noqa: E501
+            return data
+
+    def get_crawl_job_with_http_info(self, job_id, **kwargs):  # noqa: E501
+        """Get the crawl status of this job  # noqa: E501
+
+        Get the crawl job with a given crawl id  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_crawl_job_with_http_info(job_id, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param str job_id: (required)
+        :return: CrawlJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = ['job_id']  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_crawl_job" % key
+                )
+            params[key] = val
+        del params['kwargs']
+        # verify the required parameter 'job_id' is set
+        if ('job_id' not in params or
+                params['job_id'] is None):
+            raise ValueError("Missing the required parameter `job_id` when calling `get_crawl_job`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'job_id' in params:
+            path_params['jobId'] = params['job_id']  # noqa: E501
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['basicAuth']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/jobs/{jobId}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='CrawlJob',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def get_jobs(self, **kwargs):  # noqa: E501
+        """Get the list of crawl jobs.  # noqa: E501
+
+        Get a list of crawl jobs a pageful at a time as defined by the continuation token and limit.  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_jobs(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param int limit: The number of jobs per page
+        :param str continuation_token: The continuation token of the next page of jobs to be returned.
+        :return: JobPager
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.get_jobs_with_http_info(**kwargs)  # noqa: E501
+        else:
+            (data) = self.get_jobs_with_http_info(**kwargs)  # noqa: E501
+            return data
+
+    def get_jobs_with_http_info(self, **kwargs):  # noqa: E501
+        """Get the list of crawl jobs.  # noqa: E501
+
+        Get a list of crawl jobs a pageful at a time as defined by the continuation token and limit.  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_jobs_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param int limit: The number of jobs per page
+        :param str continuation_token: The continuation token of the next page of jobs to be returned.
+        :return: JobPager
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = ['limit', 'continuation_token']  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_jobs" % key
+                )
+            params[key] = val
+        del params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+        if 'limit' in params:
+            query_params.append(('limit', params['limit']))  # noqa: E501
+        if 'continuation_token' in params:
+            query_params.append(('continuationToken', params['continuation_token']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['basicAuth']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/jobs', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='JobPager',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def queue_job(self, body, **kwargs):  # noqa: E501
+        """Request a crawl as defined by the descriptor  # noqa: E501
+
+        Enqueue a new crawl job as defined by the crawl descriptor and return it.  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.queue_job(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param CrawlDesc body: crawl request (required)
+        :return: CrawlJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.queue_job_with_http_info(body, **kwargs)  # noqa: E501
+        else:
+            (data) = self.queue_job_with_http_info(body, **kwargs)  # noqa: E501
+            return data
+
+    def queue_job_with_http_info(self, body, **kwargs):  # noqa: E501
+        """Request a crawl as defined by the descriptor  # noqa: E501
+
+        Enqueue a new crawl job as defined by the crawl descriptor and return it.  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.queue_job_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :param CrawlDesc body: crawl request (required)
+        :return: CrawlJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = ['body']  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method queue_job" % key
+                )
+            params[key] = val
+        del params['kwargs']
+        # verify the required parameter 'body' is set
+        if ('body' not in params or
+                params['body'] is None):
+            raise ValueError("Missing the required parameter `body` when calling `queue_job`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in params:
+            body_params = params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['basicAuth']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/jobs', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='CrawlJob',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
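The hunk above is the generated JobsApi for the crawler service. It follows the usual swagger-codegen calling convention: every operation has a synchronous form and an async_req=True form that returns a worker thread whose get() yields the result. A usage sketch, reusing the client object from the connection example above; the CrawlDesc fields and the job_id attribute on the returned CrawlJob are assumptions, since those models appear in the file list but not in this diff.

from lockss.pyclient.crawler.api.jobs_api import JobsApi
from lockss.pyclient.crawler.models import CrawlDesc  # listed above; fields not shown here

api = JobsApi(client)

# Enqueue a crawl described by a CrawlDesc body (POST /jobs).
job = api.queue_job(CrawlDesc())  # populate descriptor fields per the model

# Page through queued and active jobs (GET /jobs), ten per page.
pager = api.get_jobs(limit=10)

# Poll one job asynchronously (GET /jobs/{jobId}); get() blocks on the thread.
thread = api.get_crawl_job(job.job_id, async_req=True)  # job_id attribute assumed
current = thread.get()

# Stop and remove a single job, or everything queued and running.
api.delete_crawl_job(job.job_id)
api.delete_jobs()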
lockss/pyclient/crawler/api/status_api.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+    LOCKSS Crawler Service REST API
+
+    REST API of the LOCKSS Crawler Service  # noqa: E501
+
+    OpenAPI spec version: 2.0.0
+    Contact: lockss-support@lockss.org
+    Generated by: https://github.com/swagger-api/swagger-codegen.git
+"""
+
+from __future__ import absolute_import
+
+import re  # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from lockss.pyclient.crawler.api_client import ApiClient
+
+
+class StatusApi(object):
+    """NOTE: This class is auto generated by the swagger code generator program.
+
+    Do not edit the class manually.
+    Ref: https://github.com/swagger-api/swagger-codegen
+    """
+
+    def __init__(self, api_client=None):
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_status(self, **kwargs):  # noqa: E501
+        """Get the status of the service  # noqa: E501
+
+        Get the status of the service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_status(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :return: ApiStatus
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.get_status_with_http_info(**kwargs)  # noqa: E501
+        else:
+            (data) = self.get_status_with_http_info(**kwargs)  # noqa: E501
+            return data
+
+    def get_status_with_http_info(self, **kwargs):  # noqa: E501
+        """Get the status of the service  # noqa: E501
+
+        Get the status of the service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_status_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :return: ApiStatus
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = []  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_status" % key
+                )
+            params[key] = val
+        del params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['basicAuth']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/status', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='ApiStatus',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
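The StatusApi hunk is the smallest of the generated clients and doubles as a connectivity smoke test for a deployment. A sketch along the same lines as the earlier examples; it assumes the same client object, and that the ApiStatus model exposes the to_dict() helper that standard swagger-codegen models provide.

from lockss.pyclient.crawler.api.status_api import StatusApi

status_api = StatusApi(client)    # same ApiClient as in the earlier sketch
status = status_api.get_status()  # GET /status, returns an ApiStatus model
print(status.to_dict())           # to_dict() assumed from the standard model template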