anaplan-sdk 0.4.5__py3-none-any.whl → 0.5.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- anaplan_sdk/_async_clients/_alm.py +248 -44
- anaplan_sdk/_async_clients/_audit.py +13 -13
- anaplan_sdk/_async_clients/_bulk.py +181 -135
- anaplan_sdk/_async_clients/_cloud_works.py +57 -38
- anaplan_sdk/_async_clients/_cw_flow.py +25 -16
- anaplan_sdk/_async_clients/_transactional.py +251 -53
- anaplan_sdk/_clients/_alm.py +246 -45
- anaplan_sdk/_clients/_audit.py +13 -14
- anaplan_sdk/_clients/_bulk.py +180 -123
- anaplan_sdk/_clients/_cloud_works.py +54 -36
- anaplan_sdk/_clients/_cw_flow.py +25 -16
- anaplan_sdk/_clients/_transactional.py +246 -50
- anaplan_sdk/_services.py +392 -0
- anaplan_sdk/models/__init__.py +49 -2
- anaplan_sdk/models/_alm.py +64 -6
- anaplan_sdk/models/_bulk.py +16 -9
- anaplan_sdk/models/_transactional.py +221 -4
- {anaplan_sdk-0.4.5.dist-info → anaplan_sdk-0.5.0a2.dist-info}/METADATA +1 -1
- anaplan_sdk-0.5.0a2.dist-info/RECORD +30 -0
- anaplan_sdk/_base.py +0 -297
- anaplan_sdk-0.4.5.dist-info/RECORD +0 -30
- {anaplan_sdk-0.4.5.dist-info → anaplan_sdk-0.5.0a2.dist-info}/WHEEL +0 -0
- {anaplan_sdk-0.4.5.dist-info → anaplan_sdk-0.5.0a2.dist-info}/licenses/LICENSE +0 -0
anaplan_sdk/_clients/_bulk.py
CHANGED
@@ -1,6 +1,5 @@
 import logging
 import multiprocessing
-import time
 from concurrent.futures import ThreadPoolExecutor
 from copy import copy
 from typing import Iterator
@@ -9,7 +8,7 @@ import httpx
 from typing_extensions import Self

 from anaplan_sdk._auth import _create_auth
-from anaplan_sdk.
+from anaplan_sdk._services import _HttpService, action_url
 from anaplan_sdk.exceptions import AnaplanActionError, InvalidIdentifierException
 from anaplan_sdk.models import (
     Action,
@@ -17,6 +16,7 @@ from anaplan_sdk.models import (
     File,
     Import,
     Model,
+    ModelDeletionResult,
     Process,
     TaskStatus,
     TaskSummary,
@@ -28,11 +28,10 @@ from ._audit import _AuditClient
 from ._cloud_works import _CloudWorksClient
 from ._transactional import _TransactionalClient

-logging.getLogger("httpx").setLevel(logging.CRITICAL)
 logger = logging.getLogger("anaplan_sdk")


-class Client(_BaseClient):
+class Client:
     """
     Synchronous Anaplan Client. For guides and examples
     refer to https://vinzenzklass.github.io/anaplan-sdk.
@@ -51,10 +50,12 @@ class Client(_BaseClient):
         auth: httpx.Auth | None = None,
         timeout: float | httpx.Timeout = 30,
         retry_count: int = 2,
+        page_size: int = 5_000,
         status_poll_delay: int = 1,
         upload_parallel: bool = True,
         upload_chunk_size: int = 25_000_000,
         allow_file_creation: bool = False,
+        **httpx_kwargs,
     ) -> None:
         """
         Synchronous Anaplan Client. For guides and examples
@@ -87,6 +88,9 @@ class Client(_BaseClient):
         :param retry_count: The number of times to retry an HTTP request if it fails. Set this to 0
             to never retry. Defaults to 2, meaning each HTTP Operation will be tried a total
             number of 2 times.
+        :param page_size: The number of items to return per page when paginating through results.
+            Defaults to 5000. This is the maximum number of items that can be returned per
+            request. If you pass a value greater than 5000, it will be capped to 5000.
         :param status_poll_delay: The delay between polling the status of a task.
         :param upload_parallel: Whether to upload chunks in parallel when uploading files.
         :param upload_chunk_size: The size of the chunks to upload. This is the maximum size of
@@ -96,54 +100,64 @@ class Client(_BaseClient):
             altogether. A file that is created this way will not be referenced by any action in
             anaplan until manually assigned so there is typically no value in dynamically
             creating new files and uploading content to them.
-
-
-
-
-
-
-
-
-
-
-
-            )
-            ),
-            timeout=timeout,
+        :param httpx_kwargs: Additional keyword arguments to pass to the `httpx.Client`.
+            This can be used to set additional options such as proxies, headers, etc. See
+            https://www.python-httpx.org/api/#client for the full list of arguments.
+        """
+        auth = auth or _create_auth(
+            token=token,
+            user_email=user_email,
+            password=password,
+            certificate=certificate,
+            private_key=private_key,
+            private_key_password=private_key_password,
         )
+        _client = httpx.Client(auth=auth, timeout=timeout, **httpx_kwargs)
+        self._http = _HttpService(_client, retry_count, page_size, status_poll_delay)
         self._retry_count = retry_count
+        self._workspace_id = workspace_id
+        self._model_id = model_id
         self._url = f"https://api.anaplan.com/2/0/workspaces/{workspace_id}/models/{model_id}"
         self._transactional_client = (
-            _TransactionalClient(
+            _TransactionalClient(self._http, model_id) if model_id else None
         )
-        self._alm_client = _AlmClient(
-        self._cloud_works = _CloudWorksClient(
+        self._alm_client = _AlmClient(self._http, model_id) if model_id else None
+        self._cloud_works = _CloudWorksClient(self._http)
         self._thread_count = multiprocessing.cpu_count()
-        self._audit = _AuditClient(
+        self._audit = _AuditClient(self._http)
         self.status_poll_delay = status_poll_delay
         self.upload_parallel = upload_parallel
         self.upload_chunk_size = upload_chunk_size
         self.allow_file_creation = allow_file_creation
-        super().__init__(self._retry_count, _client)

     @classmethod
-    def from_existing(
+    def from_existing(
+        cls, existing: Self, *, workspace_id: str | None = None, model_id: str | None = None
+    ) -> Self:
         """
         Create a new instance of the Client from an existing instance. This is useful if you want
         to interact with multiple models or workspaces in the same script but share the same
         authentication and configuration. This creates a shallow copy of the existing client and
-
+        optionally updates the relevant attributes to the new workspace and model. You can provide
+        either a new workspace Id or a new model Id, or both. If you do not provide one of them,
+        the existing value will be used. If you omit both, the new instance will be an identical
+        copy of the existing instance.
+
         :param existing: The existing instance to copy.
-        :param workspace_id: The workspace Id to use.
-        :param model_id: The model Id to use.
+        :param workspace_id: The workspace Id to use or None to use the existing workspace Id.
+        :param model_id: The model Id to use or None to use the existing model Id.
         :return: A new instance of the Client.
         """
         client = copy(existing)
-
-
-
+        new_ws_id = workspace_id or existing._workspace_id
+        new_model_id = model_id or existing._model_id
+        logger.debug(
+            f"Creating a new AsyncClient from existing instance "
+            f"with workspace_id={new_ws_id}, model_id={new_model_id}."
         )
-        client.
+        client._url = f"https://api.anaplan.com/2/0/workspaces/{new_ws_id}/models/{new_model_id}"
+        client._transactional_client = _TransactionalClient(existing._http, new_model_id)
+        client._alm_client = _AlmClient(existing._http, new_model_id)
         return client

     @property
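To make the constructor rework above concrete, here is a minimal sketch of the new 0.5.0 call signature and `from_existing`, based only on the parameters and docstrings in this hunk. The Ids and credentials are placeholders, and the `headers` argument merely illustrates the new `**httpx_kwargs` pass-through to `httpx.Client`:

```python
import anaplan_sdk

# Placeholder Ids and credentials, for illustration only.
client = anaplan_sdk.Client(
    workspace_id="8a81b09ae2a24bca8de7b125",
    model_id="1A2B3C4D5E6F7A8B9C0D1E2F3A4B5C6D",
    user_email="admin@yourcompany.com",
    password="...",
    page_size=2_500,  # new in 0.5.0: page size for paginated listing calls, capped at 5000
    retry_count=2,
    headers={"X-Request-Source": "etl"},  # forwarded to httpx.Client via **httpx_kwargs
)

# Shallow copy pointing at another model; auth and the shared HTTP service are reused.
other = anaplan_sdk.Client.from_existing(client, model_id="9Z8Y7X6W5V4U3T2S1R0Q9P8O7N6M5L4K")
```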
@@ -163,14 +177,14 @@ class Client(_BaseClient):
         return self._cloud_works

     @property
-    def
+    def tr(self) -> _TransactionalClient:
         """
         The Transactional Client provides access to the Anaplan Transactional API. This is useful
         for more advanced use cases where you need to interact with the Anaplan Model in a more
         granular way.

         If you instantiated the client without the field `model_id`, this will raise a
-
+        `ValueError`, since none of the endpoints can be invoked without the model Id.
         :return: The Transactional Client.
         """
         if not self._transactional_client:
@@ -201,7 +215,7 @@ class Client(_BaseClient):
         )
         return self._alm_client

-    def
+    def get_workspaces(self, search_pattern: str | None = None) -> list[Workspace]:
         """
         Lists all the Workspaces the authenticated user has access to.
         :param search_pattern: Optional filter for workspaces. When provided, case-insensitive
@@ -214,16 +228,18 @@ class Client(_BaseClient):
             params["s"] = search_pattern
         return [
             Workspace.model_validate(e)
-            for e in self.
+            for e in self._http.get_paginated(
                 "https://api.anaplan.com/2/0/workspaces", "workspaces", params=params
             )
         ]

-    def
+    def get_models(self, search_pattern: str | None = None) -> list[Model]:
         """
         Lists all the Models the authenticated user has access to.
-        :param search_pattern:
-
+        :param search_pattern: Optionally filter for specific models. When provided,
+            case-insensitive matches model names containing this string.
+            You can use the wildcards `%` for 0-n characters, and `_` for exactly 1 character.
+            When None (default), returns all models.
         :return: The List of Models.
         """
         params = {"modelDetails": "true"}
@@ -231,19 +247,35 @@ class Client(_BaseClient):
             params["s"] = search_pattern
         return [
             Model.model_validate(e)
-            for e in self.
+            for e in self._http.get_paginated(
                 "https://api.anaplan.com/2/0/models", "models", params=params
             )
         ]

-    def
+    def delete_models(self, model_ids: list[str]) -> ModelDeletionResult:
+        """
+        Delete the given Models. Models need to be closed before they can be deleted. If one of the
+        deletions fails, the other deletions will still be attempted and may complete.
+        :param model_ids: The list of Model identifiers to delete.
+        :return:
+        """
+        logger.info(f"Deleting Models: {', '.join(model_ids)}.")
+        res = self._http.post(
+            f"https://api.anaplan.com/2/0/workspaces/{self._workspace_id}/bulkDeleteModels",
+            json={"modelIdsToDelete": model_ids},
+        )
+        return ModelDeletionResult.model_validate(res)
+
+    def get_files(self) -> list[File]:
         """
         Lists all the Files in the Model.
         :return: The List of Files.
         """
-        return [
+        return [
+            File.model_validate(e) for e in self._http.get_paginated(f"{self._url}/files", "files")
+        ]

-    def
+    def get_actions(self) -> list[Action]:
         """
         Lists all the Actions in the Model. This will only return the Actions listed under
         `Other Actions` in Anaplan. For Imports, exports, and processes, see their respective
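The new `delete_models` pairs naturally with the sharpened `search_pattern` filter documented in `get_models` above. A hedged sketch, reusing the `client` from the earlier example; the pattern is a placeholder, and it assumes the matched models are already closed:

```python
# "%" matches 0-n characters, "_" exactly one; matching is case-insensitive.
stale = client.get_models(search_pattern="archive%")

# Failed deletions do not stop the remaining Ids from being attempted.
result = client.delete_models([model.id for model in stale])
print(result)  # a ModelDeletionResult pydantic model
```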
@@ -251,64 +283,65 @@ class Client(_BaseClient):
         :return: The List of Actions.
         """
         return [
-            Action.model_validate(e)
+            Action.model_validate(e)
+            for e in self._http.get_paginated(f"{self._url}/actions", "actions")
         ]

-    def
+    def get_processes(self) -> list[Process]:
         """
         Lists all the Processes in the Model.
         :return: The List of Processes.
         """
         return [
             Process.model_validate(e)
-            for e in self.
+            for e in self._http.get_paginated(f"{self._url}/processes", "processes")
         ]

-    def
+    def get_imports(self) -> list[Import]:
         """
         Lists all the Imports in the Model.
         :return: The List of Imports.
         """
         return [
-            Import.model_validate(e)
+            Import.model_validate(e)
+            for e in self._http.get_paginated(f"{self._url}/imports", "imports")
         ]

-    def
+    def get_exports(self) -> list[Export]:
         """
         Lists all the Exports in the Model.
         :return: The List of Exports.
         """
         return [
-            Export.model_validate(e)
+            Export.model_validate(e)
+            for e in (self._http.get(f"{self._url}/exports")).get("exports", [])
         ]

-    def run_action(self, action_id: int) -> TaskStatus:
+    def run_action(self, action_id: int, wait_for_completion: bool = True) -> TaskStatus:
         """
-        Runs the
-
-
-
-        raise an Exception to handle - if you for e.g. think that one of the uploaded chunks may
-        have been dropped and simply retrying with new data may help - and not return the task
-        status information that needs to be handled by the caller.
-
-        If you need more information or control, you can use `invoke_action()` and
-        `get_task_status()`.
+        Runs the Action and validates the spawned task. If the Action fails or completes with
+        errors, this will raise an AnaplanActionError. Failed Tasks are often not something you
+        can recover from at runtime and often require manual changes in Anaplan, i.e. updating the
+        mapping of an Import or similar.
         :param action_id: The identifier of the Action to run. Can be any Anaplan Invokable;
-
+            Processes, Imports, Exports, Other Actions.
+        :param wait_for_completion: If True, the method will poll the task status and not return
+            until the task is complete. If False, it will spawn the task and return immediately.
         """
-
-
-
-
-        time.sleep(self.status_poll_delay)
-        task_status = self.get_task_status(action_id, task_id)
+        body = {"localeName": "en_US"}
+        res = self._http.post(f"{self._url}/{action_url(action_id)}/{action_id}/tasks", json=body)
+        task_id = res["task"]["taskId"]
+        logger.info(f"Invoked Action '{action_id}', spawned Task: '{task_id}'.")

-        if
+        if not wait_for_completion:
+            return TaskStatus.model_validate(self.get_task_status(action_id, task_id))
+        status = self._http.poll_task(self.get_task_status, action_id, task_id)
+        if status.task_state == "COMPLETE" and not status.result.successful:
+            logger.error(f"Task '{task_id}' completed with errors.")
             raise AnaplanActionError(f"Task '{task_id}' completed with errors.")

-        logger.info(f"Task '{task_id}' completed successfully.")
-        return
+        logger.info(f"Task '{task_id}' of Action '{action_id}' completed successfully.")
+        return status

     def get_file(self, file_id: int) -> bytes:
         """
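The reworked `run_action` above now covers both the blocking and the fire-and-forget case, which is why the separate `invoke_action` is removed further down. A short sketch; the action Id is a placeholder:

```python
# Default: poll until the task finishes; raises AnaplanActionError if the
# Action fails or completes with errors.
status = client.run_action(118000000000)

# Fire and forget: spawn the task and return its TaskStatus immediately.
pending = client.run_action(118000000000, wait_for_completion=False)
print(pending.task_state)
```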
@@ -317,43 +350,53 @@ class Client(_BaseClient):
         :return: The content of the file.
         """
         chunk_count = self._file_pre_check(file_id)
-        if chunk_count <= 1:
-            return self._get_binary(f"{self._url}/files/{file_id}")
         logger.info(f"File {file_id} has {chunk_count} chunks.")
+        if chunk_count <= 1:
+            return self._http.get_binary(f"{self._url}/files/{file_id}")
         with ThreadPoolExecutor(max_workers=self._thread_count) as executor:
             chunks = executor.map(
-                self.
-
+                self._http.get_binary,
+                (f"{self._url}/files/{file_id}/chunks/{i}" for i in range(chunk_count)),
             )
         return b"".join(chunks)

-    def get_file_stream(self, file_id: int) -> Iterator[bytes]:
+    def get_file_stream(self, file_id: int, batch_size: int = 1) -> Iterator[bytes]:
         """
         Retrieves the content of the specified file as a stream of chunks. The chunks are yielded
         one by one, so you can process them as they arrive. This is useful for large files where
         you don't want to or cannot load the entire file into memory at once.
         :param file_id: The identifier of the file to retrieve.
+        :param batch_size: Number of chunks to fetch concurrently. If > 1, n chunks will be fetched
+            concurrently. This still yields each chunk individually, only the requests are
+            batched. If 1 (default), each chunk is fetched sequentially.
         :return: A generator yielding the chunks of the file.
         """
         chunk_count = self._file_pre_check(file_id)
+        logger.info(f"File {file_id} has {chunk_count} chunks.")
         if chunk_count <= 1:
-            yield self.
+            yield self._http.get_binary(f"{self._url}/files/{file_id}")
             return
-
-
-
+
+        with ThreadPoolExecutor(max_workers=batch_size) as executor:
+            for batch_start in range(0, chunk_count, batch_size):
+                batch_chunks = executor.map(
+                    self._http.get_binary,
+                    (
+                        f"{self._url}/files/{file_id}/chunks/{i}"
+                        for i in range(batch_start, min(batch_start + batch_size, chunk_count))
+                    ),
+                )
+                for chunk in batch_chunks:
+                    yield chunk

     def upload_file(self, file_id: int, content: str | bytes) -> None:
         """
-        Uploads the content to the specified file. If
-
-        better performance. If you are network bound or are experiencing rate limiting issues, set
-        `upload_parallel` to False.
+        Uploads the content to the specified file. If there are several chunks, upload of
+        individual chunks are uploaded concurrently.

         :param file_id: The identifier of the file to upload to.
         :param content: The content to upload. **This Content will be compressed before uploading.
-
-            redundant work.**
+            If you are passing the Input as bytes, pass it uncompressed.**
         """
         if isinstance(content, str):
             content = content.encode()
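Based on the `get_file_stream` signature above, a minimal sketch of streaming a large export file to disk without holding it in memory; the file Id is a placeholder:

```python
# batch_size=4 fetches up to four chunks concurrently, but the chunks are
# still yielded one by one and in order.
with open("export.csv", "wb") as out:
    for chunk in client.get_file_stream(116000000012, batch_size=4):
        out.write(chunk)
```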
@@ -361,7 +404,7 @@ class Client(_BaseClient):
             content[i : i + self.upload_chunk_size]
             for i in range(0, len(content), self.upload_chunk_size)
         ]
-        logger.info(f"Content will be uploaded in {len(chunks)} chunks.")
+        logger.info(f"Content for file '{file_id}' will be uploaded in {len(chunks)} chunks.")
         self._set_chunk_count(file_id, len(chunks))
         if self.upload_parallel:
             with ThreadPoolExecutor(max_workers=self._thread_count) as executor:
@@ -371,36 +414,66 @@ class Client(_BaseClient):
         else:
             for index, chunk in enumerate(chunks):
                 self._upload_chunk(file_id, index, chunk)
+        logger.info(f"Completed upload for file '{file_id}'.")

-    def upload_file_stream(
+    def upload_file_stream(
+        self, file_id: int, content: Iterator[str | bytes], batch_size: int = 1
+    ) -> None:
         """
         Uploads the content to the specified file as a stream of chunks. This is useful either for
         large files where you don't want to or cannot load the entire file into memory at once, or
         if you simply do not know the number of chunks ahead of time and instead just want to pass
         on chunks i.e. consumed from a queue until it is exhausted. In this case, you can pass a
         generator that yields the chunks of the file one by one to this method.
+
         :param file_id: The identifier of the file to upload to.
-        :param content: An Iterator yielding the chunks of the file.
+        :param content: An Iterator or AsyncIterator yielding the chunks of the file. You can pass
+            any Iterator, but you will most likely want to pass a Generator.
+        :param batch_size: Number of chunks to upload concurrently. If > 1, n chunks will be
+            uploaded concurrently. This can be useful if you either do not control the chunk
+            size, or if you want to keep the chunk size small but still want some concurrency.
         """
+        logger.info(f"Starting upload stream for file '{file_id}' with batch size {batch_size}.")
         self._set_chunk_count(file_id, -1)
-
-
-
-
-
+        indices, chunks = [], []
+        with ThreadPoolExecutor(max_workers=batch_size) as executor:
+            for index, chunk in enumerate(content):
+                indices.append(index)
+                chunks.append(chunk)
+                if len(indices) == max(batch_size, 1):
+                    list(
+                        executor.map(self._upload_chunk, (file_id,) * len(indices), indices, chunks)
+                    )
+                    logger.info(
+                        f"Completed upload stream batch of size {batch_size} for file {file_id}."
+                    )
+                    indices, chunks = [], []
+
+            if indices:
+                executor.map(self._upload_chunk, (file_id,) * len(indices), indices, chunks)
+                logger.info(
+                    f"Completed final upload stream batch of size {len(indices)} for file {file_id}."
+                )
+        self._http.post(f"{self._url}/files/{file_id}/complete", json={"id": file_id})
+        logger.info(f"Completed upload stream for '{file_id}'.")

-    def upload_and_import(
+    def upload_and_import(
+        self, file_id: int, content: str | bytes, action_id: int, wait_for_completion: bool = True
+    ) -> TaskStatus:
         """
         Convenience wrapper around `upload_file()` and `run_action()` to upload content to a file
         and run an import action in one call.
         :param file_id: The identifier of the file to upload to.
         :param content: The content to upload. **This Content will be compressed before uploading.
-
-
+            If you are passing the Input as bytes, pass it uncompressed to avoid redundant
+            work.**
         :param action_id: The identifier of the action to run after uploading the content.
+        :param wait_for_completion: If True, the method will poll the import task status and not
+            return until the task is complete. If False, it will spawn the import task and
+            return immediately.
         """
         self.upload_file(file_id, content)
-        self.run_action(action_id)
+        return self.run_action(action_id, wait_for_completion)

     def export_and_download(self, action_id: int) -> bytes:
         """
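Finally, a hedged sketch of the streaming upload and the combined upload-and-import wrapper shown above; the file and action Ids are placeholders:

```python
def read_rows():
    # Any Iterator[str | bytes] works; a generator is the typical choice.
    yield "col_a,col_b\n"
    yield "1,2\n"

# Chunks are buffered and uploaded in batches of two, then the upload is
# marked complete on the server.
client.upload_file_stream(113000000025, read_rows(), batch_size=2)

# Or upload in one shot and trigger the import; returns the final TaskStatus.
status = client.upload_and_import(113000000025, "col_a,col_b\n1,2\n", action_id=112000000040)
```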
@@ -412,7 +485,7 @@ class Client(_BaseClient):
         self.run_action(action_id)
         return self.get_file(action_id)

-    def
+    def get_task_summaries(self, action_id: int) -> list[TaskSummary]:
         """
         Retrieves the status of all tasks spawned by the specified action.
         :param action_id: The identifier of the action that was invoked.
@@ -420,7 +493,7 @@ class Client(_BaseClient):
         """
         return [
             TaskSummary.model_validate(e)
-            for e in self.
+            for e in self._http.get_paginated(
                 f"{self._url}/{action_url(action_id)}/{action_id}/tasks", "tasks"
             )
         ]
@@ -433,7 +506,7 @@ class Client(_BaseClient):
         :return: The status of the task.
         """
         return TaskStatus.model_validate(
-            self.
+            self._http.get(f"{self._url}/{action_url(action_id)}/{action_id}/tasks/{task_id}").get(
                 "task"
             )
         )
@@ -445,45 +518,29 @@ class Client(_BaseClient):
         :param task_id: The Task identifier, sometimes also referred to as the Correlation Id.
         :return: The content of the solution logs.
         """
-        return self.
+        return self._http.get_binary(
             f"{self._url}/optimizeActions/{action_id}/tasks/{task_id}/solutionLogs"
         )

-    def invoke_action(self, action_id: int) -> str:
-        """
-        You may want to consider using `run_action()` instead.
-
-        Invokes the specified Anaplan Action and returns the spawned Task identifier. This is
-        useful if you want to handle the Task status yourself or if you want to run multiple
-        Actions in parallel.
-        :param action_id: The identifier of the Action to run. Can be any Anaplan Invokable.
-        :return: The identifier of the spawned Task.
-        """
-        response = self._post(
-            f"{self._url}/{action_url(action_id)}/{action_id}/tasks", json={"localeName": "en_US"}
-        )
-        task_id = response.get("task").get("taskId")
-        logger.info(f"Invoked Action '{action_id}', spawned Task: '{task_id}'.")
-        return task_id
-
     def _file_pre_check(self, file_id: int) -> int:
-        file = next(
+        file = next((f for f in self.get_files() if f.id == file_id), None)
         if not file:
             raise InvalidIdentifierException(f"File {file_id} not found.")
         return file.chunk_count

-    def _upload_chunk(self, file_id: int, index: int, chunk: bytes) -> None:
-        self.
-        logger.
+    def _upload_chunk(self, file_id: int, index: int, chunk: str | bytes) -> None:
+        self._http.put_binary_gzip(f"{self._url}/files/{file_id}/chunks/{index}", chunk)
+        logger.debug(f"Chunk {index} loaded to file '{file_id}'.")

     def _set_chunk_count(self, file_id: int, num_chunks: int) -> None:
+        logger.debug(f"Setting chunk count for file '{file_id}' to {num_chunks}.")
         if not self.allow_file_creation and not (113000000000 <= file_id <= 113999999999):
             raise InvalidIdentifierException(
                 f"File with Id {file_id} does not exist. If you want to dynamically create files "
                 "to avoid this error, set `allow_file_creation=True` on the calling instance. "
                 "Make sure you have understood the implications of this before doing so. "
             )
-        response = self.
+        response = self._http.post(f"{self._url}/files/{file_id}", json={"chunkCount": num_chunks})
         optionally_new_file = int(response.get("file").get("id"))
         if optionally_new_file != file_id:
             if self.allow_file_creation:
|