anaplan-sdk 0.4.5__py3-none-any.whl → 0.5.0a1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -1,8 +1,8 @@
  import logging
  import multiprocessing
- import time
  from concurrent.futures import ThreadPoolExecutor
  from copy import copy
+ from time import sleep
  from typing import Iterator

  import httpx
@@ -17,6 +17,7 @@ from anaplan_sdk.models import (
      File,
      Import,
      Model,
+     ModelDeletionResult,
      Process,
      TaskStatus,
      TaskSummary,
@@ -28,7 +29,6 @@ from ._audit import _AuditClient
  from ._cloud_works import _CloudWorksClient
  from ._transactional import _TransactionalClient

- logging.getLogger("httpx").setLevel(logging.CRITICAL)
  logger = logging.getLogger("anaplan_sdk")

@@ -112,11 +112,17 @@ class Client(_BaseClient):
              timeout=timeout,
          )
          self._retry_count = retry_count
+         self._workspace_id = workspace_id
+         self._model_id = model_id
          self._url = f"https://api.anaplan.com/2/0/workspaces/{workspace_id}/models/{model_id}"
          self._transactional_client = (
              _TransactionalClient(_client, model_id, self._retry_count) if model_id else None
          )
-         self._alm_client = _AlmClient(_client, model_id, self._retry_count) if model_id else None
+         self._alm_client = (
+             _AlmClient(_client, model_id, self._retry_count, status_poll_delay)
+             if model_id
+             else None
+         )
          self._cloud_works = _CloudWorksClient(_client, self._retry_count)
          self._thread_count = multiprocessing.cpu_count()
          self._audit = _AuditClient(_client, self._retry_count, self._thread_count)
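Note on logging: 0.4.5 silenced the `httpx` logger globally as an import side effect; 0.5.0a1 drops that override and routes all SDK output through the `anaplan_sdk` logger (also used by the CloudWorks and Flow modules further down). A minimal sketch of configuring this from application code — the logger names come from the diff, everything else is standard library usage:

```python
import logging

logging.basicConfig(level=logging.INFO)

# All SDK progress messages go to the "anaplan_sdk" logger in 0.5.0a1.
logging.getLogger("anaplan_sdk").setLevel(logging.DEBUG)

# The SDK no longer mutes httpx for you; quiet it yourself if desired.
logging.getLogger("httpx").setLevel(logging.WARNING)
```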
@@ -127,23 +133,37 @@ class Client(_BaseClient):
          super().__init__(self._retry_count, _client)

      @classmethod
-     def from_existing(cls, existing: Self, workspace_id: str, model_id: str) -> Self:
+     def from_existing(
+         cls, existing: Self, *, workspace_id: str | None = None, model_id: str | None = None
+     ) -> Self:
          """
          Create a new instance of the Client from an existing instance. This is useful if you want
          to interact with multiple models or workspaces in the same script but share the same
          authentication and configuration. This creates a shallow copy of the existing client and
-         update the relevant attributes to the new workspace and model.
+         optionally updates the relevant attributes to the new workspace and model. You can provide
+         either a new workspace Id or a new model Id, or both. If you do not provide one of them,
+         the existing value will be used. If you omit both, the new instance will be an identical
+         copy of the existing instance.
+
          :param existing: The existing instance to copy.
-         :param workspace_id: The workspace Id to use.
-         :param model_id: The model Id to use.
+         :param workspace_id: The workspace Id to use or None to use the existing workspace Id.
+         :param model_id: The model Id to use or None to use the existing model Id.
          :return: A new instance of the Client.
          """
          client = copy(existing)
-         client._url = f"https://api.anaplan.com/2/0/workspaces/{workspace_id}/models/{model_id}"
+         new_ws_id = workspace_id or existing._workspace_id
+         new_model_id = model_id or existing._model_id
+         logger.debug(
+             f"Creating a new Client from existing instance "
+             f"with workspace_id={new_ws_id}, model_id={new_model_id}."
+         )
+         client._url = f"https://api.anaplan.com/2/0/workspaces/{new_ws_id}/models/{new_model_id}"
          client._transactional_client = _TransactionalClient(
-             existing._client, model_id, existing._retry_count
+             existing._client, new_model_id, existing._retry_count
+         )
+         client._alm_client = _AlmClient(
+             existing._client, new_model_id, existing._retry_count, existing.status_poll_delay
          )
-         client._alm_client = _AlmClient(existing._client, model_id, existing._retry_count)
          return client

      @property
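`from_existing` now takes keyword-only, optional ids and falls back to the values stored on the source client (hence the new `_workspace_id` / `_model_id` attributes above). A usage sketch; authentication arguments are elided:

```python
import anaplan_sdk

# Assumed setup: an already-authenticated client (auth kwargs omitted here).
client = anaplan_sdk.Client(workspace_id="wsId", model_id="modelId")

# 0.5.0: ids are keyword-only and optional; omitted ones are reused,
# so pointing a copy at another model keeps the workspace and the session.
other = anaplan_sdk.Client.from_existing(client, model_id="otherModelId")

# Omitting both yields an identical shallow copy.
clone = anaplan_sdk.Client.from_existing(client)
```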
@@ -201,7 +221,7 @@ class Client(_BaseClient):
              )
          return self._alm_client

-     def list_workspaces(self, search_pattern: str | None = None) -> list[Workspace]:
+     def get_workspaces(self, search_pattern: str | None = None) -> list[Workspace]:
          """
          Lists all the Workspaces the authenticated user has access to.
          :param search_pattern: Optional filter for workspaces. When provided, case-insensitive
@@ -219,11 +239,13 @@ class Client(_BaseClient):
              )
          ]

-     def list_models(self, search_pattern: str | None = None) -> list[Model]:
+     def get_models(self, search_pattern: str | None = None) -> list[Model]:
          """
          Lists all the Models the authenticated user has access to.
-         :param search_pattern: Optional filter for models. When provided, case-insensitive matches
-         models with names containing this string. When None (default), returns all models.
+         :param search_pattern: Optionally filter for specific models. When provided,
+             case-insensitive matches model names containing this string.
+             You can use the wildcards `%` for 0-n characters, and `_` for exactly 1 character.
+             When None (default), returns all models.
          :return: The List of Models.
          """
          params = {"modelDetails": "true"}
@@ -236,14 +258,28 @@ class Client(_BaseClient):
              )
          ]

-     def list_files(self) -> list[File]:
+     def delete_models(self, model_ids: list[str]) -> ModelDeletionResult:
+         """
+         Delete the given Models. Models need to be closed before they can be deleted. If one of the
+         deletions fails, the other deletions will still be attempted and may complete.
+         :param model_ids: The list of Model identifiers to delete.
+         :return: The deletion results, including any failures.
+         """
+         logger.info(f"Deleting Models: {', '.join(model_ids)}.")
+         res = self._post(
+             f"https://api.anaplan.com/2/0/workspaces/{self._workspace_id}/bulkDeleteModels",
+             json={"modelIdsToDelete": model_ids},
+         )
+         return ModelDeletionResult.model_validate(res)
+
+     def get_files(self) -> list[File]:
          """
          Lists all the Files in the Model.
          :return: The List of Files.
          """
          return [File.model_validate(e) for e in self._get_paginated(f"{self._url}/files", "files")]

-     def list_actions(self) -> list[Action]:
+     def get_actions(self) -> list[Action]:
          """
          Lists all the Actions in the Model. This will only return the Actions listed under
          `Other Actions` in Anaplan. For Imports, exports, and processes, see their respective
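`delete_models` is new in 0.5.0a1 and relies on the workspace id captured in `__init__`. A sketch:

```python
# New in 0.5.0a1. Models must be closed first; one failed deletion
# does not abort the rest of the batch.
result = client.delete_models(["modelIdA", "modelIdB"])
# `result` is a ModelDeletionResult pydantic model; its exact fields are
# defined in anaplan_sdk.models and not shown in this diff.
print(result)
```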
@@ -254,7 +290,7 @@ class Client(_BaseClient):
              Action.model_validate(e) for e in self._get_paginated(f"{self._url}/actions", "actions")
          ]

-     def list_processes(self) -> list[Process]:
+     def get_processes(self) -> list[Process]:
          """
          Lists all the Processes in the Model.
          :return: The List of Processes.
@@ -264,7 +300,7 @@ class Client(_BaseClient):
              for e in self._get_paginated(f"{self._url}/processes", "processes")
          ]

-     def list_imports(self) -> list[Import]:
+     def get_imports(self) -> list[Import]:
          """
          Lists all the Imports in the Model.
          :return: The List of Imports.
@@ -273,7 +309,7 @@ class Client(_BaseClient):
              Import.model_validate(e) for e in self._get_paginated(f"{self._url}/imports", "imports")
          ]

-     def list_exports(self) -> list[Export]:
+     def get_exports(self) -> list[Export]:
          """
          Lists all the Exports in the Model.
          :return: The List of Exports.
@@ -282,33 +318,34 @@ class Client(_BaseClient):
              Export.model_validate(e) for e in (self._get(f"{self._url}/exports")).get("exports", [])
          ]

-     def run_action(self, action_id: int) -> TaskStatus:
+     def run_action(self, action_id: int, wait_for_completion: bool = True) -> TaskStatus:
          """
-         Runs the specified Anaplan Action and validates the spawned task. If the Action fails or
-         completes with errors, will raise an :py:class:`AnaplanActionError`. Failed Tasks are
-         usually not something you can recover from at runtime and often require manual changes in
-         Anaplan, i.e. updating the mapping of an Import or similar. So, for convenience, this will
-         raise an Exception to handle - if you for e.g. think that one of the uploaded chunks may
-         have been dropped and simply retrying with new data may help - and not return the task
-         status information that needs to be handled by the caller.
-
-         If you need more information or control, you can use `invoke_action()` and
-         `get_task_status()`.
+         Runs the Action and validates the spawned task. If the Action fails or completes with
+         errors, this will raise an AnaplanActionError. Failed Tasks are often not something you
+         can recover from at runtime and often require manual changes in Anaplan, i.e. updating the
+         mapping of an Import or similar.
          :param action_id: The identifier of the Action to run. Can be any Anaplan Invokable;
-         Processes, Imports, Exports, Other Actions.
+             Processes, Imports, Exports, Other Actions.
+         :param wait_for_completion: If True, the method will poll the task status and not return
+             until the task is complete. If False, it will spawn the task and return immediately.
          """
-         task_id = self.invoke_action(action_id)
-         task_status = self.get_task_status(action_id, task_id)
+         body = {"localeName": "en_US"}
+         res = self._post(f"{self._url}/{action_url(action_id)}/{action_id}/tasks", json=body)
+         task_id = res["task"]["taskId"]
+         logger.info(f"Invoked Action '{action_id}', spawned Task: '{task_id}'.")

-         while task_status.task_state != "COMPLETE":
-             time.sleep(self.status_poll_delay)
-             task_status = self.get_task_status(action_id, task_id)
+         if not wait_for_completion:
+             return self.get_task_status(action_id, task_id)

-         if task_status.task_state == "COMPLETE" and not task_status.result.successful:
+         while (status := self.get_task_status(action_id, task_id)).task_state != "COMPLETE":
+             sleep(self.status_poll_delay)
+
+         if status.task_state == "COMPLETE" and not status.result.successful:
+             logger.error(f"Task '{task_id}' completed with errors: {status.result.error_message}")
              raise AnaplanActionError(f"Task '{task_id}' completed with errors.")

-         logger.info(f"Task '{task_id}' completed successfully.")
-         return task_status
+         logger.info(f"Task '{task_id}' of Action '{action_id}' completed successfully.")
+         return status

      def get_file(self, file_id: int) -> bytes:
          """
@@ -317,43 +354,53 @@ class Client(_BaseClient):
          :return: The content of the file.
          """
          chunk_count = self._file_pre_check(file_id)
+         logger.info(f"File {file_id} has {chunk_count} chunks.")
          if chunk_count <= 1:
              return self._get_binary(f"{self._url}/files/{file_id}")
-         logger.info(f"File {file_id} has {chunk_count} chunks.")
          with ThreadPoolExecutor(max_workers=self._thread_count) as executor:
              chunks = executor.map(
                  self._get_binary,
-                 [f"{self._url}/files/{file_id}/chunks/{i}" for i in range(chunk_count)],
+                 (f"{self._url}/files/{file_id}/chunks/{i}" for i in range(chunk_count)),
              )
          return b"".join(chunks)

-     def get_file_stream(self, file_id: int) -> Iterator[bytes]:
+     def get_file_stream(self, file_id: int, batch_size: int = 1) -> Iterator[bytes]:
          """
          Retrieves the content of the specified file as a stream of chunks. The chunks are yielded
          one by one, so you can process them as they arrive. This is useful for large files where
          you don't want to or cannot load the entire file into memory at once.
          :param file_id: The identifier of the file to retrieve.
+         :param batch_size: Number of chunks to fetch concurrently. If > 1, n chunks will be fetched
+             concurrently. This still yields each chunk individually, only the requests are
+             batched. If 1 (default), each chunk is fetched sequentially.
          :return: A generator yielding the chunks of the file.
          """
          chunk_count = self._file_pre_check(file_id)
+         logger.info(f"File {file_id} has {chunk_count} chunks.")
          if chunk_count <= 1:
              yield self._get_binary(f"{self._url}/files/{file_id}")
              return
-         logger.info(f"File {file_id} has {chunk_count} chunks.")
-         for i in range(chunk_count):
-             yield self._get_binary(f"{self._url}/files/{file_id}/chunks/{i}")
+
+         with ThreadPoolExecutor(max_workers=batch_size) as executor:
+             for batch_start in range(0, chunk_count, batch_size):
+                 batch_chunks = executor.map(
+                     self._get_binary,
+                     (
+                         f"{self._url}/files/{file_id}/chunks/{i}"
+                         for i in range(batch_start, min(batch_start + batch_size, chunk_count))
+                     ),
+                 )
+                 for chunk in batch_chunks:
+                     yield chunk

      def upload_file(self, file_id: int, content: str | bytes) -> None:
          """
-         Uploads the content to the specified file. If `upload_parallel` is set to True on the
-         instance you are invoking this from, will attempt to upload the chunks in parallel for
-         better performance. If you are network bound or are experiencing rate limiting issues, set
-         `upload_parallel` to False.
+         Uploads the content to the specified file. If there are several chunks, the individual
+         chunks are uploaded concurrently.

          :param file_id: The identifier of the file to upload to.
          :param content: The content to upload. **This Content will be compressed before uploading.
-             If you are passing the Input as bytes, pass it uncompressed to avoid
-             redundant work.**
+             If you are passing the Input as bytes, pass it uncompressed.**
          """
          if isinstance(content, str):
              content = content.encode()
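`get_file_stream` gains a `batch_size` parameter: chunk requests are fetched `batch_size` at a time on a thread pool, but still yielded one by one and in order. A sketch with a placeholder file id:

```python
FILE_ID = 116000000000  # placeholder identifier

with open("export.csv", "wb") as out:
    # Up to four chunk downloads in flight; chunks still arrive in order.
    for chunk in client.get_file_stream(FILE_ID, batch_size=4):
        out.write(chunk)
```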
@@ -361,7 +408,7 @@ class Client(_BaseClient):
              content[i : i + self.upload_chunk_size]
              for i in range(0, len(content), self.upload_chunk_size)
          ]
-         logger.info(f"Content will be uploaded in {len(chunks)} chunks.")
+         logger.info(f"Content for file '{file_id}' will be uploaded in {len(chunks)} chunks.")
          self._set_chunk_count(file_id, len(chunks))
          if self.upload_parallel:
              with ThreadPoolExecutor(max_workers=self._thread_count) as executor:
@@ -371,23 +418,48 @@ class Client(_BaseClient):
          else:
              for index, chunk in enumerate(chunks):
                  self._upload_chunk(file_id, index, chunk)
+         logger.info(f"Completed upload for file '{file_id}'.")

-     def upload_file_stream(self, file_id: int, content: Iterator[bytes | str]) -> None:
+     def upload_file_stream(
+         self, file_id: int, content: Iterator[str | bytes], batch_size: int = 1
+     ) -> None:
          """
          Uploads the content to the specified file as a stream of chunks. This is useful either for
          large files where you don't want to or cannot load the entire file into memory at once, or
          if you simply do not know the number of chunks ahead of time and instead just want to pass
          on chunks i.e. consumed from a queue until it is exhausted. In this case, you can pass a
          generator that yields the chunks of the file one by one to this method.
+
          :param file_id: The identifier of the file to upload to.
-         :param content: An Iterator yielding the chunks of the file. (Most likely a generator).
+         :param content: An Iterator yielding the chunks of the file. You can pass
+             any Iterator, but you will most likely want to pass a Generator.
+         :param batch_size: Number of chunks to upload concurrently. If > 1, n chunks will be
+             uploaded concurrently. This can be useful if you either do not control the chunk
+             size, or if you want to keep the chunk size small but still want some concurrency.
          """
+         logger.info(f"Starting upload stream for file '{file_id}' with batch size {batch_size}.")
          self._set_chunk_count(file_id, -1)
-         for index, chunk in enumerate(content):
-             self._upload_chunk(file_id, index, chunk.encode() if isinstance(chunk, str) else chunk)
-
+         indices, chunks = [], []
+         with ThreadPoolExecutor(max_workers=batch_size) as executor:
+             for index, chunk in enumerate(content):
+                 indices.append(index)
+                 chunks.append(chunk)
+                 if len(indices) == max(batch_size, 1):
+                     list(
+                         executor.map(self._upload_chunk, (file_id,) * len(indices), indices, chunks)
+                     )
+                     logger.info(
+                         f"Completed upload stream batch of size {batch_size} for file {file_id}."
+                     )
+                     indices, chunks = [], []
+
+             if indices:
+                 list(executor.map(self._upload_chunk, (file_id,) * len(indices), indices, chunks))
+                 logger.info(
+                     f"Completed final upload stream batch of size {len(indices)} for file {file_id}."
+                 )
          self._post(f"{self._url}/files/{file_id}/complete", json={"id": file_id})
-         logger.info(f"Marked all chunks as complete for file '{file_id}'.")
+         logger.info(f"Completed upload stream for '{file_id}'.")

      def upload_and_import(self, file_id: int, content: str | bytes, action_id: int) -> None:
          """
@@ -412,7 +484,7 @@ class Client(_BaseClient):
          self.run_action(action_id)
          return self.get_file(action_id)

-     def list_task_status(self, action_id: int) -> list[TaskSummary]:
+     def get_task_summaries(self, action_id: int) -> list[TaskSummary]:
          """
          Retrieves the status of all tasks spawned by the specified action.
          :param action_id: The identifier of the action that was invoked.
@@ -449,34 +521,18 @@ class Client(_BaseClient):
              f"{self._url}/optimizeActions/{action_id}/tasks/{task_id}/solutionLogs"
          )

-     def invoke_action(self, action_id: int) -> str:
-         """
-         You may want to consider using `run_action()` instead.
-
-         Invokes the specified Anaplan Action and returns the spawned Task identifier. This is
-         useful if you want to handle the Task status yourself or if you want to run multiple
-         Actions in parallel.
-         :param action_id: The identifier of the Action to run. Can be any Anaplan Invokable.
-         :return: The identifier of the spawned Task.
-         """
-         response = self._post(
-             f"{self._url}/{action_url(action_id)}/{action_id}/tasks", json={"localeName": "en_US"}
-         )
-         task_id = response.get("task").get("taskId")
-         logger.info(f"Invoked Action '{action_id}', spawned Task: '{task_id}'.")
-         return task_id
-
      def _file_pre_check(self, file_id: int) -> int:
-         file = next(filter(lambda f: f.id == file_id, self.list_files()), None)
+         file = next((f for f in self.get_files() if f.id == file_id), None)
          if not file:
              raise InvalidIdentifierException(f"File {file_id} not found.")
          return file.chunk_count

-     def _upload_chunk(self, file_id: int, index: int, chunk: bytes) -> None:
-         self._put_binary_gzip(f"{self._url}/files/{file_id}/chunks/{index}", content=chunk)
-         logger.info(f"Chunk {index} loaded to file '{file_id}'.")
+     def _upload_chunk(self, file_id: int, index: int, chunk: str | bytes) -> None:
+         self._put_binary_gzip(f"{self._url}/files/{file_id}/chunks/{index}", chunk)
+         logger.debug(f"Chunk {index} loaded to file '{file_id}'.")

      def _set_chunk_count(self, file_id: int, num_chunks: int) -> None:
+         logger.debug(f"Setting chunk count for file '{file_id}' to {num_chunks}.")
          if not self.allow_file_creation and not (113000000000 <= file_id <= 113999999999):
              raise InvalidIdentifierException(
                  f"File with Id {file_id} does not exist. If you want to dynamically create files "
@@ -1,3 +1,4 @@
+ import logging
  from typing import Any, Literal

  import httpx
@@ -27,6 +28,8 @@ from anaplan_sdk.models.cloud_works import (

  from ._cw_flow import _FlowClient

+ logger = logging.getLogger("anaplan_sdk")
+

  class _CloudWorksClient(_BaseClient):
      def __init__(self, client: httpx.Client, retry_count: int) -> None:
@@ -41,7 +44,7 @@ class _CloudWorksClient(_BaseClient):
          """
          return self._flow

-     def list_connections(self) -> list[Connection]:
+     def get_connections(self) -> list[Connection]:
          """
          List all Connections available in CloudWorks.
          :return: A list of connections.
@@ -62,7 +65,9 @@ class _CloudWorksClient(_BaseClient):
          res = self._post(
              f"{self._url}/connections", json=construct_payload(ConnectionInput, con_info)
          )
-         return res["connections"]["connectionId"]
+         connection_id = res["connections"]["connectionId"]
+         logger.info(f"Created connection '{connection_id}'.")
+         return connection_id

      def update_connection(self, con_id: str, con_info: ConnectionBody | dict[str, Any]) -> None:
          """
@@ -89,8 +94,9 @@ class _CloudWorksClient(_BaseClient):
          :param con_id: The ID of the connection to delete.
          """
          self._delete(f"{self._url}/connections/{con_id}")
+         logger.info(f"Deleted connection '{con_id}'.")

-     def list_integrations(
+     def get_integrations(
          self, sort_by_name: Literal["ascending", "descending"] = "ascending"
      ) -> list[Integration]:
          """
@@ -141,7 +147,9 @@ class _CloudWorksClient(_BaseClient):
          :return: The ID of the new integration.
          """
          json = integration_payload(body)
-         return (self._post(f"{self._url}", json=json))["integration"]["integrationId"]
+         integration_id = (self._post(f"{self._url}", json=json))["integration"]["integrationId"]
+         logger.info(f"Created integration '{integration_id}'.")
+         return integration_id

      def update_integration(
          self, integration_id: str, body: IntegrationInput | IntegrationProcessInput | dict[str, Any]
@@ -162,7 +170,9 @@ class _CloudWorksClient(_BaseClient):
          :param integration_id: The ID of the integration to run.
          :return: The ID of the run instance.
          """
-         return (self._post_empty(f"{self._url}/{integration_id}/run"))["run"]["id"]
+         run_id = (self._post_empty(f"{self._url}/{integration_id}/run"))["run"]["id"]
+         logger.info(f"Started integration run '{run_id}' for integration '{integration_id}'.")
+         return run_id

      def delete_integration(self, integration_id: str) -> None:
          """
@@ -170,6 +180,7 @@ class _CloudWorksClient(_BaseClient):
          :param integration_id: The ID of the integration to delete.
          """
          self._delete(f"{self._url}/{integration_id}")
+         logger.info(f"Deleted integration '{integration_id}'.")

      def get_run_history(self, integration_id: str) -> list[RunSummary]:
          """
@@ -216,6 +227,7 @@ class _CloudWorksClient(_BaseClient):
              f"{self._url}/{integration_id}/schedule",
              json=schedule_payload(integration_id, schedule),
          )
+         logger.info(f"Created schedule for integration '{integration_id}'.")

      def update_schedule(
          self, integration_id: str, schedule: ScheduleInput | dict[str, Any]
@@ -231,6 +243,7 @@ class _CloudWorksClient(_BaseClient):
              f"{self._url}/{integration_id}/schedule",
              json=schedule_payload(integration_id, schedule),
          )
+         logger.info(f"Updated schedule for integration '{integration_id}'.")

      def set_schedule_status(
          self, integration_id: str, status: Literal["enabled", "disabled"]
@@ -241,6 +254,7 @@ class _CloudWorksClient(_BaseClient):
          :param status: The status of the schedule. This can be either "enabled" or "disabled".
          """
          self._post_empty(f"{self._url}/{integration_id}/schedule/status/{status}")
+         logger.info(f"Set schedule status to '{status}' for integration '{integration_id}'.")

      def delete_schedule(self, integration_id: str) -> None:
          """
@@ -248,6 +262,7 @@ class _CloudWorksClient(_BaseClient):
          :param integration_id: The ID of the integration to schedule.
          """
          self._delete(f"{self._url}/{integration_id}/schedule")
+         logger.info(f"Deleted schedule for integration '{integration_id}'.")

      def get_notification_config(
          self, notification_id: str | None = None, integration_id: str | None = None
@@ -283,7 +298,9 @@ class _CloudWorksClient(_BaseClient):
          res = self._post(
              f"{self._url}/notification", json=construct_payload(NotificationInput, config)
          )
-         return res["notification"]["notificationId"]
+         notification_id = res["notification"]["notificationId"]
+         logger.info(f"Created notification configuration '{notification_id}'.")
+         return notification_id

      def update_notification_config(
          self, notification_id: str, config: NotificationInput | dict[str, Any]
@@ -318,6 +335,7 @@ class _CloudWorksClient(_BaseClient):
          if integration_id:
              notification_id = (self.get_integration(integration_id)).notification_id
          self._delete(f"{self._url}/notification/{notification_id}")
+         logger.info(f"Deleted notification configuration '{notification_id}'.")

      def get_import_error_dump(self, run_id: str) -> bytes:
          """
@@ -1,3 +1,4 @@
+ import logging
  from typing import Any

  import httpx
@@ -5,13 +6,15 @@ import httpx
  from anaplan_sdk._base import _BaseClient, construct_payload
  from anaplan_sdk.models.flows import Flow, FlowInput, FlowSummary

+ logger = logging.getLogger("anaplan_sdk")
+

  class _FlowClient(_BaseClient):
      def __init__(self, client: httpx.Client, retry_count: int) -> None:
          self._url = "https://api.cloudworks.anaplan.com/2/0/integrationflows"
          super().__init__(retry_count, client)

-     def list_flows(self, current_user_only: bool = False) -> list[FlowSummary]:
+     def get_flows(self, current_user_only: bool = False) -> list[FlowSummary]:
          """
          List all flows in CloudWorks.
          :param current_user_only: Filters the flows to only those created by the current user.
@@ -47,7 +50,9 @@ class _FlowClient(_BaseClient):
              if only_steps
              else self._post_empty(url)
          )
-         return res["run"]["id"]
+         run_id = res["run"]["id"]
+         logger.info(f"Started flow run '{run_id}' for flow '{flow_id}'.")
+         return run_id

      def create_flow(self, flow: FlowInput | dict[str, Any]) -> str:
          """
@@ -58,7 +63,9 @@ class _FlowClient(_BaseClient):
          :return: The ID of the created flow.
          """
          res = self._post(self._url, json=construct_payload(FlowInput, flow))
-         return res["integrationFlow"]["integrationFlowId"]
+         flow_id = res["integrationFlow"]["integrationFlowId"]
+         logger.info(f"Created flow '{flow_id}'.")
+         return flow_id

      def update_flow(self, flow_id: str, flow: FlowInput | dict[str, Any]) -> None:
          """
@@ -68,6 +75,7 @@ class _FlowClient(_BaseClient):
          :param flow: The flow to update. This can be a FlowInput object or a dictionary.
          """
          self._put(f"{self._url}/{flow_id}", json=construct_payload(FlowInput, flow))
+         logger.info(f"Updated flow '{flow_id}'.")

      def delete_flow(self, flow_id: str) -> None:
          """
@@ -76,3 +84,4 @@ class _FlowClient(_BaseClient):
          :param flow_id: The ID of the flow to delete.
          """
          self._delete(f"{self._url}/{flow_id}")
+         logger.info(f"Deleted flow '{flow_id}'.")