anaplan-sdk 0.5.0a1__py3-none-any.whl → 0.5.0a3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,16 +1,16 @@
 import asyncio
 import logging
-import random
 import time
-from asyncio import gather
+from asyncio import gather, sleep
 from concurrent.futures import ThreadPoolExecutor
 from gzip import compress
 from itertools import chain
 from math import ceil
-from typing import Any, Callable, Coroutine, Iterator, Literal, Type, TypeVar
+from typing import Any, Awaitable, Callable, Coroutine, Iterator, Literal, Type, TypeVar

 import httpx
 from httpx import HTTPError, Response
+from pydantic.alias_generators import to_camel

 from .exceptions import AnaplanException, AnaplanTimeoutException, InvalidIdentifierException
 from .models import (
@@ -18,6 +18,7 @@ from .models import (
     InsertionResult,
     ModelCalendar,
     MonthsQuartersYearsCalendar,
+    TaskSummary,
     WeeksGeneralCalendar,
     WeeksGroupingCalendar,
     WeeksPeriodsCalendar,
@@ -38,75 +39,101 @@ _json_header = {"Content-Type": "application/json"}
 _gzip_header = {"Content-Type": "application/x-gzip"}

 T = TypeVar("T", bound=AnaplanModel)
-
-
-class _BaseClient:
-    def __init__(self, retry_count: int, client: httpx.Client):
-        self._retry_count = retry_count
+Task = TypeVar("Task", bound=TaskSummary)
+
+
+class _HttpService:
+    def __init__(
+        self,
+        client: httpx.Client,
+        *,
+        retry_count: int,
+        backoff: float,
+        backoff_factor: float,
+        page_size: int,
+        poll_delay: int,
+    ):
+        logger.debug(
+            f"Initializing HttpService with retry_count={retry_count}, "
+            f"page_size={page_size}, poll_delay={poll_delay}."
+        )
         self._client = client
-        logger.debug(f"Initialized BaseClient with retry_count={retry_count}.")
+        self._retry_count = retry_count
+        self._backoff = backoff
+        self._backoff_factor = backoff_factor
+        self._poll_delay = poll_delay
+        self._page_size = min(page_size, 5_000)

-    def _get(self, url: str, **kwargs) -> dict[str, Any]:
+    def get(self, url: str, **kwargs) -> dict[str, Any]:
         return self.__run_with_retry(self._client.get, url, **kwargs).json()

-    def _get_binary(self, url: str) -> bytes:
+    def get_binary(self, url: str) -> bytes:
         return self.__run_with_retry(self._client.get, url).content

-    def _post(self, url: str, json: dict | list) -> dict[str, Any]:
+    def post(self, url: str, json: dict | list) -> dict[str, Any]:
         return self.__run_with_retry(self._client.post, url, headers=_json_header, json=json).json()

-    def _put(self, url: str, json: dict | list) -> dict[str, Any]:
+    def put(self, url: str, json: dict | list) -> dict[str, Any]:
         res = self.__run_with_retry(self._client.put, url, headers=_json_header, json=json)
         return res.json() if res.num_bytes_downloaded > 0 else {}

-    def _patch(self, url: str, json: dict | list) -> dict[str, Any]:
+    def patch(self, url: str, json: dict | list) -> dict[str, Any]:
         return (
             self.__run_with_retry(self._client.patch, url, headers=_json_header, json=json)
         ).json()

-    def _delete(self, url: str) -> dict[str, Any]:
+    def delete(self, url: str) -> dict[str, Any]:
         return (self.__run_with_retry(self._client.delete, url, headers=_json_header)).json()

-    def _post_empty(self, url: str, **kwargs) -> dict[str, Any]:
+    def post_empty(self, url: str, **kwargs) -> dict[str, Any]:
         res = self.__run_with_retry(self._client.post, url, **kwargs)
         return res.json() if res.num_bytes_downloaded > 0 else {}

-    def _put_binary_gzip(self, url: str, content: str | bytes) -> Response:
+    def put_binary_gzip(self, url: str, content: str | bytes) -> Response:
         content = compress(content.encode() if isinstance(content, str) else content)
         return self.__run_with_retry(self._client.put, url, headers=_gzip_header, content=content)

-    def __get_page(self, url: str, limit: int, offset: int, result_key: str, **kwargs) -> list:
-        logger.debug(f"Fetching page: offset={offset}, limit={limit} from {url}.")
-        kwargs["params"] = kwargs.get("params") or {} | {"limit": limit, "offset": offset}
-        return self._get(url, **kwargs).get(result_key, [])
+    def poll_task(self, func: Callable[..., Task], *args) -> Task:
+        while (result := func(*args)).task_state != "COMPLETE":
+            time.sleep(self._poll_delay)
+        return result

-    def __get_first_page(self, url: str, limit: int, result_key: str, **kwargs) -> tuple[list, int]:
-        logger.debug(f"Fetching first page with limit={limit} from {url}.")
-        kwargs["params"] = kwargs.get("params") or {} | {"limit": limit}
-        res = self._get(url, **kwargs)
-        total_items, first_page = res["meta"]["paging"]["totalSize"], res.get(result_key, [])
-        logger.debug(f"Found {total_items} total items, retrieved {len(first_page)} in first page.")
-        return first_page, total_items
-
-    def _get_paginated(
-        self, url: str, result_key: str, page_size: int = 5_000, **kwargs
-    ) -> Iterator[dict[str, Any]]:
-        logger.debug(f"Starting paginated fetch from {url} with page_size={page_size}.")
-        first_page, total_items = self.__get_first_page(url, page_size, result_key, **kwargs)
-        if total_items <= page_size:
+    def get_paginated(self, url: str, result_key: str, **kwargs) -> Iterator[dict[str, Any]]:
+        logger.debug(f"Starting paginated fetch from {url} with page_size={self._page_size}.")
+        first_page, total_items, actual_size = self._get_first_page(url, result_key, **kwargs)
+        if total_items <= self._page_size:
             logger.debug("All items fit in first page, no additional requests needed.")
             return iter(first_page)

-        pages_needed = ceil(total_items / page_size)
-        logger.debug(f"Fetching {pages_needed - 1} additional pages with {page_size} items each.")
+        pages_needed = ceil(total_items / actual_size)
+        logger.debug(f"Fetching {pages_needed - 1} additional pages with {actual_size} items each.")
         with ThreadPoolExecutor() as executor:
             pages = executor.map(
-                lambda n: self.__get_page(url, page_size, n * page_size, result_key, **kwargs),
+                lambda n: self._get_page(url, actual_size, n * actual_size, result_key, **kwargs),
                 range(1, pages_needed),
             )
         logger.debug(f"Completed paginated fetch of {total_items} total items.")
         return chain(first_page, *pages)

+    def _get_page(self, url: str, limit: int, offset: int, result_key: str, **kwargs) -> list:
+        logger.debug(f"Fetching page: offset={offset}, limit={limit} from {url}.")
+        kwargs["params"] = (kwargs.get("params") or {}) | {"limit": limit, "offset": offset}
+        return self.get(url, **kwargs).get(result_key, [])
+
+    def _get_first_page(self, url: str, result_key: str, **kwargs) -> tuple[list, int, int]:
+        logger.debug(f"Fetching first page with limit={self._page_size} from {url}.")
+        kwargs["params"] = (kwargs.get("params") or {}) | {"limit": self._page_size}
+        res = self.get(url, **kwargs)
+        total_items, first_page = res["meta"]["paging"]["totalSize"], res.get(result_key, [])
+        actual_page_size = res["meta"]["paging"]["currentPageSize"]
+        if actual_page_size < self._page_size and not actual_page_size == total_items:
+            logger.warning(
+                f"Page size {self._page_size} was silently truncated to {actual_page_size}."
+                f"Using the server-side enforced page size {actual_page_size} for further requests."
+            )
+        logger.debug(f"Found {total_items} total items, retrieved {len(first_page)} in first page.")
+        return first_page, total_items, actual_page_size
+
     def __run_with_retry(self, func: Callable[..., Response], *args, **kwargs) -> Response:
         for i in range(max(self._retry_count, 1)):
             try:
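The new `poll_task` helper centralizes task polling that each client previously handled itself: it re-invokes a status function until the returned object reports `COMPLETE`, sleeping `poll_delay` seconds between attempts. A minimal standalone sketch of the pattern; the `_FakeTask` stand-in and the `poll_delay` default below are illustrative, not the SDK's `TaskSummary` model:

```python
import time
from dataclasses import dataclass
from typing import Callable, TypeVar


@dataclass
class _FakeTask:  # stand-in for anaplan_sdk's TaskSummary
    task_state: str  # "NOT_STARTED" | "IN_PROGRESS" | "COMPLETE"


Task = TypeVar("Task", bound=_FakeTask)


def poll_task(func: Callable[..., Task], *args, poll_delay: float = 0.1) -> Task:
    # Re-invoke the status function until the task reports COMPLETE.
    while (result := func(*args)).task_state != "COMPLETE":
        time.sleep(poll_delay)
    return result


states = iter(["NOT_STARTED", "IN_PROGRESS", "COMPLETE"])
print(poll_task(lambda: _FakeTask(next(states))).task_state)  # COMPLETE
```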
@@ -114,7 +141,7 @@ class _BaseClient:
                 if response.status_code == 429:
                     if i >= self._retry_count - 1:
                         raise AnaplanException("Rate limit exceeded.")
-                    backoff_time = max(i, 1) * random.randint(2, 5)
+                    backoff_time = self._backoff * (self._backoff_factor if i > 0 else 1)
                     logger.warning(f"Rate limited. Retrying in {backoff_time} seconds.")
                     time.sleep(backoff_time)
                     continue
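The 429 handling also moves from randomized backoff to a deterministic, configurable one. Note that the new expression applies `backoff_factor` once for every retry after the first, rather than compounding it with the attempt index. A toy evaluation with illustrative values:

```python
# Illustrative values only; backoff and backoff_factor are constructor arguments.
backoff, backoff_factor = 2.0, 3.0

for i in range(4):  # attempt index, as in __run_with_retry
    backoff_time = backoff * (backoff_factor if i > 0 else 1)
    print(i, backoff_time)  # 0 -> 2.0, then a constant 6.0 for attempts 1..3
```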
@@ -129,80 +156,101 @@ class _BaseClient:
         raise AnaplanException("Exhausted all retries without a successful response or Error.")


-class _AsyncBaseClient:
-    def __init__(self, retry_count: int, client: httpx.AsyncClient):
-        self._retry_count = retry_count
+class _AsyncHttpService:
+    def __init__(
+        self,
+        client: httpx.AsyncClient,
+        *,
+        retry_count: int,
+        backoff: float,
+        backoff_factor: float,
+        page_size: int,
+        poll_delay: int,
+    ):
+        logger.debug(
+            f"Initializing AsyncHttpService with retry_count={retry_count}, "
+            f"page_size={page_size}, poll_delay={poll_delay}."
+        )
         self._client = client
-        logger.debug(f"Initialized AsyncBaseClient with retry_count={retry_count}.")
+        self._retry_count = retry_count
+        self._backoff = backoff
+        self._backoff_factor = backoff_factor
+        self._poll_delay = poll_delay
+        self._page_size = min(page_size, 5_000)

-    async def _get(self, url: str, **kwargs) -> dict[str, Any]:
-        return (await self.__run_with_retry(self._client.get, url, **kwargs)).json()
+    async def get(self, url: str, **kwargs) -> dict[str, Any]:
+        return (await self._run_with_retry(self._client.get, url, **kwargs)).json()

-    async def _get_binary(self, url: str) -> bytes:
-        return (await self.__run_with_retry(self._client.get, url)).content
+    async def get_binary(self, url: str) -> bytes:
+        return (await self._run_with_retry(self._client.get, url)).content

-    async def _post(self, url: str, json: dict | list) -> dict[str, Any]:
+    async def post(self, url: str, json: dict | list) -> dict[str, Any]:
         return (
-            await self.__run_with_retry(self._client.post, url, headers=_json_header, json=json)
+            await self._run_with_retry(self._client.post, url, headers=_json_header, json=json)
         ).json()

-    async def _put(self, url: str, json: dict | list) -> dict[str, Any]:
-        res = await self.__run_with_retry(self._client.put, url, headers=_json_header, json=json)
+    async def put(self, url: str, json: dict | list) -> dict[str, Any]:
+        res = await self._run_with_retry(self._client.put, url, headers=_json_header, json=json)
         return res.json() if res.num_bytes_downloaded > 0 else {}

-    async def _patch(self, url: str, json: dict | list) -> dict[str, Any]:
+    async def patch(self, url: str, json: dict | list) -> dict[str, Any]:
         return (
-            await self.__run_with_retry(self._client.patch, url, headers=_json_header, json=json)
+            await self._run_with_retry(self._client.patch, url, headers=_json_header, json=json)
         ).json()

-    async def _delete(self, url: str) -> dict[str, Any]:
-        return (await self.__run_with_retry(self._client.delete, url, headers=_json_header)).json()
+    async def delete(self, url: str) -> dict[str, Any]:
+        return (await self._run_with_retry(self._client.delete, url, headers=_json_header)).json()

-    async def _post_empty(self, url: str, **kwargs) -> dict[str, Any]:
-        res = await self.__run_with_retry(self._client.post, url, **kwargs)
+    async def post_empty(self, url: str, **kwargs) -> dict[str, Any]:
+        res = await self._run_with_retry(self._client.post, url, **kwargs)
         return res.json() if res.num_bytes_downloaded > 0 else {}

-    async def _put_binary_gzip(self, url: str, content: str | bytes) -> Response:
+    async def put_binary_gzip(self, url: str, content: str | bytes) -> Response:
         content = compress(content.encode() if isinstance(content, str) else content)
-        return await self.__run_with_retry(
+        return await self._run_with_retry(
             self._client.put, url, headers=_gzip_header, content=content
         )

-    async def __get_page(
-        self, url: str, limit: int, offset: int, result_key: str, **kwargs
-    ) -> list:
-        logger.debug(f"Fetching page: offset={offset}, limit={limit} from {url}.")
-        kwargs["params"] = kwargs.get("params") or {} | {"limit": limit, "offset": offset}
-        return (await self._get(url, **kwargs)).get(result_key, [])
-
-    async def __get_first_page(
-        self, url: str, limit: int, result_key: str, **kwargs
-    ) -> tuple[list, int]:
-        logger.debug(f"Fetching first page with limit={limit} from {url}.")
-        kwargs["params"] = kwargs.get("params") or {} | {"limit": limit}
-        res = await self._get(url, **kwargs)
-        total_items, first_page = res["meta"]["paging"]["totalSize"], res.get(result_key, [])
-        logger.debug(f"Found {total_items} total items, retrieved {len(first_page)} in first page.")
-        return first_page, total_items
-
-    async def _get_paginated(
-        self, url: str, result_key: str, page_size: int = 5_000, **kwargs
-    ) -> Iterator[dict[str, Any]]:
-        logger.debug(f"Starting paginated fetch from {url} with page_size={page_size}.")
-        first_page, total_items = await self.__get_first_page(url, page_size, result_key, **kwargs)
-        if total_items <= page_size:
+    async def poll_task(self, func: Callable[..., Awaitable[Task]], *args) -> Task:
+        while (result := await func(*args)).task_state != "COMPLETE":
+            await sleep(self._poll_delay)
+        return result
+
+    async def get_paginated(self, url: str, result_key: str, **kwargs) -> Iterator[dict[str, Any]]:
+        logger.debug(f"Starting paginated fetch from {url} with page_size={self._page_size}.")
+        first_page, total_items, actual_size = await self._get_first_page(url, result_key, **kwargs)
+        if total_items <= self._page_size:
             logger.debug("All items fit in first page, no additional requests needed.")
             return iter(first_page)
         pages = await gather(
             *(
-                self.__get_page(url, page_size, n * page_size, result_key, **kwargs)
-                for n in range(1, ceil(total_items / page_size))
+                self._get_page(url, actual_size, n * actual_size, result_key, **kwargs)
+                for n in range(1, ceil(total_items / actual_size))
             )
         )
-        logger.info(f"Completed paginated fetch of {total_items} total items.")
+        logger.debug(f"Completed paginated fetch of {total_items} total items.")
         return chain(first_page, *pages)

-    async def __run_with_retry(
+    async def _get_page(self, url: str, limit: int, offset: int, result_key: str, **kwargs) -> list:
+        logger.debug(f"Fetching page: offset={offset}, limit={limit} from {url}.")
+        kwargs["params"] = (kwargs.get("params") or {}) | {"limit": limit, "offset": offset}
+        return (await self.get(url, **kwargs)).get(result_key, [])
+
+    async def _get_first_page(self, url: str, result_key: str, **kwargs) -> tuple[list, int, int]:
+        logger.debug(f"Fetching first page with limit={self._page_size} from {url}.")
+        kwargs["params"] = (kwargs.get("params") or {}) | {"limit": self._page_size}
+        res = await self.get(url, **kwargs)
+        total_items, first_page = res["meta"]["paging"]["totalSize"], res.get(result_key, [])
+        actual_page_size = res["meta"]["paging"]["currentPageSize"]
+        if actual_page_size < self._page_size and not actual_page_size == total_items:
+            logger.warning(
+                f"Page size {self._page_size} was silently truncated to {actual_page_size}."
+                f"Using the server-side enforced page size {actual_page_size} for further requests."
+            )
+        logger.debug(f"Found {total_items} total items, retrieved {len(first_page)} in first page.")
+        return first_page, total_items, actual_page_size
+
+    async def _run_with_retry(
         self, func: Callable[..., Coroutine[Any, Any, Response]], *args, **kwargs
     ) -> Response:
         for i in range(max(self._retry_count, 1)):
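The async service mirrors the sync one but fans out the remaining page requests with `asyncio.gather` instead of a thread pool. A self-contained sketch of that pattern, with a stub fetcher standing in for the SDK's HTTP layer:

```python
import asyncio
from itertools import chain
from math import ceil

ITEMS = list(range(12))  # pretend server-side collection
PAGE = 5


async def fetch_page(offset: int, limit: int) -> list[int]:
    await asyncio.sleep(0)  # stands in for the HTTP round-trip
    return ITEMS[offset : offset + limit]


async def get_all() -> list[int]:
    first = await fetch_page(0, PAGE)  # first page also reveals the total count
    pages = await asyncio.gather(  # remaining pages fetched concurrently
        *(fetch_page(n * PAGE, PAGE) for n in range(1, ceil(len(ITEMS) / PAGE)))
    )
    return list(chain(first, *pages))


print(asyncio.run(get_all()))  # [0, 1, ..., 11]
```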
@@ -211,7 +259,7 @@ class _AsyncBaseClient:
                 if response.status_code == 429:
                     if i >= self._retry_count - 1:
                         raise AnaplanException("Rate limit exceeded.")
-                    backoff_time = (i + 1) * random.randint(3, 5)
+                    backoff_time = self._backoff * (self._backoff_factor if i > 0 else 1)
                     logger.warning(f"Rate limited. Retrying in {backoff_time} seconds.")
                     await asyncio.sleep(backoff_time)
                     continue
@@ -226,6 +274,16 @@ class _AsyncBaseClient:
         raise AnaplanException("Exhausted all retries without a successful response or Error.")


+def sort_params(sort_by: str, descending: bool) -> dict[str, str | bool]:
+    """
+    Construct search parameters for sorting. This also converts snake_case to camelCase.
+    :param sort_by: The field to sort by, optionally in snake_case.
+    :param descending: Whether to sort in descending order.
+    :return: A dictionary of search parameters in Anaplan's expected format.
+    """
+    return {"sort": f"{'-' if descending else '+'}{to_camel(sort_by)}"}
+
+
 def construct_payload(model: Type[T], body: T | dict[str, Any]) -> dict[str, Any]:
     """
     Construct a payload for the given model and body.
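The new `sort_params` helper (copied verbatim above) uses pydantic's `to_camel` to turn a snake_case field name into the `+`/`-`-prefixed camelCase sort key. A usage sketch:

```python
from pydantic.alias_generators import to_camel


def sort_params(sort_by: str, descending: bool) -> dict[str, str | bool]:
    # As added in this release: "-" marks descending, "+" ascending.
    return {"sort": f"{'-' if descending else '+'}{to_camel(sort_by)}"}


print(sort_params("creation_time", descending=True))   # {'sort': '-creationTime'}
print(sort_params("creation_time", descending=False))  # {'sort': '+creationTime'}
```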
@@ -34,6 +34,7 @@ from ._transactional import (
     FiscalYear,
     InsertionResult,
     LineItem,
+    ListDeletionResult,
     ListItem,
     ModelCalendar,
     ModelStatus,
@@ -91,4 +92,5 @@ __all__ = [
     "ModelCalendar",
     "ModelDeletionResult",
     "DimensionWithCode",
+    "ListDeletionResult",
 ]
@@ -1,7 +1,6 @@
 from typing import Literal, TypeAlias

-from pydantic import ConfigDict, Field, field_validator
-from pydantic.alias_generators import to_camel
+from pydantic import Field, field_validator

 from ._base import AnaplanModel

@@ -164,13 +163,7 @@ class TaskResult(AnaplanModel):
     )


-class TaskStatus(AnaplanModel):
-    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
-    id: str = Field(validation_alias="taskId", description="The unique identifier of this task.")
-    task_state: Literal["NOT_STARTED", "IN_PROGRESS", "COMPLETE"] = Field(
-        description="The state of this task."
-    )
-    creation_time: int = Field(description="Unix timestamp of when this task was created.")
+class TaskStatus(TaskSummary):
     progress: float = Field(description="The progress of this task as a float between 0 and 1.")
     current_step: str | None = Field(None, description="The current step of this task.")
     result: TaskResult | None = Field(None)
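The `id`, `task_state`, and `creation_time` fields (and the camelCase alias config) move into a shared `TaskSummary` base, so `TaskStatus` keeps only the status-specific fields. A trimmed sketch of the resulting hierarchy; field descriptions are omitted and `TaskSummary` itself lives elsewhere in the package:

```python
from typing import Literal

from pydantic import BaseModel, ConfigDict, Field
from pydantic.alias_generators import to_camel


class TaskSummary(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
    id: str = Field(validation_alias="taskId")
    task_state: Literal["NOT_STARTED", "IN_PROGRESS", "COMPLETE"]
    creation_time: int


class TaskStatus(TaskSummary):  # inherits id, task_state, creation_time
    progress: float
    current_step: str | None = None


status = TaskStatus.model_validate(
    {"taskId": "t1", "taskState": "COMPLETE", "creationTime": 0, "progress": 1.0}
)
print(status.id, status.task_state)  # t1 COMPLETE
```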
@@ -141,7 +141,9 @@ class ConnectionInput(AnaplanModel):


 class Connection(_VersionedBaseModel):
-    connection_id: str = Field(description="The unique identifier of this connection.")
+    id: str = Field(
+        validation_alias="connectionId", description="The unique identifier of this connection."
+    )
     connection_type: ConnectionType = Field(description="The type of this connection.")
     body: AzureBlobConnectionInfo | AmazonS3ConnectionInfo | GoogleBigQueryConnectionInfo = Field(
         description="Connection information."
@@ -237,7 +239,9 @@ class _BaseIntegration(_VersionedBaseModel):


 class Integration(_BaseIntegration):
-    integration_id: str = Field(description="The unique identifier of this integration.")
+    id: str = Field(
+        validation_alias="integrationId", description="The unique identifier of this integration."
+    )
     integration_type: Literal["Import", "Export", "Process"] = Field(
         description="The type of this integration."
     )
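Both renames are breaking for callers: the raw API keys stay `connectionId` and `integrationId`, but the model attribute is now `id`. A minimal sketch of how the `validation_alias` maps the payload key, with the model trimmed to the relevant field:

```python
from pydantic import BaseModel, Field


class Connection(BaseModel):  # trimmed; the real model extends _VersionedBaseModel
    id: str = Field(validation_alias="connectionId")


conn = Connection.model_validate({"connectionId": "abc123"})
print(conn.id)  # abc123, so code reading `.connection_id` must migrate to `.id`
```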
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: anaplan-sdk
-Version: 0.5.0a1
-Summary: Streamlined Python Interface for Anaplan
+Version: 0.5.0a3
+Summary: Streamlined Python Interface for the Anaplan API.
 Project-URL: Homepage, https://vinzenzklass.github.io/anaplan-sdk/
 Project-URL: Repository, https://github.com/VinzenzKlass/anaplan-sdk
 Project-URL: Documentation, https://vinzenzklass.github.io/anaplan-sdk/
@@ -0,0 +1,30 @@
+anaplan_sdk/__init__.py,sha256=WScEKtXlnRLjCb-j3qW9W4kEACTyPsTLFs-L54et2TQ,351
+anaplan_sdk/_auth.py,sha256=l5z2WCcfQ05OkuQ1dcmikp6dB87Rw1qy2zu8bbaAQTs,16620
+anaplan_sdk/_oauth.py,sha256=AynlJDrGIinQT0jwxI2RSvtU4D7Wasyw3H1uicdlLVI,12672
+anaplan_sdk/_services.py,sha256=gEeGUBAtmkp55-pZXPU1ehd57pGJkkVJJ6OB1oJ5awU,19252
+anaplan_sdk/exceptions.py,sha256=ALkA9fBF0NQ7dufFxV6AivjmHyuJk9DOQ9jtJV2n7f0,1809
+anaplan_sdk/_async_clients/__init__.py,sha256=pZXgMMg4S9Aj_pxQCaSiPuNG-sePVGBtNJ0133VjqW4,364
+anaplan_sdk/_async_clients/_alm.py,sha256=rhVhykUo6wIvA1SBQkpEAviSsVLURumi_3XQlxTf7z8,12788
+anaplan_sdk/_async_clients/_audit.py,sha256=dipSzp4jMvRCHJAVMQfO854_wpmIcYEDinEPSGdoms4,2342
+anaplan_sdk/_async_clients/_bulk.py,sha256=hjxJ4kY0IfyWYYLPu9QuFkxsPFGbcmY0XPru5LatTcs,30069
+anaplan_sdk/_async_clients/_cloud_works.py,sha256=ecm7DqT39J56xQwYxJMKd_ZVqxzXZdpmagwJUvqKBj4,17613
+anaplan_sdk/_async_clients/_cw_flow.py,sha256=_allKIOP-qb33wrOj6GV5VAOvrCXOVJ1QXvck-jsocQ,3935
+anaplan_sdk/_async_clients/_transactional.py,sha256=gO4v22O0RIHLYwfaVURie1-uLlXo0ypUlOloOZEgmtw,18038
+anaplan_sdk/_clients/__init__.py,sha256=FsbwvZC1FHrxuRXwbPxUzbhz_lO1DpXIxEOjx6-3QuA,219
+anaplan_sdk/_clients/_alm.py,sha256=_LlZIRCE3HxZ4OzU13LOGnX4MQ26j2puSPTy9WGJa3o,12515
+anaplan_sdk/_clients/_audit.py,sha256=9mq7VGYsl6wOdIU7G3GvzP3O7r1ZDCFg5eAu7k4RgxM,2154
+anaplan_sdk/_clients/_bulk.py,sha256=aY2uwAzXSMorEZr7FfP18QA_z0NV6ONQrqOOsyg8CsU,28108
+anaplan_sdk/_clients/_cloud_works.py,sha256=C4F_zJ70vIlZ1lPTCsHkbB41oO6vip9iXaQqRHo4FuY,17404
+anaplan_sdk/_clients/_cw_flow.py,sha256=O6t4utbDZdSVXGC0PXUcPpQ4oXrPohU9_8SUBCpxTXw,3824
+anaplan_sdk/_clients/_transactional.py,sha256=avqww59ccM3FqYMeK1oE-8UH4jyk_pKSCETzhSGKyxA,16936
+anaplan_sdk/models/__init__.py,sha256=zfwDQJQrXuLEXSpbJcm08a_YK1P7a7u-kMhwtJiJFmA,1783
+anaplan_sdk/models/_alm.py,sha256=oeENd0YM7-LoIRBq2uATIQTxVgIP9rXx3UZE2UnQAp0,4670
+anaplan_sdk/models/_base.py,sha256=6AZc9CfireUKgpZfMxYKu4MbwiyHQOsGLjKrxGXBLic,508
+anaplan_sdk/models/_bulk.py,sha256=S72qujNr5STdiyKaCEvrQjKYHik_aemiJFNKE7docpI,8405
+anaplan_sdk/models/_transactional.py,sha256=2bH10zvtMb5Lfh6DC7iQk72aEwq6tyLQ-XnH_0wYSqI,14172
+anaplan_sdk/models/cloud_works.py,sha256=APUGDt_e-JshtXkba5cQh5rZkXOZBz0Aix0qVNdEWgw,19501
+anaplan_sdk/models/flows.py,sha256=SuLgNj5-2SeE3U1i8iY8cq2IkjuUgd_3M1n2ENructk,3625
+anaplan_sdk-0.5.0a3.dist-info/METADATA,sha256=zd3-QsRJ_Fa9PTqn2wt0lMnLrJAkHuLi1YfmYnboeoE,3678
+anaplan_sdk-0.5.0a3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+anaplan_sdk-0.5.0a3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+anaplan_sdk-0.5.0a3.dist-info/RECORD,,
@@ -1,30 +0,0 @@
-anaplan_sdk/__init__.py,sha256=WScEKtXlnRLjCb-j3qW9W4kEACTyPsTLFs-L54et2TQ,351
-anaplan_sdk/_auth.py,sha256=l5z2WCcfQ05OkuQ1dcmikp6dB87Rw1qy2zu8bbaAQTs,16620
-anaplan_sdk/_base.py,sha256=i7NznGMnI9tVQB5mXo5DakSRmvWoDOgfkOMWQGNiXOI,16616
-anaplan_sdk/_oauth.py,sha256=AynlJDrGIinQT0jwxI2RSvtU4D7Wasyw3H1uicdlLVI,12672
-anaplan_sdk/exceptions.py,sha256=ALkA9fBF0NQ7dufFxV6AivjmHyuJk9DOQ9jtJV2n7f0,1809
-anaplan_sdk/_async_clients/__init__.py,sha256=pZXgMMg4S9Aj_pxQCaSiPuNG-sePVGBtNJ0133VjqW4,364
-anaplan_sdk/_async_clients/_alm.py,sha256=pL-l9EBkbR_M7FbIpKo-YPi265busEhqJn2fB0syVsA,13063
-anaplan_sdk/_async_clients/_audit.py,sha256=tsMydMxepKW9NVAVpqoC48sfmKKC7bJoljUycWfxipA,2396
-anaplan_sdk/_async_clients/_bulk.py,sha256=OcCzvMhbQjJBbeyVw6J5o3Kipa7Tfv0fJcS5MAcIZn4,26708
-anaplan_sdk/_async_clients/_cloud_works.py,sha256=miTgllBKcd5-kKjy5XIdMzndQnyR2DngEN8ldS7t_Rg,17529
-anaplan_sdk/_async_clients/_cw_flow.py,sha256=gb7UhKuYI0Z1ftEeLKtx-oWmaqDJN_RTRdiOm2ZkjFM,3991
-anaplan_sdk/_async_clients/_transactional.py,sha256=9TbFaOYG3cPUPwgQosTtFWXQ6G2PiU3P8r5Mw1p4-dk,17063
-anaplan_sdk/_clients/__init__.py,sha256=FsbwvZC1FHrxuRXwbPxUzbhz_lO1DpXIxEOjx6-3QuA,219
-anaplan_sdk/_clients/_alm.py,sha256=IS9G7B8MI9ACucxBKdA60p8Bz3-FsywleNK6g3_cK50,12788
-anaplan_sdk/_clients/_audit.py,sha256=zjzuU0YS1XViBKZfIVJXAivIsskuJQRyU22DRnpSHZo,2265
-anaplan_sdk/_clients/_bulk.py,sha256=bIzu6wd0JbwwQdK4-tp25gVrx3Q7V2NNrV7fFe6TkPg,26760
-anaplan_sdk/_clients/_cloud_works.py,sha256=bFtIgexOS6oXftrTL-o2E1v5H8CzNxDy-vHAlUxdKJg,17334
-anaplan_sdk/_clients/_cw_flow.py,sha256=F7zoZ4CEpXe7FcGTJO9y2vJC5cd7Jz-ipTnlsk4Q-dA,3867
-anaplan_sdk/_clients/_transactional.py,sha256=rEa9VdgTFYQkzij9nBGdur7KbWDq_RrkiuwE25FZ0ic,16767
-anaplan_sdk/models/__init__.py,sha256=8qS16lOb2cKxbHzqMkTK0bYvzolucppqD1I7Xx1I5rc,1731
-anaplan_sdk/models/_alm.py,sha256=oeENd0YM7-LoIRBq2uATIQTxVgIP9rXx3UZE2UnQAp0,4670
-anaplan_sdk/models/_base.py,sha256=6AZc9CfireUKgpZfMxYKu4MbwiyHQOsGLjKrxGXBLic,508
-anaplan_sdk/models/_bulk.py,sha256=WL0OPNbsYo7lpx-vrk_GLvQXZijxfpE0kleqfQifRyg,8868
-anaplan_sdk/models/_transactional.py,sha256=2bH10zvtMb5Lfh6DC7iQk72aEwq6tyLQ-XnH_0wYSqI,14172
-anaplan_sdk/models/cloud_works.py,sha256=nfn_LHPR-KmW7Tpvz-5qNCzmR8SYgvsVV-lx5iDlyqI,19425
-anaplan_sdk/models/flows.py,sha256=SuLgNj5-2SeE3U1i8iY8cq2IkjuUgd_3M1n2ENructk,3625
-anaplan_sdk-0.5.0a1.dist-info/METADATA,sha256=e9ldeBTGI3rqZ9o8VsK_rCl4e2BOqCRIW_Zl5B4UH4U,3669
-anaplan_sdk-0.5.0a1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-anaplan_sdk-0.5.0a1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-anaplan_sdk-0.5.0a1.dist-info/RECORD,,