brynq-sdk-monday 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brynq_sdk_monday/__init__.py +3 -0
- brynq_sdk_monday/extract_monday.py +335 -0
- brynq_sdk_monday/extract_tracket.py +105 -0
- brynq_sdk_monday/upload_tracket.py +150 -0
- brynq_sdk_monday-2.1.2.dist-info/METADATA +16 -0
- brynq_sdk_monday-2.1.2.dist-info/RECORD +8 -0
- brynq_sdk_monday-2.1.2.dist-info/WHEEL +5 -0
- brynq_sdk_monday-2.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,335 @@ brynq_sdk_monday/extract_monday.py
from brynq_sdk_brynq import BrynQ
import os
import sys
import pandas as pd
from typing import Union, List, Literal, Optional
import requests
import json

basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(basedir)


class ExtractMonday(BrynQ):

    def __init__(self, system_type: Optional[Literal['source', 'target']] = None, debug: bool = False):
        """
        For the full documentation, see: https://developer.monday.com/api-reference/docs/basics
        """
        super().__init__()
        self.endpoint = "https://api.monday.com/v2/"
        self.debug = debug
        self.timeout = 3600
        self.headers = self.__get_headers(system_type)

    def __get_headers(self, system_type):
        credentials = self.interfaces.credentials.get(system="monday", system_type=system_type)
        credentials = credentials.get('data')
        api_key = credentials['api_key']
        headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json',
            'API-Version': '2023-10'
        }

        return headers

    def get_activity_logs_boards(self, board_id: int, start_date: str, end_date: str, column_ids: str = '', limit: int = 25):
        """
        See the docs: https://developer.monday.com/api-reference/docs/activity-logs
        :param board_id: the ID of the board you want to get the activity logs from
        :param start_date: start date in YYYY-MM-DD format
        :param end_date: end date in YYYY-MM-DD format
        :param column_ids: optional column ID(s) to get the status updates for. If empty, updates for all columns will be returned
        :param limit: number of items to return per page. Default is 25
        """
        continue_loop = True
        page = 0
        df = pd.DataFrame()
        while continue_loop:
            page += 1
            # Only filter on columns when column_ids is supplied, so an empty value returns updates for all columns
            column_clause = f', column_ids: ["{column_ids}"]' if column_ids else ''
            payload = json.dumps({
                "query": f"query {{boards (ids: {board_id}) {{ activity_logs (from: \"{start_date}\", to: \"{end_date}\", limit: {limit}, page: {page}{column_clause}) {{ id event entity data user_id created_at }} }} }}"
            })
            if self.debug:
                print(payload)
            response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)
            if self.debug:
                print(response.json())
            response.raise_for_status()
            response_length = len(response.json()['data']['boards'][0]['activity_logs'])
            if response_length > 0:
                df_temp = pd.json_normalize(response.json()['data']['boards'][0]['activity_logs'])
                df = pd.concat([df, df_temp], axis=0)
            if response_length < limit:
                continue_loop = False
        return df

    def get_activity_logs_items(
            self,
            board_id: int,
            item_ids: list,
            start_date: str,
            end_date: str = None,
            limit: int = 500,
    ):
        """
        Retrieve activity-log history for one or more items on a monday.com board.

        Parameters
        ----------
        board_id : int
            ID of the board that owns the items.
        item_ids : list
            List of item IDs (max 50 per request, per monday API limits).
        start_date : str
            Lower-bound timestamp in ISO-8601 format (``YYYY-MM-DDThh:mm:ssZ``).
        end_date : str, optional
            Upper-bound timestamp in ISO-8601 format. ``None`` means "up to now".
        limit : int, optional
            Maximum log rows per page (default 500, monday hard-caps at 500).

        Returns
        -------
        pandas.DataFrame
            DataFrame containing the activity-log rows.
        """
        continue_loop = True
        page = 0
        df = pd.DataFrame()

        # Pre-format the item-ID literal once
        ids_literal = ",".join(str(i) for i in item_ids)

        while continue_loop:
            page += 1
            to_clause = f'to: "{end_date}", ' if end_date else ''
            query = (
                f'query {{ boards (ids: {board_id}) {{ '
                f'activity_logs (item_ids: [{ids_literal}], '
                f'from: "{start_date}", '
                f'{to_clause}'
                f'limit: {limit}, page: {page}) '
                f'{{ id event entity data user_id created_at }} }} }}'
            )
            payload = json.dumps({"query": query})

            if self.debug:
                print(payload)

            response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)

            if self.debug:
                print(response.json())

            response.raise_for_status()

            logs = response.json()["data"]["boards"][0]["activity_logs"]

            # Append to the dataframe if we received rows
            if logs:
                df = pd.concat([df, pd.json_normalize(logs)], axis=0)

            # Stop looping once the page returns fewer rows than the page size
            if len(logs) < limit:
                continue_loop = False

        # Flatten the JSON held in the "data" column into real columns
        if not df.empty and "data" in df.columns:
            # Convert JSON-encoded strings into dicts
            data_dicts = df["data"].apply(lambda x: json.loads(x) if isinstance(x, str) and x.startswith("{") else {})
            df_expanded = pd.json_normalize(data_dicts, sep="__")
            df = pd.concat([df.drop(columns=["data"]).reset_index(drop=True), df_expanded.reset_index(drop=True)], axis=1)

        return df

    def get_users(self, limit: int = 50, fields: str = 'id name created_at email is_admin is_guest is_view_only is_pending enabled join_date title last_activity account {id}'):
        continue_loop = True
        page = 0
        df = pd.DataFrame()
        while continue_loop:
            page += 1
            payload = json.dumps({
                "query": f"query {{users (limit:{limit} page:{page}) {{ {fields} }} }}"
            })
            if self.debug:
                print(payload)
            response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)
            if self.debug:
                print(response.json())
            response.raise_for_status()
            response_length = len(response.json()['data']['users'])
            if response_length > 0:
                df_temp = pd.json_normalize(response.json()['data']['users'])
                df = pd.concat([df, df_temp], axis=0)
            if response_length < limit:
                continue_loop = False
        return df

    def get_boards(self, limit: int = 50, fields: str = 'id name description board_kind board_folder_id state items_count'):
        continue_loop = True
        page = 0
        df = pd.DataFrame()
        while continue_loop:
            page += 1
            payload = json.dumps({
                "query": f"query {{boards (limit:{limit} page:{page}) {{ {fields} }} }}"
            })
            if self.debug:
                print(payload)
            response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)
            if self.debug:
                print(response.json())
            response.raise_for_status()
            response_length = len(response.json()['data']['boards'])
            if response_length > 0:
                df_temp = pd.json_normalize(response.json()['data']['boards'])
                df = pd.concat([df, df_temp], axis=0)
            if response_length < limit:
                continue_loop = False
        return df

    def get_groups(self, board_id, fields: str = 'id title position archived deleted color'):
        """
        Get the groups from a board. Groups are groupings of items on a board.
        :param board_id: mandatory field from monday.com
        :param fields: optional fields to be returned. Enter as one space-separated string without commas. Default is id, title, position, archived, deleted and color
        """
        payload = json.dumps({
            "query": f"query {{boards (ids:{board_id}) {{groups {{ {fields} }} }} }}"
        })
        if self.debug:
            print(payload)
        response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)
        if self.debug:
            print(response.json())
        response.raise_for_status()
        response_data = response.json()
        if isinstance(response_data.get('data'), dict):
            df = pd.json_normalize(response_data['data']['boards'][0]['groups'])
            return df
        else:
            return response

    def get_column_values(self, item_ids: list):
        """
        :param item_ids: all the items you want to get the column values for
        """
        # Chunk into lists of 25 items since monday.com doesn't accept requests with too many items at once
        if not isinstance(item_ids, list):
            item_ids = item_ids.tolist()
        items_list = [item_ids[pos:pos + 25] for pos in range(0, len(item_ids), 25)]
        all_data = []

        for chunk in items_list:
            payload = {
                "query": f"query {{items (ids: {json.dumps(chunk)} exclude_nonactive: false) {{id name state updated_at column_values {{ column {{ title }} id text value }} }} }}"
            }
            payload = json.dumps(payload, ensure_ascii=False)
            if self.debug:
                print(payload)
            response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)
            if self.debug:
                print(response.json())
            response.raise_for_status()
            data = response.json()['data']

            # Flatten the data
            flat_data = []
            for item in data['items']:
                row = {}
                row['item_id'] = item['id']
                row['item'] = item['name']
                row['state'] = item['state']
                row['updated_at'] = item['updated_at']
                for col in item['column_values']:
                    title = col['column']['title']
                    # Fall back to the raw value when no display text is available
                    text = col['value'] if col['text'] is None else col['text']
                    row[title] = text
                flat_data.append(row)
            all_data.extend(flat_data)
        df = pd.DataFrame(all_data)
        return df

    def get_items(self,
                  board_id: int = None,
                  limit: int = 50,
                  linked_board_id: int = None,
                  source_column_id: str = None,
                  target_column_id: str = None,
                  item_filter: list = None,
                  fields: str = 'id name created_at email group {id} parent_item {id} state subitems {id} updated_at creator_id'
                  ):
        """
        Get the items from a board. Be aware: this request only returns the item IDs and the requested fields; column values must be retrieved separately (see get_column_values).
        :param board_id: mandatory field from monday.com
        :param linked_board_id: if you want to get fields from a linked board, enter that board ID here
        :param source_column_id: the column ID of the column on the CURRENT (board_id) board that links to the linked board. This is the column that contains the linked items
        :param target_column_id: the column ID of the column on the LINKED (linked_board_id) board that contains the value you want to get
        :param limit: number of items to return per page. Default is 50
        :param item_filter: optional filter to select specific items by their ID. Pass a list of IDs
        :param fields: optional fields to be returned. Enter as one space-separated string without commas. Default is id, name, created_at, email, group {id}, parent_item {id}, state, subitems {id}, updated_at and creator_id
        """
        # monday.com doesn't accept requests for too many items at once, so split the request into multiple requests
        df = pd.DataFrame()
        # Create chunks of item_filter if it has more IDs than the page limit
        if item_filter:
            item_filter_chunks = [item_filter[i:i + limit] for i in range(0, len(item_filter), limit)]
        else:
            item_filter_chunks = [None]

        for item_filter_chunk in item_filter_chunks:

            if item_filter_chunk and linked_board_id and board_id:
                if not source_column_id or not target_column_id:
                    raise ValueError('If you have filled the linked_board_id, you need to specify the source_column_id and target_column_id')
                payload = {"query":
                           f"query {{boards (ids:{board_id}) {{id items_page (limit: {limit}, query_params: {{ids: {item_filter_chunk} }}) "
                           f"{{cursor items {{ {fields} linked_items (linked_board_id: {linked_board_id}, link_to_item_column_id: \"{source_column_id}\") {{ id column_values(ids: [\"{target_column_id}\"]) {{ id text value }} }} }} }} }} }}"}
            elif linked_board_id and board_id:
                if not source_column_id or not target_column_id:
                    raise ValueError('If you have filled the linked_board_id, you need to specify the source_column_id and target_column_id')
                payload = {"query":
                           f"query {{boards (ids:{board_id}) {{id items_page (limit: {limit}) {{cursor items {{ {fields} linked_items "
                           f"(linked_board_id: {linked_board_id}, link_to_item_column_id: \"{source_column_id}\") {{ id column_values(ids: [\"{target_column_id}\"]) {{ id text value }} }} }} }} }} }}"}
            elif item_filter_chunk and board_id:
                payload = {"query": f"query {{boards (ids:{board_id}) {{id items_page (limit: {limit}, query_params: {{ids: {item_filter_chunk} }}) {{cursor items {{ {fields} }} }} }} }}"}
            elif board_id:
                payload = {"query": f"query {{boards (ids:{board_id}) {{id items_page (limit: {limit}) {{cursor items {{ {fields} }} }} }} }}"}
            else:
                payload = {"query": f"query {{items (ids: {item_filter_chunk}) {{ {fields} }} }}"}

            payload = json.dumps(payload)
            if self.debug:
                print(payload)
            continue_loop = True
            while continue_loop:
                response = requests.request("POST", self.endpoint, headers=self.headers, data=payload, timeout=self.timeout)
                if self.debug:
                    print(response.json())
                response.raise_for_status()
                if board_id:
                    data = response.json()['data']['boards'][0]['items_page']
                else:
                    data = response.json()['data']
                df_temp = pd.json_normalize(data['items'])
                df = pd.concat([df, df_temp])

                # Check if there is a next page
                cursor = data.get('cursor')
                if cursor:
                    if linked_board_id:
                        payload = json.dumps({
                            "query": f"query {{boards (ids:{board_id}) {{id items_page (limit: {limit}, cursor: \"{cursor}\") {{cursor items {{ {fields} linked_items (linked_board_id: {linked_board_id}, link_to_item_column_id: \"{source_column_id}\") {{ id column_values(ids: [\"{target_column_id}\"]) {{ id text value }} }} }} }} }} }}"
                        })
                    else:
                        payload = json.dumps({
                            "query": f"query {{boards (ids:{board_id}) {{id items_page (limit: {limit}, cursor: \"{cursor}\") {{cursor items {{ {fields} }} }} }} }}"
                        })
                else:
                    continue_loop = False

        return df
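
Usage sketch (not part of the wheel contents): a minimal, hypothetical example of pulling data with ExtractMonday. It assumes the package's __init__.py re-exports the class and that BrynQ credentials for system "monday" are configured; the board ID is a placeholder.

    from brynq_sdk_monday import ExtractMonday  # assumes __init__.py re-exports the class

    # system_type selects which configured credential set BrynQ should use
    monday = ExtractMonday(system_type='source', debug=False)

    # Page through all boards, then pull the items of one (placeholder) board
    boards = monday.get_boards()
    items = monday.get_items(board_id=1234567890, limit=50)

    # Column values are fetched separately, chunked to 25 item IDs per request
    values = monday.get_column_values(item_ids=items['id'].tolist())

    # Activity logs for a date window on the same board
    logs = monday.get_activity_logs_boards(board_id=1234567890, start_date='2024-01-01', end_date='2024-01-31')
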
@@ -0,0 +1,105 @@ brynq_sdk_monday/extract_tracket.py
from brynq_sdk_brynq import BrynQ
import os
import sys
import pandas as pd
from typing import Union, List, Literal, Optional
import requests
import json

basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(basedir)


class ExtractTracket(BrynQ):

    def __init__(self, system_type: Optional[Literal['source', 'target']] = None, debug: bool = False):
        """
        For the full documentation, see: https://avisi-apps.gitbook.io/tracket/api/
        """
        super().__init__()
        self.timeout = 3600
        self.headers = self.__get_headers(system_type)
        self.base_url = "https://us.production.timesheet.avisi-apps.com/api/2.0/"

    def __get_headers(self, system_type):
        """
        Get the credentials for the Tracket API from BrynQ and, with those credentials, get the access_token for Tracket.
        Return the headers with the access_token.
        """
        # Get credentials from BrynQ
        credentials = self.interfaces.credentials.get(system="monday", system_type=system_type)
        credentials = credentials.get('data')

        # With those credentials, get the access_token from Tracket
        endpoint = 'https://us.production.timesheet.avisi-apps.com/api/2.0/oauth2/token'
        payload = json.dumps({
            "grant-type": "client-credentials",
            "monday/account-id": credentials['account_id'],
            "client-id": credentials['client_id'],
            "client-secret": credentials['client_secret']
        })
        headers = {'Content-Type': 'application/json'}
        tracket_response = requests.request("POST", endpoint, headers=headers, data=payload, timeout=self.timeout)
        tracket_response.raise_for_status()

        # Return the headers with the access_token
        access_token = tracket_response.json()['access_token']
        headers = {
            'Authorization': f"Bearer {access_token}",
            'Content-Type': 'application/json'
        }

        return headers

    def get_worklogs(self, date_start: str = None, date_end: str = None, created_since: str = None, created_up_to: str = None, updated_since: str = None, updated_up_to: str = None):
        """
        Get all the worklogs from Tracket.
        :param date_start: get all records from a certain date onwards
        :param date_end: get all records up to a certain date
        :param created_since: get all records created since a certain date
        :param created_up_to: get all records created before a certain date
        :param updated_since: get all records updated since a certain date
        :param updated_up_to: get all records updated before a certain date
        """
        endpoint = f'{self.base_url}timeEntries?size=100&'
        if date_start:
            endpoint = f'{endpoint}fields.date.gte={date_start}&'
        if date_end:
            endpoint = f'{endpoint}fields.date.lte={date_end}&'
        if created_since:
            endpoint = f'{endpoint}fields.createdDate.gte={created_since}&'
        if created_up_to:
            endpoint = f'{endpoint}fields.createdDate.lte={created_up_to}&'
        if updated_since:
            endpoint = f'{endpoint}fields.updatedDate.gte={updated_since}&'
        if updated_up_to:
            endpoint = f'{endpoint}fields.updatedDate.lte={updated_up_to}&'
        continue_loop = True
        df = pd.DataFrame()
        full_url = endpoint
        while continue_loop:
            response = requests.get(full_url, headers=self.headers, timeout=self.timeout)
            response.raise_for_status()
            response_data = response.json()
            worklogs = response_data.get('items')
            worklogs = worklogs if worklogs else []
            next_cursor = response_data.get('nextCursor')
            if len(worklogs) > 0:
                df_temp = pd.DataFrame(worklogs)
                df = pd.concat([df, df_temp])
            if next_cursor:
                full_url = f'{endpoint}after={next_cursor}'
            else:
                continue_loop = False
        return df

    def get_categories(self):
        """
        Get all the hour categories from Tracket.
        """
        endpoint = f'{self.base_url}templates/timeEntry/fields/category/options'
        response = requests.request("GET", endpoint, headers=self.headers, timeout=self.timeout)
        response.raise_for_status()
        data = response.json()['items']
        df = pd.DataFrame(data)
        return df
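
Usage sketch (not part of the wheel contents), assuming the Tracket credentials (account_id, client_id, client_secret) are present in the BrynQ "monday" credential set and that __init__.py re-exports the class:

    from brynq_sdk_monday import ExtractTracket

    tracket = ExtractTracket(system_type='source')

    # Worklogs are paged server-side via nextCursor; the date filters map to query parameters
    worklogs = tracket.get_worklogs(date_start='2024-01-01', date_end='2024-01-31')
    categories = tracket.get_categories()
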
@@ -0,0 +1,150 @@ brynq_sdk_monday/upload_tracket.py
from brynq_sdk_brynq import BrynQ
import os
import sys
import pandas as pd
from typing import Union, List, Literal, Optional
import warnings
import requests
import json

basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(basedir)


class UploadTracket(BrynQ):

    def __init__(self, system_type: Optional[Literal['source', 'target']] = None, debug: bool = False):
        """
        For the full documentation, see: https://avisi-apps.gitbook.io/tracket/api/
        """
        super().__init__()
        self.timeout = 3600
        self.headers = self.__get_headers(system_type)
        self.base_url = "https://us.production.timesheet.avisi-apps.com/api/2.0/"
        self.debug = debug

    def __get_headers(self, system_type):
        """
        Get the credentials for the Tracket API from BrynQ and, with those credentials, get the access_token for Tracket.
        Return the headers with the access_token.
        """
        # Get credentials from BrynQ
        credentials = self.interfaces.credentials.get(system="monday", system_type=system_type)
        credentials = credentials.get('data')

        # With those credentials, get the access_token from Tracket
        endpoint = 'https://us.production.timesheet.avisi-apps.com/api/2.0/oauth2/token'
        payload = json.dumps({
            "grant-type": "client-credentials",
            "monday/account-id": credentials['account_id'],
            "client-id": credentials['client_id'],
            "client-secret": credentials['client_secret']
        })
        headers = {'Content-Type': 'application/json'}
        tracket_response = requests.request("POST", endpoint, headers=headers, data=payload, timeout=self.timeout)
        tracket_response.raise_for_status()

        # Return the headers with the access_token
        access_token = tracket_response.json()['access_token']
        headers = {
            'Authorization': f"Bearer {access_token}",
            'Content-Type': 'application/json'
        }

        return headers

    @staticmethod
    def __check_fields(data: Union[dict, List], required_fields: List, allowed_fields: List):
        if isinstance(data, dict):
            data = data.keys()

        for field in data:
            if field not in allowed_fields and field not in required_fields:
                warnings.warn('Field {field} is not implemented. Optional fields are: {allowed_fields}'.format(field=field, allowed_fields=tuple(allowed_fields)))

        for field in required_fields:
            if field not in data:
                raise ValueError('Field {field} is required. Required fields are: {required_fields}'.format(field=field, required_fields=tuple(required_fields)))

    def create_worklog(self, data: dict) -> requests.Response:
        """
        Create a new worklog in Tracket.
        :param data: A dictionary with all the required fields to create a worklog.
        """
        required_fields = ['worklogMinutes', 'worklogDate', 'itemId', 'userId']
        allowed_fields = ['worklogCategory', 'description', 'worklogBillableMinutes', 'team']
        self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)

        url = f'{self.base_url}timeEntries'

        base_body = {
            "minutes": data['worklogMinutes'],
            "date": data['worklogDate'],
            "item": data['itemId'],
            "user": data['userId'],
        }

        if 'worklogCategory' in data:
            base_body["customFields"] = {
                "category": data['worklogCategory']
            }

        # Collect the optional fields in a dict before merging them into the body
        fields_to_update = {}
        if 'description' in data:
            fields_to_update["description"] = data['description']
        if 'worklogBillableMinutes' in data:
            fields_to_update["billableMinutes"] = data['worklogBillableMinutes']
        if 'team' in data:
            fields_to_update["team"] = data['team']
        base_body.update(fields_to_update)

        if self.debug:
            print(json.dumps(base_body))

        response = requests.request("POST", url, data=json.dumps(base_body), headers=self.headers, timeout=self.timeout)
        return response

    def update_worklog(self, worklog_id: str, data: dict) -> requests.Response:
        """
        Update an existing worklog in Tracket.
        :param worklog_id: The ID of the worklog that you want to update.
        :param data: A dictionary with all the required fields to update a worklog.
        """
        required_fields = ['worklogMinutes', 'worklogDate', 'itemId', 'userId']
        allowed_fields = ['worklogCategory', 'description', 'worklogBillableMinutes', 'team']
        self.__check_fields(data=data, required_fields=required_fields, allowed_fields=allowed_fields)

        url = f'{self.base_url}timeEntries/{worklog_id}'

        base_body = {
            "minutes": data['worklogMinutes'],
            "date": data['worklogDate'],
            "item": data['itemId'],
            "user": data['userId']
        }

        if 'worklogCategory' in data:
            base_body["customFields"] = {
                "category": data['worklogCategory']
            }

        # Collect the optional fields in a dict before merging them into the body
        fields_to_update = {}
        if 'description' in data:
            fields_to_update["description"] = data['description']
        if 'worklogBillableMinutes' in data:
            fields_to_update["billableMinutes"] = data['worklogBillableMinutes']
        if 'team' in data:
            fields_to_update["team"] = data['team']
        base_body.update(fields_to_update)

        if self.debug:
            print(json.dumps(base_body))

        response = requests.request("PUT", url, data=json.dumps(base_body), headers=self.headers, timeout=self.timeout)
        return response

    def delete_worklog(self, worklog_id: str) -> requests.Response:
        """
        Delete a worklog from Tracket.
        :param worklog_id: The ID of the worklog that you want to delete.
        """
        url = f'{self.base_url}timeEntries/{worklog_id}'
        response = requests.request("DELETE", url, headers=self.headers, timeout=self.timeout)
        return response
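
Usage sketch (not part of the wheel contents), with placeholder IDs; note that the upload methods return the raw requests.Response without checking the status for you:

    from brynq_sdk_monday import UploadTracket  # assumes __init__.py re-exports the class

    uploader = UploadTracket(system_type='target', debug=True)

    # worklogMinutes, worklogDate, itemId and userId are required; description is optional
    response = uploader.create_worklog(data={
        'worklogMinutes': 90,
        'worklogDate': '2024-01-15',
        'itemId': 1234567890,
        'userId': 987654,
        'description': 'Example entry',
    })
    response.raise_for_status()  # status is not checked inside create_worklog
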
@@ -0,0 +1,16 @@ brynq_sdk_monday-2.1.2.dist-info/METADATA
Metadata-Version: 2.4
Name: brynq_sdk_monday
Version: 2.1.2
Summary: Monday.com wrapper from BrynQ
Author: BrynQ
Author-email: support@brynq.com
License: BrynQ License
Requires-Dist: brynq-sdk-brynq<5,>=4
Dynamic: author
Dynamic: author-email
Dynamic: description
Dynamic: license
Dynamic: requires-dist
Dynamic: summary

Monday.com wrapper from BrynQ
@@ -0,0 +1,8 @@ brynq_sdk_monday-2.1.2.dist-info/RECORD
brynq_sdk_monday/__init__.py,sha256=mBtdOdiAEp_o1RrwkUVWB9RWdJaG5R9LpmTXz-eyzwU,127
brynq_sdk_monday/extract_monday.py,sha256=WsnIXDo0jpyGJ263-xiZo5IGI6nRjK8z-Mw1jtHXf24,15946
brynq_sdk_monday/extract_tracket.py,sha256=M3ISHF-4t54b8gfBGZ8Latrlqv-mzq46MkSo3NEMH6A,4540
brynq_sdk_monday/upload_tracket.py,sha256=oRcFNwWSSjebTNsgmdUm8AxPLcnWKmuNXUAE2QIKEzQ,6391
brynq_sdk_monday-2.1.2.dist-info/METADATA,sha256=LhQZamW-EVqKhjMe_USdiljdlkzxCCjypKYaazeKQLY,352
brynq_sdk_monday-2.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
brynq_sdk_monday-2.1.2.dist-info/top_level.txt,sha256=04GPvAhh_JA0TxgrLk__3MYMHISNjuyR-9OpiwP-vyI,17
brynq_sdk_monday-2.1.2.dist-info/RECORD,,
@@ -0,0 +1 @@ brynq_sdk_monday-2.1.2.dist-info/top_level.txt
brynq_sdk_monday