monday_ruby 1.0.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.env +1 -1
- data/.rspec +0 -1
- data/.rubocop.yml +19 -0
- data/.simplecov +1 -0
- data/CHANGELOG.md +49 -0
- data/CONTRIBUTING.md +165 -0
- data/README.md +167 -88
- data/docs/.vitepress/config.mjs +255 -0
- data/docs/.vitepress/theme/index.js +4 -0
- data/docs/.vitepress/theme/style.css +43 -0
- data/docs/README.md +80 -0
- data/docs/explanation/architecture.md +507 -0
- data/docs/explanation/best-practices/errors.md +478 -0
- data/docs/explanation/best-practices/performance.md +1084 -0
- data/docs/explanation/best-practices/rate-limiting.md +630 -0
- data/docs/explanation/best-practices/testing.md +820 -0
- data/docs/explanation/column-values.md +857 -0
- data/docs/explanation/design.md +795 -0
- data/docs/explanation/graphql.md +356 -0
- data/docs/explanation/migration/v1.md +808 -0
- data/docs/explanation/pagination.md +447 -0
- data/docs/guides/advanced/batch.md +1274 -0
- data/docs/guides/advanced/complex-queries.md +1114 -0
- data/docs/guides/advanced/errors.md +818 -0
- data/docs/guides/advanced/pagination.md +934 -0
- data/docs/guides/advanced/rate-limiting.md +981 -0
- data/docs/guides/authentication.md +286 -0
- data/docs/guides/boards/create.md +386 -0
- data/docs/guides/boards/delete.md +405 -0
- data/docs/guides/boards/duplicate.md +511 -0
- data/docs/guides/boards/query.md +530 -0
- data/docs/guides/boards/update.md +453 -0
- data/docs/guides/columns/create.md +452 -0
- data/docs/guides/columns/metadata.md +492 -0
- data/docs/guides/columns/query.md +455 -0
- data/docs/guides/columns/update-multiple.md +459 -0
- data/docs/guides/columns/update-values.md +509 -0
- data/docs/guides/files/add-to-column.md +40 -0
- data/docs/guides/files/add-to-update.md +37 -0
- data/docs/guides/files/clear-column.md +33 -0
- data/docs/guides/first-request.md +285 -0
- data/docs/guides/folders/manage.md +750 -0
- data/docs/guides/groups/items.md +626 -0
- data/docs/guides/groups/manage.md +501 -0
- data/docs/guides/installation.md +169 -0
- data/docs/guides/items/create.md +493 -0
- data/docs/guides/items/delete.md +514 -0
- data/docs/guides/items/query.md +605 -0
- data/docs/guides/items/subitems.md +483 -0
- data/docs/guides/items/update.md +699 -0
- data/docs/guides/updates/manage.md +619 -0
- data/docs/guides/use-cases/dashboard.md +1421 -0
- data/docs/guides/use-cases/import.md +1962 -0
- data/docs/guides/use-cases/task-management.md +1381 -0
- data/docs/guides/workspaces/manage.md +502 -0
- data/docs/index.md +69 -0
- data/docs/package-lock.json +2468 -0
- data/docs/package.json +13 -0
- data/docs/reference/client.md +540 -0
- data/docs/reference/configuration.md +586 -0
- data/docs/reference/errors.md +693 -0
- data/docs/reference/resources/account.md +208 -0
- data/docs/reference/resources/activity-log.md +369 -0
- data/docs/reference/resources/board-view.md +359 -0
- data/docs/reference/resources/board.md +393 -0
- data/docs/reference/resources/column.md +543 -0
- data/docs/reference/resources/file.md +236 -0
- data/docs/reference/resources/folder.md +386 -0
- data/docs/reference/resources/group.md +507 -0
- data/docs/reference/resources/item.md +348 -0
- data/docs/reference/resources/subitem.md +267 -0
- data/docs/reference/resources/update.md +259 -0
- data/docs/reference/resources/workspace.md +213 -0
- data/docs/reference/response.md +560 -0
- data/docs/tutorial/first-integration.md +713 -0
- data/lib/monday/client.rb +41 -2
- data/lib/monday/configuration.rb +13 -0
- data/lib/monday/deprecation.rb +23 -0
- data/lib/monday/error.rb +5 -2
- data/lib/monday/request.rb +19 -1
- data/lib/monday/resources/base.rb +4 -0
- data/lib/monday/resources/board.rb +52 -0
- data/lib/monday/resources/column.rb +6 -0
- data/lib/monday/resources/file.rb +56 -0
- data/lib/monday/resources/folder.rb +55 -0
- data/lib/monday/resources/group.rb +66 -0
- data/lib/monday/resources/item.rb +62 -0
- data/lib/monday/util.rb +33 -1
- data/lib/monday/version.rb +1 -1
- data/lib/monday_ruby.rb +1 -0
- metadata +92 -11
- data/monday_ruby.gemspec +0 -39
|
@@ -0,0 +1,1421 @@
|
|
|
1
|
+
# Project Dashboard System
|
|
2
|
+
|
|
3
|
+
Build a comprehensive project dashboard that aggregates data from multiple boards, calculates health metrics, generates reports, and prepares visualization-ready data.
|
|
4
|
+
|
|
5
|
+
## What You'll Build
|
|
6
|
+
|
|
7
|
+
A complete project dashboard system that:
|
|
8
|
+
- Aggregates data from multiple project boards
|
|
9
|
+
- Calculates project health metrics (completion rates, overdue items, team workload)
|
|
10
|
+
- Tracks milestones and timeline performance
|
|
11
|
+
- Generates exportable reports (JSON/CSV)
|
|
12
|
+
- Provides real-time KPI data for dashboards
|
|
13
|
+
|
|
14
|
+
## Prerequisites
|
|
15
|
+
|
|
16
|
+
```ruby
|
|
17
|
+
require "monday_ruby"
|
|
18
|
+
require "json"
|
|
19
|
+
require "csv"
|
|
20
|
+
require "date"
|
|
21
|
+
|
|
22
|
+
Monday.configure do |config|
|
|
23
|
+
config.token = ENV["MONDAY_TOKEN"]
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
client = Monday::Client.new
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
## Step 1: Query Workspace and Identify Project Boards
|
|
30
|
+
|
|
31
|
+
First, discover all boards in your workspace and identify which ones are project boards.
|
|
32
|
+
|
|
33
|
+
### Get All Workspace Boards
|
|
34
|
+
|
|
35
|
+
```ruby
|
|
36
|
+
def get_workspace_boards(client, workspace_id)
|
|
37
|
+
response = client.board.query(
|
|
38
|
+
args: { workspace_ids: [workspace_id] },
|
|
39
|
+
select: [
|
|
40
|
+
"id",
|
|
41
|
+
"name",
|
|
42
|
+
"description",
|
|
43
|
+
"state",
|
|
44
|
+
{
|
|
45
|
+
groups: ["id", "title"],
|
|
46
|
+
columns: ["id", "title", "type"]
|
|
47
|
+
}
|
|
48
|
+
]
|
|
49
|
+
)
|
|
50
|
+
|
|
51
|
+
if response.success?
|
|
52
|
+
boards = response.body.dig("data", "boards") || []
|
|
53
|
+
puts "Found #{boards.length} boards in workspace #{workspace_id}"
|
|
54
|
+
boards
|
|
55
|
+
else
|
|
56
|
+
puts "Failed to fetch boards"
|
|
57
|
+
[]
|
|
58
|
+
end
|
|
59
|
+
end
|
|
60
|
+
|
|
61
|
+
# Usage
|
|
62
|
+
workspace_id = 1234567
|
|
63
|
+
boards = get_workspace_boards(client, workspace_id)
|
|
64
|
+
|
|
65
|
+
boards.each do |board|
|
|
66
|
+
puts " #{board['name']} - #{board['groups'].length} groups, #{board['columns'].length} columns"
|
|
67
|
+
end
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
### Filter Project Boards
|
|
71
|
+
|
|
72
|
+
Identify boards that represent projects based on naming conventions or board structure:
|
|
73
|
+
|
|
74
|
+
```ruby
|
|
75
|
+
def identify_project_boards(boards)
|
|
76
|
+
project_boards = boards.select do |board|
|
|
77
|
+
# Filter by name pattern (e.g., boards with "Project" in the name)
|
|
78
|
+
# or by having specific columns (status, timeline, person, etc.)
|
|
79
|
+
has_project_columns = board["columns"].any? { |col| col["type"] == "status" } &&
|
|
80
|
+
board["columns"].any? { |col| col["type"] == "timeline" }
|
|
81
|
+
|
|
82
|
+
has_project_columns || board["name"].match?(/project/i)
|
|
83
|
+
end
|
|
84
|
+
|
|
85
|
+
puts "\nIdentified #{project_boards.length} project boards:"
|
|
86
|
+
project_boards.each do |board|
|
|
87
|
+
puts " • #{board['name']}"
|
|
88
|
+
end
|
|
89
|
+
|
|
90
|
+
project_boards
|
|
91
|
+
end
|
|
92
|
+
|
|
93
|
+
# Usage
|
|
94
|
+
project_boards = identify_project_boards(boards)
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
## Step 2: Aggregate Data from Multiple Boards
|
|
98
|
+
|
|
99
|
+
Collect all items from project boards using pagination for large datasets.
|
|
100
|
+
|
|
101
|
+
### Fetch All Items with Pagination
|
|
102
|
+
|
|
103
|
+
```ruby
|
|
104
|
+
def fetch_all_board_items(client, board_id)
|
|
105
|
+
all_items = []
|
|
106
|
+
cursor = nil
|
|
107
|
+
page_count = 0
|
|
108
|
+
|
|
109
|
+
loop do
|
|
110
|
+
page_count += 1
|
|
111
|
+
|
|
112
|
+
response = client.board.items_page(
|
|
113
|
+
board_ids: board_id,
|
|
114
|
+
limit: 100,
|
|
115
|
+
cursor: cursor,
|
|
116
|
+
select: [
|
|
117
|
+
"id",
|
|
118
|
+
"name",
|
|
119
|
+
"state",
|
|
120
|
+
"created_at",
|
|
121
|
+
"updated_at",
|
|
122
|
+
{
|
|
123
|
+
group: ["id", "title"],
|
|
124
|
+
column_values: [
|
|
125
|
+
"id",
|
|
126
|
+
"text",
|
|
127
|
+
"type",
|
|
128
|
+
"value",
|
|
129
|
+
{
|
|
130
|
+
"... on StatusValue": ["label"],
|
|
131
|
+
"... on DateValue": ["date"],
|
|
132
|
+
"... on PeopleValue": ["persons_and_teams"],
|
|
133
|
+
"... on TimelineValue": ["from", "to"]
|
|
134
|
+
}
|
|
135
|
+
]
|
|
136
|
+
}
|
|
137
|
+
]
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
break unless response.success?
|
|
141
|
+
|
|
142
|
+
items_page = response.body.dig("data", "boards", 0, "items_page")
|
|
143
|
+
items = items_page["items"] || []
|
|
144
|
+
|
|
145
|
+
break if items.empty?
|
|
146
|
+
|
|
147
|
+
all_items.concat(items)
|
|
148
|
+
cursor = items_page["cursor"]
|
|
149
|
+
|
|
150
|
+
puts " Page #{page_count}: Fetched #{items.length} items (Total: #{all_items.length})"
|
|
151
|
+
|
|
152
|
+
break if cursor.nil?
|
|
153
|
+
end
|
|
154
|
+
|
|
155
|
+
all_items
|
|
156
|
+
end
|
|
157
|
+
|
|
158
|
+
# Usage
|
|
159
|
+
def aggregate_project_data(client, project_boards)
|
|
160
|
+
project_data = {}
|
|
161
|
+
|
|
162
|
+
project_boards.each do |board|
|
|
163
|
+
puts "\nFetching items from: #{board['name']}"
|
|
164
|
+
items = fetch_all_board_items(client, board["id"])
|
|
165
|
+
|
|
166
|
+
project_data[board["id"]] = {
|
|
167
|
+
"board" => board,
|
|
168
|
+
"items" => items,
|
|
169
|
+
"total_items" => items.length
|
|
170
|
+
}
|
|
171
|
+
end
|
|
172
|
+
|
|
173
|
+
project_data
|
|
174
|
+
end
|
|
175
|
+
|
|
176
|
+
# Collect all data
|
|
177
|
+
project_data = aggregate_project_data(client, project_boards)
|
|
178
|
+
puts "\n✓ Aggregated data from #{project_data.keys.length} boards"
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
### Group Data by Project
|
|
182
|
+
|
|
183
|
+
```ruby
|
|
184
|
+
def group_items_by_status(items)
|
|
185
|
+
grouped = Hash.new { |h, k| h[k] = [] }
|
|
186
|
+
|
|
187
|
+
items.each do |item|
|
|
188
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
189
|
+
status = status_column&.dig("text") || "No Status"
|
|
190
|
+
grouped[status] << item
|
|
191
|
+
end
|
|
192
|
+
|
|
193
|
+
grouped
|
|
194
|
+
end
|
|
195
|
+
|
|
196
|
+
# Usage
|
|
197
|
+
project_data.each do |board_id, data|
|
|
198
|
+
board_name = data["board"]["name"]
|
|
199
|
+
grouped = group_items_by_status(data["items"])
|
|
200
|
+
|
|
201
|
+
puts "\n#{board_name}:"
|
|
202
|
+
grouped.each do |status, items|
|
|
203
|
+
puts " #{status}: #{items.length} items"
|
|
204
|
+
end
|
|
205
|
+
end
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
## Step 3: Calculate Project Health Metrics
|
|
209
|
+
|
|
210
|
+
Calculate key metrics to assess project health and performance.
|
|
211
|
+
|
|
212
|
+
### Calculate Completion Percentage
|
|
213
|
+
|
|
214
|
+
```ruby
|
|
215
|
+
def calculate_completion_rate(items)
|
|
216
|
+
return 0 if items.empty?
|
|
217
|
+
|
|
218
|
+
completed_states = ["done", "completed"]
|
|
219
|
+
completed_count = items.count do |item|
|
|
220
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
221
|
+
status_text = status_column&.dig("text")&.downcase || ""
|
|
222
|
+
completed_states.any? { |state| status_text.include?(state) }
|
|
223
|
+
end
|
|
224
|
+
|
|
225
|
+
(completed_count.to_f / items.length * 100).round(2)
|
|
226
|
+
end
|
|
227
|
+
|
|
228
|
+
# Usage
|
|
229
|
+
project_data.each do |board_id, data|
|
|
230
|
+
completion_rate = calculate_completion_rate(data["items"])
|
|
231
|
+
puts "#{data['board']['name']}: #{completion_rate}% complete"
|
|
232
|
+
end
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
### Count Overdue Items
|
|
236
|
+
|
|
237
|
+
```ruby
|
|
238
|
+
def count_overdue_items(items)
|
|
239
|
+
today = Date.today
|
|
240
|
+
overdue_items = []
|
|
241
|
+
|
|
242
|
+
items.each do |item|
|
|
243
|
+
# Check date columns
|
|
244
|
+
date_column = item["column_values"].find { |cv| cv["type"] == "date" }
|
|
245
|
+
if date_column && date_column["text"]
|
|
246
|
+
begin
|
|
247
|
+
due_date = Date.parse(date_column["text"])
|
|
248
|
+
if due_date < today
|
|
249
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
250
|
+
status = status_column&.dig("text")&.downcase || ""
|
|
251
|
+
|
|
252
|
+
# Only count as overdue if not completed
|
|
253
|
+
unless status.include?("done") || status.include?("completed")
|
|
254
|
+
overdue_items << {
|
|
255
|
+
"item" => item,
|
|
256
|
+
"due_date" => due_date,
|
|
257
|
+
"days_overdue" => (today - due_date).to_i
|
|
258
|
+
}
|
|
259
|
+
end
|
|
260
|
+
end
|
|
261
|
+
rescue Date::Error
|
|
262
|
+
# Invalid date format, skip
|
|
263
|
+
end
|
|
264
|
+
end
|
|
265
|
+
|
|
266
|
+
# Check timeline columns
|
|
267
|
+
timeline_column = item["column_values"].find { |cv| cv["type"] == "timeline" }
|
|
268
|
+
if timeline_column
|
|
269
|
+
begin
|
|
270
|
+
value = JSON.parse(timeline_column["value"] || "{}")
|
|
271
|
+
if value["to"]
|
|
272
|
+
end_date = Date.parse(value["to"])
|
|
273
|
+
if end_date < today
|
|
274
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
275
|
+
status = status_column&.dig("text")&.downcase || ""
|
|
276
|
+
|
|
277
|
+
unless status.include?("done") || status.include?("completed")
|
|
278
|
+
overdue_items << {
|
|
279
|
+
"item" => item,
|
|
280
|
+
"due_date" => end_date,
|
|
281
|
+
"days_overdue" => (today - end_date).to_i
|
|
282
|
+
}
|
|
283
|
+
end
|
|
284
|
+
end
|
|
285
|
+
end
|
|
286
|
+
rescue JSON::ParserError, Date::Error
|
|
287
|
+
# Invalid format, skip
|
|
288
|
+
end
|
|
289
|
+
end
|
|
290
|
+
end
|
|
291
|
+
|
|
292
|
+
overdue_items.uniq { |oi| oi["item"]["id"] }
|
|
293
|
+
end
|
|
294
|
+
|
|
295
|
+
# Usage
|
|
296
|
+
project_data.each do |board_id, data|
|
|
297
|
+
overdue = count_overdue_items(data["items"])
|
|
298
|
+
puts "#{data['board']['name']}: #{overdue.length} overdue items"
|
|
299
|
+
|
|
300
|
+
if overdue.any?
|
|
301
|
+
puts " Most overdue:"
|
|
302
|
+
overdue.sort_by { |oi| -oi["days_overdue"] }.first(3).each do |oi|
|
|
303
|
+
puts " • #{oi['item']['name']} (#{oi['days_overdue']} days)"
|
|
304
|
+
end
|
|
305
|
+
end
|
|
306
|
+
end
|
|
307
|
+
```
|
|
308
|
+
|
|
309
|
+
### Calculate Team Workload Distribution
|
|
310
|
+
|
|
311
|
+
```ruby
|
|
312
|
+
def calculate_team_workload(items)
|
|
313
|
+
workload = Hash.new { |h, k| h[k] = { total: 0, active: 0, completed: 0 } }
|
|
314
|
+
|
|
315
|
+
items.each do |item|
|
|
316
|
+
# Find person column
|
|
317
|
+
person_column = item["column_values"].find { |cv| cv["type"] == "people" }
|
|
318
|
+
next unless person_column
|
|
319
|
+
|
|
320
|
+
begin
|
|
321
|
+
value = JSON.parse(person_column["value"] || "{}")
|
|
322
|
+
persons = value["personsAndTeams"] || []
|
|
323
|
+
|
|
324
|
+
persons.each do |person|
|
|
325
|
+
person_id = person["id"]
|
|
326
|
+
person_name = person_column["text"] || "Unassigned"
|
|
327
|
+
|
|
328
|
+
# Determine status
|
|
329
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
330
|
+
status = status_column&.dig("text")&.downcase || ""
|
|
331
|
+
|
|
332
|
+
workload[person_name][:total] += 1
|
|
333
|
+
|
|
334
|
+
if status.include?("done") || status.include?("completed")
|
|
335
|
+
workload[person_name][:completed] += 1
|
|
336
|
+
else
|
|
337
|
+
workload[person_name][:active] += 1
|
|
338
|
+
end
|
|
339
|
+
end
|
|
340
|
+
rescue JSON::ParserError
|
|
341
|
+
# Invalid format, skip
|
|
342
|
+
end
|
|
343
|
+
end
|
|
344
|
+
|
|
345
|
+
workload
|
|
346
|
+
end
|
|
347
|
+
|
|
348
|
+
# Usage
|
|
349
|
+
project_data.each do |board_id, data|
|
|
350
|
+
workload = calculate_team_workload(data["items"])
|
|
351
|
+
|
|
352
|
+
puts "\n#{data['board']['name']} - Team Workload:"
|
|
353
|
+
workload.sort_by { |name, stats| -stats[:total] }.each do |name, stats|
|
|
354
|
+
completion_rate = stats[:total] > 0 ? (stats[:completed].to_f / stats[:total] * 100).round(1) : 0
|
|
355
|
+
puts " #{name}:"
|
|
356
|
+
puts " Total: #{stats[:total]} | Active: #{stats[:active]} | Completed: #{stats[:completed]} (#{completion_rate}%)"
|
|
357
|
+
end
|
|
358
|
+
end
|
|
359
|
+
```
|
|
360
|
+
|
|
361
|
+
### Analyze Timeline Performance
|
|
362
|
+
|
|
363
|
+
```ruby
|
|
364
|
+
def analyze_timeline_performance(items)
|
|
365
|
+
today = Date.today
|
|
366
|
+
timeline_stats = {
|
|
367
|
+
on_time: 0,
|
|
368
|
+
at_risk: 0, # Due within 7 days
|
|
369
|
+
delayed: 0,
|
|
370
|
+
completed_on_time: 0,
|
|
371
|
+
completed_late: 0,
|
|
372
|
+
no_timeline: 0
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
items.each do |item|
|
|
376
|
+
timeline_column = item["column_values"].find { |cv| cv["type"] == "timeline" }
|
|
377
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
378
|
+
status = status_column&.dig("text")&.downcase || ""
|
|
379
|
+
is_completed = status.include?("done") || status.include?("completed")
|
|
380
|
+
|
|
381
|
+
unless timeline_column && timeline_column["value"]
|
|
382
|
+
timeline_stats[:no_timeline] += 1
|
|
383
|
+
next
|
|
384
|
+
end
|
|
385
|
+
|
|
386
|
+
begin
|
|
387
|
+
value = JSON.parse(timeline_column["value"])
|
|
388
|
+
start_date = value["from"] ? Date.parse(value["from"]) : nil
|
|
389
|
+
end_date = value["to"] ? Date.parse(value["to"]) : nil
|
|
390
|
+
|
|
391
|
+
next unless end_date
|
|
392
|
+
|
|
393
|
+
if is_completed
|
|
394
|
+
# Check if completed on time
|
|
395
|
+
updated_at = item["updated_at"] ? Date.parse(item["updated_at"]) : today
|
|
396
|
+
if updated_at <= end_date
|
|
397
|
+
timeline_stats[:completed_on_time] += 1
|
|
398
|
+
else
|
|
399
|
+
timeline_stats[:completed_late] += 1
|
|
400
|
+
end
|
|
401
|
+
else
|
|
402
|
+
# Active items
|
|
403
|
+
days_until_due = (end_date - today).to_i
|
|
404
|
+
|
|
405
|
+
if days_until_due < 0
|
|
406
|
+
timeline_stats[:delayed] += 1
|
|
407
|
+
elsif days_until_due <= 7
|
|
408
|
+
timeline_stats[:at_risk] += 1
|
|
409
|
+
else
|
|
410
|
+
timeline_stats[:on_time] += 1
|
|
411
|
+
end
|
|
412
|
+
end
|
|
413
|
+
rescue JSON::ParserError, Date::Error
|
|
414
|
+
timeline_stats[:no_timeline] += 1
|
|
415
|
+
end
|
|
416
|
+
end
|
|
417
|
+
|
|
418
|
+
timeline_stats
|
|
419
|
+
end
|
|
420
|
+
|
|
421
|
+
# Usage
|
|
422
|
+
project_data.each do |board_id, data|
|
|
423
|
+
stats = analyze_timeline_performance(data["items"])
|
|
424
|
+
total = data["items"].length
|
|
425
|
+
|
|
426
|
+
puts "\n#{data['board']['name']} - Timeline Performance:"
|
|
427
|
+
puts " Active Items:"
|
|
428
|
+
puts " On Time: #{stats[:on_time]}"
|
|
429
|
+
puts " At Risk (< 7 days): #{stats[:at_risk]}"
|
|
430
|
+
puts " Delayed: #{stats[:delayed]}"
|
|
431
|
+
puts " Completed Items:"
|
|
432
|
+
puts " On Time: #{stats[:completed_on_time]}"
|
|
433
|
+
puts " Late: #{stats[:completed_late]}"
|
|
434
|
+
puts " No Timeline: #{stats[:no_timeline]}"
|
|
435
|
+
end
|
|
436
|
+
```
|
|
437
|
+
|
|
438
|
+
### Calculate Status Distribution
|
|
439
|
+
|
|
440
|
+
```ruby
|
|
441
|
+
def calculate_status_distribution(items)
|
|
442
|
+
distribution = Hash.new(0)
|
|
443
|
+
|
|
444
|
+
items.each do |item|
|
|
445
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
446
|
+
status = status_column&.dig("text") || "No Status"
|
|
447
|
+
distribution[status] += 1
|
|
448
|
+
end
|
|
449
|
+
|
|
450
|
+
distribution.sort_by { |status, count| -count }.to_h
|
|
451
|
+
end
|
|
452
|
+
|
|
453
|
+
# Usage
|
|
454
|
+
project_data.each do |board_id, data|
|
|
455
|
+
distribution = calculate_status_distribution(data["items"])
|
|
456
|
+
|
|
457
|
+
puts "\n#{data['board']['name']} - Status Distribution:"
|
|
458
|
+
distribution.each do |status, count|
|
|
459
|
+
percentage = (count.to_f / data["items"].length * 100).round(1)
|
|
460
|
+
puts " #{status}: #{count} (#{percentage}%)"
|
|
461
|
+
end
|
|
462
|
+
end
|
|
463
|
+
```
|
|
464
|
+
|
|
465
|
+
## Step 4: Generate Reports
|
|
466
|
+
|
|
467
|
+
Create comprehensive reports in multiple formats.
|
|
468
|
+
|
|
469
|
+
### Project Summary Report
|
|
470
|
+
|
|
471
|
+
```ruby
|
|
472
|
+
def generate_project_summary(client, project_data)
|
|
473
|
+
summary = {
|
|
474
|
+
generated_at: Time.now.iso8601,
|
|
475
|
+
total_projects: project_data.keys.length,
|
|
476
|
+
projects: []
|
|
477
|
+
}
|
|
478
|
+
|
|
479
|
+
project_data.each do |board_id, data|
|
|
480
|
+
board = data["board"]
|
|
481
|
+
items = data["items"]
|
|
482
|
+
|
|
483
|
+
project_summary = {
|
|
484
|
+
board_id: board["id"],
|
|
485
|
+
board_name: board["name"],
|
|
486
|
+
description: board["description"],
|
|
487
|
+
metrics: {
|
|
488
|
+
total_items: items.length,
|
|
489
|
+
completion_rate: calculate_completion_rate(items),
|
|
490
|
+
overdue_count: count_overdue_items(items).length,
|
|
491
|
+
status_distribution: calculate_status_distribution(items),
|
|
492
|
+
timeline_performance: analyze_timeline_performance(items),
|
|
493
|
+
team_workload: calculate_team_workload(items)
|
|
494
|
+
},
|
|
495
|
+
health_score: nil # Calculate below
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
# Calculate health score (0-100)
|
|
499
|
+
completion = project_summary[:metrics][:completion_rate]
|
|
500
|
+
overdue_ratio = items.length > 0 ? (project_summary[:metrics][:overdue_count].to_f / items.length * 100) : 0
|
|
501
|
+
timeline = project_summary[:metrics][:timeline_performance]
|
|
502
|
+
active_items = timeline[:on_time] + timeline[:at_risk] + timeline[:delayed]
|
|
503
|
+
on_time_ratio = active_items > 0 ? (timeline[:on_time].to_f / active_items * 100) : 100
|
|
504
|
+
|
|
505
|
+
health_score = (
|
|
506
|
+
(completion * 0.3) + # 30% weight on completion
|
|
507
|
+
((100 - overdue_ratio) * 0.4) + # 40% weight on not being overdue
|
|
508
|
+
(on_time_ratio * 0.3) # 30% weight on timeline performance
|
|
509
|
+
).round(2)
|
|
510
|
+
|
|
511
|
+
project_summary[:health_score] = health_score
|
|
512
|
+
summary[:projects] << project_summary
|
|
513
|
+
end
|
|
514
|
+
|
|
515
|
+
# Sort projects by health score
|
|
516
|
+
summary[:projects].sort_by! { |p| -p[:health_score] }
|
|
517
|
+
|
|
518
|
+
summary
|
|
519
|
+
end
|
|
520
|
+
|
|
521
|
+
# Usage
|
|
522
|
+
summary = generate_project_summary(client, project_data)
|
|
523
|
+
|
|
524
|
+
puts "\n" + "=" * 70
|
|
525
|
+
puts "PROJECT SUMMARY REPORT"
|
|
526
|
+
puts "Generated: #{summary[:generated_at]}"
|
|
527
|
+
puts "=" * 70
|
|
528
|
+
|
|
529
|
+
summary[:projects].each do |project|
|
|
530
|
+
puts "\n#{project[:board_name]}"
|
|
531
|
+
puts " Health Score: #{project[:health_score]}/100"
|
|
532
|
+
puts " Completion: #{project[:metrics][:completion_rate]}%"
|
|
533
|
+
puts " Total Items: #{project[:metrics][:total_items]}"
|
|
534
|
+
puts " Overdue: #{project[:metrics][:overdue_count]}"
|
|
535
|
+
end
|
|
536
|
+
```
|
|
537
|
+
|
|
538
|
+
### Team Performance Report
|
|
539
|
+
|
|
540
|
+
```ruby
|
|
541
|
+
def generate_team_performance_report(project_data)
|
|
542
|
+
global_workload = Hash.new { |h, k| h[k] = { total: 0, active: 0, completed: 0, boards: [] } }
|
|
543
|
+
|
|
544
|
+
project_data.each do |board_id, data|
|
|
545
|
+
board_workload = calculate_team_workload(data["items"])
|
|
546
|
+
|
|
547
|
+
board_workload.each do |person, stats|
|
|
548
|
+
global_workload[person][:total] += stats[:total]
|
|
549
|
+
global_workload[person][:active] += stats[:active]
|
|
550
|
+
global_workload[person][:completed] += stats[:completed]
|
|
551
|
+
global_workload[person][:boards] << data["board"]["name"] unless global_workload[person][:boards].include?(data["board"]["name"])
|
|
552
|
+
end
|
|
553
|
+
end
|
|
554
|
+
|
|
555
|
+
report = {
|
|
556
|
+
generated_at: Time.now.iso8601,
|
|
557
|
+
team_members: []
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
global_workload.each do |name, stats|
|
|
561
|
+
completion_rate = stats[:total] > 0 ? (stats[:completed].to_f / stats[:total] * 100).round(2) : 0
|
|
562
|
+
|
|
563
|
+
report[:team_members] << {
|
|
564
|
+
name: name,
|
|
565
|
+
total_items: stats[:total],
|
|
566
|
+
active_items: stats[:active],
|
|
567
|
+
completed_items: stats[:completed],
|
|
568
|
+
completion_rate: completion_rate,
|
|
569
|
+
boards_involved: stats[:boards]
|
|
570
|
+
}
|
|
571
|
+
end
|
|
572
|
+
|
|
573
|
+
# Sort by total workload
|
|
574
|
+
report[:team_members].sort_by! { |tm| -tm[:total_items] }
|
|
575
|
+
|
|
576
|
+
report
|
|
577
|
+
end
|
|
578
|
+
|
|
579
|
+
# Usage
|
|
580
|
+
team_report = generate_team_performance_report(project_data)
|
|
581
|
+
|
|
582
|
+
puts "\n" + "=" * 70
|
|
583
|
+
puts "TEAM PERFORMANCE REPORT"
|
|
584
|
+
puts "Generated: #{team_report[:generated_at]}"
|
|
585
|
+
puts "=" * 70
|
|
586
|
+
|
|
587
|
+
team_report[:team_members].each do |member|
|
|
588
|
+
puts "\n#{member[:name]}"
|
|
589
|
+
puts " Total Items: #{member[:total_items]}"
|
|
590
|
+
puts " Active: #{member[:active_items]} | Completed: #{member[:completed_items]}"
|
|
591
|
+
puts " Completion Rate: #{member[:completion_rate]}%"
|
|
592
|
+
puts " Involved in: #{member[:boards_involved].join(', ')}"
|
|
593
|
+
end
|
|
594
|
+
```
|
|
595
|
+
|
|
596
|
+
### Timeline Report
|
|
597
|
+
|
|
598
|
+
```ruby
|
|
599
|
+
def generate_timeline_report(project_data)
|
|
600
|
+
report = {
|
|
601
|
+
generated_at: Time.now.iso8601,
|
|
602
|
+
projects: []
|
|
603
|
+
}
|
|
604
|
+
|
|
605
|
+
project_data.each do |board_id, data|
|
|
606
|
+
timeline_perf = analyze_timeline_performance(data["items"])
|
|
607
|
+
overdue = count_overdue_items(data["items"])
|
|
608
|
+
|
|
609
|
+
project_timeline = {
|
|
610
|
+
board_name: data["board"]["name"],
|
|
611
|
+
board_id: board_id,
|
|
612
|
+
timeline_performance: timeline_perf,
|
|
613
|
+
overdue_items: overdue.map do |oi|
|
|
614
|
+
{
|
|
615
|
+
id: oi["item"]["id"],
|
|
616
|
+
name: oi["item"]["name"],
|
|
617
|
+
due_date: oi["due_date"].to_s,
|
|
618
|
+
days_overdue: oi["days_overdue"]
|
|
619
|
+
}
|
|
620
|
+
end.sort_by { |item| -item[:days_overdue] }
|
|
621
|
+
}
|
|
622
|
+
|
|
623
|
+
report[:projects] << project_timeline
|
|
624
|
+
end
|
|
625
|
+
|
|
626
|
+
report
|
|
627
|
+
end
|
|
628
|
+
|
|
629
|
+
# Usage
|
|
630
|
+
timeline_report = generate_timeline_report(project_data)
|
|
631
|
+
|
|
632
|
+
puts "\n" + "=" * 70
|
|
633
|
+
puts "TIMELINE REPORT"
|
|
634
|
+
puts "Generated: #{timeline_report[:generated_at]}"
|
|
635
|
+
puts "=" * 70
|
|
636
|
+
|
|
637
|
+
timeline_report[:projects].each do |project|
|
|
638
|
+
puts "\n#{project[:board_name]}"
|
|
639
|
+
perf = project[:timeline_performance]
|
|
640
|
+
|
|
641
|
+
puts " Active Items:"
|
|
642
|
+
puts " ✓ On Time: #{perf[:on_time]}"
|
|
643
|
+
puts " ⚠ At Risk: #{perf[:at_risk]}"
|
|
644
|
+
puts " ✗ Delayed: #{perf[:delayed]}"
|
|
645
|
+
|
|
646
|
+
if project[:overdue_items].any?
|
|
647
|
+
puts " Most Overdue Items:"
|
|
648
|
+
project[:overdue_items].first(5).each do |item|
|
|
649
|
+
puts " • #{item[:name]} (#{item[:days_overdue]} days)"
|
|
650
|
+
end
|
|
651
|
+
end
|
|
652
|
+
end
|
|
653
|
+
```
|
|
654
|
+
|
|
655
|
+
### Export to JSON
|
|
656
|
+
|
|
657
|
+
```ruby
|
|
658
|
+
def export_to_json(report, filename)
|
|
659
|
+
File.write(filename, JSON.pretty_generate(report))
|
|
660
|
+
puts "✓ Exported to #{filename}"
|
|
661
|
+
end
|
|
662
|
+
|
|
663
|
+
# Usage
|
|
664
|
+
export_to_json(summary, "project_summary.json")
|
|
665
|
+
export_to_json(team_report, "team_performance.json")
|
|
666
|
+
export_to_json(timeline_report, "timeline_report.json")
|
|
667
|
+
```
|
|
668
|
+
|
|
669
|
+
### Export to CSV
|
|
670
|
+
|
|
671
|
+
```ruby
|
|
672
|
+
def export_projects_to_csv(summary, filename)
|
|
673
|
+
CSV.open(filename, "w") do |csv|
|
|
674
|
+
# Headers
|
|
675
|
+
csv << [
|
|
676
|
+
"Board Name",
|
|
677
|
+
"Health Score",
|
|
678
|
+
"Total Items",
|
|
679
|
+
"Completion Rate (%)",
|
|
680
|
+
"Overdue Items",
|
|
681
|
+
"On Time",
|
|
682
|
+
"At Risk",
|
|
683
|
+
"Delayed"
|
|
684
|
+
]
|
|
685
|
+
|
|
686
|
+
# Data rows
|
|
687
|
+
summary[:projects].each do |project|
|
|
688
|
+
timeline = project[:metrics][:timeline_performance]
|
|
689
|
+
csv << [
|
|
690
|
+
project[:board_name],
|
|
691
|
+
project[:health_score],
|
|
692
|
+
project[:metrics][:total_items],
|
|
693
|
+
project[:metrics][:completion_rate],
|
|
694
|
+
project[:metrics][:overdue_count],
|
|
695
|
+
timeline[:on_time],
|
|
696
|
+
timeline[:at_risk],
|
|
697
|
+
timeline[:delayed]
|
|
698
|
+
]
|
|
699
|
+
end
|
|
700
|
+
end
|
|
701
|
+
|
|
702
|
+
puts "✓ Exported to #{filename}"
|
|
703
|
+
end
|
|
704
|
+
|
|
705
|
+
def export_team_to_csv(team_report, filename)
|
|
706
|
+
CSV.open(filename, "w") do |csv|
|
|
707
|
+
# Headers
|
|
708
|
+
csv << [
|
|
709
|
+
"Team Member",
|
|
710
|
+
"Total Items",
|
|
711
|
+
"Active Items",
|
|
712
|
+
"Completed Items",
|
|
713
|
+
"Completion Rate (%)",
|
|
714
|
+
"Boards Involved"
|
|
715
|
+
]
|
|
716
|
+
|
|
717
|
+
# Data rows
|
|
718
|
+
team_report[:team_members].each do |member|
|
|
719
|
+
csv << [
|
|
720
|
+
member[:name],
|
|
721
|
+
member[:total_items],
|
|
722
|
+
member[:active_items],
|
|
723
|
+
member[:completed_items],
|
|
724
|
+
member[:completion_rate],
|
|
725
|
+
member[:boards_involved].join("; ")
|
|
726
|
+
]
|
|
727
|
+
end
|
|
728
|
+
end
|
|
729
|
+
|
|
730
|
+
puts "✓ Exported to #{filename}"
|
|
731
|
+
end
|
|
732
|
+
|
|
733
|
+
# Usage
|
|
734
|
+
export_projects_to_csv(summary, "projects.csv")
|
|
735
|
+
export_team_to_csv(team_report, "team_performance.csv")
|
|
736
|
+
```
|
|
737
|
+
|
|
738
|
+
## Step 5: Real-time Dashboard Data
|
|
739
|
+
|
|
740
|
+
Prepare data for dashboard display with current state and KPIs.
|
|
741
|
+
|
|
742
|
+
### Calculate Dashboard KPIs
|
|
743
|
+
|
|
744
|
+
```ruby
|
|
745
|
+
def calculate_dashboard_kpis(project_data)
|
|
746
|
+
kpis = {
|
|
747
|
+
total_projects: project_data.keys.length,
|
|
748
|
+
total_items: 0,
|
|
749
|
+
completed_items: 0,
|
|
750
|
+
active_items: 0,
|
|
751
|
+
overdue_items: 0,
|
|
752
|
+
at_risk_items: 0,
|
|
753
|
+
team_members: Set.new,
|
|
754
|
+
average_health_score: 0,
|
|
755
|
+
projects_at_risk: 0 # Health score < 60
|
|
756
|
+
}
|
|
757
|
+
|
|
758
|
+
health_scores = []
|
|
759
|
+
|
|
760
|
+
project_data.each do |board_id, data|
|
|
761
|
+
items = data["items"]
|
|
762
|
+
kpis[:total_items] += items.length
|
|
763
|
+
|
|
764
|
+
# Count statuses
|
|
765
|
+
items.each do |item|
|
|
766
|
+
status_column = item["column_values"].find { |cv| cv["type"] == "status" }
|
|
767
|
+
status = status_column&.dig("text")&.downcase || ""
|
|
768
|
+
|
|
769
|
+
if status.include?("done") || status.include?("completed")
|
|
770
|
+
kpis[:completed_items] += 1
|
|
771
|
+
else
|
|
772
|
+
kpis[:active_items] += 1
|
|
773
|
+
end
|
|
774
|
+
|
|
775
|
+
# Track team members
|
|
776
|
+
person_column = item["column_values"].find { |cv| cv["type"] == "people" }
|
|
777
|
+
if person_column && person_column["text"]
|
|
778
|
+
kpis[:team_members].add(person_column["text"])
|
|
779
|
+
end
|
|
780
|
+
end
|
|
781
|
+
|
|
782
|
+
# Overdue and at-risk counts
|
|
783
|
+
overdue = count_overdue_items(items)
|
|
784
|
+
timeline_perf = analyze_timeline_performance(items)
|
|
785
|
+
|
|
786
|
+
kpis[:overdue_items] += overdue.length
|
|
787
|
+
kpis[:at_risk_items] += timeline_perf[:at_risk]
|
|
788
|
+
|
|
789
|
+
# Calculate health score for this project
|
|
790
|
+
completion = calculate_completion_rate(items)
|
|
791
|
+
overdue_ratio = items.length > 0 ? (overdue.length.to_f / items.length * 100) : 0
|
|
792
|
+
active_items = timeline_perf[:on_time] + timeline_perf[:at_risk] + timeline_perf[:delayed]
|
|
793
|
+
on_time_ratio = active_items > 0 ? (timeline_perf[:on_time].to_f / active_items * 100) : 100
|
|
794
|
+
|
|
795
|
+
health_score = (
|
|
796
|
+
(completion * 0.3) +
|
|
797
|
+
((100 - overdue_ratio) * 0.4) +
|
|
798
|
+
(on_time_ratio * 0.3)
|
|
799
|
+
).round(2)
|
|
800
|
+
|
|
801
|
+
health_scores << health_score
|
|
802
|
+
kpis[:projects_at_risk] += 1 if health_score < 60
|
|
803
|
+
end
|
|
804
|
+
|
|
805
|
+
kpis[:average_health_score] = health_scores.empty? ? 0 : (health_scores.sum / health_scores.length).round(2)
|
|
806
|
+
kpis[:team_members] = kpis[:team_members].size
|
|
807
|
+
|
|
808
|
+
kpis
|
|
809
|
+
end
|
|
810
|
+
|
|
811
|
+
# Usage
|
|
812
|
+
kpis = calculate_dashboard_kpis(project_data)
|
|
813
|
+
|
|
814
|
+
puts "\n" + "=" * 70
|
|
815
|
+
puts "DASHBOARD KPIs"
|
|
816
|
+
puts "=" * 70
|
|
817
|
+
puts "Total Projects: #{kpis[:total_projects]}"
|
|
818
|
+
puts "Total Items: #{kpis[:total_items]}"
|
|
819
|
+
puts " Active: #{kpis[:active_items]}"
|
|
820
|
+
puts " Completed: #{kpis[:completed_items]}"
|
|
821
|
+
puts " Overdue: #{kpis[:overdue_items]}"
|
|
822
|
+
puts " At Risk: #{kpis[:at_risk_items]}"
|
|
823
|
+
puts "\nTeam Members: #{kpis[:team_members]}"
|
|
824
|
+
puts "Average Health Score: #{kpis[:average_health_score]}/100"
|
|
825
|
+
puts "Projects at Risk: #{kpis[:projects_at_risk]}"
|
|
826
|
+
```
|
|
827
|
+
|
|
828
|
+
### Format for Dashboard Widgets
|
|
829
|
+
|
|
830
|
+
```ruby
|
|
831
|
+
# Assemble a dashboard-ready payload: KPIs, one widget per project board,
# severity-sorted alerts, and the ten most recently updated items overall.
def format_for_dashboard(project_data)
  dashboard = {
    timestamp: Time.now.iso8601,
    kpis: calculate_dashboard_kpis(project_data),
    projects: [],
    recent_updates: [],
    alerts: []
  }

  project_data.each do |board_id, data|
    items = data["items"]
    board_name = data["board"]["name"]
    completion = calculate_completion_rate(items)
    overdue = count_overdue_items(items)
    perf = analyze_timeline_performance(items)

    # One widget entry per project board.
    dashboard[:projects] << {
      id: board_id,
      name: board_name,
      completion_percentage: completion,
      total_items: items.length,
      overdue_count: overdue.length,
      status_breakdown: calculate_status_distribution(items),
      timeline: {
        on_time: perf[:on_time],
        at_risk: perf[:at_risk],
        delayed: perf[:delayed]
      }
    }

    # High-severity alert: any overdue items (include a sample of up to 3).
    unless overdue.empty?
      sample = overdue.first(3).map { |oi| { name: oi["item"]["name"], days: oi["days_overdue"] } }
      dashboard[:alerts] << {
        severity: "high",
        project: board_name,
        message: "#{overdue.length} overdue items",
        items: sample
      }
    end

    # Medium-severity alert: many items approaching their deadline.
    if perf[:at_risk] > 5
      dashboard[:alerts] << {
        severity: "medium",
        project: board_name,
        message: "#{perf[:at_risk]} items due within 7 days"
      }
    end

    # Collect this board's five most recently updated items (newest first).
    items.sort_by { |item| item["updated_at"] || "" }.last(5).reverse_each do |item|
      dashboard[:recent_updates] << {
        project: board_name,
        item_name: item["name"],
        updated_at: item["updated_at"]
      }
    end
  end

  # High-severity alerts first.
  dashboard[:alerts].sort_by! { |alert| alert[:severity] == "high" ? 0 : 1 }

  # Trim to the ten most recent updates across all projects.
  dashboard[:recent_updates] =
    dashboard[:recent_updates].sort_by { |u| u[:updated_at] || "" }.last(10).reverse

  dashboard
end
|
|
898
|
+
|
|
899
|
+
# Usage
|
|
900
|
+
dashboard_data = format_for_dashboard(project_data)
|
|
901
|
+
|
|
902
|
+
puts "\n" + "=" * 70
|
|
903
|
+
puts "DASHBOARD DATA"
|
|
904
|
+
puts "=" * 70
|
|
905
|
+
|
|
906
|
+
# Show alerts
|
|
907
|
+
puts "\nAlerts (#{dashboard_data[:alerts].length}):"
|
|
908
|
+
dashboard_data[:alerts].each do |alert|
|
|
909
|
+
severity_icon = alert[:severity] == "high" ? "🔴" : "🟡"
|
|
910
|
+
puts " #{severity_icon} #{alert[:project]}: #{alert[:message]}"
|
|
911
|
+
end
|
|
912
|
+
|
|
913
|
+
# Export dashboard data
|
|
914
|
+
export_to_json(dashboard_data, "dashboard_data.json")
|
|
915
|
+
```
|
|
916
|
+
|
|
917
|
+
## Step 6: Workspace Overview
|
|
918
|
+
|
|
919
|
+
Get high-level metrics across all projects in the workspace.
|
|
920
|
+
|
|
921
|
+
### Workspace-Level Metrics
|
|
922
|
+
|
|
923
|
+
```ruby
|
|
924
|
+
# Build a workspace-level rollup of all project boards: aggregate item counts,
# an overall completion rate, and a per-project health breakdown sorted from
# healthiest to least healthy.
#
# NOTE(review): +client+ is accepted for signature parity with the other
# generators but is not used here — everything comes from +project_data+.
def generate_workspace_overview(client, workspace_id, project_data)
  summary = {
    total_projects: project_data.keys.length,
    total_items: 0,
    total_completed: 0,
    overall_completion_rate: 0,
    total_overdue: 0,
    projects_on_track: 0,
    projects_at_risk: 0,
    projects_critical: 0
  }
  breakdown = []

  project_data.each do |_board_id, data|
    items = data["items"]
    completion = calculate_completion_rate(items)
    overdue = count_overdue_items(items)
    perf = analyze_timeline_performance(items)

    summary[:total_items] += items.length

    # An item counts as completed when its status column text reads done/completed.
    done = items.count do |item|
      status_column = item["column_values"].find { |cv| cv["type"] == "status" }
      text = status_column&.dig("text")&.downcase || ""
      text.include?("done") || text.include?("completed")
    end
    summary[:total_completed] += done
    summary[:total_overdue] += overdue.length

    # Weighted health score: 30% completion, 40% not-overdue, 30% on-time.
    overdue_ratio = items.length > 0 ? (overdue.length.to_f / items.length * 100) : 0
    active = perf[:on_time] + perf[:at_risk] + perf[:delayed]
    on_time_ratio = active > 0 ? (perf[:on_time].to_f / active * 100) : 100
    health = ((completion * 0.3) + ((100 - overdue_ratio) * 0.4) + (on_time_ratio * 0.3)).round(2)

    # Bucket the project by health score and bump the matching counter.
    status =
      if health >= 80
        "on_track"
      elsif health >= 60
        "at_risk"
      else
        "critical"
      end
    summary[:"projects_#{status}"] += 1

    breakdown << {
      name: data["board"]["name"],
      health_score: health,
      status: status,
      completion_rate: completion,
      total_items: items.length,
      overdue_items: overdue.length
    }
  end

  # Overall completion rate across every item in the workspace
  # (stays at the preset 0 when there are no items).
  if summary[:total_items] > 0
    summary[:overall_completion_rate] =
      (summary[:total_completed].to_f / summary[:total_items] * 100).round(2)
  end

  {
    workspace_id: workspace_id,
    generated_at: Time.now.iso8601,
    summary: summary,
    # Healthiest projects first.
    project_breakdown: breakdown.sort_by { |p| -p[:health_score] }
  }
end
|
|
1002
|
+
|
|
1003
|
+
# Usage
|
|
1004
|
+
workspace_overview = generate_workspace_overview(client, workspace_id, project_data)
|
|
1005
|
+
|
|
1006
|
+
puts "\n" + "=" * 70
|
|
1007
|
+
puts "WORKSPACE OVERVIEW"
|
|
1008
|
+
puts "=" * 70
|
|
1009
|
+
puts "Total Projects: #{workspace_overview[:summary][:total_projects]}"
|
|
1010
|
+
puts "Total Items: #{workspace_overview[:summary][:total_items]}"
|
|
1011
|
+
puts "Overall Completion: #{workspace_overview[:summary][:overall_completion_rate]}%"
|
|
1012
|
+
puts "Total Overdue: #{workspace_overview[:summary][:total_overdue]}"
|
|
1013
|
+
puts "\nProject Health:"
|
|
1014
|
+
puts " ✓ On Track: #{workspace_overview[:summary][:projects_on_track]}"
|
|
1015
|
+
puts " ⚠ At Risk: #{workspace_overview[:summary][:projects_at_risk]}"
|
|
1016
|
+
puts " ✗ Critical: #{workspace_overview[:summary][:projects_critical]}"
|
|
1017
|
+
|
|
1018
|
+
puts "\nProject Breakdown:"
|
|
1019
|
+
workspace_overview[:project_breakdown].each do |project|
|
|
1020
|
+
status_icon = case project[:status]
|
|
1021
|
+
when "on_track" then "✓"
|
|
1022
|
+
when "at_risk" then "⚠"
|
|
1023
|
+
when "critical" then "✗"
|
|
1024
|
+
end
|
|
1025
|
+
|
|
1026
|
+
puts " #{status_icon} #{project[:name]} - Health: #{project[:health_score]}/100"
|
|
1027
|
+
end
|
|
1028
|
+
```
|
|
1029
|
+
|
|
1030
|
+
### Identify At-Risk Projects
|
|
1031
|
+
|
|
1032
|
+
```ruby
|
|
1033
|
+
# Produce a report of every project whose status is "at_risk" or "critical",
# annotating each with the concrete issues that dragged its health down.
#
# @param workspace_overview [Hash] output of generate_workspace_overview;
#   reads :project_breakdown entries (:name, :status, :health_score,
#   :completion_rate, :overdue_items).
# @return [Hash] { generated_at:, at_risk_count:, projects: [...] }
#
# Fix vs. previous version: issues are now derived directly from each
# at-risk entry while mapping, instead of re-finding the project by name
# afterwards — that lookup was O(n^2) and attached the wrong issues when
# two projects shared a name.
def identify_at_risk_projects(workspace_overview)
  at_risk = workspace_overview[:project_breakdown].select do |project|
    project[:status] == "at_risk" || project[:status] == "critical"
  end

  {
    generated_at: Time.now.iso8601,
    at_risk_count: at_risk.length,
    projects: at_risk.map do |project|
      issues = []
      issues << "Critical health score" if project[:health_score] < 40
      if project[:completion_rate] < 30
        issues << "Low completion rate (#{project[:completion_rate]}%)"
      end
      if project[:overdue_items] > 5
        issues << "High number of overdue items (#{project[:overdue_items]})"
      end

      {
        name: project[:name],
        health_score: project[:health_score],
        status: project[:status],
        issues: issues
      }
    end
  }
end
|
|
1070
|
+
|
|
1071
|
+
# Usage
|
|
1072
|
+
at_risk_report = identify_at_risk_projects(workspace_overview)
|
|
1073
|
+
|
|
1074
|
+
puts "\n" + "=" * 70
|
|
1075
|
+
puts "AT-RISK PROJECTS REPORT"
|
|
1076
|
+
puts "=" * 70
|
|
1077
|
+
puts "#{at_risk_report[:at_risk_count]} projects need attention\n"
|
|
1078
|
+
|
|
1079
|
+
at_risk_report[:projects].each do |project|
|
|
1080
|
+
puts "\n#{project[:name]} (Health: #{project[:health_score]}/100)"
|
|
1081
|
+
puts " Status: #{project[:status].upcase}"
|
|
1082
|
+
puts " Issues:"
|
|
1083
|
+
project[:issues].each do |issue|
|
|
1084
|
+
puts " • #{issue}"
|
|
1085
|
+
end
|
|
1086
|
+
end
|
|
1087
|
+
|
|
1088
|
+
export_to_json(at_risk_report, "at_risk_projects.json")
|
|
1089
|
+
```
|
|
1090
|
+
|
|
1091
|
+
## Step 7: Complete Dashboard Example
|
|
1092
|
+
|
|
1093
|
+
Putting it all together in a single dashboard collector script.
|
|
1094
|
+
|
|
1095
|
+
```ruby
|
|
1096
|
+
require "monday_ruby"
|
|
1097
|
+
require "json"
|
|
1098
|
+
require "csv"
|
|
1099
|
+
require "date"
|
|
1100
|
+
require "set"
|
|
1101
|
+
|
|
1102
|
+
Monday.configure do |config|
|
|
1103
|
+
config.token = ENV["MONDAY_TOKEN"]
|
|
1104
|
+
end
|
|
1105
|
+
|
|
1106
|
+
# End-to-end dashboard collector: fetches every board in a monday.com
# workspace, keeps the ones that look like project boards, paginates
# through their items, then generates/exports/prints the reports.
#
# NOTE(review): the report helpers referenced below (generate_project_summary,
# calculate_dashboard_kpis, format_for_dashboard, ...) are expected to be
# copied in from the earlier sections of this guide — see the comment block
# at the bottom of the class.
class ProjectDashboard
  attr_reader :client, :workspace_id, :project_data

  # @param workspace_id [Integer] the monday.com workspace to report on
  def initialize(workspace_id)
    @client = Monday::Client.new
    @workspace_id = workspace_id
    # Keyed by board id => { "board" => ..., "items" => ..., "total_items" => ... }
    @project_data = {}
  end

  # Collect all project data
  def collect_data
    puts "Collecting project data..."

    # Get all boards in workspace
    boards = get_workspace_boards

    # Filter to project boards
    project_boards = identify_project_boards(boards)

    # Aggregate data from all project boards
    aggregate_project_data(project_boards)

    puts "✓ Data collection complete: #{@project_data.keys.length} projects"
  end

  # Generate all reports
  # @return [Hash] one entry per report, keyed by report kind
  def generate_reports
    puts "\nGenerating reports..."

    reports = {
      summary: generate_project_summary,
      team: generate_team_performance_report,
      timeline: generate_timeline_report,
      kpis: calculate_dashboard_kpis,
      workspace: generate_workspace_overview,
      at_risk: identify_at_risk_projects
    }

    puts "✓ All reports generated"
    reports
  end

  # Export all data
  # NOTE(review): assumes ./reports/ already exists — the usage example
  # below creates it with Dir.mkdir before calling this.
  def export_all(reports)
    puts "\nExporting data..."

    # JSON exports
    File.write("reports/summary.json", JSON.pretty_generate(reports[:summary]))
    File.write("reports/team.json", JSON.pretty_generate(reports[:team]))
    File.write("reports/timeline.json", JSON.pretty_generate(reports[:timeline]))
    File.write("reports/kpis.json", JSON.pretty_generate(reports[:kpis]))
    File.write("reports/workspace.json", JSON.pretty_generate(reports[:workspace]))
    File.write("reports/at_risk.json", JSON.pretty_generate(reports[:at_risk]))

    # CSV exports
    export_projects_csv(reports[:summary])
    export_team_csv(reports[:team])

    # Dashboard data
    dashboard_data = format_for_dashboard
    File.write("reports/dashboard.json", JSON.pretty_generate(dashboard_data))

    puts "✓ All data exported to ./reports/"
  end

  # Display summary
  # Prints a human-readable digest of the generated reports to stdout.
  def display_summary(reports)
    puts "\n" + "=" * 70
    puts "PROJECT DASHBOARD SUMMARY"
    puts "Generated: #{Time.now}"
    puts "=" * 70

    kpis = reports[:kpis]
    puts "\nKEY METRICS"
    puts "  Projects: #{kpis[:total_projects]}"
    puts "  Total Items: #{kpis[:total_items]}"
    puts "  Completed: #{kpis[:completed_items]}"
    puts "  Overdue: #{kpis[:overdue_items]}"
    puts "  At Risk: #{kpis[:at_risk_items]}"
    puts "  Average Health: #{kpis[:average_health_score]}/100"

    puts "\nTOP PERFORMERS"
    # Summary projects are assumed pre-sorted best-first — TODO confirm
    # against generate_project_summary.
    reports[:summary][:projects].first(3).each_with_index do |project, i|
      puts "  #{i + 1}. #{project[:board_name]} (Health: #{project[:health_score]}/100)"
    end

    puts "\nNEED ATTENTION"
    reports[:at_risk][:projects].first(3).each do |project|
      puts "  ⚠ #{project[:name]} (Health: #{project[:health_score]}/100)"
    end

    puts "\n" + "=" * 70
  end

  private

  # Query every board in the workspace, selecting the fields the reports
  # need. Returns [] on request failure rather than raising.
  def get_workspace_boards
    response = @client.board.query(
      args: { workspace_ids: [@workspace_id] },
      select: [
        "id", "name", "description", "state",
        { groups: ["id", "title"], columns: ["id", "title", "type"] }
      ]
    )

    response.success? ? response.body.dig("data", "boards") || [] : []
  end

  # A board is treated as a project board when it has a status column AND
  # either a timeline column or "project" in its name (case-insensitive).
  def identify_project_boards(boards)
    boards.select do |board|
      board["columns"].any? { |col| col["type"] == "status" } &&
      (board["columns"].any? { |col| col["type"] == "timeline" } ||
      board["name"].match?(/project/i))
    end
  end

  # Fetch every item for each project board and stash it in @project_data,
  # keyed by board id.
  def aggregate_project_data(project_boards)
    project_boards.each do |board|
      items = fetch_all_board_items(board["id"])
      @project_data[board["id"]] = {
        "board" => board,
        "items" => items,
        "total_items" => items.length
      }
    end
  end

  # Cursor-paginate through a board's items, 100 per page, until the API
  # stops returning a cursor (or a request fails / a page comes back empty).
  def fetch_all_board_items(board_id)
    all_items = []
    cursor = nil

    loop do
      response = @client.board.items_page(
        board_ids: board_id,
        limit: 100,
        cursor: cursor,
        select: [
          "id", "name", "state", "created_at", "updated_at",
          { group: ["id", "title"], column_values: ["id", "text", "type", "value"] }
        ]
      )

      # A failed page silently truncates the result — partial data is
      # preferred over aborting the whole collection here.
      break unless response.success?

      items_page = response.body.dig("data", "boards", 0, "items_page")
      items = items_page["items"] || []
      break if items.empty?

      all_items.concat(items)
      cursor = items_page["cursor"]
      break if cursor.nil?
    end

    all_items
  end

  # Include all the helper methods from previous sections:
  # - calculate_completion_rate
  # - count_overdue_items
  # - calculate_team_workload
  # - analyze_timeline_performance
  # - calculate_status_distribution
  # - generate_project_summary
  # - generate_team_performance_report
  # - generate_timeline_report
  # - calculate_dashboard_kpis
  # - generate_workspace_overview
  # - identify_at_risk_projects
  # - format_for_dashboard
  # - export_projects_csv
  # - export_team_csv
end
|
|
1278
|
+
|
|
1279
|
+
# Usage
|
|
1280
|
+
dashboard = ProjectDashboard.new(1234567) # Your workspace ID
|
|
1281
|
+
|
|
1282
|
+
# Collect data
|
|
1283
|
+
dashboard.collect_data
|
|
1284
|
+
|
|
1285
|
+
# Generate reports
|
|
1286
|
+
reports = dashboard.generate_reports
|
|
1287
|
+
|
|
1288
|
+
# Export everything
|
|
1289
|
+
Dir.mkdir("reports") unless Dir.exist?("reports")
|
|
1290
|
+
dashboard.export_all(reports)
|
|
1291
|
+
|
|
1292
|
+
# Display summary
|
|
1293
|
+
dashboard.display_summary(reports)
|
|
1294
|
+
```
|
|
1295
|
+
|
|
1296
|
+
## Running the Dashboard
|
|
1297
|
+
|
|
1298
|
+
```bash
|
|
1299
|
+
# Set your API token
|
|
1300
|
+
export MONDAY_TOKEN="your_token_here"
|
|
1301
|
+
|
|
1302
|
+
# Run the dashboard
|
|
1303
|
+
ruby project_dashboard.rb
|
|
1304
|
+
|
|
1305
|
+
# Output:
|
|
1306
|
+
# Collecting project data...
|
|
1307
|
+
# ✓ Data collection complete: 5 projects
|
|
1308
|
+
#
|
|
1309
|
+
# Generating reports...
|
|
1310
|
+
# ✓ All reports generated
|
|
1311
|
+
#
|
|
1312
|
+
# Exporting data...
|
|
1313
|
+
# ✓ All data exported to ./reports/
|
|
1314
|
+
#
|
|
1315
|
+
# ======================================================================
|
|
1316
|
+
# PROJECT DASHBOARD SUMMARY
|
|
1317
|
+
# Generated: 2024-01-15 10:30:00
|
|
1318
|
+
# ======================================================================
|
|
1319
|
+
#
|
|
1320
|
+
# KEY METRICS
|
|
1321
|
+
# Projects: 5
|
|
1322
|
+
# Total Items: 247
|
|
1323
|
+
# Completed: 156
|
|
1324
|
+
# Overdue: 12
|
|
1325
|
+
# At Risk: 8
|
|
1326
|
+
# Average Health: 78.5/100
|
|
1327
|
+
# ...
|
|
1328
|
+
```
|
|
1329
|
+
|
|
1330
|
+
## Best Practices
|
|
1331
|
+
|
|
1332
|
+
### 1. Handle Rate Limits
|
|
1333
|
+
|
|
1334
|
+
Add a short delay between paginated requests to stay within monday.com API rate limits:
|
|
1335
|
+
|
|
1336
|
+
```ruby
|
|
1337
|
+
# Excerpt: the pagination loop body is elided here. Demonstrates inserting a
# small delay between page fetches to stay within monday.com rate limits —
# the sleep is skipped once the final page (nil cursor) has been fetched.
def fetch_all_board_items(board_id)
  # ... pagination logic ...

  sleep(0.5) unless cursor.nil? # Add delay between pages
end
|
|
1342
|
+
```
|
|
1343
|
+
|
|
1344
|
+
### 2. Cache Results
|
|
1345
|
+
|
|
1346
|
+
Store intermediate results to avoid re-fetching:
|
|
1347
|
+
|
|
1348
|
+
```ruby
|
|
1349
|
+
# Load project data from today's cache file when it is fresh (written less
# than an hour ago); otherwise collect from the API and rewrite the cache.
def collect_data_with_cache
  path = "cache/project_data_#{Date.today}.json"
  cache_fresh = File.exist?(path) && File.mtime(path) > (Time.now - 3600)

  if cache_fresh
    puts "Loading from cache..."
    @project_data = JSON.parse(File.read(path))
    return
  end

  collect_data
  File.write(path, JSON.pretty_generate(@project_data))
end
|
|
1360
|
+
```
|
|
1361
|
+
|
|
1362
|
+
### 3. Error Handling
|
|
1363
|
+
|
|
1364
|
+
Add robust error handling:
|
|
1365
|
+
|
|
1366
|
+
```ruby
|
|
1367
|
+
# Excerpt: response processing is elided. Demonstrates bounded retry on
# monday.com complexity-budget errors — wait a minute and retry up to
# 3 times, then give up on the board with a partial result.
def fetch_all_board_items(board_id)
  all_items = []
  cursor = nil
  retry_count = 0

  loop do
    begin
      response = @client.board.items_page(
        board_ids: board_id,
        limit: 100,
        cursor: cursor
      )

      # ... process response ...

    # NOTE(review): +e+ is captured but unused here; the elided body would
    # typically log it. Only complexity errors are retried — anything else
    # propagates.
    rescue Monday::ComplexityException => e
      if retry_count < 3
        retry_count += 1
        sleep(60)
        retry
      else
        puts "Max retries exceeded for board #{board_id}"
        break
      end
    end
  end

  all_items
end
|
|
1396
|
+
```
|
|
1397
|
+
|
|
1398
|
+
### 4. Incremental Updates
|
|
1399
|
+
|
|
1400
|
+
Only fetch items updated since last run:
|
|
1401
|
+
|
|
1402
|
+
```ruby
|
|
1403
|
+
# Return only the items on +board_id+ whose updated_at timestamp parses to a
# date on or after +since_date+. Items with a missing or unparseable
# timestamp are excluded rather than raising.
def fetch_recent_updates(board_id, since_date)
  fetch_all_board_items(board_id).select do |item|
    begin
      Date.parse(item["updated_at"]) >= since_date
    rescue StandardError
      false
    end
  end
end
|
|
1414
|
+
```
|
|
1415
|
+
|
|
1416
|
+
## Next Steps
|
|
1417
|
+
|
|
1418
|
+
- [Pagination guide](/guides/advanced/pagination) - Efficient data fetching
|
|
1419
|
+
- [Error handling](/guides/advanced/errors) - Robust error management
|
|
1420
|
+
- [Complex queries](/guides/advanced/complex-queries) - Advanced data retrieval
|
|
1421
|
+
- [Rate limiting](/guides/advanced/rate-limiting) - API usage optimization
|