tallyfy-1.0.16-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. tallyfy/__init__.py +27 -0
  2. tallyfy/__pycache__/__init__.cpython-310.pyc +0 -0
  3. tallyfy/__pycache__/core.cpython-310.pyc +0 -0
  4. tallyfy/__pycache__/form_fields_management.cpython-310.pyc +0 -0
  5. tallyfy/__pycache__/models.cpython-310.pyc +0 -0
  6. tallyfy/__pycache__/task_management.cpython-310.pyc +0 -0
  7. tallyfy/__pycache__/template_management.cpython-310.pyc +0 -0
  8. tallyfy/__pycache__/user_management.cpython-310.pyc +0 -0
  9. tallyfy/core.py +361 -0
  10. tallyfy/form_fields_management/__init__.py +70 -0
  11. tallyfy/form_fields_management/__pycache__/__init__.cpython-310.pyc +0 -0
  12. tallyfy/form_fields_management/__pycache__/base.cpython-310.pyc +0 -0
  13. tallyfy/form_fields_management/__pycache__/crud_operations.cpython-310.pyc +0 -0
  14. tallyfy/form_fields_management/__pycache__/options_management.cpython-310.pyc +0 -0
  15. tallyfy/form_fields_management/__pycache__/suggestions.cpython-310.pyc +0 -0
  16. tallyfy/form_fields_management/base.py +109 -0
  17. tallyfy/form_fields_management/crud_operations.py +234 -0
  18. tallyfy/form_fields_management/options_management.py +222 -0
  19. tallyfy/form_fields_management/suggestions.py +411 -0
  20. tallyfy/models.py +1464 -0
  21. tallyfy/organization_management/__init__.py +26 -0
  22. tallyfy/organization_management/base.py +76 -0
  23. tallyfy/organization_management/retrieval.py +39 -0
  24. tallyfy/task_management/__init__.py +81 -0
  25. tallyfy/task_management/__pycache__/__init__.cpython-310.pyc +0 -0
  26. tallyfy/task_management/__pycache__/base.cpython-310.pyc +0 -0
  27. tallyfy/task_management/__pycache__/creation.cpython-310.pyc +0 -0
  28. tallyfy/task_management/__pycache__/retrieval.cpython-310.pyc +0 -0
  29. tallyfy/task_management/__pycache__/search.cpython-310.pyc +0 -0
  30. tallyfy/task_management/base.py +125 -0
  31. tallyfy/task_management/creation.py +221 -0
  32. tallyfy/task_management/retrieval.py +252 -0
  33. tallyfy/task_management/search.py +198 -0
  34. tallyfy/template_management/__init__.py +85 -0
  35. tallyfy/template_management/analysis.py +1099 -0
  36. tallyfy/template_management/automation.py +469 -0
  37. tallyfy/template_management/base.py +56 -0
  38. tallyfy/template_management/basic_operations.py +479 -0
  39. tallyfy/template_management/health_assessment.py +793 -0
  40. tallyfy/user_management/__init__.py +70 -0
  41. tallyfy/user_management/__pycache__/__init__.cpython-310.pyc +0 -0
  42. tallyfy/user_management/__pycache__/base.cpython-310.pyc +0 -0
  43. tallyfy/user_management/__pycache__/invitation.cpython-310.pyc +0 -0
  44. tallyfy/user_management/__pycache__/retrieval.cpython-310.pyc +0 -0
  45. tallyfy/user_management/base.py +146 -0
  46. tallyfy/user_management/invitation.py +286 -0
  47. tallyfy/user_management/retrieval.py +381 -0
  48. tallyfy-1.0.16.dist-info/METADATA +742 -0
  49. tallyfy-1.0.16.dist-info/RECORD +52 -0
  50. tallyfy-1.0.16.dist-info/WHEEL +5 -0
  51. tallyfy-1.0.16.dist-info/licenses/LICENSE +21 -0
  52. tallyfy-1.0.16.dist-info/top_level.txt +1 -0
tallyfy/template_management/health_assessment.py
@@ -0,0 +1,793 @@
+ """
+ Comprehensive template health assessment and improvement recommendations
+ """
+
+ import datetime
+ from typing import List, Optional, Dict, Any
+ from .base import TemplateManagerBase
+ from ..models import Template, TallyfyError
+
+
+ class TemplateHealthAssessment(TemplateManagerBase):
+     """Handles comprehensive template health analysis and improvement recommendations"""
+
+     def assess_template_health(self, org_id: str, template_id: str) -> Dict[str, Any]:
+         """
+         Main comprehensive health check method.
+
+         Args:
+             org_id: Organization ID
+             template_id: Template ID to assess
+
+         Returns:
+             Dictionary containing complete health assessment with scores, issues, and recommendations
+
+         Raises:
+             TallyfyError: If the request fails
+         """
+         self._validate_org_id(org_id)
+         self._validate_template_id(template_id)
+
+         try:
+             # Get template with full data - this would need to be injected or imported.
+             # For now, we make the API call directly.
+             template_endpoint = f"organizations/{org_id}/checklists/{template_id}"
+             template_params = {'with': 'steps,automated_actions,prerun'}
+             template_response = self.sdk._make_request('GET', template_endpoint, params=template_params)
+
+             template_data = self._extract_data(template_response)
+             if not template_data:
+                 raise TallyfyError("Unable to retrieve template data for health assessment")
+
+             # Initialize assessment results
+             assessment = {
+                 'template_id': template_id,
+                 'template_title': template_data.get('title', 'Unknown'),
+                 'assessment_timestamp': self._get_current_timestamp(),
+                 'overall_score': 0,
+                 'health_rating': 'poor',
+                 'category_scores': {},
+                 'issues': [],
+                 'recommendations': [],
+                 'improvement_plan': []
+             }
+
+             # Perform individual assessments
+             metadata_score, metadata_issues = self._assess_template_metadata(template_data)
+             step_score, step_issues = self._assess_step_clarity(template_data)
+             form_score, form_issues = self._assess_form_completeness(template_data)
+             automation_score, automation_issues = self._assess_automation_efficiency(org_id, template_id, template_data)
+             deadline_score, deadline_issues = self._assess_deadline_reasonableness(template_data)
+             workflow_score, workflow_issues = self._assess_workflow_logic(template_data)
+
+             # Calculate scores and compile issues
+             assessment['category_scores'] = {
+                 'metadata_quality': {'score': metadata_score, 'max_score': 15},
+                 'step_clarity': {'score': step_score, 'max_score': 20},
+                 'form_completeness': {'score': form_score, 'max_score': 15},
+                 'automation_efficiency': {'score': automation_score, 'max_score': 20},
+                 'deadline_reasonableness': {'score': deadline_score, 'max_score': 15},
+                 'workflow_logic': {'score': workflow_score, 'max_score': 15}
+             }
+
+             # Calculate overall score (out of 100)
+             total_score = (metadata_score + step_score + form_score +
+                            automation_score + deadline_score + workflow_score)
+             assessment['overall_score'] = total_score
+             assessment['health_rating'] = self._get_health_rating(total_score)
+
+             # Compile all issues
+             all_issues = (metadata_issues + step_issues + form_issues +
+                           automation_issues + deadline_issues + workflow_issues)
+             assessment['issues'] = all_issues
+
+             # Generate recommendations based on issues
+             recommendations = []
+             critical_issues = [issue for issue in all_issues if issue.get('severity') == 'critical']
+             high_issues = [issue for issue in all_issues if issue.get('severity') == 'high']
+
+             if critical_issues:
+                 recommendations.append({
+                     'priority': 'critical',
+                     'title': 'Address Critical Issues Immediately',
+                     'description': f'Found {len(critical_issues)} critical issues that require immediate attention',
+                     'action': 'Review and fix all critical issues before using this template in production'
+                 })
+
+             if high_issues:
+                 recommendations.append({
+                     'priority': 'high',
+                     'title': 'Resolve High Priority Issues',
+                     'description': f'Found {len(high_issues)} high priority issues that should be addressed soon',
+                     'action': 'Plan to address these issues in the next template update'
+                 })
+
+             if total_score < 60:
+                 recommendations.append({
+                     'priority': 'high',
+                     'title': 'Template Needs Significant Improvement',
+                     'description': 'Overall template quality is below acceptable standards',
+                     'action': 'Consider comprehensive template redesign or major improvements across all categories'
+                 })
+
+             assessment['recommendations'] = recommendations
+
+             # Generate improvement plan
+             assessment['improvement_plan'] = self._generate_improvement_plan(all_issues, assessment['category_scores'])
+
+             return assessment
+
+         except TallyfyError:
+             raise
+         except Exception as e:
+             self._handle_api_error(e, "assess template health", org_id=org_id, template_id=template_id)
+
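A minimal usage sketch of this method (hypothetical wiring: the `sdk` client variable and the `templates` manager attribute are assumptions for illustration, not names confirmed by this diff; the returned dictionary keys match the `assessment` structure built above):

    # Assumes an authenticated Tallyfy client whose template manager
    # mixes in TemplateHealthAssessment; attribute names are illustrative.
    report = sdk.templates.assess_template_health(org_id="org_id", template_id="template_id")
    print(report['overall_score'], report['health_rating'])
    for issue in report['issues']:
        print(f"[{issue['severity']}] {issue['title']}")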
+     def _assess_template_metadata(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
+         """
+         Assess template metadata quality.
+
+         Args:
+             template_data: Template data dictionary
+
+         Returns:
+             Tuple of (score out of 15, list of issues)
+         """
+         score = 0
+         issues = []
+
+         # Check title quality (5 points); `or ''` guards against None values
+         title = (template_data.get('title') or '').strip()
+         if not title:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'critical',
+                 'title': 'Missing Template Title',
+                 'description': 'Template must have a descriptive title',
+                 'recommendation': 'Add a clear, descriptive title that explains the template purpose'
+             })
+         elif len(title) < 10:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'medium',
+                 'title': 'Template Title Too Short',
+                 'description': f'Title "{title}" is very brief ({len(title)} characters)',
+                 'recommendation': 'Expand title to be more descriptive (aim for 10-50 characters)'
+             })
+             score += 2
+         elif len(title) > 100:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'low',
+                 'title': 'Template Title Too Long',
+                 'description': f'Title is very long ({len(title)} characters)',
+                 'recommendation': 'Shorten title to be more concise (aim for 10-50 characters)'
+             })
+             score += 3
+         else:
+             score += 5
+
+         # Check summary/description quality (5 points)
+         summary = (template_data.get('summary') or '').strip()
+         if not summary:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'high',
+                 'title': 'Missing Template Summary',
+                 'description': 'Template should have a summary explaining its purpose',
+                 'recommendation': 'Add a summary that explains when and how to use this template'
+             })
+         elif len(summary) < 20:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'medium',
+                 'title': 'Template Summary Too Brief',
+                 'description': f'Summary is very short ({len(summary)} characters)',
+                 'recommendation': 'Expand summary to provide more context about template usage'
+             })
+             score += 2
+         else:
+             score += 5
+
+         # Check guidance quality (5 points)
+         guidance = (template_data.get('guidance') or '').strip()
+         if not guidance:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'medium',
+                 'title': 'Missing Template Guidance',
+                 'description': 'Template could benefit from usage guidance',
+                 'recommendation': 'Add guidance to help users understand how to use this template effectively'
+             })
+             score += 2
+         elif len(guidance) < 50:
+             issues.append({
+                 'category': 'metadata',
+                 'severity': 'low',
+                 'title': 'Template Guidance Could Be More Detailed',
+                 'description': 'Guidance is present but could be more comprehensive',
+                 'recommendation': 'Expand guidance with more detailed instructions and best practices'
+             })
+             score += 3
+         else:
+             score += 5
+
+         return score, issues
+
+     def _assess_step_clarity(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
+         """
+         Assess step title clarity and descriptiveness.
+
+         Args:
+             template_data: Template data dictionary
+
+         Returns:
+             Tuple of (score out of 20, list of issues)
+         """
+         score = 0
+         issues = []
+
+         steps = template_data.get('steps', [])
+         if not steps:
+             issues.append({
+                 'category': 'steps',
+                 'severity': 'critical',
+                 'title': 'Template Has No Steps',
+                 'description': 'Template must have at least one step',
+                 'recommendation': 'Add steps to create a functional workflow'
+             })
+             return 0, issues
+
+         step_count = len(steps)
+         unclear_steps = 0
+         missing_summaries = 0
+         very_short_titles = 0
+
+         for step in steps:
+             step_title = (step.get('title') or '').strip()
+             step_summary = (step.get('summary') or '').strip()
+
+             # Check title clarity
+             if not step_title:
+                 unclear_steps += 1
+             elif len(step_title) < 5:
+                 very_short_titles += 1
+             elif step_title.lower() in ['step', 'task', 'do this', 'complete', 'finish']:
+                 unclear_steps += 1
+
+             # Check for summary
+             if not step_summary:
+                 missing_summaries += 1
+
+         # Score based on step quality
+         if unclear_steps == 0 and very_short_titles == 0:
+             score += 10  # Excellent step titles
+         elif unclear_steps <= step_count * 0.1:  # 10% or less unclear
+             score += 8
+         elif unclear_steps <= step_count * 0.2:  # 20% or less unclear
+             score += 6
+         else:
+             score += 2
+
+         # Summary completeness scoring
+         summary_percentage = 1 - (missing_summaries / step_count)
+         if summary_percentage >= 0.8:
+             score += 10
+         elif summary_percentage >= 0.6:
+             score += 7
+         elif summary_percentage >= 0.4:
+             score += 5
+         else:
+             score += 2
+
+         # Generate issues
+         if unclear_steps > 0:
+             severity = 'critical' if unclear_steps > step_count * 0.3 else 'high'
+             issues.append({
+                 'category': 'steps',
+                 'severity': severity,
+                 'title': f'{unclear_steps} Steps Have Unclear Titles',
+                 'description': f'{unclear_steps} out of {step_count} steps have unclear or missing titles',
+                 'recommendation': 'Ensure all step titles clearly describe what needs to be done'
+             })
+
+         if very_short_titles > 0:
+             issues.append({
+                 'category': 'steps',
+                 'severity': 'medium',
+                 'title': f'{very_short_titles} Steps Have Very Short Titles',
+                 'description': f'{very_short_titles} steps have very brief titles',
+                 'recommendation': 'Expand step titles to be more descriptive'
+             })
+
+         if missing_summaries > step_count * 0.5:
+             issues.append({
+                 'category': 'steps',
+                 'severity': 'medium',
+                 'title': 'Many Steps Missing Summaries',
+                 'description': f'{missing_summaries} out of {step_count} steps lack summary descriptions',
+                 'recommendation': 'Add summaries to provide additional context for complex steps'
+             })
+
+         return score, issues
+
+     def _assess_form_completeness(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
+         """
+         Assess form field quality and completeness.
+
+         Args:
+             template_data: Template data dictionary
+
+         Returns:
+             Tuple of (score out of 15, list of issues)
+         """
+         score = 0
+         issues = []
+
+         prerun_fields = template_data.get('prerun', [])
+         steps = template_data.get('steps', [])
+
+         # Check kickoff form completeness (10 points)
+         if not prerun_fields:
+             # Check if template likely needs kickoff fields
+             template_content = f"{template_data.get('title', '')} {template_data.get('summary', '')}"
+             step_content = " ".join([(step.get('title') or '') + " " + (step.get('summary') or '') for step in steps])
+             all_content = (template_content + " " + step_content).lower()
+
+             needs_fields_keywords = ['client', 'customer', 'project', 'name', 'email', 'date', 'budget', 'details']
+             if any(keyword in all_content for keyword in needs_fields_keywords):
+                 issues.append({
+                     'category': 'forms',
+                     'severity': 'medium',
+                     'title': 'Missing Kickoff Fields',
+                     'description': 'Template could benefit from kickoff fields to collect initial information',
+                     'recommendation': 'Add kickoff fields to gather necessary information before starting the workflow'
+                 })
+                 score += 5
+             else:
+                 score += 8  # Template may not need kickoff fields
+         else:
+             # Assess field quality
+             required_fields = len([f for f in prerun_fields if f.get('required')])
+             total_fields = len(prerun_fields)
+
+             if total_fields >= 3 and required_fields > 0:
+                 score += 10
+             elif total_fields >= 1:
+                 score += 7
+             else:
+                 score += 3
+
+         # Check step forms (5 points)
+         steps_with_forms = 0
+         for step in steps:
+             # This would need to be expanded based on actual step form structure.
+             # For now, we do a simple check.
+             if 'form' in step or 'fields' in step:
+                 steps_with_forms += 1
+
+         if steps_with_forms > 0:
+             score += 5
+         else:
+             # Check if steps might need forms
+             form_keywords = ['input', 'enter', 'provide', 'fill', 'complete', 'details', 'information']
+             steps_needing_forms = 0
+             for step in steps:
+                 step_title = (step.get('title') or '').lower()
+                 step_summary = (step.get('summary') or '').lower()
+
+                 step_text = step_title + " " + step_summary
+                 if any(keyword in step_text for keyword in form_keywords):
+                     steps_needing_forms += 1
+
+             if steps_needing_forms > 0:
+                 issues.append({
+                     'category': 'forms',
+                     'severity': 'low',
+                     'title': 'Steps May Need Forms',
+                     'description': f'{steps_needing_forms} steps appear to require information input',
+                     'recommendation': 'Consider adding forms to steps that require information gathering'
+                 })
+                 score += 3
+             else:
+                 score += 5
+
+         return score, issues
+
+     def _assess_automation_efficiency(self, org_id: str, template_id: str, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
+         """
+         Assess automation rules efficiency.
+
+         Args:
+             org_id: Organization ID
+             template_id: Template ID
+             template_data: Template data dictionary
+
+         Returns:
+             Tuple of (score out of 20, list of issues)
+         """
+         score = 0
+         issues = []
+
+         try:
+             automations = template_data.get('automated_actions', [])
+
+             if not automations:
+                 # Template has no automations - this might be fine for simple templates
+                 steps = template_data.get('steps', [])
+                 if len(steps) > 5:
+                     issues.append({
+                         'category': 'automation',
+                         'severity': 'low',
+                         'title': 'No Automation Rules',
+                         'description': 'Template has multiple steps but no automation rules',
+                         'recommendation': 'Consider adding automation rules to improve workflow efficiency'
+                     })
+                     score += 15
+                 else:
+                     score += 18  # Simple templates may not need automation
+                 return score, issues
+
+             # Analyze automation complexity and conflicts
+             total_automations = len(automations)
+             complex_automations = 0
+             simple_automations = 0
+
+             for automation in automations:
+                 conditions = automation.get('conditions', [])
+                 actions = automation.get('actions', [])
+
+                 if len(conditions) > 3 or len(actions) > 2:
+                     complex_automations += 1
+                 else:
+                     simple_automations += 1
+
+             # Score based on automation balance
+             if complex_automations <= total_automations * 0.3:  # 30% or less complex
+                 score += 10
+             elif complex_automations <= total_automations * 0.5:  # 50% or less complex
+                 score += 7
+             else:
+                 score += 4
+                 issues.append({
+                     'category': 'automation',
+                     'severity': 'medium',
+                     'title': 'Many Complex Automation Rules',
+                     'description': f'{complex_automations} out of {total_automations} automation rules are complex',
+                     'recommendation': 'Consider simplifying complex automation rules for better maintainability'
+                 })
+
+             # Check for potential conflicts (simplified check)
+             if total_automations > 10:
+                 issues.append({
+                     'category': 'automation',
+                     'severity': 'medium',
+                     'title': 'High Number of Automation Rules',
+                     'description': f'Template has {total_automations} automation rules',
+                     'recommendation': 'Review automation rules for potential consolidation opportunities'
+                 })
+                 score += 5
+             else:
+                 score += 10
+
+         except Exception as e:
+             # If automation analysis fails, give neutral score
+             self.sdk.logger.warning(f"Failed to analyze automations: {e}")
+             score += 10
+
+         return score, issues
+
+     def _assess_deadline_reasonableness(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
+         """
+         Assess deadline appropriateness.
+
+         Args:
+             template_data: Template data dictionary
+
+         Returns:
+             Tuple of (score out of 15, list of issues)
+         """
+         score = 0
+         issues = []
+
+         steps = template_data.get('steps', [])
+         if not steps:
+             return 0, issues
+
+         steps_with_deadlines = 0
+         reasonable_deadlines = 0
+         unreasonable_deadlines = 0
+
+         for step in steps:
+             deadline = step.get('deadline')
+             if deadline:
+                 steps_with_deadlines += 1
+
+                 # Simple deadline reasonableness check
+                 deadline_value = deadline.get('value', 0)
+                 deadline_unit = deadline.get('unit', 'days')
+
+                 # Convert to hours for comparison
+                 if deadline_unit == 'minutes':
+                     hours = deadline_value / 60
+                 elif deadline_unit == 'hours':
+                     hours = deadline_value
+                 elif deadline_unit == 'days':
+                     hours = deadline_value * 24
+                 elif deadline_unit == 'weeks':
+                     hours = deadline_value * 24 * 7
+                 else:
+                     hours = deadline_value * 24  # Assume days
+
+                 # Check reasonableness
+                 if 0.5 <= hours <= 8760:  # 30 minutes to 1 year
+                     reasonable_deadlines += 1
+                 else:
+                     unreasonable_deadlines += 1
+
+         total_steps = len(steps)
+
+         # Score based on deadline usage and reasonableness
+         if steps_with_deadlines == 0:
+             if total_steps > 3:
+                 issues.append({
+                     'category': 'deadlines',
+                     'severity': 'low',
+                     'title': 'No Step Deadlines Set',
+                     'description': 'Template has multiple steps but no deadlines',
+                     'recommendation': 'Consider adding deadlines to time-sensitive steps'
+                 })
+                 score += 10
+             else:
+                 score += 13  # Simple templates may not need deadlines
+         else:
+             # Score based on deadline quality
+             if unreasonable_deadlines == 0:
+                 score += 15
+             elif unreasonable_deadlines <= steps_with_deadlines * 0.2:  # 20% or less unreasonable
+                 score += 12
+             else:
+                 score += 8
+                 issues.append({
+                     'category': 'deadlines',
+                     'severity': 'medium',
+                     'title': 'Some Deadlines May Be Unreasonable',
+                     'description': f'{unreasonable_deadlines} steps have potentially unreasonable deadlines',
+                     'recommendation': 'Review step deadlines to ensure they are achievable and appropriate'
+                 })
+
+         return score, issues
+
+     def _assess_workflow_logic(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
+         """
+         Assess overall workflow structure.
+
+         Args:
+             template_data: Template data dictionary
+
+         Returns:
+             Tuple of (score out of 15, list of issues)
+         """
+         score = 0
+         issues = []
+
+         steps = template_data.get('steps', [])
+         if not steps:
+             issues.append({
+                 'category': 'workflow',
+                 'severity': 'critical',
+                 'title': 'No Workflow Steps',
+                 'description': 'Template must have workflow steps',
+                 'recommendation': 'Add steps to create a meaningful workflow'
+             })
+             return 0, issues
+
+         step_count = len(steps)
+
+         # Check workflow length appropriateness (5 points)
+         if 2 <= step_count <= 20:
+             score += 5
+         elif step_count <= 30:
+             score += 4
+         else:
+             issues.append({
+                 'category': 'workflow',
+                 'severity': 'medium',
+                 'title': 'Very Long Workflow',
+                 'description': f'Template has {step_count} steps, which may be difficult to manage',
+                 'recommendation': 'Consider breaking down into smaller sub-workflows or templates'
+             })
+             score += 2
+
+         # Check step positioning and logic (5 points)
+         positions = [step.get('position', 0) for step in steps]
+         unique_positions = len(set(positions))
+
+         if unique_positions == step_count and min(positions) > 0:
+             score += 5  # All steps have unique, valid positions
+         elif unique_positions >= step_count * 0.8:  # 80% have unique positions
+             score += 4
+         else:
+             issues.append({
+                 'category': 'workflow',
+                 'severity': 'low',
+                 'title': 'Inconsistent Step Positioning',
+                 'description': 'Some steps may have duplicate or missing positions',
+                 'recommendation': 'Review step order and ensure logical sequence'
+             })
+             score += 2
+
+         # Check for logical flow (5 points)
+         # This is a simplified check - in practice, you'd analyze dependencies
+         first_steps = [step for step in steps if step.get('position', 0) <= 2]
+         last_steps = [step for step in steps if step.get('position', 0) >= step_count - 1]
+
+         if first_steps and last_steps:
+             # Check if first steps are setup-like and last steps are completion-like
+             first_step_content = " ".join([(step.get('title') or '').lower() for step in first_steps])
+             last_step_content = " ".join([(step.get('title') or '').lower() for step in last_steps])
+
+             setup_keywords = ['start', 'begin', 'initialize', 'setup', 'create', 'prepare']
+             completion_keywords = ['complete', 'finish', 'finalize', 'close', 'deliver', 'submit']
+
+             has_logical_start = any(keyword in first_step_content for keyword in setup_keywords)
+             has_logical_end = any(keyword in last_step_content for keyword in completion_keywords)
+
+             if has_logical_start and has_logical_end:
+                 score += 5
+             elif has_logical_start or has_logical_end:
+                 score += 3
+             else:
+                 score += 2
+         else:
+             score += 3
+
+         return score, issues
+
+     def _generate_improvement_plan(self, issues: List[Dict[str, Any]], category_scores: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
+         """
+         Generate prioritized improvement plan.
+
+         Args:
+             issues: List of all identified issues
+             category_scores: Dictionary of category scores
+
+         Returns:
+             List of improvement plan items
+         """
+         improvement_plan = []
+
+         # Sort issues by severity
+         critical_issues = [issue for issue in issues if issue.get('severity') == 'critical']
+         high_issues = [issue for issue in issues if issue.get('severity') == 'high']
+         medium_issues = [issue for issue in issues if issue.get('severity') == 'medium']
+         low_issues = [issue for issue in issues if issue.get('severity') == 'low']
+
+         # Create improvement items for critical issues
+         if critical_issues:
+             improvement_plan.append({
+                 'priority': 1,
+                 'phase': 'Immediate',
+                 'title': 'Fix Critical Issues',
+                 'description': f'Address {len(critical_issues)} critical issues that prevent template from functioning properly',
+                 'effort': 'High',
+                 'impact': 'Critical',
+                 'estimated_time': '2-4 hours',
+                 'issues_addressed': [issue['title'] for issue in critical_issues]
+             })
+
+         # Group issues by category for focused improvements
+         category_issue_map = {}
+         for issue in high_issues + medium_issues:
+             category = issue.get('category', 'general')
+             if category not in category_issue_map:
+                 category_issue_map[category] = []
+             category_issue_map[category].append(issue)
+
+         # Map score-category keys to the issue categories used by the assessors
+         score_to_issue_category = {
+             'metadata_quality': 'metadata',
+             'step_clarity': 'steps',
+             'form_completeness': 'forms',
+             'automation_efficiency': 'automation',
+             'deadline_reasonableness': 'deadlines',
+             'workflow_logic': 'workflow'
+         }
+
+         # Find lowest scoring categories for prioritization
+         category_priorities = []
+         for category, score_info in category_scores.items():
+             score_percentage = (score_info['score'] / score_info['max_score']) * 100
+             if score_percentage < 70:  # Focus on categories scoring below 70%
+                 issue_category = score_to_issue_category.get(category, category)
+                 issue_count = len(category_issue_map.get(issue_category, []))
+                 category_priorities.append({
+                     'category': category,
+                     'score_percentage': score_percentage,
+                     'issue_count': issue_count
+                 })
+
+         # Sort by score percentage (lowest first)
+         category_priorities.sort(key=lambda x: x['score_percentage'])
+
+         # Create improvement items for priority categories
+         phase_counter = 2
+         for cat_info in category_priorities[:3]:  # Top 3 categories
+             category_name = cat_info['category']
+             issue_category = score_to_issue_category.get(category_name, category_name)
+             category_issues = category_issue_map.get(issue_category, [])
+
+             if category_issues:
+                 improvement_plan.append({
+                     'priority': phase_counter,
+                     'phase': f'Phase {phase_counter - 1}',
+                     'title': f'Improve {category_name.replace("_", " ").title()}',
+                     'description': f'Address {len(category_issues)} issues in {category_name.replace("_", " ")} (currently {cat_info["score_percentage"]:.0f}%)',
+                     'effort': 'Medium' if len(category_issues) <= 3 else 'High',
+                     'impact': 'High' if cat_info['score_percentage'] < 50 else 'Medium',
+                     'estimated_time': f'{len(category_issues) * 30}-{len(category_issues) * 60} minutes',
+                     'issues_addressed': [issue['title'] for issue in category_issues]
+                 })
+                 phase_counter += 1
+
+         # Add low priority improvements
+         if low_issues:
+             improvement_plan.append({
+                 'priority': phase_counter,
+                 'phase': 'Polish',
+                 'title': 'Address Minor Improvements',
+                 'description': f'Handle {len(low_issues)} minor issues for template optimization',
+                 'effort': 'Low',
+                 'impact': 'Low',
+                 'estimated_time': f'{len(low_issues) * 15}-{len(low_issues) * 30} minutes',
+                 'issues_addressed': [issue['title'] for issue in low_issues]
+             })
+
+         return improvement_plan
+
+     def _get_health_rating(self, score: int) -> str:
+         """
+         Convert numeric score to health rating.
+
+         Args:
+             score: Numeric score out of 100
+
+         Returns:
+             Health rating as string
+         """
+         if score >= 90:
+             return 'excellent'
+         elif score >= 75:
+             return 'good'
+         elif score >= 60:
+             return 'fair'
+         elif score >= 40:
+             return 'poor'
+         else:
+             return 'critical'
+
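A quick illustration of the rating boundaries, assuming `mgr` is an initialized manager instance (the variable name is hypothetical):

    mgr._get_health_rating(92)  # 'excellent' (90 and above)
    mgr._get_health_rating(75)  # 'good' (75-89)
    mgr._get_health_rating(60)  # 'fair' (60-74)
    mgr._get_health_rating(45)  # 'poor' (40-59)
    mgr._get_health_rating(20)  # 'critical' (below 40)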
+     def _get_current_timestamp(self) -> str:
+         """
+         Get current timestamp for assessment.
+
+         Returns:
+             ISO formatted timestamp string
+         """
+         return datetime.datetime.now().isoformat()
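Tying it together, a hedged sketch of consuming an assessment result (same assumed `sdk` wiring as in the earlier example; the dictionary keys follow the structures built in this file):

    report = sdk.templates.assess_template_health(org_id, template_id)
    if report['health_rating'] in ('poor', 'critical'):
        # Walk the improvement plan in priority order
        for item in sorted(report['improvement_plan'], key=lambda i: i['priority']):
            print(f"{item['phase']}: {item['title']} ({item['estimated_time']})")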