tallyfy 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tallyfy might be problematic. Click here for more details.
- tallyfy/__init__.py +8 -4
- tallyfy/core.py +8 -8
- tallyfy/form_fields_management/__init__.py +70 -0
- tallyfy/form_fields_management/base.py +109 -0
- tallyfy/form_fields_management/crud_operations.py +234 -0
- tallyfy/form_fields_management/options_management.py +222 -0
- tallyfy/form_fields_management/suggestions.py +411 -0
- tallyfy/task_management/__init__.py +81 -0
- tallyfy/task_management/base.py +125 -0
- tallyfy/task_management/creation.py +221 -0
- tallyfy/task_management/retrieval.py +211 -0
- tallyfy/task_management/search.py +196 -0
- tallyfy/template_management/__init__.py +85 -0
- tallyfy/template_management/analysis.py +1093 -0
- tallyfy/template_management/automation.py +469 -0
- tallyfy/template_management/base.py +56 -0
- tallyfy/template_management/basic_operations.py +477 -0
- tallyfy/template_management/health_assessment.py +763 -0
- tallyfy/user_management/__init__.py +69 -0
- tallyfy/user_management/base.py +146 -0
- tallyfy/user_management/invitation.py +286 -0
- tallyfy/user_management/retrieval.py +339 -0
- {tallyfy-1.0.4.dist-info → tallyfy-1.0.5.dist-info}/METADATA +120 -56
- tallyfy-1.0.5.dist-info/RECORD +28 -0
- tallyfy/BUILD.md +0 -5
- tallyfy/form_fields_management.py +0 -582
- tallyfy/task_management.py +0 -356
- tallyfy/template_management.py +0 -2607
- tallyfy/user_management.py +0 -235
- tallyfy-1.0.4.dist-info/RECORD +0 -13
- {tallyfy-1.0.4.dist-info → tallyfy-1.0.5.dist-info}/WHEEL +0 -0
- {tallyfy-1.0.4.dist-info → tallyfy-1.0.5.dist-info}/licenses/LICENSE +0 -0
- {tallyfy-1.0.4.dist-info → tallyfy-1.0.5.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,763 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Comprehensive template health assessment and improvement recommendations
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import datetime
|
|
6
|
+
from typing import List, Optional, Dict, Any
|
|
7
|
+
from .base import TemplateManagerBase
|
|
8
|
+
from ..models import Template, TallyfyError
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class TemplateHealthAssessment(TemplateManagerBase):
|
|
12
|
+
"""Handles comprehensive template health analysis and improvement recommendations"""
|
|
13
|
+
|
|
14
|
+
def assess_template_health(self, org_id: str, template_id: str) -> Dict[str, Any]:
    """
    Run the full template health check and return an aggregated report.

    Fetches the template (with steps, automated actions and kickoff fields),
    scores six quality categories, compiles every issue found, and derives
    recommendations plus a prioritized improvement plan.

    Args:
        org_id: Organization ID
        template_id: Template ID to assess

    Returns:
        Dictionary containing complete health assessment with scores, issues, and recommendations

    Raises:
        TallyfyError: If the request fails
    """
    self._validate_org_id(org_id)
    self._validate_template_id(template_id)

    try:
        # Retrieve the template together with all related data the
        # individual assessors need.
        endpoint = f"organizations/{org_id}/checklists/{template_id}"
        response = self.sdk._make_request(
            'GET', endpoint, params={'with': 'steps,automated_actions,prerun'}
        )

        data = self._extract_data(response)
        if not data:
            raise TallyfyError("Unable to retrieve template data for health assessment")

        # Timestamp captured before scoring so it reflects assessment start.
        started_at = self._get_current_timestamp()

        # category name -> (max score, (awarded score, issues)); the six
        # max scores sum to 100.
        category_results = {
            'metadata_quality': (15, self._assess_template_metadata(data)),
            'step_clarity': (20, self._assess_step_clarity(data)),
            'form_completeness': (15, self._assess_form_completeness(data)),
            'automation_efficiency': (20, self._assess_automation_efficiency(org_id, template_id, data)),
            'deadline_reasonableness': (15, self._assess_deadline_reasonableness(data)),
            'workflow_logic': (15, self._assess_workflow_logic(data)),
        }

        category_scores = {
            name: {'score': result[0], 'max_score': max_score}
            for name, (max_score, result) in category_results.items()
        }
        total_score = sum(result[0] for _, result in category_results.values())
        all_issues = [issue for _, result in category_results.values() for issue in result[1]]

        # Recommendations are driven by issue severity and the overall score.
        critical_issues = [issue for issue in all_issues if issue.get('severity') == 'critical']
        high_issues = [issue for issue in all_issues if issue.get('severity') == 'high']

        recommendations = []
        if critical_issues:
            recommendations.append({
                'priority': 'critical',
                'title': 'Address Critical Issues Immediately',
                'description': f'Found {len(critical_issues)} critical issues that require immediate attention',
                'action': 'Review and fix all critical issues before using this template in production'
            })
        if high_issues:
            recommendations.append({
                'priority': 'high',
                'title': 'Resolve High Priority Issues',
                'description': f'Found {len(high_issues)} high priority issues that should be addressed soon',
                'action': 'Plan to address these issues in the next template update'
            })
        if total_score < 60:
            recommendations.append({
                'priority': 'high',
                'title': 'Template Needs Significant Improvement',
                'description': 'Overall template quality is below acceptable standards',
                'action': 'Consider comprehensive template redesign or major improvements across all categories'
            })

        return {
            'template_id': template_id,
            'template_title': data.get('title', 'Unknown'),
            'assessment_timestamp': started_at,
            'overall_score': total_score,
            'health_rating': self._get_health_rating(total_score),
            'category_scores': category_scores,
            'issues': all_issues,
            'recommendations': recommendations,
            'improvement_plan': self._generate_improvement_plan(all_issues, category_scores),
        }

    except TallyfyError:
        raise
    except Exception as e:
        self._handle_api_error(e, "assess template health", org_id=org_id, template_id=template_id)
|
|
124
|
+
|
|
125
|
+
def _assess_template_metadata(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
    """
    Assess template metadata quality (title, summary, guidance).

    Each of the three fields contributes up to 5 points. Fields stored as
    None are treated the same as missing fields: `dict.get` defaults only
    apply to absent keys, so values must be coalesced with `or ''` before
    calling str methods (the previous code raised AttributeError on None).

    Args:
        template_data: Template data dictionary

    Returns:
        Tuple of (score out of 15, list of issues)
    """
    score = 0
    issues = []

    # Check title quality (5 points)
    title = (template_data.get('title') or '').strip()
    if not title:
        issues.append({
            'category': 'metadata',
            'severity': 'critical',
            'title': 'Missing Template Title',
            'description': 'Template must have a descriptive title',
            'recommendation': 'Add a clear, descriptive title that explains the template purpose'
        })
    elif len(title) < 10:
        issues.append({
            'category': 'metadata',
            'severity': 'medium',
            'title': 'Template Title Too Short',
            'description': f'Title "{title}" is very brief ({len(title)} characters)',
            'recommendation': 'Expand title to be more descriptive (aim for 10-50 characters)'
        })
        score += 2
    elif len(title) > 100:
        issues.append({
            'category': 'metadata',
            'severity': 'low',
            'title': 'Template Title Too Long',
            'description': f'Title is very long ({len(title)} characters)',
            'recommendation': 'Shorten title to be more concise (aim for 10-50 characters)'
        })
        score += 3
    else:
        score += 5

    # Check summary/description quality (5 points)
    summary = (template_data.get('summary') or '').strip()
    if not summary:
        issues.append({
            'category': 'metadata',
            'severity': 'high',
            'title': 'Missing Template Summary',
            'description': 'Template should have a summary explaining its purpose',
            'recommendation': 'Add a summary that explains when and how to use this template'
        })
    elif len(summary) < 20:
        issues.append({
            'category': 'metadata',
            'severity': 'medium',
            'title': 'Template Summary Too Brief',
            'description': f'Summary is very short ({len(summary)} characters)',
            'recommendation': 'Expand summary to provide more context about template usage'
        })
        score += 2
    else:
        score += 5

    # Check guidance quality (5 points) — missing guidance still earns
    # partial credit because it is optional metadata.
    guidance = (template_data.get('guidance') or '').strip()
    if not guidance:
        issues.append({
            'category': 'metadata',
            'severity': 'medium',
            'title': 'Missing Template Guidance',
            'description': 'Template could benefit from usage guidance',
            'recommendation': 'Add guidance to help users understand how to use this template effectively'
        })
        score += 2
    elif len(guidance) < 50:
        issues.append({
            'category': 'metadata',
            'severity': 'low',
            'title': 'Template Guidance Could Be More Detailed',
            'description': 'Guidance is present but could be more comprehensive',
            'recommendation': 'Expand guidance with more detailed instructions and best practices'
        })
        score += 3
    else:
        score += 5

    return score, issues
|
|
215
|
+
|
|
216
|
+
def _assess_step_clarity(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
    """
    Assess step title clarity and descriptiveness.

    Title quality contributes up to 10 points and summary coverage up to
    10 points. Titles/summaries stored as None are treated as missing —
    `dict.get` defaults do not apply to keys present with a None value,
    so values are coalesced with `or ''` before str methods are called.

    Args:
        template_data: Template data dictionary

    Returns:
        Tuple of (score out of 20, list of issues)
    """
    score = 0
    issues = []

    steps = template_data.get('steps') or []
    if not steps:
        issues.append({
            'category': 'steps',
            'severity': 'critical',
            'title': 'Template Has No Steps',
            'description': 'Template must have at least one step',
            'recommendation': 'Add steps to create a functional workflow'
        })
        return 0, issues

    step_count = len(steps)
    unclear_steps = 0
    missing_summaries = 0
    very_short_titles = 0

    # Titles that convey nothing about the actual work to be done.
    generic_titles = ('step', 'task', 'do this', 'complete', 'finish')

    for step in steps:
        step_title = (step.get('title') or '').strip()
        step_summary = (step.get('summary') or '').strip()

        # Check title clarity
        if not step_title:
            unclear_steps += 1
        elif len(step_title) < 5:
            very_short_titles += 1
        elif step_title.lower() in generic_titles:
            unclear_steps += 1

        # Check for summary
        if not step_summary:
            missing_summaries += 1

    # Score based on step quality
    if unclear_steps == 0 and very_short_titles == 0:
        score += 10  # Excellent step titles
    elif unclear_steps <= step_count * 0.1:  # 10% or less unclear
        score += 8
    elif unclear_steps <= step_count * 0.2:  # 20% or less unclear
        score += 6
    else:
        score += 2

    # Summary completeness scoring
    summary_percentage = 1 - (missing_summaries / step_count)
    if summary_percentage >= 0.8:
        score += 10
    elif summary_percentage >= 0.6:
        score += 7
    elif summary_percentage >= 0.4:
        score += 5
    else:
        score += 2

    # Generate issues
    if unclear_steps > 0:
        severity = 'critical' if unclear_steps > step_count * 0.3 else 'high'
        issues.append({
            'category': 'steps',
            'severity': severity,
            'title': f'{unclear_steps} Steps Have Unclear Titles',
            'description': f'{unclear_steps} out of {step_count} steps have unclear or missing titles',
            'recommendation': 'Ensure all step titles clearly describe what needs to be done'
        })

    if very_short_titles > 0:
        issues.append({
            'category': 'steps',
            'severity': 'medium',
            'title': f'{very_short_titles} Steps Have Very Short Titles',
            'description': f'{very_short_titles} steps have very brief titles',
            'recommendation': 'Expand step titles to be more descriptive'
        })

    if missing_summaries > step_count * 0.5:
        issues.append({
            'category': 'steps',
            'severity': 'medium',
            'title': 'Many Steps Missing Summaries',
            'description': f'{missing_summaries} out of {step_count} steps lack summary descriptions',
            'recommendation': 'Add summaries to provide additional context for complex steps'
        })

    return score, issues
|
|
312
|
+
|
|
313
|
+
def _assess_form_completeness(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
    """
    Assess form field quality and completeness.

    The kickoff (prerun) form contributes up to 10 points and step-level
    forms up to 5 points. Values stored as None are treated as missing:
    without the `or` coalescing, a None title/summary would previously be
    rendered as the literal string "None" (f-string) or raise TypeError
    (string concatenation), and a None `prerun`/`steps` would break
    `len()`/iteration.

    Args:
        template_data: Template data dictionary

    Returns:
        Tuple of (score out of 15, list of issues)
    """
    score = 0
    issues = []

    prerun_fields = template_data.get('prerun') or []
    steps = template_data.get('steps') or []

    # Check kickoff form completeness (10 points)
    if not prerun_fields:
        # Check if template likely needs kickoff fields
        template_content = f"{template_data.get('title') or ''} {template_data.get('summary') or ''}"
        step_content = " ".join(
            [(step.get('title') or '') + " " + (step.get('summary') or '') for step in steps]
        )
        all_content = (template_content + " " + step_content).lower()

        needs_fields_keywords = ['client', 'customer', 'project', 'name', 'email', 'date', 'budget', 'details']
        if any(keyword in all_content for keyword in needs_fields_keywords):
            issues.append({
                'category': 'forms',
                'severity': 'medium',
                'title': 'Missing Kickoff Fields',
                'description': 'Template could benefit from kickoff fields to collect initial information',
                'recommendation': 'Add kickoff fields to gather necessary information before starting the workflow'
            })
            score += 5
        else:
            score += 8  # Template may not need kickoff fields
    else:
        # Assess field quality
        required_fields = len([f for f in prerun_fields if f.get('required')])
        total_fields = len(prerun_fields)

        if total_fields >= 3 and required_fields > 0:
            score += 10
        elif total_fields >= 1:
            score += 7
        else:
            score += 3  # Defensive: unreachable while prerun_fields is truthy

    # Check step forms (5 points)
    # NOTE(review): simple key presence check; expand once the actual
    # step form structure is known.
    steps_with_forms = sum(1 for step in steps if 'form' in step or 'fields' in step)

    if steps_with_forms > 0:
        score += 5
    else:
        # Check if steps might need forms
        form_keywords = ['input', 'enter', 'provide', 'fill', 'complete', 'details', 'information']
        steps_needing_forms = 0

        for step in steps:
            step_text = ((step.get('title') or '') + " " + (step.get('summary') or '')).lower()
            if any(keyword in step_text for keyword in form_keywords):
                steps_needing_forms += 1

        if steps_needing_forms > 0:
            issues.append({
                'category': 'forms',
                'severity': 'low',
                'title': 'Steps May Need Forms',
                'description': f'{steps_needing_forms} steps appear to require information input',
                'recommendation': 'Consider adding forms to steps that require information gathering'
            })
            score += 3
        else:
            score += 5

    return score, issues
|
|
393
|
+
|
|
394
|
+
def _assess_automation_efficiency(self, org_id: str, template_id: str, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
    """
    Assess automation rules efficiency.

    Scores the balance of simple vs. complex automation rules (up to 10
    points) and the overall rule count (up to 10 points). Values stored
    as None (`automated_actions`, per-rule `conditions`/`actions`) are
    treated as empty lists — `dict.get` defaults only apply to missing
    keys, so `len()` would otherwise raise TypeError on None.

    Args:
        org_id: Organization ID (currently unused; kept for interface parity
            with the other assessors)
        template_id: Template ID (currently unused; kept for interface parity)
        template_data: Template data dictionary

    Returns:
        Tuple of (score out of 20, list of issues)
    """
    score = 0
    issues = []

    try:
        automations = template_data.get('automated_actions') or []

        if not automations:
            # Template has no automations - this might be fine for simple templates
            steps = template_data.get('steps') or []
            if len(steps) > 5:
                issues.append({
                    'category': 'automation',
                    'severity': 'low',
                    'title': 'No Automation Rules',
                    'description': 'Template has multiple steps but no automation rules',
                    'recommendation': 'Consider adding automation rules to improve workflow efficiency'
                })
                score += 15
            else:
                score += 18  # Simple templates may not need automation
            return score, issues

        # Analyze automation complexity and conflicts
        total_automations = len(automations)
        complex_automations = 0

        for automation in automations:
            conditions = automation.get('conditions') or []
            actions = automation.get('actions') or []

            # A rule with many conditions or actions is harder to reason about.
            if len(conditions) > 3 or len(actions) > 2:
                complex_automations += 1

        # Score based on automation balance
        if complex_automations <= total_automations * 0.3:  # 30% or less complex
            score += 10
        elif complex_automations <= total_automations * 0.5:  # 50% or less complex
            score += 7
        else:
            score += 4
            issues.append({
                'category': 'automation',
                'severity': 'medium',
                'title': 'Many Complex Automation Rules',
                'description': f'{complex_automations} out of {total_automations} automation rules are complex',
                'recommendation': 'Consider simplifying complex automation rules for better maintainability'
            })

        # Check for potential conflicts (simplified check)
        if total_automations > 10:
            issues.append({
                'category': 'automation',
                'severity': 'medium',
                'title': 'High Number of Automation Rules',
                'description': f'Template has {total_automations} automation rules',
                'recommendation': 'Review automation rules for potential consolidation opportunities'
            })
            score += 5
        else:
            score += 10

    except Exception as e:
        # If automation analysis fails, give neutral score
        self.sdk.logger.warning(f"Failed to analyze automations: {e}")
        score += 10

    return score, issues
|
|
476
|
+
|
|
477
|
+
def _assess_deadline_reasonableness(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
    """
    Assess deadline appropriateness across the template's steps.

    A deadline is considered reasonable when, normalised to hours, it
    falls between 30 minutes and one year.

    Args:
        template_data: Template data dictionary

    Returns:
        Tuple of (score out of 15, list of issues)
    """
    issues = []

    steps = template_data.get('steps', [])
    if not steps:
        return 0, issues

    # Unit -> converter to hours; unrecognised units are treated as days.
    to_hours = {
        'minutes': lambda v: v / 60,
        'hours': lambda v: v,
        'days': lambda v: v * 24,
        'weeks': lambda v: v * 24 * 7,
    }
    fallback = lambda v: v * 24  # Assume days

    deadlined = 0
    unreasonable = 0

    for step in steps:
        deadline = step.get('deadline')
        if not deadline:
            continue
        deadlined += 1

        # Simple deadline reasonableness check
        hours = to_hours.get(deadline.get('unit', 'days'), fallback)(deadline.get('value', 0))
        if not (0.5 <= hours <= 8760):  # 30 minutes to 1 year
            unreasonable += 1

    score = 0
    if deadlined == 0:
        if len(steps) > 3:
            issues.append({
                'category': 'deadlines',
                'severity': 'low',
                'title': 'No Step Deadlines Set',
                'description': 'Template has multiple steps but no deadlines',
                'recommendation': 'Consider adding deadlines to time-sensitive steps'
            })
            score += 10
        else:
            score += 13  # Simple templates may not need deadlines
    elif unreasonable == 0:
        score += 15
    elif unreasonable <= deadlined * 0.2:  # 20% or less unreasonable
        score += 12
    else:
        score += 8
        issues.append({
            'category': 'deadlines',
            'severity': 'medium',
            'title': 'Some Deadlines May Be Unreasonable',
            'description': f'{unreasonable} steps have potentially unreasonable deadlines',
            'recommendation': 'Review step deadlines to ensure they are achievable and appropriate'
        })

    return score, issues
|
|
557
|
+
|
|
558
|
+
def _assess_workflow_logic(self, template_data: Dict[str, Any]) -> tuple[int, List[Dict[str, Any]]]:
    """
    Assess overall workflow structure.

    Scores workflow length (5 points), step positioning (5 points) and a
    keyword heuristic for logical start/end steps (5 points). Positions
    and titles stored as None are coalesced (`or 0` / `or ''`) because
    `dict.get` defaults do not apply to keys present with a None value —
    a None position would previously make `<=`/`>=` raise TypeError.

    Args:
        template_data: Template data dictionary

    Returns:
        Tuple of (score out of 15, list of issues)
    """
    score = 0
    issues = []

    steps = template_data.get('steps') or []
    if not steps:
        issues.append({
            'category': 'workflow',
            'severity': 'critical',
            'title': 'No Workflow Steps',
            'description': 'Template must have workflow steps',
            'recommendation': 'Add steps to create a meaningful workflow'
        })
        return 0, issues

    step_count = len(steps)

    # Check workflow length appropriateness (5 points)
    if 2 <= step_count <= 20:
        score += 5
    elif 1 <= step_count <= 30:
        score += 4
    elif step_count > 30:
        issues.append({
            'category': 'workflow',
            'severity': 'medium',
            'title': 'Very Long Workflow',
            'description': f'Template has {step_count} steps, which may be difficult to manage',
            'recommendation': 'Consider breaking down into smaller sub-workflows or templates'
        })
        score += 2
    else:
        score += 3  # Defensive: unreachable while steps is non-empty

    # Check step positioning and logic (5 points)
    positions = [step.get('position') or 0 for step in steps]
    unique_positions = len(set(positions))

    if unique_positions == step_count and min(positions) > 0:
        score += 5  # All steps have unique, valid positions
    elif unique_positions >= step_count * 0.8:  # 80% have unique positions
        score += 4
    else:
        issues.append({
            'category': 'workflow',
            'severity': 'low',
            'title': 'Inconsistent Step Positioning',
            'description': 'Some steps may have duplicate or missing positions',
            'recommendation': 'Review step order and ensure logical sequence'
        })
        score += 2

    # Check for logical flow (5 points)
    # This is a simplified check - in practice, you'd analyze dependencies
    first_steps = [step for step in steps if (step.get('position') or 0) <= 2]
    last_steps = [step for step in steps if (step.get('position') or 0) >= step_count - 1]

    if first_steps and last_steps:
        # Check if first steps are setup-like and last steps are completion-like
        first_step_content = " ".join([(step.get('title') or '').lower() for step in first_steps])
        last_step_content = " ".join([(step.get('title') or '').lower() for step in last_steps])

        setup_keywords = ['start', 'begin', 'initialize', 'setup', 'create', 'prepare']
        completion_keywords = ['complete', 'finish', 'finalize', 'close', 'deliver', 'submit']

        has_logical_start = any(keyword in first_step_content for keyword in setup_keywords)
        has_logical_end = any(keyword in last_step_content for keyword in completion_keywords)

        if has_logical_start and has_logical_end:
            score += 5
        elif has_logical_start or has_logical_end:
            score += 3
        else:
            score += 2
    else:
        score += 3

    return score, issues
|
|
645
|
+
|
|
646
|
+
def _generate_improvement_plan(self, issues: List[Dict[str, Any]], category_scores: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Generate prioritized improvement plan.

    Orders work as: fix critical issues first, then up to three phases
    targeting the lowest-scoring categories (below 70%), then a final
    polish phase for low-severity issues.

    Args:
        issues: List of all identified issues
        category_scores: Dictionary of category scores

    Returns:
        List of improvement plan items
    """
    improvement_plan = []

    # Sort issues by severity
    critical_issues = [issue for issue in issues if issue.get('severity') == 'critical']
    high_issues = [issue for issue in issues if issue.get('severity') == 'high']
    medium_issues = [issue for issue in issues if issue.get('severity') == 'medium']
    low_issues = [issue for issue in issues if issue.get('severity') == 'low']

    # Create improvement items for critical issues
    if critical_issues:
        improvement_plan.append({
            'priority': 1,
            'phase': 'Immediate',
            'title': 'Fix Critical Issues',
            'description': f'Address {len(critical_issues)} critical issues that prevent template from functioning properly',
            'effort': 'High',
            'impact': 'Critical',
            'estimated_time': '2-4 hours',
            'issues_addressed': [issue['title'] for issue in critical_issues]
        })

    # Group issues by category for focused improvements
    category_issue_map = {}
    for issue in high_issues + medium_issues:
        category_issue_map.setdefault(issue.get('category', 'general'), []).append(issue)

    # Map each scoring category to the tag assessors put on their issues.
    # BUGFIX: the previous prefix heuristic (`category.split('_')[0]`)
    # produced 'step'/'form'/'deadline', which never matched the plural
    # issue tags 'steps'/'forms'/'deadlines', so those categories were
    # silently dropped from the plan.
    issue_tag_for = {
        'metadata_quality': 'metadata',
        'step_clarity': 'steps',
        'form_completeness': 'forms',
        'automation_efficiency': 'automation',
        'deadline_reasonableness': 'deadlines',
        'workflow_logic': 'workflow',
    }

    # Find lowest scoring categories for prioritization
    category_priorities = []
    for category, score_info in category_scores.items():
        score_percentage = (score_info['score'] / score_info['max_score']) * 100
        if score_percentage < 70:  # Focus on categories scoring below 70%
            issue_tag = issue_tag_for.get(category, category.split('_')[0])
            category_priorities.append({
                'category': category,
                'score_percentage': score_percentage,
                'issue_count': len(category_issue_map.get(issue_tag, []))
            })

    # Sort by score percentage (lowest first)
    category_priorities.sort(key=lambda x: x['score_percentage'])

    # Create improvement items for priority categories
    phase_counter = 2
    for cat_info in category_priorities[:3]:  # Top 3 categories
        category_name = cat_info['category']
        issue_tag = issue_tag_for.get(category_name, category_name.split('_')[0])
        category_issues = category_issue_map.get(issue_tag, [])

        if category_issues:
            improvement_plan.append({
                'priority': phase_counter,
                'phase': f'Phase {phase_counter - 1}',
                'title': f'Improve {category_name.replace("_", " ").title()}',
                'description': f'Address {len(category_issues)} issues in {category_name.replace("_", " ")} (currently {cat_info["score_percentage"]:.0f}%)',
                'effort': 'Medium' if len(category_issues) <= 3 else 'High',
                'impact': 'High' if cat_info['score_percentage'] < 50 else 'Medium',
                'estimated_time': f'{len(category_issues) * 30}-{len(category_issues) * 60} minutes',
                'issues_addressed': [issue['title'] for issue in category_issues]
            })
            phase_counter += 1

    # Add low priority improvements
    if low_issues:
        improvement_plan.append({
            'priority': phase_counter,
            'phase': 'Polish',
            'title': 'Address Minor Improvements',
            'description': f'Handle {len(low_issues)} minor issues for template optimization',
            'effort': 'Low',
            'impact': 'Low',
            'estimated_time': f'{len(low_issues) * 15}-{len(low_issues) * 30} minutes',
            'issues_addressed': [issue['title'] for issue in low_issues]
        })

    return improvement_plan
|
|
734
|
+
|
|
735
|
+
def _get_health_rating(self, score: int) -> str:
    """
    Convert a numeric health score into a qualitative rating.

    Args:
        score: Numeric score out of 100

    Returns:
        Health rating as string
    """
    # Thresholds are checked from best to worst; the first floor the
    # score meets determines the rating.
    rating_floors = (
        (90, 'excellent'),
        (75, 'good'),
        (60, 'fair'),
        (40, 'poor'),
    )
    for floor, rating in rating_floors:
        if score >= floor:
            return rating
    return 'critical'
|
|
755
|
+
|
|
756
|
+
def _get_current_timestamp(self) -> str:
    """
    Get the current time as an ISO-8601 timestamp.

    Uses timezone-aware UTC instead of the previous naive local time, so
    assessments recorded on machines in different timezones remain
    comparable and the timestamp is unambiguous.

    Returns:
        ISO formatted timestamp string (UTC, includes the +00:00 offset)
    """
    return datetime.datetime.now(datetime.timezone.utc).isoformat()
|