gitflow-analytics 1.0.0__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitflow_analytics/__init__.py +11 -9
- gitflow_analytics/_version.py +2 -2
- gitflow_analytics/cli.py +691 -243
- gitflow_analytics/cli_rich.py +353 -0
- gitflow_analytics/config.py +389 -96
- gitflow_analytics/core/analyzer.py +175 -78
- gitflow_analytics/core/branch_mapper.py +132 -132
- gitflow_analytics/core/cache.py +242 -173
- gitflow_analytics/core/identity.py +214 -178
- gitflow_analytics/extractors/base.py +13 -11
- gitflow_analytics/extractors/story_points.py +70 -59
- gitflow_analytics/extractors/tickets.py +111 -88
- gitflow_analytics/integrations/github_integration.py +91 -77
- gitflow_analytics/integrations/jira_integration.py +284 -0
- gitflow_analytics/integrations/orchestrator.py +99 -72
- gitflow_analytics/metrics/dora.py +183 -179
- gitflow_analytics/models/database.py +191 -54
- gitflow_analytics/qualitative/__init__.py +30 -0
- gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
- gitflow_analytics/qualitative/classifiers/change_type.py +468 -0
- gitflow_analytics/qualitative/classifiers/domain_classifier.py +399 -0
- gitflow_analytics/qualitative/classifiers/intent_analyzer.py +436 -0
- gitflow_analytics/qualitative/classifiers/risk_analyzer.py +412 -0
- gitflow_analytics/qualitative/core/__init__.py +13 -0
- gitflow_analytics/qualitative/core/llm_fallback.py +653 -0
- gitflow_analytics/qualitative/core/nlp_engine.py +373 -0
- gitflow_analytics/qualitative/core/pattern_cache.py +457 -0
- gitflow_analytics/qualitative/core/processor.py +540 -0
- gitflow_analytics/qualitative/models/__init__.py +25 -0
- gitflow_analytics/qualitative/models/schemas.py +272 -0
- gitflow_analytics/qualitative/utils/__init__.py +13 -0
- gitflow_analytics/qualitative/utils/batch_processor.py +326 -0
- gitflow_analytics/qualitative/utils/cost_tracker.py +343 -0
- gitflow_analytics/qualitative/utils/metrics.py +347 -0
- gitflow_analytics/qualitative/utils/text_processing.py +243 -0
- gitflow_analytics/reports/analytics_writer.py +25 -8
- gitflow_analytics/reports/csv_writer.py +60 -32
- gitflow_analytics/reports/narrative_writer.py +21 -15
- gitflow_analytics/tui/__init__.py +5 -0
- gitflow_analytics/tui/app.py +721 -0
- gitflow_analytics/tui/screens/__init__.py +8 -0
- gitflow_analytics/tui/screens/analysis_progress_screen.py +487 -0
- gitflow_analytics/tui/screens/configuration_screen.py +547 -0
- gitflow_analytics/tui/screens/loading_screen.py +358 -0
- gitflow_analytics/tui/screens/main_screen.py +304 -0
- gitflow_analytics/tui/screens/results_screen.py +698 -0
- gitflow_analytics/tui/widgets/__init__.py +7 -0
- gitflow_analytics/tui/widgets/data_table.py +257 -0
- gitflow_analytics/tui/widgets/export_modal.py +301 -0
- gitflow_analytics/tui/widgets/progress_widget.py +192 -0
- gitflow_analytics-1.0.3.dist-info/METADATA +490 -0
- gitflow_analytics-1.0.3.dist-info/RECORD +62 -0
- gitflow_analytics-1.0.0.dist-info/METADATA +0 -201
- gitflow_analytics-1.0.0.dist-info/RECORD +0 -30
- {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/WHEEL +0 -0
- {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,436 @@
|
|
|
1
|
+
"""Intent analyzer for extracting developer intent and urgency from commits."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import re
|
|
5
|
+
from typing import Dict, List, Any, Set
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
|
|
8
|
+
from ..models.schemas import IntentConfig
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
import spacy
|
|
12
|
+
from spacy.tokens import Doc
|
|
13
|
+
SPACY_AVAILABLE = True
|
|
14
|
+
except ImportError:
|
|
15
|
+
SPACY_AVAILABLE = False
|
|
16
|
+
Doc = Any
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class IntentAnalyzer:
    """Analyze commit messages to extract developer intent and urgency signals.

    All detection is keyword/substring and regex based over the lowercased
    commit message; the spaCy Doc accepted by :meth:`analyze` is currently
    unused by the sub-analyzers.

    This analyzer identifies:
    - Urgency level (critical, important, routine)
    - Intent confidence (how clear the intent is)
    - Emotional tone (frustrated, confident, uncertain)
    - Planning signals (TODO, FIXME, temporary fixes)
    - Collaboration signals (pair programming, code review)
    """
|
|
29
|
+
|
|
30
|
+
    def __init__(self, config: IntentConfig):
        """Initialize intent analyzer.

        Args:
            config: Configuration for intent analysis. Only
                ``config.urgency_keywords`` (mapping of urgency level ->
                keyword list) is read; all other indicator vocabularies
                below are built-in defaults.
        """
        self.config = config
        self.logger = logging.getLogger(__name__)

        # Urgency keyword patterns from config
        # NOTE: matching everywhere below is plain substring containment on
        # the lowercased message, so short words can match inside longer ones.
        self.urgency_keywords = config.urgency_keywords

        # Confidence indicators: vocabulary that raises or lowers the
        # confidence score in _analyze_confidence.
        self.confidence_indicators = {
            'high_confidence': {
                'definitely', 'clearly', 'obviously', 'certainly', 'confirmed',
                'verified', 'tested', 'working', 'complete', 'finished',
                'implement', 'solution', 'resolve'
            },
            'low_confidence': {
                'maybe', 'perhaps', 'possibly', 'might', 'could', 'should',
                'try', 'attempt', 'experiment', 'test', 'temporary', 'quick',
                'hack', 'workaround', 'temp'
            },
            'uncertain': {
                'not sure', 'unclear', 'confusing', 'weird', 'strange',
                'unexpected', 'unsure', 'investigation', 'debug', 'investigate'
            }
        }

        # Emotional tone indicators consumed by _analyze_tone.
        self.tone_indicators = {
            'frustrated': {
                'annoying', 'frustrating', 'stupid', 'broken', 'terrible',
                'awful', 'hate', 'annoyed', 'ugh', 'argh', 'damn', 'wtf'
            },
            'confident': {
                'great', 'excellent', 'perfect', 'awesome', 'clean', 'elegant',
                'nice', 'good', 'better', 'improved', 'optimized'
            },
            'cautious': {
                'careful', 'cautious', 'gentle', 'safe', 'conservative',
                'minimal', 'small', 'incremental', 'gradual'
            }
        }

        # Planning and TODO indicators consumed by _analyze_planning_stage.
        self.planning_indicators = {
            'todo': {
                'todo', 'fixme', 'hack', 'temporary', 'temp', 'later',
                'placeholder', 'stub', 'incomplete', 'wip'
            },
            'future_work': {
                'future', 'later', 'eventually', 'someday', 'next',
                'upcoming', 'planned', 'roadmap'
            },
            'immediate': {
                'now', 'immediate', 'urgent', 'asap', 'quickly', 'fast',
                'emergency', 'critical', 'hotfix'
            }
        }

        # Collaboration indicators consumed by _analyze_collaboration.
        self.collaboration_indicators = {
            'pair_programming': {
                'pair', 'pairing', 'together', 'with', 'collaborative',
                'co-authored', 'mob', 'mobbing'
            },
            'code_review': {
                'review', 'feedback', 'suggestion', 'requested', 'comment',
                'pr', 'pull request', 'merge request'
            },
            'help_seeking': {
                'help', 'assistance', 'advice', 'guidance', 'input',
                'thoughts', 'opinions', 'feedback'
            }
        }

        # Technical complexity indicators consumed by _analyze_complexity.
        self.complexity_indicators = {
            'simple': {
                'simple', 'easy', 'quick', 'minor', 'small', 'tiny',
                'straightforward', 'basic'
            },
            'complex': {
                'complex', 'complicated', 'difficult', 'challenging',
                'major', 'significant', 'substantial', 'extensive'
            },
            'refactoring': {
                'refactor', 'restructure', 'reorganize', 'cleanup',
                'simplify', 'optimize', 'improve'
            }
        }
|
|
123
|
+
|
|
124
|
+
def analyze(self, message: str, doc: Doc) -> Dict[str, Any]:
|
|
125
|
+
"""Analyze commit message for intent signals.
|
|
126
|
+
|
|
127
|
+
Args:
|
|
128
|
+
message: Commit message
|
|
129
|
+
doc: spaCy processed document (may be None)
|
|
130
|
+
|
|
131
|
+
Returns:
|
|
132
|
+
Dictionary with intent analysis results
|
|
133
|
+
"""
|
|
134
|
+
if not message:
|
|
135
|
+
return {
|
|
136
|
+
'urgency': 'routine',
|
|
137
|
+
'confidence': 0.0,
|
|
138
|
+
'tone': 'neutral',
|
|
139
|
+
'planning_stage': 'implementation',
|
|
140
|
+
'collaboration_signals': [],
|
|
141
|
+
'complexity': 'moderate',
|
|
142
|
+
'signals': []
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
message_lower = message.lower()
|
|
146
|
+
|
|
147
|
+
# Extract all signals
|
|
148
|
+
urgency = self._analyze_urgency(message_lower)
|
|
149
|
+
confidence_info = self._analyze_confidence(message_lower, doc)
|
|
150
|
+
tone = self._analyze_tone(message_lower)
|
|
151
|
+
planning = self._analyze_planning_stage(message_lower)
|
|
152
|
+
collaboration = self._analyze_collaboration(message_lower)
|
|
153
|
+
complexity = self._analyze_complexity(message_lower)
|
|
154
|
+
|
|
155
|
+
# Collect all detected signals
|
|
156
|
+
all_signals = []
|
|
157
|
+
all_signals.extend(urgency.get('signals', []))
|
|
158
|
+
all_signals.extend(confidence_info.get('signals', []))
|
|
159
|
+
all_signals.extend(tone.get('signals', []))
|
|
160
|
+
all_signals.extend(planning.get('signals', []))
|
|
161
|
+
all_signals.extend(collaboration.get('signals', []))
|
|
162
|
+
all_signals.extend(complexity.get('signals', []))
|
|
163
|
+
|
|
164
|
+
return {
|
|
165
|
+
'urgency': urgency['level'],
|
|
166
|
+
'confidence': confidence_info['score'],
|
|
167
|
+
'tone': tone['dominant_tone'],
|
|
168
|
+
'planning_stage': planning['stage'],
|
|
169
|
+
'collaboration_signals': collaboration['types'],
|
|
170
|
+
'complexity': complexity['level'],
|
|
171
|
+
'signals': all_signals,
|
|
172
|
+
'detailed_analysis': {
|
|
173
|
+
'urgency_breakdown': urgency,
|
|
174
|
+
'confidence_breakdown': confidence_info,
|
|
175
|
+
'tone_breakdown': tone,
|
|
176
|
+
'planning_breakdown': planning,
|
|
177
|
+
'collaboration_breakdown': collaboration,
|
|
178
|
+
'complexity_breakdown': complexity
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
def _analyze_urgency(self, message: str) -> Dict[str, Any]:
|
|
183
|
+
"""Analyze urgency level from message content.
|
|
184
|
+
|
|
185
|
+
Args:
|
|
186
|
+
message: Lowercase commit message
|
|
187
|
+
|
|
188
|
+
Returns:
|
|
189
|
+
Dictionary with urgency analysis
|
|
190
|
+
"""
|
|
191
|
+
signals = []
|
|
192
|
+
urgency_scores = defaultdict(float)
|
|
193
|
+
|
|
194
|
+
# Check configured urgency keywords
|
|
195
|
+
for urgency_level, keywords in self.urgency_keywords.items():
|
|
196
|
+
for keyword in keywords:
|
|
197
|
+
if keyword.lower() in message:
|
|
198
|
+
signals.append(f"urgency:{urgency_level}:{keyword}")
|
|
199
|
+
urgency_scores[urgency_level] += 1.0
|
|
200
|
+
|
|
201
|
+
# Additional urgency patterns
|
|
202
|
+
urgent_patterns = [
|
|
203
|
+
(r'\b(urgent|critical|emergency|asap|immediate)\b', 'critical', 2.0),
|
|
204
|
+
(r'\b(important|priority|needed|required)\b', 'important', 1.5),
|
|
205
|
+
(r'\b(hotfix|quickfix|patch)\b', 'critical', 2.0),
|
|
206
|
+
(r'\b(breaking|major)\b', 'important', 1.5),
|
|
207
|
+
(r'\b(minor|small|tiny)\b', 'routine', 0.5),
|
|
208
|
+
]
|
|
209
|
+
|
|
210
|
+
for pattern, level, weight in urgent_patterns:
|
|
211
|
+
if re.search(pattern, message):
|
|
212
|
+
signals.append(f"urgency_pattern:{level}:{pattern}")
|
|
213
|
+
urgency_scores[level] += weight
|
|
214
|
+
|
|
215
|
+
# Determine dominant urgency level
|
|
216
|
+
if urgency_scores:
|
|
217
|
+
dominant_urgency = max(urgency_scores.keys(), key=lambda k: urgency_scores[k])
|
|
218
|
+
else:
|
|
219
|
+
dominant_urgency = 'routine'
|
|
220
|
+
|
|
221
|
+
return {
|
|
222
|
+
'level': dominant_urgency,
|
|
223
|
+
'scores': dict(urgency_scores),
|
|
224
|
+
'signals': signals
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
def _analyze_confidence(self, message: str, doc: Doc) -> Dict[str, Any]:
|
|
228
|
+
"""Analyze confidence level in the commit.
|
|
229
|
+
|
|
230
|
+
Args:
|
|
231
|
+
message: Lowercase commit message
|
|
232
|
+
doc: spaCy processed document
|
|
233
|
+
|
|
234
|
+
Returns:
|
|
235
|
+
Dictionary with confidence analysis
|
|
236
|
+
"""
|
|
237
|
+
signals = []
|
|
238
|
+
confidence_score = 0.5 # Start with neutral confidence
|
|
239
|
+
|
|
240
|
+
# Check confidence indicators
|
|
241
|
+
for confidence_type, keywords in self.confidence_indicators.items():
|
|
242
|
+
matches = sum(1 for keyword in keywords if keyword in message)
|
|
243
|
+
if matches > 0:
|
|
244
|
+
signals.append(f"confidence:{confidence_type}:{matches}")
|
|
245
|
+
|
|
246
|
+
if confidence_type == 'high_confidence':
|
|
247
|
+
confidence_score += matches * 0.2
|
|
248
|
+
elif confidence_type == 'low_confidence':
|
|
249
|
+
confidence_score -= matches * 0.15
|
|
250
|
+
elif confidence_type == 'uncertain':
|
|
251
|
+
confidence_score -= matches * 0.25
|
|
252
|
+
|
|
253
|
+
# Check message structure and completeness
|
|
254
|
+
if len(message.split()) >= 5: # Detailed message
|
|
255
|
+
confidence_score += 0.1
|
|
256
|
+
signals.append("confidence:detailed_message")
|
|
257
|
+
elif len(message.split()) <= 2: # Very brief message
|
|
258
|
+
confidence_score -= 0.1
|
|
259
|
+
signals.append("confidence:brief_message")
|
|
260
|
+
|
|
261
|
+
# Check for question marks (uncertainty)
|
|
262
|
+
if '?' in message:
|
|
263
|
+
confidence_score -= 0.2
|
|
264
|
+
signals.append("confidence:contains_question")
|
|
265
|
+
|
|
266
|
+
# Check for ellipsis or incomplete thoughts
|
|
267
|
+
if '...' in message or message.endswith('.'):
|
|
268
|
+
confidence_score -= 0.1
|
|
269
|
+
signals.append("confidence:incomplete_thought")
|
|
270
|
+
|
|
271
|
+
# Normalize confidence score
|
|
272
|
+
confidence_score = max(0.0, min(1.0, confidence_score))
|
|
273
|
+
|
|
274
|
+
return {
|
|
275
|
+
'score': confidence_score,
|
|
276
|
+
'level': 'high' if confidence_score > 0.7 else 'medium' if confidence_score > 0.4 else 'low',
|
|
277
|
+
'signals': signals
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
def _analyze_tone(self, message: str) -> Dict[str, Any]:
|
|
281
|
+
"""Analyze emotional tone of the commit message.
|
|
282
|
+
|
|
283
|
+
Args:
|
|
284
|
+
message: Lowercase commit message
|
|
285
|
+
|
|
286
|
+
Returns:
|
|
287
|
+
Dictionary with tone analysis
|
|
288
|
+
"""
|
|
289
|
+
signals = []
|
|
290
|
+
tone_scores = defaultdict(float)
|
|
291
|
+
|
|
292
|
+
# Check tone indicators
|
|
293
|
+
for tone_type, keywords in self.tone_indicators.items():
|
|
294
|
+
matches = sum(1 for keyword in keywords if keyword in message)
|
|
295
|
+
if matches > 0:
|
|
296
|
+
signals.append(f"tone:{tone_type}:{matches}")
|
|
297
|
+
tone_scores[tone_type] += matches
|
|
298
|
+
|
|
299
|
+
# Check punctuation for tone
|
|
300
|
+
if '!' in message:
|
|
301
|
+
tone_scores['confident'] += 0.5
|
|
302
|
+
signals.append("tone:exclamation_mark")
|
|
303
|
+
elif '...' in message:
|
|
304
|
+
tone_scores['cautious'] += 0.5
|
|
305
|
+
signals.append("tone:ellipsis")
|
|
306
|
+
|
|
307
|
+
# Determine dominant tone
|
|
308
|
+
if tone_scores:
|
|
309
|
+
dominant_tone = max(tone_scores.keys(), key=lambda k: tone_scores[k])
|
|
310
|
+
else:
|
|
311
|
+
dominant_tone = 'neutral'
|
|
312
|
+
|
|
313
|
+
return {
|
|
314
|
+
'dominant_tone': dominant_tone,
|
|
315
|
+
'scores': dict(tone_scores),
|
|
316
|
+
'signals': signals
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
def _analyze_planning_stage(self, message: str) -> Dict[str, Any]:
|
|
320
|
+
"""Analyze what stage of planning/development this commit represents.
|
|
321
|
+
|
|
322
|
+
Args:
|
|
323
|
+
message: Lowercase commit message
|
|
324
|
+
|
|
325
|
+
Returns:
|
|
326
|
+
Dictionary with planning stage analysis
|
|
327
|
+
"""
|
|
328
|
+
signals = []
|
|
329
|
+
stage_scores = defaultdict(float)
|
|
330
|
+
|
|
331
|
+
# Check planning indicators
|
|
332
|
+
for stage_type, keywords in self.planning_indicators.items():
|
|
333
|
+
matches = sum(1 for keyword in keywords if keyword in message)
|
|
334
|
+
if matches > 0:
|
|
335
|
+
signals.append(f"planning:{stage_type}:{matches}")
|
|
336
|
+
stage_scores[stage_type] += matches
|
|
337
|
+
|
|
338
|
+
# Additional stage indicators
|
|
339
|
+
if any(word in message for word in ['start', 'initial', 'begin', 'setup']):
|
|
340
|
+
stage_scores['initial'] = stage_scores.get('initial', 0) + 1
|
|
341
|
+
signals.append("planning:initial_stage")
|
|
342
|
+
|
|
343
|
+
if any(word in message for word in ['complete', 'finish', 'done', 'final']):
|
|
344
|
+
stage_scores['completion'] = stage_scores.get('completion', 0) + 1
|
|
345
|
+
signals.append("planning:completion_stage")
|
|
346
|
+
|
|
347
|
+
# Determine stage
|
|
348
|
+
if stage_scores:
|
|
349
|
+
if 'immediate' in stage_scores:
|
|
350
|
+
stage = 'immediate'
|
|
351
|
+
elif 'todo' in stage_scores:
|
|
352
|
+
stage = 'planning'
|
|
353
|
+
elif 'future_work' in stage_scores:
|
|
354
|
+
stage = 'future_planning'
|
|
355
|
+
elif 'completion' in stage_scores:
|
|
356
|
+
stage = 'completion'
|
|
357
|
+
elif 'initial' in stage_scores:
|
|
358
|
+
stage = 'initiation'
|
|
359
|
+
else:
|
|
360
|
+
stage = 'implementation'
|
|
361
|
+
else:
|
|
362
|
+
stage = 'implementation'
|
|
363
|
+
|
|
364
|
+
return {
|
|
365
|
+
'stage': stage,
|
|
366
|
+
'scores': dict(stage_scores),
|
|
367
|
+
'signals': signals
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
def _analyze_collaboration(self, message: str) -> Dict[str, Any]:
|
|
371
|
+
"""Analyze collaboration signals in the commit message.
|
|
372
|
+
|
|
373
|
+
Args:
|
|
374
|
+
message: Lowercase commit message
|
|
375
|
+
|
|
376
|
+
Returns:
|
|
377
|
+
Dictionary with collaboration analysis
|
|
378
|
+
"""
|
|
379
|
+
signals = []
|
|
380
|
+
collaboration_types = []
|
|
381
|
+
|
|
382
|
+
# Check collaboration indicators
|
|
383
|
+
for collab_type, keywords in self.collaboration_indicators.items():
|
|
384
|
+
matches = [keyword for keyword in keywords if keyword in message]
|
|
385
|
+
if matches:
|
|
386
|
+
signals.extend([f"collaboration:{collab_type}:{match}" for match in matches])
|
|
387
|
+
collaboration_types.append(collab_type)
|
|
388
|
+
|
|
389
|
+
# Check for co-author patterns
|
|
390
|
+
if 'co-authored-by:' in message or 'with @' in message:
|
|
391
|
+
collaboration_types.append('co_authored')
|
|
392
|
+
signals.append("collaboration:co_authored")
|
|
393
|
+
|
|
394
|
+
return {
|
|
395
|
+
'types': collaboration_types,
|
|
396
|
+
'signals': signals,
|
|
397
|
+
'is_collaborative': len(collaboration_types) > 0
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
def _analyze_complexity(self, message: str) -> Dict[str, Any]:
|
|
401
|
+
"""Analyze technical complexity signals in the commit message.
|
|
402
|
+
|
|
403
|
+
Args:
|
|
404
|
+
message: Lowercase commit message
|
|
405
|
+
|
|
406
|
+
Returns:
|
|
407
|
+
Dictionary with complexity analysis
|
|
408
|
+
"""
|
|
409
|
+
signals = []
|
|
410
|
+
complexity_scores = defaultdict(float)
|
|
411
|
+
|
|
412
|
+
# Check complexity indicators
|
|
413
|
+
for complexity_type, keywords in self.complexity_indicators.items():
|
|
414
|
+
matches = sum(1 for keyword in keywords if keyword in message)
|
|
415
|
+
if matches > 0:
|
|
416
|
+
signals.append(f"complexity:{complexity_type}:{matches}")
|
|
417
|
+
complexity_scores[complexity_type] += matches
|
|
418
|
+
|
|
419
|
+
# Determine complexity level
|
|
420
|
+
if complexity_scores:
|
|
421
|
+
if complexity_scores.get('complex', 0) > complexity_scores.get('simple', 0):
|
|
422
|
+
level = 'complex'
|
|
423
|
+
elif complexity_scores.get('simple', 0) > 0:
|
|
424
|
+
level = 'simple'
|
|
425
|
+
elif complexity_scores.get('refactoring', 0) > 0:
|
|
426
|
+
level = 'moderate' # Refactoring is usually moderate complexity
|
|
427
|
+
else:
|
|
428
|
+
level = 'moderate'
|
|
429
|
+
else:
|
|
430
|
+
level = 'moderate'
|
|
431
|
+
|
|
432
|
+
return {
|
|
433
|
+
'level': level,
|
|
434
|
+
'scores': dict(complexity_scores),
|
|
435
|
+
'signals': signals
|
|
436
|
+
}
|