juno-code 1.0.38 → 1.0.40
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +93 -0
- package/dist/bin/cli.js +54 -9
- package/dist/bin/cli.js.map +1 -1
- package/dist/bin/cli.mjs +54 -9
- package/dist/bin/cli.mjs.map +1 -1
- package/dist/index.js +2 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2 -2
- package/dist/index.mjs.map +1 -1
- package/dist/templates/scripts/__pycache__/github.cpython-38.pyc +0 -0
- package/dist/templates/scripts/github.py +2383 -0
- package/dist/templates/scripts/install_requirements.sh +270 -1
- package/dist/templates/scripts/kanban.sh +3 -1
- package/dist/templates/scripts/run_until_completion.sh +44 -4
- package/dist/templates/scripts/slack_respond.py +2 -2
- package/package.json +1 -1
|
@@ -0,0 +1,2383 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
GitHub Integration for juno-code - Bidirectional workflow between GitHub Issues and Kanban.
|
|
4
|
+
|
|
5
|
+
This script provides a unified interface for syncing GitHub Issues with the juno-code
|
|
6
|
+
kanban system. It uses tag-based identification (tag_id) for reliable task-to-issue mapping.
|
|
7
|
+
|
|
8
|
+
Features:
|
|
9
|
+
- Fetch GitHub issues and create kanban tasks with automatic tagging
|
|
10
|
+
- Respond to issues by posting comments when kanban tasks are completed
|
|
11
|
+
- Bidirectional sync (fetch + respond) with optional continuous monitoring
|
|
12
|
+
- Persistent state tracking (NDJSON-based) to prevent duplicate processing
|
|
13
|
+
- Tag-based identification using tag_id for O(1) lookups (no fuzzy matching)
|
|
14
|
+
- Environment-based configuration with secure token management
|
|
15
|
+
|
|
16
|
+
Usage:
|
|
17
|
+
python github.py fetch --repo owner/repo
|
|
18
|
+
python github.py respond --tag github-input
|
|
19
|
+
python github.py sync --repo owner/repo --once
|
|
20
|
+
|
|
21
|
+
Version: 1.0.0
|
|
22
|
+
Package: juno-code@1.x.x
|
|
23
|
+
Auto-installed by: ScriptInstaller
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
import argparse
|
|
27
|
+
import json
|
|
28
|
+
import logging
|
|
29
|
+
import os
|
|
30
|
+
import re
|
|
31
|
+
import signal
|
|
32
|
+
import subprocess
|
|
33
|
+
import sys
|
|
34
|
+
import time
|
|
35
|
+
from datetime import datetime, timezone
|
|
36
|
+
from pathlib import Path
|
|
37
|
+
from typing import Dict, List, Optional, Any, Tuple
|
|
38
|
+
|
|
39
|
+
__version__ = "1.0.0"
|
|
40
|
+
|
|
41
|
+
# Try importing required dependencies
|
|
42
|
+
try:
|
|
43
|
+
import requests
|
|
44
|
+
from dotenv import load_dotenv
|
|
45
|
+
except ImportError as e:
|
|
46
|
+
print(f"Error: Missing required dependencies: {e}")
|
|
47
|
+
print("Please run: pip install requests python-dotenv")
|
|
48
|
+
sys.exit(1)
|
|
49
|
+
|
|
50
|
+
# Global shutdown flag
|
|
51
|
+
shutdown_requested = False
|
|
52
|
+
|
|
53
|
+
# Configure logging
|
|
54
|
+
logger = logging.getLogger(__name__)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# =============================================================================
|
|
58
|
+
# State Management Classes
|
|
59
|
+
# =============================================================================
|
|
60
|
+
|
|
61
|
+
class GitHubStateManager:
    """
    Manages persistent state for GitHub issue processing.

    Tracks which GitHub issues have been processed and their associated
    kanban task IDs using tag_id for fast O(1) lookup. State is stored as
    NDJSON (one JSON object per line) so new records can be appended
    without rewriting the file.
    """

    def __init__(self, state_file_path: str):
        """
        Initialize GitHubStateManager.

        Args:
            state_file_path: Path to NDJSON state file (e.g., .juno_task/github/state.ndjson)
        """
        self.state_file = Path(state_file_path)
        self.issues: Dict[str, Dict[str, Any]] = {}  # Keyed by tag_id
        self._load_state()

    def _load_state(self) -> None:
        """Load existing state from NDJSON file, creating its directory if needed."""
        if not self.state_file.exists():
            logger.info(f"State file does not exist, will create: {self.state_file}")
            self.state_file.parent.mkdir(parents=True, exist_ok=True)
            self.issues = {}
            return

        try:
            self.issues = {}
            with open(self.state_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line:
                        issue = json.loads(line)
                        tag_id = issue.get('tag_id')
                        if tag_id:
                            self.issues[tag_id] = issue

            logger.info(f"Loaded {len(self.issues)} issues from {self.state_file}")

        except Exception as e:
            # Best-effort load: a corrupt state file resets in-memory state
            # (issues may then be re-processed) rather than crashing.
            logger.error(f"Error loading state from {self.state_file}: {e}")
            self.issues = {}

    def is_processed(self, issue_number: int, repo: str) -> bool:
        """
        Check if issue already processed.

        Args:
            issue_number: GitHub issue number
            repo: Repository in format "owner/repo"

        Returns:
            True if already processed, False otherwise
        """
        tag_id = self._make_tag_id(issue_number, repo)
        return tag_id in self.issues

    def mark_processed(self, issue_data: Dict[str, Any], task_id: str) -> bool:
        """
        Mark issue as processed, store task mapping.

        Args:
            issue_data: Issue data dict with keys: issue_number, repo, title, body, etc.
            task_id: Kanban task ID created for this issue

        Returns:
            True if recorded successfully, False otherwise
        """
        tag_id = self._make_tag_id(issue_data['issue_number'], issue_data['repo'])

        entry = {
            **issue_data,
            'task_id': task_id,
            'tag_id': tag_id,
            'processed_at': datetime.now(timezone.utc).isoformat()
        }

        try:
            # Append to file (atomic single write per record)
            with open(self.state_file, 'a', encoding='utf-8') as f:
                f.write(json.dumps(entry, ensure_ascii=False) + '\n')

            # Update in-memory state
            self.issues[tag_id] = entry
            logger.debug(f"Recorded issue #{issue_data['issue_number']} -> task_id={task_id}, tag_id={tag_id}")
            return True

        except Exception as e:
            logger.error(f"Error appending to {self.state_file}: {e}")
            return False

    def get_issue_for_task(self, tag_id: str) -> Optional[Dict[str, Any]]:
        """
        Get issue data by tag_id (O(1) lookup).

        Args:
            tag_id: Tag identifier (e.g., "github_issue_owner_repo_123")

        Returns:
            Issue data dict or None if not found
        """
        return self.issues.get(tag_id)

    def get_last_update_timestamp(self, repo: str) -> Optional[str]:
        """
        Get the most recent updated_at for incremental sync.

        Args:
            repo: Repository in format "owner/repo"

        Returns:
            ISO 8601 timestamp or None if no issues for this repo
        """
        # Use .get() so malformed state entries (missing 'repo' or
        # 'updated_at') are skipped instead of raising KeyError.
        timestamps = [
            i['updated_at']
            for i in self.issues.values()
            if i.get('repo') == repo and i.get('updated_at')
        ]
        if not timestamps:
            return None
        # ISO 8601 strings sort chronologically, so max() is the newest.
        return max(timestamps)

    def get_issue_count(self) -> int:
        """Get total number of processed issues."""
        return len(self.issues)

    @staticmethod
    def _make_tag_id(issue_number: int, repo: str) -> str:
        """
        Generate tag_id: github_issue_owner_repo_123

        Args:
            issue_number: GitHub issue number
            repo: Repository in format "owner/repo"

        Returns:
            Tag ID string
        """
        owner, repo_name = repo.split('/')
        # Sanitize owner and repo name (replace hyphens/special chars with underscores)
        owner = re.sub(r'[^a-zA-Z0-9_]', '_', owner)
        repo_name = re.sub(r'[^a-zA-Z0-9_]', '_', repo_name)
        return f"github_issue_{owner}_{repo_name}_{issue_number}"
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
class ResponseStateManager:
    """
    Tracks which responses have already been posted back to GitHub.

    Every (task_id, tag_id) pair may receive at most one response; the
    pairs are persisted as NDJSON so a restart never double-posts a
    comment on an issue.
    """

    def __init__(self, state_file_path: str):
        """
        Initialize ResponseStateManager.

        Args:
            state_file_path: Path to NDJSON state file
        """
        self.state_file = Path(state_file_path)
        self.sent_responses: List[Dict[str, Any]] = []
        self.sent_keys: set = set()  # (task_id, tag_id) tuples
        self._load_state()

    def _load_state(self) -> None:
        """Load existing state from NDJSON file."""
        if not self.state_file.exists():
            logger.info(f"Response state file does not exist, will create: {self.state_file}")
            self.state_file.parent.mkdir(parents=True, exist_ok=True)
            self.sent_responses = []
            self.sent_keys = set()
            return

        try:
            self.sent_responses = []
            self.sent_keys = set()
            with open(self.state_file, 'r', encoding='utf-8') as fh:
                for raw in fh:
                    raw = raw.strip()
                    if not raw:
                        continue
                    record = json.loads(raw)
                    self.sent_responses.append(record)
                    task_id = record.get('task_id')
                    tag_id = record.get('tag_id')
                    # Only well-formed records contribute to the dedup set.
                    if task_id and tag_id:
                        self.sent_keys.add((task_id, tag_id))

            logger.info(f"Loaded {len(self.sent_responses)} sent responses from {self.state_file}")

        except Exception as e:
            # Best-effort load: corrupt state resets in-memory tracking.
            logger.error(f"Error loading response state from {self.state_file}: {e}")
            self.sent_responses = []
            self.sent_keys = set()

    def was_response_sent(self, task_id: str, tag_id: str) -> bool:
        """
        Check if a response was already sent for this task/issue.

        Args:
            task_id: Kanban task ID
            tag_id: GitHub issue tag identifier

        Returns:
            True if already sent, False otherwise
        """
        return (task_id, tag_id) in self.sent_keys

    def record_sent(
        self,
        task_id: str,
        tag_id: str,
        issue_number: int,
        repo: str,
        comment_id: int,
        comment_url: str
    ) -> bool:
        """
        Record that a response was sent.

        Args:
            task_id: Kanban task ID
            tag_id: GitHub issue tag identifier
            issue_number: GitHub issue number
            repo: Repository in format "owner/repo"
            comment_id: ID of the posted comment
            comment_url: URL to the posted comment

        Returns:
            True if recorded, False if duplicate or error
        """
        key = (task_id, tag_id)
        if key in self.sent_keys:
            logger.debug(f"Response already recorded for task={task_id}, tag_id={tag_id}")
            return False

        record = {
            'task_id': task_id,
            'tag_id': tag_id,
            'issue_number': issue_number,
            'repo': repo,
            'comment_id': comment_id,
            'comment_url': comment_url,
            'sent_at': datetime.now(timezone.utc).isoformat(),
        }

        try:
            # Append to file (atomic single write per record)
            with open(self.state_file, 'a', encoding='utf-8') as fh:
                fh.write(json.dumps(record, ensure_ascii=False) + '\n')
        except Exception as e:
            logger.error(f"Error recording response to {self.state_file}: {e}")
            return False

        # Mirror the on-disk record in memory only after a successful write.
        self.sent_responses.append(record)
        self.sent_keys.add(key)
        logger.debug(f"Recorded sent response for task={task_id}, tag_id={tag_id}")
        return True

    def get_sent_count(self) -> int:
        """Get total number of sent responses."""
        return len(self.sent_responses)

    def reset_state(self) -> None:
        """
        Clear all state (WARNING: will cause re-sending).

        Use with caution - should only be called after user confirmation.
        """
        if self.state_file.exists():
            self.state_file.unlink()
            logger.warning(f"Deleted response state file: {self.state_file}")

        self.sent_responses = []
        self.sent_keys = set()
        logger.warning("Response state reset - all responses may be re-sent")
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
class CommentStateManager:
    """
    Tracks which GitHub issue comments (user replies) have been processed.

    Keyed by GitHub comment ID so the same comment is never converted to
    a kanban task twice; records are persisted as NDJSON.
    """

    def __init__(self, state_file_path: str):
        """
        Initialize CommentStateManager.

        Args:
            state_file_path: Path to NDJSON state file
        """
        self.state_file = Path(state_file_path)
        self.processed_comments: Dict[int, Dict[str, Any]] = {}  # Keyed by comment_id
        self._load_state()

    def _load_state(self) -> None:
        """Load existing state from NDJSON file."""
        if not self.state_file.exists():
            logger.info(f"Comment state file does not exist, will create: {self.state_file}")
            self.state_file.parent.mkdir(parents=True, exist_ok=True)
            self.processed_comments = {}
            return

        try:
            self.processed_comments = {}
            with open(self.state_file, 'r', encoding='utf-8') as fh:
                for raw in fh:
                    raw = raw.strip()
                    if not raw:
                        continue
                    record = json.loads(raw)
                    cid = record.get('comment_id')
                    if cid:
                        self.processed_comments[cid] = record

            logger.info(f"Loaded {len(self.processed_comments)} processed comments from {self.state_file}")

        except Exception as e:
            # Best-effort load: corrupt state resets in-memory tracking.
            logger.error(f"Error loading comment state from {self.state_file}: {e}")
            self.processed_comments = {}

    def is_comment_processed(self, comment_id: int) -> bool:
        """
        Check if a comment has already been processed.

        Args:
            comment_id: GitHub comment ID

        Returns:
            True if already processed, False otherwise
        """
        return comment_id in self.processed_comments

    def mark_comment_processed(
        self,
        comment_id: int,
        issue_number: int,
        repo: str,
        task_id: str,
        related_task_ids: List[str]
    ) -> bool:
        """
        Mark a comment as processed.

        Args:
            comment_id: GitHub comment ID
            issue_number: GitHub issue number
            repo: Repository in format "owner/repo"
            task_id: Kanban task ID created for this comment
            related_task_ids: List of previous task IDs found in the thread

        Returns:
            True if recorded, False if duplicate or error
        """
        if comment_id in self.processed_comments:
            logger.debug(f"Comment {comment_id} already recorded")
            return False

        record = {
            'comment_id': comment_id,
            'issue_number': issue_number,
            'repo': repo,
            'task_id': task_id,
            'related_task_ids': related_task_ids,
            'processed_at': datetime.now(timezone.utc).isoformat(),
        }

        try:
            # Append to file (atomic single write per record)
            with open(self.state_file, 'a', encoding='utf-8') as fh:
                fh.write(json.dumps(record, ensure_ascii=False) + '\n')
        except Exception as e:
            logger.error(f"Error recording comment to {self.state_file}: {e}")
            return False

        # Mirror the on-disk record in memory only after a successful write.
        self.processed_comments[comment_id] = record
        logger.debug(f"Recorded processed comment {comment_id} -> task_id={task_id}")
        return True

    def get_processed_count(self) -> int:
        """Get total number of processed comments."""
        return len(self.processed_comments)
|
|
447
|
+
|
|
448
|
+
|
|
449
|
+
# =============================================================================
|
|
450
|
+
# GitHub API Client
|
|
451
|
+
# =============================================================================
|
|
452
|
+
|
|
453
|
+
class GitHubClient:
    """GitHub REST API client with authentication, pagination and rate-limit logging."""

    def __init__(self, token: str, api_url: str = "https://api.github.com"):
        """
        Initialize GitHub API client.

        Args:
            token: GitHub personal access token
            api_url: GitHub API base URL (for GitHub Enterprise)
        """
        self.token = token
        self.api_url = api_url.rstrip('/')
        # A Session reuses the connection and carries auth headers on every call.
        self.session = requests.Session()
        self.session.headers.update({
            'Authorization': f'token {token}',
            'Accept': 'application/vnd.github.v3+json'
        })

    def test_connection(self) -> Dict[str, Any]:
        """
        Test GitHub API connection.

        Returns:
            User info dict

        Raises:
            requests.exceptions.HTTPError: If authentication fails
        """
        url = f"{self.api_url}/user"
        response = self.session.get(url, timeout=10)
        self._check_rate_limit(response)
        response.raise_for_status()
        return response.json()

    def list_issues(
        self,
        owner: str,
        repo: str,
        state: str = 'open',
        labels: Optional[List[str]] = None,
        assignee: Optional[str] = None,
        since: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Fetch issues from repository with filters, following pagination.

        Args:
            owner: Repository owner
            repo: Repository name
            state: Issue state (open, closed, all)
            labels: Filter by labels
            assignee: Filter by assignee
            since: Only issues updated after this timestamp (ISO 8601)

        Returns:
            List of issue dicts (pull requests are excluded)
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues"
        params = {'state': state, 'per_page': 100}

        if labels:
            params['labels'] = ','.join(labels)
        if assignee:
            params['assignee'] = assignee
        if since:
            params['since'] = since

        issues = []
        page = 1

        while True:
            params['page'] = page
            logger.debug(f"Fetching issues page {page}...")

            try:
                response = self.session.get(url, params=params, timeout=30)
                self._check_rate_limit(response)
                response.raise_for_status()

                page_issues = response.json()
                if not page_issues:
                    break

                # Filter out pull requests (GitHub API returns both issues and PRs)
                page_issues = [i for i in page_issues if 'pull_request' not in i]

                issues.extend(page_issues)
                page += 1

                # Follow RFC 5988 Link header pagination.
                link_header = response.headers.get('Link', '')
                if 'rel="next"' not in link_header:
                    break

            except requests.exceptions.Timeout:
                # Best-effort: return whatever was fetched so far.
                logger.error(f"Timeout fetching issues from {owner}/{repo}")
                break
            except requests.exceptions.HTTPError as e:
                logger.error(f"HTTP error fetching issues: {e}")
                break

        logger.debug(f"Fetched {len(issues)} issues from {owner}/{repo}")
        return issues

    def get_issue(self, owner: str, repo: str, issue_number: int) -> Dict[str, Any]:
        """
        Get a specific issue.

        Args:
            owner: Repository owner
            repo: Repository name
            issue_number: Issue number

        Returns:
            Issue dict

        Raises:
            requests.exceptions.HTTPError: If issue not found
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues/{issue_number}"
        response = self.session.get(url, timeout=10)
        self._check_rate_limit(response)
        response.raise_for_status()
        return response.json()

    def post_comment(self, owner: str, repo: str, issue_number: int, body: str) -> Dict[str, Any]:
        """
        Post a comment on an issue.

        Args:
            owner: Repository owner
            repo: Repository name
            issue_number: Issue number
            body: Comment body (markdown)

        Returns:
            Comment dict with id, url, etc.

        Raises:
            requests.exceptions.HTTPError: If comment fails to post
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues/{issue_number}/comments"
        response = self.session.post(url, json={'body': body}, timeout=30)
        self._check_rate_limit(response)
        response.raise_for_status()
        return response.json()

    def close_issue(self, owner: str, repo: str, issue_number: int) -> Dict[str, Any]:
        """
        Close an issue.

        Args:
            owner: Repository owner
            repo: Repository name
            issue_number: Issue number

        Returns:
            Updated issue dict

        Raises:
            requests.exceptions.HTTPError: If close fails
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues/{issue_number}"
        response = self.session.patch(url, json={'state': 'closed'}, timeout=30)
        self._check_rate_limit(response)
        response.raise_for_status()
        return response.json()

    def create_issue(self, owner: str, repo: str, title: str, body: str, labels: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Create a new issue.

        Args:
            owner: Repository owner
            repo: Repository name
            title: Issue title
            body: Issue body (markdown)
            labels: Optional list of label names

        Returns:
            Created issue dict with number, url, etc.

        Raises:
            requests.exceptions.HTTPError: If issue creation fails
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues"
        payload = {'title': title, 'body': body}

        if labels:
            payload['labels'] = labels

        response = self.session.post(url, json=payload, timeout=30)
        self._check_rate_limit(response)
        response.raise_for_status()
        return response.json()

    def list_issue_comments(
        self,
        owner: str,
        repo: str,
        issue_number: int,
        since: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Fetch all comments on an issue, following pagination.

        Args:
            owner: Repository owner
            repo: Repository name
            issue_number: Issue number
            since: Only comments updated after this timestamp (ISO 8601)

        Returns:
            List of comment dicts
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues/{issue_number}/comments"
        params = {'per_page': 100}

        if since:
            params['since'] = since

        comments = []
        page = 1

        while True:
            params['page'] = page
            logger.debug(f"Fetching comments page {page} for issue #{issue_number}...")

            try:
                response = self.session.get(url, params=params, timeout=30)
                self._check_rate_limit(response)
                response.raise_for_status()

                page_comments = response.json()
                if not page_comments:
                    break

                comments.extend(page_comments)
                page += 1

                # Follow RFC 5988 Link header pagination.
                link_header = response.headers.get('Link', '')
                if 'rel="next"' not in link_header:
                    break

            except requests.exceptions.Timeout:
                logger.error(f"Timeout fetching comments for issue #{issue_number}")
                break
            except requests.exceptions.HTTPError as e:
                logger.error(f"HTTP error fetching comments: {e}")
                break

        logger.debug(f"Fetched {len(comments)} comments for issue #{issue_number}")
        return comments

    def reopen_issue(self, owner: str, repo: str, issue_number: int) -> Dict[str, Any]:
        """
        Reopen a closed issue.

        Args:
            owner: Repository owner
            repo: Repository name
            issue_number: Issue number

        Returns:
            Updated issue dict

        Raises:
            requests.exceptions.HTTPError: If reopen fails
        """
        url = f"{self.api_url}/repos/{owner}/{repo}/issues/{issue_number}"
        response = self.session.patch(url, json={'state': 'open'}, timeout=30)
        self._check_rate_limit(response)
        response.raise_for_status()
        return response.json()

    def _check_rate_limit(self, response) -> None:
        """Log a warning when the GitHub API rate limit is running low."""
        remaining = response.headers.get('X-RateLimit-Remaining')
        reset = response.headers.get('X-RateLimit-Reset')

        if remaining:
            remaining = int(remaining)
            if remaining < 100:
                logger.warning(f"GitHub API rate limit low: {remaining} remaining")
                if reset:
                    # X-RateLimit-Reset is a UTC epoch; the previous naive
                    # fromtimestamp() silently reported it in local time.
                    reset_time = datetime.fromtimestamp(int(reset), tz=timezone.utc)
                    logger.warning(f"Rate limit resets at: {reset_time}")
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
# =============================================================================
|
|
745
|
+
# Utility Functions
|
|
746
|
+
# =============================================================================
|
|
747
|
+
|
|
748
|
+
def setup_logging(verbose: bool = False) -> None:
    """Configure root logging; the LOG_LEVEL env var overrides the verbose flag."""
    level = logging.DEBUG if verbose else logging.INFO

    # An explicit LOG_LEVEL environment variable wins over the CLI flag.
    override = os.getenv('LOG_LEVEL', '').upper()
    if override in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
        level = getattr(logging, override)

    logging.basicConfig(
        level=level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
|
|
761
|
+
|
|
762
|
+
|
|
763
|
+
def signal_handler(signum: int, frame) -> None:
    """Set the module-level shutdown flag so loops can exit gracefully."""
    global shutdown_requested
    name = signal.Signals(signum).name
    logger.info(f"Received {name}, initiating graceful shutdown...")
    shutdown_requested = True
|
|
769
|
+
|
|
770
|
+
|
|
771
|
+
def sanitize_tag(tag: str) -> str:
    """
    Normalize a raw tag into a kanban-compatible identifier.

    The kanban system accepts only letters, digits, underscores (_) and
    hyphens (-); everything else collapses into single underscores.

    Args:
        tag: The raw tag string

    Returns:
        Sanitized tag compatible with kanban system
    """
    # Spaces and colons map to underscores, then any other disallowed
    # character does too (hyphens are preserved).
    cleaned = tag.replace(' ', '_').replace(':', '_')
    cleaned = re.sub(r'[^a-zA-Z0-9_-]', '_', cleaned)
    # Squash runs of underscores and trim them from both ends.
    return re.sub(r'_+', '_', cleaned).strip('_')
|
|
792
|
+
|
|
793
|
+
|
|
794
|
+
def extract_github_tag(tags: List[str]) -> Optional[str]:
    """
    Find the github_issue_* identifier among a task's tags.

    Args:
        tags: List of task tags (may be None)

    Returns:
        The first tag starting with 'github_issue_', or None if absent
    """
    if tags is None:
        return None
    return next((t for t in tags if t.startswith('github_issue_')), None)
|
|
810
|
+
|
|
811
|
+
|
|
812
|
+
def extract_parent_github_tag(tags: List[str]) -> Optional[str]:
    """
    Find the parent_github_issue_* tag of a reply task and resolve it.

    Reply tasks (created from GitHub issue comments) carry a
    parent_github_issue_* tag pointing at the original issue they should
    reply to.

    Args:
        tags: List of task tags (may be None)

    Returns:
        The corresponding github_issue_* tag (the 'parent_' prefix removed),
        or None if no such tag exists.

    Example:
        tags = ['github-reply', 'parent_github_issue_owner_repo_123']
        returns: 'github_issue_owner_repo_123'
    """
    if tags is None:
        return None
    prefix = 'parent_github_issue_'
    for candidate in tags:
        if candidate.startswith(prefix):
            # Drop 'parent_' so the result is the plain github_issue_* tag.
            return 'github_issue_' + candidate[len(prefix):]
    return None
|
|
836
|
+
|
|
837
|
+
|
|
838
|
+
def is_reply_task(tags: List[str]) -> bool:
    """
    Tell whether a task originated from a GitHub issue comment.

    Args:
        tags: List of task tags (may be None)

    Returns:
        True when the 'github-reply' marker tag is present, False otherwise.
    """
    return tags is not None and 'github-reply' in tags
|
|
851
|
+
|
|
852
|
+
|
|
853
|
+
def parse_tag_id(tag_id: str) -> Optional[Dict[str, Any]]:
    """
    Split a tag_id into its owner / repo / issue-number components.

    Args:
        tag_id: Tag ID such as "github_issue_owner_repo_123"

    Returns:
        Dict with keys: owner, repo, issue_number, full_repo;
        None when the string is not a well-formed tag id.

    Note:
        The tag encodes fields with '_' separators, so the first segment is
        taken as the owner and the remaining middle segments as the repo
        name (repo names containing '_' round-trip; owners containing '_'
        would not).
    """
    prefix = 'github_issue_'
    if not tag_id.startswith(prefix):
        return None

    segments = tag_id[len(prefix):].split('_')
    # Need at least owner + repo + issue number.
    if len(segments) < 3:
        return None

    # The final segment must be a numeric issue number.
    try:
        number = int(segments[-1])
    except ValueError:
        return None

    account = segments[0]
    project = '_'.join(segments[1:-1])

    return {
        'owner': account,
        'repo': project,
        'issue_number': number,
        'full_repo': f"{account}/{project}",
    }
|
|
890
|
+
|
|
891
|
+
|
|
892
|
+
def validate_repo_format(repo: str) -> bool:
    """
    Validate that a repository string looks like "owner/repo".

    Args:
        repo: Repository string to check

    Returns:
        True when both the owner and repo-name parts are non-empty,
        False otherwise (an error is logged in that case).
    """
    if '/' in repo:
        # Split on the first '/' only, mirroring GitHub's owner/name form.
        head, tail = repo.split('/', 1)
        if head and tail:
            return True

    logger.error(f"Invalid repo format: {repo}. Expected: owner/repo")
    return False
|
|
912
|
+
|
|
913
|
+
|
|
914
|
+
def extract_task_ids_from_text(text: str) -> List[str]:
    """
    Pull task IDs out of text marked up as [task_id]...[/task_id].

    A single marker may hold several comma-separated IDs; these are split
    apart and whitespace-trimmed. Matching is case-insensitive.

    Args:
        text: Text to scan

    Returns:
        List of task IDs in order of appearance (duplicates kept).
    """
    marker_re = r'\[task_id\]([^[]+)\[/task_id\]'
    found: List[str] = []
    for chunk in re.findall(marker_re, text, re.IGNORECASE):
        # Comma-separated lists inside one marker become individual IDs.
        found.extend(piece.strip() for piece in chunk.split(',') if piece.strip())
    return found
|
|
936
|
+
|
|
937
|
+
|
|
938
|
+
def is_agent_comment(comment_body: str) -> bool:
    """
    Decide whether a comment was posted by our agent rather than a user.

    Agent-posted comments open with a bold task-id marker of the form
    **[task_id]...[/task_id]**; user replies do not start that way.

    Args:
        comment_body: Comment body text

    Returns:
        True when the body starts with the agent marker, False otherwise.
    """
    marker = r'^\s*\*\*\[task_id\][^\[]+\[/task_id\]\*\*'
    return re.match(marker, comment_body.strip()) is not None
|
|
956
|
+
|
|
957
|
+
|
|
958
|
+
def collect_task_ids_from_thread(
    issue_body: str,
    comments: List[Dict[str, Any]]
) -> List[str]:
    """
    Gather every task ID mentioned in an issue thread (body plus comments).

    Args:
        issue_body: The issue body text (may be None)
        comments: List of comment dicts

    Returns:
        Task IDs with duplicates removed, first-occurrence order preserved.
    """
    gathered = extract_task_ids_from_text(issue_body or '')
    # Both agent responses and user replies can reference task IDs.
    for entry in comments:
        gathered.extend(extract_task_ids_from_text(entry.get('body', '')))

    # dict.fromkeys de-duplicates while keeping insertion order.
    return list(dict.fromkeys(gathered))
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
# URL surfaced in error messages and help text for generating a GitHub
# personal access token.
GITHUB_TOKEN_DOCS_URL = "https://github.com/settings/tokens"
|
|
994
|
+
|
|
995
|
+
|
|
996
|
+
def validate_github_environment() -> Tuple[Optional[str], Optional[str], List[str]]:
    """
    Validate that the GitHub environment variables are configured.

    Returns:
        Tuple of (token, repo, errors)
        - token: GITHUB_TOKEN value if set, None otherwise
        - repo: GITHUB_REPO value if set, None otherwise
        - errors: human-readable messages for each validation failure
    """
    problems: List[str] = []

    token = os.getenv('GITHUB_TOKEN')
    if not token:
        problems.append(
            "GITHUB_TOKEN not found.\n"
            " Generate a token at: https://github.com/settings/tokens\n"
            " Required scopes: 'repo' (private) or 'public_repo' (public)\n"
            " Set via environment variable or .env file:\n"
            " export GITHUB_TOKEN=ghp_your_token_here\n"
            " Or add to .env file:\n"
            " GITHUB_TOKEN=ghp_your_token_here"
        )
    elif not token.startswith(('ghp_', 'gho_', 'ghs_')):
        # Personal access tokens carry one of these well-known prefixes.
        problems.append(
            f"GITHUB_TOKEN appears invalid.\n"
            f" Personal access tokens start with: ghp_, gho_, or ghs_\n"
            f" Current value starts with: {token[:10]}...\n"
            f" Generate a valid token at: {GITHUB_TOKEN_DOCS_URL}"
        )

    # Repo is optional; the caller may supply one via --repo instead.
    repo = os.getenv('GITHUB_REPO')

    return token, repo, problems
|
|
1032
|
+
|
|
1033
|
+
|
|
1034
|
+
def print_env_help() -> None:
    """Print help message about configuring GitHub environment variables."""
    # Banner, then one large f-string so the help text stays in a single
    # place; GITHUB_TOKEN_DOCS_URL is interpolated where the token is minted.
    print("\n" + "=" * 70)
    print("GitHub Integration - Environment Configuration")
    print("=" * 70)
    print(f"""
Required Environment Variables:
  GITHUB_TOKEN            Your GitHub personal access token (ghp_*, gho_*, ghs_*)

Optional Environment Variables:
  GITHUB_REPO             Default repository (format: owner/repo)
  GITHUB_API_URL          GitHub API URL (default: https://api.github.com)
                          For GitHub Enterprise: https://github.company.com/api/v3
  CHECK_INTERVAL_SECONDS  Polling interval in seconds (default: 300)
  LOG_LEVEL               DEBUG, INFO, WARNING, ERROR (default: INFO)

Configuration Methods:
  1. Environment variables:
     export GITHUB_TOKEN=ghp_your_token_here
     export GITHUB_REPO=owner/repo

  2. .env file (in project root):
     GITHUB_TOKEN=ghp_your_token_here
     GITHUB_REPO=owner/repo

Generating a GitHub Personal Access Token:
  1. Go to {GITHUB_TOKEN_DOCS_URL}
  2. Click "Generate new token" (classic or fine-grained)
  3. Select scopes:
     - For private repos: 'repo' (full control of private repositories)
     - For public repos: 'public_repo' (access public repositories)
  4. Generate token and copy it (starts with ghp_, gho_, or ghs_)
  5. Set GITHUB_TOKEN environment variable or add to .env file

Example .env file:
  GITHUB_TOKEN=ghp_YOUR_PERSONAL_ACCESS_TOKEN_HERE
  GITHUB_REPO=myorg/myrepo
  CHECK_INTERVAL_SECONDS=600
  LOG_LEVEL=INFO
""")
    print("=" * 70 + "\n")
|
|
1075
|
+
|
|
1076
|
+
|
|
1077
|
+
def find_kanban_script(project_dir: Path) -> Optional[str]:
    """
    Locate the kanban.sh script under known project locations.

    Args:
        project_dir: Project root directory to search under

    Returns:
        String path of the first existing candidate, or None (an error
        is logged when nothing is found).
    """
    # Checked in priority order: the juno task dir first, then ./scripts.
    search_order = (
        project_dir / '.juno_task' / 'scripts' / 'kanban.sh',
        project_dir / 'scripts' / 'kanban.sh',
    )

    for candidate in search_order:
        if candidate.exists():
            return str(candidate)

    logger.error("Could not find kanban.sh script")
    return None
|
|
1090
|
+
|
|
1091
|
+
|
|
1092
|
+
# =============================================================================
|
|
1093
|
+
# Kanban Integration
|
|
1094
|
+
# =============================================================================
|
|
1095
|
+
|
|
1096
|
+
def create_kanban_task_from_issue(
    issue: Dict[str, Any],
    repo: str,
    kanban_script: str,
    dry_run: bool = False
) -> Optional[str]:
    """
    Create a kanban task from a GitHub issue.

    Args:
        issue: GitHub issue dict
        repo: Repository in format "owner/repo"
        kanban_script: Path to kanban.sh script
        dry_run: If True, don't actually create the task

    Returns:
        Task ID if created ("unknown-task-id" when creation succeeded but
        the kanban output could not be interpreted), None if creation failed.
    """
    owner, repo_name = repo.split('/')
    issue_number = issue['number']

    # Generate tag_id used for O(1) issue<->task mapping.
    tag_id = GitHubStateManager._make_tag_id(issue_number, repo)

    # Build task body - optimized for token efficiency:
    # just the title and description, all metadata lives in tags.
    task_body = f"# {issue['title']}\n\n"
    task_body += issue['body'] or "(No description)"

    tags = [
        'github-input',
        f'repo_{sanitize_tag(owner)}_{sanitize_tag(repo_name)}',
        f"author_{sanitize_tag(issue['user']['login'])}",
        f"issue_{issue_number}",
        f"state_{issue['state']}"
    ]

    for label in issue.get('labels', []):
        tags.append(f"label_{sanitize_tag(label['name'])}")

    for assignee in issue.get('assignees', []):
        tags.append(f"assignee_{sanitize_tag(assignee['login'])}")

    # Add tag_id as a tag
    tags.append(tag_id)

    if dry_run:
        logger.info(f"[DRY RUN] Would create task with tag_id: {tag_id}")
        logger.debug(f"[DRY RUN] Body: {task_body[:200]}...")
        logger.debug(f"[DRY RUN] Tags: {', '.join(tags)}")
        return "dry-run-task-id"

    try:
        # Execute kanban create command (list form, no shell).
        cmd = [kanban_script, 'create', task_body, '--tags', ','.join(tags)]
        logger.debug(f"Running: {' '.join(cmd[:3])}...")

        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=30
        )

        if result.returncode == 0:
            # Parse output to get task ID.
            try:
                output = json.loads(result.stdout)
            except json.JSONDecodeError:
                logger.warning(f"Could not parse kanban output: {result.stdout[:200]}")
                return "unknown-task-id"

            if isinstance(output, list) and len(output) > 0:
                task_id = output[0].get('id')
                logger.info(f"Created kanban task: {task_id} (tag_id: {tag_id})")
                return task_id

            # FIX: previously this path fell through and implicitly returned
            # None even though the task WAS created, so the caller never
            # marked the issue processed and duplicated the task on the next
            # poll. Treat it like the unparseable-output case instead.
            logger.warning(f"Could not parse kanban output: {result.stdout[:200]}")
            return "unknown-task-id"
        else:
            logger.error(f"Failed to create kanban task: {result.stderr}")
            return None

    except subprocess.TimeoutExpired:
        logger.error("Kanban command timed out")
        return None
    except Exception as e:
        logger.error(f"Error creating kanban task: {e}")
        return None
|
|
1182
|
+
|
|
1183
|
+
|
|
1184
|
+
def get_completed_tasks_with_responses(
    kanban_script: str,
    tag_filter: Optional[str] = None,
    limit: int = 10000
) -> List[Dict[str, Any]]:
    """
    List kanban tasks that carry a non-empty agent response.

    Args:
        kanban_script: Path to kanban.sh script
        tag_filter: Optional tag to filter by
        limit: Maximum number of tasks to retrieve

    Returns:
        Tasks whose agent_response is set (and not the string 'null');
        an empty list on any failure.
    """
    command = [kanban_script, 'list', '--limit', str(limit)]
    if tag_filter:
        command += ['--tag', tag_filter]

    logger.debug(f"Running: {' '.join(command)}")

    try:
        proc = subprocess.run(
            command,
            capture_output=True,
            text=True,
            timeout=30
        )

        if proc.returncode != 0:
            logger.error(f"Kanban command failed: {proc.stderr}")
            return []

        try:
            payload = json.loads(proc.stdout)
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse kanban output: {e}")
            return []

        if not isinstance(payload, list):
            logger.warning(f"Unexpected kanban output format: {type(payload)}")
            return []

        # Keep only tasks that actually have an agent response.
        return [t for t in payload if t.get('agent_response') and t['agent_response'] != 'null']

    except subprocess.TimeoutExpired:
        logger.error("Kanban command timed out")
        return []
    except Exception as e:
        logger.error(f"Error running kanban command: {e}")
        return []
|
|
1236
|
+
|
|
1237
|
+
|
|
1238
|
+
def get_all_kanban_tasks(
    kanban_script: str,
    tag_filter: Optional[str] = None,
    status_filter: Optional[List[str]] = None,
    limit: int = 10000
) -> List[Dict[str, Any]]:
    """
    List kanban tasks, optionally filtered by tag and/or status.

    Args:
        kanban_script: Path to kanban.sh script
        tag_filter: Optional tag to filter by
        status_filter: Optional list of statuses to filter by
        limit: Maximum number of tasks to retrieve

    Returns:
        List of task dicts; an empty list on any failure.
    """
    command = [kanban_script, 'list', '--limit', str(limit)]
    if tag_filter:
        command += ['--tag', tag_filter]
    if status_filter:
        command += ['--status'] + status_filter

    logger.debug(f"Running: {' '.join(command)}")

    try:
        proc = subprocess.run(
            command,
            capture_output=True,
            text=True,
            timeout=30
        )

        if proc.returncode != 0:
            logger.error(f"Kanban command failed: {proc.stderr}")
            return []

        try:
            payload = json.loads(proc.stdout)
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse kanban output: {e}")
            return []

        if isinstance(payload, list):
            return payload

        logger.warning(f"Unexpected kanban output format: {type(payload)}")
        return []

    except subprocess.TimeoutExpired:
        logger.error("Kanban command timed out")
        return []
    except Exception as e:
        logger.error(f"Error running kanban command: {e}")
        return []
|
|
1294
|
+
|
|
1295
|
+
|
|
1296
|
+
def add_tag_to_kanban_task(kanban_script: str, task_id: str, tag: str) -> bool:
    """
    Add a tag to an existing kanban task via `kanban.sh update`.

    Args:
        kanban_script: Path to kanban.sh script
        task_id: Task ID
        tag: Tag to add

    Returns:
        True on success, False on any failure (logged).
    """
    command = [kanban_script, 'update', task_id, '--tags', tag]

    logger.debug(f"Running: {' '.join(command)}")

    try:
        proc = subprocess.run(
            command,
            capture_output=True,
            text=True,
            timeout=10
        )
        if proc.returncode == 0:
            return True
        logger.error(f"Failed to tag task {task_id}: {proc.stderr}")
        return False

    except subprocess.TimeoutExpired:
        logger.error(f"Tag command timed out for task {task_id}")
        return False
    except Exception as e:
        logger.error(f"Error tagging task {task_id}: {e}")
        return False
|
|
1332
|
+
|
|
1333
|
+
|
|
1334
|
+
def create_kanban_task_from_comment(
    comment: Dict[str, Any],
    issue: Dict[str, Any],
    repo: str,
    kanban_script: str,
    related_task_ids: List[str],
    dry_run: bool = False
) -> Optional[str]:
    """
    Create a kanban task from a GitHub issue comment (user reply).

    Args:
        comment: GitHub comment dict
        issue: GitHub issue dict (parent issue)
        repo: Repository in format "owner/repo"
        kanban_script: Path to kanban.sh script
        related_task_ids: List of previous task IDs from the thread
        dry_run: If True, don't actually create the task

    Returns:
        Task ID if created ("unknown-task-id" when creation succeeded but
        the kanban output could not be interpreted), None if creation failed.
    """
    owner, repo_name = repo.split('/')
    issue_number = issue['number']
    comment_id = comment['id']

    # Generate tag_id for the comment (different from issue tag_id).
    # Format: ghc_{comment_id} - shortened to stay under 50 char kanban tag limit.
    # Comment IDs are globally unique across GitHub, so no need for repo/issue in tag.
    comment_tag_id = f"ghc_{comment_id}"

    # Build task body, starting with the reply content.
    task_body = f"# Reply to Issue #{issue_number}: {issue['title']}\n\n"
    task_body += comment['body'] or "(No content)"

    # Add previous task_id references if any
    if related_task_ids:
        task_body += f"\n\n[task_id]{','.join(related_task_ids)}[/task_id]"

    tags = [
        'github-input',
        'github-reply',  # Mark as a reply specifically
        f'repo_{sanitize_tag(owner)}_{sanitize_tag(repo_name)}',
        f"author_{sanitize_tag(comment['user']['login'])}",
        f"issue_{issue_number}",
        f"comment_{comment_id}",
    ]

    # Add the issue's tag_id as a related tag (for O(1) lookup)
    issue_tag_id = GitHubStateManager._make_tag_id(issue_number, repo)
    tags.append(f"parent_{issue_tag_id}")

    # Add comment tag_id
    tags.append(comment_tag_id)

    if dry_run:
        logger.info(f"[DRY RUN] Would create task for comment #{comment_id} on issue #{issue_number}")
        logger.debug(f"[DRY RUN] Body: {task_body[:200]}...")
        logger.debug(f"[DRY RUN] Tags: {', '.join(tags)}")
        logger.debug(f"[DRY RUN] Related task IDs: {related_task_ids}")
        return "dry-run-task-id"

    try:
        # Execute kanban create command (list form, no shell).
        cmd = [kanban_script, 'create', task_body, '--tags', ','.join(tags)]
        logger.debug(f"Running: {' '.join(cmd[:3])}...")

        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=30
        )

        if result.returncode == 0:
            # Parse output to get task ID.
            try:
                output = json.loads(result.stdout)
            except json.JSONDecodeError:
                logger.warning(f"Could not parse kanban output: {result.stdout[:200]}")
                return "unknown-task-id"

            if isinstance(output, list) and len(output) > 0:
                task_id = output[0].get('id')
                logger.info(f"Created kanban task from comment: {task_id} (comment_id: {comment_id})")
                return task_id

            # FIX: previously this path fell through and implicitly returned
            # None even though the task WAS created, so the caller never
            # marked the comment processed and duplicated the reply task on
            # the next poll. Treat it like the unparseable-output case.
            logger.warning(f"Could not parse kanban output: {result.stdout[:200]}")
            return "unknown-task-id"
        else:
            logger.error(f"Failed to create kanban task from comment: {result.stderr}")
            return None

    except subprocess.TimeoutExpired:
        logger.error("Kanban command timed out")
        return None
    except Exception as e:
        logger.error(f"Error creating kanban task from comment: {e}")
        return None
|
|
1431
|
+
|
|
1432
|
+
|
|
1433
|
+
def process_issue_comments(
    client: 'GitHubClient',
    issue: Dict[str, Any],
    repo: str,
    kanban_script: str,
    comment_state_mgr: 'CommentStateManager',
    dry_run: bool = False
) -> Tuple[int, int]:
    """
    Process comments on an issue, creating kanban tasks for user replies.

    This detects:
    1. Agent comments (identified by **[task_id]...[/task_id]** format)
    2. User replies (any comment that is NOT an agent comment)

    For user replies, creates a kanban task with references to previous task_ids.

    Args:
        client: GitHub API client
        issue: GitHub issue dict
        repo: Repository in format "owner/repo"
        kanban_script: Path to kanban.sh script
        comment_state_mgr: CommentStateManager for tracking processed comments
        dry_run: If True, don't actually create tasks

    Returns:
        Tuple of (processed_count, created_count) — processed counts user
        replies examined; created counts kanban tasks successfully made.
    """
    owner, repo_name = repo.split('/')
    issue_number = issue['number']

    # Fetch all comments for this issue
    comments = client.list_issue_comments(owner, repo_name, issue_number)

    if not comments:
        logger.debug(f"Issue #{issue_number}: No comments")
        return 0, 0

    logger.debug(f"Issue #{issue_number}: Found {len(comments)} comments")

    processed = 0
    created = 0

    # Collect all task_ids from the thread once; every reply task created
    # below links back to this same set for context.
    all_task_ids = collect_task_ids_from_thread(issue.get('body', ''), comments)

    for comment in comments:
        comment_id = comment['id']
        comment_body = comment.get('body', '')
        comment_author = comment['user']['login']

        # Skip already processed comments (persistent NDJSON-backed state).
        if comment_state_mgr.is_comment_processed(comment_id):
            logger.debug(f"  Comment #{comment_id}: Already processed, skipping")
            continue

        # Skip agent comments (our own responses)
        if is_agent_comment(comment_body):
            logger.debug(f"  Comment #{comment_id}: Agent comment, skipping")
            # Still mark as processed to avoid re-checking; the sentinel
            # "agent-comment" stands in for a task id.
            if not dry_run:
                comment_state_mgr.mark_comment_processed(
                    comment_id, issue_number, repo, "agent-comment", []
                )
            continue

        # This is a user reply - create a kanban task
        logger.info(f"  Comment #{comment_id} (@{comment_author}): User reply detected")
        processed += 1

        task_id = create_kanban_task_from_comment(
            comment, issue, repo, kanban_script, all_task_ids, dry_run
        )

        if task_id:
            # Only persist state on real runs so dry runs stay repeatable.
            if not dry_run:
                comment_state_mgr.mark_comment_processed(
                    comment_id, issue_number, repo, task_id, all_task_ids
                )
            logger.info(f"  ✓ Created kanban task: {task_id}")
            if all_task_ids:
                logger.info(f"  ✓ Linked to previous task(s): {', '.join(all_task_ids)}")
            created += 1
        else:
            # Not marked processed — a failed comment is retried next cycle.
            logger.warning(f"  ✗ Failed to create task for comment #{comment_id}")

    return processed, created
|
|
1520
|
+
|
|
1521
|
+
|
|
1522
|
+
# =============================================================================
|
|
1523
|
+
# Command Handlers
|
|
1524
|
+
# =============================================================================
|
|
1525
|
+
|
|
1526
|
+
def handle_fetch(args: argparse.Namespace) -> int:
    """
    Handle the 'fetch' subcommand.

    Polls GitHub for new issues (and, optionally, new comments/replies) and
    creates kanban tasks for each, tracking processed items in NDJSON state
    files. Runs once with --once, otherwise loops until a SIGINT/SIGTERM
    sets the module-level shutdown_requested flag.

    Returns:
        0 on clean shutdown, 1 on configuration/connection failure (or on
        the first loop error when --once is set).
    """
    logger.info("=" * 70)
    logger.info("GitHub Fetch - Creating kanban tasks from GitHub issues")
    logger.info("=" * 70)

    # Validate environment
    token, default_repo, errors = validate_github_environment()
    if errors:
        for error in errors:
            logger.error(error)
        print_env_help()
        return 1

    # CLI flag wins over the GITHUB_REPO environment default.
    repo = args.repo or default_repo
    if not repo:
        logger.error("No repository specified. Use --repo or set GITHUB_REPO")
        return 1

    if not validate_repo_format(repo):
        return 1

    owner, repo_name = repo.split('/')

    # Find project root and kanban script
    project_dir = Path.cwd()
    kanban_script = find_kanban_script(project_dir)
    if not kanban_script:
        logger.error("Cannot find kanban.sh script. Is the project initialized?")
        return 1

    # Initialize GitHub client
    logger.info("Initializing GitHub client...")
    api_url = os.getenv('GITHUB_API_URL', 'https://api.github.com')
    client = GitHubClient(token, api_url)

    # Test connection before entering the loop so auth errors fail fast.
    try:
        user_info = client.test_connection()
        logger.info(f"Connected to GitHub API (user: {user_info['login']})")
    except requests.exceptions.HTTPError as e:
        error_msg = f"Failed to connect to GitHub: {e}"
        logger.error(error_msg)
        print(f"\n❌ ERROR: {error_msg}", file=sys.stderr)
        if hasattr(e, 'response') and e.response is not None:
            # Best-effort extraction of GitHub's error payload.
            try:
                error_detail = e.response.json()
                print(f"  Details: {error_detail.get('message', 'No details available')}", file=sys.stderr)
            except:  # NOTE(review): bare except — narrows to ValueError would be safer
                print(f"  HTTP Status: {e.response.status_code}", file=sys.stderr)
        print("  Check your GITHUB_TOKEN permissions and validity", file=sys.stderr)
        return 1

    # Initialize state manager (NDJSON file tracks processed issues).
    state_dir = project_dir / '.juno_task' / 'github'
    state_file = state_dir / 'state.ndjson'
    logger.info(f"Initializing state manager: {state_file}")
    state_mgr = GitHubStateManager(str(state_file))

    # Initialize comment state manager for tracking processed comments/replies
    comment_state_file = state_dir / 'comments.ndjson'
    logger.info(f"Initializing comment state manager: {comment_state_file}")
    comment_state_mgr = CommentStateManager(str(comment_state_file))

    # Determine --since for incremental fetch: explicit flag, else the last
    # update timestamp recorded in state.
    since = args.since or state_mgr.get_last_update_timestamp(repo)

    # Check if we should include comments (defaults to True when the CLI
    # did not define the attribute).
    include_comments = getattr(args, 'include_comments', True)

    if args.dry_run:
        logger.info("Running in DRY RUN mode - no tasks will be created")

    logger.info(f"Monitoring repository: {repo}")
    logger.info(f"Filters: labels={args.labels or 'None'} assignee={args.assignee or 'None'} state={args.state}")
    logger.info(f"Include comments/replies: {include_comments}")
    logger.info(f"Mode: {'once' if args.once else 'continuous'}")
    if since:
        logger.info(f"Incremental sync since: {since}")
    logger.info("-" * 70)

    # Get check interval: CLI flag, then env var, then 300s default.
    check_interval = args.interval or int(os.getenv('CHECK_INTERVAL_SECONDS', 300))

    # Register signal handlers so SIGINT/SIGTERM request a clean shutdown.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Main loop
    iteration = 0
    total_processed = 0
    total_comments_processed = 0
    total_replies_created = 0

    while not shutdown_requested:
        iteration += 1
        logger.debug(f"Starting iteration {iteration}")

        try:
            # Fetch issues matching the configured filters.
            labels = args.labels.split(',') if args.labels else None
            issues = client.list_issues(
                owner,
                repo_name,
                state=args.state,
                labels=labels,
                assignee=args.assignee,
                since=since
            )

            # Filter already processed issues (but we still need to check their comments)
            new_issues = [i for i in issues if not state_mgr.is_processed(i['number'], repo)]

            if new_issues:
                logger.info(f"Processing {len(new_issues)} new issues...")

                for issue in new_issues:
                    logger.info(f"  Issue #{issue['number']} (@{issue['user']['login']}): {issue['title']}")

                    task_id = create_kanban_task_from_issue(issue, repo, kanban_script, args.dry_run)

                    if task_id:
                        # Persist a full snapshot of the issue so later runs
                        # can respond without refetching.
                        if not args.dry_run:
                            state_mgr.mark_processed({
                                'issue_number': issue['number'],
                                'repo': repo,
                                'title': issue['title'],
                                'body': issue['body'],
                                'author': issue['user']['login'],
                                'author_id': issue['user']['id'],
                                'labels': [l['name'] for l in issue.get('labels', [])],
                                'assignees': [a['login'] for a in issue.get('assignees', [])],
                                'state': issue['state'],
                                'created_at': issue['created_at'],
                                'updated_at': issue['updated_at'],
                                'issue_url': issue['url'],
                                'issue_html_url': issue['html_url']
                            }, task_id)

                        logger.info(f"  ✓ Created kanban task: {task_id}")
                        total_processed += 1
                    else:
                        logger.warning(f"  ✗ Failed to create task for issue #{issue['number']}")
            else:
                logger.debug("No new issues")

            # Process comments/replies on all issues (including already processed ones)
            # This handles the case where a user replies to a closed issue and reopens it
            if include_comments:
                logger.info("Checking for new comments/replies on issues...")

                # Fetch ALL issues to check for new comments (state='all' for reopened issues)
                all_issues = client.list_issues(
                    owner,
                    repo_name,
                    state='all',  # Include closed and reopened issues
                    labels=labels,
                    assignee=args.assignee,
                    since=since
                )

                for issue in all_issues:
                    # Process comments on this issue
                    comments_processed, replies_created = process_issue_comments(
                        client,
                        issue,
                        repo,
                        kanban_script,
                        comment_state_mgr,
                        args.dry_run
                    )

                    if replies_created > 0:
                        logger.info(f"Issue #{issue['number']}: Created {replies_created} task(s) from user replies")

                    total_comments_processed += comments_processed
                    total_replies_created += replies_created

            # Exit if --once mode
            if args.once:
                logger.info("--once mode: exiting after single check")
                break

            # Sleep between polls (skipped when shutdown was requested mid-cycle).
            if not shutdown_requested:
                logger.debug(f"Sleeping for {check_interval} seconds...")
                time.sleep(check_interval)

        except KeyboardInterrupt:
            logger.info("Keyboard interrupt received")
            break
        except Exception as e:
            # Continuous mode: log and keep polling; --once mode: fail.
            logger.error(f"Error in main loop: {e}", exc_info=True)
            if args.once:
                return 1
            time.sleep(check_interval)

    # Shutdown
    logger.info("-" * 70)
    logger.info("Summary:")
    logger.info(f"  New issues processed: {total_processed}")
    logger.info(f"  User replies processed: {total_comments_processed}")
    logger.info(f"  Reply tasks created: {total_replies_created}")
    logger.info(f"  Total tracked issues: {state_mgr.get_issue_count()}")
    logger.info(f"  Total tracked comments: {comment_state_mgr.get_processed_count()}")

    return 0
|
|
1733
|
+
|
|
1734
|
+
|
|
1735
|
+
def handle_respond(args: argparse.Namespace) -> int:
    """Handle 'respond' subcommand.

    Scans kanban tasks that carry an agent response, matches each task back to
    its originating GitHub issue via the stored ``tag_id`` (O(1) lookup in the
    NDJSON state file — no fuzzy matching), posts the agent response as an
    issue comment, closes the issue, and records the response so it is never
    sent twice.

    Args:
        args: Parsed CLI arguments. Uses ``tag``, ``dry_run`` and
            ``reset_tracker``.

    Returns:
        0 on success (including dry-run), 1 on configuration/connection
        failure or if any comment failed to post.
    """
    logger.info("=" * 70)
    logger.info("GitHub Respond - Posting agent responses to GitHub issues")
    logger.info("=" * 70)

    # Validate environment (GITHUB_TOKEN etc.) before doing any work.
    token, default_repo, errors = validate_github_environment()
    if errors:
        for error in errors:
            logger.error(error)
        print_env_help()
        return 1

    # Find project root and kanban script.
    project_dir = Path.cwd()
    kanban_script = find_kanban_script(project_dir)
    if not kanban_script:
        logger.error("Cannot find kanban.sh script. Is the project initialized?")
        return 1

    # Initialize GitHub client.
    logger.info("Initializing GitHub client...")
    api_url = os.getenv('GITHUB_API_URL', 'https://api.github.com')
    client = GitHubClient(token, api_url)

    # Test connection early so credential problems fail fast with a clear hint.
    try:
        user_info = client.test_connection()
        logger.info(f"Connected to GitHub API (user: {user_info['login']})")
    except requests.exceptions.HTTPError as e:
        error_msg = f"Failed to connect to GitHub: {e}"
        logger.error(error_msg)
        print(f"\n❌ ERROR: {error_msg}", file=sys.stderr)
        if hasattr(e, 'response') and e.response is not None:
            # FIX: was a bare `except:` which also swallows KeyboardInterrupt/
            # SystemExit; narrowed to Exception (response body may not be JSON).
            try:
                error_detail = e.response.json()
                print(f"  Details: {error_detail.get('message', 'No details available')}", file=sys.stderr)
            except Exception:
                print(f"  HTTP Status: {e.response.status_code}", file=sys.stderr)
        print("  Check your GITHUB_TOKEN permissions and validity", file=sys.stderr)
        return 1

    # Initialize state managers (issue state + already-sent response ledger).
    state_dir = project_dir / '.juno_task' / 'github'
    state_file = state_dir / 'state.ndjson'
    response_state_file = state_dir / 'responses.ndjson'

    logger.info(f"Loading GitHub issue state: {state_file}")
    state_mgr = GitHubStateManager(str(state_file))

    logger.info(f"Loading response state: {response_state_file}")
    response_mgr = ResponseStateManager(str(response_state_file))

    if args.reset_tracker:
        # Destructive: wiping the tracker means every response will be re-sent
        # on the next run, so require explicit confirmation.
        confirm = input("WARNING: This will reset the response tracker. Type 'yes' to confirm: ")
        if confirm.lower() == 'yes':
            response_mgr.reset_state()
            logger.info("Response tracker reset")
        else:
            logger.info("Reset cancelled")
        return 0

    if args.dry_run:
        logger.info("Running in DRY RUN mode - no comments will be posted")

    logger.info(f"Loaded {state_mgr.get_issue_count()} processed issues")
    logger.info(f"Loaded {response_mgr.get_sent_count()} responses already sent")
    logger.info("-" * 70)

    # Get kanban tasks that have an agent response attached.
    tasks = get_completed_tasks_with_responses(kanban_script, tag_filter=args.tag)
    logger.info(f"Found {len(tasks)} kanban tasks with responses")

    # Process tasks.
    total_tasks = 0
    matched_tasks = 0
    sent_responses = 0
    already_sent = 0
    errors_count = 0

    for task in tasks:
        task_id = task.get('id')
        agent_response = task.get('agent_response', '')
        commit_hash = task.get('commit_hash', '')
        feature_tags = task.get('feature_tags', [])

        total_tasks += 1

        # Extract tag_id - handle both regular issue tasks and reply tasks.
        tag_id = None
        is_reply = is_reply_task(feature_tags)

        if is_reply:
            # Reply tasks carry the parent issue's tag so the comment lands on
            # the original thread.
            tag_id = extract_parent_github_tag(feature_tags)
            if tag_id:
                logger.debug(f"Task {task_id}: Reply task, using parent tag {tag_id}")
        else:
            # Regular issue tasks carry a github_issue_* tag.
            tag_id = extract_github_tag(feature_tags)

        if not tag_id:
            logger.debug(f"Task {task_id}: No GitHub tag_id, skipping")
            continue

        # Look up the originating issue from persisted state.
        issue_data = state_mgr.get_issue_for_task(tag_id)
        if not issue_data:
            logger.debug(f"Task {task_id}: No issue found for tag_id {tag_id}")
            continue

        matched_tasks += 1

        issue_number = issue_data['issue_number']
        repo = issue_data['repo']
        author = issue_data.get('author', 'unknown')

        logger.debug(f"Task {task_id}: Found GitHub issue #{issue_number} (@{author})")

        # Skip anything we have already answered (idempotency guarantee).
        if response_mgr.was_response_sent(task_id, tag_id):
            logger.info(f"Task {task_id}: Already sent response to issue #{issue_number} (skipping)")
            already_sent += 1
            continue

        logger.info(f"Task {task_id}: Sending response to issue #{issue_number}")

        # Format comment body; the **[task_id]...[/task_id]** marker lets the
        # fetch side recognize and skip agent-authored comments.
        comment_body = f"**[task_id]{task_id}[/task_id]**\n\n{agent_response}"

        # Add commit hash if available.
        if commit_hash:
            comment_body += f"\n\n**Commit:** {commit_hash}"

        if args.dry_run:
            logger.info(f"  [DRY RUN] Would post comment on issue #{issue_number}")
            logger.info(f"  [DRY RUN] Would close issue #{issue_number}")
            logger.debug(f"  [DRY RUN] Comment: {comment_body[:200]}...")
            sent_responses += 1
            continue

        try:
            owner, repo_name = repo.split('/')

            # Debug output to help troubleshoot.
            logger.debug(f"Posting comment to {owner}/{repo_name} issue #{issue_number}")
            logger.debug(f"Comment preview: {comment_body[:100]}...")

            # Post comment.
            comment = client.post_comment(owner, repo_name, issue_number, comment_body)
            logger.info(f"  ✓ Posted comment on issue #{issue_number}")

            # Close the issue; failure here is non-fatal since the comment is
            # already posted.
            try:
                client.close_issue(owner, repo_name, issue_number)
                logger.info(f"  ✓ Closed issue #{issue_number}")
            except requests.exceptions.HTTPError as e:
                warning_msg = f"  ⚠ Failed to close issue #{issue_number}: {e}"
                logger.warning(warning_msg)
                print(f"\n{warning_msg}", file=sys.stderr)
                if hasattr(e, 'response') and e.response is not None:
                    # FIX: bare `except:` narrowed to Exception (see above).
                    try:
                        error_detail = e.response.json()
                        detail_msg = f"  Details: {error_detail.get('message', 'No details available')}"
                        logger.warning(detail_msg)
                        print(detail_msg, file=sys.stderr)
                    except Exception:
                        status_msg = f"  HTTP Status: {e.response.status_code}"
                        logger.warning(status_msg)
                        print(status_msg, file=sys.stderr)
                print("  Note: Comment was posted successfully, but couldn't close the issue", file=sys.stderr)
                print("  Check that GITHUB_TOKEN has 'repo' scope with write permissions", file=sys.stderr)
                # Continue anyway - comment was posted successfully.

            # Record response so re-runs skip this task.
            response_mgr.record_sent(
                task_id,
                tag_id,
                issue_number,
                repo,
                comment['id'],
                comment['html_url']
            )

            sent_responses += 1

        except requests.exceptions.HTTPError as e:
            errors_count += 1
            error_msg = f"  ✗ Failed to post comment on issue #{issue_number}: {e}"
            logger.error(error_msg)
            print(f"\n{error_msg}", file=sys.stderr)
            if hasattr(e, 'response') and e.response is not None:
                # FIX: bare `except:` narrowed to Exception (see above).
                try:
                    error_detail = e.response.json()
                    detail_msg = f"  Details: {error_detail.get('message', 'No details available')}"
                    logger.error(detail_msg)
                    print(detail_msg, file=sys.stderr)
                except Exception:
                    status_msg = f"  HTTP Status: {e.response.status_code}"
                    logger.error(status_msg)
                    print(status_msg, file=sys.stderr)
            print("  Common causes:", file=sys.stderr)
            print("  - Missing 'repo' or 'issues' scope in GITHUB_TOKEN", file=sys.stderr)
            print("  - Token doesn't have write access to the repository", file=sys.stderr)
            print("  - Token is expired or revoked", file=sys.stderr)

    # Summary
    logger.info("")
    logger.info("=" * 70)
    logger.info("Summary:")
    logger.info(f"  Total tasks processed: {total_tasks}")
    logger.info(f"  Tasks matched with GitHub issues: {matched_tasks}")
    logger.info(f"  Comments posted: {sent_responses}")
    logger.info(f"  Already sent (skipped): {already_sent}")
    if errors_count > 0:
        logger.error(f"  Errors: {errors_count}")

    if args.dry_run:
        logger.info("(Dry run mode - no comments were actually posted)")

    return 0 if errors_count == 0 else 1
|
+
def handle_sync(args: argparse.Namespace) -> int:
    """Handle 'sync' subcommand (fetch + respond).

    Runs the two phases back-to-back: ``handle_fetch`` (pull new issues into
    the kanban) then ``handle_respond`` (post agent responses back). In
    ``--once`` mode a failing phase aborts with its exit code; in continuous
    mode the loop keeps polling every ``check_interval`` seconds until a
    shutdown signal arrives.

    Args:
        args: Parsed CLI arguments; forwarded verbatim to both phase handlers.
            Uses ``interval`` and ``once`` directly.

    Returns:
        0 on clean completion/shutdown, non-zero if a phase failed in
        ``--once`` mode.
    """
    logger.info("=" * 70)
    logger.info("GitHub Sync - Fetch issues AND respond to completed tasks")
    logger.info("=" * 70)

    # Get check interval (CLI flag wins over env var; default 600s).
    check_interval = args.interval or int(os.getenv('CHECK_INTERVAL_SECONDS', 600))

    # Register signal handlers for graceful shutdown (sets shutdown_requested).
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    iteration = 0

    while not shutdown_requested:
        iteration += 1
        logger.info(f"Starting sync iteration {iteration}...")

        try:
            # Phase 1: fetch new issues into the kanban.
            logger.info("Phase 1: Fetching new issues...")
            fetch_result = handle_fetch(args)
            if fetch_result != 0:
                logger.error("Fetch phase failed")
                if args.once:
                    return fetch_result

            # Phase 2: respond to completed tasks.
            logger.info("Phase 2: Responding to completed tasks...")
            respond_result = handle_respond(args)
            if respond_result != 0:
                logger.error("Respond phase failed")
                if args.once:
                    return respond_result

            logger.info(f"Sync iteration {iteration} completed successfully")

            # Exit if --once mode.
            if args.once:
                logger.info("--once mode: exiting after single sync")
                break

            # Sleep before next iteration unless a shutdown was requested.
            if not shutdown_requested:
                logger.info(f"Sleeping for {check_interval} seconds before next sync...")
                time.sleep(check_interval)

        except KeyboardInterrupt:
            logger.info("Keyboard interrupt received")
            break
        except Exception as e:
            logger.error(f"Error in sync loop: {e}", exc_info=True)
            if args.once:
                return 1
            # FIX: the error path previously slept unconditionally, delaying a
            # requested shutdown by up to check_interval seconds; mirror the
            # success path and skip the sleep once shutdown is requested.
            if not shutdown_requested:
                time.sleep(check_interval)

    logger.info("Sync completed")
    return 0
2021
|
+
def handle_push(args: argparse.Namespace) -> int:
    """Handle 'push' subcommand - create GitHub issues from kanban tasks.

    The inverse of ``fetch``: finds kanban tasks that do not yet carry a
    ``github_issue_*`` tag, creates one GitHub issue per task, tags the task
    with the generated ``tag_id``, and records the new issue in the NDJSON
    state so later ``respond`` runs can match it.

    Args:
        args: Parsed CLI arguments. Uses ``repo``, ``tag``, ``status``,
            ``labels`` and ``dry_run``.

    Returns:
        0 on success (including dry-run), 1 on configuration/connection
        failure or if any issue creation failed.
    """
    logger.info("=" * 70)
    logger.info("GitHub Push - Creating GitHub issues from kanban tasks")
    logger.info("=" * 70)

    # Validate environment (GITHUB_TOKEN etc.).
    token, default_repo, errors = validate_github_environment()
    if errors:
        for error in errors:
            logger.error(error)
        print_env_help()
        return 1

    # Determine repository: CLI flag wins over GITHUB_REPO.
    repo = args.repo or default_repo
    if not repo:
        logger.error("No repository specified. Use --repo or set GITHUB_REPO environment variable")
        return 1

    # Parse owner/repo.
    try:
        owner, repo_name = repo.split('/')
    except ValueError:
        logger.error(f"Invalid repository format: {repo}. Expected: owner/repo")
        return 1

    # Find project root and kanban script.
    project_dir = Path.cwd()
    kanban_script = find_kanban_script(project_dir)
    if not kanban_script:
        logger.error("Cannot find kanban.sh script. Is the project initialized?")
        return 1

    # Initialize GitHub client.
    logger.info("Initializing GitHub client...")
    api_url = os.getenv('GITHUB_API_URL', 'https://api.github.com')
    client = GitHubClient(token, api_url)

    # Test connection early so credential problems fail fast.
    try:
        user_info = client.test_connection()
        logger.info(f"Connected to GitHub API (user: {user_info['login']})")
    except requests.exceptions.HTTPError as e:
        error_msg = f"Failed to connect to GitHub: {e}"
        logger.error(error_msg)
        print(f"\n❌ ERROR: {error_msg}", file=sys.stderr)
        if hasattr(e, 'response') and e.response is not None:
            # FIX: was a bare `except:` which also swallows KeyboardInterrupt/
            # SystemExit; narrowed to Exception (response body may not be JSON).
            try:
                error_detail = e.response.json()
                print(f"  Details: {error_detail.get('message', 'No details available')}", file=sys.stderr)
            except Exception:
                print(f"  HTTP Status: {e.response.status_code}", file=sys.stderr)
        print("  Check your GITHUB_TOKEN permissions and validity", file=sys.stderr)
        return 1

    # Initialize state manager (shared with fetch/respond).
    state_dir = project_dir / '.juno_task' / 'github'
    state_file = state_dir / 'state.ndjson'

    logger.info(f"Loading GitHub issue state: {state_file}")
    state_mgr = GitHubStateManager(str(state_file))

    if args.dry_run:
        logger.info("Running in DRY RUN mode - no issues will be created")

    logger.info(f"Loaded {state_mgr.get_issue_count()} tracked issues")
    logger.info("-" * 70)

    # Get kanban tasks (optionally filtered by tag and status).
    status_filter = args.status if args.status else None
    tasks = get_all_kanban_tasks(kanban_script, tag_filter=args.tag, status_filter=status_filter)
    logger.info(f"Found {len(tasks)} kanban tasks")

    # Keep only tasks that don't already have a GitHub tag (avoid duplicates).
    tasks_without_github = []
    for task in tasks:
        task_id = task.get('id')
        feature_tags = task.get('feature_tags', [])

        # Check if task already has a GitHub tag.
        tag_id = extract_github_tag(feature_tags)
        if not tag_id:
            tasks_without_github.append(task)
        else:
            logger.debug(f"Task {task_id}: Already has GitHub tag {tag_id}, skipping")

    logger.info(f"Found {len(tasks_without_github)} tasks without GitHub issues")

    # Process tasks.
    total_tasks = 0
    created_issues = 0
    errors_count = 0

    for task in tasks_without_github:
        task_id = task.get('id')
        task_body = task.get('body', '')
        task_status = task.get('status', 'unknown')

        total_tasks += 1

        # Create issue title: task ID + first 40 chars of the body.
        # Strip markdown headers and collapse whitespace for the title.
        clean_body = re.sub(r'#\s+', '', task_body).strip()
        clean_body = re.sub(r'\s+', ' ', clean_body)
        title_suffix = clean_body[:40]
        if len(clean_body) > 40:
            title_suffix += "..."

        issue_title = f"[{task_id}] {title_suffix}"

        # Issue body is the complete task body plus status metadata footer.
        issue_body = task_body
        issue_body += f"\n\n---\n**Kanban Task ID:** `{task_id}`\n**Status:** `{task_status}`"

        logger.info(f"Task {task_id}: Creating GitHub issue")
        logger.debug(f"  Title: {issue_title}")

        if args.dry_run:
            logger.info(f"  [DRY RUN] Would create issue: {issue_title}")
            logger.debug(f"  [DRY RUN] Body preview: {issue_body[:200]}...")
            created_issues += 1
            continue

        try:
            # Create the issue (labels come from --labels, comma-separated).
            labels = args.labels.split(',') if args.labels else None
            issue = client.create_issue(owner, repo_name, issue_title, issue_body, labels)
            issue_number = issue['number']
            issue_url = issue['html_url']

            logger.info(f"  ✓ Created issue #{issue_number}: {issue_url}")

            # Generate tag_id for this issue (same method as fetch to ensure
            # consistency between push and respond).
            tag_id = GitHubStateManager._make_tag_id(issue_number, repo)

            # Tag the kanban task so it is never pushed twice.
            if add_tag_to_kanban_task(kanban_script, task_id, tag_id):
                logger.info(f"  ✓ Tagged task {task_id} with {tag_id}")
            else:
                logger.warning(f"  ⚠ Failed to tag task {task_id} (issue was created successfully)")

            # Record in state so respond can map task -> issue.
            state_mgr.mark_processed({
                'issue_number': issue_number,
                'repo': repo,
                'title': issue_title,
                'body': issue_body,
                'author': user_info['login'],
                'author_id': user_info.get('id', 0),
                'labels': labels if labels else [],
                'assignees': [],
                'state': issue.get('state', 'open'),
                'created_at': issue.get('created_at', datetime.now(timezone.utc).isoformat()),
                'updated_at': issue.get('updated_at', datetime.now(timezone.utc).isoformat()),
                'issue_url': issue.get('url', ''),
                'issue_html_url': issue_url
            }, task_id)

            created_issues += 1

        except requests.exceptions.HTTPError as e:
            errors_count += 1
            error_msg = f"  ✗ Failed to create issue for task {task_id}: {e}"
            logger.error(error_msg)
            print(f"\n{error_msg}", file=sys.stderr)
            if hasattr(e, 'response') and e.response is not None:
                # FIX: bare `except:` narrowed to Exception (see above).
                try:
                    error_detail = e.response.json()
                    detail_msg = f"  Details: {error_detail.get('message', 'No details available')}"
                    logger.error(detail_msg)
                    print(detail_msg, file=sys.stderr)
                except Exception:
                    status_msg = f"  HTTP Status: {e.response.status_code}"
                    logger.error(status_msg)
                    print(status_msg, file=sys.stderr)
            print("  Common causes:", file=sys.stderr)
            print("  - Missing 'repo' or 'issues' scope in GITHUB_TOKEN", file=sys.stderr)
            print("  - Token doesn't have write access to the repository", file=sys.stderr)
            print("  - Token is expired or revoked", file=sys.stderr)

    # Summary
    logger.info("")
    logger.info("=" * 70)
    logger.info("Summary:")
    logger.info(f"  Total tasks processed: {total_tasks}")
    logger.info(f"  Issues created: {created_issues}")
    if errors_count > 0:
        logger.error(f"  Errors: {errors_count}")

    if args.dry_run:
        logger.info("(Dry run mode - no issues were actually created)")

    return 0 if errors_count == 0 else 1
2218
|
+
|
|
2219
|
+
# =============================================================================
|
|
2220
|
+
# Main CLI
|
|
2221
|
+
# =============================================================================
|
|
2222
|
+
|
|
2223
|
+
def main() -> int:
    """Main entry point.

    Builds the CLI (fetch / respond / sync / push subcommands), layers .env
    files, configures logging, and dispatches to the matching handler.

    Returns:
        Process exit code: the handler's result, or 1 on missing subcommand
        or fatal error.
    """
    parser = argparse.ArgumentParser(
        description='GitHub integration for juno-code - Bidirectional sync between GitHub Issues and Kanban',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Fetch issues from repository
  %(prog)s fetch --repo owner/repo

  # Fetch with filters
  %(prog)s fetch --repo owner/repo --labels bug,priority --assignee username

  # Fetch without processing user replies/comments
  %(prog)s fetch --repo owner/repo --no-comments

  # Respond to completed tasks
  %(prog)s respond --tag github-input

  # Push kanban tasks to GitHub (create issues)
  %(prog)s push --repo owner/repo
  %(prog)s push --repo owner/repo --status backlog todo --labels enhancement

  # Full sync (fetch + respond)
  %(prog)s sync --repo owner/repo --once

  # Continuous monitoring
  %(prog)s sync --repo owner/repo --continuous --interval 600

Environment Variables:
  GITHUB_TOKEN            GitHub personal access token (required)
  GITHUB_REPO             Default repository (format: owner/repo)
  GITHUB_API_URL          GitHub API URL (default: https://api.github.com)
  CHECK_INTERVAL_SECONDS  Polling interval in seconds (default: 300 for fetch, 600 for sync)
  LOG_LEVEL               DEBUG, INFO, WARNING, ERROR (default: INFO)

Notes:
  - Issues are tagged with 'github-input', 'repo_*', 'author_*', 'label_*', and tag_id
  - Tag_id format: github_issue_owner_repo_123 (for O(1) lookup, no fuzzy matching)
  - User replies/comments create tasks tagged with 'github-reply' and 'comment_*'
  - Reply tasks include [task_id]...[/task_id] references to previous tasks in thread
  - Agent comments (matching **[task_id]...[/task_id]**) are automatically detected and skipped
  - State is persisted to .juno_task/github/state.ndjson
  - Responses tracked in .juno_task/github/responses.ndjson
  - Comments tracked in .juno_task/github/comments.ndjson
  - Use Ctrl+C for graceful shutdown
"""
    )

    parser.add_argument(
        '--version',
        action='version',
        version=f'%(prog)s {__version__}'
    )

    subparsers = parser.add_subparsers(dest='subcommand', help='Subcommands')

    # Fetch subcommand: GitHub -> kanban.
    fetch_parser = subparsers.add_parser('fetch', help='Fetch GitHub issues and create kanban tasks')
    fetch_parser.add_argument('--repo', help='Repository (format: owner/repo)')
    fetch_parser.add_argument('--labels', help='Filter by labels (comma-separated)')
    fetch_parser.add_argument('--assignee', help='Filter by assignee')
    fetch_parser.add_argument('--state', default='open', choices=['open', 'closed', 'all'], help='Issue state (default: open)')
    fetch_parser.add_argument('--since', help='Only issues updated since timestamp (ISO 8601)')

    # --once/--continuous share the dest 'once' so they are mutually exclusive.
    fetch_mode_group = fetch_parser.add_mutually_exclusive_group()
    fetch_mode_group.add_argument('--once', dest='once', action='store_true', default=True, help='Run once and exit (DEFAULT)')
    fetch_mode_group.add_argument('--continuous', dest='once', action='store_false', help='Run continuously with polling')

    fetch_parser.add_argument('--interval', type=int, help='Polling interval in seconds (default: 300)')
    fetch_parser.add_argument('--dry-run', action='store_true', help='Show what would be done without creating tasks')
    fetch_parser.add_argument('--verbose', '-v', action='store_true', help='Enable DEBUG level logging')
    fetch_parser.add_argument('--include-comments', dest='include_comments', action='store_true', default=True, help='Include user replies/comments (default: True)')
    fetch_parser.add_argument('--no-comments', dest='include_comments', action='store_false', help='Skip processing user replies/comments')

    # Respond subcommand: kanban -> GitHub comments.
    respond_parser = subparsers.add_parser('respond', help='Post comments on GitHub issues for completed tasks')
    respond_parser.add_argument('--repo', help='Filter by repository (format: owner/repo)')
    respond_parser.add_argument('--tag', default='github-input', help='Filter kanban tasks by tag (default: github-input)')
    respond_parser.add_argument('--dry-run', action='store_true', help='Show what would be sent without posting comments')
    respond_parser.add_argument('--verbose', '-v', action='store_true', help='Enable DEBUG level logging')
    respond_parser.add_argument('--reset-tracker', action='store_true', help='Reset response tracker (WARNING: will re-send all responses)')

    # Sync subcommand: fetch + respond; accepts the union of both option sets.
    sync_parser = subparsers.add_parser('sync', help='Bidirectional sync (fetch + respond)')
    sync_parser.add_argument('--repo', help='Repository (format: owner/repo)')
    sync_parser.add_argument('--labels', help='Filter by labels (comma-separated)')
    sync_parser.add_argument('--assignee', help='Filter by assignee')
    sync_parser.add_argument('--state', default='open', choices=['open', 'closed', 'all'], help='Issue state (default: open)')
    sync_parser.add_argument('--since', help='Only issues updated since timestamp (ISO 8601)')
    sync_parser.add_argument('--tag', default='github-input', help='Filter kanban tasks by tag (default: github-input)')

    sync_mode_group = sync_parser.add_mutually_exclusive_group()
    sync_mode_group.add_argument('--once', dest='once', action='store_true', default=True, help='Run sync once and exit (DEFAULT)')
    sync_mode_group.add_argument('--continuous', dest='once', action='store_false', help='Run continuously with polling')

    sync_parser.add_argument('--interval', type=int, help='Polling interval in seconds (default: 600)')
    sync_parser.add_argument('--dry-run', action='store_true', help='Show what would be done without making changes')
    sync_parser.add_argument('--verbose', '-v', action='store_true', help='Enable DEBUG level logging')
    sync_parser.add_argument('--reset-tracker', action='store_true', help='Reset response tracker (WARNING: will re-send all responses)')
    sync_parser.add_argument('--include-comments', dest='include_comments', action='store_true', default=True, help='Include user replies/comments (default: True)')
    sync_parser.add_argument('--no-comments', dest='include_comments', action='store_false', help='Skip processing user replies/comments')

    # Push subcommand: kanban -> new GitHub issues.
    push_parser = subparsers.add_parser('push', help='Create GitHub issues from kanban tasks without issues')
    push_parser.add_argument('--repo', required=True, help='Repository (format: owner/repo)')
    push_parser.add_argument('--tag', help='Filter kanban tasks by tag')
    push_parser.add_argument('--status', nargs='+', help='Filter by status (e.g., backlog todo in_progress)')
    push_parser.add_argument('--labels', help='Add labels to created issues (comma-separated)')
    push_parser.add_argument('--dry-run', action='store_true', help='Show what would be created without making changes')
    push_parser.add_argument('--verbose', '-v', action='store_true', help='Enable DEBUG level logging')

    args = parser.parse_args()

    if not args.subcommand:
        parser.print_help()
        return 1

    # Load environment variables from .env files.
    # Later load_dotenv calls layer on top of earlier ones, ending with the
    # most specific file: CWD -> ./.env -> .juno_task/.env -> .juno_task/github/.env.
    load_dotenv()

    # Also try loading from project root .env.
    project_root = Path.cwd()
    env_file = project_root / '.env'
    if env_file.exists():
        load_dotenv(env_file)

    # Also check .juno_task/.env.
    juno_env_file = project_root / '.juno_task' / '.env'
    if juno_env_file.exists():
        load_dotenv(juno_env_file)

    # Also check .juno_task/github/.env (highest priority).
    github_env_file = project_root / '.juno_task' / 'github' / '.env'
    if github_env_file.exists():
        load_dotenv(github_env_file)

    # Setup logging (every subparser defines --verbose, so args.verbose exists).
    setup_logging(verbose=args.verbose)

    try:
        # Route to subcommand handler.
        if args.subcommand == 'fetch':
            return handle_fetch(args)
        elif args.subcommand == 'respond':
            return handle_respond(args)
        elif args.subcommand == 'sync':
            return handle_sync(args)
        elif args.subcommand == 'push':
            return handle_push(args)
        else:
            # Unreachable with the subparsers defined above; defensive fallback.
            logger.error(f"Unknown subcommand: {args.subcommand}")
            return 1

    except Exception as e:
        # Top-level boundary: log with traceback and convert to exit code.
        logger.error(f"Fatal error: {e}", exc_info=True)
        return 1
+
|
|
2382
|
+
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())