juno-code 1.0.44 → 1.0.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/README.md +1 -1
  2. package/dist/bin/cli.js +658 -50
  3. package/dist/bin/cli.js.map +1 -1
  4. package/dist/bin/cli.mjs +658 -50
  5. package/dist/bin/cli.mjs.map +1 -1
  6. package/dist/index.js +6 -4
  7. package/dist/index.js.map +1 -1
  8. package/dist/index.mjs +6 -4
  9. package/dist/index.mjs.map +1 -1
  10. package/dist/templates/scripts/__pycache__/attachment_downloader.cpython-38.pyc +0 -0
  11. package/dist/templates/scripts/__pycache__/github.cpython-38.pyc +0 -0
  12. package/dist/templates/scripts/__pycache__/slack_fetch.cpython-38.pyc +0 -0
  13. package/dist/templates/scripts/__pycache__/slack_state.cpython-38.pyc +0 -0
  14. package/dist/templates/scripts/attachment_downloader.py +405 -0
  15. package/dist/templates/scripts/github.py +282 -7
  16. package/dist/templates/scripts/hooks/session_counter.sh +328 -0
  17. package/dist/templates/scripts/kanban.sh +22 -4
  18. package/dist/templates/scripts/log_scanner.sh +790 -0
  19. package/dist/templates/scripts/slack_fetch.py +232 -20
  20. package/dist/templates/services/claude.py +50 -1
  21. package/dist/templates/services/codex.py +5 -4
  22. package/dist/templates/skills/claude/.gitkeep +0 -0
  23. package/dist/templates/skills/claude/plan-kanban-tasks/SKILL.md +25 -0
  24. package/dist/templates/skills/claude/ralph-loop/SKILL.md +43 -0
  25. package/dist/templates/skills/claude/ralph-loop/references/first_check.md +20 -0
  26. package/dist/templates/skills/claude/ralph-loop/references/implement.md +99 -0
  27. package/dist/templates/skills/claude/ralph-loop/scripts/kanban.sh +293 -0
  28. package/dist/templates/skills/claude/understand-project/SKILL.md +39 -0
  29. package/dist/templates/skills/codex/.gitkeep +0 -0
  30. package/dist/templates/skills/codex/ralph-loop/SKILL.md +43 -0
  31. package/dist/templates/skills/codex/ralph-loop/references/first_check.md +20 -0
  32. package/dist/templates/skills/codex/ralph-loop/references/implement.md +99 -0
  33. package/dist/templates/skills/codex/ralph-loop/scripts/kanban.sh +293 -0
  34. package/package.json +3 -2
@@ -0,0 +1,405 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Shared attachment downloading utility for Slack and GitHub integrations.
4
+
5
+ This module provides a unified interface for downloading and storing attachments
6
+ from various sources (Slack file uploads, GitHub issue attachments).
7
+
8
+ Features:
9
+ - Domain allowlist security (only trusted sources)
10
+ - File type filtering (skip dangerous extensions)
11
+ - Collision-safe filename generation
12
+ - SHA256 checksums for integrity verification
13
+ - Metadata tracking for each download
14
+ - Retry logic with exponential backoff
15
+ - Size limits to prevent abuse
16
+
17
+ Usage:
18
+ from attachment_downloader import AttachmentDownloader
19
+
20
+ downloader = AttachmentDownloader('.juno_task/attachments')
21
+ path, error = downloader.download_file(
22
+ url='https://files.slack.com/...',
23
+ target_dir=Path('.juno_task/attachments/slack/C123'),
24
+ filename_prefix='1706789012_345678',
25
+ original_filename='report.pdf',
26
+ headers={'Authorization': 'Bearer xoxb-...'},
27
+ metadata={'source': 'slack', 'channel_id': 'C123'}
28
+ )
29
+
30
+ Version: 1.0.0
31
+ Package: juno-code@1.x.x
32
+ Auto-installed by: ScriptInstaller
33
+ """
34
+
35
+ import hashlib
36
+ import json
37
+ import logging
38
+ import os
39
+ import re
40
+ import time
41
+ from datetime import datetime, timezone
42
+ from pathlib import Path
43
+ from typing import Dict, List, Optional, Tuple, Any
44
+ from urllib.parse import urlparse
45
+
46
+ try:
47
+ import requests
48
+ except ImportError:
49
+ print("Error: Missing required dependency: requests")
50
+ print("Please run: pip install requests")
51
+ import sys
52
+ sys.exit(1)
53
+
54
+ __version__ = "1.0.0"
55
+
56
+ logger = logging.getLogger(__name__)
57
+
58
+
59
class AttachmentDownloader:
    """Downloads and stores attachments from trusted sources (Slack, GitHub).

    Enforces a domain allowlist, extension filtering, and a size cap; streams
    downloads to a per-source directory tree and writes a ``.meta.json``
    sidecar (original name, SHA256 checksum, size, URL, timestamp) next to
    every downloaded file.
    """

    # Configuration defaults
    DEFAULT_MAX_SIZE = 50 * 1024 * 1024  # 50MB
    DEFAULT_TIMEOUT = 60  # seconds
    DEFAULT_RETRIES = 3

    # Security: only allow downloads from trusted domains.
    # Entries containing a dot are matched as an exact host or as a domain
    # suffix; bare entries (no dot, e.g. 'files-pri') are matched as a
    # substring of the hostname.
    ALLOWED_DOMAINS = [
        'files.slack.com',
        'slack-files.com',
        'files-pri',  # Slack private files pattern
        'github.com',
        'githubusercontent.com',
        'user-images.githubusercontent.com',
        'private-user-images.githubusercontent.com',
        'objects.githubusercontent.com',
    ]

    # File types to skip (security risk or too large)
    SKIP_EXTENSIONS = {'.exe', '.dmg', '.iso', '.msi', '.app', '.deb', '.rpm'}

    # Default allowed file types (can be overridden via env)
    DEFAULT_ALLOWED_TYPES = {
        '.pdf', '.png', '.jpg', '.jpeg', '.gif', '.webp', '.svg',
        '.txt', '.md', '.csv', '.json', '.yaml', '.yml', '.xml',
        '.log', '.html', '.htm', '.css', '.js', '.ts', '.py',
        '.sh', '.bash', '.zsh', '.toml', '.ini', '.conf',
        '.zip', '.tar', '.gz', '.bz2',
    }

    def __init__(
        self,
        base_dir: str = '.juno_task/attachments',
        max_size: Optional[int] = None,
        timeout: int = DEFAULT_TIMEOUT
    ):
        """
        Initialize AttachmentDownloader.

        Args:
            base_dir: Base directory for storing attachments
            max_size: Maximum file size in bytes (default: 50MB, or the
                JUNO_MAX_ATTACHMENT_SIZE environment variable)
            timeout: Download timeout in seconds
        """
        self.base_dir = Path(base_dir)
        # NOTE: a caller-supplied max_size of 0 falls through to the
        # env/default value because of the `or` — 0 is treated as "unset".
        self.max_size = max_size or int(os.getenv('JUNO_MAX_ATTACHMENT_SIZE', self.DEFAULT_MAX_SIZE))
        self.timeout = timeout

        # Parse allowed/skip types from environment
        self._allowed_types = self._parse_env_types('JUNO_ALLOWED_FILE_TYPES', self.DEFAULT_ALLOWED_TYPES)
        self._skip_types = self._parse_env_types('JUNO_SKIP_FILE_TYPES', self.SKIP_EXTENSIONS)

        # Additional allowed domains from environment. Assigning to the
        # instance shadows the class attribute, so extras stay per-instance.
        extra_domains = os.getenv('JUNO_ALLOWED_DOMAINS', '')
        if extra_domains:
            self.ALLOWED_DOMAINS = list(self.ALLOWED_DOMAINS) + [d.strip() for d in extra_domains.split(',')]

        self._ensure_directories()

    def _parse_env_types(self, env_var: str, default: set) -> set:
        """Parse a comma-separated set of file extensions from an env var.

        Returns `default` when the variable is unset/empty, and an empty set
        (meaning "allow all") when the value is the literal 'all'.
        """
        env_value = os.getenv(env_var, '')
        if not env_value:
            return default
        if env_value.lower() == 'all':
            return set()  # Empty set means allow all
        types = {t.strip().lower() for t in env_value.split(',')}
        # Ensure extensions start with dot
        return {t if t.startswith('.') else f'.{t}' for t in types if t}

    def _ensure_directories(self) -> None:
        """Create the per-source directory structure under base_dir."""
        (self.base_dir / 'slack').mkdir(parents=True, exist_ok=True)
        (self.base_dir / 'github').mkdir(parents=True, exist_ok=True)

    def download_file(
        self,
        url: str,
        target_dir: Path,
        filename_prefix: str,
        original_filename: str,
        headers: Optional[Dict[str, str]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Tuple[Optional[str], Optional[str]]:
        """
        Download a file and save with metadata.

        Args:
            url: URL to download from
            target_dir: Directory to save the file
            filename_prefix: Prefix for the filename (e.g., timestamp or ID)
            original_filename: Original filename for extension preservation
            headers: HTTP headers for authentication
            metadata: Additional metadata to store with the file

        Returns:
            Tuple of (local_path, error_message)
            - On success: (path_string, None)
            - On failure: (None, error_message)
        """
        # Validate URL domain
        if not self._is_allowed_domain(url):
            domain = urlparse(url).netloc
            logger.warning(f"Domain not allowed: {domain}")
            return None, f"Domain not allowed: {domain}"

        # Validate file type
        ext = Path(original_filename).suffix.lower()
        if ext in self._skip_types:
            logger.warning(f"File type not allowed: {ext}")
            return None, f"File type not allowed: {ext}"

        # Check allowed types (a non-empty allowlist restricts to those types;
        # extensionless files always pass)
        if self._allowed_types and ext and ext not in self._allowed_types:
            logger.warning(f"File type not in allowlist: {ext}")
            return None, f"File type not in allowlist: {ext}"

        # Generate a safe, collision-free target path
        safe_filename = self._generate_safe_filename(original_filename, filename_prefix)
        target_path = self._handle_collision(target_dir / safe_filename)

        # Download with retries
        for attempt in range(self.DEFAULT_RETRIES):
            try:
                response = requests.get(
                    url,
                    headers=headers or {},
                    timeout=self.timeout,
                    stream=True,
                    allow_redirects=True
                )
                response.raise_for_status()

                # Reject early when the server declares an oversized body
                content_length = int(response.headers.get('content-length', 0))
                if content_length > self.max_size:
                    return None, f"File too large: {content_length:,} bytes (max: {self.max_size:,})"

                # Ensure target directory exists
                target_dir.mkdir(parents=True, exist_ok=True)

                # Stream to disk in chunks, hashing as we go
                sha256_hash = hashlib.sha256()
                total_bytes = 0

                with open(target_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            # Enforce the cap during download too, for servers
                            # that omit content-length
                            total_bytes += len(chunk)
                            if total_bytes > self.max_size:
                                # Close before unlink so removal works on
                                # platforms that lock open files (Windows)
                                f.close()
                                target_path.unlink()
                                return None, f"File exceeded max size during download ({total_bytes:,} bytes)"
                            f.write(chunk)
                            sha256_hash.update(chunk)

                # Create metadata sidecar
                full_metadata = {
                    'original_filename': original_filename,
                    'downloaded_at': datetime.now(timezone.utc).isoformat(),
                    'file_size': total_bytes,
                    'checksum_sha256': sha256_hash.hexdigest(),
                    'download_url': url
                }
                if metadata:
                    full_metadata.update(metadata)

                self._write_metadata(target_path, full_metadata)

                logger.info(f"Downloaded: {original_filename} -> {target_path} ({total_bytes:,} bytes)")
                return str(target_path), None

            except requests.exceptions.Timeout:
                logger.warning(f"Timeout downloading {url} (attempt {attempt + 1}/{self.DEFAULT_RETRIES})")
                # A timeout may strike mid-stream; drop the partial file
                # before retrying so a truncated download is never kept.
                self._discard_partial(target_path)
                if attempt < self.DEFAULT_RETRIES - 1:
                    time.sleep(2 ** attempt)  # Exponential backoff
            except requests.exceptions.HTTPError as e:
                status = e.response.status_code if e.response is not None else 'unknown'
                logger.error(f"HTTP error downloading {url}: {status}")
                return None, f"HTTP error: {status}"
            except requests.exceptions.RequestException as e:
                logger.error(f"Request error downloading {url}: {e}")
                self._discard_partial(target_path)
                return None, f"Request error: {str(e)}"
            except IOError as e:
                logger.error(f"IO error saving file: {e}")
                self._discard_partial(target_path)
                return None, f"IO error: {str(e)}"
            except Exception as e:
                logger.error(f"Unexpected error downloading {url}: {e}")
                self._discard_partial(target_path)
                return None, f"Download error: {str(e)}"

        return None, f"Max retries exceeded ({self.DEFAULT_RETRIES})"

    @staticmethod
    def _discard_partial(path: Path) -> None:
        """Best-effort removal of a partially written download file."""
        try:
            path.unlink()
        except OSError:
            # File was never created (failure before open) or already gone.
            pass

    def _generate_safe_filename(self, original_name: str, prefix: str) -> str:
        """
        Generate safe filename with prefix.

        Args:
            original_name: Original filename
            prefix: Prefix to add (e.g., message timestamp)

        Returns:
            Sanitized filename: {prefix}_{sanitized_stem}{extension}
        """
        path = Path(original_name)
        ext = path.suffix.lower()
        stem = path.stem

        # Sanitize stem: replace unsafe characters with underscores
        stem = re.sub(r'[^\w\-.]', '_', stem)
        stem = re.sub(r'_+', '_', stem)  # Collapse multiple underscores
        stem = stem.strip('_')

        # Ensure stem is not empty
        if not stem:
            stem = 'file'

        # Truncate if needed (preserve reasonable length)
        max_stem_len = 100
        if len(stem) > max_stem_len:
            stem = stem[:max_stem_len]

        # Sanitize prefix
        safe_prefix = re.sub(r'[^\w\-]', '_', prefix)
        safe_prefix = re.sub(r'_+', '_', safe_prefix).strip('_')

        return f"{safe_prefix}_{stem}{ext}"

    def _handle_collision(self, target_path: Path) -> Path:
        """
        Handle filename collision by appending counter.

        Args:
            target_path: Intended file path

        Returns:
            Path that doesn't exist (original or with counter suffix)
        """
        if not target_path.exists():
            return target_path

        counter = 1
        stem = target_path.stem
        suffix = target_path.suffix
        parent = target_path.parent

        while True:
            new_path = parent / f"{stem}_{counter}{suffix}"
            if not new_path.exists():
                logger.debug(f"Collision detected, using: {new_path}")
                return new_path
            counter += 1
            if counter > 1000:  # Safety limit
                # Use timestamp as fallback
                ts = int(time.time() * 1000)
                return parent / f"{stem}_{ts}{suffix}"

    def _is_allowed_domain(self, url: str) -> bool:
        """
        Check if URL domain is in allowlist.

        Entries with a dot must match the host exactly or be a proper
        domain suffix (``x.github.com``). Bare entries such as
        ``'files-pri'`` are matched as substrings. Substring matching is
        deliberately NOT applied to dotted entries: otherwise
        ``'github.com'`` would match ``github.com.evil.io`` and defeat
        the allowlist.

        Args:
            url: URL to check

        Returns:
            True if domain is allowed, False otherwise
        """
        try:
            parsed = urlparse(url)
            domain = parsed.netloc.lower()
            for allowed in self.ALLOWED_DOMAINS:
                allowed_lower = allowed.lower()
                # Exact host or domain-suffix match (dot-anchored)
                if domain == allowed_lower or domain.endswith('.' + allowed_lower):
                    return True
                # Substring match only for bare patterns like 'files-pri'
                if '.' not in allowed_lower and allowed_lower in domain:
                    return True
            return False
        except Exception as e:
            logger.error(f"Error parsing URL {url}: {e}")
            return False

    def _write_metadata(self, filepath: Path, metadata: Dict[str, Any]) -> None:
        """
        Write metadata JSON file alongside downloaded file.

        Args:
            filepath: Path to the downloaded file
            metadata: Metadata dictionary to save
        """
        meta_path = Path(str(filepath) + '.meta.json')
        try:
            with open(meta_path, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)
            logger.debug(f"Wrote metadata: {meta_path}")
        except Exception as e:
            # Metadata is best-effort; a failed sidecar must not fail the download
            logger.warning(f"Failed to write metadata to {meta_path}: {e}")
362
+
363
+
364
def format_attachments_section(file_paths: List[str]) -> str:
    """Render local file paths as an ``[attached files]`` block for a kanban task.

    Args:
        file_paths: List of local file paths.

    Returns:
        A string starting with two newlines and the ``[attached files]``
        header, followed by one ``- path`` bullet per file; an empty string
        when no paths are given.
    """
    if not file_paths:
        return ""

    bullets = (f"- {p}" for p in file_paths)
    return '\n'.join(["\n\n[attached files]", *bullets])
382
+
383
+
384
def is_attachments_enabled() -> bool:
    """Report whether attachment downloading is enabled.

    Controlled by the ``JUNO_DOWNLOAD_ATTACHMENTS`` environment variable;
    downloading is on by default and stays on for the case-insensitive
    values ``true``, ``1``, and ``yes``.

    Returns:
        True if enabled (default), False if explicitly disabled
    """
    flag = os.getenv('JUNO_DOWNLOAD_ATTACHMENTS', 'true')
    return flag.lower() in {'true', '1', 'yes'}
392
+
393
+
394
+ # Convenience functions for direct script usage
395
def create_downloader(base_dir: str = '.juno_task/attachments') -> AttachmentDownloader:
    """Build an AttachmentDownloader rooted at *base_dir* with default settings.

    Args:
        base_dir: Base directory under which attachments are stored.

    Returns:
        Configured AttachmentDownloader instance
    """
    downloader = AttachmentDownloader(base_dir=base_dir)
    return downloader