juno-code 1.0.34 → 1.0.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,202 @@
+ #!/usr/bin/env bash
+
+ # run_until_completion.sh
+ #
+ # Purpose: Continuously run juno-code until all kanban tasks are completed
+ #
+ # This script uses a do-while loop pattern: it runs juno-code at least once,
+ # then checks the kanban board for tasks in backlog, todo, or in_progress status.
+ # If tasks remain, it continues running juno-code. This ensures juno-code's
+ # internal task management systems get a chance to operate even if kanban.sh
+ # doesn't initially detect any tasks.
+ #
+ # Usage:   ./.juno_task/scripts/run_until_completion.sh [juno-code arguments]
+ # Example: ./.juno_task/scripts/run_until_completion.sh -s claude -i 5 -v
+ # Example: ./.juno_task/scripts/run_until_completion.sh -b shell -s claude -m :opus
+ #
+ # All arguments passed to this script will be forwarded to juno-code.
+ # The script shows all stdout/stderr from juno-code in real-time.
+ #
+ # Environment Variables:
+ #   JUNO_DEBUG=true   - Show [DEBUG] diagnostic messages
+ #   JUNO_VERBOSE=true - Show [RUN_UNTIL] informational messages
+ #   (Both default to false for silent operation)
+ #
+ # Created by: juno-code init command
+ # Date: Auto-generated during project initialization
+
+ set -euo pipefail  # Exit on error, undefined variable, or pipe failure
+
+ # DEBUG OUTPUT: Show that run_until_completion.sh is being executed
+ if [ "${JUNO_DEBUG:-false}" = "true" ]; then
+     echo "[DEBUG] run_until_completion.sh is being executed from: $(pwd)" >&2
+ fi
+
+ # Color output for better readability
+ RED='\033[0;31m'
+ GREEN='\033[0;32m'
+ YELLOW='\033[1;33m'
+ BLUE='\033[0;34m'
+ CYAN='\033[0;36m'
+ NC='\033[0m' # No Color
+
+ # Configuration
+ SCRIPTS_DIR=".juno_task/scripts"
+ KANBAN_SCRIPT="${SCRIPTS_DIR}/kanban.sh"
+
+ # Logging functions
+ log_info() {
+     # Only print if JUNO_VERBOSE is set to true
+     if [ "${JUNO_VERBOSE:-false}" = "true" ]; then
+         echo -e "${BLUE}[RUN_UNTIL]${NC} $1" >&2
+     fi
+ }
+
+ log_success() {
+     # Only print if JUNO_VERBOSE is set to true
+     if [ "${JUNO_VERBOSE:-false}" = "true" ]; then
+         echo -e "${GREEN}[RUN_UNTIL]${NC} $1" >&2
+     fi
+ }
+
+ log_warning() {
+     # Only print if JUNO_VERBOSE is set to true
+     if [ "${JUNO_VERBOSE:-false}" = "true" ]; then
+         echo -e "${YELLOW}[RUN_UNTIL]${NC} $1" >&2
+     fi
+ }
+
+ log_error() {
+     # Always print errors regardless of JUNO_VERBOSE
+     echo -e "${RED}[RUN_UNTIL]${NC} $1" >&2
+ }
+
+ log_status() {
+     # Always print status updates so user knows what's happening
+     echo -e "${CYAN}[RUN_UNTIL]${NC} $1" >&2
+ }
+
+ # Get the directory where this script is located
+ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+ # Navigate to project root (parent of scripts directory)
+ PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." && pwd )"
+
+ # Change to project root
+ cd "$PROJECT_ROOT"
+
+ # Function to check if there are tasks remaining
+ has_remaining_tasks() {
+     log_info "Checking kanban for remaining tasks..."
+
+     # Check if kanban script exists
+     if [ ! -f "$KANBAN_SCRIPT" ]; then
+         log_error "Kanban script not found: $KANBAN_SCRIPT"
+         log_error "Please run 'juno-code init' to initialize the project"
+         return 1
+     fi
+
+     # Make sure the script is executable
+     chmod +x "$KANBAN_SCRIPT"
+
+     # Run kanban list and check for "No results found"
+     # We capture both stdout and stderr to handle various output formats
+     local kanban_output
+     if kanban_output=$("$KANBAN_SCRIPT" list --status backlog todo in_progress 2>&1); then
+         if echo "$kanban_output" | grep -q "No results found"; then
+             log_info "No remaining tasks found"
+             return 1 # No remaining tasks
+         else
+             log_info "Found remaining tasks"
+             if [ "${JUNO_DEBUG:-false}" = "true" ]; then
+                 echo "[DEBUG] Kanban output:" >&2
+                 echo "$kanban_output" >&2
+             fi
+             return 0 # Has remaining tasks
+         fi
+     else
+         # kanban.sh returned non-zero, check if it's because no results
+         if echo "$kanban_output" | grep -q "No results found"; then
+             log_info "No remaining tasks found (from error output)"
+             return 1
+         fi
+         log_error "Failed to check kanban status"
+         log_error "Output: $kanban_output"
+         return 1 # Treat errors as "no tasks" to prevent infinite loops
+     fi
+ }
+
+ # Main run loop
+ main() {
+     local iteration=0
+     local max_iterations="${JUNO_RUN_UNTIL_MAX_ITERATIONS:-0}" # 0 = unlimited
+
+     log_status "=== Run Until Completion ==="
+     log_status "Arguments to juno-code: $*"
+
+     if [ "$max_iterations" -gt 0 ]; then
+         log_status "Maximum iterations: $max_iterations"
+     else
+         log_status "Maximum iterations: unlimited"
+     fi
+
+     # Check if we have any arguments
+     if [ $# -eq 0 ]; then
+         log_warning "No arguments provided. Running juno-code with no arguments."
+     fi
+
+     # Do-while loop pattern: Run juno-code at least once, then continue while tasks remain
+     # This ensures juno-code's internal task management systems get a chance to operate
+     # even if kanban.sh doesn't initially detect any tasks
+     while true; do
+         iteration=$((iteration + 1))
+
+         log_status ""
+         log_status "=========================================="
+         log_status "Iteration $iteration"
+         log_status "=========================================="
+
+         # Check max iterations limit BEFORE running (prevents exceeding limit)
+         if [ "$max_iterations" -gt 0 ] && [ "$iteration" -gt "$max_iterations" ]; then
+             log_warning ""
+             log_warning "=========================================="
+             log_warning "Maximum iterations ($max_iterations) reached. Exiting."
+             log_warning "=========================================="
+             exit 0
+         fi
+
+         log_status "Running juno-code with args: $*"
+         log_status "------------------------------------------"
+
+         # Run juno-code with all provided arguments
+         # We run juno-code FIRST (do-while pattern), then check for remaining tasks
+         if juno-code "$@"; then
+             log_success "juno-code completed successfully"
+         else
+             local exit_code=$?
+             log_warning "juno-code exited with code $exit_code"
+             # Continue the loop even if juno-code fails - it might succeed next iteration
+             # Some failures are expected (e.g., partial task completion)
+         fi
+
+         log_status "------------------------------------------"
+         log_status "Iteration $iteration complete. Checking for more tasks..."
+
+         # Small delay to prevent rapid-fire execution and allow user to Ctrl+C if needed
+         sleep 1
+
+         # Check for remaining tasks AFTER running juno-code (do-while pattern)
+         # This ensures juno-code runs at least once, allowing its internal task
+         # management systems to check kanban for updates
+         if ! has_remaining_tasks; then
+             log_success ""
+             log_success "=========================================="
+             log_success "All tasks completed! Exiting after $iteration iteration(s)."
+             log_success "=========================================="
+             exit 0
+         fi
+     done
+ }
+
+ # Run main function with all arguments
+ main "$@"
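
The new script is driven entirely by the environment variables and pass-through arguments documented in its own header. As a quick reference, a typical invocation might look like the sketch below; the `-s claude -i 5 -v` flags are copied from the script's usage examples and are forwarded verbatim to juno-code, while the iteration cap of 10 is an arbitrary illustrative value.

```bash
# Verbose run, capped at 10 iterations as a safety limit (0 or unset = unlimited)
JUNO_VERBOSE=true JUNO_RUN_UNTIL_MAX_ITERATIONS=10 \
    ./.juno_task/scripts/run_until_completion.sh -s claude -i 5 -v

# Same run, but also show the wrapper's own [DEBUG] diagnostics
JUNO_DEBUG=true ./.juno_task/scripts/run_until_completion.sh -s claude -i 5 -v
```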
@@ -180,6 +180,48 @@ You can override these by providing the `--tool` argument:
  ~/.juno_code/services/claude.py -p "Safe operation" --tool Read --tool Write
  ```

+ ### gemini.py
+
+ Headless wrapper for Gemini CLI with shorthand model support and JSON/text output normalization.
+
+ #### Features
+
+ - Headless execution via `--prompt/-p` or `--prompt-file`
+ - Shorthand model aliases (`:pro`, `:flash`, `:pro-3`, `:flash-3`, `:pro-2.5`, `:flash-2.5`)
+ - Streaming JSON output normalization (default `--output-format stream-json`)
+ - Auto-approval for headless mode (defaults to `--yolo` when no approval mode is provided)
+ - Fails fast when `GEMINI_API_KEY` is missing to prevent confusing CLI errors
+ - Optional directory inclusion (`--include-directories`) and debug passthrough
+
+ #### Usage
+
+ ```bash
+ # Basic headless run with shorthand model (stream-json output is default)
+ ~/.juno_code/services/gemini.py -p "Summarize the README" -m :pro-3
+
+ # Include project context and enable debug logging
+ ~/.juno_code/services/gemini.py -p "Audit the project" --include-directories src,docs --debug
+
+ # Auto-approve actions explicitly (default when no approval mode provided)
+ ~/.juno_code/services/gemini.py -p "Refactor the code" --yolo
+
+ # Emit non-streaming JSON if needed
+ ~/.juno_code/services/gemini.py -p "Quick JSON response" --output-format json
+ ```
+
+ #### Arguments
+
+ - `-p, --prompt <text>`: Prompt text (required, mutually exclusive with --prompt-file)
+ - `-pp, --prompt-file <path>`: Path to prompt file (required if no --prompt)
+ - `--cd <path>`: Project path (default: current directory)
+ - `-m, --model <name>`: Gemini model (supports shorthand aliases)
+ - `--output-format <stream-json|json|text>`: Output format (default: stream-json)
+ - `--include-directories <list>`: Comma-separated directories to include
+ - `--approval-mode <mode>`: Approval mode (e.g., auto_edit). If omitted, `--yolo` is applied for headless automation.
+ - `--yolo`: Auto-approve actions (non-interactive)
+ - `--debug`: Enable Gemini CLI debug output
+ - `--verbose`: Print the constructed command before execution
+
  ## Customization

  All service scripts installed in `~/.juno_code/services/` can be modified to suit your needs. This directory is designed for user customization.
@@ -238,6 +280,7 @@ Service scripts require Python 3.6+ to be installed on your system. Individual s

  - **codex.py**: Requires OpenAI Codex CLI to be installed
  - **claude.py**: Requires Anthropic Claude CLI to be installed (see https://docs.anthropic.com/en/docs/agents-and-tools/claude-code)
+ - **gemini.py**: Requires Gemini CLI to be installed (see https://geminicli.com/docs/cli/headless/)

  ## Troubleshooting

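To complement the gemini.py usage examples in the README additions above, here is one more hedged invocation sketch covering the prompt-file path that the argument list documents but the Usage block does not show; the `prompt.md` file name and `~/projects/app` path are purely illustrative, while every flag comes from the documented argument list.

```bash
# Read the prompt from a file, run against a specific project directory,
# and request plain-text output with a shorthand model alias
~/.juno_code/services/gemini.py -pp prompt.md --cd ~/projects/app -m :flash --output-format text
```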
@@ -20,7 +20,7 @@ class ClaudeService:
      # Default configuration
      DEFAULT_MODEL = "claude-sonnet-4-5-20250929"
      DEFAULT_PERMISSION_MODE = "default"
-     DEFAULT_AUTO_INSTRUCTION = """You are Claude Code, an AI coding assistant. Follow the instructions provided and generate high-quality code."""
+     DEFAULT_AUTO_INSTRUCTION = """"""

      # Model shorthand mappings (colon-prefixed names expand to full model IDs)
      MODEL_SHORTHANDS = {
@@ -17,12 +17,12 @@ class CodexService:
      """Service wrapper for OpenAI Codex CLI"""

      # Default configuration
-     DEFAULT_MODEL = "codex-5.1-max"
+     DEFAULT_MODEL = "codex-5.2-max"
      DEFAULT_AUTO_INSTRUCTION = """You are an AI coding assistant. Follow the instructions provided and generate high-quality code."""

      # Model shorthand mappings (colon-prefixed names expand to full model IDs)
      MODEL_SHORTHANDS = {
-         ":codex": "codex-5.1-codex-max",
+         ":codex": "codex-5.2-codex-max",
          ":gpt-5": "gpt-5",
          ":mini": "gpt-5-codex-mini",
      }
@@ -34,6 +34,7 @@ class CodexService:
          self.prompt = ""
          self.additional_args: List[str] = []
          self.verbose = False
+         self._item_counter = 0

      def expand_model_shorthand(self, model: str) -> str:
          """
@@ -69,10 +70,10 @@ Examples:
    %(prog)s -p "Write a hello world function"
    %(prog)s -pp prompt.txt --cd /path/to/project
    %(prog)s -p "Add tests" -m gpt-4 -c custom_arg=value
-   %(prog)s -p "Optimize code" -m :codex # uses codex-5.1-codex-max
+   %(prog)s -p "Optimize code" -m :codex # uses codex-5.2-codex-max

  Environment Variables:
-   CODEX_MODEL                  Model name (supports shorthand, default: codex-5.1-max)
+   CODEX_MODEL                  Model name (supports shorthand, default: codex-5.2-max)
    CODEX_HIDE_STREAM_TYPES      Comma-separated list of streaming msg types to hide
                                 Default: turn_diff,token_count,exec_command_output_delta
    JUNO_CODE_HIDE_STREAM_TYPES  Same as CODEX_HIDE_STREAM_TYPES (alias)
@@ -199,6 +200,37 @@ Environment Variables:
              content_text,
          )

+     def _parse_item_number(self, item_id: str) -> Optional[int]:
+         """Return numeric component from item_{n} ids or None if unparseable."""
+         if not isinstance(item_id, str):
+             return None
+         item_id = item_id.strip()
+         if not item_id.startswith("item_"):
+             return None
+         try:
+             return int(item_id.split("item_", 1)[1])
+         except Exception:
+             return None
+
+     def _normalize_item_id(self, payload: dict, outer_type: str) -> Optional[str]:
+         """
+         Prefer the existing id on item.* payloads; otherwise synthesize sequential item_{n}.
+         Maintains a per-run counter so missing ids still expose turn counts.
+         """
+         item_id = payload.get("id") if isinstance(payload, dict) else None
+         if isinstance(item_id, str) and item_id.strip():
+             parsed = self._parse_item_number(item_id)
+             if parsed is not None and parsed + 1 > self._item_counter:
+                 self._item_counter = parsed + 1
+             return item_id.strip()
+
+         if isinstance(outer_type, str) and outer_type.startswith("item."):
+             generated = f"item_{self._item_counter}"
+             self._item_counter += 1
+             return generated
+
+         return None
+
      def read_prompt_file(self, file_path: str) -> str:
          """Read prompt from a file"""
          try:
@@ -266,6 +298,7 @@ Environment Variables:
          msg_type: str,
          payload: dict,
          outer_type: str = "",
+         item_id: Optional[str] = None,
      ) -> Optional[str]:
          """
          Pretty format for specific msg types to be human readable while
@@ -282,12 +315,21 @@ Environment Variables:
          now = datetime.now().strftime("%I:%M:%S %p")
          msg_type = (msg_type or "").strip()
          header_type = (outer_type or msg_type).strip()
-         header = {"type": header_type or msg_type or "message", "datetime": now}
+         base_type = header_type or msg_type or "message"

-         if outer_type and msg_type and outer_type != msg_type:
-             header["item_type"] = msg_type
+         def make_header(type_value: str):
+             hdr = {"type": type_value, "datetime": now}
+             if item_id:
+                 hdr["id"] = item_id
+             if outer_type and msg_type and outer_type != msg_type:
+                 hdr["item_type"] = msg_type
+             return hdr
+
+         header = make_header(base_type)

          if isinstance(payload, dict):
+             if item_id and "id" not in payload:
+                 payload["id"] = item_id
              if payload.get("command"):
                  header["command"] = payload.get("command")
              if payload.get("status"):
@@ -298,9 +340,7 @@ Environment Variables:
          # agent_reasoning → show 'text' human-readable
          if msg_type in {"agent_reasoning", "reasoning"}:
              content = self._extract_reasoning_text(payload)
-             header = {"type": header_type or msg_type, "datetime": now}
-             if outer_type and msg_type and outer_type != msg_type:
-                 header["item_type"] = msg_type
+             header = make_header(header_type or msg_type)
              if "\n" in content:
                  return json.dumps(header, ensure_ascii=False) + "\ntext:\n" + content
              header["text"] = content
@@ -308,9 +348,7 @@ Environment Variables:

          if msg_type in {"agent_message", "message", "assistant_message", "assistant"}:
              content = self._extract_message_text(payload)
-             header = {"type": header_type or msg_type, "datetime": now}
-             if outer_type and msg_type and outer_type != msg_type:
-                 header["item_type"] = msg_type
+             header = make_header(header_type or msg_type)
              if "\n" in content:
                  return json.dumps(header, ensure_ascii=False) + "\nmessage:\n" + content
              if content != "":
@@ -386,6 +424,9 @@ Environment Variables:
              parts = [p.strip() for p in env_val.split(",") if p.strip()]
              hide_types.update(parts)

+         # Reset per-run item counter for synthesized ids
+         self._item_counter = 0
+
          # We fully suppress all token_count events (do not emit even at end)
          last_token_count = None

@@ -444,6 +485,14 @@ Environment Variables:
          def handle_obj(obj_dict: dict):
              nonlocal last_token_count
              msg_type_inner, payload_inner, outer_type_inner = self._normalize_event(obj_dict)
+             item_id_inner = self._normalize_item_id(payload_inner, outer_type_inner)
+
+             if (
+                 item_id_inner
+                 and isinstance(obj_dict.get("item"), dict)
+                 and not obj_dict["item"].get("id")
+             ):
+                 obj_dict["item"]["id"] = item_id_inner

              if msg_type_inner == "token_count":
                  last_token_count = obj_dict
@@ -452,7 +501,12 @@ Environment Variables:
              if msg_type_inner and msg_type_inner in hide_types:
                  return  # suppress

-             pretty_line_inner = self._format_msg_pretty(msg_type_inner, payload_inner, outer_type_inner)
+             pretty_line_inner = self._format_msg_pretty(
+                 msg_type_inner,
+                 payload_inner,
+                 outer_type_inner,
+                 item_id=item_id_inner,
+             )
              if pretty_line_inner is not None:
                  print(pretty_line_inner, flush=True)
              else:
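
The core of the codex.py change above is the item-id normalization. The following minimal, self-contained sketch (not part of the package) mirrors that logic with a module-level counter standing in for `CodexService._item_counter`, so the behavior can be seen in isolation; the example event payloads and `outer_type` values are invented for illustration.

```python
from typing import Optional

_item_counter = 0  # stands in for CodexService._item_counter


def _parse_item_number(item_id: str) -> Optional[int]:
    """Return the numeric part of an 'item_{n}' id, or None if it does not parse."""
    if not isinstance(item_id, str):
        return None
    item_id = item_id.strip()
    if not item_id.startswith("item_"):
        return None
    try:
        return int(item_id.split("item_", 1)[1])
    except Exception:
        return None


def normalize_item_id(payload: dict, outer_type: str) -> Optional[str]:
    """Keep an existing id; otherwise synthesize a sequential item_{n} for item.* events."""
    global _item_counter
    item_id = payload.get("id") if isinstance(payload, dict) else None
    if isinstance(item_id, str) and item_id.strip():
        parsed = _parse_item_number(item_id)
        if parsed is not None and parsed + 1 > _item_counter:
            _item_counter = parsed + 1  # keep the counter ahead of ids seen on the stream
        return item_id.strip()
    if isinstance(outer_type, str) and outer_type.startswith("item."):
        generated = f"item_{_item_counter}"
        _item_counter += 1
        return generated
    return None  # non-item events carry no id


print(normalize_item_id({"id": "item_4"}, "item.completed"))  # item_4 (counter advances to 5)
print(normalize_item_id({}, "item.started"))                  # item_5 (synthesized)
print(normalize_item_id({}, "turn.completed"))                # None
```

In codex.py itself the resolved id is also written back into the event's `item` object and into the pretty-printed header via `make_header`, so downstream consumers of the stream-json output see an `id` field on item.* events even when the CLI omitted one.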