aia 0.9.24 → 0.10.2

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registries.
Files changed (62)
  1. checksums.yaml +4 -4
  2. data/.version +1 -1
  3. data/CHANGELOG.md +84 -3
  4. data/README.md +179 -59
  5. data/bin/aia +6 -0
  6. data/docs/cli-reference.md +145 -72
  7. data/docs/configuration.md +156 -19
  8. data/docs/examples/tools/index.md +2 -2
  9. data/docs/faq.md +11 -11
  10. data/docs/guides/available-models.md +11 -11
  11. data/docs/guides/basic-usage.md +18 -17
  12. data/docs/guides/chat.md +57 -11
  13. data/docs/guides/executable-prompts.md +15 -15
  14. data/docs/guides/first-prompt.md +2 -2
  15. data/docs/guides/getting-started.md +6 -6
  16. data/docs/guides/image-generation.md +24 -24
  17. data/docs/guides/local-models.md +2 -2
  18. data/docs/guides/models.md +96 -18
  19. data/docs/guides/tools.md +4 -4
  20. data/docs/installation.md +2 -2
  21. data/docs/prompt_management.md +11 -11
  22. data/docs/security.md +3 -3
  23. data/docs/workflows-and-pipelines.md +1 -1
  24. data/examples/README.md +6 -6
  25. data/examples/headlines +3 -3
  26. data/lib/aia/aia_completion.bash +2 -2
  27. data/lib/aia/aia_completion.fish +4 -4
  28. data/lib/aia/aia_completion.zsh +2 -2
  29. data/lib/aia/chat_processor_service.rb +31 -21
  30. data/lib/aia/config/cli_parser.rb +403 -403
  31. data/lib/aia/config/config_section.rb +87 -0
  32. data/lib/aia/config/defaults.yml +219 -0
  33. data/lib/aia/config/defaults_loader.rb +147 -0
  34. data/lib/aia/config/mcp_parser.rb +151 -0
  35. data/lib/aia/config/model_spec.rb +67 -0
  36. data/lib/aia/config/validator.rb +185 -136
  37. data/lib/aia/config.rb +336 -17
  38. data/lib/aia/directive_processor.rb +14 -6
  39. data/lib/aia/directives/configuration.rb +24 -10
  40. data/lib/aia/directives/models.rb +3 -4
  41. data/lib/aia/directives/utility.rb +3 -2
  42. data/lib/aia/directives/web_and_file.rb +50 -47
  43. data/lib/aia/logger.rb +328 -0
  44. data/lib/aia/prompt_handler.rb +18 -22
  45. data/lib/aia/ruby_llm_adapter.rb +572 -69
  46. data/lib/aia/session.rb +9 -8
  47. data/lib/aia/ui_presenter.rb +20 -16
  48. data/lib/aia/utility.rb +50 -18
  49. data/lib/aia.rb +91 -66
  50. data/lib/extensions/ruby_llm/modalities.rb +2 -0
  51. data/mcp_servers/apple-mcp.json +8 -0
  52. data/mcp_servers/mcp_server_chart.json +11 -0
  53. data/mcp_servers/playwright_one.json +8 -0
  54. data/mcp_servers/playwright_two.json +8 -0
  55. data/mcp_servers/tavily_mcp_server.json +8 -0
  56. metadata +83 -25
  57. data/lib/aia/config/base.rb +0 -308
  58. data/lib/aia/config/defaults.rb +0 -91
  59. data/lib/aia/config/file_loader.rb +0 -163
  60. data/mcp_servers/imcp.json +0 -7
  61. data/mcp_servers/launcher.json +0 -11
  62. data/mcp_servers/timeserver.json +0 -8
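Most of the documentation hunks below apply one recurring change: multi-word CLI options move from snake_case to kebab-case, and `--out_file` becomes `--output`. A minimal before/after sketch, using only spellings that appear in the hunks (whether the old snake_case forms still work as aliases is not stated in this diff):

```bash
# New 0.10.x spellings (old 0.9.x spellings shown in trailing comments)
aia --available-models                    # was: aia --available_models
aia --output result.md my_prompt          # was: aia --out_file result.md my_prompt
aia --max-tokens 1000 my_prompt           # was: aia --max_tokens 1000 my_prompt
aia --prompts-dir ~/.prompts my_prompt    # was: aia --prompts_dir ~/.prompts my_prompt
```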
@@ -72,10 +72,10 @@ aia --tools ./tools/ comprehensive_analysis
  ### Tool Security
  ```bash
  # Restrict to specific tools
- aia --tools ./tools/ --allowed_tools "file_analyzer,data_analyzer" safe_analysis
+ aia --tools ./tools/ --allowed-tools "file_analyzer,data_analyzer" safe_analysis

  # Block potentially dangerous tools
- aia --tools ./tools/ --rejected_tools "system_monitor,process_manager" user_analysis
+ aia --tools ./tools/ --rejected-tools "system_monitor,process_manager" user_analysis
  ```

  ### Tool Integration in Prompts
data/docs/faq.md CHANGED
@@ -86,7 +86,7 @@ aia --model ollama/llama3.2,gpt-4o-mini my_prompt
  aia --model ollama/mistral,lms/qwen-coder,claude-3-sonnet --consensus decision

  # Use local for drafts, cloud for refinement
- aia --model ollama/llama3.2 --out_file draft.md initial_analysis
+ aia --model ollama/llama3.2 --output draft.md initial_analysis
  aia --model gpt-4 --include draft.md final_report
  ```

@@ -147,13 +147,13 @@ aia --model gpt-4 my_prompt
  ```

  ### Q: How do I set a custom prompts directory?
- **A:** Use the `--prompts_dir` option or set it in configuration:
+ **A:** Use the `--prompts-dir` option or set it in configuration:
  ```bash
  # Command line
- aia --prompts_dir /path/to/prompts my_prompt
+ aia --prompts-dir /path/to/prompts my_prompt

- # Environment variable
- export AIA_PROMPTS_DIR="/path/to/prompts"
+ # Environment variable (uses nested naming convention)
+ export AIA_PROMPTS__DIR="/path/to/prompts"
  ```

  ## Prompts and Directives
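The `AIA_PROMPTS__DIR` spelling above reflects the new nested environment-variable convention: nested config keys are joined with a double underscore. A short sketch of how the variables that appear elsewhere in this diff line up; the config-key comments are illustrative guesses, not taken from the docs:

```bash
# Double underscore separates nesting levels (key names in comments are illustrative)
export AIA_PROMPTS__DIR="$HOME/.prompts"   # e.g. prompts.dir
export AIA_OUTPUT__FILE="./temp.md"        # e.g. output.file
export AIA_FLAGS__VERBOSE=true             # e.g. flags.verbose
export AIA_MODEL=gpt-4o-mini               # top-level keys keep a single level
```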
@@ -292,7 +292,7 @@ You: /clear
  **A:**
  1. Check your API keys are set correctly
  2. Verify internet connection
- 3. Test with: `aia --available_models`
+ 3. Test with: `aia --available-models`

  ### Q: "Permission denied" errors
  **A:**
@@ -396,7 +396,7 @@ git push -u origin main
  **A:** This usually means AIA can't locate your prompt file:
  ```bash
  # Check prompts directory
- ls $AIA_PROMPTS_DIR
+ ls $AIA_PROMPTS__DIR

  # Verify prompt file exists
  ls ~/.prompts/my_prompt.txt
@@ -409,7 +409,7 @@ aia --fuzzy
  **A:** Check your model name and availability:
  ```bash
  # List available models
- aia --available_models
+ aia --available-models

  # Check model name spelling
  aia --model gpt-4o-mini # Correct
@@ -448,7 +448,7 @@ aia --debug --verbose my_prompt
  aia --model gpt-4o-mini my_prompt

  # Reduce max tokens
- aia --max_tokens 1000 my_prompt
+ aia --max-tokens 1000 my_prompt

  # Lower temperature for faster responses
  aia --temperature 0.1 my_prompt
@@ -464,7 +464,7 @@ aia --pipeline "analyze,summarize,report" large_data.csv
  //include specific_section.txt

  # Check model context limits
- aia --available_models | grep context
+ aia --available-models | grep context
  ```

  ### Q: Debug mode - how to get more information?
@@ -485,7 +485,7 @@ aia --debug --config
  | Error | Cause | Solution |
  |-------|-------|----------|
  | "Prompt not found" | Missing prompt file | Check file exists and spelling |
- | "Model not available" | Invalid model name | Use `--available_models` to list valid models |
+ | "Model not available" | Invalid model name | Use `--available-models` to list valid models |
  | "Shell command failed" | Invalid shell syntax | Test shell commands separately first |
  | "Configuration error" | Invalid config syntax | Check config file YAML syntax |
  | "API key missing" | No API key configured | Set environment variables for your models |
@@ -7,21 +7,21 @@ AIA supports a wide range of AI models through the RubyLLM gem. This comprehensi
  ### Command Line Query
  ```bash
  # List all available models
- aia --available_models
+ aia --available-models

  # Filter by provider
- aia --available_models openai
- aia --available_models anthropic
- aia --available_models google
+ aia --available-models openai
+ aia --available-models anthropic
+ aia --available-models google

  # Filter by capability
- aia --available_models vision
- aia --available_models function_calling
- aia --available_models text_to_image
+ aia --available-models vision
+ aia --available-models function_calling
+ aia --available-models text_to_image

  # Complex filtering (AND operation)
- aia --available_models openai,gpt,4
- aia --available_models anthropic,claude,sonnet
+ aia --available-models openai,gpt,4
+ aia --available-models anthropic,claude,sonnet
  ```

  ### Within Prompts
@@ -329,7 +329,7 @@ puts "//config model #{model}"
  # Model fallback chain
  //ruby
  preferred_models = ['gpt-4', 'claude-3-sonnet', 'gpt-3.5-turbo']
- available_models = `aia --available_models`.split("\n").map { |line| line.split.first }
+ available_models = `aia --available-models`.split("\n").map { |line| line.split.first }

  selected_model = preferred_models.find { |model| available_models.include?(model) }
  puts "//config model #{selected_model || 'gpt-3.5-turbo'}"
@@ -338,7 +338,7 @@ puts "//config model #{selected_model || 'gpt-3.5-turbo'}"
  ## Staying Current

  ### Model Updates
- - **Check regularly**: `aia --available_models`
+ - **Check regularly**: `aia --available-models`
  - **Version changes**: Models are updated periodically
  - **New releases**: Follow provider announcements
  - **Deprecations**: Some models may be retired
@@ -34,13 +34,13 @@ Control where and how AIA saves results:

  ```bash
  # Save to file
- aia --out_file result.md analysis_prompt data.csv
+ aia --output result.md analysis_prompt data.csv

  # Append to existing file
- aia --out_file log.md --append status_check
+ aia --output log.md --append status_check

  # Format with markdown
- aia --out_file report.md --markdown comprehensive_analysis
+ aia --output report.md --markdown comprehensive_analysis
  ```

  ## Common Workflow Patterns
@@ -108,8 +108,8 @@ debug: false

  ```bash
  # Use environment-specific configs
- aia --config_file ~/.aia/dev_config.yml development_task
- aia --config_file ~/.aia/prod_config.yml production_analysis
+ aia --config-file ~/.aia/dev_config.yml development_task
+ aia --config-file ~/.aia/prod_config.yml production_analysis
  ```

  ### Task-Specific Model Selection
@@ -216,7 +216,7 @@ Validate inputs before processing:
  test -f input.csv && aia data_analysis input.csv || echo "Input file not found"

  # Verify model availability
- aia --available_models | grep -q "gpt-4" && aia --model gpt-4 task || aia task
+ aia --available-models | grep -q "gpt-4" && aia --model gpt-4 task || aia task
  ```

  ## Performance Optimization
@@ -237,11 +237,11 @@ Handle multiple similar tasks efficiently:
  ```bash
  # Process multiple files
  for file in *.py; do
- aia code_review "$file" --out_file "reviews/${file%.py}_review.md"
+ aia code_review "$file" --output "reviews/${file%.py}_review.md"
  done

  # Parallel processing
- parallel -j4 aia analysis_task {} --out_file {.}_analysis.md ::: *.csv
+ parallel -j4 aia analysis_task {} --output {.}_analysis.md ::: *.csv
  ```

  ### Caching and Reuse
@@ -250,7 +250,7 @@ Avoid redundant processing:
  ```bash
  # Check if output exists before processing
  output_file="analysis_$(date +%Y%m%d).md"
- test -f "$output_file" || aia daily_analysis --out_file "$output_file"
+ test -f "$output_file" || aia daily_analysis --output "$output_file"

  # Reuse previous analysis
  aia followup_analysis --previous_analysis yesterday_analysis.md
@@ -266,11 +266,11 @@ Integrate AIA into shell workflows:
  # Automated analysis script

  echo "Starting analysis..."
- aia system_health_check --out_file health_$(date +%Y%m%d_%H%M).md
+ aia system_health_check --output health_$(date +%Y%m%d_%H%M).md

  if [ $? -eq 0 ]; then
  echo "Health check complete"
- aia generate_report --source health_*.md --out_file daily_report.md
+ aia generate_report --source health_*.md --output daily_report.md
  else
  echo "Health check failed, investigating..."
  aia troubleshoot_system --debug --verbose
@@ -316,7 +316,7 @@ jobs:
  run: gem install aia
  - name: Run Analysis
  run: |
- aia pr_analysis --diff_only --out_file analysis.md
+ aia pr_analysis --diff_only --output analysis.md
  cat analysis.md >> $GITHUB_STEP_SUMMARY
  env:
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
@@ -327,7 +327,7 @@ jobs:
  ### Model and API Issues
  ```bash
  # Test model availability
- aia --available_models | grep "gpt-4" || echo "GPT-4 not available"
+ aia --available-models | grep "gpt-4" || echo "GPT-4 not available"

  # Test API connection
  aia --model gpt-3.5-turbo --debug simple_test_prompt
@@ -415,16 +415,17 @@ Add these powerful aliases and functions to your shell configuration:

  ```bash
  # ~/.bashrc_aia (or add to ~/.bashrc)
- export AIA_PROMPTS_DIR=~/.prompts
- export AIA_OUT_FILE=./temp.md
+ # Uses nested naming convention with double underscore
+ export AIA_PROMPTS__DIR=~/.prompts
+ export AIA_OUTPUT__FILE=./temp.md
  export AIA_MODEL=gpt-4o-mini
- export AIA_VERBOSE=true # Shows spinner while waiting for LLM response
+ export AIA_FLAGS__VERBOSE=true # Shows spinner while waiting for LLM response

  # Quick chat alias
  alias chat='aia --chat --terse'

  # Quick question function
- ask() { echo "$1" | aia run --no-out_file; }
+ ask() { echo "$1" | aia run --no-output; }
  ```

  **Usage Examples:**
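For what it's worth, the `ask()` helper redefined above can be exercised like this once sourced; the question text is just a placeholder:

```bash
# Sends the question through the generic `run` prompt; --no-output keeps the answer on stdout
ask "Summarize today's git activity in one sentence"
```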
data/docs/guides/chat.md CHANGED
@@ -22,7 +22,7 @@ aia --chat --role assistant my_context.txt
  aia --chat my_initial_prompt

  # Chat with system prompt
- aia --chat --system_prompt helpful_assistant
+ aia --chat --system-prompt helpful_assistant

  # Chat with role-based context
  aia --chat --role code_expert debugging_session
@@ -190,7 +190,7 @@ aia --chat --tools ./my_tools.rb
  aia --chat --tools ./tools/

  # Restrict tool access
- aia --chat --tools ./tools/ --allowed_tools "file_reader,calculator"
+ aia --chat --tools ./tools/ --allowed-tools "file_reader,calculator"
  ```

  #### Using Tools in Conversation
@@ -216,7 +216,7 @@ Available Tools:
  ### Code Review Session
  ```bash
  # Start specialized code review chat
- aia --chat --role code_expert --system_prompt code_reviewer
+ aia --chat --role code_expert --system-prompt code_reviewer

  You: I need help reviewing this Python function:
  //include my_function.py
@@ -259,7 +259,7 @@ AI: Here's how we can make it more engaging...
  ### Learning Session
  ```bash
  # Start educational chat
- aia --chat --role teacher --system_prompt patient_explainer
+ aia --chat --role teacher --system-prompt patient_explainer

  You: Explain how blockchain works, but I'm completely new to this
  AI: Let me explain blockchain in simple terms, starting from the basics...
@@ -279,19 +279,19 @@ aia --chat --speak
  aia --chat --speak --voice nova

  # Use high-quality speech model
- aia --chat --speak --speech_model tts-1-hd
+ aia --chat --speak --speech-model tts-1-hd
  ```

  ### Audio Input
  ```bash
  # Use speech-to-text for input
- aia --chat --transcription_model whisper-1 audio_input.wav
+ aia --chat --transcription-model whisper-1 audio_input.wav
  ```

  ### Interactive Voice Chat
  ```bash
  # Full voice interaction
- aia --chat --speak --voice echo --transcription_model whisper-1
+ aia --chat --speak --voice echo --transcription-model whisper-1

  # Great for hands-free operation or accessibility
  ```
@@ -421,6 +421,52 @@ class ChatCommands < RubyLLM::Tool
  end
  ```

+ ## Token Usage and Cost Tracking
+
+ ### Displaying Token Usage
+ Use the `--tokens` flag to see token usage after each response:
+
+ ```bash
+ # Enable token usage display
+ aia --chat --tokens
+
+ # Example output after a response:
+ # AI: Here's my response to your question...
+ #
+ # Tokens: input=125, output=89, model=gpt-4o-mini
+ ```
+
+ ### Cost Estimation
+ Use the `--cost` flag to include cost calculations with token usage:
+
+ ```bash
+ # Enable cost estimation (automatically enables --tokens)
+ aia --chat --cost
+
+ # Example output after a response:
+ # AI: Here's my response to your question...
+ #
+ # Tokens: input=125, output=89, model=gpt-4o-mini
+ # Cost: $0.0003 (input: $0.0002, output: $0.0001)
+ ```
+
+ ### Multi-Model Token Tracking
+ When using multiple models, token usage is displayed for each model:
+
+ ```bash
+ aia --chat --tokens --model gpt-4,claude-3-sonnet
+
+ # Example output:
+ # from: gpt-4
+ # Here's my response...
+ #
+ # from: claude-3-sonnet
+ # Here's my alternative response...
+ #
+ # Model: gpt-4 - Tokens: input=125, output=89
+ # Model: claude-3-sonnet - Tokens: input=125, output=112
+ ```
+
  ## Troubleshooting Chat Mode

  ### Common Issues
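A combination not spelled out in the section above, offered as a sketch on the assumption that the flags compose: since `--cost` already implies `--tokens`, a single flag is enough to get both readouts.

```bash
# --cost implies --tokens, so both usage and estimated cost are printed per response
aia --chat --cost --model gpt-4o-mini
```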
@@ -498,19 +498,19 @@ aia --pipeline "data_prep,analysis" --chat dataset.csv
  ### Configuration Integration
  ```bash
  # Use predefined configurations in chat
- aia --config_file chat_setup.yml --chat
+ aia --config-file chat_setup.yml --chat

  # Override specific settings
- aia --chat --temperature 0.9 --max_tokens 3000
+ aia --chat --temperature 0.9 --max-tokens 3000
  ```

  ### Output Integration
  ```bash
  # Save chat output to file
- aia --chat --out_file discussion.md --markdown
+ aia --chat --output discussion.md --markdown

  # Append to existing files
- aia --chat --out_file project_log.md --append
+ aia --chat --output project_log.md --append
  ```

  ## Related Documentation
@@ -11,7 +11,7 @@ Executable prompts are prompt files with a special shebang line that makes them
  ### Basic Structure

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Your prompt description and comments

  Your prompt content here...
@@ -20,7 +20,7 @@ Your prompt content here...
  ### Key Components

  1. **Shebang Line**: Must include `--exec` flag to enable prompt processing
- 2. **Output Configuration**: Use `--no-out_file` to send output to STDOUT
+ 2. **Output Configuration**: Use `--no-output` to send output to STDOUT
  3. **Executable Permissions**: Make file executable with `chmod +x`

  ## The `run` Prompt Pattern
@@ -56,7 +56,7 @@ This pattern allows for quick one-shot questions without creating specific promp
  Create a weather monitoring executable:

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Get current storm activity for the east and south coast of the US

  Summarize the tropical storm outlook for the Atlantic, Caribbean Sea and Gulf of America.
@@ -79,7 +79,7 @@ chmod +x weather_report
  ### System Status Monitor

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # System health check and analysis

  Analyze the current system status and provide recommendations:
@@ -102,7 +102,7 @@ Provide analysis and recommendations for system optimization.
  ### Code Quality Checker

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Analyze code quality for the current directory

  //config model = gpt-4
@@ -125,7 +125,7 @@ Provide code quality assessment and improvement recommendations.
  ### Daily Standup Generator

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Generate daily standup report from git activity

  //config model = gpt-4o-mini
@@ -151,7 +151,7 @@ Provide a structured standup report.
  Create executable prompts that accept command-line parameters:

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Code review for specific file
  # Usage: ./code_review <filename>

@@ -172,7 +172,7 @@ Provide specific, actionable feedback for improvements.
  Chain multiple prompts in an executable workflow:

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Complete project analysis pipeline

  //pipeline project_scan,security_check,recommendations
@@ -183,7 +183,7 @@ Starting comprehensive project analysis...
  ### Conditional Logic Executables

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Environment-aware deployment checker

  //ruby
@@ -216,7 +216,7 @@ Analyze the deployment configuration and provide environment-specific recommenda
  ### As Git Hooks

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # .git/hooks/pre-commit
  # Automated commit message analysis

@@ -285,7 +285,7 @@ chmod 700 sensitive_prompt # Owner only
  ### Error Handling

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Robust executable with error handling

  //ruby
@@ -327,7 +327,7 @@ Analyze the file structure, quality, and provide recommendations.
  ### Enable Debug Mode

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec --debug --verbose
+ #!/usr/bin/env aia run --no-output --exec --debug --verbose
  # Debug version of your executable prompt
  ```

@@ -352,14 +352,14 @@ git status
  | "Permission denied" | File not executable | `chmod +x filename` |
  | "Command not found" | Missing shebang or wrong path | Check shebang line |
  | "Prompt not found" | Missing run prompt | Create ~/.prompts/run.txt |
- | "Output not appearing" | Missing --no-out_file | Add flag to shebang |
+ | "Output not appearing" | Missing --no-output | Add flag to shebang |

  ## Advanced Executable Patterns

  ### Self-Documenting Executables

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Self-documenting code analyzer
  # Usage: ./code_analyzer [--help] <directory>

@@ -384,7 +384,7 @@ end
  ### Multi-Stage Executables

  ```bash
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # Multi-stage project analysis

  //ruby
@@ -385,7 +385,7 @@ git commit -m "My first AIA prompts"
  aia --help

  # Model information
- aia --available_models
+ aia --available-models

  # Debug information
  aia --debug my_prompt
@@ -417,7 +417,7 @@ ls ~/.prompts/your_prompt.txt
  chmod 644 ~/.prompts/your_prompt.txt

  # Use full path if needed
- aia --prompts_dir ~/.prompts your_prompt
+ aia --prompts-dir ~/.prompts your_prompt
  ```

  ### API Errors
@@ -53,7 +53,7 @@ aia --model gpt-4 --chat
  aia --model claude-3-sonnet --chat

  # See all available models
- aia --available_models
+ aia --available-models
  ```

  ### 3. Adjusting AI Behavior
@@ -71,7 +71,7 @@ aia --temperature 0.3 --chat
  aia --terse --chat

  # Limit response length
- aia --max_tokens 100 --chat
+ aia --max-tokens 100 --chat
  ```

  ## Creating Your First Prompt
@@ -239,13 +239,13 @@ Save AI responses to files:

  ```bash
  # Save to a file
- aia --out_file response.md my_prompt
+ aia --output response.md my_prompt

  # Append to an existing file
- aia --out_file response.md --append my_prompt
+ aia --output response.md --append my_prompt

  # Format with Markdown
- aia --out_file response.md --markdown my_prompt
+ aia --output response.md --markdown my_prompt
  ```

  ## Chat Mode Features
@@ -426,7 +426,7 @@ Now that you understand the basics:

  #### "Model not available"
  - Check your API keys: `echo $OPENAI_API_KEY`
- - List available models: `aia --available_models`
+ - List available models: `aia --available-models`
  - Check your internet connection

  #### "Permission denied"