aia 0.9.24 → 0.10.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. checksums.yaml +4 -4
  2. data/.version +1 -1
  3. data/CHANGELOG.md +84 -3
  4. data/README.md +179 -59
  5. data/bin/aia +6 -0
  6. data/docs/cli-reference.md +145 -72
  7. data/docs/configuration.md +156 -19
  8. data/docs/examples/tools/index.md +2 -2
  9. data/docs/faq.md +11 -11
  10. data/docs/guides/available-models.md +11 -11
  11. data/docs/guides/basic-usage.md +18 -17
  12. data/docs/guides/chat.md +57 -11
  13. data/docs/guides/executable-prompts.md +15 -15
  14. data/docs/guides/first-prompt.md +2 -2
  15. data/docs/guides/getting-started.md +6 -6
  16. data/docs/guides/image-generation.md +24 -24
  17. data/docs/guides/local-models.md +2 -2
  18. data/docs/guides/models.md +96 -18
  19. data/docs/guides/tools.md +4 -4
  20. data/docs/installation.md +2 -2
  21. data/docs/prompt_management.md +11 -11
  22. data/docs/security.md +3 -3
  23. data/docs/workflows-and-pipelines.md +1 -1
  24. data/examples/README.md +6 -6
  25. data/examples/headlines +3 -3
  26. data/lib/aia/aia_completion.bash +2 -2
  27. data/lib/aia/aia_completion.fish +4 -4
  28. data/lib/aia/aia_completion.zsh +2 -2
  29. data/lib/aia/chat_processor_service.rb +31 -21
  30. data/lib/aia/config/cli_parser.rb +403 -403
  31. data/lib/aia/config/config_section.rb +87 -0
  32. data/lib/aia/config/defaults.yml +219 -0
  33. data/lib/aia/config/defaults_loader.rb +147 -0
  34. data/lib/aia/config/mcp_parser.rb +151 -0
  35. data/lib/aia/config/model_spec.rb +67 -0
  36. data/lib/aia/config/validator.rb +185 -136
  37. data/lib/aia/config.rb +336 -17
  38. data/lib/aia/directive_processor.rb +14 -6
  39. data/lib/aia/directives/configuration.rb +24 -10
  40. data/lib/aia/directives/models.rb +3 -4
  41. data/lib/aia/directives/utility.rb +3 -2
  42. data/lib/aia/directives/web_and_file.rb +50 -47
  43. data/lib/aia/logger.rb +328 -0
  44. data/lib/aia/prompt_handler.rb +18 -22
  45. data/lib/aia/ruby_llm_adapter.rb +572 -69
  46. data/lib/aia/session.rb +9 -8
  47. data/lib/aia/ui_presenter.rb +20 -16
  48. data/lib/aia/utility.rb +50 -18
  49. data/lib/aia.rb +91 -66
  50. data/lib/extensions/ruby_llm/modalities.rb +2 -0
  51. data/mcp_servers/apple-mcp.json +8 -0
  52. data/mcp_servers/mcp_server_chart.json +11 -0
  53. data/mcp_servers/playwright_one.json +8 -0
  54. data/mcp_servers/playwright_two.json +8 -0
  55. data/mcp_servers/tavily_mcp_server.json +8 -0
  56. metadata +83 -25
  57. data/lib/aia/config/base.rb +0 -308
  58. data/lib/aia/config/defaults.rb +0 -91
  59. data/lib/aia/config/file_loader.rb +0 -163
  60. data/mcp_servers/imcp.json +0 -7
  61. data/mcp_servers/launcher.json +0 -11
  62. data/mcp_servers/timeserver.json +0 -8
@@ -11,7 +11,7 @@ AIA supports AI-powered image generation through various models, enabling you to
  ### Image Model Capabilities
  ```bash
  # Check available image generation models
- aia --available_models text_to_image
+ aia --available-models text_to_image

  # Example output:
  # - dall-e-3 (openai) text to image
@@ -26,26 +26,26 @@ aia --available_models text_to_image
  aia --model dall-e-3 "A serene mountain lake at sunset"

  # Generate with specific size
- aia --model dall-e-3 --image_size 1024x1024 "Modern office workspace"
+ aia --model dall-e-3 --image-size 1024x1024 "Modern office workspace"

  # Generate with quality settings
- aia --model dall-e-3 --image_quality hd "Professional headshot"
+ aia --model dall-e-3 --image-quality hd "Professional headshot"
  ```

  ### Image Configuration Options

- #### Image Size (`--image_size`, `--is`)
+ #### Image Size (`--image-size`, `--is`)
  ```bash
  # Square formats
- aia --image_size 1024x1024 "Square image prompt"
+ aia --image-size 1024x1024 "Square image prompt"
  aia --is 512x512 "Smaller square image"

  # Landscape formats
- aia --image_size 1792x1024 "Wide landscape image"
+ aia --image-size 1792x1024 "Wide landscape image"
  aia --is 1344x768 "Medium landscape"

  # Portrait formats
- aia --image_size 1024x1792 "Tall portrait image"
+ aia --image-size 1024x1792 "Tall portrait image"
  aia --is 768x1344 "Medium portrait"
  ```

@@ -54,13 +54,13 @@ aia --is 768x1344 "Medium portrait"
  - Landscape: `1792x1024`, `1344x768`
  - Portrait: `1024x1792`, `768x1344`

- #### Image Quality (`--image_quality`, `--iq`)
+ #### Image Quality (`--image-quality`, `--iq`)
  ```bash
  # Standard quality (faster, less expensive)
- aia --image_quality standard "Quick concept image"
+ aia --image-quality standard "Quick concept image"

  # HD quality (better detail, more expensive)
- aia --image_quality hd "High-quality marketing image"
+ aia --image-quality hd "High-quality marketing image"
  aia --iq hd "Detailed technical diagram"
  ```

@@ -68,13 +68,13 @@ aia --iq hd "Detailed technical diagram"
  - `standard`: Good quality, faster generation, lower cost
  - `hd`: Enhanced detail and resolution, slower, higher cost

- #### Image Style (`--style`, `--image_style`)
+ #### Image Style (`--style`, `--image-style`)
  ```bash
  # Vivid style (hyper-real, dramatic colors)
- aia --image_style vivid "Dramatic sunset over city skyline"
+ aia --image-style vivid "Dramatic sunset over city skyline"

  # Natural style (more natural, less stylized)
- aia --image_style natural "Realistic portrait of a person reading"
+ aia --image-style natural "Realistic portrait of a person reading"
  aia --style natural "Documentary-style photograph"
  ```

@@ -342,30 +342,30 @@ aia --model dall-e-3 "Centered composition, symmetrical balance, rule of thirds,
  #### Resolution and Size Selection
  ```bash
  # Choose size based on use case
- aia --image_size 1792x1024 "Website hero image" # Landscape
- aia --image_size 1024x1792 "Mobile app screenshot" # Portrait
- aia --image_size 1024x1024 "Social media post" # Square
+ aia --image-size 1792x1024 "Website hero image" # Landscape
+ aia --image-size 1024x1792 "Mobile app screenshot" # Portrait
+ aia --image-size 1024x1024 "Social media post" # Square
  ```

  #### Quality vs. Cost Balance
  ```bash
  # Standard for concepts/drafts
- aia --image_quality standard "Initial concept image"
+ aia --image-quality standard "Initial concept image"

  # HD for final/published images
- aia --image_quality hd "Final marketing image"
+ aia --image-quality hd "Final marketing image"
  ```

  ### Iterative Refinement
  ```bash
  # Generate initial concept
- aia --model dall-e-3 --out_file concept_v1.png "Modern kitchen design"
+ aia --model dall-e-3 --output concept_v1.png "Modern kitchen design"

  # Refine based on results
- aia --model dall-e-3 --out_file concept_v2.png "Modern kitchen with marble countertops, pendant lighting, minimalist cabinets"
+ aia --model dall-e-3 --output concept_v2.png "Modern kitchen with marble countertops, pendant lighting, minimalist cabinets"

  # Final version with specific details
- aia --model dall-e-3 --image_quality hd --out_file final_kitchen.png "Ultra-modern kitchen with white marble waterfall countertops, brass pendant lights, handleless cabinets, large island, professional photography"
+ aia --model dall-e-3 --image-quality hd --output final_kitchen.png "Ultra-modern kitchen with white marble waterfall countertops, brass pendant lights, handleless cabinets, large island, professional photography"
  ```

  ## Troubleshooting Image Generation
@@ -399,7 +399,7 @@ aia --model dall-e-3 "Subject photographed with 50mm lens, f/2.8, natural lighti
  **Solutions**:
  ```bash
  # Specify exact requirements
- aia --model dall-e-3 --image_size 1792x1024 --image_quality hd "Detailed prompt"
+ aia --model dall-e-3 --image-size 1792x1024 --image-quality hd "Detailed prompt"

  # Match use case to settings
  aia --is 1024x1024 --iq standard "Social media post" # Standard for social media
@@ -424,10 +424,10 @@ end
  #### Cost Management
  ```bash
  # Use standard quality for iterations
- aia --image_quality standard "Draft version for review"
+ aia --image-quality standard "Draft version for review"

  # Use HD only for finals
- aia --image_quality hd "Final approved concept"
+ aia --image-quality hd "Final approved concept"
  ```

  ## Integration Examples
@@ -209,7 +209,7 @@ aia --model ollama/llama3.2,ollama/mistral,gpt-4 --consensus decision_prompt

  ```bash
  # 1. Process with local model (private)
- aia --model ollama/llama3.2 --out_file draft.md sensitive_data.txt
+ aia --model ollama/llama3.2 --output draft.md sensitive_data.txt

  # 2. Review and sanitize draft.md manually

@@ -222,7 +222,7 @@ aia --model gpt-4 --include draft.md final_output
  ```bash
  # Bulk tasks with local model
  for i in {1..1000}; do
- aia --model ollama/mistral --out_file "result_$i.md" process "input_$i.txt"
+ aia --model ollama/mistral --output "result_$i.md" process "input_$i.txt"
  done

  # No API costs!
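The same bulk loop can also be parallelized. A minimal sketch, assuming GNU `parallel` is installed and that the `process` prompt ID from the loop above exists (the prompt-management guide in this release uses the same `parallel` pattern):

```bash
# Run the local-model batch four jobs at a time; {.} is the input filename without its extension
parallel -j4 aia --model ollama/mistral --output {.}_result.md process {} ::: input_*.txt
```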
@@ -7,20 +7,20 @@ AIA supports multiple AI models through the RubyLLM gem, allowing you to choose
  ### List All Models
  ```bash
  # Show all available models
- aia --available_models
+ aia --available-models

  # Filter by provider
- aia --available_models openai
- aia --available_models anthropic
- aia --available_models google
+ aia --available-models openai
+ aia --available-models anthropic
+ aia --available-models google

  # Filter by capability
- aia --available_models vision
- aia --available_models function_calling
- aia --available_models text_to_image
+ aia --available-models vision
+ aia --available-models function_calling
+ aia --available-models text_to_image

  # Complex filtering
- aia --available_models openai,gpt,4
+ aia --available-models openai,gpt,4
  ```

  ### Model Information
@@ -167,6 +167,84 @@ Model Details:
  - **Error Handling**: Invalid models are reported but don't prevent valid models from working
  - **Batch Mode Support**: Multi-model responses are properly formatted in output files

+ ### Token Usage and Cost Tracking
+
+ One of AIA's most powerful capabilities is real-time tracking of token usage and cost estimates across multiple models. This enables informed decisions about model selection based on both quality and cost.
+
+ #### Enabling Token Tracking
+
+ ```bash
+ # Display token usage for each model
+ aia my_prompt -m gpt-4o,claude-3-sonnet --tokens
+
+ # Include cost estimates (automatically enables --tokens)
+ aia my_prompt -m gpt-4o,claude-3-sonnet --cost
+
+ # In chat mode with full tracking
+ aia --chat -m gpt-4o,claude-3-sonnet,gemini-pro --cost
+ ```
+
+ #### Multi-Model Comparison with Metrics
+
+ ```bash
+ # Compare 3 models with cost tracking
+ aia --chat -m gpt-4o,claude-3-5-sonnet,gemini-1.5-pro --cost
+ ```
+
+ **Example Output:**
+ ```
+ You: Explain the CAP theorem and its implications for distributed databases.
+
+ from: gpt-4o
+ The CAP theorem states that a distributed system can only guarantee two of three properties...
+
+ from: claude-3-5-sonnet
+ CAP theorem, proposed by Eric Brewer, describes fundamental trade-offs in distributed systems...
+
+ from: gemini-1.5-pro
+ The CAP theorem is a cornerstone principle in distributed computing that states...
+
+ ┌─────────────────────────────────────────────────────────────────┐
+ │ Model │ Input Tokens │ Output Tokens │ Cost │
+ ├─────────────────────────────────────────────────────────────────┤
+ │ gpt-4o │ 42 │ 287 │ $0.0068 │
+ │ claude-3-5-sonnet │ 42 │ 312 │ $0.0053 │
+ │ gemini-1.5-pro │ 42 │ 298 │ $0.0038 │
+ └─────────────────────────────────────────────────────────────────┘
+ Total: $0.0159
+ ```
+
+ #### Use Cases for Token/Cost Tracking
+
+ | Use Case | Description |
+ |----------|-------------|
+ | **Budget Management** | Monitor API costs in real-time during development |
+ | **Model Evaluation** | Compare quality vs. cost across different providers |
+ | **Cost Optimization** | Identify the most cost-effective model for your tasks |
+ | **Usage Auditing** | Track token consumption for billing and optimization |
+ | **A/B Testing** | Compare model performance with objective metrics |
+
+ #### Combining with Consensus Mode
+
+ ```bash
+ # Get consensus response with cost breakdown
+ aia my_prompt -m gpt-4o,claude-3-sonnet,gemini-pro --consensus --cost
+
+ # The consensus response shows combined metrics:
+ # Tokens: input=126 (total), output=892 (consensus + individual)
+ # Cost: $0.0189 (all models combined)
+ ```
+
+ #### Environment Variables
+
+ ```bash
+ # Enable token tracking by default
+ export AIA_FLAGS__TOKENS=true
+
+ # Enable cost tracking by default
+ export AIA_FLAGS__COST=true
+ ```
+
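A note on the variables above: this release moves configuration overrides to a nested, double-underscore naming convention, which shows up throughout this diff (`AIA_FLAGS__TOKENS`, `AIA_FLAGS__DEBUG`, `AIA_PROMPTS__DIR`, `AIA_LOGGER__AIA__LEVEL`). A minimal sketch of how those variables are assumed to line up with the equivalent CLI flags:

```bash
# Assumed mapping: AIA_<SECTION>__<KEY> sets the nested config value <section>.<key>;
# the exact precedence between environment variables and CLI flags is an assumption.
export AIA_PROMPTS__DIR="$HOME/.prompts"   # roughly equivalent to: --prompts-dir ~/.prompts
export AIA_FLAGS__TOKENS=true              # roughly equivalent to: --tokens
export AIA_FLAGS__COST=true                # roughly equivalent to: --cost

aia --chat -m gpt-4o,claude-3-sonnet       # token and cost tracking now on by default
```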
  ### Per-Model Roles

  Assign specific roles to each model in multi-model mode to get diverse perspectives on your prompts. Each model receives a prepended role prompt that shapes its perspective.
@@ -431,7 +509,7 @@ aia --model claude-3-haiku code_completion

  # Bulk processing
  for file in *.txt; do
- aia --model gpt-3.5-turbo --out_file "${file%.txt}_processed.md" process_file "$file"
+ aia --model gpt-3.5-turbo --output "${file%.txt}_processed.md" process_file "$file"
  done
  ```

@@ -699,7 +777,7 @@ export LMS_API_BASE=http://localhost:1234/v1
  #### Privacy-First Workflow
  ```bash
  # Use local model for sensitive data
- aia --model ollama/llama3.2 --out_file draft.md process_private_data.txt
+ aia --model ollama/llama3.2 --output draft.md process_private_data.txt

  # Use cloud model for final polish (on sanitized data)
  aia --model gpt-4 --include draft.md refine_output
@@ -709,7 +787,7 @@ aia --model gpt-4 --include draft.md refine_output
  ```bash
  # Bulk processing with local model (free)
  for file in *.txt; do
- aia --model ollama/mistral --out_file "${file%.txt}_summary.md" summarize "$file"
+ aia --model ollama/mistral --output "${file%.txt}_summary.md" summarize "$file"
  done

  # Final review with premium cloud model
@@ -732,10 +810,10 @@ aia --model ollama/llama3.2,lms/qwen-coder,claude-3-sonnet --no-consensus code_r
  #### Model Not Available
  ```bash
  # Check if model exists
- aia --available_models | grep model_name
+ aia --available-models | grep model_name

  # Try alternative model names
- aia --available_models anthropic
+ aia --available-models anthropic
  ```

  #### Authentication Errors
@@ -775,10 +853,10 @@ aia --model claude-3-sonnet alternative_processing
  ### Model Switching Workflows
  ```bash
  # Start with fast model for initial processing
- aia --model gpt-3.5-turbo --out_file draft.md initial_analysis data.csv
+ aia --model gpt-3.5-turbo --output draft.md initial_analysis data.csv

  # Switch to quality model for refinement
- aia --model gpt-4 --include draft.md --out_file final.md refine_analysis
+ aia --model gpt-4 --include draft.md --output final.md refine_analysis

  # Use specialized model for specific tasks
  aia --model gpt-4-vision --include final.md image_analysis charts/
@@ -803,8 +881,8 @@ puts "Selected #{model} for #{complexity} complexity task"
  ### Model Ensemble Techniques
  ```bash
  # Use different models for different aspects
- aia --model gpt-4 --out_file technical_analysis.md technical_review code.py
- aia --model claude-3-sonnet --out_file style_analysis.md style_review code.py
+ aia --model gpt-4 --output technical_analysis.md technical_review code.py
+ aia --model claude-3-sonnet --output style_analysis.md style_review code.py
  aia --model gpt-3.5-turbo --include technical_analysis.md --include style_analysis.md synthesize_reviews
  ```

@@ -826,7 +904,7 @@ You: //compare "Explain this concept" --models gpt-4,claude-3-sonnet
  ### Pipeline Model Configuration
  ```bash
  # Different models for different pipeline stages
- aia --config_file pipeline_config.yml --pipeline "extract,analyze,report"
+ aia --config-file pipeline_config.yml --pipeline "extract,analyze,report"

  # pipeline_config.yml
  extract:
data/docs/guides/tools.md CHANGED
@@ -47,13 +47,13 @@ aia --tools "./tools/,./custom_tools.rb,/shared/tools/" my_prompt
  ### Tool Security
  ```bash
  # Restrict to specific tools
- aia --tools ./tools/ --allowed_tools "file_reader,calculator" my_prompt
+ aia --tools ./tools/ --allowed-tools "file_reader,calculator" my_prompt

  # Block dangerous tools
- aia --tools ./tools/ --rejected_tools "file_writer,system_admin" my_prompt
+ aia --tools ./tools/ --rejected-tools "file_writer,system_admin" my_prompt

  # Combine restrictions
- aia --tools ./tools/ --allowed_tools "safe_tools" --rejected_tools "dangerous_tools" my_prompt
+ aia --tools ./tools/ --allowed-tools "safe_tools" --rejected-tools "dangerous_tools" my_prompt
  ```

  ### Discovering Available Tools
@@ -749,7 +749,7 @@ class DebuggingTool < RubyLLM::Tool
  end

  def debug_mode?
- ENV['AIA_DEBUG'] == 'true'
+ ENV['AIA_FLAGS__DEBUG'] == 'true'
  end
  end
  ```
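With the rename to `AIA_FLAGS__DEBUG`, enabling the tool's debug branch from the shell would look roughly like this sketch (`./tools/` and `my_prompt` are placeholders reused from the examples above):

```bash
# Turn on the flag read by DebuggingTool#debug_mode?, then run with the tools directory loaded
export AIA_FLAGS__DEBUG=true
aia --tools ./tools/ my_prompt
```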
data/docs/installation.md CHANGED
@@ -153,7 +153,7 @@ gem install tty-prompt
  ### 1. Check Available Models

  ```bash
- aia --available_models
+ aia --available-models
  ```

  This will show all available AI models.
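If the list comes back empty, it usually helps to confirm the provider key is actually exported before suspecting anything else; a minimal sketch, assuming an OpenAI key (other providers use their own variables):

```bash
# Fail fast if the key is missing, then list a few models as a quick connectivity check
: "${OPENAI_API_KEY:?OPENAI_API_KEY is not set}"
aia --available-models | head -5
```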
@@ -191,7 +191,7 @@ This should start an interactive chat session.
  #### "No models available"
  - Check that your API keys are set correctly
  - Verify your internet connection
- - Try: `aia --available_models` to diagnose
+ - Try: `aia --available-models` to diagnose

  #### "fzf not found" warning
  - Install fzf as described above
@@ -32,12 +32,12 @@ AIA provides sophisticated prompt management capabilities through the PromptMana

  ### Custom Structure
  ```bash
- # Set custom prompts directory
- export AIA_PROMPTS_DIR="/path/to/custom/prompts"
- aia --prompts_dir /path/to/custom/prompts
+ # Set custom prompts directory (uses nested naming convention)
+ export AIA_PROMPTS__DIR="/path/to/custom/prompts"
+ aia --prompts-dir /path/to/custom/prompts

  # Use project-specific prompts
- aia --prompts_dir ./project_prompts my_prompt
+ aia --prompts-dir ./project_prompts my_prompt
  ```

  ## Prompt File Formats
@@ -126,7 +126,7 @@ Please analyze this system information and provide:
  ### Basic Search
  ```bash
  # List all prompts
- aia --prompts_dir ~/.prompts
+ aia --prompts-dir ~/.prompts

  # Search by pattern
  find ~/.prompts -name "*code*" -type f
@@ -155,7 +155,7 @@ aia --fuzzy development/
  aia --fuzzy roles/

  # Search in specific subdirectory
- aia --prompts_dir ~/.prompts/analysis --fuzzy
+ aia --prompts-dir ~/.prompts/analysis --fuzzy
  ```

  ## Prompt Organization Strategies
@@ -272,7 +272,7 @@ aia --role software_architect system_design
  aia --role code_expert code_review main.py

  # Custom roles directory
- aia --roles_prefix personas --role mentor learning_session
+ aia --roles-prefix personas --role mentor learning_session
  ```
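For the `--roles-prefix` example above, the assumed layout is that role prompts live in a subdirectory of the prompts directory named by the prefix (`personas` instead of the default `roles`); a sketch, with `mentor` as a hypothetical role file:

```bash
# Hypothetical role prompt under a custom prefix; prompt files are plain .txt
mkdir -p ~/.prompts/personas
printf 'You are a patient mentor. Explain ideas step by step.\n' > ~/.prompts/personas/mentor.txt

aia --roles-prefix personas --role mentor learning_session
```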

  ### Context Layering
@@ -378,10 +378,10 @@ Enhanced code review with security focus and structured output.
  ```bash
  # Shared team prompts
  git clone git@github.com:team/shared-prompts.git ~/.prompts/shared/
- aia --prompts_dir ~/.prompts/shared/ team_code_review
+ aia --prompts-dir ~/.prompts/shared/ team_code_review

  # Personal + shared prompts
- export AIA_PROMPTS_DIR="~/.prompts:~/.prompts/shared:./project_prompts"
+ export AIA_PROMPTS__DIR="~/.prompts:~/.prompts/shared:./project_prompts"
  ```

  ### Prompt Documentation
@@ -450,11 +450,11 @@ end
  ```bash
  # Batch process multiple files
  for file in data/*.csv; do
- aia batch_analysis_prompt "$file" --out_file "results/$(basename $file .csv)_analysis.md"
+ aia batch_analysis_prompt "$file" --output "results/$(basename $file .csv)_analysis.md"
  done

  # Parallel processing
- parallel -j4 aia analysis_prompt {} --out_file {.}_result.md ::: data/*.txt
+ parallel -j4 aia analysis_prompt {} --output {.}_result.md ::: data/*.txt
  ```

  ## Troubleshooting Prompts
data/docs/security.md CHANGED
@@ -31,7 +31,7 @@ export OPENAI_API_KEY_FULL="sk-..." # For full operations
  ### Key Validation and Testing
  ```bash
  # Test API keys safely
- aia --available_models | head -5 # Test without exposing key
+ aia --available-models | head -5 # Test without exposing key

  # Validate key format before use
  if [[ $OPENAI_API_KEY =~ ^sk-[a-zA-Z0-9]{48}$ ]]; then
@@ -582,8 +582,8 @@ security_response() {
  echo "AIA disabled due to security alert" | mail -s "AIA Security Alert" admin@company.com
  ;;
  "MEDIUM")
- # Increase logging level
- export AIA_LOG_LEVEL=debug
+ # Increase logging level (nested config format)
+ export AIA_LOGGER__AIA__LEVEL=debug
  echo "AIA security monitoring increased" | mail -s "AIA Security Notice" admin@company.com
  ;;
  "LOW")
@@ -55,7 +55,7 @@ aia data_prep --next analysis --next report dataset.csv
  aia --pipeline "step1,step2,step3" input.txt

  # Pipeline with output files
- aia --pipeline "extract,transform,load" --out_file results.md data.csv
+ aia --pipeline "extract,transform,load" --output results.md data.csv

  # Pipeline with model specification
  aia --model gpt-4 --pipeline "review,optimize,test" code.py
data/examples/README.md CHANGED
@@ -10,17 +10,17 @@ An executable prompt is a special type of prompt file that is structured to allo

  1. **Shebang Line**: The first line of the prompt indicates how the file should be executed. For instance:
  ```bash
- #!/usr/bin/env aia run --no-out_file
+ #!/usr/bin/env aia run --no-output
  ```

- This line tells the system to use the `aia` CLI with the `run` prompt ID for execution, while `--no-out_file` indicates that output should be sent to STDOUT instead of being written to a file.
+ This line tells the system to use the `aia` CLI with the `run` prompt ID for execution, while `--no-output` indicates that output should be sent to STDOUT instead of being written to a file.

  2. **Content**: Below the shebang line, users can add the prompt content that will be sent to the AI model for processing. This content can use flexible directives and dynamic parameters.

  ### Example of an Executable Prompt

  ```bash
- #!/usr/bin/env aia run --no-out_file
+ #!/usr/bin/env aia run --no-output
  # File: top10
  # Desc: The top 10 cities by population

@@ -45,7 +45,7 @@ The `//config` directive is used to modify configuration settings specifically f

  ```bash
  //config model = gpt-4
- //config out_file = results.md
+ //config output = results.md
  ```

  **Example**: You can control the model and output settings dynamically without changing global or default settings.
@@ -94,11 +94,11 @@ When the prompt runs, users will be prompted to provide a value for `MY_TOPIC`,
  Create a script that gives information on the top cities in the USA:

  ```bash
- #!/usr/bin/env aia run --no-out_file
+ #!/usr/bin/env aia run --no-output
  # File: top10
  # Desc: Retrieves top 10 cities by population

- //config out_file=top10_cities.md
+ //config output=top10_cities.md

  What are the top 10 cities by population in the USA? Summarize what people like about living in each city and include links to their respective Wikipedia pages.
  ```
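Once saved, an executable prompt like the one above is run the same way as any other script; a sketch (assuming the file is named `top10` and `aia` is on the `PATH`):

```bash
# Make the prompt file executable and run it; the //config line above sends output to top10_cities.md
chmod +x top10
./top10
```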
data/examples/headlines CHANGED
@@ -1,13 +1,13 @@
- #!/usr/bin/env aia run --no-out_file --exec
+ #!/usr/bin/env aia run --no-output --exec
  # NOTE: the --exec option is REQUIRED to run this executable prompt file
  # This option signals that the contents of this file are to be appended
  # to the contents of the given prompt ID file. In this case it is the
  # "run" prompt ID's text file.
  #
- # All other AIA options are, well, optional. The --no-out_file is
+ # All other AIA options are, well, optional. The --no-output is
  # used here to cause the response to this executable prompt to be
  # sent to STDOUT like a good little *nix CLI tool. It's not necessary;
- # if you do not use it, the output will go to the default out_file.
+ # if you do not use it, the output will go to the default output file.
  # You could also specify a specific file to write the output to,
  # but it is more conventional to use the *nix STDOUT redirect "> output.md"
  #
@@ -3,7 +3,7 @@
  # the bash shell
  #
  # This script assumes that the system environment
- # variable AIA_PROMPTS_DIR has been set correctly
+ # variable AIA_PROMPTS__DIR has been set correctly

  _aia_completion() {
  # The current word being completed
@@ -18,7 +18,7 @@ _aia_completion() {
  # Check if we are currently completing the option that requires prompt IDs
  if [[ "$prev_word" == "aia" ]]; then
  # Change directory to the prompts directory
- cd "$AIA_PROMPTS_DIR" || return
+ cd "$AIA_PROMPTS__DIR" || return

  # Generate a list of relative paths from the ~/.prompts directory (without .txt extension)
  local files=($(find . -name "*.txt" -type f | sed 's|^\./||' | sed 's/\.txt$//'))
@@ -2,7 +2,7 @@
  # Setup a prompt completion for use with the fish shell
  #
  # This script assumes that the system environment
- # variable AIA_PROMPTS_DIR has been set correctly
+ # variable AIA_PROMPTS__DIR has been set correctly

  function __fish_aia_complete
  # Get the command line and current token
@@ -12,9 +12,9 @@ function __fish_aia_complete
  # Check if we are currently completing the option that requires prompt IDs
  if set -q cmd_line[2]
  # Change directory to the prompts directory
- if test -d $AIA_PROMPTS_DIR
- pushd $AIA_PROMPTS_DIR
- # Generate completions based on .txt files in the AIA_PROMPTS_DIR directory
+ if test -d $AIA_PROMPTS__DIR
+ pushd $AIA_PROMPTS__DIR
+ # Generate completions based on .txt files in the AIA_PROMPTS__DIR directory
  for file in (find . -name "*.txt" -type f)
  set file (string replace -r '\.txt$' '' -- $file)
  set file (string replace -r '^\./' '' -- $file)
@@ -3,7 +3,7 @@
  # the zsh shell
  #
  # This script assumes that the system environment
- # variable AIA_PROMPTS_DIR has been set correctly
+ # variable AIA_PROMPTS__DIR has been set correctly

  _aia_completion() {
  # The current word being completed
@@ -18,7 +18,7 @@ _aia_completion() {
  # Check if we are currently completing the option that requires prompt IDs
  if [[ "$prev_word" == "aia" ]]; then
  # Change directory to the prompts directory
- cd "$AIA_PROMPTS_DIR" || return
+ cd "$AIA_PROMPTS__DIR" || return

  # Generate a list of relative paths from the ~/.prompts directory (without .txt extension)
  local files=($(find . -name "*.txt" -type f | sed 's|^\./||' | sed 's/\.txt$//'))
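To pick up the renamed variable, the completion scripts now read `AIA_PROMPTS__DIR` instead of `AIA_PROMPTS_DIR`; a sketch of wiring that up for bash (the path to `aia_completion.bash` depends on where the gem is installed, so it is shown as a placeholder):

```bash
# In ~/.bashrc (placeholder path: locate aia_completion.bash inside the installed gem)
export AIA_PROMPTS__DIR="$HOME/.prompts"
source /path/to/gems/aia/lib/aia/aia_completion.bash
```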