aidp 0.15.2 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. checksums.yaml +4 -4
  2. data/README.md +47 -0
  3. data/lib/aidp/analyze/error_handler.rb +14 -15
  4. data/lib/aidp/analyze/runner.rb +27 -5
  5. data/lib/aidp/analyze/steps.rb +4 -0
  6. data/lib/aidp/cli/jobs_command.rb +2 -1
  7. data/lib/aidp/cli.rb +812 -3
  8. data/lib/aidp/concurrency/backoff.rb +148 -0
  9. data/lib/aidp/concurrency/exec.rb +192 -0
  10. data/lib/aidp/concurrency/wait.rb +148 -0
  11. data/lib/aidp/concurrency.rb +71 -0
  12. data/lib/aidp/config.rb +20 -0
  13. data/lib/aidp/daemon/runner.rb +9 -8
  14. data/lib/aidp/debug_mixin.rb +1 -0
  15. data/lib/aidp/errors.rb +12 -0
  16. data/lib/aidp/execute/interactive_repl.rb +102 -11
  17. data/lib/aidp/execute/repl_macros.rb +776 -2
  18. data/lib/aidp/execute/runner.rb +27 -5
  19. data/lib/aidp/execute/steps.rb +2 -0
  20. data/lib/aidp/harness/config_loader.rb +24 -2
  21. data/lib/aidp/harness/enhanced_runner.rb +16 -2
  22. data/lib/aidp/harness/error_handler.rb +1 -1
  23. data/lib/aidp/harness/provider_info.rb +19 -15
  24. data/lib/aidp/harness/provider_manager.rb +47 -41
  25. data/lib/aidp/harness/runner.rb +3 -11
  26. data/lib/aidp/harness/state/persistence.rb +1 -6
  27. data/lib/aidp/harness/state_manager.rb +115 -7
  28. data/lib/aidp/harness/status_display.rb +11 -18
  29. data/lib/aidp/harness/ui/navigation/submenu.rb +1 -0
  30. data/lib/aidp/harness/ui/workflow_controller.rb +1 -1
  31. data/lib/aidp/harness/user_interface.rb +12 -15
  32. data/lib/aidp/jobs/background_runner.rb +15 -5
  33. data/lib/aidp/providers/codex.rb +0 -1
  34. data/lib/aidp/providers/cursor.rb +0 -1
  35. data/lib/aidp/providers/github_copilot.rb +0 -1
  36. data/lib/aidp/providers/opencode.rb +0 -1
  37. data/lib/aidp/skills/composer.rb +178 -0
  38. data/lib/aidp/skills/loader.rb +205 -0
  39. data/lib/aidp/skills/registry.rb +220 -0
  40. data/lib/aidp/skills/skill.rb +174 -0
  41. data/lib/aidp/skills.rb +30 -0
  42. data/lib/aidp/version.rb +1 -1
  43. data/lib/aidp/watch/build_processor.rb +93 -28
  44. data/lib/aidp/watch/runner.rb +3 -2
  45. data/lib/aidp/workstream_executor.rb +244 -0
  46. data/lib/aidp/workstream_state.rb +212 -0
  47. data/lib/aidp/worktree.rb +208 -0
  48. data/lib/aidp.rb +6 -0
  49. metadata +17 -4
data/lib/aidp/concurrency/backoff.rb ADDED
@@ -0,0 +1,148 @@
+ # frozen_string_literal: true
+
+ require "concurrent-ruby"
+
+ module Aidp
+   module Concurrency
+     # Retry logic with exponential backoff and jitter.
+     #
+     # Replaces ad-hoc retry loops that use sleep() with a standardized,
+     # configurable retry mechanism that includes backoff strategies and jitter.
+     #
+     # @example Simple retry
+     #   Backoff.retry(max_attempts: 5) { call_external_api() }
+     #
+     # @example Custom backoff strategy
+     #   Backoff.retry(max_attempts: 10, base: 1.0, strategy: :linear) do
+     #     unstable_operation()
+     #   end
+     #
+     # @example With error filtering
+     #   Backoff.retry(max_attempts: 3, on: [Net::ReadTimeout, Errno::ECONNREFUSED]) do
+     #     http_request()
+     #   end
+     module Backoff
+       class << self
+         # Retry a block with exponential backoff and jitter.
+         #
+         # @param max_attempts [Integer] Maximum number of attempts (default: from config)
+         # @param base [Float] Base delay in seconds (default: from config)
+         # @param max_delay [Float] Maximum delay between retries (default: from config)
+         # @param jitter [Float] Jitter factor 0.0-1.0 (default: from config)
+         # @param strategy [Symbol] Backoff strategy :exponential, :linear, or :constant
+         # @param on [Array<Class>] Array of exception classes to retry (default: StandardError)
+         # @yield Block to retry
+         # @return [Object] Result of the block
+         # @raise [Concurrency::MaxAttemptsError] if all attempts fail
+         #
+         # @example
+         #   result = Backoff.retry(max_attempts: 5, base: 0.5, jitter: 0.2) do
+         #     api_client.fetch_data
+         #   end
+         def retry(max_attempts: nil, base: nil, max_delay: nil, jitter: nil,
+           strategy: :exponential, on: [StandardError], &block)
+           max_attempts ||= Concurrency.configuration.default_max_attempts
+           base ||= Concurrency.configuration.default_backoff_base
+           max_delay ||= Concurrency.configuration.default_backoff_max
+           jitter ||= Concurrency.configuration.default_jitter
+
+           raise ArgumentError, "Block required" unless block_given?
+           raise ArgumentError, "max_attempts must be >= 1" if max_attempts < 1
+
+           on = Array(on)
+           last_error = nil
+           attempt = 0
+
+           while attempt < max_attempts
+             attempt += 1
+
+             begin
+               result = block.call
+               log_retry_success(attempt) if attempt > 1
+               return result
+             rescue => e
+               last_error = e
+
+               # Re-raise if error is not in the retry list
+               raise unless on.any? { |klass| e.is_a?(klass) }
+
+               # Re-raise on last attempt
+               if attempt >= max_attempts
+                 log_max_attempts_exceeded(attempt, e)
+                 raise Concurrency::MaxAttemptsError, "Max attempts (#{max_attempts}) exceeded: #{e.class} - #{e.message}"
+               end
+
+               # Calculate delay and wait
+               delay = calculate_delay(attempt, strategy, base, max_delay, jitter)
+               log_retry_attempt(attempt, max_attempts, delay, e)
+               sleep(delay) if delay > 0
+             end
+           end
+
+           # Should never reach here, but just in case
+           raise Concurrency::MaxAttemptsError, "Max attempts (#{max_attempts}) exceeded: #{last_error&.class} - #{last_error&.message}"
+         end
+
+         # Calculate backoff delay for a given attempt.
+         #
+         # @param attempt [Integer] Current attempt number (1-indexed)
+         # @param strategy [Symbol] :exponential, :linear, or :constant
+         # @param base [Float] Base delay in seconds
+         # @param max_delay [Float] Maximum delay cap
+         # @param jitter [Float] Jitter factor 0.0-1.0
+         # @return [Float] Delay in seconds
+         def calculate_delay(attempt, strategy, base, max_delay, jitter)
+           delay = case strategy
+           when :exponential
+             base * (2**(attempt - 1))
+           when :linear
+             base * attempt
+           when :constant
+             base
+           else
+             raise ArgumentError, "Unknown strategy: #{strategy}"
+           end
+
+           # Cap at max_delay
+           delay = [delay, max_delay].min
+
+           # Add jitter: randomize between (1-jitter)*delay and delay
+           # e.g., with jitter=0.2, delay is reduced by 0-20%
+           if jitter > 0
+             jitter_amount = delay * jitter * rand
+             delay -= jitter_amount
+           end
+
+           delay
+         end
+
+         private
+
+         def log_retry_attempt(attempt, max_attempts, delay, error)
+           return unless Concurrency.configuration.log_retries
+
+           Concurrency.logger&.info(
+             "concurrency_retry",
+             "Retry attempt #{attempt}/#{max_attempts} after #{delay.round(2)}s: #{error.class} - #{error.message}"
+           )
+         end
+
+         def log_retry_success(attempt)
+           return unless Concurrency.configuration.log_retries
+
+           Concurrency.logger&.info(
+             "concurrency_retry",
+             "Retry succeeded on attempt #{attempt}"
+           )
+         end
+
+         def log_max_attempts_exceeded(attempt, error)
+           Concurrency.logger&.error(
+             "concurrency_retry",
+             "Max attempts (#{attempt}) exceeded: #{error.class} - #{error.message}"
+           )
+         end
+       end
+     end
+   end
+ end
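For reference, a minimal sketch of how a caller might use the new retry helper once the concurrency module is loaded; the client object, its fetch_data method, and the chosen errors are illustrative, not part of the gem. With base: 0.5 the exponential delays are roughly 0.5s, 1s, and 2s, each shortened by up to 20% jitter.

require "aidp"

def fetch_with_retries(client)
  Aidp::Concurrency::Backoff.retry(
    max_attempts: 4,                              # up to 4 tries
    base: 0.5,                                    # exponential: 0.5s, 1s, 2s between tries
    jitter: 0.2,                                  # each delay randomly reduced by 0-20%
    on: [Errno::ECONNRESET, Errno::ETIMEDOUT]     # only these errors are retried
  ) do
    client.fetch_data                             # hypothetical call that may fail transiently
  end
rescue Aidp::Concurrency::MaxAttemptsError => e
  warn "Giving up after retries: #{e.message}"
  nil
end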
data/lib/aidp/concurrency/exec.rb ADDED
@@ -0,0 +1,192 @@
+ # frozen_string_literal: true
+
+ require "concurrent-ruby"
+
+ module Aidp
+   module Concurrency
+     # Centralized executor and thread pool management.
+     #
+     # Provides named, configured executors for different workload types
+     # (I/O-bound, CPU-bound, background tasks) with standardized error
+     # handling and instrumentation.
+     #
+     # @example Get a named pool
+     #   pool = Exec.pool(name: :io_pool, size: 10)
+     #   future = Concurrent::Promises.future_on(pool) { fetch_remote_data() }
+     #
+     # @example Execute a future
+     #   result = Exec.future { expensive_computation() }.value!
+     #
+     # @example Shutdown all pools
+     #   Exec.shutdown_all
+     module Exec
+       class << self
+         # Get or create a named thread pool.
+         #
+         # Pools are cached by name. Calling this method multiple times with the
+         # same name returns the same pool instance.
+         #
+         # @param name [Symbol] Pool name (e.g., :io_pool, :cpu_pool, :background)
+         # @param size [Integer] Pool size (default: based on pool type)
+         # @param type [Symbol] Pool type :fixed or :cached (default: :fixed)
+         # @return [Concurrent::ThreadPoolExecutor] The thread pool
+         #
+         # @example
+         #   io_pool = Exec.pool(name: :io, size: 20)
+         #   cpu_pool = Exec.pool(name: :cpu, size: 4)
+         def pool(name:, size: nil, type: :fixed)
+           @pools ||= Concurrent::Hash.new
+           @pools[name] ||= create_pool(name, size, type)
+         end
+
+         # Execute a block on a future using the default pool.
+         #
+         # @param executor [Concurrent::ExecutorService] Custom executor (optional)
+         # @yield Block to execute asynchronously
+         # @return [Concurrent::Promises::Future] The future
+         #
+         # @example
+         #   future = Exec.future { slow_operation() }
+         #   result = future.value! # Wait for result
+         def future(executor: nil, &block)
+           raise ArgumentError, "Block required" unless block_given?
+
+           executor ||= default_pool
+           Concurrent::Promises.future_on(executor, &block)
+         end
+
+         # Execute multiple futures in parallel and wait for all to complete.
+         #
+         # @param futures [Array<Concurrent::Promises::Future>] Futures to zip
+         # @return [Concurrent::Promises::Future] Future that resolves when all complete
+         #
+         # @example
+         #   futures = [
+         #     Exec.future { task1() },
+         #     Exec.future { task2() },
+         #     Exec.future { task3() }
+         #   ]
+         #   results = Exec.zip(*futures).value!
+         def zip(*futures)
+           Concurrent::Promises.zip(*futures)
+         end
+
+         # Get the default executor pool.
+         #
+         # @return [Concurrent::ThreadPoolExecutor]
+         def default_pool
+           @default_pool ||= pool(name: :default, size: default_pool_size)
+         end
+
+         # Shutdown a specific pool.
+         #
+         # @param name [Symbol] Pool name
+         # @param timeout [Float] Seconds to wait for shutdown
+         # @return [Boolean] true if shutdown cleanly
+         def shutdown_pool(name, timeout: 60)
+           @pools ||= Concurrent::Hash.new
+           pool = @pools.delete(name)
+           return true unless pool
+
+           pool.shutdown
+           pool.wait_for_termination(timeout)
+         end
+
+         # Shutdown all managed pools.
+         #
+         # @param timeout [Float] Seconds to wait for each pool
+         # @return [Hash] Map of pool name to shutdown success
+         def shutdown_all(timeout: 60)
+           @pools ||= Concurrent::Hash.new
+           results = {}
+
+           @pools.each_key do |name|
+             results[name] = shutdown_pool(name, timeout: timeout)
+           end
+
+           @default_pool&.shutdown
+           @default_pool&.wait_for_termination(timeout)
+
+           results
+         end
+
+         # Get statistics for all pools.
+         #
+         # @return [Hash] Map of pool name to stats
+         def stats
+           @pools ||= Concurrent::Hash.new
+           stats = {}
+
+           @pools.each do |name, pool|
+             stats[name] = pool_stats(pool)
+           end
+
+           stats[:default] = pool_stats(@default_pool) if @default_pool
+
+           stats
+         end
+
+         private
+
+         def create_pool(name, size, type)
+           size ||= default_size_for_pool(name, type)
+
+           pool = case type
+           when :fixed
+             Concurrent::FixedThreadPool.new(size)
+           when :cached
+             Concurrent::CachedThreadPool.new
+           else
+             raise ArgumentError, "Unknown pool type: #{type}"
+           end
+
+           log_pool_created(name, type, size)
+           pool
+         end
+
+         def default_size_for_pool(name, type)
+           return nil if type == :cached
+
+           case name
+           when :io, :io_pool
+             20 # I/O-bound: can have more threads
+           when :cpu, :cpu_pool
+             processor_count # CPU-bound: match CPU cores
+           when :background
+             5 # Background tasks: small pool
+           else
+             10 # Generic default
+           end
+         end
+
+         def default_pool_size
+           [processor_count * 2, 10].max
+         end
+
+         def processor_count
+           @processor_count ||= Concurrent.processor_count
+         end
+
+         def pool_stats(pool)
+           return nil unless pool
+
+           {
+             pool_size: pool.max_length,
+             queue_length: pool.queue_length,
+             active_threads: pool.length,
+             completed_tasks: pool.completed_task_count
+           }
+         rescue => e
+           {error: e.message}
+         end
+
+         def log_pool_created(name, type, size)
+           Concurrency.logger&.debug(
+             "concurrency_pool",
+             "Created #{type} pool :#{name} with size #{size || "dynamic"}"
+           )
+         end
+       end
+     end
+   end
+ end
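A short sketch of driving the pool, future, and zip helpers together; the URLs and the use of Net::HTTP are illustrative stand-ins for real work:

require "aidp"
require "net/http"
require "uri"

pool = Aidp::Concurrency::Exec.pool(name: :io, size: 8) # cached by name; later calls reuse it

urls = ["https://example.com/a", "https://example.com/b"] # illustrative URLs

futures = urls.map do |url|
  Aidp::Concurrency::Exec.future(executor: pool) { Net::HTTP.get(URI(url)) }
end

# zip resolves once every future has; value! re-raises the first failure, if any.
bodies = Aidp::Concurrency::Exec.zip(*futures).value!

Aidp::Concurrency::Exec.shutdown_all(timeout: 10)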
data/lib/aidp/concurrency/wait.rb ADDED
@@ -0,0 +1,148 @@
+ # frozen_string_literal: true
+
+ require "concurrent-ruby"
+
+ module Aidp
+   module Concurrency
+     # Deterministic condition waiting with timeouts and intervals.
+     #
+     # Replaces sleep-based polling loops with proper timeout enforcement
+     # and early exit on condition satisfaction.
+     #
+     # @example Wait for file to exist
+     #   Wait.until(timeout: 30, interval: 0.2) { File.exist?("/tmp/ready") }
+     #
+     # @example Wait for port to open
+     #   Wait.until(timeout: 60, interval: 1) do
+     #     TCPSocket.new("localhost", 8080).close rescue false
+     #   end
+     #
+     # @example Custom error message
+     #   Wait.until(timeout: 10, message: "Service failed to start") do
+     #     service_ready?
+     #   end
+     module Wait
+       class << self
+         # Wait until a condition becomes true, with timeout and interval polling.
+         #
+         # @param timeout [Float] Maximum seconds to wait (default: from config)
+         # @param interval [Float] Seconds between condition checks (default: from config)
+         # @param message [String] Custom error message on timeout
+         # @yield Block that returns truthy when condition is met
+         # @return [Object] The truthy value from the block
+         # @raise [Concurrency::TimeoutError] if timeout is reached before condition is met
+         #
+         # @example
+         #   result = Wait.until(timeout: 5, interval: 0.1) { expensive_check() }
+         def until(timeout: nil, interval: nil, message: nil, &block)
+           timeout ||= Concurrency.configuration.default_timeout
+           interval ||= Concurrency.configuration.default_interval
+           message ||= "Condition not met within #{timeout}s"
+
+           raise ArgumentError, "Block required" unless block_given?
+
+           start_time = monotonic_time
+           deadline = start_time + timeout
+           elapsed = 0.0
+
+           loop do
+             result = block.call
+             if result
+               log_wait_completion(elapsed) if should_log_wait?(elapsed)
+               return result
+             end
+
+             elapsed = monotonic_time - start_time
+             remaining = deadline - monotonic_time
+
+             if remaining <= 0
+               log_timeout(elapsed, message)
+               raise Concurrency::TimeoutError, message
+             end
+
+             # Sleep for interval or remaining time, whichever is shorter
+             sleep_duration = [interval, remaining].min
+             sleep(sleep_duration) if sleep_duration > 0
+           end
+         end
+
+         # Wait for a file to exist.
+         #
+         # @param path [String] File path to check
+         # @param timeout [Float] Maximum seconds to wait
+         # @param interval [Float] Seconds between checks
+         # @return [String] The file path if it exists
+         # @raise [Concurrency::TimeoutError] if file doesn't appear in time
+         def for_file(path, timeout: nil, interval: nil)
+           self.until(
+             timeout: timeout,
+             interval: interval,
+             message: "File not found: #{path} (waited #{timeout || Concurrency.configuration.default_timeout}s)"
+           ) { File.exist?(path) }
+           path
+         end
+
+         # Wait for a TCP port to be open.
+         #
+         # @param host [String] Hostname or IP
+         # @param port [Integer] Port number
+         # @param timeout [Float] Maximum seconds to wait
+         # @param interval [Float] Seconds between checks
+         # @return [Boolean] true if port is open
+         # @raise [Concurrency::TimeoutError] if port doesn't open in time
+         def for_port(host, port, timeout: nil, interval: nil)
+           require "socket"
+           self.until(
+             timeout: timeout,
+             interval: interval,
+             message: "Port #{host}:#{port} not open (waited #{timeout || Concurrency.configuration.default_timeout}s)"
+           ) do
+             socket = TCPSocket.new(host, port)
+             socket.close
+             true
+           rescue Errno::ECONNREFUSED, Errno::EHOSTUNREACH, Errno::ETIMEDOUT
+             false
+           end
+         end
+
+         # Wait for a process to exit.
+         #
+         # @param pid [Integer] Process ID
+         # @param timeout [Float] Maximum seconds to wait
+         # @param interval [Float] Seconds between checks
+         # @return [Process::Status] The process exit status
+         # @raise [Concurrency::TimeoutError] if process doesn't exit in time
+         def for_process_exit(pid, timeout: nil, interval: nil)
+           status = nil
+           self.until(
+             timeout: timeout,
+             interval: interval,
+             message: "Process #{pid} did not exit (waited #{timeout || Concurrency.configuration.default_timeout}s)"
+           ) do
+             _, status = Process.waitpid2(pid, Process::WNOHANG)
+             status # Returns truthy when process has exited
+           end
+           status
+         end
+
+         private
+
+         def monotonic_time
+           Process.clock_gettime(Process::CLOCK_MONOTONIC)
+         end
+
+         def should_log_wait?(elapsed)
+           elapsed >= Concurrency.configuration.log_long_waits_threshold
+         end
+
+         def log_wait_completion(elapsed)
+           Concurrency.logger&.info("concurrency_wait", "Long wait completed: #{elapsed.round(2)}s")
+         end
+
+         def log_timeout(elapsed, message)
+           Concurrency.logger&.warn("concurrency_timeout", "Wait timeout: #{message} (elapsed: #{elapsed.round(2)}s)")
+         end
+       end
+     end
+   end
+ end
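A sketch of the sleep-polling pattern these helpers replace; the path, host, and port values here are illustrative:

require "aidp"

begin
  # Instead of `sleep 1` in a loop, block until the condition holds or the deadline passes.
  Aidp::Concurrency::Wait.for_file("/tmp/aidp-daemon.pid", timeout: 10, interval: 0.2)
  Aidp::Concurrency::Wait.for_port("127.0.0.1", 8080, timeout: 30, interval: 0.5)
rescue Aidp::Concurrency::TimeoutError => e
  warn e.message # e.g. "File not found: /tmp/aidp-daemon.pid (waited 10s)"
end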
data/lib/aidp/concurrency.rb ADDED
@@ -0,0 +1,71 @@
+ # frozen_string_literal: true
+
+ require "concurrent-ruby"
+ require_relative "concurrency/wait"
+ require_relative "concurrency/backoff"
+ require_relative "concurrency/exec"
+
+ module Aidp
+   # Concurrency utilities for deterministic waiting, retry/backoff, and executor management.
+   #
+   # This module provides standardized primitives to replace arbitrary sleep() calls with
+   # proper synchronization, timeouts, and event-based coordination using concurrent-ruby.
+   #
+   # @example Wait for a condition
+   #   Concurrency::Wait.until(timeout: 30) { File.exist?(path) }
+   #
+   # @example Retry with backoff
+   #   Concurrency::Backoff.retry(max_attempts: 5) { risky_call() }
+   #
+   # @example Get a named executor
+   #   pool = Concurrency::Exec.pool(name: :io_pool, size: 10)
+   #   future = Concurrent::Promises.future_on(pool) { fetch_data() }
+   module Concurrency
+     class Error < StandardError; end
+     class TimeoutError < Error; end
+     class MaxAttemptsError < Error; end
+
+     # Default configuration for executors and timeouts
+     class Configuration
+       attr_accessor :default_timeout, :default_interval, :default_max_attempts,
+         :default_backoff_base, :default_backoff_max, :default_jitter,
+         :log_long_waits_threshold, :log_retries
+
+       def initialize
+         @default_timeout = 30.0
+         @default_interval = 0.2
+         @default_max_attempts = 5
+         @default_backoff_base = 0.5
+         @default_backoff_max = 30.0
+         @default_jitter = 0.2
+         @log_long_waits_threshold = 5.0 # Log if wait takes > 5s
+         @log_retries = true
+       end
+     end
+
+     class << self
+       attr_writer :configuration
+
+       def configuration
+         @configuration ||= Configuration.new
+       end
+
+       def configure
+         yield(configuration)
+       end
+
+       def logger
+         @logger ||= if defined?(Aidp.logger)
+           Aidp.logger
+         elsif defined?(Rails)
+           Rails.logger
+         else
+           require "logger"
+           Logger.new($stdout)
+         end
+       end
+
+       attr_writer :logger
+     end
+   end
+ end
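The defaults above can be overridden process-wide through the configure block defined in this file; the specific values below are illustrative:

Aidp::Concurrency.configure do |c|
  c.default_timeout = 60.0      # Wait.until gives up after a minute by default
  c.default_max_attempts = 3    # Backoff.retry makes three attempts by default
  c.default_jitter = 0.1
  c.log_retries = false         # silence per-attempt retry logging
end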
data/lib/aidp/config.rb CHANGED
@@ -162,6 +162,11 @@ module Aidp
             interactive: true
           }
         }
+     },
+     skills: {
+       search_paths: [],
+       default_provider_filter: true,
+       enable_custom_skills: true
      }
    }.freeze

@@ -243,6 +248,15 @@ module Aidp
     providers_section.keys.map(&:to_s)
   end

+   # Get skills configuration
+   def self.skills_config(project_dir = Dir.pwd)
+     config = load_harness_config(project_dir)
+     skills_section = config[:skills] || config["skills"] || {}
+
+     # Convert string keys to symbols for consistency
+     symbolize_keys(skills_section)
+   end
+
   # Check if configuration file exists
   def self.config_exists?(project_dir = Dir.pwd)
     ConfigPaths.config_exists?(project_dir)
@@ -322,6 +336,12 @@ module Aidp
       end
     end

+     # Deep merge skills config
+     if config[:skills] || config["skills"]
+       skills_section = config[:skills] || config["skills"]
+       merged[:skills] = merged[:skills].merge(symbolize_keys(skills_section))
+     end
+
     merged
   end

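A sketch of reading the new section from application code, assuming these are module methods on Aidp::Config as the file path suggests; the keys and defaults are the ones added above:

skills = Aidp::Config.skills_config(Dir.pwd)
skills[:search_paths]            # => [] unless the project config overrides it
skills[:default_provider_filter] # => true by default
skills[:enable_custom_skills]    # => true by default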
data/lib/aidp/daemon/runner.rb CHANGED
@@ -3,6 +3,7 @@
 require "socket"
 require_relative "process_manager"
 require_relative "../execute/async_work_loop_runner"
+ require_relative "../concurrency"

 module Aidp
   module Daemon
@@ -35,18 +36,20 @@ module Aidp

       Process.detach(daemon_pid)

-       # Wait for daemon to start
-       sleep 0.5
+       # Wait for daemon to start (check if it's running)
+       begin
+         Aidp::Concurrency::Wait.until(timeout: 5, interval: 0.1) do
+           @process_manager.running?
+         end

-       if @process_manager.running?
         {
           success: true,
           message: "Daemon started in #{mode} mode",
           pid: daemon_pid,
           log_file: @process_manager.log_file_path
         }
-       else
-         {success: false, message: "Failed to start daemon"}
+       rescue Aidp::Concurrency::TimeoutError
+         {success: false, message: "Failed to start daemon (timeout)"}
       end
     end

@@ -189,7 +192,7 @@
       sleep(@options[:interval] || 60)
     rescue => e
       Aidp.logger.error("watch_error", "Watch cycle error: #{e.message}")
-       sleep 30 # Back off on error
+       sleep 30
     end
   end

@@ -199,8 +202,6 @@
     def run_work_loop_mode
       Aidp.logger.info("daemon_lifecycle", "Starting work loop mode")

-       # This would integrate with AsyncWorkLoopRunner
-       # For now, just log that we're running
       while @running
         Aidp.logger.debug("heartbeat", "Daemon running")
         sleep 10
data/lib/aidp/debug_mixin.rb CHANGED
@@ -111,6 +111,7 @@ module Aidp
   # Log error with debug context
   def debug_error(error, context = {})
     return unless debug_basic?
+     return unless error # Handle nil error gracefully

     error_message = "💥 Error: #{error.class.name}: #{error.message}"
     debug_logger.error(component_name, error_message, error: error.class.name, **context)
data/lib/aidp/errors.rb ADDED
@@ -0,0 +1,12 @@
+ # frozen_string_literal: true
+
+ module Aidp
+   # Error classes for AIDP
+   module Errors
+     class ConfigurationError < StandardError; end
+     class ProviderError < StandardError; end
+     class ValidationError < StandardError; end
+     class StateError < StandardError; end
+     class UserError < StandardError; end
+   end
+ end
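A minimal sketch of raising and rescuing one of the new error classes; the message text and logging component name are illustrative, while the logger call style follows the Aidp.logger.error(component, message) pattern seen elsewhere in this diff:

begin
  raise Aidp::Errors::ConfigurationError, "aidp.yml has no providers section"
rescue Aidp::Errors::ConfigurationError => e
  Aidp.logger.error("config_error", e.message)
end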