soba-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.claude/commands/osoba/add-backlog.md +173 -0
- data/.claude/commands/osoba/implement.md +151 -0
- data/.claude/commands/osoba/plan.md +217 -0
- data/.claude/commands/osoba/review.md +133 -0
- data/.claude/commands/osoba/revise.md +176 -0
- data/.claude/commands/soba/implement.md +88 -0
- data/.claude/commands/soba/plan.md +93 -0
- data/.claude/commands/soba/review.md +91 -0
- data/.claude/commands/soba/revise.md +76 -0
- data/.devcontainer/.env +2 -0
- data/.devcontainer/Dockerfile +3 -0
- data/.devcontainer/LICENSE +21 -0
- data/.devcontainer/README.md +85 -0
- data/.devcontainer/bin/devcontainer-common.sh +50 -0
- data/.devcontainer/bin/down +35 -0
- data/.devcontainer/bin/rebuild +10 -0
- data/.devcontainer/bin/up +11 -0
- data/.devcontainer/compose.yaml +28 -0
- data/.devcontainer/devcontainer.json +53 -0
- data/.devcontainer/post-attach.sh +29 -0
- data/.devcontainer/post-create.sh +62 -0
- data/.devcontainer/setup/01-os-package.sh +19 -0
- data/.devcontainer/setup/02-npm-package.sh +22 -0
- data/.devcontainer/setup/03-mcp-server.sh +33 -0
- data/.devcontainer/setup/04-tool.sh +17 -0
- data/.devcontainer/setup/05-soba-setup.sh +66 -0
- data/.devcontainer/setup/scripts/functions/install_apt.sh +77 -0
- data/.devcontainer/setup/scripts/functions/install_npm.sh +71 -0
- data/.devcontainer/setup/scripts/functions/mcp_config.sh +14 -0
- data/.devcontainer/setup/scripts/functions/print_message.sh +59 -0
- data/.devcontainer/setup/scripts/setup/mcp-markdownify.sh +39 -0
- data/.devcontainer/sync-envs.sh +58 -0
- data/.envrc.sample +7 -0
- data/.rspec +4 -0
- data/.rubocop.yml +70 -0
- data/.rubocop_airbnb.yml +2 -0
- data/.rubocop_todo.yml +74 -0
- data/.tool-versions +1 -0
- data/CLAUDE.md +20 -0
- data/LICENSE +21 -0
- data/README.md +384 -0
- data/README_ja.md +384 -0
- data/Rakefile +18 -0
- data/bin/soba +120 -0
- data/config/config.yml.example +36 -0
- data/docs/business/INDEX.md +6 -0
- data/docs/business/overview.md +42 -0
- data/docs/business/workflow.md +143 -0
- data/docs/development/INDEX.md +10 -0
- data/docs/development/architecture.md +69 -0
- data/docs/development/coding-standards.md +152 -0
- data/docs/development/distribution.md +26 -0
- data/docs/development/implementation-guide.md +103 -0
- data/docs/development/testing-strategy.md +128 -0
- data/docs/development/tmux-management.md +253 -0
- data/docs/document_system.md +58 -0
- data/lib/soba/commands/config/show.rb +63 -0
- data/lib/soba/commands/init.rb +778 -0
- data/lib/soba/commands/open.rb +144 -0
- data/lib/soba/commands/start.rb +442 -0
- data/lib/soba/commands/status.rb +175 -0
- data/lib/soba/commands/stop.rb +147 -0
- data/lib/soba/config_loader.rb +32 -0
- data/lib/soba/configuration.rb +268 -0
- data/lib/soba/container.rb +48 -0
- data/lib/soba/domain/issue.rb +38 -0
- data/lib/soba/domain/phase_strategy.rb +74 -0
- data/lib/soba/infrastructure/errors.rb +23 -0
- data/lib/soba/infrastructure/github_client.rb +399 -0
- data/lib/soba/infrastructure/lock_manager.rb +129 -0
- data/lib/soba/infrastructure/tmux_client.rb +331 -0
- data/lib/soba/services/ansi_processor.rb +92 -0
- data/lib/soba/services/auto_merge_service.rb +133 -0
- data/lib/soba/services/closed_issue_window_cleaner.rb +96 -0
- data/lib/soba/services/daemon_service.rb +83 -0
- data/lib/soba/services/git_workspace_manager.rb +102 -0
- data/lib/soba/services/issue_monitor.rb +29 -0
- data/lib/soba/services/issue_processor.rb +215 -0
- data/lib/soba/services/issue_watcher.rb +193 -0
- data/lib/soba/services/pid_manager.rb +87 -0
- data/lib/soba/services/process_info.rb +58 -0
- data/lib/soba/services/queueing_service.rb +98 -0
- data/lib/soba/services/session_logger.rb +111 -0
- data/lib/soba/services/session_resolver.rb +72 -0
- data/lib/soba/services/slack_notifier.rb +121 -0
- data/lib/soba/services/status_manager.rb +74 -0
- data/lib/soba/services/test_process_manager.rb +84 -0
- data/lib/soba/services/tmux_session_manager.rb +251 -0
- data/lib/soba/services/workflow_blocking_checker.rb +73 -0
- data/lib/soba/services/workflow_executor.rb +256 -0
- data/lib/soba/services/workflow_integrity_checker.rb +151 -0
- data/lib/soba/templates/claude_commands/implement.md +88 -0
- data/lib/soba/templates/claude_commands/plan.md +93 -0
- data/lib/soba/templates/claude_commands/review.md +91 -0
- data/lib/soba/templates/claude_commands/revise.md +76 -0
- data/lib/soba/version.rb +5 -0
- data/lib/soba.rb +44 -0
- data/lib/tasks/gem.rake +75 -0
- data/soba-cli.gemspec +59 -0
- metadata +430 -0
data/lib/soba/domain/issue.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module Soba
+  module Domain
+    class Issue
+      attr_reader :id, :number, :title, :body, :state, :labels, :created_at, :updated_at
+
+      def initialize(attributes = {})
+        @id = attributes[:id]
+        @number = attributes[:number]
+        @title = attributes[:title]
+        @body = attributes[:body]
+        @state = attributes[:state]
+        @labels = attributes[:labels] || []
+        @created_at = attributes[:created_at]
+        @updated_at = attributes[:updated_at]
+      end
+
+      def open?
+        state == "open"
+      end
+
+      def closed?
+        state == "closed"
+      end
+
+      def has_label?(label_name)
+        labels.any? { |label| label[:name] == label_name }
+      end
+
+      def priority
+        return :high if has_label?("critical") || has_label?("urgent")
+        return :medium if has_label?("important")
+        :low
+      end
+    end
+  end
+end
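For orientation, a minimal usage sketch for the Issue value object above (not part of the diff). It assumes symbol-keyed label hashes, which is the shape has_label? looks up; the attribute values are illustrative.

    issue = Soba::Domain::Issue.new(
      number: 42,
      title: "Example issue",
      state: "open",
      labels: [{ name: "urgent", color: "ff0000" }]
    )

    issue.open?                # => true
    issue.has_label?("urgent") # => true
    issue.priority             # => :high ("critical"/"urgent" => :high, "important" => :medium, otherwise :low)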
data/lib/soba/domain/phase_strategy.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+module Soba
+  module Domain
+    class PhaseStrategy
+      PHASE_TRANSITIONS = {
+        'soba:todo' => 'soba:queued',
+        'soba:queued' => 'soba:planning',
+        'soba:planning' => 'soba:ready',
+        'soba:ready' => 'soba:doing',
+        'soba:doing' => 'soba:review-requested',
+        'soba:review-requested' => 'soba:reviewing',
+        'soba:reviewing' => 'soba:requires-changes',
+        'soba:requires-changes' => 'soba:revising',
+        'soba:revising' => 'soba:review-requested',
+      }.freeze
+
+      PHASE_MAPPINGS = {
+        plan: { current: 'soba:todo', next: 'soba:planning' },
+        queued_to_planning: { current: 'soba:queued', next: 'soba:planning' },
+        implement: { current: 'soba:ready', next: 'soba:doing' },
+        review: { current: 'soba:review-requested', next: 'soba:reviewing' },
+        revise: { current: 'soba:requires-changes', next: 'soba:revising' },
+      }.freeze
+
+      IN_PROGRESS_LABELS = %w(soba:planning soba:doing soba:reviewing soba:revising).freeze
+
+      def determine_phase(labels)
+        return nil if labels.blank?
+
+        labels = labels.map(&:to_s)
+
+        return nil if (labels & IN_PROGRESS_LABELS).any?
+
+        return :plan if labels.include?('soba:todo')
+        return :queued_to_planning if labels.include?('soba:queued')
+        return :implement if labels.include?('soba:ready')
+        return :review if labels.include?('soba:review-requested')
+        return :revise if labels.include?('soba:requires-changes')
+
+        nil
+      end
+
+      def next_label(phase)
+        return nil unless phase
+
+        PHASE_MAPPINGS.dig(phase, :next)
+      end
+
+      def current_label_for_phase(phase)
+        return nil unless phase
+
+        PHASE_MAPPINGS.dig(phase, :current)
+      end
+
+      def validate_transition(from_label, to_label)
+        if from_label.nil? || to_label.nil?
+          return false
+        end
+
+        if !from_label.start_with?('soba:') || !to_label.start_with?('soba:')
+          return false
+        end
+
+        # Allow direct transition from soba:todo to soba:planning (legacy path)
+        if from_label == 'soba:todo' && to_label == 'soba:planning'
+          return true
+        end
+
+        PHASE_TRANSITIONS[from_label] == to_label
+      end
+    end
+  end
+end
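A sketch of how the label mappings above drive the workflow (not part of the diff). Note that determine_phase calls labels.blank?, so it assumes something like ActiveSupport is loaded by the surrounding application; the label arrays here are illustrative.

    strategy = Soba::Domain::PhaseStrategy.new

    strategy.determine_phase(['soba:ready'])                  # => :implement
    strategy.current_label_for_phase(:implement)              # => 'soba:ready'
    strategy.next_label(:implement)                           # => 'soba:doing'
    strategy.validate_transition('soba:ready', 'soba:doing')  # => true
    strategy.determine_phase(['soba:doing'])                  # => nil (in-progress labels are skipped)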
data/lib/soba/infrastructure/errors.rb
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+module Soba
+  module Infrastructure
+    class GitHubClientError < StandardError; end
+
+    class AuthenticationError < GitHubClientError; end
+
+    class RateLimitExceeded < GitHubClientError; end
+
+    class NetworkError < GitHubClientError; end
+
+    class MergeConflictError < GitHubClientError; end
+
+    class TmuxError < StandardError; end
+
+    class TmuxSessionNotFound < TmuxError; end
+
+    class TmuxCommandFailed < TmuxError; end
+
+    class TmuxNotInstalled < TmuxError; end
+  end
+end
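A hypothetical caller showing how this hierarchy can be rescued from specific to general (not part of the diff; client stands for a GitHubClient instance, defined in the next file).

    begin
      client.issues("owner/repo")
    rescue Soba::Infrastructure::RateLimitExceeded
      sleep 60
      retry
    rescue Soba::Infrastructure::AuthenticationError => e
      abort "Check your GitHub token: #{e.message}"
    rescue Soba::Infrastructure::GitHubClientError => e
      warn "GitHub call failed: #{e.message}"
    end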
data/lib/soba/infrastructure/github_client.rb
@@ -0,0 +1,399 @@
+# frozen_string_literal: true
+
+require "octokit"
+require "faraday"
+require "faraday/retry"
+require "semantic_logger"
+require_relative "errors"
+
+module Soba
+  module Infrastructure
+    class GitHubClient
+      include SemanticLogger::Loggable
+
+      attr_reader :octokit
+
+      def initialize(token: nil)
+        token ||= Configuration.config.github.token if defined?(Configuration)
+        token ||= ENV["GITHUB_TOKEN"]
+
+        stack = build_middleware_stack
+
+        @octokit = Octokit::Client.new(
+          access_token: token,
+          auto_paginate: true,
+          per_page: 100,
+          connection_options: {
+            builder: stack,
+          }
+        )
+      end
+
+      def issues(repository, state: "open")
+        logger.info "Fetching issues", repository: repository, state: state
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.issues(repository, state: state)
+          end
+        end
+
+        map_issues_to_domain(response)
+      rescue => e
+        logger.error "Failed to fetch issues", error: e.message, repository: repository
+        raise
+      end
+
+      def issue(repository, number)
+        logger.info "Fetching issue", repository: repository, number: number
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.issue(repository, number)
+          end
+        end
+
+        map_issue_to_domain(response)
+      rescue Octokit::NotFound
+        logger.warn "Issue not found", repository: repository, number: number
+        nil
+      rescue => e
+        logger.error "Failed to fetch issue", error: e.message, repository: repository, number: number
+        raise
+      end
+
+      def rate_limit_remaining
+        @octokit.rate_limit.remaining
+      rescue => e
+        logger.error "Failed to check rate limit", error: e.message
+        nil
+      end
+
+      def update_issue_labels(repository, issue_number, from:, to:)
+        logger.info "Atomic label update with check",
+                    repository: repository,
+                    issue: issue_number,
+                    from: from,
+                    to: to
+
+        with_error_handling do
+          with_rate_limit_check do
+            # Get current labels to check state
+            issue = @octokit.issue(repository, issue_number)
+            current_labels = issue.labels.map(&:name)
+
+            # Check if the issue has the expected 'from' label
+            unless current_labels.include?(from)
+              logger.warn "Label state mismatch: expected '#{from}' not found",
+                          repository: repository,
+                          issue: issue_number,
+                          current_labels: current_labels
+              return false
+            end
+
+            # Check if the issue already has the 'to' label (duplicate transition)
+            if current_labels.include?(to)
+              logger.warn "Duplicate transition detected: '#{to}' already exists",
+                          repository: repository,
+                          issue: issue_number,
+                          current_labels: current_labels
+              return false
+            end
+
+            # Perform the label update atomically
+            new_labels = current_labels - [from]
+            new_labels << to
+
+            @octokit.replace_all_labels(repository, issue_number, new_labels)
+
+            logger.info "Labels updated atomically",
+                        repository: repository,
+                        issue: issue_number,
+                        updated_labels: new_labels
+            true
+          end
+        end
+      rescue => e
+        logger.error "Failed to update labels atomically",
+                     error: e.message,
+                     repository: repository,
+                     issue: issue_number
+        raise
+      end
+
+      def wait_for_rate_limit
+        limit_info = @octokit.rate_limit
+
+        if limit_info.remaining == 0
+          reset_time = Time.at(limit_info.resets_at.to_i)
+          wait_seconds = reset_time - Time.now
+
+          if wait_seconds > 0
+            logger.warn "Rate limit exceeded. Waiting #{wait_seconds.round} seconds..."
+            sleep(wait_seconds + 1) # Add 1 second buffer
+          end
+        end
+      rescue => e
+        logger.error "Failed to wait for rate limit", error: e.message
+      end
+
+      def list_labels(repository)
+        logger.info "Fetching labels", repository: repository
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.labels(repository)
+          end
+        end
+
+        response.map do |label|
+          {
+            name: label.name,
+            color: label.color,
+            description: label.description,
+          }
+        end
+      rescue => e
+        logger.error "Failed to fetch labels", error: e.message, repository: repository
+        raise
+      end
+
+      def create_label(repository, name, color, description)
+        logger.info "Creating label", repository: repository, name: name, color: color
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.add_label(repository, name, color, description: description)
+          end
+        end
+
+        {
+          name: response.name,
+          color: response.color,
+          description: response.description,
+        }
+      rescue Octokit::UnprocessableEntity
+        # Check if error is because label already exists
+        # Octokit will return "Validation failed" as the message
+        logger.info "Label already exists, skipping", repository: repository, name: name
+        nil
+      rescue => e
+        logger.error "Failed to create label", error: e.message, repository: repository, name: name
+        raise
+      end
+
+      def search_pull_requests(repository:, labels: [])
+        logger.info "Searching pull requests", repository: repository, labels: labels
+
+        query_parts = ["type:pr", "is:open", "repo:#{repository}"]
+        query_parts += labels.map { |label| "label:#{label}" }
+        query = query_parts.join(" ")
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.search_issues(query)
+          end
+        end
+
+        response.items.map do |pr|
+          {
+            number: pr.number,
+            title: pr.title,
+            state: pr.state,
+            labels: pr.labels.map { |l| { name: l.name } },
+          }
+        end
+      rescue => e
+        logger.error "Failed to search pull requests", error: e.message, repository: repository
+        raise
+      end
+
+      def merge_pull_request(repository, pr_number, merge_method: "squash")
+        logger.info "Merging pull request", repository: repository, pr_number: pr_number, merge_method: merge_method
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.merge_pull_request(repository, pr_number, "", merge_method: merge_method)
+          end
+        end
+
+        {
+          sha: response.sha,
+          merged: response.merged,
+          message: response.message,
+        }
+      rescue Octokit::MethodNotAllowed => e
+        logger.error "Pull request not mergeable", repository: repository, pr_number: pr_number, error: e.message
+        raise MergeConflictError, "Pull request is not mergeable: #{e.message}"
+      rescue => e
+        logger.error "Failed to merge pull request", error: e.message, repository: repository, pr_number: pr_number
+        raise
+      end
+
+      def get_pull_request(repository, pr_number)
+        logger.info "Fetching pull request", repository: repository, pr_number: pr_number
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.pull_request(repository, pr_number)
+          end
+        end
+
+        {
+          number: response.number,
+          title: response.title,
+          body: response.body,
+          state: response.state,
+          mergeable: response.mergeable,
+          mergeable_state: response.mergeable_state,
+        }
+      rescue => e
+        logger.error "Failed to fetch pull request", error: e.message, repository: repository, pr_number: pr_number
+        raise
+      end
+
+      def get_pr_issue_number(repository, pr_number)
+        logger.info "Extracting issue number from PR", repository: repository, pr_number: pr_number
+
+        pr = get_pull_request(repository, pr_number)
+        body = pr[:body] || ""
+
+        # Match patterns like: fixes #123, closes #456, resolves #789
+        match = body.match(/(?:fixes|closes|resolves|fix|close|resolve)\s+#(\d+)/i)
+        return match[1].to_i if match
+
+        nil
+      rescue => e
+        logger.error "Failed to extract issue number", error: e.message, repository: repository, pr_number: pr_number
+        nil
+      end
+
+      def close_issue_with_label(repository, issue_number, label:)
+        logger.info "Closing issue with label", repository: repository, issue_number: issue_number, label: label
+
+        with_error_handling do
+          with_rate_limit_check do
+            # Close the issue
+            @octokit.close_issue(repository, issue_number)
+
+            # Add label
+            @octokit.add_labels_to_an_issue(repository, issue_number, [label])
+          end
+        end
+
+        logger.info "Issue closed and labeled successfully", repository: repository, issue_number: issue_number
+        true
+      rescue => e
+        logger.error "Failed to close issue with label", error: e.message, repository: repository,
+                     issue_number: issue_number
+        raise
+      end
+
+      def fetch_closed_issues(repository)
+        logger.info "Fetching closed issues", repository: repository
+
+        response = with_error_handling do
+          with_rate_limit_check do
+            @octokit.issues(repository, state: "closed")
+          end
+        end
+
+        map_issues_to_domain(response)
+      rescue => e
+        logger.error "Failed to fetch closed issues", error: e.message, repository: repository
+        raise
+      end
+
+      private
+
+      def build_middleware_stack
+        Faraday::RackBuilder.new do |builder|
+          # Retry on network failures and specific status codes
+          builder.use Faraday::Retry::Middleware,
+                      max: 3,
+                      interval: 0.5,
+                      interval_randomness: 0.5,
+                      backoff_factor: 2,
+                      exceptions: [
+                        Faraday::ConnectionFailed,
+                        Faraday::TimeoutError,
+                        Faraday::RetriableResponse,
+                      ],
+                      retry_statuses: [429, 503, 504],
+                      retry_block: ->(env, _options, retries, exception) do
+                        logger.warn "Retrying request",
+                                    url: env.url,
+                                    retry_count: retries,
+                                    error: exception&.message
+                      end
+
+          # Request logging
+          builder.request :url_encoded
+          builder.request :json
+
+          # Response logging and parsing
+          builder.response :json, content_type: /\bjson$/
+          builder.response :logger, logger, bodies: false if ENV["DEBUG"]
+
+          # HTTP adapter
+          builder.adapter Faraday.default_adapter
+        end
+      end
+
+      def with_error_handling
+        yield
+      rescue Octokit::Unauthorized => e
+        raise AuthenticationError, "Authentication failed: #{e.message}"
+      rescue Octokit::TooManyRequests => e
+        raise RateLimitExceeded, "Too many requests: #{e.message}"
+      rescue Octokit::Forbidden => e
+        if e.message.include?("rate limit")
+          raise RateLimitExceeded, "GitHub API rate limit exceeded"
+        else
+          raise GitHubClientError, "Access forbidden: #{e.message}"
+        end
+      rescue Faraday::ConnectionFailed, Faraday::TimeoutError => e
+        raise NetworkError, "Network error: #{e.message}"
+      end
+
+      def with_rate_limit_check
+        # Temporarily disabled rate limit check for testing
+        # TODO: Implement proper rate limit handling with VCR
+        yield
+      end
+
+      def map_issues_to_domain(issues)
+        issues.map { |issue_data| map_issue_to_domain(issue_data) }
+      end
+
+      def map_issue_to_domain(issue_data)
+        return nil unless issue_data
+
+        Domain::Issue.new(
+          id: issue_data[:id],
+          number: issue_data[:number],
+          title: issue_data[:title],
+          body: issue_data[:body],
+          state: issue_data[:state],
+          labels: normalize_labels(issue_data[:labels]),
+          created_at: issue_data[:created_at],
+          updated_at: issue_data[:updated_at]
+        )
+      end
+
+      def normalize_labels(labels)
+        return [] unless labels
+
+        labels.map do |label|
+          if label.is_a?(Hash)
+            # For test stubs that return hashes directly
+            { name: label[:name] || label["name"], color: label[:color] || label["color"] }
+          else
+            # For real Octokit responses (Sawyer::Resource objects)
+            { name: label.name, color: label.color }
+          end
+        end
+      end
+    end
+  end
+end
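A minimal usage sketch for the client above (not part of the diff). The repository name and issue number are illustrative; the token falls back to ENV["GITHUB_TOKEN"] as shown in the constructor.

    client = Soba::Infrastructure::GitHubClient.new

    open_issues = client.issues("example-owner/example-repo")   # => array of Soba::Domain::Issue
    todo = open_issues.select { |issue| issue.has_label?("soba:todo") }

    # Returns false instead of raising when the expected 'from' label is missing
    # or the 'to' label is already present (duplicate transition).
    client.update_issue_labels("example-owner/example-repo", 123,
                               from: "soba:todo", to: "soba:queued")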
data/lib/soba/infrastructure/lock_manager.rb
@@ -0,0 +1,129 @@
+# frozen_string_literal: true
+
+require 'fileutils'
+require 'timeout'
+
+module Soba
+  module Infrastructure
+    class LockTimeoutError < StandardError; end
+
+    class LockManager
+      DEFAULT_TIMEOUT = 5 # seconds
+      DEFAULT_STALE_THRESHOLD = 300 # 5 minutes
+      RETRY_INTERVAL = 0.1 # seconds
+
+      def initialize(lock_directory: nil)
+        @lock_directory = lock_directory || default_lock_directory
+        ensure_lock_directory_exists
+      end
+
+      def acquire_lock(resource_name, timeout: 0, stale_threshold: DEFAULT_STALE_THRESHOLD)
+        lock_file = lock_file_path(resource_name)
+        deadline = Time.now + timeout if timeout > 0
+
+        loop do
+          # Check for stale lock
+          if File.exist?(lock_file) && stale_threshold > 0
+            begin
+              if Time.now - File.mtime(lock_file) > stale_threshold
+                # Remove stale lock
+                File.delete(lock_file)
+              end
+            rescue Errno::ENOENT
+              # File was deleted by another process, continue
+            rescue
+              # Other errors, ignore and continue
+            end
+          end
+
+          # Try to acquire lock
+          begin
+            File.open(lock_file, File::WRONLY | File::CREAT | File::EXCL) do |f|
+              f.write(Process.pid.to_s)
+            end
+            return true
+          rescue Errno::EEXIST
+            # Lock already exists
+            if timeout > 0 && Time.now < deadline
+              sleep RETRY_INTERVAL
+              next
+            else
+              return false
+            end
+          end
+        end
+      end
+
+      def release_lock(resource_name)
+        lock_file = lock_file_path(resource_name)
+
+        return false unless File.exist?(lock_file)
+
+        # Check if we own the lock
+        begin
+          pid = File.read(lock_file).strip.to_i
+          if pid == Process.pid
+            File.delete(lock_file)
+            return true
+          end
+        rescue Errno::ENOENT
+          # File was already deleted
+          return false
+        rescue StandardError
+          # Error reading file
+        end
+
+        false
+      end
+
+      def with_lock(resource_name, timeout: DEFAULT_TIMEOUT, stale_threshold: DEFAULT_STALE_THRESHOLD)
+        unless acquire_lock(resource_name, timeout: timeout, stale_threshold: stale_threshold)
+          raise LockTimeoutError, "Failed to acquire lock for #{resource_name} within #{timeout} seconds"
+        end
+
+        begin
+          yield
+        ensure
+          release_lock(resource_name)
+        end
+      end
+
+      def locked?(resource_name)
+        lock_file = lock_file_path(resource_name)
+        File.exist?(lock_file)
+      end
+
+      def cleanup_stale_locks(threshold: DEFAULT_STALE_THRESHOLD)
+        return [] unless Dir.exist?(@lock_directory)
+
+        removed = []
+        Dir.glob(File.join(@lock_directory, '*.lock')).each do |lock_file|
+          if Time.now - File.mtime(lock_file) > threshold
+            begin
+              File.delete(lock_file)
+            rescue
+              nil
+            end
+            removed << File.basename(lock_file, '.lock')
+          end
+        end
+
+        removed
+      end
+
+      private
+
+      def default_lock_directory
+        File.join(Dir.tmpdir, 'soba-locks')
+      end
+
+      def ensure_lock_directory_exists
+        FileUtils.mkdir_p(@lock_directory) unless Dir.exist?(@lock_directory)
+      end
+
+      def lock_file_path(resource_name)
+        File.join(@lock_directory, "#{resource_name}.lock")
+      end
+    end
+  end
+end
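A usage sketch for LockManager based only on the public methods above (not part of the diff; the resource name is illustrative).

    lock_manager = Soba::Infrastructure::LockManager.new

    # with_lock raises Soba::Infrastructure::LockTimeoutError if the lock cannot be
    # acquired within the timeout, and releases the lock even if the block raises.
    lock_manager.with_lock("issue-123", timeout: 5) do
      # critical section: only one soba process works on issue 123 at a time
    end

    lock_manager.cleanup_stale_locks(threshold: 300)  # remove lock files older than 5 minutes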