circleci-tools 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,220 @@
+ require 'aws-sdk-cloudwatch'
+ require 'aws-sdk-s3'
+ require 'csv'
+ require 'digest'
+ require 'json'
+ require 'logger'
+ require 'set'
+ require 'time'
+ require 'tty-progressbar'
+
+ module CircleciTools
+   class CloudWatchMetricsService
+     UPLOAD_BATCH_SIZE = 20
+     METRICS_DIGEST_FILENAME = 'cloud-watch-metrics-digests.txt'
+
+     def initialize(namespace: 'CircleCI', dry_run: false, logger: Logger.new(STDOUT), s3_bucket: nil)
+       @namespace = namespace
+       @dry_run = dry_run
+       @logger = logger
+       @cloudwatch = Aws::CloudWatch::Client.new
+       @s3_bucket = s3_bucket
+       @s3_client = Aws::S3::Client.new if @s3_bucket
+     end
+
+     def upload_metrics(file_path)
+       @logger.info("Uploading metrics from #{file_path} to CloudWatch...")
+       events = parse_csv(file_path)
+
+       if @dry_run
+         metrics = generate_metrics(events)
+         puts JSON.pretty_generate(metrics)
+       else
+         events.group_by { |event| event[:project_name] }.each do |project_name, project_events|
+           metrics = generate_metrics(project_events)
+           upload_to_cloudwatch(project_name, metrics)
+         end
+       end
+     end
+
+     private
+
+     def parse_csv(file_path)
+       events = []
+       two_weeks_ago = Time.now - (14 * 24 * 60 * 60)
+       CSV.foreach(file_path, headers: true) do |row|
+         next unless row['JOB_RUN_STARTED_AT'].to_i > 0 && row['JOB_RUN_STOPPED_AT'].to_i > 0
+
+         started_at = Time.parse(row['JOB_RUN_STARTED_AT'])
+         stopped_at = Time.parse(row['JOB_RUN_STOPPED_AT'])
+         next if stopped_at < two_weeks_ago
+
+         events << {
+           project_name: row['PROJECT_NAME'],
+           workflow_name: row['WORKFLOW_NAME'],
+           branch: row['VCS_BRANCH'],
+           job_name: row['JOB_NAME'],
+           job_status: row['JOB_BUILD_STATUS'],
+           started_at: started_at,
+           stopped_at: stopped_at,
+           run_time: (stopped_at - started_at).to_i,
+           compute_credits_used: row['COMPUTE_CREDITS'].to_i,
+           avg_ram: row['MEDIAN_RAM_UTILIZATION_PCT'].to_i,
+           max_ram: row['MAX_RAM_UTILIZATION_PCT'].to_i,
+           avg_cpu: row['MEDIAN_CPU_UTILIZATION_PCT'].to_i,
+           max_cpu: row['MAX_CPU_UTILIZATION_PCT'].to_i
+         }
+       end
+       events
+     end
+
+     def generate_metrics(events)
+       metrics = []
+       events.each do |event|
+         workflow_dimensions = [
+           { name: 'WorkflowName', value: event[:workflow_name] }
+         ]
+
+         branch_dimensions = [
+           { name: 'Branch', value: event[:branch] },
+           { name: 'JobName', value: event[:job_name] }
+         ]
+
+         truncated_timestamp = truncate_to_minute(event[:stopped_at])
+
+         metrics << {
+           metric_name: 'JobRunTime',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: event[:run_time],
+           unit: 'Seconds'
+         } if event[:run_time] > 0
+         metrics << {
+           metric_name: 'AverageRAMUtilization',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: event[:avg_ram],
+           unit: 'Percent'
+         } if event[:avg_ram] > 0
+         metrics << {
+           metric_name: 'MaxRAMUtilization',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: event[:max_ram],
+           unit: 'Percent'
+         } if event[:max_ram] > 0
+         metrics << {
+           metric_name: 'AverageCPUUtilization',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: event[:avg_cpu],
+           unit: 'Percent'
+         } if event[:avg_cpu] > 0
+         metrics << {
+           metric_name: 'MaxCPUUtilization',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: event[:max_cpu],
+           unit: 'Percent'
+         } if event[:max_cpu] > 0
+         metrics << {
+           metric_name: 'JobSucceeded',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: 1,
+           unit: 'Count'
+         } if event[:job_status] == 'success'
+         metrics << {
+           metric_name: 'JobFailed',
+           dimensions: branch_dimensions,
+           timestamp: truncated_timestamp,
+           value: 1,
+           unit: 'Count'
+         } if event[:job_status] == 'failed'
+         metrics << {
+           metric_name: 'ComputeCreditsUsed',
+           dimensions: workflow_dimensions,
+           timestamp: truncated_timestamp,
+           value: event[:compute_credits_used],
+           unit: 'Count'
+         } if event[:compute_credits_used] > 0
+       end
+       metrics
+     end
+
+     def truncate_to_minute(time)
+       Time.at(time.to_i - time.sec)
+     end
+
+     def upload_to_cloudwatch(project_name, metrics)
+       bar = TTY::ProgressBar.new("Uploading [:bar] :percent :elapsed", total: metrics.size)
+
+       existing_digests = load_existing_digests
+       new_metrics = []
+       new_digests = []
+
+       metrics.each do |metric|
+         digest = Digest::MD5.hexdigest(metric.to_s)
+         next if existing_digests.include?(digest)
+
+         new_metrics << metric
+         new_digests << digest
+       end
+
+       new_metrics.each_slice(UPLOAD_BATCH_SIZE) do |metric_batch|
+         begin
+           @cloudwatch.put_metric_data(
+             namespace: "#{@namespace}/#{project_name}",
+             metric_data: metric_batch
+           )
+           bar.advance(metric_batch.size)
+         rescue Aws::CloudWatch::Errors::ServiceError => e
+           @logger.error("Failed to upload metrics: #{e.message}")
+         end
+       end
+
+       store_new_digests(new_digests)
+
+       @logger.info("Uploaded #{new_metrics.size} metrics to CloudWatch for project #{project_name}.")
+     end
+
+     private
+
+     def load_existing_digests
+       if @s3_bucket
+         begin
+           resp = @s3_client.get_object(bucket: @s3_bucket, key: "#{@namespace.downcase}/#{METRICS_DIGEST_FILENAME}")
+           Set.new(resp.body.read.split("\n"))
+         rescue Aws::S3::Errors::NoSuchKey
+           Set.new
+         end
+       else
+         digest_file = File.join('tmp', METRICS_DIGEST_FILENAME)
+         if File.exist?(digest_file)
+           Set.new(File.readlines(digest_file).map(&:chomp))
+         else
+           Set.new
+         end
+       end
+     end
+
+     def store_new_digests(new_digests)
+       return if new_digests.empty?
+
+       if @s3_bucket
+         old_digests = load_existing_digests
+         merged_digests = (old_digests + new_digests).to_a.uniq
+         rotated_digests = merged_digests.last(100_000).join("\n")
+         @s3_client.put_object(bucket: @s3_bucket, key: "#{@namespace.downcase}/#{METRICS_DIGEST_FILENAME}", body: rotated_digests)
+       else
+         digest_file = File.join('tmp', METRICS_DIGEST_FILENAME)
+         old_digests = File.exist?(digest_file) ? File.readlines(digest_file).map(&:chomp) : []
+         merged_digests = (old_digests + new_digests).uniq
+         rotated_digests = merged_digests.last(100_000)
+         File.open(digest_file, 'w') do |file|
+           rotated_digests.each { |digest| file.puts digest }
+         end
+       end
+     end
+   end
+ end
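For orientation, a minimal usage sketch of CloudWatchMetricsService follows. The CSV path and bucket name are hypothetical; the expected columns (JOB_RUN_STARTED_AT, PROJECT_NAME, and so on) come from parse_csv above, and AWS credentials/region are assumed to be configured in the environment.

    # Dry run: parse the usage CSV and print the metrics that would be uploaded.
    service = CircleciTools::CloudWatchMetricsService.new(namespace: 'CircleCI', dry_run: true)
    service.upload_metrics('tmp/resource_usage.csv') # hypothetical path

    # Real upload, with digests persisted to S3 so already-published metrics are skipped.
    service = CircleciTools::CloudWatchMetricsService.new(s3_bucket: 'my-metrics-bucket') # hypothetical bucket
    service.upload_metrics('tmp/resource_usage.csv')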
@@ -0,0 +1,59 @@
+ require 'csv'
+
+ module CircleciTools
+   class DataAggregator
+     CREDIT_COST = 0.0006
+
+     RESOURCE_CLASS_MAP = {
+       "small" => { cpus: 1, ram: 2 },
+       "medium" => { cpus: 2, ram: 4 },
+       "medium+" => { cpus: 3, ram: 6 },
+       "large" => { cpus: 4, ram: 8 }
+     }
+
+     def initialize(jobs)
+       @jobs = jobs
+     end
+
+     def generate_csv
+       csv_file_path = 'tmp/jobs_aggregated.csv'
+
+       CSV.open(csv_file_path, 'w') do |csv|
+         csv << [
+           'job_number', 'duration (ms)', 'duration_minutes', 'total_duration_minutes', 'queued_at',
+           'started_at', 'stopped_at', 'status', 'parallelism', 'resource_class', 'name',
+           'CPUs', 'RAM', 'total_ram', 'total_cpus', 'total_credits', 'total_costs'
+         ]
+
+         @jobs.each do |job|
+           duration = job['duration'] || 0
+           duration_minutes = duration / 1000.0 / 60.0
+           parallelism = job['parallelism'] || 1
+           total_duration_minutes = duration_minutes * parallelism
+
+           resource_class = job.dig('executor', 'resource_class')
+           next unless resource_class
+
+           mapped_class = RESOURCE_CLASS_MAP[resource_class]
+           next unless mapped_class
+
+           cpus = mapped_class[:cpus] || 1
+           ram = mapped_class[:ram] || 1
+
+           total_ram = parallelism * ram
+           total_cpus = parallelism * cpus
+           total_credits = total_cpus * duration_minutes * 5
+           total_costs = (total_credits * CREDIT_COST * parallelism).round(2)
+
+           csv << [
+             job['number'], duration, duration_minutes, total_duration_minutes, job['queued_at'],
+             job['started_at'], job['stopped_at'], job['status'], parallelism, resource_class,
+             job['name'], cpus, ram, total_ram, total_cpus, total_credits, total_costs
+           ]
+         end
+       end
+
+       puts "CSV file created at #{csv_file_path}"
+     end
+   end
+ end
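A sketch of how DataAggregator might be driven, with a hypothetical job hash shaped the way the loop above expects. Following the formulas in the code, a 'medium' job (2 CPUs) that ran 5 minutes with parallelism 2 gives total_cpus = 4, total_credits = 4 * 5 * 5 = 100, and total_costs = (100 * 0.0006 * 2).round(2) = 0.12.

    jobs = [
      {
        'number' => 42, 'duration' => 300_000, 'parallelism' => 2, 'status' => 'success',
        'name' => 'rspec', 'executor' => { 'resource_class' => 'medium' },
        'queued_at' => '2024-01-01T00:00:00Z', 'started_at' => '2024-01-01T00:00:10Z',
        'stopped_at' => '2024-01-01T00:05:10Z'
      }
    ]
    CircleciTools::DataAggregator.new(jobs).generate_csv # writes tmp/jobs_aggregated.csv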
@@ -0,0 +1,59 @@
+ require 'time'
+
+ module CircleciTools
+   class JobAnalyzer
+     RESOURCE_RAM = {
+       'small' => 2048, # in MB
+       'medium' => 4096,
+       'medium+' => 6144,
+       'large' => 8192,
+       # Add other classes if necessary
+     }.freeze
+
+     def calculate_peak_ram(jobs:)
+       events = []
+
+       jobs.each do |job|
+         next unless job['started_at'] && job['stopped_at']
+
+         start = parse_time(job['started_at'])
+         end_time = parse_time(job['stopped_at'])
+         next unless start && end_time # skip jobs with unparseable timestamps
+         ram = get_ram_claim(job)
+
+         events << { time: start, type: 'start', ram: ram }
+         events << { time: end_time, type: 'end', ram: ram }
+       end
+
+       # Sort events by time; 'end' before 'start' if times are equal
+       events.sort_by! { |event| [event[:time], event[:type] == 'end' ? 0 : 1] }
+
+       current_ram = 0
+       peak_ram = 0
+
+       events.each do |event|
+         if event[:type] == 'start'
+           current_ram += event[:ram]
+           peak_ram = [peak_ram, current_ram].max
+         else
+           current_ram -= event[:ram]
+         end
+       end
+
+       peak_ram
+     end
+
+     private
+
+     def get_ram_claim(job)
+       resource_class = job.dig('executor', 'resource_class') || 'medium' # Default to 'medium' if not specified
+       RESOURCE_RAM[resource_class] || 4096 # Default to 4096 MB if class not found
+     end
+
+     def parse_time(time_str)
+       Time.parse(time_str)
+     rescue
+       nil
+     end
+   end
+ end
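A short sketch of calculate_peak_ram with two hypothetical overlapping jobs: the 'medium' job claims 4096 MB and the 'large' job 8192 MB, and because they overlap between 00:05 and 00:10 the sweep reports a peak of 12288 MB.

    analyzer = CircleciTools::JobAnalyzer.new
    jobs = [
      { 'started_at' => '2024-01-01T00:00:00Z', 'stopped_at' => '2024-01-01T00:10:00Z',
        'executor' => { 'resource_class' => 'medium' } }, # 4096 MB
      { 'started_at' => '2024-01-01T00:05:00Z', 'stopped_at' => '2024-01-01T00:15:00Z',
        'executor' => { 'resource_class' => 'large' } }   # 8192 MB
    ]
    analyzer.calculate_peak_ram(jobs: jobs) # => 12288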
@@ -0,0 +1,274 @@
+ require 'aws-sdk-cloudwatchlogs'
+ require 'time'
+ require 'csv'
+ require 'json'
+ require_relative 'retryable'
+ require 'tty-prompt'
+ require 'date'
+
+ module CircleciTools
+   class LogUploader
+     MAX_THREADS = 5
+
+     include Retryable
+
+     def initialize(log_group_name, dry_run: false)
+       @log_group_name = log_group_name
+       @dry_run = dry_run
+       @client = Aws::CloudWatchLogs::Client.new
+
+       ensure_log_group_exists
+     end
+
+     def upload_file(file_path)
+       events = generate_events(file_path)
+       events.sort_by! { |event| event[:timestamp] }
+
+       grouped_events = group_events_by_date(events)
+
+       if @dry_run
+         handle_dry_run(grouped_events)
+       else
+         upload_grouped_events(grouped_events, file_path)
+       end
+     end
+
+     private
+
+     def generate_events(file_path)
+       events = []
+       interval = 10 # seconds
+
+       CSV.foreach(file_path, headers: true) do |row|
+         queued_at = Time.parse(row['queued_at']) rescue nil
+         started_at = Time.parse(row['started_at']) rescue nil
+         stopped_at = Time.parse(row['stopped_at']) rescue nil
+
+         next unless queued_at && started_at && stopped_at
+
+         # Initialize current_time to the next 10-second interval after queued_at
+         current_time = align_time_to_next_interval(queued_at, interval)
+         end_time = stopped_at
+
+         until current_time >= end_time
+           state = determine_state(current_time, started_at, stopped_at)
+
+           log_data = {
+             job_number: row['job_number'],
+             state: state,
+             name: row['name'],
+             total_ram: row['total_ram'],
+             total_cpus: row['total_cpus'],
+           }
+
+           events << {
+             timestamp: current_time.to_i * 1000,
+             message: log_data.to_json
+           }
+
+           current_time += interval
+         end
+
+         completed_time = align_time_to_next_interval(stopped_at, interval)
+         log_data = {
+           job_number: row['job_number'],
+           state: 'completed',
+           name: row['name'],
+           total_ram: row['total_ram'],
+           total_cpus: row['total_cpus'],
+         }
+
+         events << {
+           timestamp: completed_time.to_i * 1000,
+           message: log_data.to_json
+         }
+       end
+
+       events
+     rescue => e
+       puts "Error generating events: #{e.message}"
+       []
+     end
+
+     # Groups events by their UTC calendar date (YYYY-MM-DD)
+     def group_events_by_date(events)
+       events.group_by do |event|
+         Time.at(event[:timestamp] / 1000).utc.strftime('%Y-%m-%d')
+       end
+     end
+
+     # Dry run: write all events to a local CSV instead of uploading them
+     def handle_dry_run(grouped_events)
+       # Determine the range of dates
+       dates = grouped_events.keys.sort
+       from_date = dates.first
+       to_date = dates.last
+       output_file = "tmp/circleci-job_events-#{from_date}_to_#{to_date}.csv"
+
+       CSV.open(output_file, 'w') do |csv|
+         # Define CSV headers based on log_data fields
+         csv << ['timestamp', 'job_number', 'state', 'name', 'total_ram', 'total_cpus']
+
+         grouped_events.each do |date, events|
+           events.each do |event|
+             log_data = JSON.parse(event[:message])
+             timestamp = Time.at(event[:timestamp] / 1000).utc.iso8601
+             csv << [
+               timestamp,
+               log_data['job_number'],
+               log_data['state'],
+               log_data['name'],
+               log_data['total_ram'],
+               log_data['total_cpus']
+             ]
+           end
+         end
+       end
+       puts "Dry run enabled: All events stored in #{output_file}"
+     rescue => e
+       puts "Error during dry run: #{e.message}"
+     end
+
+     # Uploads each day's events to its own "jobs-<date>" log stream
+     def upload_grouped_events(grouped_events, file_path)
+       grouped_events.each do |date, events|
+         log_stream_name = "jobs-#{date}"
+         ensure_log_stream_exists(log_stream_name)
+
+         upload_events_to_stream(events, log_stream_name, date)
+       end
+       puts "Uploaded #{file_path} to CloudWatch log streams in log group #{@log_group_name}"
+     rescue => e
+       puts "Error uploading events: #{e.message}"
+     end
+
+     def determine_state(current_time, started_at, stopped_at)
+       if current_time < started_at
+         'running'
+       elsif current_time >= started_at && current_time < stopped_at
+         'running'
+       elsif current_time >= stopped_at
+         'completed'
+       else
+         'unknown'
+       end
+     end
+
+     # Aligns a given time to the next 10-second interval and returns a Time object
+     def align_time_to_next_interval(time, interval)
+       aligned_seconds = (time.to_f / interval).ceil * interval
+       Time.at(aligned_seconds).utc
+     end
+
+     # Looks up the log group, offering to create it when running interactively
+     def ensure_log_group_exists
+       log_group = @client.describe_log_groups(log_group_name_prefix: @log_group_name).log_groups.find { |lg| lg.log_group_name == @log_group_name }
+
+       unless log_group
+         if $stdin.tty?
+           prompt = TTY::Prompt.new
+           create = prompt.yes?("Log group '#{@log_group_name}' does not exist. Would you like to create it?")
+
+           if create
+             @client.create_log_group(log_group_name: @log_group_name)
+             puts "Created log group '#{@log_group_name}'."
+           else
+             abort("Log group '#{@log_group_name}' does not exist. Exiting.")
+           end
+         else
+           abort("Log group '#{@log_group_name}' does not exist and no interactive prompt available. Exiting.")
+         end
+       end
+     end
+
+     # Ensures the log stream exists, prompting to delete and recreate it when it already does
+     def ensure_log_stream_exists(log_stream_name)
+       log_stream = @client.describe_log_streams(
+         log_group_name: @log_group_name,
+         log_stream_name_prefix: log_stream_name
+       ).log_streams.find { |stream| stream.log_stream_name == log_stream_name }
+
+       if log_stream
+         if $stdin.tty?
+           prompt = TTY::Prompt.new
+           delete = prompt.yes?("Log stream '#{log_stream_name}' already exists in log group '#{@log_group_name}'. Would you like to delete it and create a new one?")
+
+           if delete
+             @client.delete_log_stream(log_group_name: @log_group_name, log_stream_name: log_stream_name)
+             puts "Deleted existing log stream '#{log_stream_name}'."
+             @client.create_log_stream(log_group_name: @log_group_name, log_stream_name: log_stream_name)
+             puts "Created new log stream '#{log_stream_name}' in log group '#{@log_group_name}'."
+           else
+             abort("Log stream '#{log_stream_name}' already exists. Exiting.")
+           end
+         else
+           abort("Log stream '#{log_stream_name}' already exists in log group '#{@log_group_name}' and no interactive prompt available. Exiting.")
+         end
+       else
+         @client.create_log_stream(log_group_name: @log_group_name, log_stream_name: log_stream_name)
+         puts "Created log stream '#{log_stream_name}' in log group '#{@log_group_name}'."
+       end
+     end
+
+     # Uploads events to a log stream in batches of 1,000 using a bounded pool of threads
+     def upload_events_to_stream(events, log_stream_name, date)
+       batches = events.each_slice(1_000).to_a
+       threads = []
+
+       batches.each_with_index do |events_batch, index|
+         threads << Thread.new do
+           with_retries do
+             params = {
+               log_events: events_batch,
+               log_group_name: @log_group_name,
+               log_stream_name: log_stream_name
+             }
+
+             # Get the sequence token for the log stream
+             response = @client.describe_log_streams(
+               log_group_name: @log_group_name,
+               log_stream_name_prefix: log_stream_name
+             )
+             log_stream = response.log_streams.find { |stream| stream.log_stream_name == log_stream_name }
+             if log_stream && log_stream.upload_sequence_token
+               params[:sequence_token] = log_stream.upload_sequence_token
+             end
+
+             # Upload the log events
+             @client.put_log_events(params)
+           end
+           puts "Uploaded batch #{index + 1}/#{batches.size} for date #{date} to CloudWatch."
+         end
+
+         if threads.size >= MAX_THREADS
+           threads.each(&:join)
+           threads.clear
+         end
+       end
+
+       threads.each(&:join)
+     end
+
+     def send_log(log_group_name, log_stream_name, message)
+       params = {
+         log_events: [{
+           timestamp: (Time.now.to_f * 1000).to_i,
+           message: message
+         }],
+         log_group_name: log_group_name,
+         log_stream_name: log_stream_name
+       }
+
+       response = @client.describe_log_streams(
+         log_group_name: log_group_name,
+         log_stream_name_prefix: log_stream_name
+       )
+       log_stream = response.log_streams.find { |stream| stream.log_stream_name == log_stream_name }
+       if log_stream && log_stream.upload_sequence_token
+         params[:sequence_token] = log_stream.upload_sequence_token
+       end
+
+       @client.put_log_events(params)
+     end
+   end
+ end
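A hedged usage sketch of LogUploader. The log group name and CSV path are examples; the CSV is expected to carry the queued_at/started_at/stopped_at and job_number/name/total_ram/total_cpus columns read in generate_events, and AWS credentials/region are assumed to come from the environment.

    uploader = CircleciTools::LogUploader.new('/circleci/jobs', dry_run: true) # hypothetical log group
    uploader.upload_file('tmp/jobs_aggregated.csv') # dry run writes tmp/circleci-job_events-<from>_to_<to>.csv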
@@ -0,0 +1,31 @@
+ require 'logger'
+
+ module CircleciTools
+   module Retryable
+     MAX_RETRIES = 5
+     BACKOFF_FACTOR = 0.5
+     MAX_BACKOFF_TIME = 60
+
+     def with_retries(max_retries: MAX_RETRIES)
+       retries = 0
+       begin
+         yield
+       rescue => e
+         if retries < max_retries
+           retries += 1
+           backoff_time = [BACKOFF_FACTOR * (2 ** retries), MAX_BACKOFF_TIME].min.floor
+           retry_logger.info "Retry ##{retries} after #{backoff_time} seconds"
+           retry_logger.debug "Thread #{Thread.current.object_id}: Error: #{e.message}"
+           sleep backoff_time
+           retry
+         else
+           retry_logger.warn "Thread #{Thread.current.object_id}: Error: #{e.message}"
+         end
+       end
+     end
+
+     def retry_logger
+       @logger ||= Logger.new(STDOUT)
+     end
+   end
+ end
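A minimal sketch of how Retryable is meant to be mixed in (Example and SomeClient are hypothetical). Per the constants above, backoff grows as 0.5 * 2**n capped at 60 seconds (1s, 2s, 4s, ...), and after the final attempt the error is logged and swallowed rather than re-raised.

    class Example
      include CircleciTools::Retryable

      def call_flaky_api
        with_retries(max_retries: 3) do
          SomeClient.new.fetch # hypothetical call that may raise a transient error
        end
      end
    end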
@@ -0,0 +1,14 @@
+ module CircleciTools
+   class RunnerCalculator
+     attr_reader :runner_ram_gb
+
+     def initialize(runner_ram_gb = 8)
+       @runner_ram_gb = runner_ram_gb
+       @runner_ram_mb = @runner_ram_gb * 1024 # Convert GB to MB
+     end
+
+     def calculate_runners(peak_ram_mb)
+       (peak_ram_mb.to_f / @runner_ram_mb).ceil
+     end
+   end
+ end
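Worked example: with the default 8 GB runner (8192 MB), a peak demand of 20,480 MB rounds up to three runners.

    calculator = CircleciTools::RunnerCalculator.new(8) # 8 GB per runner => 8192 MB
    calculator.calculate_runners(20_480) # => 3 (20480 / 8192 = 2.5, rounded up)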
@@ -0,0 +1,20 @@
+ require 'aws-sdk-s3'
+ require 'logger'
+
+ module CircleciTools
+   class S3UploadService
+     def initialize(bucket_name, logger: Logger.new(STDOUT))
+       @bucket_name = bucket_name
+       @logger = logger
+       @s3_client = Aws::S3::Client.new
+     end
+
+     def upload_file(file_path, s3_key)
+       @logger.info("Uploading #{file_path} to S3 bucket #{@bucket_name} with key #{s3_key}...")
+       @s3_client.put_object(bucket: @bucket_name, key: "circleci/#{s3_key}", body: File.read(file_path))
+       @logger.info("Uploaded #{file_path} to S3 bucket #{@bucket_name} with key #{s3_key}.")
+     rescue Aws::S3::Errors::ServiceError => e
+       @logger.error("Failed to upload #{file_path} to S3: #{e.message}")
+     end
+   end
+ end
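Finally, a usage sketch of S3UploadService; the bucket name and key are hypothetical. Note that the object is stored under a circleci/ prefix regardless of the key passed in.

    uploader = CircleciTools::S3UploadService.new('my-metrics-bucket') # hypothetical bucket
    uploader.upload_file('tmp/jobs_aggregated.csv', 'jobs_aggregated.csv')
    # stored at s3://my-metrics-bucket/circleci/jobs_aggregated.csv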