github-daily-digest 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README.md +35 -0
- data/Rakefile +4 -0
- data/bin/console +11 -0
- data/bin/github-daily-digest +140 -0
- data/bin/setup +8 -0
- data/github-daily-digest.gemspec +47 -0
- data/github-daily-digest.rb +20 -0
- data/lib/activity_analyzer.rb +48 -0
- data/lib/configuration.rb +260 -0
- data/lib/daily_digest_runner.rb +932 -0
- data/lib/gemini_service.rb +616 -0
- data/lib/github-daily-digest/version.rb +5 -0
- data/lib/github_daily_digest.rb +16 -0
- data/lib/github_graphql_service.rb +1191 -0
- data/lib/github_service.rb +364 -0
- data/lib/html_formatter.rb +1297 -0
- data/lib/language_analyzer.rb +163 -0
- data/lib/markdown_formatter.rb +137 -0
- data/lib/output_formatter.rb +818 -0
- metadata +178 -0
data/lib/gemini_service.rb
@@ -0,0 +1,616 @@
# github_daily_digest/lib/gemini_service.rb
require 'gemini-ai'
require 'json'
require 'time' # needed for Time.parse below
require 'set'  # needed for Set on Rubies that do not autoload it
require 'pry'

module GithubDailyDigest
  class GeminiService
    # Default model - will be overridden by configuration
    DEFAULT_MODEL = 'gemini-2.5-flash-preview-04-17' # Updated to a more widely available model
    # Keys expected in the Gemini JSON response
    EXPECTED_KEYS = %w[projects changes contribution_weights pr_count summary lines_changed].freeze

    attr_reader :client

    def initialize(api_key:, logger:, config:, github_graphql_service:)
      @logger = logger
      @config = config
      @github_graphql_service = github_graphql_service
      @model = config.gemini_model || DEFAULT_MODEL

      initialize_client(api_key, @model)
    rescue => e
      @logger.fatal("Failed to initialize Gemini client: #{e.message}")
      raise
    end

    def initialize_client(api_key, model)
      @logger.info("Initializing Gemini client with model: #{model}")

      @client = Gemini.new(
        credentials: {
          service: 'generative-language-api',
          api_key: api_key
        },
        options: {
          model: model
        }
      )
    end

    def analyze_activity(username:, commits_with_code:, review_count:, time_window_days:)
      # If there are no commits and no reviews, return empty data
      if commits_with_code.empty? && review_count == 0
        @logger.info("No activity found for #{username} to analyze.")
        return default_no_activity_report
      else
        @logger.debug("Found activity for #{username}: #{commits_with_code.size} commits in repositories: #{commits_with_code.map { |c| c[:repo] }.uniq.join(', ')}")
      end

      # Make multiple attempts to analyze with Gemini, handle errors gracefully
      begin
        prompt = build_prompt(username, commits_with_code, review_count, time_window_days)
        # @logger.debug("Gemini Prompt for #{username}:\n#{prompt}") # Uncomment for debugging prompts

        response_text = execute_gemini_request(prompt, username)
        @logger.debug("Gemini response for #{username}: #{response_text}")

        if response_text
          return parse_and_validate_response(response_text, username)
        else
          # Failure occurred within execute_gemini_request (already logged)
          @logger.warn("Gemini analysis failed for #{username}, using fallback analysis.")
          return create_fallback_analysis(username, commits_with_code, review_count)
        end
      rescue => e
        @logger.error("Unexpected error analyzing #{username}'s activity: #{e.message}")
        @logger.warn("Using fallback analysis due to error.")
        return create_fallback_analysis(username, commits_with_code, review_count)
      end
    end

    private

    def build_prompt(username, commits, review_count, time_window_days)
      total_lines_changed = 0
      total_additions = 0
      total_deletions = 0
      commits_with_stats = 0

      repos = Set.new
      commit_summary = ""

      # Process commits to extract statistics and build a summary
      commits.each do |commit|
        repos << commit[:repo]
        commit_date = (Time.parse(commit[:date].to_s) rescue nil)

        # Build commit message summary with additions/deletions if available
        stats_text = ""
        if commit[:stats]
          additions = commit[:stats][:additions] || 0
          deletions = commit[:stats][:deletions] || 0
          total_lines = additions.to_i + deletions.to_i

          total_lines_changed += total_lines
          total_additions += additions.to_i
          total_deletions += deletions.to_i
          commits_with_stats += 1

          stats_text = " (+#{additions}, -#{deletions})"
        end

        # Add commit message and stats (fall back to "Unknown" when the date cannot be parsed)
        message = commit[:message] || "No message"
        date_label = commit_date ? commit_date.strftime('%Y-%m-%d') : "Unknown"
        commit_summary << "* #{date_label}: [#{commit[:repo]}] #{message.strip.gsub(/\n+/, ' ')}#{stats_text}\n"

        # Add code changes if available (limited to avoid huge prompts)
        if commit[:code_changes] && !commit[:code_changes].empty? && commit[:code_changes][:files]
          commit_summary << " Code changes:\n"
          commit[:code_changes][:files].each_with_index do |file, index|
            # Limit to first 3 files to avoid excessive prompt size
            break if index >= 3

            commit_summary << " - #{file[:path]} (+#{file[:additions]}, -#{file[:deletions]})\n"

            # Include a limited snippet of the patch if available
            if file[:patch]
              # Limit the patch to 10 lines max
              patch_preview = file[:patch].split("\n")[0...10].join("\n")
              # Add an ellipsis if the patch was truncated
              patch_preview += "\n..." if file[:patch].split("\n").size > 10

              commit_summary << "```\n#{patch_preview}\n```\n"
            end
          end

          # Indicate if there were more files not shown
          if commit[:code_changes][:changed_files] && commit[:code_changes][:changed_files] > 3
            commit_summary << " - ... and #{commit[:code_changes][:changed_files] - 3} more files\n"
          end
        elsif commit[:files] && commit[:files].to_i > 0
          # Include basic file count information if code changes couldn't be fetched
          commit_summary << " - Changed #{commit[:files]} files (detailed changes not available)\n"
        end
      end

      # If no commits had detailed stats, try to estimate from commit count and changed files
      if commits_with_stats == 0 && !commits.empty?
        total_files_changed = commits.sum { |c| c[:files].to_i }
        estimated_lines = total_files_changed * 30 # Rough estimate: 30 lines per file

        if estimated_lines > 0
          total_lines_changed = estimated_lines
          total_additions = (estimated_lines * 0.7).to_i # Assume 70% additions
          total_deletions = (estimated_lines * 0.3).to_i # Assume 30% deletions
          commits_with_stats = commits.size
        end
      end

      # Calculate some derived metrics
      avg_lines = commits_with_stats > 0 ? (total_lines_changed.to_f / commits_with_stats).round : 0
      repos_joined = repos.to_a.join(", ")

      # Format the prompt
      <<~PROMPT
        You are an expert GitHub activity analyzer specializing in code complexity and engineering contribution analysis. Analyze the following GitHub user's activity:

        GitHub User: #{username}
        Time Period: Last #{time_window_days} days
        Total Commits: #{commits.size}
        PR Reviews: #{review_count}
        #{commits_with_stats > 0 ? "Total Lines Changed: #{total_lines_changed} (#{total_additions} additions, #{total_deletions} deletions)" : ""}
        #{commits_with_stats > 0 ? "Average Lines per Commit: #{avg_lines}" : ""}
        Repositories: #{repos_joined}

        Commit Details (with code samples where available):
        #{commit_summary}

        As a technical expert, carefully analyze:
        1. The actual code complexity and technical depth of the work based on the commit messages and code changes
        2. A weighted contribution score using the factors described below
        3. A brief summary that captures the technical essence of their contribution (max 100 characters)
        4. Key technical projects they worked on

        IMPORTANT INSTRUCTIONS:
        - Calculate weighted contribution score using these factors (each on a scale of 0-10):
          * Lines of code weight: Based on total volume of code changed
          * Complexity weight: Based on algorithmic/architectural complexity
          * Technical depth weight: Based on core vs peripheral system components
          * Scope weight: Based on number of repositories and projects involved
          * PR review weight: Based on code review contributions
        - Use these weights to allow fair comparison between users analyzed separately
        - Higher weights should be given for:
          * Large amounts of code changed
          * Complex algorithmic changes
          * Changes to core systems/architectural components
          * Work spanning multiple repositories
          * Significant code review contributions
        - Type of contribution affects weights:
          * Feature development: Higher complexity and technical depth weights
          * Bug fixes: Higher technical depth weight
          * Refactoring: Higher complexity weight
          * Documentation: Lower weights overall

        Return your analysis in this exact JSON format only, with no additional explanation:
        ```json
        {
          "projects": #{repos.empty? ? "[]" : repos.to_a.to_json},
          "changes": #{commits.size},
          "contribution_weights": {
            "lines_of_code": 5,
            "complexity": 6,
            "technical_depth": 5,
            "scope": 4,
            "pr_reviews": 3
          },
          "pr_count": #{review_count},
          "summary": "Brief description of their work",
          "lines_changed": #{total_lines_changed}
        }
        ```
      PROMPT
    end

    def execute_gemini_request(prompt, username, retries = @config.max_api_retries)
      attempts = 0
      begin
        attempts += 1
        @logger.debug("Sending request to Gemini for #{username} (Attempt #{attempts}/#{retries})")
        @logger.debug("Using model: #{@model} with API key: #{@config.gemini_api_key.to_s[0..5]}...")

        generation_config = {
          temperature: 0.2 # Lower temp for consistency
        }

        @logger.debug("Request configuration: model=#{@model}")

        response = @client.generate_content({
          contents: { role: 'user', parts: { text: prompt } },
          generation_config: generation_config
        })

        # Extract text from the response - gemini-ai gem has a different structure
        @logger.debug("Response class: #{response.class}")
        @logger.debug("Response keys: #{response.keys}") if response.respond_to?(:keys)
        @logger.debug("Response inspect (truncated): #{response.inspect[0..300]}...")

        # More flexible response parsing based on structure
        raw_response = nil

        if response.is_a?(Hash) && response['candidates'] && response['candidates'][0] &&
           response['candidates'][0]['content'] && response['candidates'][0]['content']['parts'] &&
           response['candidates'][0]['content']['parts'][0]
          # Direct hash structure
          raw_response = response['candidates'][0]['content']['parts'][0]['text']
          @logger.debug("Parsed response using direct hash structure")
        elsif response.is_a?(Array) && response[0] && response[0]['candidates'] &&
              response[0]['candidates'][0] && response[0]['candidates'][0]['content'] &&
              response[0]['candidates'][0]['content']['parts'] &&
              response[0]['candidates'][0]['content']['parts'][0]
          # Array of events structure
          raw_response = response[0]['candidates'][0]['content']['parts'][0]['text']
          @logger.debug("Parsed response using array of events structure")
        else
          @logger.error("Gemini response for #{username} has unexpected structure: #{response.inspect}")
          raise StandardError, "Invalid Gemini response structure"
        end

        @logger.debug("Raw Gemini response for #{username}: #{raw_response.strip}")
        return raw_response

      rescue Faraday::ResourceNotFound => e
        @logger.error("Gemini API resource not found error: #{e.message}")
        @logger.error("This usually indicates an invalid API key, endpoint, or model name.")
        @logger.error("Current model: #{@model}")

        if attempts < retries
          # On 404, try with a different model
          if attempts == 1
            @logger.warn("Attempting with alternate model 'gemini-1.5-pro-latest'...")
            @model = 'gemini-1.5-pro-latest'
            initialize_client(@config.gemini_api_key, @model)
          elsif attempts == 2
            @logger.warn("Attempting with alternate model 'gemini-pro'...")
            @model = 'gemini-pro'
            initialize_client(@config.gemini_api_key, @model)
          end

          sleep_time = calculate_backoff(attempts)
          @logger.warn("Retrying Gemini request for #{username} in #{sleep_time}s...")
          sleep sleep_time
          retry
        end

        nil # Return nil to trigger fallback
      rescue Faraday::ConnectionFailed => e
        @logger.error("Gemini API connection error: #{e.message}")
        if attempts < retries
          sleep_time = calculate_backoff(attempts)
          @logger.warn("Retrying Gemini request for #{username} in #{sleep_time}s...")
          sleep sleep_time
          retry
        else
          @logger.error("Gemini connection failed after #{attempts} attempts.")
          nil
        end
      rescue => e
        @logger.error("General error during Gemini request for #{username}: #{e.class} - #{e.message}")
        if attempts < retries
          sleep_time = calculate_backoff(attempts)
          @logger.warn("Retrying Gemini request for #{username} due to unexpected error in #{sleep_time}s...")
          sleep sleep_time
          retry
        else
          @logger.error("Gemini request failed permanently for #{username} after #{attempts} attempts.")
          nil # Indicate failure
        end
      end
    end

    def parse_and_validate_response(response_text, username)
      # Find JSON in the response
      json_match = response_text.match(/```json\s*(.*?)\s*```/m) || response_text.match(/\{.*\}/m)

      if json_match
        begin
          json_str = json_match[1] || json_match[0]
          @logger.debug("Raw JSON from Gemini for #{username}: #{json_str}")

          result = JSON.parse(json_str)
          @logger.debug("Parsed Gemini response for #{username}: #{result.inspect}")

          # Validate required fields
          unless result["projects"] && result["changes"] && result["summary"]
            missing_fields = []
            missing_fields << "projects" unless result["projects"]
            missing_fields << "changes" unless result["changes"]
            missing_fields << "summary" unless result["summary"]

            @logger.warn("Invalid Gemini response format for #{username}: missing fields: #{missing_fields.join(', ')}")
            return fallback_result(username)
          end

          # Handle missing or malformed contribution_weights with defaults
          unless result["contribution_weights"].is_a?(Hash) &&
                 result["contribution_weights"].has_key?("lines_of_code") &&
                 result["contribution_weights"].has_key?("complexity") &&
                 result["contribution_weights"].has_key?("technical_depth") &&
                 result["contribution_weights"].has_key?("scope") &&
                 result["contribution_weights"].has_key?("pr_reviews")

            @logger.warn("Gemini response for #{username} is missing proper contribution_weights structure")

            # Create default weights based on other available data
            lines_changed = result["lines_changed"].to_i
            commits = result["changes"].to_i
            projects = result["projects"].is_a?(Array) ? result["projects"].size : 1
            pr_count = result["pr_count"].to_i

            # Calculate weights using a 0-10 scale
            result["contribution_weights"] = {
              "lines_of_code" => calculate_loc_weight(lines_changed),
              "complexity" => calculate_complexity_weight(commits, projects),
              "technical_depth" => calculate_depth_weight(projects),
              "scope" => calculate_scope_weight(commits),
              "pr_reviews" => calculate_pr_weight(pr_count)
            }

            @logger.info("Created default contribution_weights for #{username}: #{result["contribution_weights"].inspect}")
          else
            # Convert existing weights from 0-100 scale to 0-10 scale
            if result["contribution_weights"].is_a?(Hash)
              @logger.debug("BEFORE conversion - contribution_weights for #{username}: #{result["contribution_weights"].inspect}")

              ["lines_of_code", "complexity", "technical_depth", "scope", "pr_reviews"].each do |key|
                value = result["contribution_weights"][key]
                if value.is_a?(String) || value.is_a?(Numeric)
                  # Convert to integer and scale down if the value is large
                  numeric_value = value.to_i
                  if numeric_value > 10
                    result["contribution_weights"][key] = (numeric_value / 10.0).ceil
                  else
                    result["contribution_weights"][key] = numeric_value
                  end
                end
              end

              @logger.debug("AFTER conversion - contribution_weights for #{username}: #{result["contribution_weights"].inspect}")
            end
          end

          # Calculate the total score
          total_score = 0
          if result["contribution_weights"].is_a?(Hash)
            ["lines_of_code", "complexity", "technical_depth", "scope", "pr_reviews"].each do |key|
              total_score += result["contribution_weights"][key].to_i
            end
          end
          result["total_score"] = total_score

          @logger.info("Successfully parsed Gemini response with contribution_weights for #{username}")
          @logger.info("FINAL contribution_weights for #{username}: #{result["contribution_weights"].inspect}")
          @logger.info("FINAL total_score for #{username}: #{result["total_score"]}")
          result
        rescue JSON::ParserError => e
          @logger.error("Failed to parse Gemini response for #{username}: #{e.message}")
          fallback_result(username)
        end
      else
        @logger.error("Could not extract JSON from Gemini response for #{username}")
        fallback_result(username)
      end
    end

    # Helper methods for calculating weights on a 0-10 scale
    def calculate_loc_weight(lines_changed)
      case lines_changed
      when 0..500 then 2
      when 501..2000 then 4
      when 2001..5000 then 6
      when 5001..10000 then 8
      else 10
      end
    end

    def calculate_complexity_weight(commits, repo_count)
      base = case commits
             when 0..5 then 2
             when 6..15 then 4
             when 16..30 then 6
             when 31..50 then 8
             else 10
             end

      # Adjust for multi-repo work (max 10)
      [base + (repo_count > 1 ? 2 : 0), 10].min
    end

    def calculate_depth_weight(project_count)
      case project_count
      when 0..1 then 2
      when 2..3 then 4
      when 4..5 then 6
      when 6..8 then 8
      else 10
      end
    end

    def calculate_scope_weight(commits)
      case commits
      when 0..5 then 2
      when 6..15 then 4
      when 16..30 then 6
      when 31..50 then 8
      else 10
      end
    end

    def calculate_pr_weight(pr_count)
      case pr_count
      when 0 then 0
      when 1..2 then 3
      when 3..5 then 5
      when 6..10 then 7
      else 10
      end
    end

    def fallback_result(username)
      {
        "projects" => [],
        "changes" => 0,
        "contribution_weights" => {
          "lines_of_code" => 5,
          "complexity" => 6,
          "technical_depth" => 5,
          "scope" => 4,
          "pr_reviews" => 3
        },
        "pr_count" => 0,
        "summary" => "Could not analyze activity",
        "lines_changed" => 0
      }
    end

    def calculate_backoff(attempt)
      # Exponential backoff with jitter
      (@config.rate_limit_sleep_base ** attempt) + rand(0.0..1.0)
    end

    def default_no_activity_report
      {
        projects: [],
        changes: 0,
        contribution_weights: {
          lines_of_code: 0,
          complexity: 0,
          technical_depth: 0,
          scope: 0,
          pr_reviews: 0
        },
        pr_count: 0,
        summary: "No activity detected in the specified time window.",
        lines_changed: 0,
        _generated_by: "fallback_system"
      }
    end

    # Create a basic analysis when Gemini service fails
    def create_fallback_analysis(username, commits, review_count)
      @logger.debug("Creating fallback analysis for #{username}")

      total_lines_changed = 0
      total_additions = 0
      total_deletions = 0
      complexity_score = 1.0
      projects = Set.new

      # Process commits to extract statistics
      commits.each do |commit|
        projects << commit[:repo] if commit[:repo]

        # Calculate lines changed
        if commit[:stats]
          additions = commit[:stats][:additions].to_i
          deletions = commit[:stats][:deletions].to_i

          total_lines_changed += (additions + deletions)
          total_additions += additions
          total_deletions += deletions
        end

        # Simple heuristic for complexity - commits with more complex messages
        # or with specific keywords tend to be more complex
        commit_message = commit[:message].to_s.downcase
        if commit_message.include?("refactor") || commit_message.include?("architecture") ||
           commit_message.include?("redesign") || commit_message.include?("performance")
          complexity_score *= 1.2
        end
      end

      # If no line data available, make an estimate based on commit count
      if total_lines_changed == 0 && commits.size > 0
        # Assume average 50 lines per commit if we don't have actual data
        total_lines_changed = commits.size * 50
        total_additions = (total_lines_changed * 0.7).to_i # Assume 70% additions
        total_deletions = (total_lines_changed * 0.3).to_i # Assume 30% deletions
      end

      # Calculate weights on 0-10 scale
      loc_weight = case total_lines_changed
                   when 0..500 then 2
                   when 501..2000 then 4
                   when 2001..5000 then 6
                   when 5001..10000 then 8
                   else 10
                   end

      complexity_weight = case complexity_score
                          when 0..1.1 then 2
                          when 1.1..1.3 then 4
                          when 1.3..1.5 then 6
                          when 1.5..2.0 then 8
                          else 10
                          end

      technical_depth_weight = case projects.size
                               when 0..1 then 2
                               when 2..3 then 4
                               when 4..5 then 6
                               when 6..7 then 8
                               else 10
                               end

      scope_weight = case commits.size
                     when 0..5 then 2
                     when 6..15 then 4
                     when 16..30 then 6
                     when 31..50 then 8
                     else 10
                     end

      pr_weight = case review_count
                  when 0 then 0
                  when 1..2 then 3
                  when 3..5 then 5
                  when 6..10 then 7
                  else 10
                  end

      # Calculate total score
      total_score = loc_weight + complexity_weight + technical_depth_weight + scope_weight + pr_weight

      # Generate summary based on activity
      summary = if commits.empty? && review_count == 0
                  "No activity detected in the specified time window."
                elsif projects.size > 1
                  "Cross-repository development across #{projects.size} projects with #{commits.size} commits."
                elsif commits.size > 20
                  "Active development with #{commits.size} commits focusing on #{projects.first}."
                else
                  "Development activity on #{projects.first || 'repositories'}."
                end

      @logger.info("Generated fallback analysis for #{username}")

      {
        "projects" => projects.to_a,
        "changes" => commits.size,
        "contribution_weights" => {
          "lines_of_code" => loc_weight,
          "complexity" => complexity_weight,
          "technical_depth" => technical_depth_weight,
          "scope" => scope_weight,
          "pr_reviews" => pr_weight
        },
        "total_score" => total_score,
        "pr_count" => review_count,
        "summary" => summary,
        "lines_changed" => total_lines_changed,
        "_generated_by" => "fallback_system"
      }
    end
  end
end
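For orientation, here is a minimal usage sketch of GeminiService#analyze_activity. It is not part of the gem: the Struct stands in for the gem's own Configuration class and assumes only the attributes the service reads above (gemini_model, gemini_api_key, max_api_retries, rate_limit_sleep_base), and the commit hash uses the keys build_prompt expects (:repo, :date, :message, :stats).

# Hypothetical usage sketch (not shipped with the gem); assumes the gem and its
# dependencies are installed and GEMINI_API_KEY is set in the environment.
require 'logger'
require 'github_daily_digest'

logger = Logger.new($stdout)

# Stand-in for the gem's Configuration class, exposing only the attributes used above.
StubConfig = Struct.new(:gemini_model, :gemini_api_key, :max_api_retries, :rate_limit_sleep_base)
config = StubConfig.new(nil, ENV.fetch('GEMINI_API_KEY'), 3, 2)

service = GithubDailyDigest::GeminiService.new(
  api_key: config.gemini_api_key,
  logger: logger,
  config: config,
  github_graphql_service: nil # analyze_activity does not call it directly
)

commits = [
  { repo: 'acme/api', date: '2025-04-20T10:00:00Z',
    message: 'Refactor auth middleware', stats: { additions: 120, deletions: 45 } }
]

report = service.analyze_activity(
  username: 'octocat',
  commits_with_code: commits,
  review_count: 2,
  time_window_days: 1
)

# Gemini-derived results use string keys; the no-activity report uses symbol keys.
puts report['summary'] || report[:summary]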
data/lib/github_daily_digest.rb
@@ -0,0 +1,16 @@
# frozen_string_literal: true

require_relative "github-daily-digest/version"
require_relative "configuration"
require_relative "github_service"
require_relative "github_graphql_service"
require_relative "gemini_service"
require_relative "activity_analyzer"
require_relative "daily_digest_runner"
require_relative "output_formatter"
require_relative "html_formatter"

module GithubDailyDigest
  class Error < StandardError; end
  # Your code goes here...
end
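As a quick sanity check of the entry file above, the following hypothetical snippet loads the library from a source checkout and confirms the namespace is wired up. The $LOAD_PATH line and the VERSION constant (expected to come from github-daily-digest/version.rb) are assumptions, not shown in this diff.

# Hypothetical smoke check, run from the checkout directory containing lib/.
$LOAD_PATH.unshift File.expand_path('lib', __dir__)
require 'github_daily_digest'

puts GithubDailyDigest::VERSION if defined?(GithubDailyDigest::VERSION) # from version.rb, assumed
raise 'entry file did not load' unless defined?(GithubDailyDigest::Error)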