reposer 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/.rubocop.yml +33 -0
- data/.ruby-version +1 -0
- data/CHANGELOG.md +203 -0
- data/Gemfile +21 -0
- data/Gemfile.lock +151 -0
- data/LICENSE +21 -0
- data/README.md +489 -0
- data/READY_FOR_RELEASE.md +201 -0
- data/RELEASE_GUIDE.md +183 -0
- data/Rakefile +34 -0
- data/demo.rb +43 -0
- data/demo_ai_providers.rb +261 -0
- data/demo_interactive.rb +111 -0
- data/exe/repose +6 -0
- data/exe/reposer +6 -0
- data/lib/repose/ai/gemini_provider.rb +193 -0
- data/lib/repose/ai/ollama_provider.rb +204 -0
- data/lib/repose/ai_generator.rb +172 -0
- data/lib/repose/cli.rb +180 -0
- data/lib/repose/config.rb +47 -0
- data/lib/repose/errors.rb +17 -0
- data/lib/repose/github_client.rb +52 -0
- data/lib/repose/version.rb +5 -0
- data/lib/repose.rb +20 -0
- data/recreate_repo.rb +74 -0
- data/setup_github_repo.rb +92 -0
- metadata +186 -0
data/lib/repose/ai/gemini_provider.rb
@@ -0,0 +1,193 @@
+# frozen_string_literal: true
+
+require "net/http"
+require "json"
+require "uri"
+
+module Repose
+  module AI
+    class GeminiProvider
+      API_ENDPOINT = "https://generativelanguage.googleapis.com/v1beta/models"
+      DEFAULT_MODEL = "gemini-1.5-flash"
+      MAX_RETRIES = 3
+      TIMEOUT = 30
+
+      attr_reader :api_key, :model
+
+      def initialize(api_key: nil, model: DEFAULT_MODEL)
+        @api_key = api_key || ENV["GEMINI_API_KEY"]
+        @model = model
+
+        raise Repose::ConfigurationError, "Gemini API key not configured" if @api_key.nil? || @api_key.empty?
+      end
+
+      def generate_description(context)
+        prompt = build_description_prompt(context)
+        response = call_api(prompt, max_tokens: 100)
+        clean_response(response)
+      end
+
+      def generate_topics(context)
+        prompt = build_topics_prompt(context)
+        response = call_api(prompt, max_tokens: 50)
+        parse_topics(response)
+      end
+
+      def generate_readme(context)
+        prompt = build_readme_prompt(context)
+        response = call_api(prompt, max_tokens: 1000)
+        clean_response(response)
+      end
+
+      def available?
+        return false if @api_key.nil? || @api_key.empty?
+
+        # Quick health check
+        uri = URI("#{API_ENDPOINT}/#{model}?key=#{api_key}")
+        request = Net::HTTP::Get.new(uri)
+
+        response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true, open_timeout: 5, read_timeout: 5) do |http|
+          http.request(request)
+        end
+
+        response.is_a?(Net::HTTPSuccess)
+      rescue StandardError
+        false
+      end
+
+      private
+
+      def call_api(prompt, max_tokens: 500, temperature: 0.7)
+        uri = URI("#{API_ENDPOINT}/#{model}:generateContent?key=#{api_key}")
+
+        payload = {
+          contents: [
+            {
+              parts: [
+                { text: prompt }
+              ]
+            }
+          ],
+          generationConfig: {
+            temperature: temperature,
+            maxOutputTokens: max_tokens,
+            topP: 0.95,
+            topK: 40
+          }
+        }
+
+        retries = 0
+        begin
+          request = Net::HTTP::Post.new(uri)
+          request["Content-Type"] = "application/json"
+          request.body = payload.to_json
+
+          response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true,
+                                     open_timeout: TIMEOUT, read_timeout: TIMEOUT) do |http|
+            http.request(request)
+          end
+
+          handle_response(response)
+        rescue Net::OpenTimeout, Net::ReadTimeout => e
+          retries += 1
+          raise Repose::APIError, "Gemini API timeout: #{e.message}" if retries > MAX_RETRIES
+
+          sleep(2**retries) # Exponential backoff
+          retry
+        rescue StandardError => e
+          raise Repose::APIError, "Gemini API error: #{e.message}"
+        end
+      end
+
+      def handle_response(response)
+        case response
+        when Net::HTTPSuccess
+          body = JSON.parse(response.body)
+          extract_text_from_response(body)
+        when Net::HTTPUnauthorized
+          raise Repose::AuthenticationError, "Invalid Gemini API key"
+        when Net::HTTPTooManyRequests
+          raise Repose::RateLimitError, "Gemini API rate limit exceeded"
+        else
+          raise Repose::APIError, "Gemini API error (#{response.code}): #{response.body}"
+        end
+      end
+
+      def extract_text_from_response(body)
+        return "" unless body.dig("candidates", 0, "content", "parts", 0, "text")
+
+        body.dig("candidates", 0, "content", "parts", 0, "text").strip
+      end
+
+      def build_description_prompt(context)
+        <<~PROMPT
+          Generate a concise, professional GitHub repository description (max 100 characters) for:
+
+          Repository name: #{context[:name]}
+          Language: #{context[:language]}
+          Framework: #{context[:framework]}
+          Purpose: #{context[:purpose]}
+
+          Return only the description text, no quotes or extra formatting.
+        PROMPT
+      end
+
+      def build_topics_prompt(context)
+        <<~PROMPT
+          Generate 5-8 relevant GitHub topics (keywords) for this repository:
+
+          Repository name: #{context[:name]}
+          Language: #{context[:language]}
+          Framework: #{context[:framework]}
+          Purpose: #{context[:purpose]}
+
+          Return topics as comma-separated lowercase words (e.g., javascript, react, api, nodejs).
+          No quotes, no explanations, just the comma-separated list.
+        PROMPT
+      end
+
+      def build_readme_prompt(context)
+        title = context[:name].split(/[-_]/).map(&:capitalize).join(" ")
+
+        <<~PROMPT
+          Generate a comprehensive README.md for a GitHub repository with these details:
+
+          Repository name: #{context[:name]} (Title: #{title})
+          Language: #{context[:language]}
+          Framework: #{context[:framework]}
+          Purpose: #{context[:purpose]}
+
+          Include these sections:
+          - Title and brief description
+          - Features (3-5 bullet points)
+          - Installation instructions (language-specific)
+          - Usage examples with code blocks
+          - Contributing guidelines
+          - License (MIT)
+
+          Use proper Markdown formatting. Be concise and professional.
+          Return only the README content, no extra commentary.
+        PROMPT
+      end
+
+      def clean_response(text)
+        return "" if text.nil? || text.empty?
+
+        # Remove common markdown artifacts
+        text.gsub(/^```\w*\n/, "")
+            .gsub(/\n```$/, "")
+            .strip
+      end
+
+      def parse_topics(text)
+        return [] if text.nil? || text.empty?
+
+        # Split by commas and clean up
+        topics = text.split(",").map(&:strip).map(&:downcase)
+
+        # Remove duplicates and limit to 8
+        topics.uniq.first(8)
+      end
+    end
+  end
+end
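For orientation, a minimal sketch of how this provider could be exercised on its own, based only on the code shown above. It assumes GEMINI_API_KEY is set and that `require "repose"` loads the provider and error classes (the gem's `lib/repose.rb` is not shown in this diff); the repository name and purpose in the context hash are purely hypothetical.

```ruby
require "repose"

# Hypothetical project details; the keys mirror what the prompt builders read.
context = {
  name: "weather-api",
  language: "Ruby",
  framework: "Sinatra",
  purpose: "serving local weather forecasts"
}

# Reads ENV["GEMINI_API_KEY"]; raises Repose::ConfigurationError if it is missing.
provider = Repose::AI::GeminiProvider.new

if provider.available?
  puts provider.generate_description(context)   # short description string
  puts provider.generate_topics(context).inspect # up to 8 lowercase topics
end
```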
data/lib/repose/ai/ollama_provider.rb
@@ -0,0 +1,204 @@
+# frozen_string_literal: true
+
+require "net/http"
+require "json"
+require "uri"
+
+module Repose
+  module AI
+    class OllamaProvider
+      DEFAULT_ENDPOINT = "http://localhost:11434"
+      DEFAULT_MODEL = "mistral"
+      MAX_RETRIES = 2
+      TIMEOUT = 45
+
+      attr_reader :endpoint, :model
+
+      def initialize(endpoint: nil, model: nil)
+        @endpoint = endpoint || ENV["OLLAMA_ENDPOINT"] || DEFAULT_ENDPOINT
+        @model = model || ENV["OLLAMA_MODEL"] || DEFAULT_MODEL
+      end
+
+      def generate_description(context)
+        prompt = build_description_prompt(context)
+        response = call_api(prompt, max_tokens: 100)
+        clean_response(response)
+      end
+
+      def generate_topics(context)
+        prompt = build_topics_prompt(context)
+        response = call_api(prompt, max_tokens: 50)
+        parse_topics(response)
+      end
+
+      def generate_readme(context)
+        prompt = build_readme_prompt(context)
+        response = call_api(prompt, max_tokens: 2000)
+        clean_response(response)
+      end
+
+      def available?
+        list_models.any?
+      rescue StandardError
+        false
+      end
+
+      def list_models
+        uri = URI("#{endpoint}/api/tags")
+        request = Net::HTTP::Get.new(uri)
+
+        response = Net::HTTP.start(uri.hostname, uri.port,
+                                   open_timeout: 5, read_timeout: 5) do |http|
+          http.request(request)
+        end
+
+        return [] unless response.is_a?(Net::HTTPSuccess)
+
+        body = JSON.parse(response.body)
+        body["models"]&.map { |m| m["name"] } || []
+      rescue StandardError
+        []
+      end
+
+      def pull_model(model_name = @model)
+        uri = URI("#{endpoint}/api/pull")
+        request = Net::HTTP::Post.new(uri)
+        request["Content-Type"] = "application/json"
+        request.body = { name: model_name }.to_json
+
+        response = Net::HTTP.start(uri.hostname, uri.port,
+                                   open_timeout: 300, read_timeout: 300) do |http|
+          http.request(request)
+        end
+
+        response.is_a?(Net::HTTPSuccess)
+      rescue StandardError => e
+        raise Repose::APIError, "Failed to pull Ollama model: #{e.message}"
+      end
+
+      private
+
+      def call_api(prompt, max_tokens: 500, temperature: 0.7)
+        uri = URI("#{endpoint}/api/generate")
+
+        payload = {
+          model: model,
+          prompt: prompt,
+          stream: false,
+          options: {
+            temperature: temperature,
+            num_predict: max_tokens,
+            top_p: 0.9,
+            top_k: 40
+          }
+        }
+
+        retries = 0
+        begin
+          request = Net::HTTP::Post.new(uri)
+          request["Content-Type"] = "application/json"
+          request.body = payload.to_json
+
+          response = Net::HTTP.start(uri.hostname, uri.port,
+                                     open_timeout: TIMEOUT, read_timeout: TIMEOUT) do |http|
+            http.request(request)
+          end
+
+          handle_response(response)
+        rescue Net::OpenTimeout, Net::ReadTimeout => e
+          retries += 1
+          raise Repose::APIError, "Ollama timeout: #{e.message}" if retries > MAX_RETRIES
+
+          sleep(3 * retries) # Linear backoff
+          retry
+        rescue Errno::ECONNREFUSED
+          raise Repose::APIError, "Cannot connect to Ollama at #{endpoint}. Is Ollama running?"
+        rescue StandardError => e
+          raise Repose::APIError, "Ollama error: #{e.message}"
+        end
+      end
+
+      def handle_response(response)
+        case response
+        when Net::HTTPSuccess
+          body = JSON.parse(response.body)
+          body["response"]&.strip || ""
+        when Net::HTTPNotFound
+          raise Repose::APIError, "Ollama model '#{model}' not found. Run: ollama pull #{model}"
+        else
+          raise Repose::APIError, "Ollama error (#{response.code}): #{response.body}"
+        end
+      end
+
+      def build_description_prompt(context)
+        <<~PROMPT
+          Generate a concise GitHub repository description (max 100 characters) for:
+
+          Repository: #{context[:name]}
+          Language: #{context[:language]}
+          Framework: #{context[:framework]}
+          Purpose: #{context[:purpose]}
+
+          Return ONLY the description text with no quotes or formatting.
+        PROMPT
+      end
+
+      def build_topics_prompt(context)
+        <<~PROMPT
+          Generate 5-8 GitHub topics for:
+
+          Repository: #{context[:name]}
+          Language: #{context[:language]}
+          Framework: #{context[:framework]}
+          Purpose: #{context[:purpose]}
+
+          Return ONLY comma-separated lowercase keywords (e.g., python, api, docker, cli).
+        PROMPT
+      end
+
+      def build_readme_prompt(context)
+        title = context[:name].split(/[-_]/).map(&:capitalize).join(" ")
+
+        <<~PROMPT
+          Create a GitHub README.md for:
+
+          Repository: #{context[:name]} (Display as: #{title})
+          Language: #{context[:language]}
+          Framework: #{context[:framework]}
+          Purpose: #{context[:purpose]}
+
+          Include:
+          - Title (# #{title})
+          - Brief description
+          - Features (3-5 bullet points)
+          - Installation (#{context[:language]}-specific commands)
+          - Usage with code examples
+          - Contributing
+          - MIT License
+
+          Use proper Markdown. Return ONLY the README content.
+        PROMPT
+      end
+
+      def clean_response(text)
+        return "" if text.nil? || text.empty?
+
+        # Remove common artifacts
+        text.gsub(/^Here's.*?:\s*/i, "")
+            .gsub(/^```\w*\n/, "")
+            .gsub(/\n```$/, "")
+            .strip
+      end
+
+      def parse_topics(text)
+        return [] if text.nil? || text.empty?
+
+        # Extract comma-separated values
+        topics = text.split(",").map(&:strip).map(&:downcase)
+
+        # Remove duplicates, filter out empty, limit to 8
+        topics.reject(&:empty?).uniq.first(8)
+      end
+    end
+  end
+end
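A similar sketch for the local-model path, again derived only from the code above. It assumes an Ollama daemon at the default endpoint and that `require "repose"` makes the class available; the context values are hypothetical, and per the code `list_models` simply returns an empty array when the daemon is unreachable.

```ruby
require "repose"

context = {
  name: "log-parser-cli",
  language: "Ruby",
  framework: nil,
  purpose: "parsing structured log files"
}

# Defaults to http://localhost:11434 and the "mistral" model
# (overridable via OLLAMA_ENDPOINT / OLLAMA_MODEL).
provider = Repose::AI::OllamaProvider.new

puts provider.list_models.inspect  # [] when the Ollama daemon is not reachable

if provider.available?
  puts provider.generate_description(context)
  puts provider.generate_topics(context).inspect
end
```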
data/lib/repose/ai_generator.rb
@@ -0,0 +1,172 @@
+# frozen_string_literal: true
+
+require_relative "ai/gemini_provider"
+require_relative "ai/ollama_provider"
+
+module Repose
+  class AIGenerator
+    attr_reader :provider
+
+    def initialize(provider: nil)
+      @provider = select_provider(provider)
+    end
+
+    def generate(context)
+      {
+        name: context[:name],
+        description: generate_description(context),
+        topics: generate_topics(context),
+        readme: generate_readme(context)
+      }
+    end
+
+    def use_ai?
+      !@provider.nil?
+    end
+
+    private
+
+    def select_provider(provider_name)
+      return nil if provider_name == :none || provider_name == false
+
+      case provider_name&.to_sym
+      when :gemini
+        AI::GeminiProvider.new if gemini_available?
+      when :ollama
+        AI::OllamaProvider.new if ollama_available?
+      when nil
+        # Auto-detect: prefer Gemini, fallback to Ollama, then none
+        if gemini_available?
+          AI::GeminiProvider.new
+        elsif ollama_available?
+          AI::OllamaProvider.new
+        end
+      else
+        raise ArgumentError, "Unknown AI provider: #{provider_name}"
+      end
+    rescue Repose::ConfigurationError, Repose::APIError
+      nil # Fallback to template-based generation
+    end
+
+    def gemini_available?
+      return false unless ENV["GEMINI_API_KEY"]
+
+      AI::GeminiProvider.new.available?
+    rescue StandardError
+      false
+    end
+
+    def ollama_available?
+      AI::OllamaProvider.new.available?
+    rescue StandardError
+      false
+    end
+
+    def generate_description(context)
+      if use_ai?
+        @provider.generate_description(context)
+      else
+        generate_fallback_description(context)
+      end
+    rescue Repose::APIError, Repose::AuthenticationError
+      generate_fallback_description(context)
+    end
+
+    def generate_topics(context)
+      if use_ai?
+        @provider.generate_topics(context)
+      else
+        generate_fallback_topics(context)
+      end
+    rescue Repose::APIError, Repose::AuthenticationError
+      generate_fallback_topics(context)
+    end
+
+    def generate_readme(context)
+      if use_ai?
+        @provider.generate_readme(context)
+      else
+        generate_fallback_readme(context)
+      end
+    rescue Repose::APIError, Repose::AuthenticationError
+      generate_fallback_readme(context)
+    end
+
+    def generate_fallback_description(context)
+      # Fallback description generation without AI
+      base_desc = "A #{context[:language]}"
+      base_desc += " #{context[:framework]}" if context[:framework]
+      base_desc += " project"
+      base_desc += " for #{context[:purpose]}" if context[:purpose] && !context[:purpose].empty?
+
+      base_desc.capitalize
+    end
+
+    def generate_fallback_topics(context)
+      # Basic topic generation without AI
+      topics = []
+      topics << context[:language].downcase if context[:language]
+      topics << context[:framework].downcase if context[:framework]
+
+      # Add some common topics based on name patterns
+      name_lower = context[:name].downcase
+      topics << "api" if name_lower.include?("api")
+      topics << "web" if name_lower.include?("web") || context[:framework]&.downcase&.include?("rails")
+      topics << "cli" if name_lower.include?("cli") || name_lower.include?("command")
+      topics << "tool" if name_lower.include?("tool") || name_lower.include?("util")
+
+      topics.uniq.first(8)
+    end
+
+    def generate_fallback_readme(context)
+      title = context[:name].split(/[-_]/).map(&:capitalize).join(" ")
+
+      <<~README
+        # #{title}
+
+        A #{context[:language]} #{context[:framework] ? "#{context[:framework]} " : ""}project#{context[:purpose] && !context[:purpose].empty? ? " for #{context[:purpose]}" : ""}.
+
+        ## Installation
+
+        ```bash
+        git clone https://github.com/yourusername/#{context[:name]}.git
+        cd #{context[:name]}
+        ```
+
+        #{language_specific_install_instructions(context[:language])}
+
+        ## Usage
+
+        More documentation coming soon!
+
+        ## Contributing
+
+        1. Fork the repository
+        2. Create a feature branch
+        3. Make your changes
+        4. Submit a pull request
+
+        ## License
+
+        This project is licensed under the MIT License.
+      README
+    end
+
+    def language_specific_install_instructions(language)
+      case language&.downcase
+      when "ruby"
+        "```bash\nbundle install\n```"
+      when "python"
+        "```bash\npip install -r requirements.txt\n```"
+      when "javascript", "typescript"
+        "```bash\nnpm install\n```"
+      when "go"
+        "```bash\ngo mod download\n```"
+      when "rust"
+        "```bash\ncargo build\n```"
+      else
+        ""
+      end
+    end
+  end
+end
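Finally, a minimal sketch of the facade above, which ties the two providers together with the template fallbacks. It assumes `require "repose"` loads the class; the repository name, purpose, and output path are hypothetical. Per the code, passing no provider auto-detects Gemini then Ollama, `:gemini`/`:ollama` force a provider, and `:none` skips AI entirely.

```ruby
require "repose"

context = {
  name: "repo-setup-tool",
  language: "Ruby",
  framework: nil,
  purpose: "bootstrapping GitHub repositories"
}

generator = Repose::AIGenerator.new                       # auto-detect
# generator = Repose::AIGenerator.new(provider: :ollama)  # force Ollama
# generator = Repose::AIGenerator.new(provider: :none)    # template-only fallbacks

result = generator.generate(context)  # => { name:, description:, topics:, readme: }
puts result[:description]
puts result[:topics].inspect
File.write("README.md", result[:readme])
```

Note the design choice visible in the rescue clauses: any APIError or AuthenticationError from a provider silently falls back to the template-based generators, so `generate` should return usable output even with no working AI backend.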