openai_101 0.0.2 → 1.0.0
- checksums.yaml +4 -4
- data/.rubocop.yml +20 -1
- data/CHANGELOG.md +14 -0
- data/README.md +63 -39
- data/bin/automate-chatgpt.js +60 -0
- data/bin/automate-midjourney.js +75 -0
- data/bin/convert_webp_to_png.rb +86 -0
- data/bin/gpt_context_gatherer.rb +63 -0
- data/course/course.md +64 -0
- data/course/images/beautiful-llm-models.png +0 -0
- data/course/images/prompts/beautiful-llm-models.txt +1 -0
- data/course/images/prompts/series-2-appydave-gpt-summit.txt +1 -0
- data/course/images/series-2-appydave-gpt-summit.png +0 -0
- data/gpt-context/openai-documentation.md +498 -0
- data/gpt-context/ruby-openai-documenation.md +747 -0
- data/gpt-context/theme-prompts.csv +21 -0
- data/lib/openai_101/config/openai.rb +15 -0
- data/lib/openai_101/tools/automate-images-chatgpt.js +60 -0
- data/lib/openai_101/tools/automate-images-midjourney.js +75 -0
- data/lib/openai_101/tools/bulk_image_bot/base_automator.js +53 -0
- data/lib/openai_101/tools/bulk_image_bot/chatgpt_automator.js +27 -0
- data/lib/openai_101/tools/bulk_image_bot/midjourney_automator.js +49 -0
- data/lib/openai_101/tools/clean_ruby_errors.rb +274 -0
- data/lib/openai_101/tools/edl_to_chapters.rb +56 -0
- data/lib/openai_101/tools/file_content_gatherer.rb +36 -0
- data/lib/openai_101/tools/webp_to_png.rb +124 -0
- data/lib/openai_101/version.rb +1 -1
- data/lib/openai_101.rb +9 -0
- data/package-lock.json +1154 -159
- data/package.json +4 -1
- metadata +83 -6
- data/.builders/_.rb +0 -1
- data/.builders/boot.rb +0 -39
- data/.builders/generators/01-bootstrap.rb +0 -134
@@ -0,0 +1,21 @@
+Theme,Prompt
+"Futuristic Cityscapes", "A bustling futuristic cityscape where AI and humans coexist seamlessly, with drones flying overhead and digital billboards displaying AI-generated art"
+"Digital Renaissance", "A digital recreation of a Renaissance scene, where figures interact with AI devices, blending classical beauty with modern technology"
+"AI in Nature", "A serene forest where trees and wildlife are monitored and cared for by gentle AI robots, highlighting a balance between technology and natural preservation"
+"Cybernetic Organisms", "A close-up of a flower with digital circuit patterns on its petals, symbolizing the fusion of organic life with AI enhancements"
+"Virtual Reality Worlds", "An individual wearing a VR headset, transported to a breathtaking AI-created fantasy world, showcasing the limitless possibilities of virtual reality"
+"AI as a Companion", "A friendly AI robot walking alongside a human in a park, offering guidance and companionship, symbolizing the supportive role of AI"
+"Abstract Data Visualizations", "An abstract representation of a neural network, with flowing streams of data and colorful nodes, visualizing the complexity and beauty of AI algorithms"
+"Steampunk AI", "A steampunk-inspired workshop with AI robots made of brass and gears, working alongside humans to invent new technologies"
+"Space Exploration", "A robotic AI explorer navigating the rugged terrain of an alien planet, with a distant galaxy shining brightly in the sky above"
+"Underwater Discoveries", "AI-powered submarines exploring coral reefs and ancient underwater ruins, illuminating the depths with advanced technology"
+"Mythical Integration", "A majestic phoenix with digital wings soaring above a futuristic city, blending mythology with advanced AI technology"
+"Digital Nomads", "A group of tech-savvy digital nomads in a remote, exotic location, using AI to work and communicate from anywhere in the world"
+"Retro-Futurism", "A scene set in a neon-lit arcade from the 1980s, where gamers are playing AI-developed video games that predict future events"
+"AI Guardians", "A powerful AI guardian overseeing the digital safety of a smart city, visualized as a holographic entity projecting from a city's central tower"
+"Dystopian vs Utopian AI", "A split scene showing one side as a dystopian world overwhelmed by AI surveillance and control, and the other as a utopian society where AI enhances quality of life"
+"Quantum Computing", "An ethereal quantum realm where qubits float in a space-like environment, representing the power and mystery of quantum computing in AI"
+"AI in Sports and Fitness", "An athletic training session where an AI coach provides personalized feedback and strategies to an athlete wearing smart wearables"
+"Education Through AI", "A futuristic classroom where holographic projections of AI tutors teach students from around the globe in an interactive learning environment"
+"AI in Art and Music", "An AI painting a masterpiece while another composes a symphony, showcasing the creative potential of AI in art and music"
+"AI as a Time Traveler", "An AI entity interacting with historical figures across different eras, from ancient civilizations to future societies, illustrating the timeless impact of AI"
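
The CSV above pairs a theme with an image prompt. A minimal sketch, not part of this release, of pulling the Prompt column into the prompts.txt file the automation scripts below read from; file paths are illustrative, and the line is split on the first comma because each prompt is quoted after a comma and a space rather than being strictly CSV-quoted.

# Extract the Prompt column into one-prompt-per-line prompts.txt (paths are illustrative)
lines = File.readlines('data/gpt-context/theme-prompts.csv', chomp: true).drop(1) # skip the Theme,Prompt header
prompts = lines.map { |line| line.split(',', 2).last.strip.delete_prefix('"').delete_suffix('"') }
File.write('prompts.txt', prompts.join("\n") + "\n")
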
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+require 'dotenv'
+# Dotenv.load(".env.#{ENV['APP_ENV']}") # Loads environment-specific dotenv file.
+Dotenv.load('.env')
+
+OpenAI.configure do |config|
+  tools_enabled = ENV.fetch('TOOLS_ENABLED', 'false')
+
+  if tools_enabled == 'true'
+    puts 'OpenAI Tools are enabled'
+    config.access_token = ENV.fetch('OPENAI_ACCESS_TOKEN')
+    config.organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID', nil)
+  end
+end
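
The configuration block above only sets credentials when TOOLS_ENABLED is true. A minimal sketch of what a script might do once that block has run, assuming the ruby-openai gem is installed, loading openai_101 runs this configuration, and a .env file supplies TOOLS_ENABLED=true, OPENAI_ACCESS_TOKEN, and (optionally) OPENAI_ORGANIZATION_ID:

require 'openai_101' # assumption: loading the gem evaluates the OpenAI.configure block above

client = OpenAI::Client.new # ruby-openai picks up the configured access token
response = client.chat(
  parameters: {
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello' }]
  }
)
puts response.dig('choices', 0, 'message', 'content')
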
@@ -0,0 +1,60 @@
+const { Command } = require('commander');
+const fs = require('fs');
+const robot = require("robotjs");
+
+const program = new Command();
+
+program
+  .option('-f, --file <filename>', 'The file to read prompts from', 'prompts.txt')
+  .option('-o, --completed-file <filename>', 'The file to write completed prompts to', 'completed.txt')
+  .option('-i, --interval <seconds>', 'The interval in seconds between prompts', 80)
+  .option('-m, --max-images <number>', 'The maximum number of images to process', 20);
+
+program.parse(process.argv);
+
+function read_prompts_from_file(file, maxImages) {
+  const prompts = fs.readFileSync(file, 'utf-8').split('\n').filter(Boolean);
+  return prompts.slice(0, maxImages);
+}
+
+function remove_prompt_from_file(file, prompt) {
+  const prompts = fs.readFileSync(file, 'utf-8').split('\n').filter(Boolean);
+  const index = prompts.indexOf(prompt);
+  if (index !== -1) {
+    prompts.splice(index, 1);
+    fs.writeFileSync(file, prompts.join('\n'));
+  }
+}
+
+function append_prompt_to_file(file, prompt) {
+  fs.appendFileSync(file, `${prompt}\n`);
+}
+
+function wait(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+async function send_prompt_to_chat(prompt) {
+  const formattedPrompt = `create image: ${prompt}`;
+  robot.typeString(formattedPrompt);
+  robot.keyTap('enter');
+}
+
+async function start() {
+  console.log('Starting Image Creation Automation');
+  console.log('Please open the ChatGPT interface where you intend to use this script.');
+  console.log('Automation will begin in 10 seconds.');
+  await wait(10000);
+
+  const prompts = read_prompts_from_file(program.opts().file, parseInt(program.opts().maxImages));
+  for (let prompt of prompts) {
+    console.log(`Sending prompt "${prompt}"...`);
+    await send_prompt_to_chat(prompt);
+    await wait(program.opts().interval * 1000);
+    remove_prompt_from_file(program.opts().file, prompt);
+    append_prompt_to_file(program.opts().completedFile, prompt);
+  }
+  console.log('Automation complete.');
+}
+
+start();
@@ -0,0 +1,75 @@
+const { Command } = require('commander');
+const fs = require('fs');
+const robot = require("robotjs");
+
+const program = new Command();
+
+program
+  .option('-f, --file <filename>', 'The file to read prompts from', 'prompts.txt')
+  .option('-o, --completed-file <filename>', 'The file to write completed prompts to', 'completed.txt')
+  .option('-i, --interval <seconds>', 'The interval in seconds between prompts', 4)
+  .option('-p, --prompts-per-session <prompts>', 'The number of prompts to process in a session', 7)
+  .option('-w, --wait-between-sessions <seconds>', 'The number of seconds to wait between sessions', 180);
+
+program.parse(process.argv);
+
+function read_next_prompt_from_file(file) {
+  const prompts = fs.readFileSync(file, 'utf-8').split('\n').filter(Boolean);
+  return prompts.shift();
+}
+
+function remove_prompt_from_file(file, prompt) {
+  const lockfile = `${file}.lock`;
+  fs.writeFileSync(lockfile, '', { flag: 'wx' });
+  const prompts = fs.readFileSync(file, 'utf-8').split('\n').filter(Boolean);
+  const index = prompts.indexOf(prompt);
+  prompts.splice(index, 1);
+  fs.writeFileSync(file, prompts.join('\n'));
+  fs.unlinkSync(lockfile);
+}
+
+function append_prompt_to_file(file, prompt) {
+  const lockfile = `${file}.lock`;
+  fs.writeFileSync(lockfile, '', { flag: 'wx' });
+  fs.appendFileSync(file, `${prompt}\n`);
+  fs.unlinkSync(lockfile);
+}
+
+function wait(ms) {
+  const end = Date.now() + ms;
+  while (Date.now() < end) {
+    // Wait
+  }
+}
+
+function send_prompt_to_discord(prompt) {
+  const formattedPrompt = `/imagine prompt: ${prompt}`;
+  robot.typeString(formattedPrompt);
+  robot.keyTap('enter');
+}
+
+function start() {
+  console.log('Starting MidJourney Automation');
+  console.log('Please open Discord and navigate to the MidJourney channel.');
+  console.log('Automation will begin in 10 seconds.');
+  wait(10 * 1000);
+
+  let index = 0;
+  let prompt = read_next_prompt_from_file(program.opts().file);
+  while (prompt) {
+    console.log(`Sending prompt "${prompt}"...`);
+    send_prompt_to_discord(prompt);
+    wait(program.opts().interval * 1000);
+    remove_prompt_from_file(program.opts().file, prompt);
+    append_prompt_to_file(program.opts().completedFile, prompt);
+    index++;
+    if (index % program.opts().promptsPerSession === 0) {
+      console.log(`Pausing for ${program.opts().waitBetweenSessions} seconds.`);
+      wait(program.opts().waitBetweenSessions * 1000);
+    }
+    prompt = read_next_prompt_from_file(program.opts().file);
+  }
+  console.log('Automation complete.');
+}
+
+start();
@@ -0,0 +1,53 @@
+const fs = require('fs');
+const { promisify } = require('util');
+const robot = require("robotjs");
+
+const wait = promisify(setTimeout);
+
+class BaseAutomator {
+  constructor({ file, completedFile, interval = 5, maxImages = 20 }) {
+    this.file = file;
+    this.completedFile = completedFile;
+    this.interval = interval;
+    this.maxImages = maxImages;
+  }
+
+  async readPromptsFromFile() {
+    try {
+      const content = await fs.promises.readFile(this.file, 'utf-8');
+      return content.split('\n').filter(Boolean).slice(0, this.maxImages);
+    } catch (error) {
+      console.error('Error reading prompts from file:', error);
+      return [];
+    }
+  }
+
+  async appendToCompletedFile(prompt) {
+    try {
+      await fs.promises.appendFile(this.completedFile, `${prompt}\n`);
+    } catch (error) {
+      console.error('Error appending prompt to completed file:', error);
+    }
+  }
+
+  async removeFromInputFile(prompt) {
+    try {
+      let prompts = await this.readPromptsFromFile();
+      prompts = prompts.filter(line => line !== prompt);
+      await fs.promises.writeFile(this.file, prompts.join('\n'));
+    } catch (error) {
+      console.error('Error removing prompt from input file:', error);
+    }
+  }
+
+  async waitInterval() {
+    await wait(this.interval * 1000);
+  }
+
+  async sendKeystrokes(string) {
+    robot.typeString(string);
+    robot.keyTap('enter');
+  }
+}
+
+module.exports = BaseAutomator;
@@ -0,0 +1,27 @@
+const BaseAutomator = require('./base_automator');
+
+class ChatGPTAutomator extends BaseAutomator {
+  constructor(options) {
+    super(options);
+  }
+
+  async sendPromptToChat(prompt) {
+    const formattedPrompt = `create image: ${prompt}`;
+    await this.sendKeystrokes(formattedPrompt);
+    console.log(`Sending prompt "${prompt}" to ChatGPT...`);
+  }
+
+  async start() {
+    console.log('Starting ChatGPT Image Creation Automation');
+    const prompts = await this.readPromptsFromFile();
+    for (const prompt of prompts) {
+      await this.sendPromptToChat(prompt);
+      await this.waitInterval();
+      await this.removeFromInputPromptFile(prompt);
+      await this.appendToCompletedPromptFile(prompt);
+    }
+    console.log('Automation complete.');
+  }
+}
+
+module.exports = ChatGPTAutomator;
@@ -0,0 +1,49 @@
+const BaseAutomator = require('./base_automator');
+const fs = require('fs');
+const robot = require("robotjs");
+
+class MidJourneyAutomator extends BaseAutomator {
+  constructor(options) {
+    super(options);
+  }
+
+  async sendPromptToDiscord(prompt) {
+    const formattedPrompt = this.formatPrompt(prompt);
+    this.sendKeystrokes(formattedPrompt);
+  }
+
+  formatPrompt(prompt) {
+    return `/imagine prompt: ${prompt}`;
+  }
+
+  sendKeystrokes(string) {
+    robot.typeString(string);
+    robot.keyTap('enter');
+  }
+
+  async start() {
+    console.log('Starting MidJourney Automation');
+    console.log('Please open Discord and navigate to the MidJourney channel.');
+    console.log('Automation will begin in 10 seconds.');
+    await this.wait(10 * 1000);
+
+    let index = 0;
+    let prompt = await this.readNextPromptFromFile();
+    while (prompt) {
+      console.log(`Sending prompt "${prompt}"...`);
+      await this.sendPromptToDiscord(prompt);
+      await this.wait(this.interval * 1000);
+      await this.removePromptFromFile(prompt);
+      await this.appendToCompletedFile(this.completedFile, prompt);
+      index++;
+      if (index % this.maxImages === 0) {
+        console.log(`Pausing for ${this.waitBetweenSessions} seconds.`);
+        await this.wait(this.waitBetweenSessions * 1000);
+      }
+      prompt = await this.readNextPromptFromFile();
+    }
+    console.log('Automation complete.');
+  }
+}
+
+module.exports = MidJourneyAutomator;
@@ -0,0 +1,274 @@
+# frozen_string_literal: true
+
+module Openai101
+  module Tools
+    # Clean Ruby Errors
+    module ReportFormatter
+      def print_column_names(column_definitions)
+        headings = column_definitions.map { |_, col| col[:label].ljust(col[:width]) }.join(' | ')
+        separator_length = column_definitions.sum { |_, col| col[:width] } + ((column_definitions.keys.size - 1) * 3)
+
+        puts '-' * separator_length
+        puts headings
+        puts '-' * separator_length
+      end
+
+      def print_line(column_definitions, **column_key_values)
+        line = column_definitions.map do |key, col|
+          value = column_key_values[key] || ''
+          value.to_s.ljust(col[:width])
+        end.join(' | ')
+        puts line
+      end
+    end
+
+    # Convert EDL to YouTube description chapters
+    class CleanRubyErrors
+      include ReportFormatter
+
+      attr_reader :file
+      attr_reader :error
+      attr_reader :errors
+
+      def initialize(file)
+        @file = file
+        @errors = []
+      end
+
+      def parse
+        capture_code = []
+        capture_stack = []
+
+        lines.each do |line|
+          @section = detect_section(line)
+
+          case section
+          when :new_error
+            add_stack_to_error(error, capture_stack) if capture_stack.any? && error
+
+            @error = initialize_error(line)
+            errors << error
+
+          when :failure_error1
+            error[:error_technique] = '1'
+            capture_code = []
+            @section = :capture_failure_error
+          when :failure_error2
+            error[:error_technique] = '2'
+            capture_code = []
+            @section = :capture_failure_error
+          when :capture_failure_error
+            capture_code << line.rstrip
+          when :actual_error
+            error[:error_type] = line.strip
+            error[:code_section] = format_code(capture_code)
+            capture_stack = []
+            @section = :capture_stack
+          when :capture_stack
+            capture_stack << line.strip
+          end
+        end
+
+        add_stack_to_error(error, capture_stack) if capture_stack.any? && error
+      end
+
+      def print_errors(errors)
+        column_definitions = {
+          test_number: { label: '#', width: 5 },
+          test_technique: { label: 'Style', width: 7 },
+          error_at: { label: 'Error At', width: 70 },
+          error_by: { label: 'Error By', width: 80 },
+          error_type: { label: 'Error Type', width: 20 },
+          error_message: { label: 'Error Message', width: 180 }
+        }
+
+        print_column_names(column_definitions)
+
+        errors.each do |error|
+          print_line(column_definitions,
+                     test_number: error[:test_number],
+                     test_technique: error[:error_technique],
+                     error_at: "#{error[:error_at][:file]}:#{error[:error_at][:line]}",
+                     error_by: "#{error[:error_by][:file]}:#{error[:error_by][:line]}",
+                     error_type: error[:error_type],
+                     error_message: error[:error_message])
+        end
+
+        print_column_names(column_definitions)
+      end
+
+      def print_aggregated_error(errors)
+        aggregated_errors = aggregate_errors(errors)
+        # puts JSON.pretty_generate(aggregated_errors.first)
+
+        # puts '----------------------'
+        # puts aggregated_errors.first
+
+        column_definitions = {
+          error_type: { label: 'Error Type', width: 20 },
+          error_message: { label: 'Error Message', width: 180 },
+          error_at_file: { label: 'Error At File', width: 70 },
+          error_at_lines: { label: 'L#', width: 5 },
+          error_by_file: { label: 'Error By File', width: 80 },
+          error_by_lines: { label: 'Lines', width: 30 }
+        }
+
+        print_column_names(column_definitions)
+
+        aggregated_errors.each do |error|
+          print_line(column_definitions,
+                     error_type: error[:error_type],
+                     error_message: error[:error_message],
+                     error_at_file: error[:error_at_file],
+                     error_at_lines: error[:error_at_lines].join(', '),
+                     error_by_file: error[:error_by_file],
+                     error_by_lines: error[:error_by_lines].join(', '))
+        end
+
+        print_column_names(column_definitions)
+      end
+
+      # # column_definitions = {
+      # #   error_message: { label: "Error Message", width: 180 },
+      # # }
+
+      # # # Print the report
+      # # puts "Error Message | Source File(Line Numbers) | Spec File(Line Numbers)"
+      # # puts "-" * 100
+
+      # # aggregated_errors.each do |(message, source_file, spec_file), lines|
+      # #   source_lines = lines[:source_lines].uniq.sort.join(', ')
+      # #   spec_lines = lines[:spec_lines].uniq.sort.join(', ')
+      # #   puts "#{message} | #{source_file}(#{source_lines}) | #{spec_file}(#{spec_lines})"
+      # # end
+      # end
+
+      private
+
+      def lines
+        @lines ||= File.readlines(file)
+      end
+
+      def detect_section(line)
+        return :new_error if line.match?(/^\d+\)/)
+        return :failure_error2 if line.include?('Failure/Error: ') # Note the space
+        return :failure_error1 if line.include?('Failure/Error:')
+        return :actual_error if line.include?('Error:')

+        section
+      end
+
+      def section
+        @section ||= :start
+      end
+
+      def initialize_error(line)
+        {
+          error_type: '',
+          error_technique: '',
+          test_number: extract_error_number(line),
+          code_section: [],
+          error_message: '',
+          error_messages: [],
+          stack: [],
+          error_at: { file: '', line: 0 },
+          error_by: { file: '', line: 0 }
+        }
+      end
+
+      def extract_error_number(line)
+        match = line.match(/^(\d+)\)/)
+        match ? match[1] : nil
+      end
+
+      def format_code(code_lines)
+        return 'Investigate!!!' if code_lines.empty?
+
+        first_line_spaces = code_lines.first[/\A */].size
+        code_lines.map { |line| line[first_line_spaces..] }.compact
+      end
+
+      def add_stack_to_error(error, stack_lines)
+        error_messages, stack_trace = split_strack_from_errors(stack_lines)
+
+        error[:error_message] = error_messages.join(' ')
+        error[:error_messages] = error_messages
+        error[:stack] = stack_trace
+
+        error[:error_at] = extract_error_location(stack_trace)
+        error[:error_by] = extract_test_location(stack_trace)
+      end
+
+      def split_strack_from_errors(stack_lines)
+        error_messages, stack_trace = stack_lines.partition { |line| !line.start_with?('#') }
+
+        clean_error_messages = error_messages.reject(&:empty?)
+        clean_stack_trace = stack_trace.map { |line| line.sub(/^#\s*/, '') }
+
+        [clean_error_messages, clean_stack_trace]
+      end
+
+      def extract_error_location(stack_trace)
+        return { file: '', line: 0 } if stack_trace.empty?
+
+        # Matches on file paths ending in .rb followed by a colon and a line number.
+        # It captures the file path and line number into named groups 'file' and 'line'.
+        # Example Match: "./app/models/user.rb:13" => file: "./app/models/user.rb", line: "13"
+        match = stack_trace.first.match(/(?<file>.*\.rb):(?<line>\d+)/)
+        match ? { file: match[:file], line: match[:line] } : { file: 'XXXXXX1', line: 0 }
+      end
+
+      def extract_test_location(stack_trace)
+        stack_trace.reverse_each do |trace_line|
+          # Searches for file paths that specifically end with _spec.rb (indicating a test file),
+          # followed by a colon and a line number. It captures the test file path and line number.
+          # Example Match: "./spec/models/user_spec.rb:26" => file: "./spec/models/user_spec.rb", line: "26"
+          if (match = trace_line.match(/(?<file>.*_spec\.rb):(?<line>\d+)/))
+            return { file: match[:file], line: match[:line] }
+          end
+        end
+        { file: 'XXXXXX2', line: 0 } # Return a default value if no test location is found
+      end
+
+      def aggregate_errors(errors)
+        # Initialize an empty hash for aggregations
+        aggregated_errors = {}
+
+        errors.each do |error|
+          # Create a unique key for each aggregation based on error_message, error_at[:file], and error_by[:file]
+          key = [
+            error[:error_message],
+            error[:error_at][:file],
+            error[:error_by][:file]
+          ].join('|')
+
+          # If the aggregation key doesn't exist, initialize it
+          aggregated_errors[key] ||= {
+            error_type: error[:error_type],
+            error_message: error[:error_message],
+            code_section: error[:code_section], # Assuming code_section is the same for aggregated errors
+            error_at: { file: error[:error_at][:file], lines: [] },
+            error_by: { file: error[:error_by][:file], lines: [] }
+          }
+
+          # Append unique error messages and line numbers for error_at and error_by
+          aggregated_errors[key][:error_at][:lines] |= [error[:error_at][:line]]
+          aggregated_errors[key][:error_by][:lines] |= [error[:error_by][:line]]
+        end
+
+        # Convert the aggregated_errors hash back into an array of values
+        aggregated_errors.values.map do |error|
+          {
+            error_type: error[:error_type],
+            error_message: error[:error_message],
+            code_section: error[:code_section].join("\n"),
+            error_at_file: error[:error_at][:file],
+            error_at_lines: error[:error_at][:lines],
+            error_by_file: error[:error_by][:file],
+            error_by_lines: error[:error_by][:lines]
+          }
+        end
+      end
+    end
+  end
+end
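
A minimal usage sketch for the class above: parse a captured RSpec failure log and print both the per-failure table and the aggregated report. The log path is illustrative.

tool = Openai101::Tools::CleanRubyErrors.new('tmp/rspec-errors.log') # hypothetical log path
tool.parse
tool.print_errors(tool.errors)           # one row per failure
tool.print_aggregated_error(tool.errors) # failures grouped by message and file
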
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module Openai101
+  module Tools
+    # Convert EDL to YouTube description chapters
+    class EdlToChapters
+      attr_reader :content
+
+      def initialize(content: nil, file: nil)
+        @content = content
+        @content = File.read(file) if file
+      end
+
+      def convert
+        # TITLE: a36
+        # FCM: NON-DROP FRAME
+
+        # 001 001 V C 00:00:09:16 00:00:09:17 00:00:09:16 00:00:09:17
+        # |C:ResolveColorBlue |M:Hey Code Voice Commands |D:1
+
+        # 002 001 V C 00:00:53:13 00:00:53:14 00:00:53:13 00:00:53:14
+        # |C:ResolveColorBlue |M:Keyboard Shortcuts |D:1
+
+        # 003 001 V C 00:01:17:20 00:01:17:21 00:01:17:20 00:01:17:21
+        # |C:ResolveColorBlue |M:Create code using voice |D:1
+
+        # 004 001 V C 00:02:17:25 00:02:17:26 00:02:17:25 00:02:17:26
+        # |C:ResolveColorBlue |M:Next steps |D:1
+
+        chapters = []
+        chapter = nil
+        content.each_line do |line|
+          line = line.strip
+
+          if line =~ /^\d{3}/
+            chapter = build_chapter(line)
+            chapters << chapter
+            next
+          end
+
+          chapter[:description] = line.match(/\|M:(.+?)\|/)[1].strip if chapter && line.match(/\|M:(.+?)\|/) && line.match(/\|M:(.+?)\|/)[1]
+        end
+        chapters.map { |item| "#{item[:time]} #{item[:description]}" }.join("\n")
+      end
+
+      def build_chapter(line)
+        time_code = line.split(/\s+/)[5] # Extract time code
+        hours, minutes, seconds, frames = time_code.split(':').map(&:to_i)
+        total_seconds = (hours * 3600) + (minutes * 60) + seconds + (frames / 30.0)
+        formatted_time = format('%<hours>02d:%<minutes>02d:%<seconds>02d', hours: total_seconds / 3600, minutes: (total_seconds / 60) % 60, seconds: total_seconds % 60)
+
+        { time: formatted_time }
+      end
+    end
+  end
+end
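
A minimal usage sketch for the class above: turn a DaVinci Resolve EDL marker export into the "HH:MM:SS description" lines YouTube expects in a video description. The file path is illustrative.

chapters = Openai101::Tools::EdlToChapters.new(file: 'exports/a36.edl').convert # hypothetical path
puts chapters # e.g. "00:00:09 Hey Code Voice Commands"
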
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module Openai101
+  module Tools
+    # Gather content from files in a directory, used by gpt_context_gatherer.rb
+    class FileContentGatherer
+      attr_reader :include_patterns, :exclude_patterns
+
+      def initialize(include_pattern: nil, include_patterns: [], exclude_patterns: [])
+        @include_patterns = Array(include_pattern) + include_patterns
+        @exclude_patterns = exclude_patterns
+      end
+
+      def build
+        concatenated_content = []
+
+        include_patterns.each do |pattern|
+          Dir.glob(pattern).each do |file_path|
+            next if excluded?(file_path) || File.directory?(file_path)
+
+            content = "# file: #{file_path}\n\n#{File.read(file_path)}"
+            concatenated_content << content
+          end
+        end
+
+        concatenated_content.join("\n\n")
+      end
+
+      private
+
+      def excluded?(file_path)
+        exclude_patterns.any? { |pattern| File.fnmatch(pattern, file_path) }
+      end
+    end
+  end
+end
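
A minimal usage sketch for the class above: concatenate every Ruby file under lib/ into one string, each chunk prefixed with its file path, while skipping specs. The glob patterns are illustrative.

gatherer = Openai101::Tools::FileContentGatherer.new(
  include_patterns: ['lib/**/*.rb'],   # hypothetical include glob
  exclude_patterns: ['**/spec/**/*']   # hypothetical exclude glob
)
puts gatherer.build
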