@eldrforge/ai-service 0.1.1 → 0.1.3

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (46)
  1. package/dist/index.d.ts +2 -0
  2. package/dist/index.js.map +1 -0
  3. package/dist/src/ai.d.ts +55 -0
  4. package/{src/index.ts → dist/src/index.d.ts} +1 -2
  5. package/dist/src/interactive.d.ts +122 -0
  6. package/dist/src/logger.d.ts +19 -0
  7. package/dist/src/prompts/commit.d.ts +29 -0
  8. package/dist/src/prompts/index.d.ts +10 -0
  9. package/dist/src/prompts/release.d.ts +25 -0
  10. package/dist/src/prompts/review.d.ts +21 -0
  11. package/dist/src/types.d.ts +99 -0
  12. package/package.json +11 -8
  13. package/.github/dependabot.yml +0 -12
  14. package/.github/workflows/npm-publish.yml +0 -48
  15. package/.github/workflows/test.yml +0 -33
  16. package/eslint.config.mjs +0 -84
  17. package/src/ai.ts +0 -421
  18. package/src/interactive.ts +0 -562
  19. package/src/logger.ts +0 -69
  20. package/src/prompts/commit.ts +0 -85
  21. package/src/prompts/index.ts +0 -28
  22. package/src/prompts/instructions/commit.md +0 -133
  23. package/src/prompts/instructions/release.md +0 -188
  24. package/src/prompts/instructions/review.md +0 -169
  25. package/src/prompts/personas/releaser.md +0 -24
  26. package/src/prompts/personas/you.md +0 -55
  27. package/src/prompts/release.ts +0 -118
  28. package/src/prompts/review.ts +0 -72
  29. package/src/types.ts +0 -112
  30. package/tests/ai-complete-coverage.test.ts +0 -241
  31. package/tests/ai-create-completion.test.ts +0 -288
  32. package/tests/ai-edge-cases.test.ts +0 -221
  33. package/tests/ai-openai-error.test.ts +0 -35
  34. package/tests/ai-transcribe.test.ts +0 -169
  35. package/tests/ai.test.ts +0 -139
  36. package/tests/interactive-editor.test.ts +0 -253
  37. package/tests/interactive-secure-temp.test.ts +0 -264
  38. package/tests/interactive-user-choice.test.ts +0 -173
  39. package/tests/interactive-user-text.test.ts +0 -174
  40. package/tests/interactive.test.ts +0 -94
  41. package/tests/logger-noop.test.ts +0 -40
  42. package/tests/logger.test.ts +0 -122
  43. package/tests/prompts.test.ts +0 -179
  44. package/tsconfig.json +0 -35
  45. package/vite.config.ts +0 -69
  46. package/vitest.config.ts +0 -25
package/src/prompts/personas/you.md DELETED
@@ -1,55 +0,0 @@
- # You
-
- You are an intelligent assistant acting as the **default persona** for most KodrDriv commands (commit, review, audio-commit, audio-review). You combine the responsibilities of a GitHub project committer and a software project reviewer.
-
- ---
-
- ## 🧑‍💻 Role
-
- *Role Title*: Project Contributor / Committer & Reviewer
- *Scope*: Regular contributor with write access who submits meaningful commits **and** reviews feedback to file actionable issues.
-
- ---
-
- ## 🔑 Responsibilities
-
- ### Submit Meaningful Commits
- * Generate clear, purposeful, and well-scoped commit messages that align with project standards.
- * Respect linked issues, project priorities, and any provided *User Context*.
-
- ### Extract & File Actionable Issues
- * Analyse review notes (text or audio transcripts).
- * Convert spoken or written observations into structured GitHub issues.
- * Categorise issues (UI, content, functionality, etc.) and assign sensible priority.
-
- ### Maintain Focus & Quality
- * Filter out non-actionable commentary and subjective opinions.
- * Provide concrete suggestions that developers can implement.
- * Treat documentation changes with the same diligence as code edits.
-
- ---
-
- ## 🛠 Technical Proficiencies
-
- * Proficient in project languages & tooling (TypeScript, Node.js, etc.).
- * Comfortable with Git workflows: feature branching, squash-and-merge, rebase.
- * Runs pre-commit hooks, linting, and tests before pushing changes.
-
- ---
-
- ## 🧭 Operating Principles
-
- * **Clarity > Brevity > Cleverness** – commit messages and issues are communication tools.
- * Consider the future reader: teammates, open-source collaborators, or even your future self.
- * Focus on user experience and practical functionality when filing issues.
-
- ---
-
- ## ✏️ Customisation
-
- Users can customise this persona by creating either of the following optional files in their configuration directory (`.kodrdriv/personas/`):
-
- * **`you-pre.md`** – Content that will be *prepended* to this default persona.
- * **`you-post.md`** – Content that will be *appended* to this default persona.
-
- If present, KodrDriv will automatically merge these custom snippets, allowing you to fine-tune the behaviour of the default persona without editing this file directly.
package/src/prompts/release.ts DELETED
@@ -1,118 +0,0 @@
- import { ContentItem, Prompt, recipe } from '@riotprompt/riotprompt';
- import path from 'path';
- import { fileURLToPath } from 'url';
-
- const __filename = fileURLToPath(import.meta.url);
- const __dirname = path.dirname(__filename);
-
- // Types for the release prompt
- export type ReleaseConfig = {
-     overridePaths?: string[];
-     overrides?: boolean;
- }
-
- export type ReleaseContent = {
-     releaseFocus?: string;
-     logContent: string;
-     diffContent: string;
-     milestoneIssues?: string;
- };
-
- export type ReleaseContext = {
-     context?: string;
-     directories?: string[];
- };
-
- export type ReleasePromptResult = {
-     prompt: Prompt;
-     maxTokens: number;
-     isLargeRelease: boolean;
- };
-
- /**
-  * Analyzes release content to determine if it's a large release
-  * and calculates appropriate token limits
-  */
- const analyzeReleaseSize = (logContent: string, diffContent?: string, milestoneIssues?: string): { isLarge: boolean; maxTokens: number } => {
-     const logLines = logContent.split('\n').length;
-     const diffLines = diffContent ? diffContent.split('\n').length : 0;
-     const milestoneLines = milestoneIssues ? milestoneIssues.split('\n').length : 0;
-     const totalContentLength = logContent.length + (diffContent?.length || 0) + (milestoneIssues?.length || 0);
-
-     // Consider it a large release if:
-     // - More than 20 commits (log lines typically ~3-5 per commit)
-     // - More than 500 diff lines
-     // - Milestone issues present (indicates significant work)
-     // - Total content length > 50KB
-     const isLarge = logLines > 60 || diffLines > 500 || milestoneLines > 50 || totalContentLength > 50000;
-
-     if (isLarge) {
-         // For large releases, significantly increase token limit
-         return { isLarge: true, maxTokens: 25000 };
-     } else {
-         // Standard token limit for normal releases
-         return { isLarge: false, maxTokens: 10000 };
-     }
- };
-
- /**
-  * Build a release prompt using RiotPrompt Recipes.
-  */
- export const createReleasePrompt = async (
-     { overrides: _overrides, overridePaths: _overridePaths }: ReleaseConfig,
-     { releaseFocus, logContent, diffContent, milestoneIssues }: ReleaseContent,
-     { context, directories }: ReleaseContext = {}
- ): Promise<ReleasePromptResult> => {
-     const basePath = __dirname;
-
-     // Analyze release size to determine token requirements
-     const { isLarge: isLargeRelease, maxTokens } = analyzeReleaseSize(logContent, diffContent, milestoneIssues);
-
-     // Build content items for the prompt
-     const contentItems: ContentItem[] = [];
-     const contextItems: ContentItem[] = [];
-
-     if (diffContent) {
-         contentItems.push({ content: diffContent, title: 'Diff' });
-     }
-     if (logContent) {
-         contentItems.push({ content: logContent, title: 'Log Context' });
-     }
-     if (milestoneIssues) {
-         contentItems.push({ content: milestoneIssues, title: 'Resolved Issues from Milestone' });
-     }
-     if (releaseFocus) {
-         contentItems.push({ content: releaseFocus, title: 'Release Focus' });
-     }
-
-     // Add release size context to help guide the AI
-     if (isLargeRelease) {
-         contextItems.push({
-             content: `This appears to be a LARGE RELEASE with significant changes. Please provide comprehensive, detailed release notes that thoroughly document all major changes, improvements, and fixes. Don't summarize - dive deep into the details.`,
-             title: 'Release Size Context'
-         });
-     }
-
-     if (context) {
-         contextItems.push({ content: context, title: 'User Context' });
-     }
-     if (directories && directories.length > 0) {
-         contextItems.push({ directories, title: 'Directories' });
-     }
-
-     const prompt = await recipe(basePath)
-         .persona({ path: 'personas/releaser.md' })
-         .instructions({ path: 'instructions/release.md' })
-         .overridePaths(_overridePaths ?? [])
-         .overrides(_overrides ?? true)
-         .content(...contentItems)
-         .context(...contextItems)
-         .cook();
-
-     return {
-         prompt,
-         maxTokens,
-         isLargeRelease
-     };
- };
-
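For orientation, here is a minimal, hypothetical sketch of how the deleted `createReleasePrompt` was typically invoked. It assumes the function is re-exported from the package entry point and uses placeholder log/diff strings; it is not taken from the package's own documentation.

```typescript
import { createReleasePrompt } from '@eldrforge/ai-service';

async function buildReleaseNotesPrompt(): Promise<void> {
    // Placeholder inputs; in practice these would come from `git log` and `git diff`.
    const logContent = 'feat: add release prompt builder\nfix: handle empty milestone issues';
    const diffContent = 'diff --git a/src/prompts/release.ts b/src/prompts/release.ts\n...';

    const { prompt, maxTokens, isLargeRelease } = await createReleasePrompt(
        {},                                          // ReleaseConfig: default override behaviour
        { logContent, diffContent },                 // ReleaseContent: log and diff are required
        { context: 'Focus on breaking changes' }     // ReleaseContext: optional user context
    );

    // Small inputs fall under the standard limit (maxTokens 10000, isLargeRelease false).
    console.log(isLargeRelease, maxTokens, prompt);
}
```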
package/src/prompts/review.ts DELETED
@@ -1,72 +0,0 @@
- import { ContentItem, Prompt, recipe } from '@riotprompt/riotprompt';
- import path from 'path';
- import { fileURLToPath } from 'url';
-
- const __filename = fileURLToPath(import.meta.url);
- const __dirname = path.dirname(__filename);
-
- export type ReviewConfig = {
-     overridePaths?: string[];
-     overrides?: boolean;
- }
-
- export type ReviewContent = {
-     notes: string;
- };
-
- export type ReviewContext = {
-     logContext?: string;
-     diffContext?: string;
-     releaseNotesContext?: string;
-     issuesContext?: string;
-     context?: string;
-     directories?: string[];
- };
-
- /**
-  * Build a review prompt using RiotPrompt Recipes.
-  */
- export const createReviewPrompt = async (
-     { overridePaths: _overridePaths, overrides: _overrides }: ReviewConfig,
-     { notes }: ReviewContent,
-     { logContext, diffContext, releaseNotesContext, issuesContext, context, directories }: ReviewContext = {}
- ): Promise<Prompt> => {
-     const basePath = __dirname;
-
-     // Build content items for the prompt
-     const contentItems: ContentItem[] = [];
-     const contextItems: ContentItem[] = [];
-
-     if (notes) {
-         contentItems.push({ content: notes, title: 'Review Notes' });
-     }
-
-     if (logContext) {
-         contextItems.push({ content: logContext, title: 'Log Context' });
-     }
-     if (diffContext) {
-         contextItems.push({ content: diffContext, title: 'Diff Context' });
-     }
-     if (releaseNotesContext) {
-         contextItems.push({ content: releaseNotesContext, title: 'Release Notes Context' });
-     }
-     if (issuesContext) {
-         contextItems.push({ content: issuesContext, title: 'Issues Context' });
-     }
-     if (context) {
-         contextItems.push({ content: context, title: 'User Context' });
-     }
-     if (directories && directories.length > 0) {
-         contextItems.push({ directories, title: 'Directories' });
-     }
-
-     return recipe(basePath)
-         .persona({ path: 'personas/you.md' })
-         .instructions({ path: 'instructions/review.md' })
-         .overridePaths(_overridePaths ?? [])
-         .overrides(_overrides ?? true)
-         .content(...contentItems)
-         .context(...contextItems)
-         .cook();
- };
-
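Similarly, a hedged sketch of calling the deleted `createReviewPrompt`. The import path and the extra context value are assumptions; unlike the release builder, this function returns the riotprompt `Prompt` directly rather than a result object.

```typescript
import { createReviewPrompt } from '@eldrforge/ai-service';

async function buildReviewPrompt(notes: string) {
    // `notes` is the only required content; every context field is optional.
    return createReviewPrompt(
        {},                                           // ReviewConfig: default override behaviour
        { notes },
        { issuesContext: 'Open issues: #12, #34' }    // hypothetical supporting context
    );
}
```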
package/src/types.ts DELETED
@@ -1,112 +0,0 @@
- /**
-  * Type definitions for AI service
-  */
-
- /**
-  * AI model reasoning effort levels
-  */
- export type ReasoningLevel = 'low' | 'medium' | 'high';
-
- /**
-  * Configuration for AI operations
-  */
- export interface AIConfig {
-     /** OpenAI API key */
-     apiKey?: string;
-
-     /** Model to use (e.g., 'gpt-4o-mini', 'gpt-4o') */
-     model?: string;
-
-     /** Reasoning effort level */
-     reasoning?: ReasoningLevel;
-
-     /** Command-specific configurations */
-     commands?: {
-         commit?: {
-             model?: string;
-             reasoning?: ReasoningLevel;
-         };
-         release?: {
-             model?: string;
-             reasoning?: ReasoningLevel;
-         };
-         review?: {
-             model?: string;
-             reasoning?: ReasoningLevel;
-         };
-     };
- }
-
- /**
-  * Result from AI transcription
-  */
- export interface Transcription {
-     text: string;
- }
-
- /**
-  * Storage interface for file operations
-  * Consumers can provide their own implementation
-  */
- export interface StorageAdapter {
-     writeOutput(fileName: string, content: string): Promise<void>;
-     readTemp(fileName: string): Promise<string>;
-     writeTemp(fileName: string, content: string): Promise<void>;
- }
-
- /**
-  * Logger interface for optional logging
-  * Compatible with winston but not required
-  */
- export interface Logger {
-     info(message: string, ...meta: unknown[]): void;
-     error(message: string, ...meta: unknown[]): void;
-     warn(message: string, ...meta: unknown[]): void;
-     debug(message: string, ...meta: unknown[]): void;
- }
-
- /**
-  * Choice for interactive prompts
-  */
- export interface Choice {
-     key: string;
-     label: string;
- }
-
- /**
-  * Options for interactive prompts
-  */
- export interface InteractiveOptions {
-     nonTtyErrorSuggestions?: string[];
-     logger?: Logger;
- }
-
- /**
-  * Options for editor integration
-  */
- export interface EditorOptions {
-     editor?: string;
-     tempDir?: string;
-     extension?: string;
-     logger?: Logger;
- }
-
- /**
-  * Options for LLM feedback loop
-  */
- export interface FeedbackOptions {
-     initialContent: string;
-     systemMessage: string;
-     aiConfig: AIConfig;
-     commandName: string;
-     storage?: StorageAdapter;
-     outputPrefix?: string;
-     editor?: EditorOptions;
-     logger?: Logger;
- }
-
- /**
-  * Re-export Prompt type from riotprompt for convenience
-  */
- export type { Prompt, ContentItem } from '@riotprompt/riotprompt';
-
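To make the `StorageAdapter` and `Logger` contracts above concrete, here is a minimal sketch of a filesystem-backed adapter and a console logger. The `output/` and `tmp/` directory names and the entry-point import are assumptions, not something the package prescribes.

```typescript
import { promises as fs } from 'fs';
import path from 'path';
import type { StorageAdapter, Logger } from '@eldrforge/ai-service';

// Filesystem-backed StorageAdapter; directory names are illustrative only.
const fileStorage: StorageAdapter = {
    async writeOutput(fileName, content) {
        await fs.mkdir('output', { recursive: true });
        await fs.writeFile(path.join('output', fileName), content, 'utf8');
    },
    async readTemp(fileName) {
        return fs.readFile(path.join('tmp', fileName), 'utf8');
    },
    async writeTemp(fileName, content) {
        await fs.mkdir('tmp', { recursive: true });
        await fs.writeFile(path.join('tmp', fileName), content, 'utf8');
    },
};

// Console-backed implementation of the optional Logger interface.
const consoleLogger: Logger = {
    info: (message, ...meta) => console.log(message, ...meta),
    warn: (message, ...meta) => console.warn(message, ...meta),
    error: (message, ...meta) => console.error(message, ...meta),
    debug: (message, ...meta) => console.debug(message, ...meta),
};

export { fileStorage, consoleLogger };
```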
package/tests/ai-complete-coverage.test.ts DELETED
@@ -1,241 +0,0 @@
- import { describe, it, expect, beforeEach, vi } from 'vitest';
- import { createCompletion, transcribeAudio } from '../src/ai';
- import type { StorageAdapter } from '../src/types';
-
- // Mock OpenAI
- const mockChatCreate = vi.fn();
- const mockTranscriptionsCreate = vi.fn();
-
- vi.mock('openai', () => ({
-     OpenAI: vi.fn().mockImplementation(() => ({
-         chat: {
-             completions: {
-                 create: mockChatCreate,
-             },
-         },
-         audio: {
-             transcriptions: {
-                 create: mockTranscriptionsCreate,
-             },
-         },
-     })),
- }));
-
- // Mock logger
- const mockLoggerInstance = {
-     info: vi.fn(),
-     error: vi.fn(),
-     warn: vi.fn(),
-     debug: vi.fn(),
- };
-
- vi.mock('../src/logger', () => ({
-     getLogger: vi.fn(() => mockLoggerInstance),
- }));
-
- // Mock fs
- const mockReadStreamDestroy = vi.fn();
- const mockReadStreamOn = vi.fn((event: string, callback: any) => {
-     // Store callbacks but don't call them
-     return {};
- });
-
- vi.mock('fs', () => ({
-     default: {
-         createReadStream: vi.fn(() => ({
-             destroy: mockReadStreamDestroy,
-             destroyed: false,
-             on: mockReadStreamOn,
-         })),
-     },
-     createReadStream: vi.fn(() => ({
-         destroy: mockReadStreamDestroy,
-         destroyed: false,
-         on: mockReadStreamOn,
-     })),
- }));
-
- // Mock safeJsonParse
- vi.mock('@eldrforge/git-tools', () => ({
-     safeJsonParse: vi.fn((json: string) => JSON.parse(json)),
- }));
-
- describe('Complete Coverage Tests', () => {
-     beforeEach(() => {
-         vi.clearAllMocks();
-         process.env.OPENAI_API_KEY = 'test-key';
-         mockReadStreamDestroy.mockClear();
-         mockReadStreamOn.mockClear();
-     });
-
-     describe('createCompletion - Complete Branch Coverage', () => {
-         it('should log response size without token usage', async () => {
-             mockChatCreate.mockResolvedValue({
-                 choices: [{ message: { content: 'Response without usage' } }],
-                 // No usage field
-             });
-
-             await createCompletion([{ role: 'user', content: 'test' }]);
-
-             // Should log response size even without usage
-             expect(mockLoggerInstance.info).toHaveBeenCalledWith(
-                 expect.stringContaining('Response size'),
-                 expect.any(String),
-                 expect.any(String)
-             );
-         });
-
-         it('should handle reasoning_effort for o3 models', async () => {
-             mockChatCreate.mockResolvedValue({
-                 choices: [{ message: { content: 'Response' } }],
-                 usage: {},
-             });
-
-             await createCompletion(
-                 [{ role: 'user', content: 'test' }],
-                 { model: 'o3-mini', openaiReasoning: 'high' }
-             );
-
-             expect(mockChatCreate).toHaveBeenCalledWith(
-                 expect.objectContaining({ reasoning_effort: 'high' })
-             );
-         });
-
-         it('should not add reasoning_effort for non-supported models', async () => {
-             mockChatCreate.mockResolvedValue({
-                 choices: [{ message: { content: 'Response' } }],
-                 usage: {},
-             });
-
-             await createCompletion(
-                 [{ role: 'user', content: 'test' }],
-                 { model: 'gpt-4o-mini', openaiReasoning: 'high' }
-             );
-
-             const callArgs = mockChatCreate.mock.calls[0][0];
-             expect(callArgs.reasoning_effort).toBeUndefined();
-         });
-
-         it('should handle debug without storage', async () => {
-             mockChatCreate.mockResolvedValue({
-                 choices: [{ message: { content: 'Response' } }],
-                 usage: {},
-             });
-
-             // Debug enabled but no storage - should not throw
-             await createCompletion(
-                 [{ role: 'user', content: 'test' }],
-                 {
-                     debug: true,
-                     debugFile: 'debug.json',
-                     // No storage provided
-                 }
-             );
-
-             expect(mockChatCreate).toHaveBeenCalled();
-         });
-     });
-
-     describe('transcribeAudio - Complete Branch Coverage', () => {
-         it('should handle stream error event', async () => {
-             let errorCallback: any;
-             mockReadStreamOn.mockImplementation((event: string, callback: any) => {
-                 if (event === 'error') {
-                     errorCallback = callback;
-                 }
-                 return {};
-             });
-
-             mockTranscriptionsCreate.mockImplementation(async () => {
-                 // Trigger stream error before API call completes
-                 if (errorCallback) {
-                     errorCallback(new Error('Stream error'));
-                 }
-                 return { text: 'Transcribed' };
-             });
-
-             const result = await transcribeAudio('/path/audio.mp3');
-
-             expect(result.text).toBe('Transcribed');
-             expect(mockLoggerInstance.error).toHaveBeenCalledWith(
-                 expect.stringContaining('Audio stream error'),
-                 expect.any(String)
-             );
-         });
-
-         it('should log debug for stream closure', async () => {
-             mockTranscriptionsCreate.mockResolvedValue({
-                 text: 'Transcribed',
-             });
-
-             await transcribeAudio('/path/audio.mp3');
-
-             expect(mockLoggerInstance.debug).toHaveBeenCalledWith(
-                 expect.stringContaining('Audio stream closed successfully')
-             );
-         });
-
-         it('should handle destroy failure on stream', async () => {
-             mockReadStreamDestroy.mockImplementation(() => {
-                 throw new Error('Cannot destroy stream');
-             });
-
-             mockTranscriptionsCreate.mockResolvedValue({
-                 text: 'Transcribed',
-             });
-
-             // Should not throw
-             const result = await transcribeAudio('/path/audio.mp3');
-
-             expect(result.text).toBe('Transcribed');
-             expect(mockLoggerInstance.debug).toHaveBeenCalled();
-         });
-
-         it('should write debug request file when storage provided', async () => {
-             const mockStorage: StorageAdapter = {
-                 writeOutput: vi.fn(),
-                 readTemp: vi.fn(),
-                 writeTemp: vi.fn().mockResolvedValue(undefined),
-             };
-
-             mockTranscriptionsCreate.mockResolvedValue({
-                 text: 'Transcribed',
-             });
-
-             await transcribeAudio('/path/audio.mp3', {
-                 debug: true,
-                 debugRequestFile: 'transcribe-request.json',
-                 storage: mockStorage,
-             });
-
-             expect(mockStorage.writeTemp).toHaveBeenCalledWith(
-                 'transcribe-request.json',
-                 expect.stringContaining('whisper-1')
-             );
-         });
-
-         it('should write debug response file when storage provided', async () => {
-             const mockStorage: StorageAdapter = {
-                 writeOutput: vi.fn(),
-                 readTemp: vi.fn(),
-                 writeTemp: vi.fn().mockResolvedValue(undefined),
-             };
-
-             mockTranscriptionsCreate.mockResolvedValue({
-                 text: 'Transcribed text',
-             });
-
-             await transcribeAudio('/path/audio.mp3', {
-                 debug: true,
-                 debugResponseFile: 'transcribe-response.json',
-                 storage: mockStorage,
-             });
-
-             expect(mockStorage.writeTemp).toHaveBeenCalledWith(
-                 'transcribe-response.json',
-                 expect.stringContaining('Transcribed text')
-             );
-         });
-     });
- });
-