@kjerneverk/execution-openai 1.0.5-dev.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.kodrdriv-test-cache.json +6 -0
- package/LICENSE +18 -0
- package/README.md +80 -0
- package/dist/index.d.ts +85 -0
- package/dist/index.js +103 -0
- package/dist/index.js.map +1 -0
- package/output/kodrdriv/.kodrdriv-parallel-context.json +589 -0
- package/output/kodrdriv/260110-1751-release-notes.md +39 -0
- package/output/kodrdriv/260112-2207-commit-message.md +1 -0
- package/output/kodrdriv/260112-2208-release-notes.md +36 -0
- package/output/kodrdriv/260112-2341-commit-message.md +1 -0
- package/output/kodrdriv/260113-0029-commit-message.md +1 -0
- package/output/kodrdriv/260113-0030-release-notes.md +5 -0
- package/output/kodrdriv/260114-0656-commit-message.md +6 -0
- package/output/kodrdriv/260115-0616-commit-message.md +1 -0
- package/output/kodrdriv/260115-0739-commit-message.md +1 -0
- package/output/kodrdriv/260126-0852-commit-message.md +1 -0
- package/output/kodrdriv/260126-0853-release-notes.md +47 -0
- package/output/kodrdriv/260126-0854-release-notes.md +50 -0
- package/output/kodrdriv/260128-0929-commit-message.md +1 -0
- package/output/kodrdriv/260128-0929-release-notes.md +24 -0
- package/output/kodrdriv/260128-0959-commit-message.md +6 -0
- package/output/kodrdriv/260128-1127-commit-message.md +1 -0
- package/output/kodrdriv/260128-1127-release-notes.md +30 -0
- package/output/kodrdriv/260130-1639-commit-message.md +5 -0
- package/output/kodrdriv/260130-1652-commit-message.md +1 -0
- package/output/kodrdriv/260130-1652-release-notes.md +27 -0
- package/output/kodrdriv/RELEASE_NOTES.md +25 -0
- package/output/kodrdriv/RELEASE_TITLE.md +1 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-13T06-07-12-251Z.md +97 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-13T07-41-08-151Z.md +97 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-13T08-29-26-449Z.md +97 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-14T14-56-03-314Z.md +102 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-15T14-16-42-406Z.md +97 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-15T15-39-56-545Z.md +114 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-26T16-52-42-503Z.md +152 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-28T17-59-30-913Z.md +204 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-31T00-39-30-443Z.md +245 -0
- package/output/kodrdriv/agentic-reflection-commit-2026-01-31T00-52-07-165Z.md +152 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-11T01-51-19-115Z.md +277 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-13T06-08-15-507Z.md +289 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-13T08-30-05-728Z.md +312 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-53-34-128Z.md +417 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-53-55-634Z.md +391 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-54-13-700Z.md +456 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-54-22-655Z.md +496 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-54-32-261Z.md +490 -0
- package/output/kodrdriv/agentic-reflection-release-2026-01-31T00-52-29-053Z.md +334 -0
- package/package.json +58 -0
- package/package.json~ +57 -0
package/output/kodrdriv/agentic-reflection-commit-2026-01-28T17-59-30-913Z.md
@@ -0,0 +1,204 @@
# Agentic Workflow - Self-Reflection Report

Generated: 2026-01-28T17:59:30.913Z

## Execution Summary

- **Iterations**: 3
- **Tool Calls**: 8
- **Unique Tools**: 7

## Tool Effectiveness Analysis

| Tool | Calls | Success | Failures | Success Rate | Avg Duration |
|------|-------|---------|----------|--------------|--------------|
| get_file_content | 2 | 2 | 0 | 100.0% | 0ms |
| get_file_modification_times | 1 | 1 | 0 | 100.0% | 0ms |
| group_files_by_concern | 1 | 1 | 0 | 100.0% | 0ms |
| get_recent_commits | 1 | 1 | 0 | 100.0% | 12ms |
| get_file_history | 1 | 1 | 0 | 100.0% | 20ms |
| get_file_dependencies | 1 | 1 | 0 | 100.0% | 13ms |
| get_related_tests | 1 | 1 | 0 | 100.0% | 1ms |

### Tool Performance Insights

**Most Frequently Used:**
- get_file_content: 2 calls
- get_file_modification_times: 1 calls
- group_files_by_concern: 1 calls

## Recommendations for Improvement

- **Underutilized Tools**: Many tools were called only once. Consider whether all tools are necessary or if the agent needs better guidance on when to use them.

## Detailed Execution Timeline

| Time | Iteration | Tool | Result | Duration |
|------|-----------|------|--------|----------|
| 9:58:50 AM | 1 | get_file_content | ✅ Success | 0ms |
| 9:58:50 AM | 1 | get_file_content | ✅ Success | 0ms |
| 9:58:50 AM | 1 | get_file_modification_times | ✅ Success | 0ms |
| 9:58:50 AM | 1 | group_files_by_concern | ✅ Success | 0ms |
| 9:58:50 AM | 1 | get_recent_commits | ✅ Success | 12ms |
| 9:59:08 AM | 2 | get_file_history | ✅ Success | 20ms |
| 9:59:08 AM | 2 | get_file_dependencies | ✅ Success | 13ms |
| 9:59:08 AM | 2 | get_related_tests | ✅ Success | 1ms |

## Conversation History

<details>
<summary>Click to expand full agentic interaction</summary>

```json
[
{
"role": "system",
"content": "You are a professional software engineer writing commit messages for your team.\n\n## Available Tools\n\nYou have access to tools to investigate the changes:\n- `get_file_content` - Read file contents or diffs for specific files\n- `get_file_modification_times` - See when files were modified relative to each other\n- `group_files_by_concern` - Understand how files relate by type/purpose\n- `get_recent_commits` - Get recent commit history to avoid duplicates\n- `get_file_history` - Understand how code evolved\n- `get_file_dependencies` - Assess impact of changes\n- `get_related_tests` - Find related test files\n- `search_codebase` - Search for usage patterns\n\nUse these tools to investigate the changes rather than asking for information upfront.\n\n## Your Task\n\nAnalyze the staged changes and determine the best way to commit them. Your primary goal is to create **meaningful, atomic commits** that each represent a single logical change.\n\n**CRITICAL**: When there are many changed files, especially after a long work session (multiple hours), you should almost always split them into multiple commits. A single commit with 10+ files usually indicates multiple distinct changes that should be separated.\n\nThink about:\n- What distinct features, fixes, or improvements are represented?\n- Are there natural groupings by functionality, module, or purpose?\n- When were files modified relative to each other?\n- What should reviewers focus on in each commit?\n\n## Investigation Strategy\n\nFor any non-trivial set of changes, gather multiple signals to understand how to group commits:\n\n1. **Check file modification times** using `get_file_modification_times`\n - This reveals *when* files were changed relative to each other\n - Files modified close together *may* be part of the same logical change\n - Large temporal gaps *can* indicate separate work sessions\n - Note: temporal proximity is one signal, not a guarantee of relatedness\n\n2. **Understand logical relationships** using `group_files_by_concern`\n - Groups by module, type (tests, docs, source), and directory structure\n - Reveals which files are functionally related regardless of when they were modified\n\n3. **Cross-reference both signals** to make informed decisions:\n - Files modified together AND logically related → strong candidate for same commit\n - Files modified apart BUT logically related → might still belong together\n - Files modified together BUT logically unrelated → consider splitting\n - Use your judgment - neither signal is definitive on its own\n\n4. 
**Investigate further** as needed:\n - get_file_content - see full context when diffs are unclear\n - get_file_history - understand how code evolved\n - get_file_dependencies - assess impact of changes\n - get_recent_commits - avoid duplicate messages\n - get_related_tests - understand behavior changes\n - search_codebase - find usage patterns\n\n## When to Split Commits\n\n**Prefer multiple commits when:**\n- Changes span multiple hours of work (check modification times!)\n- Different logical features or fixes are included\n- Test changes could be separated from production code changes\n- Documentation updates are unrelated to code changes\n- Refactoring is mixed with feature work\n- Configuration changes are mixed with implementation changes\n- Files in different modules/packages are changed for different reasons\n\n**Keep as one commit when:**\n- All changes are part of a single, cohesive feature\n- Files were modified together in a focused work session\n- Test changes directly test the production code changes\n- The changes tell a single coherent story\n\n**Default bias**: When in doubt about whether to split, **prefer splitting**. It's better to have too many focused commits than too few bloated ones. A good commit should be understandable and reviewable in isolation.\n\n## Important Context Guidelines\n\nIf additional context is provided (from context files or other sources), use your judgment:\n- If the context is relevant to these specific changes, incorporate it\n- If the context describes unrelated changes or other packages, ignore it\n- Don't force connections between unrelated information\n- Focus on accurately describing what actually changed\n\n## Writing Style\n\nWrite naturally and directly:\n- Use plain language, not corporate speak\n- Be specific and concrete\n- Avoid buzzwords and jargon\n- No emojis or excessive punctuation\n- No phrases like \"this commit\" or \"this PR\"\n- No meta-commentary about the commit itself\n\nFollow conventional commit format when appropriate (feat:, fix:, refactor:, docs:, test:, chore:), but prioritize clarity over formality.\n\n## Output Format\n\nWhen ready, format your response as:\n\nCOMMIT_MESSAGE:\n[Your commit message here - only for the FIRST group of changes if splitting]\n\nIf changes should be split into multiple commits (which is often the case with large changesets):\n\nSUGGESTED_SPLITS:\nSplit 1:\nFiles: [list of files]\nRationale: [why these belong together - mention temporal clustering and/or logical relationship]\nMessage: [commit message for this split]\n\nSplit 2:\nFiles: [list of files]\nRationale: [why these belong together]\nMessage: [commit message for this split]\n\nSplit 3:\n...\n\nOutput only the commit message and splits. No conversational remarks or follow-up offers."
},
{
"role": "user",
"content": "I have staged changes that need a commit message.\n\nChanged files (2):\n - package.json\n - src/index.ts\n\nUser direction: --model\n\n## Your Analysis Task\n\nUse your tools to investigate the changes:\n1. Use `get_file_content` to read the actual changes in the files\n2. Use `get_file_modification_times` to see when files were modified relative to each other\n3. Use `group_files_by_concern` to understand how files relate by type/purpose\n4. Use `get_recent_commits` to check recent commit history and avoid duplicates\n5. Cross-reference temporal proximity and logical relatedness to determine the best commit structure\n\nConsider:\n- What distinct features, fixes, or improvements are included?\n- Are files that were modified together also logically related?\n- Would separate commits make the history more understandable?\n\nIf changes represent multiple logical concerns, suggest splits.\n\nStart by reading the file contents to understand what actually changed."
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_hWFvNC6ZmicuL30cFXOxaqyC",
"type": "function",
"function": {
"name": "get_file_content",
"arguments": "{\"filePath\": \"package.json\", \"includeLineNumbers\": true}"
}
},
{
"id": "call_Oc0t8fb3piRDqRa3treDchZN",
"type": "function",
"function": {
"name": "get_file_content",
"arguments": "{\"filePath\": \"src/index.ts\", \"includeLineNumbers\": true}"
}
},
{
"id": "call_Vw4GoXGs5Ml5TGyTA7QS3HSQ",
"type": "function",
"function": {
"name": "get_file_modification_times",
"arguments": "{\"filePaths\": [\"package.json\", \"src/index.ts\"]}"
}
},
{
"id": "call_AabXJtLJKXIVMznLt9dCnXuG",
"type": "function",
"function": {
"name": "group_files_by_concern",
"arguments": "{\"filePaths\": [\"package.json\", \"src/index.ts\"]}"
}
},
{
"id": "call_TzPzWOLDP0w2rr5tbIIOmCeL",
"type": "function",
"function": {
"name": "get_recent_commits",
"arguments": "{\"filePaths\": [\"package.json\", \"src/index.ts\"], \"since\": \"2 weeks ago\", \"limit\": 10}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_hWFvNC6ZmicuL30cFXOxaqyC",
"content": "1: {\n2: \"name\": \"@riotprompt/execution-openai\",\n3: \"version\": \"1.0.3-dev.0\",\n4: \"description\": \"OpenAI provider for execution - implements Provider interface\",\n5: \"type\": \"module\",\n6: \"main\": \"./dist/index.js\",\n7: \"types\": \"./dist/index.d.ts\",\n8: \"exports\": {\n9: \".\": {\n10: \"types\": \"./dist/index.d.ts\",\n11: \"import\": \"./dist/index.js\"\n12: }\n13: },\n14: \"engines\": {\n15: \"node\": \">=24.0.0\"\n16: },\n17: \"scripts\": {\n18: \"clean\": \"rm -rf dist\",\n19: \"build\": \"vite build\",\n20: \"test\": \"vitest run --coverage\",\n21: \"test:coverage\": \"vitest run --coverage\",\n22: \"lint\": \"eslint src\",\n23: \"precommit\": \"npm run build && npm run lint && npm run test\",\n24: \"prepublishOnly\": \"npm run clean && npm run build\"\n25: },\n26: \"keywords\": [\n27: \"llm\",\n28: \"openai\",\n29: \"gpt\",\n30: \"execution\",\n31: \"provider\"\n32: ],\n33: \"author\": \"Tim O'Brien <tobrien@discursive.com>\",\n34: \"license\": \"Apache-2.0\",\n35: \"repository\": {\n36: \"type\": \"git\",\n37: \"url\": \"https://github.com/kjerneverk/execution-openai\"\n38: },\n39: \"dependencies\": {\n40: \"@theunwalked/offrecord\": \"^0.0.1\",\n41: \"@theunwalked/spotclean\": \"^0.0.3\",\n42: \"openai\": \"^6.16.0\",\n43: \"tiktoken\": \"^1.0.21\"\n44: },\n45: \"devDependencies\": {\n46: \"@types/node\": \"^25.0.6\",\n47: \"@typescript-eslint/eslint-plugin\": \"^8.33.1\",\n48: \"@typescript-eslint/parser\": \"^8.33.1\",\n49: \"@vitest/coverage-v8\": \"^4.0.18\",\n50: \"ajv\": \"^8.17.1\",\n51: \"eslint\": \"^9.28.0\",\n52: \"globals\": \"^17.2.0\",\n53: \"typescript\": \"^5.8.3\",\n54: \"vite\": \"^7.0.4\",\n55: \"vite-plugin-dts\": \"^4.5.4\",\n56: \"vitest\": \"^4.0.18\"\n57: }\n58: }\n59: ",
"name": "get_file_content"
},
{
"role": "tool",
"tool_call_id": "call_Oc0t8fb3piRDqRa3treDchZN",
"content": "1: /**\n2: * Execution OpenAI Package\n3: *\n4: * OpenAI provider implementation for LLM execution.\n5: *\n6: * @packageDocumentation\n7: */\n8: \n9: import OpenAI from 'openai';\n10: import { getRedactor } from '@theunwalked/offrecord';\n11: import { \n12: createSafeError, \n13: configureErrorSanitizer,\n14: configureSecretGuard,\n15: } from '@theunwalked/spotclean';\n16: \n17: // Register OpenAI API key patterns on module load\n18: const redactor = getRedactor();\n19: redactor.register({\n20: name: 'openai',\n21: patterns: [\n22: /sk-[a-zA-Z0-9]{20,}/g,\n23: /sk-proj-[a-zA-Z0-9_-]+/g,\n24: ],\n25: validator: (key: string) => /^sk-(proj-)?[a-zA-Z0-9_-]{20,}$/.test(key),\n26: envVar: 'OPENAI_API_KEY',\n27: description: 'OpenAI API keys',\n28: });\n29: \n30: // Configure spotclean for error sanitization\n31: configureErrorSanitizer({\n32: enabled: true,\n33: environment: process.env.NODE_ENV === 'production' ? 'production' : 'development',\n34: includeCorrelationId: true,\n35: sanitizeStackTraces: process.env.NODE_ENV === 'production',\n36: maxMessageLength: 500,\n37: });\n38: \n39: configureSecretGuard({\n40: enabled: true,\n41: redactionText: '[REDACTED]',\n42: preservePartial: false,\n43: preserveLength: 0,\n44: customPatterns: [\n45: { name: 'openai', pattern: /sk-[a-zA-Z0-9]{20,}/g, description: 'OpenAI API key' },\n46: { name: 'openai-proj', pattern: /sk-proj-[a-zA-Z0-9_-]+/g, description: 'OpenAI project key' },\n47: ],\n48: });\n49: \n50: // ===== INLINE TYPES (from 'execution' package) =====\n51: // These types are duplicated here for build independence.\n52: // When 'execution' is published, these can be imported from there.\n53: \n54: export type Model = string;\n55: \n56: export interface Message {\n57: role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';\n58: content: string | string[] | null;\n59: name?: string;\n60: }\n61: \n62: export interface Request {\n63: messages: Message[];\n64: model: Model;\n65: responseFormat?: any;\n66: validator?: any;\n67: addMessage(message: Message): void;\n68: }\n69: \n70: export interface ProviderResponse {\n71: content: string;\n72: model: string;\n73: usage?: {\n74: inputTokens: number;\n75: outputTokens: number;\n76: };\n77: toolCalls?: Array<{\n78: id: string;\n79: type: 'function';\n80: function: {\n81: name: string;\n82: arguments: string;\n83: };\n84: }>;\n85: }\n86: \n87: export interface ExecutionOptions {\n88: apiKey?: string;\n89: model?: string;\n90: temperature?: number;\n91: maxTokens?: number;\n92: timeout?: number;\n93: retries?: number;\n94: }\n95: \n96: export interface Provider {\n97: readonly name: string;\n98: execute(request: Request, options?: ExecutionOptions): Promise<ProviderResponse>;\n99: supportsModel?(model: Model): boolean;\n100: }\n101: \n102: /**\n103: * OpenAI Provider implementation\n104: */\n105: export class OpenAIProvider implements Provider {\n106: readonly name = 'openai';\n107: \n108: /**\n109: * Check if this provider supports a given model\n110: */\n111: supportsModel(model: Model): boolean {\n112: if (!model) return true; // Default to OpenAI\n113: return (\n114: model.startsWith('gpt') ||\n115: model.startsWith('o1') ||\n116: model.startsWith('o3') ||\n117: model.startsWith('o4')\n118: );\n119: }\n120: \n121: /**\n122: * Execute a request against OpenAI\n123: */\n124: async execute(\n125: request: Request,\n126: options: ExecutionOptions = {}\n127: ): Promise<ProviderResponse> {\n128: const apiKey = options.apiKey || process.env.OPENAI_API_KEY;\n129: \n130: if (!apiKey) {\n131: 
throw new Error('OpenAI API key is required. Set OPENAI_API_KEY environment variable.');\n132: }\n133: \n134: // Validate key format\n135: const validation = redactor.validateKey(apiKey, 'openai');\n136: if (!validation.valid) {\n137: throw new Error('Invalid OpenAI API key format');\n138: }\n139: \n140: try {\n141: const client = new OpenAI({ apiKey });\n142: \n143: const model = options.model || request.model || 'gpt-4';\n144: \n145: // Convert messages to OpenAI format\n146: const messages = request.messages.map((msg) => {\n147: const role =\n148: msg.role === 'developer' ? 'system' : msg.role;\n149: \n150: return {\n151: role: role,\n152: content:\n153: typeof msg.content === 'string'\n154: ? msg.content\n155: : JSON.stringify(msg.content),\n156: name: msg.name,\n157: } as any;\n158: });\n159: \n160: const response = await client.chat.completions.create({\n161: model: model,\n162: messages: messages,\n163: temperature: options.temperature,\n164: max_tokens: options.maxTokens,\n165: response_format: request.responseFormat,\n166: });\n167: \n168: const choice = response.choices[0];\n169: \n170: return {\n171: content: choice.message.content || '',\n172: model: response.model,\n173: usage: response.usage\n174: ? {\n175: inputTokens: response.usage.prompt_tokens,\n176: outputTokens: response.usage.completion_tokens,\n177: }\n178: : undefined,\n179: toolCalls: choice.message.tool_calls\n180: ?.filter((tc) => tc.type === 'function')\n181: .map((tc) => ({\n182: id: tc.id,\n183: type: 'function' as const,\n184: function: {\n185: name: (tc as any).function.name,\n186: arguments: (tc as any).function.arguments,\n187: },\n188: })),\n189: };\n190: } catch (error) {\n191: // Sanitize error to remove any API keys from error messages\n192: // Use spotclean for comprehensive error sanitization\n193: throw createSafeError(error as Error, { provider: 'openai' });\n194: }\n195: }\n196: }\n197: \n198: /**\n199: * Create a new OpenAI provider instance\n200: */\n201: export function createOpenAIProvider(): OpenAIProvider {\n202: return new OpenAIProvider();\n203: }\n204: \n205: /**\n206: * Package version\n207: */\n208: export const VERSION = '0.0.1';\n209: \n210: export default OpenAIProvider;\n211: ",
"name": "get_file_content"
},
{
"role": "tool",
"tool_call_id": "call_Vw4GoXGs5Ml5TGyTA7QS3HSQ",
"content": "## File Modification Times (sorted oldest to newest)\n\n2026-01-28 17:54:18 package.json\n2026-01-28 17:55:22 src/index.ts\n\n\n## Temporal Clusters (files modified within 10 minutes of each other)\n\nAll 2 files were modified in a single work session.\n",
"name": "get_file_modification_times"
},
{
"role": "tool",
"tool_call_id": "call_AabXJtLJKXIVMznLt9dCnXuG",
"content": "dependencies (1 files):\n - package.json\n\nsource:src (1 files):\n - src/index.ts\n\nSuggestion: These 2 groups might be better as separate commits if they represent different concerns.",
"name": "group_files_by_concern"
},
{
"role": "tool",
"tool_call_id": "call_TzPzWOLDP0w2rr5tbIIOmCeL",
"content": "e580d8e - chore: bump to 1.0.3-dev.0 (Tim O'Brien, 11 minutes ago)\nc9afc87 - chore: bump to 1.0.2-dev.0 (Tim O'Brien, 28 minutes ago)\nb827fd9 - fix: add npm tag detection for prerelease versions in publish workflow (#14) (Tim O'Brien, 29 minutes ago)\na997339 - chore(release): bump @riotprompt/execution-openai to 1.0.0 (#13) (Tim O'Brien, 2 days ago)\n",
"name": "get_recent_commits"
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_7ekoVooghJytb2JylPh84n8c",
"type": "function",
"function": {
"name": "get_file_history",
"arguments": "{\"filePaths\": [\"src/index.ts\", \"package.json\"], \"limit\": 5, \"format\": \"detailed\"}"
}
},
{
"id": "call_fCTdgwFlF9CoRPYM5xEmSc1o",
"type": "function",
"function": {
"name": "get_file_dependencies",
"arguments": "{\"filePaths\": [\"src/index.ts\"]}"
}
},
{
"id": "call_HjZJrXDUSIPv5KN8D26In14s",
"type": "function",
"function": {
"name": "get_related_tests",
"arguments": "{\"filePaths\": [\"src/index.ts\"]}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_7ekoVooghJytb2JylPh84n8c",
"content": "e580d8e60671c5b77d6a7d6d8a7fd930ab835da4\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 09:47:28 2026 -0800\nchore: bump to 1.0.3-dev.0\n\n\n---\nc9afc875aab7b6c185751c33c6efc16170466e9f\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 09:30:44 2026 -0800\nchore: bump to 1.0.2-dev.0\n\n\n---\nb827fd921b95e0e3a0d8718c5c5c219f39390b90\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 09:29:35 2026 -0800\nfix: add npm tag detection for prerelease versions in publish workflow (#14)\n\n* chore: bump to 1.0.0-dev.0\n\n* Updating version\n\n* fix: add targeted ajv override for vite-plugin-dts\n\n* fix: add ajv@^8 as explicit devDependency to resolve vite-plugin-dts conflict\n\n* fix: add npm tag detection for prerelease versions in publish workflow\n---\na9973394f6c8a39acc701be4e6d7465cd7424b7a\nTim O'Brien (tobrien@discursive.com)\nMon Jan 26 08:55:12 2026 -0800\nchore(release): bump @riotprompt/execution-openai to 1.0.0 (#13)\n\n* chore: bump to 0.0.4-dev.0\n\n* feat(openai): validate API key and sanitize errors with offrecord/spotclean\n\n- Add offrecord + spotclean dependencies and register OpenAI key patterns\n- Validate OPENAI_API_KEY format before creating client\n- Sanitize provider errors to prevent leaking secrets\n- Mark new deps (and node:crypto) as external in Vite build\n\n* chore(release): bump to 1.0.0-dev.0\n\n* chore: run build in precommit script\n\n* chore(gitignore): add .kodrdriv* pattern to ignore kodrdriv internal files\n\n* chore(release): bump @riotprompt/execution-openai to 1.0.0\n---\n2f6afb38ef9077c54bd911c68d0eed2cadd9917a\nTim O'Brien (tobrien@discursive.com)\nTue Jan 13 00:30:54 2026 -0800\nchore(release): bump @riotprompt/execution-openai to 0.0.4 (#7)\n\n* chore: bump to v0.0.4-dev.0\n\n* chore(build): switch to ESM-only output, require Node 24, and target ES2024\n\n* chore(release): bump @riotprompt/execution-openai to 0.0.4\n---\n",
"name": "get_file_history"
},
{
"role": "tool",
"tool_call_id": "call_fCTdgwFlF9CoRPYM5xEmSc1o",
"content": "No files found that import the specified files",
"name": "get_file_dependencies"
},
{
"role": "tool",
"tool_call_id": "call_HjZJrXDUSIPv5KN8D26In14s",
"content": "Found related test files:\nsrc/index.ts",
"name": "get_related_tests"
},
{
"role": "assistant",
"content": "COMMIT_MESSAGE:\nfeat(openai): add model-aware provider and sanitize OpenAI secrets\n\n- Implement OpenAIProvider with model selection via request/options and supportsModel checks\n- Register OpenAI key patterns and validate API key format before client creation\n- Sanitize/redact secrets in thrown errors using offrecord/spotclean\n- Add offrecord/spotclean dependencies to package.json"
}
]
```

</details>

## Generated Commit Message

```
feat(openai): add model-aware provider and sanitize OpenAI secrets

- Implement OpenAIProvider with model selection via request/options and supportsModel checks
- Register OpenAI key patterns and validate API key format before client creation
- Sanitize/redact secrets in thrown errors using offrecord/spotclean
- Add offrecord/spotclean dependencies to package.json
```
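The commit message above describes the `OpenAIProvider` captured in the src/index.ts dump earlier in this hunk. As a rough illustration of the surface it refers to, here is a minimal usage sketch in TypeScript; the import specifier assumes the published name from this diff's header (the package.json captured in the log still shows `@riotprompt/execution-openai`), and the request literal is invented for the example rather than taken from the package:

```typescript
// Sketch only: exercises the Provider surface shown in the src/index.ts dump above.
import { createOpenAIProvider, type Request } from '@kjerneverk/execution-openai';

async function main(): Promise<void> {
    const provider = createOpenAIProvider();

    // supportsModel() accepts gpt/o1/o3/o4 model names, per the dumped implementation.
    if (!provider.supportsModel('gpt-4o')) {
        throw new Error('Model not handled by the OpenAI provider');
    }

    // Request mirrors the inlined types: messages, a model, and an addMessage helper.
    const messages: Request['messages'] = [
        { role: 'system', content: 'You write commit messages.' },
        { role: 'user', content: 'Summarize: swapped the redaction dependency.' },
    ];
    const request: Request = {
        messages,
        model: 'gpt-4o',
        addMessage: (message) => { messages.push(message); },
    };

    // execute() falls back to OPENAI_API_KEY when no apiKey option is passed,
    // validates the key format, and sanitizes any error it throws.
    const response = await provider.execute(request, { temperature: 0.2, maxTokens: 200 });
    console.log(response.model, response.content, response.usage);
}

main().catch(console.error);
```

The `Request`, `ExecutionOptions`, and `ProviderResponse` shapes used here are the inlined types shown in the tool output above; tests/index.test.ts, dumped in the next hunk's conversation log, exercises the same surface.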
package/output/kodrdriv/agentic-reflection-commit-2026-01-31T00-39-30-443Z.md
@@ -0,0 +1,245 @@
# Agentic Workflow - Self-Reflection Report

Generated: 2026-01-31T00:39:30.443Z

## Execution Summary

- **Iterations**: 5
- **Tool Calls**: 10
- **Unique Tools**: 7

## Tool Effectiveness Analysis

| Tool | Calls | Success | Failures | Success Rate | Avg Duration |
|------|-------|---------|----------|--------------|--------------|
| get_file_content | 4 | 4 | 0 | 100.0% | 1ms |
| get_file_modification_times | 1 | 1 | 0 | 100.0% | 0ms |
| group_files_by_concern | 1 | 1 | 0 | 100.0% | 0ms |
| get_recent_commits | 1 | 1 | 0 | 100.0% | 12ms |
| get_file_history | 1 | 1 | 0 | 100.0% | 24ms |
| get_file_dependencies | 1 | 1 | 0 | 100.0% | 60ms |
| search_codebase | 1 | 1 | 0 | 100.0% | 12ms |

### Tool Performance Insights

**Most Frequently Used:**
- get_file_content: 4 calls
- get_file_modification_times: 1 calls
- group_files_by_concern: 1 calls

## Recommendations for Improvement

- **Underutilized Tools**: Many tools were called only once. Consider whether all tools are necessary or if the agent needs better guidance on when to use them.

## Detailed Execution Timeline

| Time | Iteration | Tool | Result | Duration |
|------|-----------|------|--------|----------|
| 4:39:04 PM | 1 | get_file_content | ✅ Success | 0ms |
| 4:39:04 PM | 1 | get_file_content | ✅ Success | 1ms |
| 4:39:04 PM | 1 | get_file_content | ✅ Success | 0ms |
| 4:39:04 PM | 1 | get_file_modification_times | ✅ Success | 0ms |
| 4:39:04 PM | 1 | group_files_by_concern | ✅ Success | 0ms |
| 4:39:04 PM | 1 | get_recent_commits | ✅ Success | 12ms |
| 4:39:08 PM | 2 | get_file_history | ✅ Success | 24ms |
| 4:39:08 PM | 2 | get_file_dependencies | ✅ Success | 60ms |
| 4:39:15 PM | 3 | search_codebase | ✅ Success | 12ms |
| 4:39:16 PM | 4 | get_file_content | ✅ Success | 1ms |

## Conversation History

<details>
<summary>Click to expand full agentic interaction</summary>

```json
[
{
"role": "system",
"content": "You are a professional software engineer writing commit messages for your team.\n\n## Available Tools\n\nYou have access to tools to investigate the changes:\n- `get_file_content` - Read file contents or diffs for specific files\n- `get_file_modification_times` - See when files were modified relative to each other\n- `group_files_by_concern` - Understand how files relate by type/purpose\n- `get_recent_commits` - Get recent commit history to avoid duplicates\n- `get_file_history` - Understand how code evolved\n- `get_file_dependencies` - Assess impact of changes\n- `get_related_tests` - Find related test files\n- `search_codebase` - Search for usage patterns\n\nUse these tools to investigate the changes rather than asking for information upfront.\n\n## Your Task\n\nAnalyze the staged changes and determine the best way to commit them. Your primary goal is to create **meaningful, atomic commits** that each represent a single logical change.\n\n**CRITICAL**: When there are many changed files, especially after a long work session (multiple hours), you should almost always split them into multiple commits. A single commit with 10+ files usually indicates multiple distinct changes that should be separated.\n\nThink about:\n- What distinct features, fixes, or improvements are represented?\n- Are there natural groupings by functionality, module, or purpose?\n- When were files modified relative to each other?\n- What should reviewers focus on in each commit?\n\n## Investigation Strategy\n\nFor any non-trivial set of changes, gather multiple signals to understand how to group commits:\n\n1. **Check file modification times** using `get_file_modification_times`\n - This reveals *when* files were changed relative to each other\n - Files modified close together *may* be part of the same logical change\n - Large temporal gaps *can* indicate separate work sessions\n - Note: temporal proximity is one signal, not a guarantee of relatedness\n\n2. **Understand logical relationships** using `group_files_by_concern`\n - Groups by module, type (tests, docs, source), and directory structure\n - Reveals which files are functionally related regardless of when they were modified\n\n3. **Cross-reference both signals** to make informed decisions:\n - Files modified together AND logically related → strong candidate for same commit\n - Files modified apart BUT logically related → might still belong together\n - Files modified together BUT logically unrelated → consider splitting\n - Use your judgment - neither signal is definitive on its own\n\n4. 
**Investigate further** as needed:\n - get_file_content - see full context when diffs are unclear\n - get_file_history - understand how code evolved\n - get_file_dependencies - assess impact of changes\n - get_recent_commits - avoid duplicate messages\n - get_related_tests - understand behavior changes\n - search_codebase - find usage patterns\n\n## When to Split Commits\n\n**Prefer multiple commits when:**\n- Changes span multiple hours of work (check modification times!)\n- Different logical features or fixes are included\n- Test changes could be separated from production code changes\n- Documentation updates are unrelated to code changes\n- Refactoring is mixed with feature work\n- Configuration changes are mixed with implementation changes\n- Files in different modules/packages are changed for different reasons\n\n**Keep as one commit when:**\n- All changes are part of a single, cohesive feature\n- Files were modified together in a focused work session\n- Test changes directly test the production code changes\n- The changes tell a single coherent story\n\n**Default bias**: When in doubt about whether to split, **prefer splitting**. It's better to have too many focused commits than too few bloated ones. A good commit should be understandable and reviewable in isolation.\n\n## Important Context Guidelines\n\nIf additional context is provided (from context files or other sources), use your judgment:\n- If the context is relevant to these specific changes, incorporate it\n- If the context describes unrelated changes or other packages, ignore it\n- Don't force connections between unrelated information\n- Focus on accurately describing what actually changed\n\n## Writing Style\n\nWrite naturally and directly:\n- Use plain language, not corporate speak\n- Be specific and concrete\n- Avoid buzzwords and jargon\n- No emojis or excessive punctuation\n- No phrases like \"this commit\" or \"this PR\"\n- No meta-commentary about the commit itself\n\nFollow conventional commit format when appropriate (feat:, fix:, refactor:, docs:, test:, chore:), but prioritize clarity over formality.\n\n## Output Format\n\nWhen ready, format your response as:\n\nCOMMIT_MESSAGE:\n[Your commit message here - only for the FIRST group of changes if splitting]\n\nIf changes should be split into multiple commits (which is often the case with large changesets):\n\nSUGGESTED_SPLITS:\nSplit 1:\nFiles: [list of files]\nRationale: [why these belong together - mention temporal clustering and/or logical relationship]\nMessage: [commit message for this split]\n\nSplit 2:\nFiles: [list of files]\nRationale: [why these belong together]\nMessage: [commit message for this split]\n\nSplit 3:\n...\n\nOutput only the commit message and splits. No conversational remarks or follow-up offers."
},
{
"role": "user",
"content": "I have staged changes that need a commit message.\n\nChanged files (3):\n - package.json\n - src/index.ts\n - vite.config.ts\n\nUser direction: --model\n\n## Your Analysis Task\n\nUse your tools to investigate the changes:\n1. Use `get_file_content` to read the actual changes in the files\n2. Use `get_file_modification_times` to see when files were modified relative to each other\n3. Use `group_files_by_concern` to understand how files relate by type/purpose\n4. Use `get_recent_commits` to check recent commit history and avoid duplicates\n5. Cross-reference temporal proximity and logical relatedness to determine the best commit structure\n\nConsider:\n- What distinct features, fixes, or improvements are included?\n- Are files that were modified together also logically related?\n- Would separate commits make the history more understandable?\n\nIf changes represent multiple logical concerns, suggest splits.\n\nStart by reading the file contents to understand what actually changed."
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_DTt3ARIyoDLIIWw6Z27QGB7t",
"type": "function",
"function": {
"name": "get_file_content",
"arguments": "{\"filePath\": \"package.json\"}"
}
},
{
"id": "call_pQRQ8BeMYJSUGIQ5h1CyWHfB",
"type": "function",
"function": {
"name": "get_file_content",
"arguments": "{\"filePath\": \"src/index.ts\"}"
}
},
{
"id": "call_sVfXcAWGtLY4eTPM4O0SlsEj",
"type": "function",
"function": {
"name": "get_file_content",
"arguments": "{\"filePath\": \"vite.config.ts\"}"
}
},
{
"id": "call_tTOCYvBjgFCKmyNCsWX3nei1",
"type": "function",
"function": {
"name": "get_file_modification_times",
"arguments": "{\"filePaths\": [\"package.json\", \"src/index.ts\", \"vite.config.ts\"]}"
}
},
{
"id": "call_R0HIuhxDqJFcCrxMJ4Tm1pV1",
"type": "function",
"function": {
"name": "group_files_by_concern",
"arguments": "{\"filePaths\": [\"package.json\", \"src/index.ts\", \"vite.config.ts\"]}"
}
},
{
"id": "call_Cac8A3wLDWu0xAR0QfbQbjUY",
"type": "function",
"function": {
"name": "get_recent_commits",
"arguments": "{\"filePaths\": [\"package.json\", \"src/index.ts\", \"vite.config.ts\"], \"since\": \"2 weeks ago\", \"limit\": 10}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_DTt3ARIyoDLIIWw6Z27QGB7t",
"content": "{\n \"name\": \"@riotprompt/execution-openai\",\n \"version\": \"1.0.4-dev.0\",\n \"description\": \"OpenAI provider for execution - implements Provider interface\",\n \"type\": \"module\",\n \"main\": \"./dist/index.js\",\n \"types\": \"./dist/index.d.ts\",\n \"exports\": {\n \".\": {\n \"types\": \"./dist/index.d.ts\",\n \"import\": \"./dist/index.js\"\n }\n },\n \"engines\": {\n \"node\": \">=24.0.0\"\n },\n \"scripts\": {\n \"clean\": \"rm -rf dist\",\n \"build\": \"vite build\",\n \"test\": \"vitest run --coverage\",\n \"test:coverage\": \"vitest run --coverage\",\n \"lint\": \"eslint src\",\n \"precommit\": \"npm run build && npm run lint && npm run test\",\n \"prepublishOnly\": \"npm run clean && npm run build\"\n },\n \"keywords\": [\n \"llm\",\n \"openai\",\n \"gpt\",\n \"execution\",\n \"provider\"\n ],\n \"author\": \"Tim O'Brien <tobrien@discursive.com>\",\n \"license\": \"Apache-2.0\",\n \"repository\": {\n \"type\": \"git\",\n \"url\": \"https://github.com/kjerneverk/execution-openai\"\n },\n \"dependencies\": {\n \"@utilarium/offrecord\": \"^0.0.3\",\n \"@utilarium/spotclean\": \"^0.0.4\",\n \"openai\": \"^6.16.0\",\n \"tiktoken\": \"^1.0.21\"\n },\n \"devDependencies\": {\n \"@types/node\": \"^25.0.6\",\n \"@typescript-eslint/eslint-plugin\": \"^8.33.1\",\n \"@typescript-eslint/parser\": \"^8.33.1\",\n \"@vitest/coverage-v8\": \"^4.0.18\",\n \"ajv\": \"^8.17.1\",\n \"eslint\": \"^9.28.0\",\n \"globals\": \"^17.2.0\",\n \"typescript\": \"^5.8.3\",\n \"vite\": \"^7.0.4\",\n \"vite-plugin-dts\": \"^4.5.4\",\n \"vitest\": \"^4.0.18\"\n }\n}\n",
"name": "get_file_content"
},
{
"role": "tool",
"tool_call_id": "call_pQRQ8BeMYJSUGIQ5h1CyWHfB",
"content": "/**\n * Execution OpenAI Package\n *\n * OpenAI provider implementation for LLM execution.\n *\n * @packageDocumentation\n */\n\nimport OpenAI from 'openai';\nimport { getRedactor } from '@utilarium/offrecord';\nimport { \n createSafeError, \n configureErrorSanitizer,\n configureSecretGuard,\n} from '@utilarium/spotclean';\n\n// Register OpenAI API key patterns on module load\nconst redactor = getRedactor();\nredactor.register({\n name: 'openai',\n patterns: [\n /sk-[a-zA-Z0-9]{20,}/g,\n /sk-proj-[a-zA-Z0-9_-]+/g,\n ],\n validator: (key: string) => /^sk-(proj-)?[a-zA-Z0-9_-]{20,}$/.test(key),\n envVar: 'OPENAI_API_KEY',\n description: 'OpenAI API keys',\n});\n\n// Configure spotclean for error sanitization\nconfigureErrorSanitizer({\n enabled: true,\n environment: process.env.NODE_ENV === 'production' ? 'production' : 'development',\n includeCorrelationId: true,\n sanitizeStackTraces: process.env.NODE_ENV === 'production',\n maxMessageLength: 500,\n});\n\nconfigureSecretGuard({\n enabled: true,\n redactionText: '[REDACTED]',\n preservePartial: false,\n preserveLength: 0,\n customPatterns: [\n { name: 'openai', pattern: /sk-[a-zA-Z0-9]{20,}/g, description: 'OpenAI API key' },\n { name: 'openai-proj', pattern: /sk-proj-[a-zA-Z0-9_-]+/g, description: 'OpenAI project key' },\n ],\n});\n\n// ===== INLINE TYPES (from 'execution' package) =====\n// These types are duplicated here for build independence.\n// When 'execution' is published, these can be imported from there.\n\nexport type Model = string;\n\nexport interface Message {\n role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';\n content: string | string[] | null;\n name?: string;\n}\n\nexport interface Request {\n messages: Message[];\n model: Model;\n responseFormat?: any;\n validator?: any;\n addMessage(message: Message): void;\n}\n\nexport interface ProviderResponse {\n content: string;\n model: string;\n usage?: {\n inputTokens: number;\n outputTokens: number;\n };\n toolCalls?: Array<{\n id: string;\n type: 'function';\n function: {\n name: string;\n arguments: string;\n };\n }>;\n}\n\nexport interface ExecutionOptions {\n apiKey?: string;\n model?: string;\n temperature?: number;\n maxTokens?: number;\n timeout?: number;\n retries?: number;\n}\n\nexport interface Provider {\n readonly name: string;\n execute(request: Request, options?: ExecutionOptions): Promise<ProviderResponse>;\n supportsModel?(model: Model): boolean;\n}\n\n/**\n * OpenAI Provider implementation\n */\nexport class OpenAIProvider implements Provider {\n readonly name = 'openai';\n\n /**\n * Check if this provider supports a given model\n */\n supportsModel(model: Model): boolean {\n if (!model) return true; // Default to OpenAI\n return (\n model.startsWith('gpt') ||\n model.startsWith('o1') ||\n model.startsWith('o3') ||\n model.startsWith('o4')\n );\n }\n\n /**\n * Execute a request against OpenAI\n */\n async execute(\n request: Request,\n options: ExecutionOptions = {}\n ): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.OPENAI_API_KEY;\n \n if (!apiKey) {\n throw new Error('OpenAI API key is required. 
Set OPENAI_API_KEY environment variable.');\n }\n\n // Validate key format\n const validation = redactor.validateKey(apiKey, 'openai');\n if (!validation.valid) {\n throw new Error('Invalid OpenAI API key format');\n }\n\n try {\n const client = new OpenAI({ apiKey });\n\n const model = options.model || request.model || 'gpt-4';\n\n // Convert messages to OpenAI format\n const messages = request.messages.map((msg) => {\n const role =\n msg.role === 'developer' ? 'system' : msg.role;\n\n return {\n role: role,\n content:\n typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content),\n name: msg.name,\n } as any;\n });\n\n const response = await client.chat.completions.create({\n model: model,\n messages: messages,\n temperature: options.temperature,\n max_tokens: options.maxTokens,\n response_format: request.responseFormat,\n });\n\n const choice = response.choices[0];\n\n return {\n content: choice.message.content || '',\n model: response.model,\n usage: response.usage\n ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens,\n }\n : undefined,\n toolCalls: choice.message.tool_calls\n ?.filter((tc) => tc.type === 'function')\n .map((tc) => ({\n id: tc.id,\n type: 'function' as const,\n function: {\n name: (tc as any).function.name,\n arguments: (tc as any).function.arguments,\n },\n })),\n };\n } catch (error) {\n // Sanitize error to remove any API keys from error messages\n // Use spotclean for comprehensive error sanitization\n throw createSafeError(error as Error, { provider: 'openai' });\n }\n }\n}\n\n/**\n * Create a new OpenAI provider instance\n */\nexport function createOpenAIProvider(): OpenAIProvider {\n return new OpenAIProvider();\n}\n\n/**\n * Package version\n */\nexport const VERSION = '0.0.1';\n\nexport default OpenAIProvider;\n",
"name": "get_file_content"
},
{
"role": "tool",
"tool_call_id": "call_sVfXcAWGtLY4eTPM4O0SlsEj",
"content": "import { defineConfig } from \"vite\";\nimport dts from \"vite-plugin-dts\";\n\nexport default defineConfig({\n build: {\n lib: {\n entry: \"src/index.ts\",\n name: \"execution-openai\",\n fileName: () => \"index.js\",\n formats: [\"es\"],\n },\n rollupOptions: {\n external: [\n \"openai\",\n \"tiktoken\",\n \"execution\",\n \"@utilarium/offrecord\",\n \"@utilarium/spotclean\",\n \"node:crypto\",\n ],\n },\n sourcemap: true,\n minify: false,\n },\n plugins: [dts({ rollupTypes: true })],\n});\n",
"name": "get_file_content"
},
{
"role": "tool",
"tool_call_id": "call_tTOCYvBjgFCKmyNCsWX3nei1",
"content": "## File Modification Times (sorted oldest to newest)\n\n2026-01-31 00:29:42 package.json\n2026-01-31 00:30:26 vite.config.ts\n2026-01-31 00:30:38 src/index.ts\n\n\n## Temporal Clusters (files modified within 10 minutes of each other)\n\nAll 3 files were modified in a single work session.\n",
"name": "get_file_modification_times"
},
{
"role": "tool",
"tool_call_id": "call_R0HIuhxDqJFcCrxMJ4Tm1pV1",
"content": "dependencies (1 files):\n - package.json\n\nsource:src (1 files):\n - src/index.ts\n\nother (1 files):\n - vite.config.ts\n\nSuggestion: These 3 groups might be better as separate commits if they represent different concerns.",
"name": "group_files_by_concern"
},
{
"role": "tool",
"tool_call_id": "call_Cac8A3wLDWu0xAR0QfbQbjUY",
"content": "d51cb41 - chore: bump to 1.0.4-dev.0 (Tim O'Brien, 2 days ago)\na6c4d89 - feat(openai): add model-aware provider and sanitize OpenAI secrets (Tim O'Brien, 2 days ago)\n5a30ec6 - chore: bump to 1.0.2-dev.0 (#15) (Tim O'Brien, 2 days ago)\nb827fd9 - fix: add npm tag detection for prerelease versions in publish workflow (#14) (Tim O'Brien, 2 days ago)\na997339 - chore(release): bump @riotprompt/execution-openai to 1.0.0 (#13) (Tim O'Brien, 4 days ago)\n",
"name": "get_recent_commits"
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_ZWT2yt3OSBOpDyoLmPdgeGBA",
"type": "function",
"function": {
"name": "get_file_history",
"arguments": "{\"filePaths\": [\"src/index.ts\", \"vite.config.ts\", \"package.json\"], \"limit\": 5, \"format\": \"detailed\"}"
}
},
{
"id": "call_3UneC0koSBubTVskhzqTP9I0",
"type": "function",
"function": {
"name": "get_file_dependencies",
"arguments": "{\"filePaths\": [\"src/index.ts\", \"vite.config.ts\"]}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_ZWT2yt3OSBOpDyoLmPdgeGBA",
"content": "d51cb414501d896e020f023beb3e20f285187e60\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 11:29:00 2026 -0800\nchore: bump to 1.0.4-dev.0\n\n\n---\na6c4d897b4a8d8bfdf417633ee4e1fc9ff466d96\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 11:27:51 2026 -0800\nfeat(openai): add model-aware provider and sanitize OpenAI secrets\n\n- Implement OpenAIProvider with model selection via request/options and supportsModel checks\n- Register OpenAI key patterns and validate API key format before client creation\n-... (#16)\n\n* chore: bump to 1.0.2-dev.0\n\n* chore: bump to 1.0.3-dev.0\n\n* feat(openai): add model-aware provider and sanitize OpenAI secrets\n\n- Implement OpenAIProvider with model selection via request/options and supportsModel checks\n- Register OpenAI key patterns and validate API key format before client creation\n- Sanitize/redact secrets in thrown errors using offrecord/spotclean\n- Add offrecord/spotclean dependencies to package.json\n---\n5a30ec636ed7c9d3118d9ce5d115d632de67f4b9\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 09:45:18 2026 -0800\nchore: bump to 1.0.2-dev.0 (#15)\n\n\n---\nb827fd921b95e0e3a0d8718c5c5c219f39390b90\nTim O'Brien (tobrien@discursive.com)\nWed Jan 28 09:29:35 2026 -0800\nfix: add npm tag detection for prerelease versions in publish workflow (#14)\n\n* chore: bump to 1.0.0-dev.0\n\n* Updating version\n\n* fix: add targeted ajv override for vite-plugin-dts\n\n* fix: add ajv@^8 as explicit devDependency to resolve vite-plugin-dts conflict\n\n* fix: add npm tag detection for prerelease versions in publish workflow\n---\na9973394f6c8a39acc701be4e6d7465cd7424b7a\nTim O'Brien (tobrien@discursive.com)\nMon Jan 26 08:55:12 2026 -0800\nchore(release): bump @riotprompt/execution-openai to 1.0.0 (#13)\n\n* chore: bump to 0.0.4-dev.0\n\n* feat(openai): validate API key and sanitize errors with offrecord/spotclean\n\n- Add offrecord + spotclean dependencies and register OpenAI key patterns\n- Validate OPENAI_API_KEY format before creating client\n- Sanitize provider errors to prevent leaking secrets\n- Mark new deps (and node:crypto) as external in Vite build\n\n* chore(release): bump to 1.0.0-dev.0\n\n* chore: run build in precommit script\n\n* chore(gitignore): add .kodrdriv* pattern to ignore kodrdriv internal files\n\n* chore(release): bump @riotprompt/execution-openai to 1.0.0\n---\n",
"name": "get_file_history"
},
{
"role": "tool",
"tool_call_id": "call_3UneC0koSBubTVskhzqTP9I0",
"content": "Files importing src/index.ts:\ntests/index.test.ts\n",
"name": "get_file_dependencies"
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_HplmV0aU5RZbQ9B1g48DtzGA",
"type": "function",
"function": {
"name": "search_codebase",
"arguments": "{\"query\":\"export const VERSION\", \"fileTypes\":[\"ts\",\"js\"]}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_HplmV0aU5RZbQ9B1g48DtzGA",
"content": "src/index.ts-206- * Package version\nsrc/index.ts-207- */\nsrc/index.ts:208:export const VERSION = '0.0.1';\nsrc/index.ts-209-\nsrc/index.ts-210-export default OpenAIProvider;\n",
"name": "search_codebase"
},
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_hGgQ9JRDoD01Ee4q32FqL2jM",
"type": "function",
"function": {
"name": "get_file_content",
"arguments": "{\"filePath\":\"tests/index.test.ts\"}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_hGgQ9JRDoD01Ee4q32FqL2jM",
"content": "/**\n * Tests for execution-openai package\n */\n\nimport { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';\nimport {\n OpenAIProvider,\n createOpenAIProvider,\n VERSION,\n type Message,\n type Request,\n type ExecutionOptions,\n type ProviderResponse,\n type Provider,\n} from '../src/index.js';\n\ndescribe('OpenAIProvider', () => {\n let provider: OpenAIProvider;\n\n beforeEach(() => {\n provider = new OpenAIProvider();\n });\n\n describe('name', () => {\n it('should have name \"openai\"', () => {\n expect(provider.name).toBe('openai');\n });\n });\n\n describe('supportsModel', () => {\n it('should support GPT models', () => {\n expect(provider.supportsModel('gpt-4')).toBe(true);\n expect(provider.supportsModel('gpt-4o')).toBe(true);\n expect(provider.supportsModel('gpt-4-turbo')).toBe(true);\n expect(provider.supportsModel('gpt-3.5-turbo')).toBe(true);\n });\n\n it('should support O1/O3/O4 models', () => {\n expect(provider.supportsModel('o1-preview')).toBe(true);\n expect(provider.supportsModel('o1-mini')).toBe(true);\n expect(provider.supportsModel('o3-mini')).toBe(true);\n expect(provider.supportsModel('o4-mini')).toBe(true);\n });\n\n it('should not support Claude models', () => {\n expect(provider.supportsModel('claude-3-opus-20240229')).toBe(false);\n expect(provider.supportsModel('claude-3-sonnet-20240229')).toBe(false);\n });\n\n it('should not support Gemini models', () => {\n expect(provider.supportsModel('gemini-1.5-pro')).toBe(false);\n expect(provider.supportsModel('gemini-1.5-flash')).toBe(false);\n });\n\n it('should return true for empty/undefined model (default to OpenAI)', () => {\n expect(provider.supportsModel('')).toBe(true);\n });\n });\n\n describe('execute', () => {\n const originalEnv = process.env;\n\n beforeEach(() => {\n process.env = { ...originalEnv };\n delete process.env.OPENAI_API_KEY;\n });\n\n afterEach(() => {\n process.env = originalEnv;\n });\n\n it('should throw error when no API key is provided', async () => {\n const request: Request = {\n messages: [{ role: 'user', content: 'Hello' }],\n model: 'gpt-4',\n addMessage: vi.fn(),\n };\n\n await expect(provider.execute(request)).rejects.toThrow(\n 'OpenAI API key is required'\n );\n });\n\n it('should throw error when API key is empty string', async () => {\n const request: Request = {\n messages: [{ role: 'user', content: 'Hello' }],\n model: 'gpt-4',\n addMessage: vi.fn(),\n };\n\n await expect(provider.execute(request, { apiKey: '' })).rejects.toThrow(\n 'OpenAI API key is required'\n );\n });\n });\n\n describe('implements Provider interface', () => {\n it('should satisfy Provider interface', () => {\n const p: Provider = provider;\n expect(p.name).toBe('openai');\n expect(typeof p.execute).toBe('function');\n expect(typeof p.supportsModel).toBe('function');\n });\n });\n});\n\ndescribe('createOpenAIProvider', () => {\n it('should create a new OpenAIProvider instance', () => {\n const provider = createOpenAIProvider();\n expect(provider).toBeInstanceOf(OpenAIProvider);\n expect(provider.name).toBe('openai');\n });\n});\n\ndescribe('VERSION', () => {\n it('should export version string', () => {\n expect(VERSION).toBe('0.0.1');\n });\n});\n\ndescribe('Type exports', () => {\n it('should export Message type', () => {\n const msg: Message = {\n role: 'user',\n content: 'Hello',\n };\n expect(msg.role).toBe('user');\n expect(msg.content).toBe('Hello');\n });\n\n it('should support all message roles', () => {\n const roles: Message['role'][] = ['user', 'assistant', 'system', 
'developer', 'tool'];\n roles.forEach(role => {\n const msg: Message = { role, content: 'test' };\n expect(msg.role).toBe(role);\n });\n });\n\n it('should support array content', () => {\n const msg: Message = {\n role: 'user',\n content: ['Hello', 'World'],\n };\n expect(Array.isArray(msg.content)).toBe(true);\n });\n\n it('should support null content', () => {\n const msg: Message = {\n role: 'assistant',\n content: null,\n };\n expect(msg.content).toBeNull();\n });\n\n it('should export ExecutionOptions type', () => {\n const opts: ExecutionOptions = {\n apiKey: 'test-key',\n model: 'gpt-4',\n temperature: 0.7,\n maxTokens: 1000,\n timeout: 30000,\n retries: 3,\n };\n expect(opts.apiKey).toBe('test-key');\n expect(opts.temperature).toBe(0.7);\n });\n\n it('should export ProviderResponse type', () => {\n const response: ProviderResponse = {\n content: 'Hello!',\n model: 'gpt-4',\n usage: {\n inputTokens: 10,\n outputTokens: 5,\n },\n };\n expect(response.content).toBe('Hello!');\n expect(response.usage?.inputTokens).toBe(10);\n });\n\n it('should support toolCalls in ProviderResponse', () => {\n const response: ProviderResponse = {\n content: '',\n model: 'gpt-4',\n toolCalls: [\n {\n id: 'call_123',\n type: 'function',\n function: {\n name: 'get_weather',\n arguments: '{\"location\": \"NYC\"}',\n },\n },\n ],\n };\n expect(response.toolCalls).toHaveLength(1);\n expect(response.toolCalls?.[0].function.name).toBe('get_weather');\n });\n});\n\n",
"name": "get_file_content"
},
{
"role": "assistant",
"content": "COMMIT_MESSAGE:\nfeat(openai): inline execution types and harden secret/error sanitization\n\n- Register/validate OpenAI key patterns and sanitize provider errors via offrecord/spotclean\n- Vendor minimal `execution` types to keep build independent\n- Update Vite library build externals and package deps to match new runtime requirements"
}
]
```

</details>

## Generated Commit Message

```
feat(openai): inline execution types and harden secret/error sanitization

- Register/validate OpenAI key patterns and sanitize provider errors via offrecord/spotclean
- Vendor minimal `execution` types to keep build independent
- Update Vite library build externals and package deps to match new runtime requirements
```
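This second commit message refers to registering OpenAI key patterns and sanitizing provider errors. A condensed sketch of that pattern follows, lifted from the src/index.ts content captured in the conversation log above; the `@utilarium/offrecord` and `@utilarium/spotclean` call signatures are assumed from that snippet rather than from the libraries' own documentation, and the two exported helper names are hypothetical:

```typescript
// Condensed from the src/index.ts shown in the conversation log above.
import { getRedactor } from '@utilarium/offrecord';
import { createSafeError, configureSecretGuard } from '@utilarium/spotclean';

// Register OpenAI key patterns so the redactor can detect and validate them.
const redactor = getRedactor();
redactor.register({
    name: 'openai',
    patterns: [/sk-[a-zA-Z0-9]{20,}/g, /sk-proj-[a-zA-Z0-9_-]+/g],
    validator: (key: string) => /^sk-(proj-)?[a-zA-Z0-9_-]{20,}$/.test(key),
    envVar: 'OPENAI_API_KEY',
    description: 'OpenAI API keys',
});

// Replace any matched secret with a fixed marker before it can escape in errors or logs.
configureSecretGuard({
    enabled: true,
    redactionText: '[REDACTED]',
    preservePartial: false,
    preserveLength: 0,
    customPatterns: [
        { name: 'openai', pattern: /sk-[a-zA-Z0-9]{20,}/g, description: 'OpenAI API key' },
    ],
});

// Hypothetical helper: reject keys that do not match the registered format.
export function assertValidOpenAIKey(apiKey: string): void {
    const validation = redactor.validateKey(apiKey, 'openai');
    if (!validation.valid) {
        throw new Error('Invalid OpenAI API key format');
    }
}

// Hypothetical helper: wrap provider failures so thrown errors do not carry the raw key,
// mirroring the catch block in the dumped execute() implementation.
export function sanitizeProviderError(error: Error): Error {
    return createSafeError(error, { provider: 'openai' });
}
```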