boxsafe 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/.directory +2 -0
  2. package/.env.example +3 -0
  3. package/AUDIT_LANG.md +45 -0
  4. package/BOXSAFE_VERSION_NOTES.md +14 -0
  5. package/README.md +4 -0
  6. package/TODO.md +130 -0
  7. package/adapters/index.ts +27 -0
  8. package/adapters/primary/cli-adapter.ts +56 -0
  9. package/adapters/secondary/filesystem/node-filesystem.ts +307 -0
  10. package/adapters/secondary/system/configuration.ts +147 -0
  11. package/ai/caller.ts +42 -0
  12. package/ai/label.ts +33 -0
  13. package/ai/modelConfig.ts +236 -0
  14. package/ai/provider.ts +111 -0
  15. package/boxsafe.config.json +68 -0
  16. package/core/auth/dasktop/cred/CRED.md +112 -0
  17. package/core/auth/dasktop/cred/credLinux.ts +82 -0
  18. package/core/auth/dasktop/cred/credWin.ts +2 -0
  19. package/core/config/defaults/boxsafeDefaults.ts +67 -0
  20. package/core/config/defaults/index.ts +1 -0
  21. package/core/config/loadConfig.ts +133 -0
  22. package/core/loop/about.md +13 -0
  23. package/core/loop/boxConfig.ts +20 -0
  24. package/core/loop/buildExecCommand.ts +76 -0
  25. package/core/loop/cmd/execode.ts +121 -0
  26. package/core/loop/cmd/test.js +3 -0
  27. package/core/loop/execLoop.ts +341 -0
  28. package/core/loop/git/VERSIONING.md +17 -0
  29. package/core/loop/git/commands.ts +11 -0
  30. package/core/loop/git/gitClient.ts +78 -0
  31. package/core/loop/git/index.ts +99 -0
  32. package/core/loop/git/runVersionControlRunner.ts +33 -0
  33. package/core/loop/initNavigator.ts +44 -0
  34. package/core/loop/initTasksManager.ts +35 -0
  35. package/core/loop/runValidation.ts +25 -0
  36. package/core/loop/tasks/AGENT-TASKS.md +36 -0
  37. package/core/loop/tasks/index.ts +96 -0
  38. package/core/loop/toolCalls.ts +168 -0
  39. package/core/loop/toolDispatcher.ts +146 -0
  40. package/core/loop/traceLogger.ts +106 -0
  41. package/core/loop/types.ts +26 -0
  42. package/core/loop/versionControlAdapter.ts +36 -0
  43. package/core/loop/waterfall.ts +404 -0
  44. package/core/loop/writeArtifactAtomically.ts +13 -0
  45. package/core/navigate/NAVIGATE.md +186 -0
  46. package/core/navigate/about.md +128 -0
  47. package/core/navigate/examples.ts +367 -0
  48. package/core/navigate/handler.ts +148 -0
  49. package/core/navigate/index.ts +32 -0
  50. package/core/navigate/navigate.test.ts +372 -0
  51. package/core/navigate/navigator.ts +437 -0
  52. package/core/navigate/types.ts +132 -0
  53. package/core/navigate/utils.ts +146 -0
  54. package/core/paths/paths.ts +33 -0
  55. package/core/ports/index.ts +271 -0
  56. package/core/segments/CONVENTIONS.md +30 -0
  57. package/core/segments/loop/index.ts +18 -0
  58. package/core/segments/map.ts +56 -0
  59. package/core/segments/navigate/index.ts +20 -0
  60. package/core/segments/versionControl/index.ts +18 -0
  61. package/core/util/logger.ts +128 -0
  62. package/docs/AGENT-TASKS.md +36 -0
  63. package/docs/ARQUITETURA_CORRECAO.md +121 -0
  64. package/docs/CONVENTIONS.md +30 -0
  65. package/docs/CRED.md +112 -0
  66. package/docs/L_RAG.md +567 -0
  67. package/docs/NAVIGATE.md +186 -0
  68. package/docs/PRIMARY_ACTORS.md +78 -0
  69. package/docs/SECONDARY_ACTORS.md +174 -0
  70. package/docs/VERSIONING.md +17 -0
  71. package/docs/boxsafe.config.md +472 -0
  72. package/eslint.config.mts +15 -0
  73. package/main.ts +53 -0
  74. package/memo/generated/codelog.md +13 -0
  75. package/memo/state/tasks/state.json +6 -0
  76. package/memo/state/tasks/tasks/task_001.md +2 -0
  77. package/memo/states-logs/logs.txt +7 -0
  78. package/memo/states-logs/trace-mljvrxvi-9g0k4q.jsonl +11 -0
  79. package/memo/states-logs/trace-mljvvc9j-pe9ekj.jsonl +11 -0
  80. package/memo/states-logs/trace-mljvvm1c-wbnqzp.jsonl +11 -0
  81. package/memo/states-logs/trace-mljxecwn-9xh3nw.jsonl +11 -0
  82. package/memo/states-logs/trace-mljxqkfm-ipijik.jsonl +11 -0
  83. package/memo/states-logs/trace-mljxwtrw-3fanky.jsonl +11 -0
  84. package/memo/states-logs/trace-mljxzen3-m8iinh.jsonl +11 -0
  85. package/memo/states-logs/trace-mljyucef-td6odn.jsonl +11 -0
  86. package/memo/states-logs/trace-mljyuprw-b1a6f4.jsonl +11 -0
  87. package/memo/states-logs/trace-mljyvefl-b6yoce.jsonl +11 -0
  88. package/memo/states-logs/trace-mljyxjo4-n7ibj2.jsonl +13 -0
  89. package/memo/states-logs/trace-mljziez5-8drqtn.jsonl +13 -0
  90. package/memo/states-logs/trace-mljziulp-dtd03z.jsonl +13 -0
  91. package/memo/states-logs/trace-mljzjwrq-1p2krb.jsonl +13 -0
  92. package/memo/states-logs/trace-mljzl0i7-b1cqa6.jsonl +13 -0
  93. package/memo/states-logs/trace-mljzmlk6-7kdyls.jsonl +13 -0
  94. package/memo/states-logs/trace-mlk0oj25-xa3dcu.jsonl +13 -0
  95. package/memo/states-logs/trace-mlk1x59q-713huj.jsonl +14 -0
  96. package/memo/states-logs/trace-mlk22dz8-7fd6hq.jsonl +14 -0
  97. package/memo/states-logs/trace-mlk241uy-wmx907.jsonl +14 -0
  98. package/memo/states-logs/trace-mlk2bf5r-yoh1vg.jsonl +15 -0
  99. package/package.json +44 -0
  100. package/pnpm-workspace.yaml +4 -0
  101. package/prompt_improvement_example.md +55 -0
  102. package/remove.txt +1 -0
  103. package/tests/adapters.test.ts +128 -0
  104. package/tests/extractCode.test.ts +26 -0
  105. package/tests/integration.test.ts +83 -0
  106. package/tests/loadConfig.test.ts +25 -0
  107. package/tests/navigatorBoundary.test.ts +17 -0
  108. package/tests/ports.test.ts +84 -0
  109. package/tests/runAllTests.ts +49 -0
  110. package/tests/toolCalls.test.ts +149 -0
  111. package/tests/waterfall.test.ts +52 -0
  112. package/tsconfig.json +32 -0
  113. package/tsup.config.ts +17 -0
  114. package/types.d.ts +96 -0
  115. package/util/ANSI.ts +29 -0
  116. package/util/extractCode.ts +217 -0
  117. package/util/extractToolCalls.ts +80 -0
  118. package/util/logger.ts +125 -0
package/adapters/secondary/system/configuration.ts ADDED
@@ -0,0 +1,147 @@
+ /**
+ * @fileoverview
+ * Adapter for ISystemConfigurationPort
+ * Implementation based on existing loadConfig
+ *
+ * @module adapters/secondary/system/configuration
+ */
+
+ import fs from 'node:fs';
+ import path from 'node:path';
+ import type {
+   ISystemConfigurationPort,
+   ConfigurationResult,
+   ValidationResult,
+   BoxSafeConfig
+ } from '@core/ports';
+ import { DEFAULT_BOXSAFE_CONFIG } from '@core/config/defaults';
+
+ /**
+ * System configuration adapter using JSON file and environment variables
+ */
+ export class SystemConfigurationAdapter implements ISystemConfigurationPort {
+   private defaultConfigPath: string;
+
+   constructor(defaultConfigPath?: string) {
+     this.defaultConfigPath = defaultConfigPath ?? path.resolve(process.cwd(), 'boxsafe.config.json');
+   }
+
+   /**
+   * Load system configurations
+   */
+   async loadConfiguration(configPath?: string): Promise<ConfigurationResult> {
+     const p = configPath ?? this.defaultConfigPath;
+
+     let rawConfig: unknown = null;
+     try {
+       if (fs.existsSync(p)) {
+         rawConfig = JSON.parse(fs.readFileSync(p, 'utf-8'));
+       }
+     } catch {
+       rawConfig = null;
+     }
+
+     const merged = this.deepMerge(DEFAULT_BOXSAFE_CONFIG, rawConfig ?? {});
+
+     // Specific normalization for loops
+     const loopsFallback = typeof DEFAULT_BOXSAFE_CONFIG.limits?.loops === 'number'
+       ? DEFAULT_BOXSAFE_CONFIG.limits.loops
+       : 2;
+
+     if (!merged.limits) merged.limits = {} as any;
+     (merged.limits as any).loops = this.normalizeLoops((merged.limits as any).loops, loopsFallback);
+
+     return {
+       config: merged as BoxSafeConfig,
+       source: { path: p, loaded: Boolean(rawConfig) }
+     };
+   }
+
+   /**
+   * Validate system configurations
+   */
+   async validateConfiguration(config: BoxSafeConfig): Promise<ValidationResult> {
+     const errors: string[] = [];
+     const warnings: string[] = [];
+
+     // Mandatory validations
+     if (!config.project?.workspace) {
+       errors.push('project.workspace is required');
+     }
+
+     if (!config.model?.primary?.provider) {
+       errors.push('model.primary.provider is required');
+     }
+
+     if (!config.model?.primary?.name) {
+       errors.push('model.primary.name is required');
+     }
+
+     // Optional validations with warnings
+     if (!config.commands?.run) {
+       warnings.push('commands.run not defined, using default value');
+     }
+
+     if (!config.interface?.prompt) {
+       warnings.push('interface.prompt not defined, using default value');
+     }
+
+     // Limits validations
+     if (config.limits?.loops !== undefined) {
+       if (typeof config.limits.loops === 'number' && config.limits.loops <= 0) {
+         errors.push('limits.loops must be a positive number');
+       }
+     }
+
+     return {
+       valid: errors.length === 0,
+       errors,
+       warnings
+     };
+   }
+
+   /**
+   * Helper for deep merge of objects
+   */
+   private deepMerge<T>(base: T, override: unknown): T {
+     const isPlainObject = (v: unknown): v is Record<string, unknown> => {
+       return Boolean(v) && typeof v === 'object' && !Array.isArray(v);
+     };
+
+     if (!isPlainObject(base) || !isPlainObject(override)) {
+       return (override ?? base) as T;
+     }
+
+     const out: Record<string, unknown> = { ...(base as any) };
+     for (const [k, v] of Object.entries(override)) {
+       const bv = (base as any)[k];
+       if (isPlainObject(bv) && isPlainObject(v)) {
+         out[k] = this.deepMerge(bv, v);
+       } else {
+         out[k] = v;
+       }
+     }
+     return out as T;
+   }
+
+   /**
+   * Helper to normalize loops value
+   */
+   private normalizeLoops(v: unknown, fallback: number): number {
+     if (typeof v === 'number' && Number.isFinite(v)) return v;
+     if (typeof v === 'string') {
+       const trimmed = v.trim().toLowerCase();
+       if (trimmed === 'infinity') return Number.MAX_SAFE_INTEGER;
+       const n = Number(trimmed);
+       if (Number.isFinite(n)) return n;
+     }
+     return fallback;
+   }
+ }
+
+ /**
+ * Factory function to create the adapter
+ */
+ export function createSystemConfigurationAdapter(configPath?: string): SystemConfigurationAdapter {
+   return new SystemConfigurationAdapter(configPath);
+ }
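For orientation, a minimal usage sketch of the adapter above, assuming the relative import path from the package root and the return shapes (`config`/`source`, `errors`/`warnings`) shown in the diff; the `bootstrap` function and its error handling are illustrative, not part of the package.

```ts
// Sketch only: wire the configuration adapter into a caller.
// The import path is an assumption based on the file's location in this package.
import { createSystemConfigurationAdapter } from './adapters/secondary/system/configuration';

async function bootstrap(): Promise<void> {
  // With no argument, the adapter falls back to ./boxsafe.config.json in the working directory.
  const configuration = createSystemConfigurationAdapter();

  const { config, source } = await configuration.loadConfiguration();
  console.log(`config loaded from ${source.path} (file present: ${source.loaded})`);

  const result = await configuration.validateConfiguration(config);
  if (!result.valid) {
    throw new Error(`Invalid boxsafe config: ${result.errors.join('; ')}`);
  }
  for (const warning of result.warnings) console.warn(warning);
}

bootstrap().catch((err) => {
  console.error(err);
  process.exit(1);
});
```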
package/ai/caller.ts ADDED
@@ -0,0 +1,42 @@
+ /**
+ * @fileoverview Unified LLM runner. Executes a prompt and persists the result.
+ * @module pack
+ * This code must run from the project root; it is therefore invoked from the
+ * main method at the project root.
+ */
+
+ import "dotenv/config";
+ import * as path from "path";
+ import fs from "fs/promises";
+ import { createLLM } from "@ai/provider";
+ import { LService, LModel } from "@ai/label";
+
+ interface RunnerConfig {
+   service: LService;
+   model: LModel;
+   outputPath: string;
+ }
+
+ const DEFAULT_CONFIG: RunnerConfig = {
+   service: LService.GOOGLE,
+   model: LModel.GEMINI,
+   outputPath: process.env.BOXSAFE_MARKDOWN_PATH?.trim() || "./memo/generated/codelog.md",
+ };
+
+ const writeOutput = async (filePath: string, data: string): Promise<void> => {
+   const resolved = path.isAbsolute(filePath) ? filePath : path.join(process.cwd(), filePath);
+
+   await fs.mkdir(path.dirname(resolved), { recursive: true });
+   await fs.writeFile(resolved, data, "utf8");
+ };
+
+ export const runLLM = async (
+   prompt: string,
+   llm: ReturnType<typeof createLLM>,
+   config: RunnerConfig = DEFAULT_CONFIG,
+ ): Promise<void> => {
+   const text = await llm.generate(prompt);
+   await writeOutput(config.outputPath, text);
+ };
+
+ // ── entry point ──────────────────────────────────────────────────────────────
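A minimal sketch of how this runner composes with the provider factory, assuming the `@ai/caller` alias resolves like the other `@ai/*` imports in the package; the output path is hypothetical. Using the mock service means no API key is required.

```ts
// Illustrative only: run the prompt through the mock provider and persist the result.
// "@ai/caller" as a module specifier is an assumption mirroring the package's @ai/* aliases.
import { createLLM } from "@ai/provider";
import { LService, LModel } from "@ai/label";
import { runLLM } from "@ai/caller";

async function demo(): Promise<void> {
  const llm = createLLM(LService.MOCK, LModel.MOCK);

  // runLLM only reads outputPath from the config; the file is created if missing.
  await runLLM("Say hello", llm, {
    service: LService.MOCK,
    model: LModel.MOCK,
    outputPath: "./memo/generated/demo.md", // hypothetical output location
  });
}

demo().catch(console.error);
```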
package/ai/label.ts ADDED
@@ -0,0 +1,33 @@
+ /**
+ * @fileoverview Service and model identifiers with compatibility constraints.
+ * @module pack/ai
+ */
+
+ export enum LService {
+   MOCK = "mock",
+   OPENAI = "openai",
+   GOOGLE = "google",
+ }
+
+ export enum LModel {
+   MOCK = "mock",
+   GPT = "gpt-4o-mini",
+   GEMINI = "gemini-2.5-flash",
+ }
+
+ /* Valid model options per service */
+ export const SERVICE_MODELS: Record<LService, LModel[]> = {
+   [LService.MOCK]: [LModel.MOCK],
+   [LService.OPENAI]: [LModel.GPT],
+   [LService.GOOGLE]: [LModel.GEMINI],
+ };
+
+ export function validateServiceModel(service: LService, model: LModel): void {
+   const allowed = SERVICE_MODELS[service];
+   if (!allowed.includes(model)) {
+     throw new Error(
+       `Model "${model}" is not supported by service "${service}". ` +
+       `Allowed: ${allowed.join(", ")}`
+     );
+   }
+ }
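A small sketch of the compatibility check in practice, based only on the `SERVICE_MODELS` map above and assuming the `@ai/label` alias resolves as configured in the package.

```ts
import { LService, LModel, validateServiceModel, SERVICE_MODELS } from "@ai/label";

// Passes: gemini-2.5-flash is the model listed for the google service.
validateServiceModel(LService.GOOGLE, LModel.GEMINI);

// Throws: gpt-4o-mini is not listed under the google service.
try {
  validateServiceModel(LService.GOOGLE, LModel.GPT);
} catch (err) {
  console.error((err as Error).message);
}

// Enumerate the service/model pairs the map currently allows.
for (const [service, models] of Object.entries(SERVICE_MODELS)) {
  console.log(`${service}: ${models.join(", ")}`);
}
```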
package/ai/modelConfig.ts ADDED
@@ -0,0 +1,236 @@
+ /**
+ * Model Configuration - BoxSafe
+ *
+ * Centralized system for managing model profiles
+ * with capabilities, costs, and prompt strategies.
+ */
+
+ export enum ModelCapability {
+   LOW = 'low',             // < 8k tokens (Llama 7B, local models)
+   MEDIUM = 'medium',       // 8k-32k tokens (GPT-3.5, Claude Haiku)
+   HIGH = 'high',           // 32k-128k tokens (GPT-4, Claude Sonnet)
+   EXCELLENT = 'excellent'  // >128k tokens (GPT-4 Turbo, Claude Opus)
+ }
+
+ export interface ModelProfile {
+   name: string;
+   capability: ModelCapability;
+   maxTokens: number;
+   reminderFrequency: number;  // interactions before reminder
+   contextThreshold: number;   // % context usage for reminder trigger
+   costPerToken?: number;      // for cost optimization
+   provider: 'local' | 'openai' | 'anthropic' | 'google';
+   description?: string;
+ }
+
+ /**
+ * Known model profiles catalog
+ * Easily extensible for new models
+ */
+ export const MODEL_PROFILES: Record<string, ModelProfile> = {
+   // === LOCAL MODELS ===
+   'llama-7b': {
+     name: 'Llama 7B',
+     capability: ModelCapability.LOW,
+     maxTokens: 8192,
+     reminderFrequency: 3, // Remind every 3 interactions
+     contextThreshold: 60, // Remind when using 60% of context
+     provider: 'local',
+     description: 'Efficient local model for simple tasks'
+   },
+   'llama-13b': {
+     name: 'Llama 13B',
+     capability: ModelCapability.LOW,
+     maxTokens: 8192,
+     reminderFrequency: 2, // More frequent due to higher capability
+     contextThreshold: 50,
+     provider: 'local',
+     description: 'Local model with more capability than Llama 7B'
+   },
+   'mistral-7b': {
+     name: 'Mistral 7B',
+     capability: ModelCapability.MEDIUM,
+     maxTokens: 16384,
+     reminderFrequency: 5,
+     contextThreshold: 70,
+     provider: 'local',
+     description: 'Local model with good cost-benefit ratio'
+   },
+   'codellama-34b': {
+     name: 'CodeLlama 34B',
+     capability: ModelCapability.MEDIUM,
+     maxTokens: 16384,
+     reminderFrequency: 4,
+     contextThreshold: 65,
+     provider: 'local',
+     description: 'Local model specialized in code'
+   },
+
+   // === OPENAI ===
+   'gpt-3.5-turbo': {
+     name: 'GPT-3.5 Turbo',
+     capability: ModelCapability.MEDIUM,
+     maxTokens: 16384,
+     reminderFrequency: 8,
+     contextThreshold: 75,
+     costPerToken: 0.0000015,
+     provider: 'openai',
+     description: 'Fast and economical model for production'
+   },
+   'gpt-4': {
+     name: 'GPT-4',
+     capability: ModelCapability.HIGH,
+     maxTokens: 32768,
+     reminderFrequency: 15,
+     contextThreshold: 85,
+     costPerToken: 0.00003,
+     provider: 'openai',
+     description: 'High reasoning capability'
+   },
+   'gpt-4-turbo': {
+     name: 'GPT-4 Turbo',
+     capability: ModelCapability.EXCELLENT,
+     maxTokens: 128000,
+     reminderFrequency: 25,
+     contextThreshold: 90,
+     costPerToken: 0.00001,
+     provider: 'openai',
+     description: 'Maximum performance with optimized cost'
+   },
+   'gpt-4o': {
+     name: 'GPT-4o',
+     capability: ModelCapability.EXCELLENT,
+     maxTokens: 128000,
+     reminderFrequency: 30,
+     contextThreshold: 90,
+     costPerToken: 0.000005,
+     provider: 'openai',
+     description: 'Latest model with excellent cost-benefit'
+   },
+
+   // === ANTHROPIC ===
+   'claude-3-haiku': {
+     name: 'Claude 3 Haiku',
+     capability: ModelCapability.MEDIUM,
+     maxTokens: 200000,
+     reminderFrequency: 10,
+     contextThreshold: 80,
+     costPerToken: 0.00000025,
+     provider: 'anthropic',
+     description: 'Fast and economical for simple tasks'
+   },
+   'claude-3-sonnet': {
+     name: 'Claude 3 Sonnet',
+     capability: ModelCapability.HIGH,
+     maxTokens: 200000,
+     reminderFrequency: 20,
+     contextThreshold: 85,
+     costPerToken: 0.000003,
+     provider: 'anthropic',
+     description: 'Excellent for complex code'
+   },
+   'claude-3-opus': {
+     name: 'Claude 3 Opus',
+     capability: ModelCapability.EXCELLENT,
+     maxTokens: 200000,
+     reminderFrequency: 25,
+     contextThreshold: 90,
+     costPerToken: 0.000015,
+     provider: 'anthropic',
+     description: 'Maximum capability for critical tasks'
+   },
+
+   // === GOOGLE ===
+   'gemini-1.5-flash': {
+     name: 'Gemini 1.5 Flash',
+     capability: ModelCapability.MEDIUM,
+     maxTokens: 1048576,
+     reminderFrequency: 7,
+     contextThreshold: 75,
+     costPerToken: 0.000000075,
+     provider: 'google',
+     description: 'Fast and economical from Google'
+   },
+   'gemini-1.5-pro': {
+     name: 'Gemini 1.5 Pro',
+     capability: ModelCapability.HIGH,
+     maxTokens: 2097152,
+     reminderFrequency: 18,
+     contextThreshold: 85,
+     costPerToken: 0.00000125,
+     provider: 'google',
+     description: 'High capability from Gemini Pro'
+   },
+ };
+
+ /**
+ * Utilities for model management
+ */
+ export class ModelConfigManager {
+
+   /**
+   * Gets model profile with safe fallback
+   */
+   static getModelProfile(modelName: string): ModelProfile {
+     const profile = MODEL_PROFILES[modelName];
+     if (!profile) {
+       console.warn(`Model ${modelName} not found, using default GPT-3.5 Turbo`);
+       return MODEL_PROFILES['gpt-3.5-turbo'] as ModelProfile;
+     }
+     return profile;
+   }
+
+   /**
+   * Lists all models by capability
+   */
+   static getModelsByCapability(capability: ModelCapability): ModelProfile[] {
+     return Object.values(MODEL_PROFILES).filter(model => model.capability === capability);
+   }
+
+   /**
+   * Lists all models by provider
+   */
+   static getModelsByProvider(provider: string): ModelProfile[] {
+     return Object.values(MODEL_PROFILES).filter(model => model.provider === provider);
+   }
+
+   /**
+   * Calculates estimated cost for an interaction
+   */
+   static estimateCost(modelName: string, tokenCount: number): number {
+     const profile = this.getModelProfile(modelName);
+     return profile.costPerToken ? tokenCount * profile.costPerToken : 0;
+   }
+
+   /**
+   * Checks if model is local vs API
+   */
+   static isLocalModel(modelName: string): boolean {
+     const profile = this.getModelProfile(modelName);
+     return profile.provider === 'local';
+   }
+
+   /**
+   * Gets usage recommendations based on profile
+   */
+   static getRecommendations(modelName: string): {
+     maxPromptLength: number;
+     reminderFrequency: string;
+     costOptimization: string;
+   } {
+     const profile = this.getModelProfile(modelName);
+
+     return {
+       maxPromptLength: Math.floor(profile.maxTokens * 0.7), // Leave 30% for response
+       reminderFrequency: `Every ${profile.reminderFrequency} interactions`,
+       costOptimization: profile.costPerToken
+         ? `Cost: ${(profile.costPerToken * 1000).toFixed(6)} per 1k tokens`
+         : 'Local model - no costs'
+     };
+   }
+ }
+
+ /**
+ * Default configuration for new models
+ */
+ export const DEFAULT_MODEL_CONFIG: ModelProfile = MODEL_PROFILES['gpt-3.5-turbo']!;
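For illustration, a short sketch exercising the catalog above; the model names come from `MODEL_PROFILES`, while the relative import path is an assumption about how the module would be consumed from the package root.

```ts
// Illustrative use of the profile catalog and its utility class.
import { ModelConfigManager, ModelCapability } from "./ai/modelConfig"; // path assumed

// Unknown names fall back to the gpt-3.5-turbo profile (with a console warning).
const profile = ModelConfigManager.getModelProfile("gpt-4o");
console.log(profile.maxTokens); // 128000

// Estimated cost for a 2,000-token interaction: 2000 * 0.000005 = 0.01.
console.log(ModelConfigManager.estimateCost("gpt-4o", 2000));

// All catalogued models tagged EXCELLENT (>128k-token class).
const best = ModelConfigManager.getModelsByCapability(ModelCapability.EXCELLENT);
console.log(best.map((m) => m.name)); // GPT-4 Turbo, GPT-4o, Claude 3 Opus

// The prompt budget reserves 30% of the window for the response.
console.log(ModelConfigManager.getRecommendations("claude-3-haiku").maxPromptLength); // 140000
```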
package/ai/provider.ts ADDED
@@ -0,0 +1,111 @@
+ /**
+ * @fileoverview LLM provider factory with pluggable adapters.
+ * @module pack/ai
+ */
+
+ import { generateText } from "ai";
+ import { openai } from "@ai-sdk/openai";
+ import { google } from "@ai-sdk/google";
+ import { LService, LModel, validateServiceModel } from "@ai/label";
+ import { Logger } from '@util/logger';
+
+ const logger = Logger.createModuleLogger('AI');
+
+ // Support KEYGemini environment variable as the Google/Gemini API key.
+ // Some environments expose the Gemini key under a custom name (e.g., KEYGemini).
+ // Map it to the environment variables the Google SDKs commonly check:
+ // - `GOOGLE_GENERATIVE_AI_API_KEY` (used by Google Generative AI / Gemini)
+ // - `GOOGLE_API_KEY` (older/alternate name)
+ const _geminiKey = process.env.KEYGemini ?? process.env.KEY_GEMINI ?? process.env.GOOGLE_GENERATIVE_AI_API_KEY;
+ if (_geminiKey) {
+   if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
+     process.env.GOOGLE_GENERATIVE_AI_API_KEY = _geminiKey;
+   }
+   if (!process.env.GOOGLE_API_KEY) {
+     process.env.GOOGLE_API_KEY = _geminiKey;
+   }
+ }
+
+ interface LLMContract {
+   generate(prompt: string): Promise<string>;
+ }
+
+ type SDKModelFn<M extends LModel> =
+   (model: M) => Parameters<typeof generateText>[0]["model"];
+
+ /** SDK model initializers keyed by service */
+ const SDK_PROVIDERS = {
+   [LService.OPENAI]: (model: LModel.GPT) => openai(model),
+   [LService.GOOGLE]: (model: LModel.GEMINI) => google(model),
+ } satisfies {
+   [LService.OPENAI]: SDKModelFn<LModel.GPT>;
+   [LService.GOOGLE]: SDKModelFn<LModel.GEMINI>;
+ };
+
+ function createSDKAdapter(
+   model: Parameters<typeof generateText>[0]["model"]
+ ): LLMContract {
+   return {
+     async generate(prompt: string): Promise<string> {
+       const { text } = await generateText({ model, prompt });
+       return text;
+     },
+   };
+ }
+
+ const MOCK_ADAPTER: LLMContract = {
+   async generate(): Promise<string> {
+     // Return a minimal bash code block so extractCode can find runnable code.
+     // This keeps mock behavior useful for testing the loop pipeline.
+     return "```bash\necho \"Mock response\"\n```";
+   },
+ };
+
+ const SERVICE_ADAPTERS = {
+   [LService.OPENAI]: (model: LModel.GPT) =>
+     createSDKAdapter(SDK_PROVIDERS[LService.OPENAI](model)),
+
+   [LService.GOOGLE]: (model: LModel.GEMINI) =>
+     createSDKAdapter(SDK_PROVIDERS[LService.GOOGLE](model)),
+ };
+
+ // ____________________________________________________________________________
+ // ── Factory Function ────────────────────────────────────────────────────────
+ export function createLLM(
+   service: LService.MOCK,
+   model: LModel.MOCK
+ ): LLMContract;
+
+ export function createLLM(
+   service: LService.OPENAI,
+   model: LModel.GPT
+ ): LLMContract;
+
+ export function createLLM(
+   service: LService.GOOGLE,
+   model: LModel.GEMINI
+ ): LLMContract;
+
+ export function createLLM(
+   service: LService,
+   model: LModel
+ ): LLMContract;
+
+ export function createLLM(service: LService, model: LModel): LLMContract {
+   // Log chosen service/model and presence of Gemini key for diagnostics
+   logger.info(`LLM Provider - requested service=${service} model=${model}`);
+   logger.info(`LLM Provider - GOOGLE_GENERATIVE_AI_API_KEY present=${Boolean(process.env.GOOGLE_GENERATIVE_AI_API_KEY)}`);
+
+   validateServiceModel(service, model);
+
+   if (service === LService.MOCK) return MOCK_ADAPTER;
+
+   const create = SERVICE_ADAPTERS[service];
+   if (!create) {
+     const errorMsg = `No SDK provider registered for service "${service}"`;
+     logger.error(errorMsg);
+     throw new Error(errorMsg);
+   }
+
+   return create(model as never);
+ }
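A sketch of calling the factory above, assuming `GOOGLE_GENERATIVE_AI_API_KEY` (or `KEYGemini`) is set in the environment and the `@ai/*` aliases resolve; swap in `LService.MOCK` / `LModel.MOCK` to run without credentials.

```ts
// Sketch only: request the Google adapter and generate text from a prompt.
import "dotenv/config";
import { createLLM } from "@ai/provider";
import { LService, LModel } from "@ai/label";

async function main(): Promise<void> {
  // The overloads pin each service to its compatible model at compile time.
  const llm = createLLM(LService.GOOGLE, LModel.GEMINI);
  const text = await llm.generate("Summarize what boxsafe.config.json controls.");
  console.log(text);
}

main().catch(console.error);
```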
package/boxsafe.config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "project": {
+     "workspace": "./",
+     "testDir": "./",
+     "todo": "./TODO.md",
+     "versionControl": {
+       "before": false,
+       "after": false,
+       "generateNotes": true
+     }
+   },
+
+   "model": {
+     "primary": {
+       "provider": "google",
+       "name": "gemini-2.5-flash"
+     },
+     "fallback": [],
+     "endpoint": null,
+     "parameters": {}
+   },
+
+   "smartRotation": {
+     "enabled": false,
+     "simple": [],
+     "complex": []
+   },
+
+   "limits": {
+     "tokens": 100000,
+     "loops": 1,
+     "timeout": {
+       "enabled": false,
+       "duration": "1h",
+       "notify": true
+     }
+   },
+
+   "sandbox": {
+     "enabled": true,
+     "engine": "docker",
+     "memory": "512m",
+     "cpu": 0.5,
+     "network": "none"
+   },
+
+   "commands": {
+     "setup": "npm install",
+     "run": "echo OK",
+     "test": null
+   },
+
+   "interface": {
+     "channel": "terminal",
+     "prompt": "create a file called lv_test.ts; this file should print hello world in the terminal",
+     "notifications": {
+       "whatsapp": false,
+       "telegram": false,
+       "slack": false,
+       "email": false
+     }
+   },
+
+   "teach": {
+     "urls": [],
+     "files": []
+   }
+ }
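As a sketch of how the configuration adapter shown earlier treats this file: the required fields (`project.workspace`, `model.primary.provider`, `model.primary.name`) are present, `commands.run` and `interface.prompt` are set so no warnings fire, and `limits.loops` accepts either a finite number or the string "infinity". The import path is assumed relative to the package root.

```ts
// Sketch only: load and validate the shipped boxsafe.config.json.
import { createSystemConfigurationAdapter } from './adapters/secondary/system/configuration'; // path assumed

async function check(): Promise<void> {
  const adapter = createSystemConfigurationAdapter('./boxsafe.config.json');
  const { config } = await adapter.loadConfiguration();

  const report = await adapter.validateConfiguration(config);
  console.log(report.valid);    // true for the file shown above
  console.log(report.warnings); // [] - commands.run and interface.prompt are both defined

  // "loops": 1 stays 1; a value of "infinity" would be normalized to Number.MAX_SAFE_INTEGER.
  console.log(config.limits?.loops);
}

check().catch(console.error);
```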