arki 0.0.8 → 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -68,6 +68,7 @@ arki -p /path/to/project --debug
68
68
  - `/exit` or `/quit` - Exit program
69
69
  - `/clear` - Clear conversation history
70
70
  - `/debug` - Toggle debug mode
71
+ - `/lowcost` - Toggle lowcost mode (e.g., OpenAI Flex)
71
72
  - `/help` - Show help
72
73
 
73
74
  ### Debug Mode
@@ -97,9 +98,13 @@ On first run, Arki copies the default configuration template to this location.
97
98
  Each project can have its own configuration in `.arki/` directory:
98
99
 
99
100
  - `.arki/config.json` - Project-specific settings (overrides global config)
100
- - `.arki/state.json` - Project state and cache
101
+ - `.arki/state.json` - Project state (`initialized` flag)
102
+ - `.arki/project_structure.md` - Auto-generated file tree structure
103
+ - `.arki/architecture.xml` - Auto-generated modular architecture graph
101
104
 
102
105
  On first run in a new project, Arki will ask if you trust the project before initializing the `.arki/` directory.
106
+ When you trust a project, the Init agent automatically analyzes the codebase and generates documentation files.
107
+ To regenerate the architecture files, set `initialized` to `false` in `.arki/state.json`.
103
108
  Use `--init` to skip the prompt in non-interactive environments.
104
109
 
105
110
  ### Reset to Factory Defaults
@@ -1,4 +1,3 @@
1
1
  {
2
- "initialized": true,
3
- "createdAt": ""
2
+ "initialized": false
4
3
  }
@@ -2,13 +2,18 @@
2
2
  "agents": {
3
3
  "Arki": {
4
4
  "model": "gpt-5.1",
5
- "flex": true,
5
+ "lowcost": false,
6
6
  "reasoningEffort": "medium"
7
7
  },
8
8
  "Coder": {
9
9
  "model": "gpt-5.2",
10
- "flex": true,
10
+ "lowcost": false,
11
11
  "reasoningEffort": "high"
12
+ },
13
+ "Init": {
14
+ "model": "gpt-5.1",
15
+ "lowcost": false,
16
+ "reasoningEffort": "medium"
12
17
  }
13
18
  }
14
19
  }
package/dist/index.d.ts CHANGED
@@ -95,6 +95,13 @@ declare class AsyncToolResultMsg extends Msg {
95
95
  * Symbol indicating tool has detailed manual (needs to call read_tool_manual before use)
96
96
  */
97
97
  declare const HAS_MANUAL = "\uD83D\uDCD8";
98
+ /**
99
+ * Context passed to tool execution
100
+ */
101
+ interface ToolContext {
102
+ /** ID of the agent calling the tool */
103
+ agentId: string;
104
+ }
98
105
  /**
99
106
  * Tool class
100
107
  */
@@ -112,7 +119,7 @@ declare class Tool {
112
119
  parameters: Record<string, unknown>;
113
120
  required: string[];
114
121
  manualContent: string;
115
- execute: (args: Record<string, unknown>) => Promise<string | {
122
+ execute: (args: Record<string, unknown>, context: ToolContext) => Promise<string | {
116
123
  content: string;
117
124
  isError?: boolean;
118
125
  }>;
@@ -132,7 +139,7 @@ declare class Tool {
132
139
  /**
133
140
  * Execute tool (with error handling and logging)
134
141
  */
135
- run(args: Record<string, unknown>): Promise<ToolResult>;
142
+ run(args: Record<string, unknown>, context: ToolContext): Promise<ToolResult>;
136
143
  }
137
144
 
138
145
  /**
@@ -157,35 +164,10 @@ declare class Procedure {
157
164
  };
158
165
  }
159
166
 
160
- /** OS type definition */
161
- interface OS_TYPE {
162
- /** Operating system name: 'windows' | 'mac' | 'linux' | 'other' */
163
- name: 'windows' | 'mac' | 'linux' | 'other';
164
- /** Operating system version */
165
- version: string;
166
- }
167
- /** Global OS information */
168
- declare const OS: OS_TYPE;
169
- /** Working directory */
170
- declare let workingDir: string;
171
- /** Set working directory (for testing) */
172
- declare function setWorkingDir(dir: string): void;
173
- /** Global paths configuration */
174
- declare const PATHS: {
175
- /** Global config directory (~/.config/arki or %APPDATA%\arki) */
176
- globalConfig: string;
177
- /** Project config directory (.arki/) - returns path based on current workingDir */
178
- readonly projectConfig: string;
179
- /** Package's global config template directory */
180
- globalTemplate: string;
181
- /** Package's project config template directory */
182
- projectTemplate: string;
183
- };
184
-
185
167
  /**
186
168
  * Reasoning effort
187
169
  */
188
- type ReasoningEffort$1 = 'low' | 'medium' | 'high';
170
+ type ReasoningEffort = 'low' | 'medium' | 'high';
189
171
  /**
190
172
  * Platform-specific options for adapter
191
173
  */
@@ -213,6 +195,109 @@ declare abstract class Adapter {
213
195
  protected apiKey: string;
214
196
  constructor(apiKey: string);
215
197
  abstract chat(model: string, messages: Msg[], tools: Tool[], options: AdapterOptions, onChunk?: (chunk: string) => void): Promise<AdapterResponse>;
198
+ /**
199
+ * Count tokens for a complete request (messages + tools)
200
+ * @param model Model identifier
201
+ * @param messages Array of messages
202
+ * @param tools Array of tools
203
+ * @returns Estimated token count
204
+ */
205
+ abstract countTokens(model: string, messages: Msg[], tools: Tool[]): number;
206
+ /**
207
+ * Count tokens for plain text
208
+ * @param model Model identifier
209
+ * @param text Plain text to count tokens for
210
+ * @returns Token count
211
+ */
212
+ abstract countTextTokens(model: string, text: string): number;
213
+ }
214
+
215
+ interface AgentResponse {
216
+ response: string;
217
+ toolCalls: Array<{
218
+ name: string;
219
+ arguments: Record<string, unknown>;
220
+ result: string;
221
+ }>;
222
+ usage?: {
223
+ promptTokens: number;
224
+ completionTokens: number;
225
+ totalTokens: number;
226
+ cachedTokens?: number;
227
+ };
228
+ }
229
+ interface AgentConfig {
230
+ /** Agent name for display */
231
+ name: string;
232
+ adapter: Adapter;
233
+ model: string;
234
+ tools: Tool[];
235
+ platformOptions?: AdapterOptions;
236
+ messages: Msg[];
237
+ /** Maximum completion tokens for LLM response */
238
+ maxCompletionTokens?: number;
239
+ /** Context compression threshold (0.0-1.0), triggers compression when exceeded. Default: 0.9 */
240
+ contextCompressionThreshold?: number;
241
+ }
242
+ declare class Agent {
243
+ private config;
244
+ private messages;
245
+ private toolsMap;
246
+ /** Pending async tools: asyncCallId -> info */
247
+ private pendingAsyncTools;
248
+ constructor(config: AgentConfig);
249
+ /** Get agent name */
250
+ get name(): string;
251
+ /**
252
+ * Render template string, replacing {{variable}} style variables
253
+ */
254
+ static renderTemplate(template: string, variables: Record<string, string | number | boolean>): string;
255
+ /**
256
+ * Check and add placeholder results for pending async tools
257
+ * Called before appending new user message
258
+ */
259
+ private insertAsyncPlaceholders;
260
+ /**
261
+ * Handle async tool completion
262
+ */
263
+ private handleAsyncToolComplete;
264
+ run(userInput: string): Promise<AgentResponse>;
265
+ reset(): this;
266
+ /**
267
+ * Set a platform option at runtime
268
+ */
269
+ setPlatformOption(key: string, value: unknown): this;
270
+ /**
271
+ * Get a platform option value
272
+ */
273
+ getPlatformOption(key: string): unknown;
274
+ /**
275
+ * Get model's context window size
276
+ */
277
+ getContextWindow(): number;
278
+ /**
279
+ * Get compression threshold
280
+ */
281
+ getCompressionThreshold(): number;
282
+ /**
283
+ * Count current context tokens (messages + tools)
284
+ */
285
+ getContextTokens(): number;
286
+ /**
287
+ * Compress tool results by replacing long outputs with placeholders
288
+ * Returns the number of tool results compressed
289
+ */
290
+ compressToolResults(): number;
291
+ /**
292
+ * Summarize the conversation using LLM
293
+ * Keeps system messages and replaces conversation with summary
294
+ */
295
+ summarizeConversation(): Promise<void>;
296
+ /**
297
+ * Check context size and manage if threshold exceeded
298
+ * Called before each LLM call
299
+ */
300
+ checkAndManageContext(): Promise<void>;
216
301
  }
217
302
 
218
303
  /** Global tool registry */
@@ -220,30 +305,104 @@ declare const TOOLS: Record<string, Tool>;
220
305
  /** Global procedure registry */
221
306
  declare const PROCEDURES: Record<string, Procedure>;
222
307
  /** Global adapter registry by platform */
223
- declare const adapters: Record<string, Adapter>;
308
+ declare const ADAPTERS: Record<string, Adapter>;
309
+ /** Global agent registry by name */
310
+ declare const AGENTS: Record<string, Agent>;
311
+
312
+ /**
313
+ * FileSystem class provides unified file and directory operations
314
+ */
315
+ declare class FileSystem {
316
+ /**
317
+ * Check if file exists (returns false for directories)
318
+ */
319
+ fileExists(filePath: string): Promise<boolean>;
320
+ /**
321
+ * Read file content as string
322
+ * Returns null if file doesn't exist
323
+ */
324
+ readFile(filePath: string): Promise<string | null>;
325
+ /**
326
+ * Write string content to file
327
+ */
328
+ writeFile(filePath: string, content: string): Promise<void>;
329
+ /**
330
+ * Read JSON file safely
331
+ * Returns null if file doesn't exist or is invalid JSON
332
+ */
333
+ readJsonFile<T>(filePath: string): Promise<T | null>;
334
+ /**
335
+ * Write JSON file with pretty formatting
336
+ */
337
+ writeJsonFile(filePath: string, data: unknown): Promise<void>;
338
+ /**
339
+ * Check if directory exists (returns false for files)
340
+ */
341
+ dirExists(dirPath: string): Promise<boolean>;
342
+ /**
343
+ * Create directory recursively (no error if exists)
344
+ */
345
+ mkdir(dirPath: string): Promise<void>;
346
+ /**
347
+ * Copy directory recursively (creates dest if not exists)
348
+ */
349
+ copyDir(src: string, dest: string): Promise<void>;
350
+ }
351
+ /** Global FileSystem instance for all file/directory operations */
352
+ declare const fileSystem: FileSystem;
353
+
354
+ /** OS type definition */
355
+ interface OS_TYPE {
356
+ /** Operating system name: 'windows' | 'mac' | 'linux' | 'other' */
357
+ name: 'windows' | 'mac' | 'linux' | 'other';
358
+ /** Operating system version */
359
+ version: string;
360
+ }
361
+ /** Global OS information */
362
+ declare const OS: OS_TYPE;
363
+ /** Working directory */
364
+ declare let workingDir: string;
365
+ /** Set working directory (for testing) */
366
+ declare function setWorkingDir(dir: string): void;
367
+ /** Global paths configuration */
368
+ declare const PATHS: {
369
+ /** Global config directory (~/.config/arki or %APPDATA%\arki) */
370
+ globalConfig: string;
371
+ /** Project config directory (.arki/) - returns path based on current workingDir */
372
+ readonly projectConfig: string;
373
+ /** Package's global config template directory */
374
+ globalTemplate: string;
375
+ /** Package's project config template directory */
376
+ projectTemplate: string;
377
+ };
378
+ /**
379
+ * Path variable definitions
380
+ * Format: $VAR or ${VAR}
381
+ */
382
+ declare const PATH_VARS: Record<string, () => string>;
224
383
  /**
225
- * Get adapter by platform name
384
+ * Expand path variables in a string
385
+ * Supports: $VAR and ${VAR} formats
386
+ * @param inputPath Path string that may contain variables
387
+ * @returns Path with variables expanded
226
388
  */
227
- declare function getAdapter(platform: string): Adapter;
389
+ declare function expandPathVars(inputPath: string): string;
390
+
228
391
  /** Initialize global state */
229
392
  declare function init(cwd?: string, forceInit?: boolean): Promise<void>;
230
393
 
231
394
  /**
232
395
  * Agent type
233
396
  */
234
- type AgentType = 'Arki' | 'Coder';
235
- /**
236
- * Reasoning effort
237
- */
238
- type ReasoningEffort = 'low' | 'medium' | 'high';
397
+ type AgentType = 'Arki' | 'Coder' | 'Init';
239
398
  /**
240
399
  * Agent model configuration
241
400
  */
242
401
  interface AgentModelConfig {
243
402
  /** Model ID (provider is derived from MODELS) */
244
403
  model: string;
245
- /** Use Flex API (low priority, low cost) - OpenAI specific */
246
- flex?: boolean;
404
+ /** Low cost mode - adapter specific (e.g., OpenAI flex mode) */
405
+ lowcost?: boolean;
247
406
  /** Reasoning effort (thinking mode) */
248
407
  reasoningEffort?: ReasoningEffort;
249
408
  }
@@ -255,6 +414,13 @@ interface GlobalConfig {
255
414
  [K in AgentType]?: AgentModelConfig;
256
415
  };
257
416
  }
417
+ /**
418
+ * Load and merge configurations
419
+ * - Loads global config from ~/.config/arki/config.json
420
+ * - Loads project config from .arki/config.json (if exists)
421
+ * - Merges them (project config overrides global)
422
+ */
423
+ declare function loadConfigs(): Promise<GlobalConfig>;
258
424
  /**
259
425
  * Get loaded configuration
260
426
  */
@@ -377,7 +543,7 @@ interface OpenAIOptions extends AdapterOptions {
377
543
  /** Use Flex API - low priority, low cost */
378
544
  flex?: boolean;
379
545
  /** Reasoning effort (thinking mode) */
380
- reasoningEffort?: ReasoningEffort$1;
546
+ reasoningEffort?: ReasoningEffort;
381
547
  /** Maximum completion tokens for LLM response */
382
548
  maxCompletionTokens?: number;
383
549
  }
@@ -387,59 +553,33 @@ declare class OpenAIAdapter extends Adapter {
387
553
  private toOpenAIMessages;
388
554
  private formatTools;
389
555
  chat(model: string, messages: Msg[], tools: Tool[], options: OpenAIOptions, onChunk?: (chunk: string) => void): Promise<AdapterResponse>;
390
- }
391
-
392
- interface AgentResponse {
393
- response: string;
394
- toolCalls: Array<{
395
- name: string;
396
- arguments: Record<string, unknown>;
397
- result: string;
398
- }>;
399
- usage?: {
400
- promptTokens: number;
401
- completionTokens: number;
402
- totalTokens: number;
403
- cachedTokens?: number;
404
- };
405
- }
406
- interface AgentConfig {
407
- /** Agent name for display */
408
- name: string;
409
- adapter: Adapter;
410
- model: string;
411
- tools: Tool[];
412
- platformOptions?: AdapterOptions;
413
- messages: Msg[];
414
- /** Maximum completion tokens for LLM response */
415
- maxCompletionTokens?: number;
416
- }
417
- declare class Agent {
418
- private config;
419
- private messages;
420
- private toolsMap;
421
- /** Pending async tools: asyncCallId -> info */
422
- private pendingAsyncTools;
423
- constructor(config: AgentConfig);
424
- /** Get agent name */
425
- get name(): string;
426
- /**
427
- * Render template string, replacing {{variable}} style variables
428
- */
429
- static renderTemplate(template: string, variables: Record<string, string | number | boolean>): string;
430
556
  /**
431
- * Check and add placeholder results for pending async tools
432
- * Called before appending new user message
557
+ * Count tokens for a complete request (messages + tools)
558
+ * Uses openai-chat-tokens for accurate estimation
433
559
  */
434
- private insertAsyncPlaceholders;
560
+ countTokens(_model: string, messages: Msg[], tools: Tool[]): number;
435
561
  /**
436
- * Handle async tool completion
562
+ * Count tokens for plain text
563
+ * Uses tiktoken internally via openai-chat-tokens
437
564
  */
438
- private handleAsyncToolComplete;
439
- run(userInput: string): Promise<AgentResponse>;
440
- reset(): this;
565
+ countTextTokens(_model: string, text: string): number;
441
566
  }
442
567
 
568
+ /**
569
+ * Format messages array into text for summarization
570
+ * @param messages Array of messages to format
571
+ * @returns Formatted conversation text
572
+ */
573
+ declare function formatMessagesForSummary(messages: Msg[]): string;
574
+ /**
575
+ * Summarize messages using LLM
576
+ * @param adapter The adapter to use for LLM call
577
+ * @param model The model to use
578
+ * @param messages Messages to summarize (excluding system messages)
579
+ * @returns Summary string
580
+ */
581
+ declare function summarizeMessages(adapter: Adapter, model: string, messages: Msg[]): Promise<string>;
582
+
443
583
  /**
444
584
  * All available model configurations (keyed by ID)
445
585
  */
@@ -466,4 +606,4 @@ interface Model {
466
606
  readonly capabilities: ModelCapabilities;
467
607
  }
468
608
 
469
- export { AIMsg, Adapter, type AdapterOptions, type AdapterResponse, Agent, type AgentModelConfig, type AgentResponse, type AgentType, AsyncToolResultMsg, type ColorName, type GlobalConfig, HAS_MANUAL, MODELS, type Model, type ModelCapabilities, type ModelProvider, Msg, MsgType, OS, type OS_TYPE, OpenAIAdapter, type OpenAIOptions, PATHS, PROCEDURES, type ReasoningEffort$1 as ReasoningEffort, SystemMsg, TOOLS, Tool, type ToolCall, ToolCallMsg, type ToolResult, ToolResultMsg, UserMsg, adapters, colors, convertColorTags, createColorConverter, debug, error, formatNumber, getAdapter, getAgentConfig, getApiKey, getConfig, getTimestamp, info, init, isDebugMode, log, print, saveConfig, setDebugMode, setWorkingDir, success, warn, workingDir };
609
+ export { ADAPTERS, AGENTS, AIMsg, Adapter, type AdapterOptions, type AdapterResponse, Agent, type AgentConfig, type AgentModelConfig, type AgentResponse, type AgentType, AsyncToolResultMsg, type ColorName, type GlobalConfig, HAS_MANUAL, MODELS, type Model, type ModelCapabilities, type ModelProvider, Msg, MsgType, OS, type OS_TYPE, OpenAIAdapter, type OpenAIOptions, PATHS, PATH_VARS, PROCEDURES, type ReasoningEffort, SystemMsg, TOOLS, Tool, type ToolCall, ToolCallMsg, type ToolContext, type ToolResult, ToolResultMsg, UserMsg, colors, convertColorTags, createColorConverter, debug, error, expandPathVars, fileSystem, formatMessagesForSummary, formatNumber, getAgentConfig, getApiKey, getConfig, getTimestamp, info, init, isDebugMode, loadConfigs, log, print, saveConfig, setDebugMode, setWorkingDir, success, summarizeMessages, warn, workingDir };