archbyte 0.1.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. package/README.md +282 -0
  2. package/bin/archbyte.js +213 -0
  3. package/dist/agents/core/component-detector.d.ts +2 -0
  4. package/dist/agents/core/component-detector.js +57 -0
  5. package/dist/agents/core/connection-mapper.d.ts +2 -0
  6. package/dist/agents/core/connection-mapper.js +77 -0
  7. package/dist/agents/core/doc-parser.d.ts +2 -0
  8. package/dist/agents/core/doc-parser.js +64 -0
  9. package/dist/agents/core/env-detector.d.ts +2 -0
  10. package/dist/agents/core/env-detector.js +51 -0
  11. package/dist/agents/core/event-detector.d.ts +2 -0
  12. package/dist/agents/core/event-detector.js +59 -0
  13. package/dist/agents/core/infra-analyzer.d.ts +2 -0
  14. package/dist/agents/core/infra-analyzer.js +72 -0
  15. package/dist/agents/core/structure-scanner.d.ts +2 -0
  16. package/dist/agents/core/structure-scanner.js +55 -0
  17. package/dist/agents/core/validator.d.ts +2 -0
  18. package/dist/agents/core/validator.js +74 -0
  19. package/dist/agents/index.d.ts +24 -0
  20. package/dist/agents/index.js +73 -0
  21. package/dist/agents/llm/index.d.ts +8 -0
  22. package/dist/agents/llm/index.js +185 -0
  23. package/dist/agents/llm/prompt-builder.d.ts +3 -0
  24. package/dist/agents/llm/prompt-builder.js +251 -0
  25. package/dist/agents/llm/response-parser.d.ts +6 -0
  26. package/dist/agents/llm/response-parser.js +174 -0
  27. package/dist/agents/llm/types.d.ts +31 -0
  28. package/dist/agents/llm/types.js +2 -0
  29. package/dist/agents/pipeline/agents/component-identifier.d.ts +3 -0
  30. package/dist/agents/pipeline/agents/component-identifier.js +102 -0
  31. package/dist/agents/pipeline/agents/connection-mapper.d.ts +3 -0
  32. package/dist/agents/pipeline/agents/connection-mapper.js +126 -0
  33. package/dist/agents/pipeline/agents/flow-detector.d.ts +3 -0
  34. package/dist/agents/pipeline/agents/flow-detector.js +101 -0
  35. package/dist/agents/pipeline/agents/service-describer.d.ts +3 -0
  36. package/dist/agents/pipeline/agents/service-describer.js +100 -0
  37. package/dist/agents/pipeline/agents/validator.d.ts +3 -0
  38. package/dist/agents/pipeline/agents/validator.js +102 -0
  39. package/dist/agents/pipeline/index.d.ts +13 -0
  40. package/dist/agents/pipeline/index.js +128 -0
  41. package/dist/agents/pipeline/merger.d.ts +7 -0
  42. package/dist/agents/pipeline/merger.js +212 -0
  43. package/dist/agents/pipeline/response-parser.d.ts +5 -0
  44. package/dist/agents/pipeline/response-parser.js +43 -0
  45. package/dist/agents/pipeline/types.d.ts +92 -0
  46. package/dist/agents/pipeline/types.js +3 -0
  47. package/dist/agents/prompt-data.d.ts +1 -0
  48. package/dist/agents/prompt-data.js +15 -0
  49. package/dist/agents/prompts-encode.d.ts +9 -0
  50. package/dist/agents/prompts-encode.js +26 -0
  51. package/dist/agents/prompts.d.ts +12 -0
  52. package/dist/agents/prompts.js +30 -0
  53. package/dist/agents/providers/anthropic.d.ts +10 -0
  54. package/dist/agents/providers/anthropic.js +117 -0
  55. package/dist/agents/providers/google.d.ts +10 -0
  56. package/dist/agents/providers/google.js +136 -0
  57. package/dist/agents/providers/ollama.d.ts +9 -0
  58. package/dist/agents/providers/ollama.js +162 -0
  59. package/dist/agents/providers/openai.d.ts +9 -0
  60. package/dist/agents/providers/openai.js +142 -0
  61. package/dist/agents/providers/router.d.ts +7 -0
  62. package/dist/agents/providers/router.js +55 -0
  63. package/dist/agents/runtime/orchestrator.d.ts +34 -0
  64. package/dist/agents/runtime/orchestrator.js +193 -0
  65. package/dist/agents/runtime/registry.d.ts +23 -0
  66. package/dist/agents/runtime/registry.js +56 -0
  67. package/dist/agents/runtime/types.d.ts +117 -0
  68. package/dist/agents/runtime/types.js +29 -0
  69. package/dist/agents/static/code-sampler.d.ts +3 -0
  70. package/dist/agents/static/code-sampler.js +153 -0
  71. package/dist/agents/static/component-detector.d.ts +3 -0
  72. package/dist/agents/static/component-detector.js +404 -0
  73. package/dist/agents/static/connection-mapper.d.ts +3 -0
  74. package/dist/agents/static/connection-mapper.js +280 -0
  75. package/dist/agents/static/doc-parser.d.ts +3 -0
  76. package/dist/agents/static/doc-parser.js +358 -0
  77. package/dist/agents/static/env-detector.d.ts +3 -0
  78. package/dist/agents/static/env-detector.js +73 -0
  79. package/dist/agents/static/event-detector.d.ts +3 -0
  80. package/dist/agents/static/event-detector.js +70 -0
  81. package/dist/agents/static/file-tree-collector.d.ts +3 -0
  82. package/dist/agents/static/file-tree-collector.js +51 -0
  83. package/dist/agents/static/index.d.ts +19 -0
  84. package/dist/agents/static/index.js +307 -0
  85. package/dist/agents/static/infra-analyzer.d.ts +3 -0
  86. package/dist/agents/static/infra-analyzer.js +208 -0
  87. package/dist/agents/static/structure-scanner.d.ts +3 -0
  88. package/dist/agents/static/structure-scanner.js +195 -0
  89. package/dist/agents/static/types.d.ts +165 -0
  90. package/dist/agents/static/types.js +2 -0
  91. package/dist/agents/static/utils.d.ts +21 -0
  92. package/dist/agents/static/utils.js +146 -0
  93. package/dist/agents/static/validator.d.ts +2 -0
  94. package/dist/agents/static/validator.js +75 -0
  95. package/dist/agents/tools/claude-code.d.ts +38 -0
  96. package/dist/agents/tools/claude-code.js +129 -0
  97. package/dist/agents/tools/local-fs.d.ts +12 -0
  98. package/dist/agents/tools/local-fs.js +112 -0
  99. package/dist/agents/tools/tool-definitions.d.ts +6 -0
  100. package/dist/agents/tools/tool-definitions.js +66 -0
  101. package/dist/cli/analyze.d.ts +27 -0
  102. package/dist/cli/analyze.js +586 -0
  103. package/dist/cli/auth.d.ts +46 -0
  104. package/dist/cli/auth.js +397 -0
  105. package/dist/cli/config.d.ts +11 -0
  106. package/dist/cli/config.js +177 -0
  107. package/dist/cli/diff.d.ts +10 -0
  108. package/dist/cli/diff.js +144 -0
  109. package/dist/cli/export.d.ts +10 -0
  110. package/dist/cli/export.js +321 -0
  111. package/dist/cli/gate.d.ts +13 -0
  112. package/dist/cli/gate.js +131 -0
  113. package/dist/cli/generate.d.ts +10 -0
  114. package/dist/cli/generate.js +213 -0
  115. package/dist/cli/license-gate.d.ts +27 -0
  116. package/dist/cli/license-gate.js +121 -0
  117. package/dist/cli/patrol.d.ts +15 -0
  118. package/dist/cli/patrol.js +212 -0
  119. package/dist/cli/run.d.ts +11 -0
  120. package/dist/cli/run.js +24 -0
  121. package/dist/cli/serve.d.ts +9 -0
  122. package/dist/cli/serve.js +65 -0
  123. package/dist/cli/setup.d.ts +1 -0
  124. package/dist/cli/setup.js +233 -0
  125. package/dist/cli/shared.d.ts +68 -0
  126. package/dist/cli/shared.js +275 -0
  127. package/dist/cli/stats.d.ts +9 -0
  128. package/dist/cli/stats.js +158 -0
  129. package/dist/cli/ui.d.ts +18 -0
  130. package/dist/cli/ui.js +144 -0
  131. package/dist/cli/validate.d.ts +54 -0
  132. package/dist/cli/validate.js +315 -0
  133. package/dist/cli/workflow.d.ts +10 -0
  134. package/dist/cli/workflow.js +594 -0
  135. package/dist/server/src/generator/index.d.ts +123 -0
  136. package/dist/server/src/generator/index.js +254 -0
  137. package/dist/server/src/index.d.ts +8 -0
  138. package/dist/server/src/index.js +1311 -0
  139. package/package.json +62 -0
  140. package/ui/dist/assets/index-B66Til39.js +70 -0
  141. package/ui/dist/assets/index-BE2OWbzu.css +1 -0
  142. package/ui/dist/index.html +14 -0

package/dist/agents/providers/anthropic.js
@@ -0,0 +1,117 @@
+ import Anthropic from "@anthropic-ai/sdk";
+ export class AnthropicProvider {
+     name = "anthropic";
+     client;
+     constructor(apiKey) {
+         this.client = new Anthropic({ apiKey });
+     }
+     async chat(params) {
+         const response = await this.client.messages.create({
+             model: params.model ?? "claude-sonnet-4-5-20250929",
+             max_tokens: params.maxTokens ?? 8192,
+             system: params.system,
+             messages: params.messages.map((m) => ({
+                 role: m.role,
+                 content: typeof m.content === "string"
+                     ? m.content
+                     : m.content.map((block) => this.toAnthropicBlock(block)),
+             })),
+             ...(params.tools?.length
+                 ? {
+                     tools: params.tools.map((t) => ({
+                         name: t.name,
+                         description: t.description,
+                         input_schema: t.input_schema,
+                     })),
+                 }
+                 : {}),
+         });
+         return {
+             content: response.content.map((block) => this.fromAnthropicBlock(block)),
+             stopReason: response.stop_reason === "tool_use" ? "tool_use" : "end_turn",
+             usage: {
+                 inputTokens: response.usage.input_tokens,
+                 outputTokens: response.usage.output_tokens,
+             },
+         };
+     }
+     async *stream(params) {
+         const stream = this.client.messages.stream({
+             model: params.model ?? "claude-sonnet-4-5-20250929",
+             max_tokens: params.maxTokens ?? 8192,
+             system: params.system,
+             messages: params.messages.map((m) => ({
+                 role: m.role,
+                 content: typeof m.content === "string"
+                     ? m.content
+                     : m.content.map((block) => this.toAnthropicBlock(block)),
+             })),
+             ...(params.tools?.length
+                 ? {
+                     tools: params.tools.map((t) => ({
+                         name: t.name,
+                         description: t.description,
+                         input_schema: t.input_schema,
+                     })),
+                 }
+                 : {}),
+         });
+         for await (const event of stream) {
+             if (event.type === "content_block_delta" &&
+                 event.delta.type === "text_delta") {
+                 yield { type: "text", text: event.delta.text };
+             }
+             else if (event.type === "content_block_start" &&
+                 event.content_block.type === "tool_use") {
+                 yield {
+                     type: "tool_use_start",
+                     toolName: event.content_block.name,
+                     toolId: event.content_block.id,
+                 };
+             }
+             else if (event.type === "content_block_delta" &&
+                 event.delta.type === "input_json_delta") {
+                 yield { type: "tool_use_input", input: event.delta.partial_json };
+             }
+             else if (event.type === "message_stop") {
+                 yield { type: "done" };
+             }
+         }
+     }
+     toAnthropicBlock(block) {
+         if (block.type === "text") {
+             return { type: "text", text: block.text ?? "" };
+         }
+         if (block.type === "tool_use") {
+             return {
+                 type: "tool_use",
+                 id: block.id ?? "",
+                 name: block.name ?? "",
+                 input: block.input ?? {},
+             };
+         }
+         if (block.type === "tool_result") {
+             return {
+                 type: "tool_result",
+                 tool_use_id: block.tool_use_id ?? "",
+                 content: block.content ?? "",
+                 is_error: block.is_error,
+             };
+         }
+         return { type: "text", text: "" };
+     }
+     fromAnthropicBlock(block) {
+         if (block.type === "text") {
+             return { type: "text", text: block.text };
+         }
+         if (block.type === "tool_use") {
+             return {
+                 type: "tool_use",
+                 id: block.id,
+                 name: block.name,
+                 input: block.input,
+             };
+         }
+         return { type: "text", text: "" };
+     }
+ }
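
All four adapters in dist/agents/providers normalize their SDK's native shapes into the same ChatParams/LLMResponse contract from runtime/types.js, so calling code never branches on the vendor. A minimal sketch of driving an adapter through that shared interface; the get_weather tool and the import path are illustrative assumptions, and only the parameter and block shapes come from the code above:

import { AnthropicProvider } from "./dist/agents/providers/anthropic.js"; // path assumed

// Hypothetical tool; only the { name, description, input_schema } shape
// is taken from the adapter code above.
const weatherTool = {
    name: "get_weather",
    description: "Look up the current weather for a city",
    input_schema: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
    },
};

const provider = new AnthropicProvider(process.env.ANTHROPIC_API_KEY ?? "");
const response = await provider.chat({
    system: "You are a concise weather assistant.",
    messages: [{ role: "user", content: "What is the weather in Oslo?" }],
    tools: [weatherTool],
});

// Every adapter maps its vendor's stop signal onto the same two values.
if (response.stopReason === "tool_use") {
    const call = response.content.find((block) => block.type === "tool_use");
    console.log("model requested", call?.name, call?.input);
}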

package/dist/agents/providers/google.d.ts
@@ -0,0 +1,10 @@
+ import type { LLMProvider, ChatParams, LLMResponse, LLMChunk } from "../runtime/types.js";
+ export declare class GoogleProvider implements LLMProvider {
+     name: "google";
+     private client;
+     constructor(apiKey: string);
+     chat(params: ChatParams): Promise<LLMResponse>;
+     stream(params: ChatParams): AsyncIterable<LLMChunk>;
+     private toGeminiContents;
+     private toGeminiFunctionDecl;
+ }

package/dist/agents/providers/google.js
@@ -0,0 +1,136 @@
+ import { GoogleGenerativeAI, SchemaType, } from "@google/generative-ai";
+ export class GoogleProvider {
+     name = "google";
+     client;
+     constructor(apiKey) {
+         this.client = new GoogleGenerativeAI(apiKey);
+     }
+     async chat(params) {
+         const model = this.client.getGenerativeModel({
+             model: params.model ?? "gemini-2.5-pro",
+             systemInstruction: params.system,
+             ...(params.tools?.length
+                 ? {
+                     tools: [
+                         {
+                             functionDeclarations: params.tools.map((t) => this.toGeminiFunctionDecl(t)),
+                         },
+                     ],
+                 }
+                 : {}),
+         });
+         const contents = this.toGeminiContents(params.messages);
+         const result = await model.generateContent({ contents });
+         const response = result.response;
+         const content = [];
+         let hasToolCalls = false;
+         for (const candidate of response.candidates ?? []) {
+             for (const part of candidate.content?.parts ?? []) {
+                 if (part.text) {
+                     content.push({ type: "text", text: part.text });
+                 }
+                 if (part.functionCall) {
+                     hasToolCalls = true;
+                     content.push({
+                         type: "tool_use",
+                         id: `call_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
+                         name: part.functionCall.name,
+                         input: part.functionCall.args ?? {},
+                     });
+                 }
+             }
+         }
+         return {
+             content,
+             stopReason: hasToolCalls ? "tool_use" : "end_turn",
+             usage: {
+                 inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
+                 outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
+             },
+         };
+     }
+     async *stream(params) {
+         const model = this.client.getGenerativeModel({
+             model: params.model ?? "gemini-2.5-pro",
+             systemInstruction: params.system,
+             ...(params.tools?.length
+                 ? {
+                     tools: [
+                         {
+                             functionDeclarations: params.tools.map((t) => this.toGeminiFunctionDecl(t)),
+                         },
+                     ],
+                 }
+                 : {}),
+         });
+         const contents = this.toGeminiContents(params.messages);
+         const result = await model.generateContentStream({ contents });
+         for await (const chunk of result.stream) {
+             for (const candidate of chunk.candidates ?? []) {
+                 for (const part of candidate.content?.parts ?? []) {
+                     if (part.text) {
+                         yield { type: "text", text: part.text };
+                     }
+                     if (part.functionCall) {
+                         yield {
+                             type: "tool_use_start",
+                             toolName: part.functionCall.name,
+                             toolId: `call_${Date.now()}`,
+                         };
+                         yield {
+                             type: "tool_use_input",
+                             input: JSON.stringify(part.functionCall.args ?? {}),
+                         };
+                     }
+                 }
+             }
+         }
+         yield { type: "done" };
+     }
+     toGeminiContents(messages) {
+         return messages.map((m) => {
+             const parts = [];
+             if (typeof m.content === "string") {
+                 parts.push({ text: m.content });
+             }
+             else {
+                 for (const block of m.content) {
+                     if (block.type === "text" && block.text) {
+                         parts.push({ text: block.text });
+                     }
+                     if (block.type === "tool_use") {
+                         parts.push({
+                             functionCall: {
+                                 name: block.name ?? "",
+                                 args: block.input ?? {},
+                             },
+                         });
+                     }
+                     if (block.type === "tool_result") {
+                         parts.push({
+                             functionResponse: {
+                                 name: block.tool_use_id ?? "",
+                                 response: { result: block.content ?? "" },
+                             },
+                         });
+                     }
+                 }
+             }
+             return {
+                 role: m.role === "assistant" ? "model" : "user",
+                 parts,
+             };
+         });
+     }
+     toGeminiFunctionDecl(tool) {
+         return {
+             name: tool.name,
+             description: tool.description,
+             parameters: {
+                 type: SchemaType.OBJECT,
+                 properties: (tool.input_schema.properties ?? {}),
+                 required: tool.input_schema.required ?? [],
+             },
+         };
+     }
+ }

package/dist/agents/providers/ollama.d.ts
@@ -0,0 +1,9 @@
+ import type { LLMProvider, ChatParams, LLMResponse, LLMChunk } from "../runtime/types.js";
+ export declare class OllamaProvider implements LLMProvider {
+     name: "ollama";
+     private baseUrl;
+     constructor(baseUrl?: string);
+     chat(params: ChatParams): Promise<LLMResponse>;
+     stream(params: ChatParams): AsyncIterable<LLMChunk>;
+     private toOllamaMessage;
+ }

package/dist/agents/providers/ollama.js
@@ -0,0 +1,162 @@
+ export class OllamaProvider {
+     name = "ollama";
+     baseUrl;
+     constructor(baseUrl = "http://localhost:11434") {
+         this.baseUrl = baseUrl.replace(/\/$/, "");
+     }
+     async chat(params) {
+         const messages = [
+             { role: "system", content: params.system },
+             ...params.messages.map((m) => this.toOllamaMessage(m)),
+         ];
+         const body = {
+             model: params.model ?? "llama3.3",
+             messages,
+             stream: false,
+         };
+         if (params.tools?.length) {
+             body.tools = params.tools.map((t) => ({
+                 type: "function",
+                 function: {
+                     name: t.name,
+                     description: t.description,
+                     parameters: t.input_schema,
+                 },
+             }));
+         }
+         const response = await fetch(`${this.baseUrl}/api/chat`, {
+             method: "POST",
+             headers: { "Content-Type": "application/json" },
+             body: JSON.stringify(body),
+         });
+         if (!response.ok) {
+             const text = await response.text();
+             throw new Error(`Ollama error ${response.status}: ${text}`);
+         }
+         const data = (await response.json());
+         const content = [];
+         if (data.message.content) {
+             content.push({ type: "text", text: data.message.content });
+         }
+         let hasToolCalls = false;
+         if (data.message.tool_calls) {
+             hasToolCalls = true;
+             for (const tc of data.message.tool_calls) {
+                 content.push({
+                     type: "tool_use",
+                     id: tc.id ?? `call_${Date.now()}`,
+                     name: tc.function.name,
+                     input: JSON.parse(tc.function.arguments),
+                 });
+             }
+         }
+         return {
+             content,
+             stopReason: hasToolCalls ? "tool_use" : "end_turn",
+             usage: {
+                 inputTokens: data.prompt_eval_count ?? 0,
+                 outputTokens: data.eval_count ?? 0,
+             },
+         };
+     }
+     async *stream(params) {
+         const messages = [
+             { role: "system", content: params.system },
+             ...params.messages.map((m) => this.toOllamaMessage(m)),
+         ];
+         const body = {
+             model: params.model ?? "llama3.3",
+             messages,
+             stream: true,
+         };
+         if (params.tools?.length) {
+             body.tools = params.tools.map((t) => ({
+                 type: "function",
+                 function: {
+                     name: t.name,
+                     description: t.description,
+                     parameters: t.input_schema,
+                 },
+             }));
+         }
+         const response = await fetch(`${this.baseUrl}/api/chat`, {
+             method: "POST",
+             headers: { "Content-Type": "application/json" },
+             body: JSON.stringify(body),
+         });
+         if (!response.ok) {
+             const text = await response.text();
+             throw new Error(`Ollama error ${response.status}: ${text}`);
+         }
+         const reader = response.body?.getReader();
+         if (!reader)
+             throw new Error("No response body");
+         const decoder = new TextDecoder();
+         let buffer = "";
+         while (true) {
+             const { done, value } = await reader.read();
+             if (done)
+                 break;
+             buffer += decoder.decode(value, { stream: true });
+             const lines = buffer.split("\n");
+             buffer = lines.pop() ?? "";
+             for (const line of lines) {
+                 if (!line.trim())
+                     continue;
+                 const data = JSON.parse(line);
+                 if (data.message?.content) {
+                     yield { type: "text", text: data.message.content };
+                 }
+                 if (data.message?.tool_calls) {
+                     for (const tc of data.message.tool_calls) {
+                         yield {
+                             type: "tool_use_start",
+                             toolName: tc.function.name,
+                             toolId: tc.id ?? `call_${Date.now()}`,
+                         };
+                         yield { type: "tool_use_input", input: tc.function.arguments };
+                     }
+                 }
+                 if (data.done) {
+                     yield { type: "done" };
+                 }
+             }
+         }
+     }
+     toOllamaMessage(m) {
+         if (typeof m.content === "string") {
+             return { role: m.role, content: m.content };
+         }
+         const toolResults = m.content.filter((b) => b.type === "tool_result");
+         if (toolResults.length > 0) {
+             return {
+                 role: "tool",
+                 content: toolResults[0].content ?? "",
+                 tool_call_id: toolResults[0].tool_use_id,
+             };
+         }
+         const toolUses = m.content.filter((b) => b.type === "tool_use");
+         if (toolUses.length > 0) {
+             const text = m.content
+                 .filter((b) => b.type === "text")
+                 .map((b) => b.text ?? "")
+                 .join("");
+             return {
+                 role: "assistant",
+                 content: text,
+                 tool_calls: toolUses.map((b) => ({
+                     id: b.id ?? "",
+                     type: "function",
+                     function: {
+                         name: b.name ?? "",
+                         arguments: JSON.stringify(b.input ?? {}),
+                     },
+                 })),
+             };
+         }
+         return {
+             role: m.role,
+             content: m.content.map((b) => b.text ?? "").join(""),
+         };
+     }
+ }

package/dist/agents/providers/openai.d.ts
@@ -0,0 +1,9 @@
+ import type { LLMProvider, ChatParams, LLMResponse, LLMChunk } from "../runtime/types.js";
+ export declare class OpenAIProvider implements LLMProvider {
+     name: "openai";
+     private client;
+     constructor(apiKey: string);
+     chat(params: ChatParams): Promise<LLMResponse>;
+     stream(params: ChatParams): AsyncIterable<LLMChunk>;
+     private toOpenAIMessage;
+ }

package/dist/agents/providers/openai.js
@@ -0,0 +1,142 @@
+ import OpenAI from "openai";
+ export class OpenAIProvider {
+     name = "openai";
+     client;
+     constructor(apiKey) {
+         this.client = new OpenAI({ apiKey });
+     }
+     async chat(params) {
+         const messages = [
+             { role: "system", content: params.system },
+             ...params.messages.map((m) => this.toOpenAIMessage(m)),
+         ];
+         const response = await this.client.chat.completions.create({
+             model: params.model ?? "gpt-4o",
+             max_tokens: params.maxTokens ?? 8192,
+             messages,
+             ...(params.tools?.length
+                 ? {
+                     tools: params.tools.map((t) => ({
+                         type: "function",
+                         function: {
+                             name: t.name,
+                             description: t.description,
+                             parameters: t.input_schema,
+                         },
+                     })),
+                 }
+                 : {}),
+         });
+         const choice = response.choices[0];
+         const content = [];
+         if (choice.message.content) {
+             content.push({ type: "text", text: choice.message.content });
+         }
+         if (choice.message.tool_calls) {
+             for (const tc of choice.message.tool_calls) {
+                 const fn = tc.function;
+                 content.push({
+                     type: "tool_use",
+                     id: tc.id,
+                     name: fn.name,
+                     input: JSON.parse(fn.arguments),
+                 });
+             }
+         }
+         return {
+             content,
+             stopReason: choice.finish_reason === "tool_calls" ? "tool_use" : "end_turn",
+             usage: {
+                 inputTokens: response.usage?.prompt_tokens ?? 0,
+                 outputTokens: response.usage?.completion_tokens ?? 0,
+             },
+         };
+     }
+     async *stream(params) {
+         const messages = [
+             { role: "system", content: params.system },
+             ...params.messages.map((m) => this.toOpenAIMessage(m)),
+         ];
+         const stream = await this.client.chat.completions.create({
+             model: params.model ?? "gpt-4o",
+             max_tokens: params.maxTokens ?? 8192,
+             messages,
+             stream: true,
+             ...(params.tools?.length
+                 ? {
+                     tools: params.tools.map((t) => ({
+                         type: "function",
+                         function: {
+                             name: t.name,
+                             description: t.description,
+                             parameters: t.input_schema,
+                         },
+                     })),
+                 }
+                 : {}),
+         });
+         for await (const chunk of stream) {
+             const delta = chunk.choices[0]?.delta;
+             if (!delta)
+                 continue;
+             if (delta.content) {
+                 yield { type: "text", text: delta.content };
+             }
+             if (delta.tool_calls) {
+                 for (const tc of delta.tool_calls) {
+                     if (tc.function?.name) {
+                         yield {
+                             type: "tool_use_start",
+                             toolName: tc.function.name,
+                             toolId: tc.id ?? "",
+                         };
+                     }
+                     if (tc.function?.arguments) {
+                         yield { type: "tool_use_input", input: tc.function.arguments };
+                     }
+                 }
+             }
+             if (chunk.choices[0]?.finish_reason) {
+                 yield { type: "done" };
+             }
+         }
+     }
+     toOpenAIMessage(m) {
+         if (typeof m.content === "string") {
+             return { role: m.role, content: m.content };
+         }
+         // Handle tool results — OpenAI uses separate "tool" role messages
+         const toolResults = m.content.filter((b) => b.type === "tool_result");
+         if (toolResults.length > 0) {
+             // Return first tool result as a tool message
+             const tr = toolResults[0];
+             return {
+                 role: "tool",
+                 tool_call_id: tr.tool_use_id ?? "",
+                 content: tr.content ?? "",
+             };
+         }
+         // Handle assistant messages with tool_use
+         const toolUses = m.content.filter((b) => b.type === "tool_use");
+         if (toolUses.length > 0) {
+             const textParts = m.content.filter((b) => b.type === "text");
+             return {
+                 role: "assistant",
+                 content: textParts.map((b) => b.text ?? "").join("") || null,
+                 tool_calls: toolUses.map((b) => ({
+                     id: b.id ?? "",
+                     type: "function",
+                     function: {
+                         name: b.name ?? "",
+                         arguments: JSON.stringify(b.input ?? {}),
+                     },
+                 })),
+             };
+         }
+         // Plain text blocks
+         return {
+             role: m.role,
+             content: m.content.map((b) => b.text ?? "").join(""),
+         };
+     }
+ }

package/dist/agents/providers/router.d.ts
@@ -0,0 +1,7 @@
+ import type { LLMProvider, ArchByteConfig } from "../runtime/types.js";
+ export declare function createProvider(config: ArchByteConfig): LLMProvider;
+ /**
+  * Auto-detect provider from environment variables.
+  * Checks in order: ARCHBYTE_PROVIDER, then falls back to whichever API key is set.
+  */
+ export declare function detectConfig(): ArchByteConfig | null;

package/dist/agents/providers/router.js
@@ -0,0 +1,55 @@
+ import { AnthropicProvider } from "./anthropic.js";
+ import { OpenAIProvider } from "./openai.js";
+ import { GoogleProvider } from "./google.js";
+ import { OllamaProvider } from "./ollama.js";
+ export function createProvider(config) {
+     switch (config.provider) {
+         case "anthropic":
+             return new AnthropicProvider(config.apiKey);
+         case "openai":
+             return new OpenAIProvider(config.apiKey);
+         case "google":
+             return new GoogleProvider(config.apiKey);
+         case "ollama":
+             return new OllamaProvider(config.ollamaBaseUrl ?? "http://localhost:11434");
+         default:
+             throw new Error(`Unknown provider: ${config.provider}`);
+     }
+ }
+ /**
+  * Auto-detect provider from environment variables.
+  * Checks in order: ARCHBYTE_PROVIDER, then falls back to whichever API key is set.
+  */
+ export function detectConfig() {
+     const explicit = process.env.ARCHBYTE_PROVIDER;
+     const apiKey = process.env.ARCHBYTE_API_KEY;
+     if (explicit && apiKey) {
+         return {
+             provider: explicit,
+             apiKey,
+             ollamaBaseUrl: process.env.OLLAMA_BASE_URL,
+         };
+     }
+     // Auto-detect from known env vars
+     if (process.env.ANTHROPIC_API_KEY) {
+         return { provider: "anthropic", apiKey: process.env.ANTHROPIC_API_KEY };
+     }
+     if (process.env.OPENAI_API_KEY) {
+         return { provider: "openai", apiKey: process.env.OPENAI_API_KEY };
+     }
+     if (process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY) {
+         return {
+             provider: "google",
+             apiKey: process.env.GOOGLE_API_KEY ?? process.env.GEMINI_API_KEY ?? "",
+         };
+     }
+     // Ollama doesn't need an API key
+     if (explicit === "ollama") {
+         return {
+             provider: "ollama",
+             apiKey: "",
+             ollamaBaseUrl: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+         };
+     }
+     return null;
+ }

package/dist/agents/runtime/orchestrator.d.ts
@@ -0,0 +1,34 @@
+ import type { ArchByteAgent, AgentContext, LLMProvider, ToolBackend, License, PipelineResult, ArchByteConfig } from "./types.js";
+ /**
+  * Agent Orchestrator — runs the multi-agent analysis pipeline.
+  *
+  * Phase 1 (parallel): structure-scanner, doc-parser, infra-analyzer,
+  * component-detector, event-detector, env-detector
+  * Phase 2 (sequential): connection-mapper (needs Phase 1 results)
+  * Phase 3 (sequential): validator (needs Phase 2 results)
+  */
+ export declare class Orchestrator {
+     private agents;
+     private provider;
+     private tools;
+     private projectRoot;
+     private config;
+     private license;
+     constructor(opts: {
+         provider: LLMProvider;
+         tools: ToolBackend;
+         projectRoot: string;
+         config: ArchByteConfig;
+         license: License;
+     });
+     register(agent: ArchByteAgent): void;
+     registerAll(agents: ArchByteAgent[]): void;
+     run(onProgress?: (msg: string) => void): Promise<PipelineResult>;
+     private isAllowed;
+     private runAgent;
+ }
+ /**
+  * Execute a tool_use loop for an agent.
+  * This is the core pattern: send prompt → get tool calls → execute tools → send results → repeat.
+  */
+ export declare function executeToolLoop(context: AgentContext, systemPrompt: string, userMessage: string, maxIterations?: number): Promise<string>;
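
executeToolLoop appears in this diff only as a declaration, but its doc comment fixes the pattern: send the prompt, execute whatever tools the model requests, feed the results back, and repeat until the model stops asking. A sketch of that loop against the block shapes used by the providers above; the context.tools.run call and the exact AgentContext fields are assumptions, not the package's actual implementation:

// Sketch only: field names beyond the .d.ts contracts are assumptions.
async function toolLoopSketch(context, systemPrompt, userMessage, maxIterations = 10) {
    const messages = [{ role: "user", content: userMessage }];
    for (let i = 0; i < maxIterations; i++) {
        const response = await context.provider.chat({
            system: systemPrompt,
            messages,
            tools: context.toolDefinitions, // assumed to come from tools/tool-definitions.js
        });
        messages.push({ role: "assistant", content: response.content });
        if (response.stopReason !== "tool_use") {
            // The model is done; return the accumulated text blocks.
            return response.content
                .filter((block) => block.type === "text")
                .map((block) => block.text ?? "")
                .join("");
        }
        // Execute each requested tool and send the outputs back as tool_result blocks.
        const results = [];
        for (const block of response.content) {
            if (block.type !== "tool_use") continue;
            const output = await context.tools.run(block.name, block.input); // assumed ToolBackend method
            results.push({ type: "tool_result", tool_use_id: block.id, content: output });
        }
        messages.push({ role: "user", content: results });
    }
    throw new Error("Tool loop exceeded maxIterations");
}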