llm-mar 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Renato Junior
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,69 @@
1
+ # LLM-MAR
2
+
3
+ LLM-MAR is a compact CLI that creates LLM agents, lets them debate, answers questions, and builds workflows.
4
+
5
+ ## Installation
6
+
7
+ 1. Clone the repository:
8
+ ```bash
9
+ git clone https://github.com/renatojuniorrs/llm-mar.git
10
+ cd llm-mar
11
+ ```
12
+
13
+ 2. Install dependencies:
14
+ ```bash
15
+ npm install
16
+ ```
17
+
18
+ 3. Set up your OpenAI API key:
19
+ ```bash
20
+ export OPENAI_API_KEY=your_api_key_here
21
+ ```
22
+
23
+ ## Usage
24
+
25
+ ### Creating Agents
26
+
27
+ Create a new agent YAML file:
28
+
29
+ ```bash
30
+ npx llm-mar create agent myagent --model gpt-4 --goal "To answer questions" --role "Assistant" --system-prompt "You are a helpful assistant." --instructions "Think step by step,Answer clearly" --output text
31
+ ```
32
+
33
+ This creates `default/myagent.yaml`.
34
+
35
+ ### Creating Teams
36
+
37
+ Create a team of agents:
38
+
39
+ ```bash
40
+ npx llm-mar create team myteam --agents "default/agent1.yaml,default/agent2.yaml" --output text
41
+ ```
42
+
43
+ ### Running Agents
44
+
45
+ Run an agent with input:
46
+
47
+ ```bash
48
+ npx llm-mar run default/myagent.yaml --input "What is the capital of France?"
49
+ ```
50
+
51
+ ### Running Teams
52
+
53
+ Run a team with a specific agent:
54
+
55
+ ```bash
56
+ npx llm-mar run default/myteam.yaml --agent agent1 --input "Discuss this topic"
57
+ ```
58
+
59
+ ### Running Debates
60
+
61
+ Run a debate (assuming you have a debate YAML):
62
+
63
+ ```bash
64
+ npx llm-mar run default/debate1.yaml
65
+ ```
66
+
67
+ ## YAML Structure
68
+
69
+ See [docs/yaml-structure-guide.md](docs/yaml-structure-guide.md) for details on YAML file formats.
package/agents.js ADDED
@@ -0,0 +1,157 @@
1
+ const { ChatOpenAI } = require('@langchain/openai');
2
+ const { z } = require('zod');
3
+
4
/**
 * Build a Zod schema from a plain-object structure description taken from an
 * agent YAML `output.structure` section.
 *
 * Leaf values are type-name strings ('string' | 'number' | 'boolean'; any
 * other name defaults to string). Arrays become z.array(...) of the schema
 * derived from their first element (defaulting to z.array(z.string())), and
 * nested objects recurse.
 *
 * @param {*} structure - structure description (object, array, or other)
 * @returns {object} the derived Zod schema (z.string() fallback for leaves)
 */
function createZodSchema(structure) {
  // typeof null === 'object', so guard explicitly: Object.entries(null) throws.
  if (structure === null || typeof structure !== 'object') {
    return z.string(); // fallback for non-object descriptions
  }
  // A top-level array describes "list of <first element's schema>"; the old
  // code fell into the object branch and produced a schema keyed by indices.
  if (Array.isArray(structure)) {
    if (structure.length > 0 && typeof structure[0] === 'object' && structure[0] !== null) {
      return z.array(createZodSchema(structure[0]));
    }
    return z.array(z.string()); // default element type
  }
  const schema = {};
  for (const [key, value] of Object.entries(structure)) {
    if (typeof value === 'string') {
      if (value === 'number') schema[key] = z.number();
      else if (value === 'boolean') schema[key] = z.boolean();
      else schema[key] = z.string(); // 'string' and unknown type names
    } else if (Array.isArray(value)) {
      if (value.length > 0 && typeof value[0] === 'object' && value[0] !== null) {
        schema[key] = z.array(createZodSchema(value[0]));
      } else {
        schema[key] = z.array(z.string()); // default
      }
    } else if (value !== null && typeof value === 'object') {
      schema[key] = createZodSchema(value);
    }
    // Other leaf kinds (numbers, booleans, null) are skipped, as before.
  }
  return z.object(schema);
}
27
+
28
/**
 * Instantiate a ChatOpenAI model for an agent.
 *
 * @param {object} params
 * @param {string|object} params.modelSettings - either a bare model name
 *   (backward-compatible string form, run at temperature 0.7) or a settings
 *   object whose `model` key names the model; all other keys are passed
 *   through to the ChatOpenAI constructor.
 * @param {string} params.systemPrompt - required so misconfigured agents fail
 *   fast; the prompt itself is sent by the caller at invoke time.
 * @param {object|null} [params.responseFormat] - optional Zod schema; when
 *   present the model is wrapped for structured output.
 * @returns {object} a ChatOpenAI instance, or its structured-output wrapper
 * @throws {Error} when modelSettings or systemPrompt is missing
 */
function createAgent({ modelSettings, systemPrompt, responseFormat }) {
  if (!modelSettings) {
    throw new Error('createAgent requires modelSettings');
  }
  if (!systemPrompt) {
    throw new Error('createAgent requires a systemPrompt');
  }

  // Handle both a bare model-name string (backward compatibility) and an
  // object of model settings.
  let modelConfig;
  if (typeof modelSettings === 'string') {
    modelConfig = { modelName: modelSettings, temperature: 0.7 };
  } else {
    // Spread first so the modelName derived from `.model` always wins; the
    // old order let a stray `modelName` key in the settings clobber it.
    modelConfig = {
      ...modelSettings,
      modelName: modelSettings.model,
    };
  }

  const chatModel = new ChatOpenAI(modelConfig);

  // Structured output: LangChain parses replies against the schema for us.
  if (responseFormat) {
    return chatModel.withStructuredOutput(responseFormat);
  }

  return chatModel;
}
55
+
56
/**
 * Execute a single agent described by a parsed YAML config and print the
 * result to stdout.
 *
 * Output handling by `spec.output`:
 *  - object { format: 'json', structure, instructions? }: structured output
 *    via a Zod schema derived from `structure`; optional `instructions` are
 *    prepended to the user input.
 *  - 'json': the system prompt asks for JSON and the reply is parsed
 *    (wrapped verbatim if parsing fails).
 *  - anything else: plain-text logging.
 *
 * @param {object} config - parsed agent YAML ({ metadata, spec })
 * @param {string} input - user input forwarded to the model
 * @returns {Promise<void>} resolves after logging; errors are logged, not thrown
 */
async function runAgent(config, input) {
  const name = config.metadata ? config.metadata.name : config.spec.id;
  const outputFormat = config.spec.output || 'text';

  // Model name for display: prefer model_settings (string or object form),
  // fall back to the legacy spec.model field.
  const modelName = config.spec.model_settings
    ? (typeof config.spec.model_settings === 'string'
        ? config.spec.model_settings
        : config.spec.model_settings.model)
    : config.spec.model;

  let systemPrompt = config.spec.system_prompt;
  let responseFormat = null;

  if (typeof outputFormat === 'object' && outputFormat.format === 'json' && outputFormat.structure) {
    responseFormat = createZodSchema(outputFormat.structure);
    if (outputFormat.instructions) {
      // Field-by-field guidance so the model knows how to fill the schema.
      input = `${outputFormat.instructions}\n\n${input}`;
    }
  } else if (outputFormat === 'json') {
    systemPrompt += '\n\nRespond in valid JSON format only. Structure your response as a JSON object.';
  }

  const modelSettings = config.spec.model_settings || config.spec.model;

  const model = createAgent({
    modelSettings: modelSettings,
    systemPrompt: systemPrompt,
    responseFormat: responseFormat
  });

  // Every JSON-style result is shaped and printed identically; this also
  // fixes the old parse-failure branch, which reported config.spec.model
  // instead of the resolved modelName.
  const printResult = (response) => {
    console.log(JSON.stringify({
      agent: name,
      model: modelName,
      input: input,
      response: response,
      timestamp: new Date().toISOString()
    }, null, 2));
  };

  try {
    // The invocation is identical either way; with structured output the
    // return value is already parsed, otherwise the text lives in `.content`.
    const response = await model.invoke([
      { role: 'system', content: systemPrompt },
      { role: 'user', content: input }
    ]);

    if (typeof outputFormat === 'object' && outputFormat.format === 'json') {
      printResult(responseFormat ? response : response.content);
    } else if (outputFormat === 'json') {
      // Best effort: parse the reply as JSON; wrap the raw text if it isn't.
      try {
        printResult(JSON.parse(response.content));
      } catch (parseError) {
        printResult(response.content);
      }
    } else {
      console.log(`Running agent: ${name}`);
      console.log(`Model: ${modelName}`);
      console.log(`Input: ${input}`);
      console.log(`Response: ${response.content}`);
    }
  } catch (error) {
    if (outputFormat === 'json' || (typeof outputFormat === 'object' && outputFormat.format === 'json')) {
      console.log(JSON.stringify({
        agent: name,
        error: error.message,
        timestamp: new Date().toISOString()
      }, null, 2));
    } else {
      console.error(`Error calling model: ${error.message}`);
      console.log(`Response: [Error: Could not reach model]`);
    }
  }
}
153
+
154
// Public API of the agents module.
module.exports = { createAgent, runAgent };
package/commands/create.js ADDED
@@ -0,0 +1,104 @@
1
+ const fs = require('fs');
2
+ const yaml = require('js-yaml');
3
+
4
/**
 * Register the `create` command group on a commander program.
 *
 * Adds three sub-commands — `create agent`, `create team`, `create debate` —
 * each of which serializes its options into a YAML config file under the
 * `default/` directory.
 *
 * @param {object} program - commander Command instance to extend
 */
function setupCreate(program) {
  const createCmd = program.command('create').description('create agents, teams, or debates');

  // Dump a config to default/<name>.yaml and report it. The directory is
  // created first: fs.writeFileSync does not create missing parent
  // directories, so the old code threw ENOENT when `default/` was absent.
  function writeConfig(name, kindLabel, config) {
    fs.mkdirSync('default', { recursive: true });
    const yamlStr = yaml.dump(config);
    fs.writeFileSync(`default/${name}.yaml`, yamlStr);
    console.log(`${kindLabel} YAML created: default/${name}.yaml`);
  }

  createCmd
    .command('agent <name>')
    .description('create a new agent YAML')
    .option('--model <model>', 'model to use', 'gpt-4')
    .option('--goal <goal>', 'goal of the agent', 'To assist with tasks')
    .option('--role <role>', 'role of the agent', 'Assistant')
    .option('--system-prompt <prompt>', 'system prompt', 'You are a helpful assistant.')
    .option('--instructions <instructions>', 'instructions as comma-separated list', 'Think step by step,Answer clearly and concisely')
    .option('--output <format>', 'output format', 'text')
    .action((name, options) => {
      const instructions = options.instructions.split(',').map(s => s.trim());
      writeConfig(name, 'Agent', {
        version: '1.0',
        kind: 'Agent',
        metadata: {
          name: name,
          description: `An agent named ${name}`
        },
        spec: {
          id: name,
          model: options.model,
          goal: options.goal,
          role: options.role,
          system_prompt: options.systemPrompt,
          instructions: instructions,
          output: options.output
        }
      });
    });

  createCmd
    .command('team <name>')
    .description('create a new team YAML')
    .option('--agents <agents>', 'comma-separated list of agent YAML file paths')
    .option('--output <format>', 'output format', 'text')
    .action((name, options) => {
      if (!options.agents) {
        console.error('Error: --agents is required');
        process.exit(1);
      }
      const agentFiles = options.agents.split(',').map(s => s.trim());
      writeConfig(name, 'Team', {
        version: '1.0',
        kind: 'Team',
        metadata: {
          name: name,
          description: `A team named ${name}`
        },
        spec: {
          agents: agentFiles,
          output: options.output
        }
      });
    });

  createCmd
    .command('debate <name>')
    .description('create a new debate YAML')
    .option('--method <method>', 'debate method', 'majority_vote')
    .option('--agents <agents>', 'comma-separated list of agent YAML file paths')
    .option('--judges <judges>', 'comma-separated list of judge agent YAML file paths')
    .option('--input <input>', 'debate input topic', 'A topic to debate')
    .option('--output <format>', 'output format', 'text')
    .action((name, options) => {
      if (!options.agents || !options.judges) {
        console.error('Error: --agents and --judges are required');
        process.exit(1);
      }
      const agents = options.agents.split(',').map(s => s.trim());
      const judges = options.judges.split(',').map(s => s.trim());
      writeConfig(name, 'Debate', {
        version: '1.0',
        kind: 'Debate',
        metadata: {
          name: name,
          description: `A debate named ${name}`
        },
        spec: {
          method: options.method,
          input: options.input,
          judges: judges,
          agents: agents,
          output: options.output
        }
      });
    });
}

module.exports = setupCreate;
package/commands/run.js ADDED
@@ -0,0 +1,42 @@
1
+ const fs = require('fs');
2
+ const yaml = require('js-yaml');
3
+ const { runAgent } = require('../agents');
4
+ const { runTeam, runDebate } = require('../workflows');
5
+
6
/**
 * Register the `run` command on a commander program.
 *
 * Loads the given YAML file and dispatches on its `kind`:
 *  - Agent:  requires --input
 *  - Team:   requires --agent and --input
 *  - Debate: no extra options
 *
 * Exits with status 1 on missing options, unknown kind, or any load/run error.
 *
 * @param {object} program - commander Command instance to extend
 */
function setupRun(program) {
  program
    .command('run <yamlfile>')
    .description('run an agent, team, or debate from a YAML file')
    .option('--input <input>', 'input for agent or team')
    .option('--agent <agentId>', 'agent ID for team')
    .action(async (yamlfile, options) => {
      try {
        const content = fs.readFileSync(yamlfile, 'utf8');
        const config = yaml.load(content);

        // yaml.load returns undefined for an empty file; report that (and a
        // missing `kind`) cleanly instead of crashing on config.kind below.
        if (!config || typeof config.kind !== 'string') {
          console.error('Error: Unknown kind in YAML file');
          process.exit(1);
        }

        if (config.kind === 'Agent') {
          if (!options.input) {
            console.error('Error: --input is required for agent');
            process.exit(1);
          }
          await runAgent(config, options.input);
        } else if (config.kind === 'Team') {
          if (!options.agent || !options.input) {
            console.error('Error: --agent and --input are required for team');
            process.exit(1);
          }
          await runTeam(config, options.agent, options.input);
        } else if (config.kind === 'Debate') {
          await runDebate(config);
        } else {
          console.error('Error: Unknown kind in YAML file');
          process.exit(1);
        }
      } catch (error) {
        console.error('Error:', error.message);
        process.exit(1);
      }
    });
}

module.exports = setupRun;
package/default/agents/confucius.yaml ADDED
@@ -0,0 +1,20 @@
1
+ version: '1.0'
2
+ kind: Agent
3
+ metadata:
4
+ name: Confucius
5
+ description: An agent embodying the wisdom of Confucius
6
+ spec:
7
+ id: agent2
8
+ model_settings:
9
+ model: gpt-5.4-nano
10
+ reasoning:
11
+ effort: high
12
+ goal: To teach moral philosophy and proper conduct in society
13
+ role: Philosopher and Teacher
14
+ system_prompt: You are Confucius, the ancient Chinese philosopher and teacher. Emphasize moral cultivation, proper social relationships, education, and harmony. Draw from concepts like ren (benevolence), li (ritual/propriety), filial piety, and the importance of self-cultivation. Speak with wisdom, use proverbs and analogies, and focus on ethical governance and personal virtue. You should think as if you are Confucius, and your responses should reflect his unique perspective and style of thinking. You should always respond in his tone and manner of expression.
15
+ instructions:
16
+ - Emphasize moral cultivation and self-improvement
17
+ - Stress the importance of proper relationships and social harmony
18
+ - Use proverbs and wise sayings to illustrate points
19
+ - Focus on education and the role of the gentleman (junzi)
20
+
package/default/agents/einstein.yaml ADDED
@@ -0,0 +1,19 @@
1
+ version: '1.0'
2
+ kind: Agent
3
+ metadata:
4
+ name: Albert Einstein
5
+ description: An agent that embodies the genius of Albert Einstein
6
+ spec:
7
+ id: agent1
8
+ model_settings:
9
+ model: gpt-5.4-nano
10
+ reasoning:
11
+ effort: high
12
+ goal: To think deeply about scientific and philosophical questions
13
+ role: Theoretical Physicist
14
+ system_prompt: You are Albert Einstein, the brilliant theoretical physicist. Think with profound insight, creativity, and intellectual curiosity. Use thought experiments, challenge conventional wisdom, and express ideas with clarity and elegance. Draw from relativity, quantum mechanics, and broader philosophical implications of science. You should think as if you are Albert Einstein, and your responses should reflect his unique perspective and style of thinking. You should always respond in his tone and manner of expression.
15
+ instructions:
16
+ - Think with deep intellectual curiosity
17
+ - Use thought experiments and analogies
18
+ - Challenge conventional assumptions
19
+ - Express complex ideas with elegant simplicity
package/default/agents/judge.yaml ADDED
@@ -0,0 +1,32 @@
1
+ version: '1.0'
2
+ kind: Agent
3
+ metadata:
4
+ name: Impartial Judge
5
+ description: An impartial judge agent for evaluating debates
6
+ spec:
7
+ id: judge
8
+ model_settings:
9
+ model: gpt-5.4-nano
10
+ reasoning:
11
+ effort: high
12
+ goal: To provide fair and unbiased evaluation of arguments
13
+ role: Debate Judge
14
+ system_prompt: "You are an impartial judge evaluating debates. Remain neutral and objective, focusing on the quality, logic, and persuasiveness of arguments rather than personal opinions. Evaluate based on evidence, reasoning, clarity, and relevance. Provide fair assessments without bias toward any particular viewpoint or participant."
15
+ instructions:
16
+ - Remain completely impartial and neutral
17
+ - Evaluate arguments based on logic, evidence, and clarity
18
+ - Consider both sides fairly
19
+ - Base judgments on merit, not personal preference
20
+ - Reference historical examples of fair judgment when appropriate
21
+ output:
22
+ format: json
23
+ structure:
24
+ winner: string
25
+ votes:
26
+ - agent: string
27
+ vote: string
28
+ instructions: |
29
+ As an impartial judge, evaluate the debate arguments and provide your judgment.
30
+ - winner: filename of the agent with the strongest argument
31
+ - votes: array with detailed reasoning for each agent's argument quality
32
+
@@ -0,0 +1,29 @@
1
+ version: '1.0'
2
+ kind: Debate
3
+ metadata:
4
+ name: Meaning of Life Debate
5
+ description: A philosophical debate between Einstein and Confucius on the meaning of life
6
+ spec:
7
+ method: majority_vote
8
+ input: What is the meaning of life?
9
+ debug: true
10
+ judges:
11
+ - default/agents/judge.yaml
12
+ agents:
13
+ - default/agents/einstein.yaml
14
+ - default/agents/confucius.yaml
15
+ output:
16
+ format: json
17
+ structure:
18
+ winner: string # filename of the winning agent
19
+ reasoning: string # overall explanation for the decision
20
+ votes: # array of individual agent evaluations
21
+ - agent: string # filename of the agent being evaluated
22
+ vote: string # detailed reasoning why this agent should win
23
+ instructions: |
24
+ Evaluate the arguments and provide your judgment in structured JSON format.
25
+ - winner: filename of the winning agent (e.g., "default/agents/einstein.yaml")
26
+ - reasoning: overall explanation for why this agent won the debate
27
+ - votes: array where each item contains:
28
+ - agent: filename of the agent being evaluated
29
+ - vote: detailed reasoning explaining the strengths and weaknesses of this agent's argument, including analysis of logic, evidence, clarity, and persuasiveness
@@ -0,0 +1,41 @@
1
+ version: '1.0'
2
+ kind: Debate
3
+ metadata:
4
+ name: Riddle Creation Debate
5
+ description: Agents create random riddles, judge validates them
6
+ spec:
7
+ method: majority_vote
8
+ input: Create a completely original riddle. The riddle should be clever, have a clear answer, and follow the classic riddle format (a question or statement that describes something without naming it directly).
9
+ debug: false
10
+ judges:
11
+ - default/agents/judge.yaml
12
+ agents:
13
+ - default/agents/einstein.yaml
14
+ - default/agents/confucius.yaml
15
+ output:
16
+ format: json
17
+ structure:
18
+ winner: string # filename of the agent with the best riddle
19
+ winner_riddle: string # the riddle created by the winning agent
20
+ reasoning: string # explanation of why this riddle was chosen as best
21
+ validation_results: # array of validation results for each riddle
22
+ - agent: string # filename of the agent
23
+ riddle: string # the riddle they created
24
+ is_valid_riddle: boolean # whether it's a proper riddle
25
+ validation_reasoning: string # why it is/isn't valid
26
+ quality_score: number # 1-10 rating of riddle quality
27
+ votes: # individual judge evaluations
28
+ - agent: string # filename of the agent being evaluated
29
+ vote: string # detailed reasoning for the vote
30
+ instructions: |
31
+ Evaluate the riddles created by the agents. For each riddle, determine:
32
+ 1. Is it a valid riddle? (has a clear answer, clever wording, not too obvious)
33
+ 2. Quality score (1-10) based on creativity, cleverness, and proper riddle structure
34
+ 3. Choose the best riddle as winner
35
+
36
+ Respond with structured JSON containing:
37
+ - winner: filename of agent with best riddle
38
+ - winner_riddle: the riddle created by the winning agent
39
+ - reasoning: why this riddle won
40
+ - validation_results: array with validation for each agent's riddle
41
+ - votes: detailed evaluation of each riddle
package/docs/yaml-structure-guide.md ADDED
@@ -0,0 +1,287 @@
1
+ # YAML Structure Guide for LLM-MAR
2
+
3
+ This guide explains how to structure YAML configuration files for the LLM-MAR (Large Language Model Multi-Agent Reasoning) tool.
4
+
5
+ ## Overview
6
+
7
+ LLM-MAR uses YAML files to define agents, teams, and debates. Each configuration follows a consistent structure with version, kind, metadata, and spec sections.
8
+
9
+ ## Common Structure
10
+
11
+ All YAML files share this basic structure:
12
+
13
+ ```yaml
14
+ version: '1.0'
15
+ kind: [Agent|Team|Debate]
16
+ metadata:
17
+ name: [string]
18
+ description: [string]
19
+ spec:
20
+ # Kind-specific configuration
21
+ ```
22
+
23
+ ## Agent Configuration
24
+
25
+ Agents are individual AI entities with specific roles and behaviors.
26
+
27
+ ### Basic Agent Structure
28
+
29
+ ```yaml
30
+ version: '1.0'
31
+ kind: Agent
32
+ metadata:
33
+ name: Albert Einstein
34
+ description: Theoretical physicist agent
35
+ spec:
36
+ id: einstein
37
+ model: gpt-4
38
+ goal: To think deeply about scientific questions
39
+ role: Theoretical Physicist
40
+ system_prompt: You are Albert Einstein, the brilliant theoretical physicist...
41
+ instructions:
42
+ - Think with deep intellectual curiosity
43
+ - Use thought experiments and analogies
44
+ - Challenge conventional assumptions
45
+ - Express complex ideas with elegant simplicity
46
+ output: # Optional: for structured responses
47
+ format: json
48
+ structure:
49
+ response: string
50
+ reasoning: string
51
+ ```
52
+
53
+ ### Agent Fields
54
+
55
+ - **id**: Unique identifier for the agent
56
+ - **model**: OpenAI model to use (e.g., gpt-4, gpt-3.5-turbo)
57
+ - **goal**: High-level objective of the agent
58
+ - **role**: Specific role or persona
59
+ - **system_prompt**: Instructions that define the agent's personality and behavior
60
+ - **instructions**: List of specific behavioral guidelines
61
+ - **output**: Optional structured output configuration
62
+
63
+ ### Output Structure
64
+
65
+ For structured responses, define the expected JSON format:
66
+
67
+ ```yaml
68
+ output:
69
+ format: json
70
+ structure:
71
+ field_name: data_type # string, number, boolean
72
+ nested_object:
73
+ subfield: string
74
+ array_field:
75
+ - item_type: string
76
+ instructions: |
77
+ Detailed instructions for the AI about what content to put in each field.
78
+ Explain the purpose of each field and what kind of response is expected.
79
+ This text will be included in the AI prompt to guide its output.
80
+ ```
81
+
82
+ The `instructions` field is optional but recommended for complex structured outputs. It provides specific guidance to the AI about how to fill each field, making the output more predictable and useful.
83
+
84
+ ## Team Configuration
85
+
86
+ Teams coordinate multiple agents to work together on tasks.
87
+
88
+ ### Basic Team Structure
89
+
90
+ ```yaml
91
+ version: '1.0'
92
+ kind: Team
93
+ metadata:
94
+ name: Research Team
95
+ description: A team for collaborative research
96
+ spec:
97
+ method: sequential # or parallel
98
+ input: Research question here
99
+ agents:
100
+ - agent1.yaml
101
+ - agent2.yaml
102
+ output:
103
+ format: json
104
+ structure:
105
+ conclusion: string
106
+ evidence: string
107
+ ```
108
+
109
+ ### Team Fields
110
+
111
+ - **method**: How agents collaborate (sequential, parallel)
112
+ - **input**: The task or question for the team
113
+ - **agents**: List of agent YAML file paths
114
+ - **output**: Optional structured output format
115
+
116
+ ## Debate Configuration
117
+
118
+ Debates pit agents against each other with judges to determine winners.
119
+
120
+ ### Basic Debate Structure
121
+
122
+ ```yaml
123
+ version: '1.0'
124
+ kind: Debate
125
+ metadata:
126
+ name: AI Ethics Debate
127
+ description: Debate on AI regulation
128
+ spec:
129
+ method: majority_vote # or first_judge
130
+ input: Should AI be regulated?
131
+ judges:
132
+ - judge.yaml
133
+ agents:
134
+ - agent1.yaml
135
+ - agent2.yaml
136
+ output:
137
+ format: json
138
+ structure:
139
+ winner: string
140
+ votes:
141
+ - agent: string
142
+ vote: string
143
+ ```
144
+
145
+ ### Debate Fields
146
+
147
+ - **method**: Voting method (majority_vote, first_judge)
148
+ - **input**: The debate topic or question
149
+ - **judges**: List of judge agent YAML file paths
150
+ - **agents**: List of debating agent YAML file paths
151
+ - **output**: Structured output for debate results with optional instructions
152
+
153
+ ### Structured Output with Instructions
154
+
155
+ For debates with complex evaluation requirements, use the `instructions` field to guide the judge's evaluation criteria:
156
+
157
+ ```yaml
158
+ output:
159
+ format: json
160
+ structure:
161
+ winner: string
162
+ reasoning: string
163
+ votes:
164
+ - agent: string
165
+ vote: string
166
+ instructions: |
167
+ Evaluate the debate arguments and provide structured judgment.
168
+ - winner: filename of the winning agent
169
+ - reasoning: overall explanation for the decision
170
+ - votes: detailed analysis of each agent's argument quality
171
+ ```
172
+
173
+ ## File Organization
174
+
175
+ Store YAML files in organized directories:
176
+
177
+ ```
178
+ default/
179
+ agents/
180
+ scientist.yaml
181
+ philosopher.yaml
182
+ judge.yaml
183
+ teams/
184
+ research_team.yaml
185
+ debates/
186
+ ethics_debate.yaml
187
+ ```
188
+
189
+ ## Best Practices
190
+
191
+ 1. **Use descriptive names**: Choose clear, meaningful names for agents and configurations
192
+ 2. **Write detailed system prompts**: Provide comprehensive instructions for agent behavior
193
+ 3. **Define clear roles**: Give each agent a distinct purpose and expertise
194
+ 4. **Structure outputs**: Use structured output for programmatic processing
195
+ 5. **Add instructions for complex outputs**: Use the `instructions` field in output configuration to guide AI behavior for structured responses
196
+ 6. **Test configurations**: Run agents individually before using in teams/debates
197
+ 7. **Version control**: Keep YAML files under version control for collaboration
198
+
199
+ ## Examples
200
+
201
+ ### Simple Agent
202
+ ```yaml
203
+ version: '1.0'
204
+ kind: Agent
205
+ metadata:
206
+ name: Helper
207
+ description: A helpful assistant
208
+ spec:
209
+ id: helper
210
+ model: gpt-4
211
+ goal: To assist users
212
+ role: Assistant
213
+ system_prompt: You are a helpful assistant.
214
+ instructions:
215
+ - Be polite and clear
216
+ - Provide accurate information
217
+ ```
218
+
219
+ ### Structured Output Agent
220
+ ```yaml
221
+ version: '1.0'
222
+ kind: Agent
223
+ metadata:
224
+ name: Analyst
225
+ description: Data analysis agent
226
+ spec:
227
+ id: analyst
228
+ model: gpt-4
229
+ goal: To analyze data and provide insights
230
+ role: Data Analyst
231
+ system_prompt: You are a data analyst who provides structured insights.
232
+ instructions:
233
+ - Analyze data objectively
234
+ - Provide clear conclusions
235
+ - Support claims with evidence
236
+ output:
237
+ format: json
238
+ structure:
239
+ analysis: string
240
+ key_findings:
241
+ - finding: string
242
+ importance: string
243
+ recommendation: string
244
+ ```
245
+
246
+ ### Debate with Multiple Judges
247
+ ```yaml
248
+ version: '1.0'
249
+ kind: Debate
250
+ metadata:
251
+ name: Climate Change Debate
252
+ description: Debate on climate policy
253
+ spec:
254
+ method: majority_vote
255
+ input: What is the best approach to combat climate change?
256
+ judges:
257
+ - judges/scientist_judge.yaml
258
+ - judges/economist_judge.yaml
259
+ agents:
260
+ - agents/environmentalist.yaml
261
+ - agents/economist.yaml
262
+ - agents/technologist.yaml
263
+ output:
264
+ format: json
265
+ structure:
266
+ winner: string
267
+ reasoning: string
268
+ votes:
269
+ - agent: string
270
+ vote: string
271
+ instructions: |
272
+ Evaluate each agent's argument based on:
273
+ - Scientific accuracy and evidence
274
+ - Economic feasibility and cost-benefit analysis
275
+ - Technical implementation challenges
276
+ - Social and political considerations
277
+ Provide detailed reasoning for your judgment.
278
+ ```
279
+
280
+ ## Validation
281
+
282
+ Always validate your YAML files:
283
+ - Use a YAML validator to check syntax
284
+ - Test agents individually with `llm-mar run agent.yaml`
285
+ - Ensure file paths are correct and accessible
286
+ - Verify structured output schemas match expected formats
287
+
package/index.js ADDED
@@ -0,0 +1,17 @@
1
#!/usr/bin/env node

// CLI entry point: wires the `create` and `run` command groups onto one
// commander program, then dispatches based on the process arguments.
const { Command } = require('commander');
const setupCreate = require('./commands/create');
const setupRun = require('./commands/run');

const program = new Command()
  .name('llm-mar')
  .description('CLI for LLM agent workflows')
  .version('1.0.0');

// Register sub-commands before parsing so commander can route to them.
setupCreate(program);
setupRun(program);

program.parse(process.argv);
package/package.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "name": "llm-mar",
3
+ "version": "1.0.0",
4
+ "description": "LLM-MAR is a compact CLI that creates LLM agents, lets them debate, answers questions, and builds workflows.",
5
+ "main": "index.js",
6
+ "bin": {
7
+ "llm-mar": "index.js"
8
+ },
9
+ "scripts": {
10
+ "test": "echo \"Error: no test specified\" && exit 1"
11
+ },
12
+ "keywords": [],
13
+ "author": "",
14
+ "repository": {
15
+ "type": "git",
16
+ "url": "https://github.com/renatojuniorrs/llm-mar.git"
17
+ },
18
+ "dependencies": {
19
+ "@langchain/openai": "^1.4.3",
20
+ "commander": "^14.0.3",
21
+ "js-yaml": "^4.1.1",
22
+ "langchain": "^1.3.1",
23
+ "zod": "^4.3.6"
24
+ }
25
+ }
@@ -0,0 +1,301 @@
1
+ const { runAgent } = require('../agents');
2
+ const fs = require('fs');
3
+ const yaml = require('js-yaml');
4
+ const { z } = require('zod');
5
+
6
+ function createZodSchema(structure) {
7
+ if (typeof structure === 'object') {
8
+ const schema = {};
9
+ for (const [key, value] of Object.entries(structure)) {
10
+ if (typeof value === 'string') {
11
+ if (value === 'string') schema[key] = z.string();
12
+ else if (value === 'number') schema[key] = z.number();
13
+ else if (value === 'boolean') schema[key] = z.boolean();
14
+ else schema[key] = z.string(); // default
15
+ } else if (Array.isArray(value)) {
16
+ if (value.length > 0 && typeof value[0] === 'object') {
17
+ schema[key] = z.array(createZodSchema(value[0]));
18
+ } else {
19
+ schema[key] = z.array(z.string()); // default
20
+ }
21
+ } else if (typeof value === 'object') {
22
+ schema[key] = createZodSchema(value);
23
+ }
24
+ }
25
+ return z.object(schema);
26
+ }
27
+ return z.string(); // fallback
28
+ }
29
+
30
+ function buildWorkflow(name) {
31
+ console.log(`Building workflow ${name}...`);
32
+ }
33
+
34
+ async function runTeam(config, agentId, input) {
35
+ // Find the agent: could be embedded object or file path
36
+ let agentConfig;
37
+ const agentEntry = config.agents.find(agent => {
38
+ if (typeof agent === 'string') {
39
+ // File path
40
+ const basename = agent.split('/').pop().replace('.yaml', '');
41
+ return basename === agentId;
42
+ } else {
43
+ // Embedded object
44
+ return agent.id === agentId;
45
+ }
46
+ });
47
+
48
+ if (!agentEntry) {
49
+ throw new Error(`Agent ${agentId} not found in team`);
50
+ }
51
+
52
+ if (typeof agentEntry === 'string') {
53
+ // Load from file
54
+ try {
55
+ const agentYaml = fs.readFileSync(agentEntry, 'utf8');
56
+ const agentData = yaml.load(agentYaml);
57
+ if (agentData.kind !== 'Agent') {
58
+ throw new Error(`${agentEntry} is not an Agent`);
59
+ }
60
+ agentConfig = agentData.spec;
61
+ } catch (e) {
62
+ throw new Error(`Agent file ${agentEntry} not found or invalid: ${e.message}`);
63
+ }
64
+ } else {
65
+ // Embedded
66
+ agentConfig = agentEntry;
67
+ if (!agentConfig.model) {
68
+ // Try to load from default/
69
+ try {
70
+ const agentYaml = fs.readFileSync(`default/${agentId}.yaml`, 'utf8');
71
+ const agentData = yaml.load(agentYaml);
72
+ agentConfig = agentData.spec;
73
+ } catch (e) {
74
+ throw new Error(`Agent ${agentId} not found or invalid`);
75
+ }
76
+ }
77
+ }
78
+
79
+ const agentYaml = {
80
+ kind: 'Agent',
81
+ spec: agentConfig
82
+ };
83
+ await runAgent(agentYaml, input);
84
+ }
85
+
86
+ async function runDebate(config) {
87
+ // Validate API key is set
88
+ if (!process.env.OPENAI_API_KEY) {
89
+ throw new Error('OPENAI_API_KEY environment variable is required to run debates. Please set it with: export OPENAI_API_KEY=your_api_key');
90
+ }
91
+
92
+ // Validate that all agents and judges files exist and are Agents or Teams
93
+ const allFiles = [...config.spec.agents, ...config.spec.judges];
94
+ for (const file of allFiles) {
95
+ try {
96
+ const yamlContent = fs.readFileSync(file, 'utf8');
97
+ const data = yaml.load(yamlContent);
98
+ if (data.kind !== 'Agent' && data.kind !== 'Team') {
99
+ throw new Error(`${file} is not an Agent or Team`);
100
+ }
101
+ } catch (e) {
102
+ throw new Error(`Participant file ${file} not found or invalid: ${e.message}`);
103
+ }
104
+ }
105
+
106
+ const outputFormat = config.spec.output || 'text';
107
+
108
+ // Collect arguments from agents
109
+ const agentArguments = [];
110
+ for (const agentFile of config.spec.agents) {
111
+ try {
112
+ const agentConfig = yaml.load(fs.readFileSync(agentFile, 'utf8'));
113
+ if (agentConfig.kind === 'Agent') {
114
+ // Run the agent with the debate input
115
+ const { ChatOpenAI } = require('@langchain/openai');
116
+
117
+ // Handle both model_settings object and legacy model string
118
+ const modelSettings = agentConfig.spec.model_settings || agentConfig.spec.model;
119
+ let modelConfig;
120
+ if (typeof modelSettings === 'string') {
121
+ modelConfig = { modelName: modelSettings };
122
+ } else {
123
+ modelConfig = {
124
+ modelName: modelSettings.model,
125
+ ...modelSettings
126
+ };
127
+ }
128
+
129
+ const model = new ChatOpenAI(modelConfig);
130
+ const response = await model.invoke([
131
+ { role: 'system', content: agentConfig.spec.system_prompt },
132
+ { role: 'user', content: `Debate this topic: ${config.spec.input}. Provide your argument.` }
133
+ ]);
134
+ agentArguments.push({
135
+ agent: agentFile,
136
+ argument: response.content
137
+ });
138
+ }
139
+ } catch (error) {
140
+ agentArguments.push({
141
+ agent: agentFile,
142
+ argument: `Error: ${error.message}`
143
+ });
144
+ }
145
+ }
146
+
147
+ // Have judges vote
148
+ const votes = [];
149
+ let structuredResult = null;
150
+ for (const judgeFile of config.spec.judges) {
151
+ try {
152
+ const judgeConfig = yaml.load(fs.readFileSync(judgeFile, 'utf8'));
153
+ if (judgeConfig.kind === 'Agent') {
154
+ const { ChatOpenAI } = require('@langchain/openai');
155
+
156
+ // Handle both model_settings object and legacy model string
157
+ const modelSettings = judgeConfig.spec.model_settings || judgeConfig.spec.model;
158
+ let modelConfig;
159
+ if (typeof modelSettings === 'string') {
160
+ modelConfig = { modelName: modelSettings };
161
+ } else {
162
+ modelConfig = {
163
+ modelName: modelSettings.model,
164
+ ...modelSettings
165
+ };
166
+ }
167
+
168
+ const model = new ChatOpenAI(modelConfig);
169
+
170
+ let judgePrompt;
171
+ let responseFormat = null;
172
+
173
+ if (typeof outputFormat === 'object' && outputFormat.format === 'json' && outputFormat.structure) {
174
+ // Use structured output for judge
175
+ responseFormat = createZodSchema(outputFormat.structure);
176
+ const instructions = outputFormat.instructions || `Evaluate the arguments and provide your judgment in the required structured format. The winner should be the filename of the winning agent. Each vote should have the agent filename and reasoning.`;
177
+ judgePrompt = `You are judging a debate on: ${config.spec.input}\n\nArguments:\n${agentArguments.map((arg, i) => `Agent ${i+1} (${arg.agent}): ${arg.argument}`).join('\n\n')}\n\nTASK: ${instructions}`;
178
+ } else {
179
+ judgePrompt = `You are judging a debate on: ${config.spec.input}\n\nArguments:\n${agentArguments.map((arg, i) => `Agent ${i+1} (${arg.agent}): ${arg.argument}`).join('\n\n')}\n\nTASK: Choose which agent presented the strongest argument.\n\nIMPORTANT: Respond with ONLY the filename of the winning agent. Do not include any other text, explanation, or punctuation.\n\nValid responses: ${config.spec.agents.map(a => `"${a}"`).join(' or ')}`;
180
+ }
181
+
182
+ let response;
183
+ if (responseFormat) {
184
+ const structuredModel = model.withStructuredOutput(responseFormat);
185
+ response = await structuredModel.invoke([
186
+ { role: 'system', content: judgeConfig.spec.system_prompt },
187
+ { role: 'user', content: judgePrompt }
188
+ ]);
189
+ structuredResult = response; // Use the last judge's structured response
190
+ } else {
191
+ response = await model.invoke([
192
+ { role: 'system', content: judgeConfig.spec.system_prompt },
193
+ { role: 'user', content: judgePrompt }
194
+ ]);
195
+ }
196
+
197
+ if (responseFormat) {
198
+ // Structured response already handled
199
+ } else {
200
+ // Parse the vote - extract filename
201
+ let vote = response.content.trim();
202
+ // Remove quotes if present
203
+ vote = vote.replace(/^["']|["']$/g, '');
204
+ // Check if it's a valid agent file
205
+ if (!config.spec.agents.includes(vote)) {
206
+ // Try to find a matching filename in the response
207
+ const filenameMatch = vote.match(/(default\/[^.]+\.yaml)/);
208
+ if (filenameMatch) {
209
+ vote = filenameMatch[1];
210
+ } else {
211
+ vote = 'Invalid';
212
+ }
213
+ }
214
+
215
+ votes.push({
216
+ judge: judgeFile,
217
+ vote: vote
218
+ });
219
+ }
220
+ }
221
+ } catch (error) {
222
+ votes.push({
223
+ judge: judgeFile,
224
+ vote: 'Error'
225
+ });
226
+ }
227
+ }
228
+
229
+ // Count votes to determine winner
230
+ const voteCount = {};
231
+ votes.forEach(v => {
232
+ if (v.vote && v.vote !== 'Error') {
233
+ voteCount[v.vote] = (voteCount[v.vote] || 0) + 1;
234
+ }
235
+ });
236
+
237
+ let winner = null;
238
+ let maxVotes = 0;
239
+ for (const [candidate, count] of Object.entries(voteCount)) {
240
+ if (count > maxVotes) {
241
+ maxVotes = count;
242
+ winner = candidate;
243
+ }
244
+ }
245
+
246
+ const winnerArgument = agentArguments.find(arg => arg.agent === winner)?.argument || 'No argument found';
247
+
248
+ if (typeof outputFormat === 'object' && outputFormat.format === 'json') {
249
+ let result;
250
+ if (structuredResult) {
251
+ result = structuredResult;
252
+ // Add agents_response in debug mode
253
+ if (config.spec.debug) {
254
+ result.agents_response = agentArguments;
255
+ }
256
+ } else {
257
+ result = {
258
+ debate: config.metadata.name,
259
+ method: config.spec.method,
260
+ input: config.spec.input,
261
+ agents: config.spec.agents,
262
+ judges: config.spec.judges,
263
+ arguments: agentArguments,
264
+ votes: votes,
265
+ winner: winner,
266
+ winnerResponse: winnerArgument,
267
+ status: 'completed',
268
+ timestamp: new Date().toISOString()
269
+ };
270
+ // Add agents_response in debug mode
271
+ if (config.spec.debug) {
272
+ result.agents_response = agentArguments;
273
+ }
274
+ }
275
+ console.log(JSON.stringify(result, null, 2));
276
+ } else {
277
+ console.log(`Running debate with method: ${config.spec.method}`);
278
+ console.log(`Topic: ${config.spec.input}`);
279
+ console.log(`Agents: ${config.spec.agents.join(', ')}`);
280
+ console.log(`Judges: ${config.spec.judges.join(', ')}`);
281
+
282
+ agentArguments.forEach((arg, index) => {
283
+ console.log(`\nAgent ${index + 1} (${arg.agent}):`);
284
+ console.log(arg.argument);
285
+ });
286
+
287
+ console.log('\nVotes:');
288
+ votes.forEach(v => {
289
+ console.log(`${v.judge}: ${v.vote}`);
290
+ });
291
+
292
+ console.log(`\nWinner: ${winner}`);
293
+ console.log(`Winning argument: ${winnerArgument}`);
294
+ }
295
+ }
296
+
297
+ module.exports = {
298
+ buildWorkflow,
299
+ runTeam,
300
+ runDebate
301
+ };