@amitdeshmukh/ax-crew 7.0.0 → 8.0.0

package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
  # Changelog

+ ## [8.0.0] - 2025-01-17
+
+ ### Added
+ - **ACE (Agentic Context Engineering) Support** - Per-agent learning from human feedback
+ - Agents can learn from real-time feedback and update their behavior
+ - Playbook persistence to JSON files or custom storage via callbacks
+ - Teacher/Student model architecture for distilling feedback into rules
+ - Feedback routing across agent dependency chains (`applyTaskFeedback`)
+ - New `ACEConfig` interface with `teacher`, `persistence`, `options`, and `metric` fields
+ - New type exports: `ACEConfig`, `ACETeacherConfig`, `ACEPersistenceConfig`, `ACEOptionsConfig`, `ACEMetricConfig`
+ - Export `AxCrewOptions` type for consumer type safety
+ - ACE examples: `ace-customer-support.ts` and `ace-flight-finder.ts`
+ - ACE documentation section in README with configuration options and usage examples
+
+ ### Changed
+ - ACE configuration is presence-based (no redundant `enabled` boolean needed)
+
  ## [7.0.0] - 2025-12-27

  ### Breaking
package/README.md CHANGED
@@ -851,6 +851,110 @@ npm run dev examples/telemetry-demo.ts
  4. **Custom Attributes**: Extend traces with custom attributes specific to your use case
  5. **Performance**: Telemetry adds minimal overhead when properly configured

+ ### ACE Support (Agentic Context Engineering)
+
+ AxCrew integrates [Agentic Context Engineering (ACE)](https://www.youtube.com/watch?v=elgYgPo_vY4) from the Ax framework, enabling agents to learn and improve from human feedback. ACE maintains a "playbook" of learned rules that guide agent behavior, which can be persisted across sessions.
+
+ #### Key Features
+
+ - **Online Learning**: Agents learn from real-time feedback during conversations
+ - **Playbook Persistence**: Save learned rules to JSON files or custom storage
+ - **Teacher/Student Model**: Use a separate "teacher" LLM to distill feedback into actionable rules
+ - **Feedback Routing**: Distribute feedback across agent dependency chains automatically
+
+ #### Configuration
+
+ Add the `ace` field to any agent configuration:
+
+ ```typescript
+ {
+   name: "SupportAgent",
+   description: "Customer support agent",
+   signature: "ticket:string -> supportResponse:string, decision:string",
+   provider: "google-gemini",
+   providerKeyName: "GEMINI_API_KEY",
+   ai: { model: "gemini-flash-latest", temperature: 0.7 },
+   ace: {
+     teacher: {
+       provider: "google-gemini",
+       providerKeyName: "GEMINI_API_KEY",
+       ai: { model: "gemini-flash-latest" }
+     },
+     options: {
+       maxEpochs: 1,
+       allowDynamicSections: true
+     },
+     persistence: {
+       playbookPath: "playbooks/support-agent.json",
+       autoPersist: true
+     },
+     metric: {
+       primaryOutputField: "supportResponse"
+     }
+   }
+ }
+ ```
+
+ #### ACE Configuration Options
+
+ | Field | Type | Description |
+ |-------|------|-------------|
+ | `teacher` | object | Teacher model config (provider, model, apiURL) |
+ | `persistence.playbookPath` | string | File path to save/load playbook |
+ | `persistence.autoPersist` | boolean | Auto-save playbook after updates |
+ | `persistence.onPersist` | function | Custom callback for saving playbook |
+ | `persistence.onLoad` | function | Custom callback for loading playbook |
+ | `options.maxEpochs` | number | Training epochs for offline compile |
+ | `options.allowDynamicSections` | boolean | Allow playbook to create new sections |
+ | `metric.primaryOutputField` | string | Output field to evaluate for quality |
+ | `compileOnStart` | boolean | Run offline compile on agent init |
+
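+ For storage other than a local JSON file, `persistence.onPersist` and `persistence.onLoad` can be used instead of `playbookPath`. The sketch below is a minimal illustration, not code from the package: it assumes `onPersist` is called with the updated playbook object and `onLoad` returns a previously saved playbook (or nothing), since the exact callback signatures are not shown here.
+
+ ```typescript
+ // Hypothetical sketch: only the persistence-related part of an `ace` config is shown,
+ // and the callback signatures are assumptions rather than documented types.
+ const playbookStore = new Map<string, unknown>(); // stand-in for a real database
+
+ const acePersistence = {
+   // Assumed to be called with the updated playbook after each change
+   onPersist: async (playbook: unknown) => {
+     playbookStore.set("support-agent", playbook);
+   },
+   // Assumed to be called on agent init to restore a saved playbook, if any
+   onLoad: async () => playbookStore.get("support-agent")
+ };
+
+ // Then pass it as the `persistence` field of the agent's `ace` configuration.
+ ```
+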
+ #### Usage: Applying Feedback
+
+ ```typescript
+ import { AxCrew, AxCrewFunctions } from '@amitdeshmukh/ax-crew';
+
+ const crew = new AxCrew(config, AxCrewFunctions);
+ await crew.addAgentsToCrew(['SupportAgent']);
+
+ const agent = crew.agents.get('SupportAgent');
+
+ // Run the agent
+ const result = await agent.forward({ ticket: "Customer wants refund after 45 days" });
+ const taskId = result._taskId;
+
+ // Apply feedback to teach the agent
+ await crew.applyTaskFeedback({
+   taskId,
+   feedback: "For loyal customers (5+ years), extend return window to 60 days",
+   strategy: "all" // Apply to all agents involved in this task
+ });
+
+ // View the learned playbook
+ const playbook = agent.getPlaybook?.();
+ console.log(playbook);
+ ```
+
+ #### Feedback Strategies
+
+ | Strategy | Description |
+ |----------|-------------|
+ | `"all"` | Apply feedback to all agents involved in the task |
+ | `"primary"` | Apply only to the primary (entry) agent |
+ | `"leaf"` | Apply only to leaf agents (no sub-agents) |
+
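+ As a brief sketch, the same `applyTaskFeedback` call from the usage example above can target a narrower set of agents by switching the strategy (the feedback text is illustrative only):
+
+ ```typescript
+ // `crew` and `taskId` come from the usage example above.
+ // Route this feedback only to the entry-point agent for the task;
+ // the playbooks of any sub-agents it called are left unchanged.
+ await crew.applyTaskFeedback({
+   taskId,
+   feedback: "Always confirm the order number before discussing refunds",
+   strategy: "primary"
+ });
+ ```
+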
+ #### Examples
+
+ See the ACE examples for complete demonstrations:
+
+ - [`ace-customer-support.ts`](examples/ace-customer-support.ts) - Learn edge-case handling beyond standard policies
+ - [`ace-feedback-routing.ts`](examples/ace-feedback-routing.ts) - Flight assistant with preference learning
+
+ ```bash
+ # Run the customer support demo
+ npx tsx examples/ace-customer-support.ts
+ ```
+
  ## Changelog

  See [CHANGELOG.md](CHANGELOG.md) for a list of changes and version updates.
@@ -0,0 +1,134 @@
+ /**
+  * ACE (Agentic Context Engineering) integration for AxCrew
+  *
+  * This module provides helpers to build and manage AxACE optimizers for agents,
+  * enabling offline compilation and online learning from feedback.
+  *
+  * Reference: https://axllm.dev/ace/
+  */
+ import { AxACE, type AxMetricFn } from "@ax-llm/ax";
+ import type { AxAI } from "@ax-llm/ax";
+ import type { ACEConfig, ACEPersistenceConfig, ACEMetricConfig, FunctionRegistryType } from "../types.js";
+ export type { AxACE, AxMetricFn };
+ /**
+  * Create an empty playbook structure
+  */
+ export declare const createEmptyPlaybook: () => ACEPlaybook;
+ /**
+  * Playbook types (mirroring AxACEPlaybook structure)
+  */
+ export interface ACEBullet {
+     id: string;
+     section: string;
+     content: string;
+     helpfulCount: number;
+     harmfulCount: number;
+     createdAt: string;
+     updatedAt: string;
+     metadata?: Record<string, unknown>;
+ }
+ export interface ACEPlaybook {
+     version: number;
+     sections: Record<string, ACEBullet[]>;
+     stats: {
+         bulletCount: number;
+         helpfulCount: number;
+         harmfulCount: number;
+         tokenEstimate: number;
+     };
+     updatedAt: string;
+     description?: string;
+ }
+ /**
+  * Render a playbook into a markdown instruction block for injection into prompts.
+  * Mirrors the AxACE renderPlaybook function.
+  */
+ export declare const renderPlaybook: (playbook: Readonly<ACEPlaybook>) => string;
+ /**
+  * Build an AxACE optimizer for an agent
+  *
+  * @param studentAI - The agent's AI instance (used as student)
+  * @param cfg - ACE configuration
+  * @returns Configured AxACE optimizer
+  */
+ export declare const buildACEOptimizer: (studentAI: AxAI, cfg: ACEConfig) => AxACE;
+ /**
+  * Load initial playbook from file, callback, or inline config
+  *
+  * @param cfg - Persistence configuration
+  * @returns Loaded playbook or undefined
+  */
+ export declare const loadInitialPlaybook: (cfg?: ACEPersistenceConfig) => Promise<any | undefined>;
+ /**
+  * Persist playbook to file or via callback
+  *
+  * @param pb - Playbook to persist
+  * @param cfg - Persistence configuration
+  */
+ export declare const persistPlaybook: (pb: any, cfg?: ACEPersistenceConfig) => Promise<void>;
+ /**
+  * Resolve metric function from registry or create equality-based metric
+  *
+  * @param cfg - Metric configuration
+  * @param registry - Function registry to search
+  * @returns Metric function or undefined
+  */
+ export declare const resolveMetric: (cfg: ACEMetricConfig | undefined, registry: FunctionRegistryType) => AxMetricFn | undefined;
+ /**
+  * Run offline ACE compilation
+  *
+  * @param args - Compilation arguments
+  * @returns Compilation result with optimized program
+  */
+ export declare const runOfflineCompile: (args: {
+     program: any;
+     optimizer: AxACE;
+     metric: AxMetricFn;
+     examples: any[];
+     persistence?: ACEPersistenceConfig;
+ }) => Promise<any>;
+ /**
+  * Apply online update with feedback
+  *
+  * @param args - Update arguments
+  * @returns Curator delta (operations applied)
+  */
+ export declare const runOnlineUpdate: (args: {
+     optimizer: AxACE;
+     example: any;
+     prediction: any;
+     feedback?: string;
+     persistence?: ACEPersistenceConfig;
+     tokenBudget?: number;
+     debug?: boolean;
+ }) => Promise<any>;
+ /**
+  * Use an LLM to analyze feedback and generate playbook operations.
+  *
+  * This leverages AxGen with a dedicated signature (like AxACE's reflector/curator)
+  * to categorize feedback and extract actionable insights.
+  *
+  * IMPORTANT: The prompt explicitly tells the LLM to preserve specificity.
+  *
+  * @param ai - The AI instance to use for analysis
+  * @param feedback - User feedback string
+  * @param debug - Whether to log debug info
+  * @returns Promise of curator operations
+  */
+ export declare const analyzeAndCategorizeFeedback: (ai: AxAI, feedback: string, debug?: boolean) => Promise<Array<{
+     type: "ADD" | "UPDATE" | "REMOVE";
+     section: string;
+     content: string;
+ }>>;
+ /**
+  * Add feedback to a playbook using LLM analysis.
+  *
+  * Uses the AI to understand and categorize the feedback,
+  * then applies it as a curator operation.
+  *
+  * @param playbook - The playbook to update (mutated in place)
+  * @param feedback - User feedback string to add
+  * @param ai - AI instance for smart categorization
+  * @param debug - Whether to log debug info
+  */
+ export declare const addFeedbackToPlaybook: (playbook: ACEPlaybook, feedback: string, ai: AxAI, debug?: boolean) => Promise<void>;
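
The declarations above give enough surface to sketch how the standalone helpers could be combined outside of a crew. This is a minimal, hypothetical illustration: the module's import path is not named in this diff, so the path below is a guess, and `ai` is assumed to be an already-configured `AxAI` instance from `@ax-llm/ax`.

```typescript
import type { AxAI } from '@ax-llm/ax';
// Hypothetical import path: the diff does not name the file these declarations ship in.
import { createEmptyPlaybook, addFeedbackToPlaybook, renderPlaybook } from '@amitdeshmukh/ax-crew/ace';

// Assumed to be an already-configured AxAI instance (for example, the agent's own AI).
declare const ai: AxAI;

async function demo(): Promise<void> {
  // Start from an empty playbook, fold one piece of feedback into it via LLM analysis,
  // then render it as a markdown instruction block for injection into prompts.
  const playbook = createEmptyPlaybook();
  await addFeedbackToPlaybook(playbook, 'Prefer refunds over store credit for damaged items', ai);
  console.log(renderPlaybook(playbook));
}
```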