clm-plugin 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clm/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ """
2
+ Cognitive Load Manager (CLM) v0.1
3
+
4
+ A metacognitive middleware layer for LLM-based agent loops that monitors
5
+ cognitive load, detects overload conditions, and intervenes through task
6
+ compression, goal anchoring, and clarification requests.
7
+ """
8
+
9
+ __version__ = "0.1.0"
10
+
11
+ from clm.cognitive_load_manager import CognitiveLoadManager
12
+ from clm.core.config import CLMConfig
13
+ from clm.core.models import TaskState, TaskTree, TaskNode, InterventionResponse
14
+ from clm.utils.auto_state import AutoStateBuilder
15
+ from clm.exceptions import (
16
+ CLMError,
17
+ ConfigurationError,
18
+ StorageError,
19
+ EmbeddingError,
20
+ ValidationError,
21
+ ExpansionError,
22
+ )
23
+
24
+ # Shorthand alias — users write CLM() not CognitiveLoadManager()
25
+ CLM = CognitiveLoadManager
26
+
27
+ # Import adapters with graceful fallback for missing optional dependencies
28
+ try:
29
+ from clm.adapters.langchain_adapter import CLMCallbackHandler
30
+ except ImportError:
31
+ pass # LangChain not installed, adapter unavailable
32
+
33
+ try:
34
+ from clm.adapters.openai_adapter import CLMOpenAIHook
35
+ except ImportError:
36
+ pass
37
+
38
+ from clm.adapters.loop_adapter import CLMLoop
39
+
40
+ __all__ = [
41
+ "CognitiveLoadManager",
42
+ "CLM",
43
+ "CLMConfig",
44
+ "TaskState",
45
+ "TaskTree",
46
+ "TaskNode",
47
+ "InterventionResponse",
48
+ "AutoStateBuilder",
49
+ "CLMError",
50
+ "ConfigurationError",
51
+ "StorageError",
52
+ "EmbeddingError",
53
+ "ValidationError",
54
+ "ExpansionError",
55
+ "CLMCallbackHandler",
56
+ "CLMOpenAIHook",
57
+ "CLMLoop",
58
+ ]
59
+
@@ -0,0 +1,11 @@
1
"""Adapter re-exports for ``clm.adapters``.

``CLMLoop`` is always available; the LangChain and OpenAI adapters are
optional and are silently omitted when their dependencies are missing.
"""

from clm.adapters.loop_adapter import CLMLoop

# Optional: requires LangChain (pip install clm-agent[langchain]).
try:
    from clm.adapters.langchain_adapter import CLMCallbackHandler
except ImportError:
    pass

# Optional: requires the OpenAI Agents SDK.
try:
    from clm.adapters.openai_adapter import CLMOpenAIHook
except ImportError:
    pass
@@ -0,0 +1,121 @@
1
+ """
2
+ LangChain adapter for CLM.
3
+ Usage:
4
+ from clm.adapters import CLMCallbackHandler
5
+ agent.run(input, callbacks=[CLMCallbackHandler(verbose=True)])
6
+ """
7
+ import uuid
8
+ from typing import Any
9
+
10
+ try:
11
+ from langchain.callbacks.base import BaseCallbackHandler
12
+ except ImportError:
13
+ raise ImportError(
14
+ "LangChain is required for CLMCallbackHandler. "
15
+ "Install it with: pip install clm-agent[langchain]"
16
+ )
17
+
18
+ from clm import CognitiveLoadManager, CLMConfig
19
+ from clm.core.models import TaskState, TaskTree, TaskNode
20
+
21
+
22
class CLMCallbackHandler(BaseCallbackHandler):
    """
    Drop-in CLM integration for any LangChain agent or chain.

    Builds a TaskTree on the fly from LLM calls and tool invocations, so no
    manual task-state construction is required.

    Usage:
        clm_handler = CLMCallbackHandler(verbose=True)
        agent.run("your task", callbacks=[clm_handler])

        # After run, inspect what CLM did:
        print(clm_handler.clm.summary())
    """

    def __init__(self, config: CLMConfig = None, verbose: bool = False):
        super().__init__()
        self.clm = CognitiveLoadManager(config, verbose=verbose)
        self._history: list[str] = []
        self._tool_calls: list[str] = []
        self._root_intent: str = ""
        self._session_id = str(uuid.uuid4())[:8]

        # A live task tree that grows as the agent runs.
        root_id = f"root_{self._session_id}"
        root_node = TaskNode(
            task_id=root_id,
            parent_id=None,
            description="agent_session",
            status="active",
            depth=0,
        )
        self._task_tree = TaskTree(root=root_node, root_intent="")
        self._current_task_id = root_id

    def on_llm_start(self, serialized: dict, prompts: list[str], **kwargs) -> None:
        """Capture the root intent from the very first prompt."""
        if self._root_intent or not prompts:
            return
        self._root_intent = prompts[0][:300]
        self._task_tree.root_intent = self._root_intent
        self._task_tree.root.description = self._root_intent[:100]

    def on_llm_end(self, response: Any, **kwargs) -> None:
        """Observe after every LLM call — the core integration point."""
        try:
            text = response.generations[0][0].text
        except (IndexError, AttributeError):
            # Empty or unexpectedly-shaped response: nothing to observe.
            return

        self._history.append(text)
        state = TaskState(
            task_tree=self._task_tree,
            current_task_id=self._current_task_id,
            reasoning_history=self._history[-3:],
        )
        result = self.clm.observe(text, state)

        # On a patch intervention, fold the compressed context back into the tree.
        if result.action == "patch" and result.context:
            self._task_tree.root.description = result.context[:200]

    def on_tool_start(self, serialized: dict, input_str: str, **kwargs) -> None:
        """Record each tool call as a new sub-task node in the tree."""
        name = serialized.get("name", "unknown_tool")
        task_id = f"tool_{name}_{len(self._tool_calls)}"
        self._tool_calls.append(task_id)
        self._task_tree.root.children.append(
            TaskNode(
                task_id=task_id,
                parent_id=self._task_tree.root.task_id,
                description=f"{name}: {input_str[:100]}",
                status="active",
                depth=1,
            )
        )
        self._current_task_id = task_id

    def on_tool_end(self, output: str, **kwargs) -> None:
        """Mark the tool task complete and return focus to the root."""
        node = self._task_tree.find_node(self._current_task_id)
        if node:
            node.status = "completed"
        self._current_task_id = self._task_tree.root.task_id

    def on_chain_end(self, outputs: dict, **kwargs) -> None:
        """Print a one-line session summary when the chain finishes (verbose only)."""
        if not self.clm.verbose:
            return
        stats = self.clm.summary()
        print(
            f"\n[CLM] Session complete — "
            f"{stats['steps']} steps | avg score {stats['avg_score']} | "
            f"peak {stats['peak_score']} | "
            f"patches={stats['interventions']['patch']} "
            f"interrupts={stats['interventions']['interrupt']}",
            flush=True
        )
@@ -0,0 +1,135 @@
1
+ """
2
+ Generic loop adapter for CLM. Works with any LLM: OpenAI, Anthropic, Gemini, local models.
3
+
4
+ Usage:
5
+ from clm.adapters import CLMLoop
6
+
7
+ @CLMLoop(verbose=True)
8
+ def my_agent_step(prompt: str) -> str:
9
+ return openai_client.chat(prompt) # or any LLM call
10
+
11
+ # Now call it normally — CLM wraps every step automatically
12
+ for i in range(10):
13
+ response = my_agent_step(current_prompt)
14
+ """
15
+ import uuid
16
+ import functools
17
+ from clm import CognitiveLoadManager, CLMConfig
18
+ from clm.core.models import TaskState, TaskTree, TaskNode
19
+
20
+
21
class CLMLoop:
    """
    Decorator that wraps any agent step function with CLM.

    The decorated function must accept a string prompt and return a string
    output. CLM observes every call automatically and maintains its own
    internal task tree.

    Usage as decorator:
        @CLMLoop(verbose=True)
        def agent_step(prompt: str) -> str:
            return llm_call(prompt)

    Usage as context manager:
        with CLMLoop(verbose=True) as loop:
            for step in range(max_steps):
                output = loop.step(prompt, task_description="current subtask")
                if loop.should_stop():
                    break

    Access CLM internals any time:
        loop.clm.summary()
        loop.clm.get_history()
        loop.clm.get_score()
    """

    def __init__(self, config: CLMConfig = None, verbose: bool = False):
        self.clm = CognitiveLoadManager(config, verbose=verbose)
        self._history: list[str] = []
        self._session_id = str(uuid.uuid4())[:8]
        self._step_count = 0
        self._root_intent = ""

        root_id = f"root_{self._session_id}"
        self._task_tree = TaskTree(
            root=TaskNode(
                task_id=root_id,
                parent_id=None,
                description="loop_root",
                status="active",
                depth=0,
            ),
            root_intent="",
        )
        self._current_task_id = root_id

    def __call__(self, fn):
        """Decorator usage: @CLMLoop()"""
        @functools.wraps(fn)
        def wrapper(prompt: str, *args, **kwargs) -> str:
            result = fn(prompt, *args, **kwargs)
            self._observe(prompt, result)
            return result

        # Expose CLM internals directly on the wrapped function.
        wrapper.clm = self.clm
        wrapper.summary = self.clm.summary
        wrapper.get_history = self.clm.get_history
        return wrapper

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.clm.close()

    def step(self, prompt: str, output: str = None,
             task_description: str = None) -> dict:
        """
        Manually advance one step. Pass prompt + output, get CLM result back.
        If output is None, returns a partial state (pre-LLM call marker).

        Returns the InterventionResponse as a dict so callers can branch on it.
        """
        if output is None:
            # Pre-call marker: just record the intent, observe nothing.
            if not self._root_intent:
                self._root_intent = prompt[:300]
                self._task_tree.root_intent = self._root_intent
            return {"action": "pending"}

        resp = self._observe(prompt, output, task_description)
        return {
            "action": resp.action,
            "zone": resp.zone,
            "score": resp.clm_score,
            "context": resp.context,
            "clarification": resp.clarification,
        }

    def should_stop(self) -> bool:
        """
        Return True when CLM is in the Red zone — useful for agents that
        prefer to pause and ask for clarification rather than continue blind.
        """
        return self.clm.get_zone() == "Red"

    def _observe(self, prompt: str, output: str,
                 task_description: str = None) -> object:
        # Core bookkeeping shared by decorator and manual step() usage.
        self._step_count += 1
        self._history.append(output)

        if not self._root_intent:
            self._root_intent = prompt[:300]
            self._task_tree.root_intent = self._root_intent

        # Record this step as a child node of the root.
        step_id = f"step_{self._step_count}"
        self._task_tree.root.children.append(
            TaskNode(
                task_id=step_id,
                parent_id=self._task_tree.root.task_id,
                description=task_description or output[:100],
                status="active",
                depth=1,
            )
        )
        self._current_task_id = step_id

        state = TaskState(
            task_tree=self._task_tree,
            current_task_id=self._current_task_id,
            reasoning_history=self._history[-3:],
        )
        return self.clm.observe(output, state)
@@ -0,0 +1,94 @@
1
+ """
2
+ OpenAI Agents SDK adapter for CLM.
3
+
4
+ Usage:
5
+ from clm.adapters import CLMOpenAIHook
6
+
7
+ hook = CLMOpenAIHook(verbose=True)
8
+ # Pass hook.on_message as a callback to your OpenAI agent runner
9
+ # OR use hook.wrap(runner) to auto-attach
10
+ """
11
+ import uuid
12
+ from clm import CognitiveLoadManager, CLMConfig
13
+ from clm.core.models import TaskState, TaskTree, TaskNode
14
+
15
+
16
class CLMOpenAIHook:
    """
    Lifecycle hooks for OpenAI Agents SDK.

    Compatible with openai-agents SDK on_message_end and on_tool_call hooks.

    Usage:
        hook = CLMOpenAIHook(verbose=True)

        # Manual hook attachment:
        result = await Runner.run(agent, input,
                                  hooks=hook.get_hooks())

        # After run:
        print(hook.clm.summary())
    """

    def __init__(self, config: CLMConfig = None, verbose: bool = False):
        self.clm = CognitiveLoadManager(config, verbose=verbose)
        self._history: list[str] = []
        self._tool_nodes: list[str] = []
        session_id = str(uuid.uuid4())[:8]
        root_id = f"root_{session_id}"
        self._task_tree = TaskTree(
            root=TaskNode(root_id, None, "openai_agent_session", "active", depth=0),
            root_intent=""
        )
        self._current_task_id = root_id

    def on_message_end(self, message) -> None:
        """Hook: fires after each agent message/LLM response.

        BUG FIX: a message whose ``content`` attribute exists but is None
        (presumably tool-call messages — confirm against the SDK) was
        previously stringified to the literal "None", slipping past the
        empty-content guard and being observed as real output; the ``text``
        fallback was also never reached. None-valued attributes are now
        skipped explicitly.
        """
        raw = getattr(message, "content", None)
        if raw is None:
            raw = getattr(message, "text", None)
        content = str(raw) if raw is not None else ""

        if not content:
            return

        self._history.append(content)
        # The first non-empty message doubles as the session's root intent.
        if not self._task_tree.root_intent:
            self._task_tree.root_intent = content[:300]

        state = TaskState(
            task_tree=self._task_tree,
            current_task_id=self._current_task_id,
            reasoning_history=self._history[-3:]  # CLM only needs recent context
        )
        self.clm.observe(content, state)

    def on_tool_call(self, tool_name: str, tool_input: dict) -> None:
        """Hook: fires before each tool call — adds a sub-task node."""
        task_id = f"tool_{tool_name}_{len(self._tool_nodes)}"
        self._tool_nodes.append(task_id)
        node = TaskNode(
            task_id=task_id,
            parent_id=self._task_tree.root.task_id,
            description=f"{tool_name}: {str(tool_input)[:100]}",
            status="active",
            depth=1
        )
        self._task_tree.root.children.append(node)
        self._current_task_id = task_id

    def on_tool_end(self, tool_name: str, output: str) -> None:
        """Hook: fires after each tool call — marks the node complete."""
        node = self._task_tree.find_node(self._current_task_id)
        if node:
            node.status = "completed"
        self._current_task_id = self._task_tree.root.task_id

    def get_hooks(self) -> dict:
        """Return hooks dict compatible with OpenAI Agents SDK Runner."""
        return {
            "on_message_end": self.on_message_end,
            "on_tool_call": self.on_tool_call,
            "on_tool_end": self.on_tool_end,
        }