scout-ai 1.2.1 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 456939ec51f2ea2c9be5a7fa5eb7992161e893dcfb6618b85eac7e7317af25fa
4
- data.tar.gz: 67d22d1cd09f74c73843f143897d543993df4f5d68b9649ab1751a19a944e8fd
3
+ metadata.gz: 648a917e45f537ed44b068e1e4762a8a2846dbef5280cb9c37a0c2351e9e95b1
4
+ data.tar.gz: 25bf5fdbac1f713e6746a692f4aecefa594b69d6df17b526e4658182529a0b3a
5
5
  SHA512:
6
- metadata.gz: 501bf9ff434058cb2ebad6b084a7ffbccf8a392b1a735c9e6e5726afcc23c7331ef471fc4c675130e49920def430332dd01b647b5f9a473c038b8ef0609d55e1
7
- data.tar.gz: a267f504d6342a15fe1c0b7f180a47e3b7cd532b71fbd4a5fd3868b4fb7337f3a5c4f5d1ce0aa53276eb268e64ec035806d262b7713dba23d65bb50af2d6c3ab
6
+ metadata.gz: 6a46450a6d7e0b49b66cb2c0fe94efaa4ba8d4e030a607f8fd2da8a610af0143ae3272f04a470019e72d6b62e6510c7c291a75f1b70871ae3befeed87138237a
7
+ data.tar.gz: 2fe91febb6d59bca6fa3b942a4bd1fbade3f6001df35be85497e0020979de9c7642185bc8bb221a2f0edbe2872c41fbc24851805f47801425125d320a575e2c8
data/.vimproject CHANGED
@@ -49,7 +49,7 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
49
49
 
50
50
  test_prompt.rb
51
51
 
52
- openclaw
52
+ from_python
53
53
  }
54
54
  lib=lib {
55
55
  scout-ai.rb
@@ -144,17 +144,21 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
144
144
  python=python filter="*"{
145
145
  scout_ai=scout_ai{
146
146
  __init__.py
147
- language_model.py
147
+ agent.py
148
+ chat.py
149
+ data.py
150
+ message.py
151
+ runner.py
148
152
  util.py
149
153
  huggingface=huggingface{
150
154
  data.py
151
155
  eval.py
152
156
  model.py
157
+ rlhf.py
153
158
  train=train{
154
159
  __init__.py
155
160
  next_token.py
156
161
  }
157
- rlhf.py
158
162
  }
159
163
  }
160
164
  }
@@ -188,6 +192,7 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
188
192
  documenter
189
193
  llm=llm{
190
194
  ask
195
+ json
191
196
  template
192
197
  process
193
198
  server
@@ -195,6 +200,7 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
195
200
  agent=agent{
196
201
  ask
197
202
  kb
203
+ find
198
204
  }
199
205
  }
200
206
  }
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.2.1
1
+ 1.2.2
@@ -10,7 +10,7 @@ module LLM
10
10
  end
11
11
 
12
12
  class Agent
13
- attr_accessor :workflow, :knowledge_base, :start_chat, :process_exception, :other_options
13
+ attr_accessor :workflow, :knowledge_base, :start_chat, :process_exception, :other_options, :path
14
14
  def initialize(workflow: nil, knowledge_base: nil, start_chat: nil, **kwargs)
15
15
  @workflow = workflow
16
16
  @workflow = Workflow.require_workflow @workflow if String === @workflow
@@ -161,6 +161,7 @@ module LLM
161
161
  raise ScoutException, "No agent found with name #{agent_name}" unless workflow_path.exists? || agent_path.exists?
162
162
 
163
163
  workflow = if workflow_path.exists?
164
+ agent_path = workflow_path
164
165
  Workflow.require_workflow agent_name
165
166
  elsif agent_path.workflow.find_with_extension("rb").exists?
166
167
  Workflow.require_workflow_file agent_path.workflow.find_with_extension("rb")
@@ -185,7 +186,9 @@ module LLM
185
186
  Chat.setup([ {role: 'introduce', content: workflow.name} ])
186
187
  end
187
188
 
188
- LLM::Agent.new **options.merge(workflow: workflow, knowledge_base: knowledge_base, start_chat: chat)
189
+ agent = LLM::Agent.new **options.merge(workflow: workflow, knowledge_base: knowledge_base, start_chat: chat)
190
+ agent.path = agent_path.find if agent_path
191
+ agent
189
192
  end
190
193
  end
191
194
  end
@@ -48,10 +48,10 @@ module Chat
48
48
  line = line.sub("]]", "")
49
49
  current_content << "\n" << line unless line.strip.empty?
50
50
  next
51
- elsif stripped.match(/^.*:-- .* {{{/)
51
+ elsif stripped.match(/^[^\s]*:-- .* {{{/)
52
52
  in_protected_block = true
53
53
  protected_block_type = :square
54
- line = line.sub(/^.*:-- (.*) {{{.*/, '<cmd_output cmd="\1">')
54
+ line = line.sub(/^[^\s]*:-- (.*) {{{.*/, '<cmd_output cmd="\1">')
55
55
  current_content << "\n" << line unless line.strip.empty?
56
56
  next
57
57
  elsif stripped.match(/^.*:--.* }}}/) && in_protected_block && protected_block_type == :square
@@ -1,35 +1,27 @@
1
- import scout
2
- import torch
3
- from .util import *
4
-
5
- class TSVDataset(torch.utils.data.Dataset):
6
- def __init__(self, tsv):
7
- self.tsv = tsv
8
-
9
- def __getitem__(self, key):
10
- if (type(key) == int):
11
- row = self.tsv.iloc[key]
12
- else:
13
- row = self.tsv.loc[key]
14
-
15
- row = row.to_numpy()
16
- features = row[:-1]
17
- label = row[-1]
18
-
19
- return features, label
20
-
21
- def __len__(self):
22
- return len(self.tsv)
23
-
24
- def tsv_dataset(filename, *args, **kwargs):
25
- return TSVDataset(scout.tsv(filename, *args, **kwargs))
26
-
27
- def tsv(*args, **kwargs):
28
- return tsv_dataset(*args, **kwargs)
29
-
30
- def tsv_loader(*args, **kwargs):
31
- dataset = tsv(*args, kwargs)
32
- return torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
33
-
34
- def data_dir():
35
- return scout.path('var/scout_dm/data')
1
+ from .agent import Agent, load_agent
2
+ from .chat import Chat
3
+ from .message import Message
4
+ from .runner import CommandError, ScoutRunner
5
+
6
+ __all__ = [
7
+ "Agent",
8
+ "Chat",
9
+ "CommandError",
10
+ "Message",
11
+ "ScoutRunner",
12
+ "load_agent",
13
+ ]
14
+
15
+ try:
16
+ from .util import deterministic, device, model_device, set_seed
17
+
18
+ __all__ += ["deterministic", "device", "model_device", "set_seed"]
19
+ except Exception:
20
+ pass
21
+
22
+ try:
23
+ from .data import TSVDataset, data_dir, tsv, tsv_dataset, tsv_loader
24
+
25
+ __all__ += ["TSVDataset", "data_dir", "tsv", "tsv_dataset", "tsv_loader"]
26
+ except Exception:
27
+ pass
@@ -0,0 +1,88 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Optional
4
+
5
+ from .chat import Chat
6
+ from .runner import ScoutRunner
7
+
8
+
9
+ class Agent:
10
+ """Thin Python wrapper over Scout-AI agents.
11
+
12
+ The object mirrors Scout-AI's ``start_chat`` / ``current_chat`` split while
13
+ delegating actual execution to the Ruby CLI.
14
+ """
15
+
16
+ def __init__(
17
+ self,
18
+ name: str,
19
+ runner: Optional[ScoutRunner] = None,
20
+ start_chat: Optional[Chat] = None,
21
+ endpoint: Any = None,
22
+ model: Any = None,
23
+ backend: Any = None,
24
+ **options: Any,
25
+ ):
26
+ self.name = name
27
+ self.runner = runner or ScoutRunner()
28
+
29
+ if start_chat is None:
30
+ start_chat = Chat(self.runner.load_agent_start_chat(name), runner=self.runner)
31
+ elif not isinstance(start_chat, Chat):
32
+ start_chat = Chat(start_chat, runner=self.runner)
33
+ else:
34
+ start_chat.runner = self.runner
35
+
36
+ if endpoint is not None:
37
+ start_chat.endpoint(endpoint)
38
+ if model is not None:
39
+ start_chat.model(model)
40
+ if backend is not None:
41
+ start_chat.backend(backend)
42
+ for key, value in options.items():
43
+ if value is not None:
44
+ start_chat.option(key, value)
45
+
46
+ self.start_chat = start_chat
47
+ self.current_chat = self.start_chat.branch()
48
+
49
+ def start(self, chat: Optional[Chat] = None) -> Chat:
50
+ if chat is None:
51
+ self.current_chat = self.start_chat.branch()
52
+ elif isinstance(chat, Chat):
53
+ self.current_chat = chat.branch()
54
+ self.current_chat.runner = self.runner
55
+ else:
56
+ self.current_chat = Chat(chat, runner=self.runner)
57
+ return self.current_chat
58
+
59
+ reset = start
60
+
61
+ def ask(self) -> Chat:
62
+ return self.current_chat.ask(agent_name=self.name)
63
+
64
+ def chat(self):
65
+ delta = self.ask()
66
+ self.current_chat.extend(delta)
67
+ return delta.last_message()
68
+
69
+ def save(self, path, output_format: str = "chat"):
70
+ return self.current_chat.save(path, output_format=output_format)
71
+
72
+ def __getattr__(self, name: str):
73
+ attribute = getattr(self.current_chat, name)
74
+ if callable(attribute):
75
+ def delegated(*args, **kwargs):
76
+ result = attribute(*args, **kwargs)
77
+ if result is self.current_chat:
78
+ return self
79
+ return result
80
+ return delegated
81
+ return attribute
82
+
83
+ def __repr__(self) -> str:
84
+ return f"Agent(name={self.name!r}, current_chat={self.current_chat!r})"
85
+
86
+
87
+ def load_agent(name: str, runner: Optional[ScoutRunner] = None, **kwargs: Any) -> Agent:
88
+ return Agent(name=name, runner=runner, **kwargs)
@@ -0,0 +1,221 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+ from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence
5
+
6
+ from .message import Message
7
+ from .runner import ScoutRunner
8
+
9
+ _RESERVED_MESSAGE_ROLES = {"previous_response_id"}
10
+
11
+
12
+ class Chat:
13
+ """Python representation of a Scout chat.
14
+
15
+ The chat is kept as a list of messages. Whenever Scout needs to parse,
16
+ print, or execute the conversation, the package delegates that work to the
17
+ Ruby CLI through :class:`ScoutRunner`.
18
+ """
19
+
20
+ def __init__(self, messages: Optional[Iterable[dict | Message]] = None, runner: Optional[ScoutRunner] = None):
21
+ self.runner = runner or ScoutRunner()
22
+ self.messages: List[Message] = [Message.from_data(message) for message in (messages or [])]
23
+
24
+ @classmethod
25
+ def load(cls, path: str | Path, input_format: str = "chat", runner: Optional[ScoutRunner] = None) -> "Chat":
26
+ runner = runner or ScoutRunner()
27
+ messages = runner.load_messages(path, input_format=input_format)
28
+ return cls(messages, runner=runner)
29
+
30
+ @classmethod
31
+ def from_text(cls, text: str, runner: Optional[ScoutRunner] = None) -> "Chat":
32
+ runner = runner or ScoutRunner()
33
+ messages = runner.parse_chat_text(text)
34
+ return cls(messages, runner=runner)
35
+
36
+ def branch(self) -> "Chat":
37
+ return Chat([message.to_dict() for message in self.messages], runner=self.runner)
38
+
39
+ copy = branch
40
+
41
+ def to_dicts(self) -> List[Dict[str, Any]]:
42
+ return [message.to_dict() for message in self.messages]
43
+
44
+ def to_json(self) -> List[Dict[str, Any]]:
45
+ return self.to_dicts()
46
+
47
+ def render(self) -> str:
48
+ return self.runner.render_chat(self.to_dicts())
49
+
50
+ def save(self, path: str | Path, output_format: str = "chat") -> Path:
51
+ return self.runner.save_messages(path, self.to_dicts(), output_format=output_format)
52
+
53
+ def save_json(self, path: str | Path) -> Path:
54
+ return self.save(path, output_format="json")
55
+
56
+ def save_chat(self, path: str | Path) -> Path:
57
+ return self.save(path, output_format="chat")
58
+
59
+ def message(self, role: str, content: Any = "", **extra: Any) -> "Chat":
60
+ self.messages.append(Message(str(role), content, dict(extra)))
61
+ return self
62
+
63
+ def extend(self, messages: Iterable[dict | Message | "Chat"] | dict | Message | "Chat") -> "Chat":
64
+ if isinstance(messages, Chat):
65
+ iterable = messages.messages
66
+ elif isinstance(messages, (dict, Message)):
67
+ iterable = [messages]
68
+ else:
69
+ iterable = messages
70
+ for message in iterable:
71
+ self.messages.append(Message.from_data(message))
72
+ return self
73
+
74
+ append = extend
75
+
76
+ def user(self, content: Any) -> "Chat":
77
+ return self.message("user", content)
78
+
79
+ def system(self, content: Any) -> "Chat":
80
+ return self.message("system", content)
81
+
82
+ def assistant(self, content: Any) -> "Chat":
83
+ return self.message("assistant", content)
84
+
85
+ def import_(self, file: str | Path) -> "Chat":
86
+ return self.message("import", str(file))
87
+
88
+ def import_last(self, file: str | Path) -> "Chat":
89
+ return self.message("last", str(file))
90
+
91
+ def last(self, file: str | Path) -> "Chat":
92
+ return self.import_last(file)
93
+
94
+ def continue_(self, file: str | Path) -> "Chat":
95
+ return self.message("continue", str(file))
96
+
97
+ def file(self, file: str | Path) -> "Chat":
98
+ return self.message("file", str(file))
99
+
100
+ def directory(self, directory: str | Path) -> "Chat":
101
+ return self.message("directory", str(directory))
102
+
103
+ def image(self, file: str | Path) -> "Chat":
104
+ return self.message("image", str(file))
105
+
106
+ def pdf(self, file: str | Path) -> "Chat":
107
+ return self.message("pdf", str(file))
108
+
109
+ def introduce(self, workflow: str) -> "Chat":
110
+ return self.message("introduce", str(workflow))
111
+
112
+ def tool(self, *parts: Any) -> "Chat":
113
+ parts = [str(part) for part in parts if part is not None]
114
+ return self.message("tool", "\n".join(parts))
115
+
116
+ def use(self, *parts: Any) -> "Chat":
117
+ return self.tool(*parts)
118
+
119
+ def mcp(self, content: Any) -> "Chat":
120
+ return self.message("mcp", content)
121
+
122
+ def task(self, workflow: str, task_name: str, **inputs: Any) -> "Chat":
123
+ content = " ".join([str(workflow), str(task_name)] + [f"{key}={value}" for key, value in inputs.items()])
124
+ return self.message("task", content.strip())
125
+
126
+ def inline_task(self, workflow: str, task_name: str, **inputs: Any) -> "Chat":
127
+ content = " ".join([str(workflow), str(task_name)] + [f"{key}={value}" for key, value in inputs.items()])
128
+ return self.message("inline_task", content.strip())
129
+
130
+ def exec_task(self, workflow: str, task_name: str, **inputs: Any) -> "Chat":
131
+ content = " ".join([str(workflow), str(task_name)] + [f"{key}={value}" for key, value in inputs.items()])
132
+ return self.message("exec_task", content.strip())
133
+
134
+ def job(self, step: Any) -> "Chat":
135
+ value = getattr(step, "path", step)
136
+ return self.message("job", str(value))
137
+
138
+ def inline_job(self, step: Any) -> "Chat":
139
+ value = getattr(step, "path", step)
140
+ return self.message("inline_job", str(value))
141
+
142
+ def association(self, name: str, path: str | Path, **options: Any) -> "Chat":
143
+ parts = [str(name), str(path)] + [f"{key}={value}" for key, value in options.items()]
144
+ return self.message("association", " ".join(parts))
145
+
146
+ def option(self, name: str, value: Any) -> "Chat":
147
+ return self.message("option", f"{name} {value}")
148
+
149
+ def sticky_option(self, name: str, value: Any) -> "Chat":
150
+ return self.message("sticky_option", f"{name} {value}")
151
+
152
+ def endpoint(self, value: Any) -> "Chat":
153
+ return self.message("endpoint", value)
154
+
155
+ def model(self, value: Any) -> "Chat":
156
+ return self.message("model", value)
157
+
158
+ def backend(self, value: Any) -> "Chat":
159
+ return self.message("backend", value)
160
+
161
+ def format(self, value: Any) -> "Chat":
162
+ return self.message("format", value)
163
+
164
+ def persist(self, value: Any = True) -> "Chat":
165
+ return self.message("persist", value)
166
+
167
+ def previous_response_id(self, value: Any) -> "Chat":
168
+ return self.message("previous_response_id", value)
169
+
170
+ def clear(self, content: Any = "") -> "Chat":
171
+ return self.message("clear", content)
172
+
173
+ def skip(self, content: Any = "") -> "Chat":
174
+ return self.message("skip", content)
175
+
176
+ def answer(self) -> Optional[str]:
177
+ message = self.last_message()
178
+ if message is None:
179
+ return None
180
+ return message.content
181
+
182
+ def last_message(self, ignore_roles: Sequence[str] = tuple(_RESERVED_MESSAGE_ROLES)) -> Optional[Message]:
183
+ ignored = {str(role) for role in ignore_roles}
184
+ for message in reversed(self.messages):
185
+ if message.role not in ignored:
186
+ return message
187
+ return None
188
+
189
+ def _delta_from_updated_messages(self, updated_messages: Iterable[dict | Message]) -> "Chat":
190
+ old = self.to_dicts()
191
+ updated = [Message.from_data(message).to_dict() for message in updated_messages]
192
+
193
+ prefix_len = 0
194
+ for current, new in zip(old, updated):
195
+ if current == new:
196
+ prefix_len += 1
197
+ else:
198
+ break
199
+
200
+ return Chat(updated[prefix_len:], runner=self.runner)
201
+
202
+ def ask(self, agent_name: Optional[str] = None) -> "Chat":
203
+ updated_messages = self.runner.ask_messages(self.to_dicts(), agent_name=agent_name)
204
+ return self._delta_from_updated_messages(updated_messages)
205
+
206
+ def chat(self, agent_name: Optional[str] = None) -> Optional[Message]:
207
+ delta = self.ask(agent_name=agent_name)
208
+ self.extend(delta)
209
+ return delta.last_message()
210
+
211
+ def __len__(self) -> int:
212
+ return len(self.messages)
213
+
214
+ def __iter__(self) -> Iterator[Message]:
215
+ return iter(self.messages)
216
+
217
+ def __getitem__(self, item: int) -> Message:
218
+ return self.messages[item]
219
+
220
+ def __repr__(self) -> str:
221
+ return f"Chat(messages={self.messages!r})"
@@ -0,0 +1,39 @@
1
+ import scout
2
+ import torch
3
+
4
+
5
+ class TSVDataset(torch.utils.data.Dataset):
6
+ def __init__(self, tsv):
7
+ self.tsv = tsv
8
+
9
+ def __getitem__(self, key):
10
+ if type(key) == int:
11
+ row = self.tsv.iloc[key]
12
+ else:
13
+ row = self.tsv.loc[key]
14
+
15
+ row = row.to_numpy()
16
+ features = row[:-1]
17
+ label = row[-1]
18
+
19
+ return features, label
20
+
21
+ def __len__(self):
22
+ return len(self.tsv)
23
+
24
+
25
+ def tsv_dataset(filename, *args, **kwargs):
26
+ return TSVDataset(scout.tsv(filename, *args, **kwargs))
27
+
28
+
29
+ def tsv(*args, **kwargs):
30
+ return tsv_dataset(*args, **kwargs)
31
+
32
+
33
+ def tsv_loader(*args, **kwargs):
34
+ dataset = tsv(*args, kwargs)
35
+ return torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
36
+
37
+
38
+ def data_dir():
39
+ return scout.path('var/scout_dm/data')
@@ -0,0 +1,57 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import Any, Dict, Mapping
5
+
6
+
7
+ @dataclass
8
+ class Message:
9
+ """Small wrapper around a Scout chat message.
10
+
11
+ Messages are still serialized as plain ``{"role": ..., "content": ...}``
12
+ dictionaries, but this wrapper makes the Python API easier to work with and
13
+ gives ``chat()`` a useful return value whose string representation is the
14
+ message content.
15
+ """
16
+
17
+ role: str
18
+ content: Any = ""
19
+ extra: Dict[str, Any] = field(default_factory=dict)
20
+
21
+ @classmethod
22
+ def from_data(cls, data: Any) -> "Message":
23
+ if isinstance(data, cls):
24
+ return cls(data.role, data.content, dict(data.extra))
25
+
26
+ if isinstance(data, Mapping):
27
+ role = str(data.get("role", ""))
28
+ content = data.get("content", "")
29
+ extra = {k: v for k, v in data.items() if k not in ("role", "content")}
30
+ return cls(role=role, content=content, extra=extra)
31
+
32
+ raise TypeError(f"Unsupported message type: {type(data)!r}")
33
+
34
+ def to_dict(self) -> Dict[str, Any]:
35
+ data = {"role": self.role, "content": self.content}
36
+ data.update(self.extra)
37
+ return data
38
+
39
+ def get(self, key: str, default: Any = None) -> Any:
40
+ if key == "role":
41
+ return self.role
42
+ if key == "content":
43
+ return self.content
44
+ return self.extra.get(key, default)
45
+
46
+ def __getitem__(self, key: str) -> Any:
47
+ if key == "role":
48
+ return self.role
49
+ if key == "content":
50
+ return self.content
51
+ return self.extra[key]
52
+
53
+ def __str__(self) -> str:
54
+ return "" if self.content is None else str(self.content)
55
+
56
+ def __repr__(self) -> str:
57
+ return f"Message(role={self.role!r}, content={self.content!r})"
@@ -0,0 +1,159 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import shlex
6
+ import subprocess
7
+ import tempfile
8
+ from pathlib import Path
9
+ from typing import Any, Iterable, List, Optional, Sequence
10
+
11
+
12
+ class CommandError(RuntimeError):
13
+ def __init__(self, cmd: Sequence[str], stdout: str, stderr: str, exit_status: int):
14
+ self.cmd = list(cmd)
15
+ self.stdout = stdout
16
+ self.stderr = stderr
17
+ self.exit_status = exit_status
18
+ message = stderr.strip() or stdout.strip() or f"Command failed with exit status {exit_status}"
19
+ super().__init__(message)
20
+
21
+
22
+ class ScoutRunner:
23
+ """Run Scout-AI CLI commands.
24
+
25
+ The Python package deliberately keeps Ruby as the source of truth for chat
26
+ parsing/printing and LLM execution. This runner is the bridge to those CLI
27
+ commands.
28
+ """
29
+
30
+ def __init__(self, command: Optional[Sequence[str] | str] = None):
31
+ if command is None:
32
+ command = os.environ.get("SCOUT_AI_COMMAND", "scout-ai")
33
+
34
+ if isinstance(command, str):
35
+ self.command = shlex.split(command)
36
+ else:
37
+ self.command = list(command)
38
+
39
+ if not self.command:
40
+ raise ValueError("command can not be empty")
41
+
42
+ def _run(self, *args: str) -> str:
43
+ cmd = self.command + [str(arg) for arg in args]
44
+ proc = subprocess.run(cmd, capture_output=True, text=True)
45
+ if proc.returncode != 0:
46
+ raise CommandError(cmd, proc.stdout, proc.stderr, proc.returncode)
47
+ return proc.stdout
48
+
49
+ def _write_json(self, path: Path, messages: Iterable[dict]) -> None:
50
+ path.write_text(json.dumps(list(messages), ensure_ascii=False, indent=2), encoding="utf-8")
51
+
52
+ def json_to_chat_file(self, messages: Iterable[dict], chat_file: Path) -> Path:
53
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False, encoding="utf-8") as handle:
54
+ json.dump(list(messages), handle, ensure_ascii=False, indent=2)
55
+ json_file = Path(handle.name)
56
+ try:
57
+ self._run("llm", "json", "--json", str(json_file), "--output", str(chat_file))
58
+ finally:
59
+ json_file.unlink(missing_ok=True)
60
+ return chat_file
61
+
62
+ def chat_file_to_messages(self, chat_file: Path) -> List[dict]:
63
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False, encoding="utf-8") as handle:
64
+ json_file = Path(handle.name)
65
+ try:
66
+ self._run("llm", "json", "--chat", str(chat_file), "--output", str(json_file))
67
+ text = json_file.read_text(encoding="utf-8")
68
+ if text.strip() == "":
69
+ return []
70
+ return json.loads(text)
71
+ finally:
72
+ json_file.unlink(missing_ok=True)
73
+
74
+ def render_chat(self, messages: Iterable[dict]) -> str:
75
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".chat", delete=False, encoding="utf-8") as handle:
76
+ chat_file = Path(handle.name)
77
+ try:
78
+ self.json_to_chat_file(messages, chat_file)
79
+ return chat_file.read_text(encoding="utf-8")
80
+ finally:
81
+ chat_file.unlink(missing_ok=True)
82
+
83
+ def parse_chat_text(self, text: str) -> List[dict]:
84
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".chat", delete=False, encoding="utf-8") as handle:
85
+ handle.write(text)
86
+ chat_file = Path(handle.name)
87
+ try:
88
+ return self.chat_file_to_messages(chat_file)
89
+ finally:
90
+ chat_file.unlink(missing_ok=True)
91
+
92
+ def load_messages(self, path: str | Path, input_format: str = "chat") -> List[dict]:
93
+ path = Path(path)
94
+ if input_format == "json":
95
+ text = path.read_text(encoding="utf-8")
96
+ return [] if text.strip() == "" else json.loads(text)
97
+ if input_format != "chat":
98
+ raise ValueError(f"Unsupported input format: {input_format!r}")
99
+ return self.chat_file_to_messages(path)
100
+
101
+ def save_messages(self, path: str | Path, messages: Iterable[dict], output_format: str = "chat") -> Path:
102
+ path = Path(path)
103
+ if output_format == "json":
104
+ path.write_text(json.dumps(list(messages), ensure_ascii=False, indent=2), encoding="utf-8")
105
+ return path
106
+ if output_format != "chat":
107
+ raise ValueError(f"Unsupported output format: {output_format!r}")
108
+ self.json_to_chat_file(messages, path)
109
+ return path
110
+
111
+ def ask_messages(self, messages: Iterable[dict], agent_name: Optional[str] = None) -> List[dict]:
112
+ messages = list(messages)
113
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".chat", delete=False, encoding="utf-8") as handle:
114
+ chat_file = Path(handle.name)
115
+ try:
116
+ self.json_to_chat_file(messages, chat_file)
117
+ if agent_name:
118
+ self._run("agent", "ask", str(agent_name), "--chat", str(chat_file))
119
+ else:
120
+ self._run("llm", "ask", "--chat", str(chat_file))
121
+ return self.chat_file_to_messages(chat_file)
122
+ finally:
123
+ chat_file.unlink(missing_ok=True)
124
+
125
+ def find_agent_path(self, agent_name: str) -> Optional[Path]:
126
+ try:
127
+ output = self._run("agent", "find", str(agent_name)).strip()
128
+ except CommandError:
129
+ return None
130
+ if not output:
131
+ return None
132
+ return Path(output)
133
+
134
+ def load_agent_start_chat(self, agent_name: str) -> List[dict]:
135
+ path = self.find_agent_path(agent_name)
136
+ if path is None:
137
+ return []
138
+
139
+ if path.is_file():
140
+ try:
141
+ return self.load_messages(path, input_format="chat")
142
+ except Exception:
143
+ return []
144
+
145
+ candidates = [
146
+ path / "start_chat",
147
+ path / "start_chat.chat",
148
+ path / "start_chat.txt",
149
+ ]
150
+ candidates.extend(sorted(path.glob("start_chat.*")))
151
+
152
+ for candidate in candidates:
153
+ if candidate.exists() and candidate.is_file():
154
+ try:
155
+ return self.load_messages(candidate, input_format="chat")
156
+ except Exception:
157
+ continue
158
+
159
+ return []
@@ -0,0 +1,67 @@
1
+ import unittest
2
+
3
+ from scout_ai import Chat, Message, load_agent
4
+
5
+
6
+ class FakeRunner:
7
+ def ask_messages(self, messages, agent_name=None):
8
+ messages = list(messages)
9
+ label = agent_name or "llm"
10
+ return messages + [
11
+ {"role": "assistant", "content": f"reply from {label}"},
12
+ {"role": "previous_response_id", "content": f"resp-{label}"},
13
+ ]
14
+
15
+ def load_agent_start_chat(self, agent_name):
16
+ return [{"role": "system", "content": f"Agent {agent_name}"}]
17
+
18
+ def save_messages(self, path, messages, output_format="chat"):
19
+ return path
20
+
21
+
22
+ class ChatAgentTest(unittest.TestCase):
23
+ def test_chat_ask_returns_only_new_messages(self):
24
+ runner = FakeRunner()
25
+ chat = Chat(runner=runner).system("You are concise").user("Hello")
26
+
27
+ delta = chat.ask()
28
+
29
+ self.assertEqual(len(chat), 2)
30
+ self.assertEqual(len(delta), 2)
31
+ self.assertEqual(delta[0].role, "assistant")
32
+ self.assertEqual(delta[0].content, "reply from llm")
33
+ self.assertEqual(delta[1].role, "previous_response_id")
34
+
35
+ def test_chat_chat_mutates_and_returns_last_meaningful_message(self):
36
+ runner = FakeRunner()
37
+ chat = Chat(runner=runner).user("Hello")
38
+
39
+ message = chat.chat()
40
+
41
+ self.assertIsInstance(message, Message)
42
+ self.assertEqual(message.role, "assistant")
43
+ self.assertEqual(str(message), "reply from llm")
44
+ self.assertEqual(len(chat), 3)
45
+ self.assertEqual(chat[-1].role, "previous_response_id")
46
+
47
+ def test_agent_eager_current_chat_and_delegation(self):
48
+ runner = FakeRunner()
49
+ agent = load_agent("Planner", runner=runner, endpoint="nano")
50
+
51
+ self.assertEqual(agent.start_chat[0].role, "system")
52
+ self.assertEqual(agent.start_chat[0].content, "Agent Planner")
53
+ self.assertEqual(agent.current_chat[1].role, "endpoint")
54
+ self.assertEqual(agent.current_chat[1].content, "nano")
55
+
56
+ agent.user("Summarize this")
57
+ self.assertEqual(len(agent.start_chat), 2)
58
+ self.assertEqual(len(agent.current_chat), 3)
59
+
60
+ message = agent.chat()
61
+ self.assertEqual(message.role, "assistant")
62
+ self.assertEqual(message.content, "reply from Planner")
63
+ self.assertEqual(agent.current_chat[-1].role, "previous_response_id")
64
+
65
+
66
+ if __name__ == "__main__":
67
+ unittest.main()
data/scout-ai.gemspec CHANGED
@@ -2,11 +2,11 @@
2
2
  # DO NOT EDIT THIS FILE DIRECTLY
3
3
  # Instead, edit Juwelier::Tasks in Rakefile, and run 'rake gemspec'
4
4
  # -*- encoding: utf-8 -*-
5
- # stub: scout-ai 1.2.1 ruby lib
5
+ # stub: scout-ai 1.2.2 ruby lib
6
6
 
7
7
  Gem::Specification.new do |s|
8
8
  s.name = "scout-ai".freeze
9
- s.version = "1.2.1".freeze
9
+ s.version = "1.2.2".freeze
10
10
 
11
11
  s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
12
12
  s.require_paths = ["lib".freeze]
@@ -86,18 +86,26 @@ Gem::Specification.new do |s|
86
86
  "lib/scout/network/knowledge_base.rb",
87
87
  "lib/scout/network/paths.rb",
88
88
  "python/scout_ai/__init__.py",
89
+ "python/scout_ai/agent.py",
90
+ "python/scout_ai/chat.py",
91
+ "python/scout_ai/data.py",
89
92
  "python/scout_ai/huggingface/data.py",
90
93
  "python/scout_ai/huggingface/eval.py",
91
94
  "python/scout_ai/huggingface/model.py",
92
95
  "python/scout_ai/huggingface/rlhf.py",
93
96
  "python/scout_ai/huggingface/train/__init__.py",
94
97
  "python/scout_ai/huggingface/train/next_token.py",
98
+ "python/scout_ai/message.py",
99
+ "python/scout_ai/runner.py",
95
100
  "python/scout_ai/util.py",
101
+ "python/tests/test_chat_agent.py",
96
102
  "scout-ai.gemspec",
97
103
  "scout_commands/agent/ask",
104
+ "scout_commands/agent/find",
98
105
  "scout_commands/agent/kb",
99
106
  "scout_commands/documenter",
100
107
  "scout_commands/llm/ask",
108
+ "scout_commands/llm/json",
101
109
  "scout_commands/llm/process",
102
110
  "scout_commands/llm/server",
103
111
  "scout_commands/llm/template",
@@ -10,7 +10,7 @@ $0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ Fi
10
10
 
11
11
  options = SOPT.setup <<EOF
12
12
 
13
- Ask GPT
13
+ Ask an Agent
14
14
 
15
15
  $ #{$0} [<options>] [question]
16
16
 
@@ -0,0 +1,31 @@
1
+ #!/usr/bin/env ruby
2
+
3
+ require 'scout'
4
+ require 'scout-ai'
5
+
6
+ $0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands
7
+
8
+ options = SOPT.setup <<EOF
9
+
10
+ Find an Agent
11
+
12
+ $ #{$0} [<options>]
13
+
14
+ -h--help Print this help
15
+ EOF
16
+ if options[:help]
17
+ if defined? scout_usage
18
+ scout_usage
19
+ else
20
+ puts SOPT.doc
21
+ end
22
+ exit 0
23
+ end
24
+
25
+ Log.severity = options.delete(:log).to_i if options.include? :log
26
+
27
+ agent_name, *question_parts = ARGV
28
+
29
+ agent = LLM.load_agent agent_name, options
30
+
31
+ puts agent.path
@@ -0,0 +1,61 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ require 'scout'
5
+
6
+ cmd = $previous_commands ?
7
+ "scout #{$previous_commands.any? ? "#{$previous_commands * ' '} " : ''}#{File.basename(__FILE__)}" :
8
+ $PROGRAM_NAME
9
+
10
+ options = SOPT.setup <<~EOF
11
+
12
+ Translate chats to and from json format
13
+
14
+ $ #{cmd} [<options>] <filename>
15
+
16
 + Reads the file as JSON or Chat format and prints it in the opposite format.
17
 + Use `--chat` and `--json` to specify the format on the input. Use
18
+ the `output` flag to save it to a file.
19
+
20
+ -h--help This help
21
+ -c--chat Load as Chat format
22
+ -j--json Load as JSON format (Default)
23
+ -o--output* Save to file instead of printing to STDOUT
24
+
25
+ EOF
26
+ if options[:help]
27
+ if defined? scout_usage
28
+ scout_usage
29
+ else
30
+ puts SOPT.doc
31
+ end
32
+ exit 0
33
+ end
34
+
35
+ help = IndiferentHash.process_options options, :help
36
+
37
+ filename, _ = ARGV
38
+
39
+ raise MissingParameterException, :filename if filename.nil?
40
+
41
+ raise ParameterException, "Don't use the json and chat flags at the same time, only the one that corresponds to the input file fomat" if options[:chat] && options[:json]
42
+
43
+ input = :json unless options[:chat]
44
+
45
+ if input == :json
46
+ messages = Chat.setup(Open.json(filename))
47
+ else
48
+ messages = LLM.chat(filename)
49
+ end
50
+
51
+ if input == :json
52
+ str = LLM.print messages
53
+ else
54
+ str = messages.to_json
55
+ end
56
+
57
+ if options[:output]
58
+ Open.write(options[:output], str)
59
+ else
60
+ puts str
61
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: scout-ai
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.2.1
4
+ version: 1.2.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Miguel Vazquez
@@ -154,18 +154,26 @@ files:
154
154
  - lib/scout/network/knowledge_base.rb
155
155
  - lib/scout/network/paths.rb
156
156
  - python/scout_ai/__init__.py
157
+ - python/scout_ai/agent.py
158
+ - python/scout_ai/chat.py
159
+ - python/scout_ai/data.py
157
160
  - python/scout_ai/huggingface/data.py
158
161
  - python/scout_ai/huggingface/eval.py
159
162
  - python/scout_ai/huggingface/model.py
160
163
  - python/scout_ai/huggingface/rlhf.py
161
164
  - python/scout_ai/huggingface/train/__init__.py
162
165
  - python/scout_ai/huggingface/train/next_token.py
166
+ - python/scout_ai/message.py
167
+ - python/scout_ai/runner.py
163
168
  - python/scout_ai/util.py
169
+ - python/tests/test_chat_agent.py
164
170
  - scout-ai.gemspec
165
171
  - scout_commands/agent/ask
172
+ - scout_commands/agent/find
166
173
  - scout_commands/agent/kb
167
174
  - scout_commands/documenter
168
175
  - scout_commands/llm/ask
176
+ - scout_commands/llm/json
169
177
  - scout_commands/llm/process
170
178
  - scout_commands/llm/server
171
179
  - scout_commands/llm/template