lambda-agent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,276 @@
1
+ """
2
+ Sub-Agent Module
3
+ ================
4
+ Provides a lightweight, disposable agent that the main Lambda agent can spawn
5
+ to perform focused tasks in parallel. Each sub-agent gets its own Gemini chat
6
+ session, a restricted set of tools, and a tight iteration budget.
7
+
8
+ The main agent uses the ``dispatch_subagent`` tool function to fire off work.
9
+ """
10
+
11
+ import threading
12
+ from concurrent.futures import ThreadPoolExecutor, Future
13
+
14
+ from rich.text import Text
15
+ from rich.panel import Panel
16
+ from rich import box
17
+
18
+ from . import config
19
+
20
+ try:
21
+ from google import genai
22
+ from google.genai import types
23
+ except ImportError:
24
+ pass
25
+
26
+ # ---------------------------------------------------------------------------
27
+ # Shared console for sub-agent output
28
+ # ---------------------------------------------------------------------------
29
+ try:
30
+ from .spinner import console
31
+ except ImportError:
32
+ from rich.console import Console
33
+
34
+ console = Console()
35
+
36
+ # ---------------------------------------------------------------------------
37
+ # Sub-agent tool set (lazy-loaded to avoid circular imports with tools.py)
38
+ # ---------------------------------------------------------------------------
39
+
40
# Default tools — the main agent can override per-task
_DEFAULT_TOOL_NAMES = ["read_file", "search_repo", "run_command", "write_file"]


def _get_tool_set() -> dict:
    """Return the registry of tool callables available to sub-agents.

    The import happens at call time rather than module load time because
    tools.py imports from this module, so a top-level import would create
    a circular dependency.
    """
    from .tools import read_file, search_repo, run_command, write_file

    toolbox = {
        "read_file": read_file,
        "search_repo": search_repo,
        "run_command": run_command,
        "write_file": write_file,
    }
    return toolbox
54
+
55
+
56
+ # ---------------------------------------------------------------------------
57
+ # Thread-safe counter for sub-agent IDs
58
+ # ---------------------------------------------------------------------------
59
+ _id_lock = threading.Lock()
60
+ _next_id = 1
61
+
62
+
63
+ def _get_next_id() -> int:
64
+ global _next_id
65
+ with _id_lock:
66
+ current = _next_id
67
+ _next_id += 1
68
+ return current
69
+
70
+
71
+ # ---------------------------------------------------------------------------
72
+ # SubAgent class
73
+ # ---------------------------------------------------------------------------
74
+
75
# System prompt template for every sub-agent chat session. The {max_iter}
# placeholder is filled with MAX_SUBAGENT_ITERATIONS when a SubAgent is built.
_SUBAGENT_SYSTEM_INSTRUCTION = """\
You are a Lambda sub-agent — a focused worker spawned by the main Lambda agent \
to complete a specific task.

RULES:
1. Complete the assigned task efficiently. You have a maximum of {max_iter} tool \
calls before you must produce a final answer.
2. Your final answer MUST be a concise summary of your findings or actions — \
no more than a few sentences. The main agent will read this summary.
3. You are fully capable of reading, writing, and editing files. Do so if the task demands it, but otherwise avoid unnecessary modifications.
4. Do NOT ask the user questions — you cannot interact with the user.
5. If you hit an error, briefly report what went wrong in your summary.
"""

# Hard cap on model/tool round-trips per sub-agent run (see SubAgent.run).
MAX_SUBAGENT_ITERATIONS = 5
# Sub-agent results returned to the main agent are clipped to this length.
RESULT_MAX_CHARS = 500
91
+
92
+
93
class SubAgent:
    """A disposable, lightweight agent that runs a short task and returns a summary.

    Each instance owns its own Gemini chat session, a restricted tool set,
    and a small iteration budget (MAX_SUBAGENT_ITERATIONS). Errors are
    returned as strings, never raised, so the caller always gets a result.
    """

    def __init__(
        self,
        task: str,
        context: str = "",
        tool_names: list[str] | None = None,
        model: str | None = None,
    ):
        # Unique, thread-safe ID used to label this agent's console output.
        self.id = _get_next_id()
        self.task = task
        self.context = context
        # Falls back to a lightweight model when the caller does not choose one.
        self.model = model or "gemini-2.0-flash-lite"

        # Resolve tool set (lazy-loaded to avoid circular imports).
        # Unknown tool names are silently skipped rather than raising.
        all_tools = _get_tool_set()
        names = tool_names if tool_names else _DEFAULT_TOOL_NAMES
        self.tool_executors: dict = {}  # name -> callable, for manual dispatch in run()
        self.tool_functions: list = []  # raw callables handed to the SDK for schemas
        for name in names:
            fn = all_tools.get(name)
            if fn:
                self.tool_executors[name] = fn
                self.tool_functions.append(fn)

        # Build Gemini session
        self.client = genai.Client(api_key=config.API_KEY)
        sys_instr = _SUBAGENT_SYSTEM_INSTRUCTION.format(
            max_iter=MAX_SUBAGENT_ITERATIONS
        )

        # Automatic function calling is disabled so run() can execute tools
        # itself and enforce the iteration budget.
        chat_config = types.GenerateContentConfig(
            system_instruction=sys_instr,
            tools=self.tool_functions if self.tool_functions else None,
            automatic_function_calling=types.AutomaticFunctionCallingConfig(
                disable=True
            ),
        )
        self.chat_session = self.client.chats.create(
            model=self.model, config=chat_config
        )

    def run(self) -> str:
        """Execute the sub-agent task and return a concise result string.

        Sends the task (plus optional context) to the chat session, then
        loops: execute any requested tool calls, feed results back, and
        stop when the model replies with plain text or the iteration
        budget is exhausted. All failures are folded into the returned
        string rather than raised.
        """
        # Build the initial prompt
        parts = []
        if self.context:
            parts.append(f"--- CONTEXT ---\n{self.context}\n--- END CONTEXT ---\n\n")
        parts.append(f"Task: {self.task}")
        prompt = "".join(parts)

        try:
            response = self.chat_session.send_message(prompt)
        except Exception as e:
            return f"[sub-agent {self.id}] Error on initial message: {e}"

        iterations = 0
        while True:
            iterations += 1
            # Budget exhausted: surface whatever the model last said.
            if iterations > MAX_SUBAGENT_ITERATIONS:
                return self._clip(
                    f"[sub-agent {self.id}] Hit iteration limit. "
                    f"Last response: {getattr(response, 'text', '(none)')}"
                )

            try:
                tool_calls = response.function_calls if response.function_calls else []

                if tool_calls:
                    tool_responses = []
                    for fc in tool_calls:
                        fn_name = fc.name
                        # Normalize fc.args into a plain dict — the SDK may
                        # hand back a mapping-like object instead of a dict.
                        args = fc.args
                        if hasattr(args, "items"):
                            args = {k: v for k, v in args.items()}
                        elif not isinstance(args, dict):
                            args = dict(args) if args else {}

                        if fn_name in self.tool_executors:
                            result = self.tool_executors[fn_name](**args)
                        else:
                            # Model asked for a tool outside this agent's set.
                            result = (
                                f"Error: Tool '{fn_name}' not available to sub-agent."
                            )

                        tool_responses.append(
                            types.Part.from_function_response(
                                name=fn_name,
                                response={"result": str(result)},
                            )
                        )

                    # Send all tool results back in one batch and keep looping.
                    response = self.chat_session.send_message(tool_responses)
                    continue
                else:
                    # Final text response
                    return self._clip(response.text or "(no output)")
            except Exception as e:
                return f"[sub-agent {self.id}] Error during tool loop: {e}"

    def _clip(self, text: str) -> str:
        """Truncate result to RESULT_MAX_CHARS, noting the original length."""
        if len(text) <= RESULT_MAX_CHARS:
            return text
        return text[:RESULT_MAX_CHARS] + f"\n...[TRUNCATED — {len(text)} chars total]"
199
+
200
+
201
+ # ---------------------------------------------------------------------------
202
+ # Tool function exposed to the main agent
203
+ # ---------------------------------------------------------------------------
204
+
205
# Thread pool for running sub-agents concurrently
_executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="subagent")

# Hard wall-clock limit (seconds) for a single sub-agent run.
_SUBAGENT_TIMEOUT = 120


def dispatch_subagent(task: str, context: str = "", tools: str = "") -> str:
    """Spawns a lightweight sub-agent to perform a focused task and returns its result.

    Use this to delegate independent, parallelizable work such as reading and
    analyzing files, searching the repo, or running investigative commands.
    Multiple dispatch_subagent calls in the *same turn* run in parallel.

    Args:
        task: A clear, specific description of what the sub-agent should do.
            Must be self-contained — the sub-agent has no access to your
            chat history.
        context: Optional context string to give the sub-agent (e.g. file
            contents, prior findings). Keep this minimal.
        tools: Optional comma-separated list of tool names the sub-agent can
            use. Defaults to 'read_file,search_repo,run_command,write_file'.
    """
    # Local import: concurrent.futures.TimeoutError is a distinct class from
    # the builtin TimeoutError on Python < 3.11, so catch the futures one.
    from concurrent.futures import TimeoutError as FuturesTimeoutError

    # Parse tool list (empty/whitespace-only string -> SubAgent defaults)
    tool_names = None
    if tools.strip():
        tool_names = [t.strip() for t in tools.split(",") if t.strip()]

    agent = SubAgent(task=task, context=context, tool_names=tool_names)
    agent_id = agent.id

    # Show dispatch in the terminal
    console.print()
    dispatch_label = Text.assemble(
        (" ⚡ SUB-AGENT ", "bold black on green"),
        (f" #{agent_id}", "bold green"),
        (" → ", "dim"),
        (task[:80] + ("…" if len(task) > 80 else ""), "green"),
    )
    console.print(dispatch_label)

    # Run the sub-agent in a thread (blocks until done — Gemini processes
    # all tool_calls in a batch, so parallel calls happen naturally)
    future: Future = _executor.submit(agent.run)
    try:
        result = future.result(timeout=_SUBAGENT_TIMEOUT)
    except FuturesTimeoutError:
        # FIX: previously the TimeoutError propagated out of this tool and
        # crashed the main agent's tool loop. Return an error summary instead,
        # matching the errors-as-strings convention of this module.
        # NOTE: the worker thread cannot be cancelled and may keep running
        # in the background until it finishes on its own.
        result = (
            f"[sub-agent {agent_id}] Error: timed out after "
            f"{_SUBAGENT_TIMEOUT} seconds."
        )
    except Exception as e:
        # Defensive: SubAgent.run catches its own errors, but anything that
        # escapes the future is still reported rather than raised.
        result = f"[sub-agent {agent_id}] Error: {e}"

    # Show completion
    status_label = Text.assemble(
        (" ✓ SUB-AGENT ", "bold black on green"),
        (f" #{agent_id} done", "bold green"),
    )
    console.print(status_label)
    console.print(
        Panel(
            result,
            border_style="green",
            box=box.SIMPLE,
            padding=(0, 2),
        )
    )

    return result
264
+
265
+
266
+ # ---------------------------------------------------------------------------
267
+ # Registration dicts (imported by tools.py)
268
+ # ---------------------------------------------------------------------------
269
+
270
# Name -> callable map merged into tools.TOOL_EXECUTORS so the main agent can
# dispatch 'dispatch_subagent' calls by name.
SUBAGENT_EXECUTORS = {
    "dispatch_subagent": dispatch_subagent,
}

# Raw function list merged into tools.TOOL_FUNCTIONS and handed to the Gemini
# SDK for automatic schema generation.
SUBAGENT_FUNCTIONS = [
    dispatch_subagent,
]
lambda_agent/tools.py ADDED
@@ -0,0 +1,217 @@
1
+ import subprocess
2
+ import os
3
+
4
+ from rich.panel import Panel
5
+ from rich.text import Text
6
+ from rich.prompt import Prompt
7
+ from rich import box
8
+ from rich.console import Console
9
+
10
+ from .scratchpad import SCRATCHPAD_EXECUTORS, SCRATCHPAD_FUNCTIONS
11
+ from .subagent import SUBAGENT_EXECUTORS, SUBAGENT_FUNCTIONS
12
+
13
+ # Use the same console as the rest of the app if available; else create one
14
+ try:
15
+ from .spinner import console
16
+ except ImportError:
17
+ console = Console()
18
+
19
+
20
def read_file(path: str) -> str:
    """Reads the contents of a file.

    Args:
        path: The absolute or relative path to the file.

    Returns:
        The full file contents, or an error string on failure.
    """
    try:
        with open(path, "r", encoding="utf-8") as handle:
            contents = handle.read()
    except Exception as exc:
        return f"Error reading file {path}: {str(exc)}"
    return contents
31
+
32
+
33
def write_file(path: str, content: str) -> str:
    """Writes content to a specific file path.

    Args:
        path: The path to the file to write.
        content: The text content to write to the file.

    Returns:
        A success message, or an error string on failure.
    """
    try:
        with open(path, "w", encoding="utf-8") as handle:
            handle.write(content)
    except Exception as exc:
        return f"Error writing to file {path}: {str(exc)}"
    return f"Successfully wrote to {path}"
46
+
47
+
48
def run_command(command: str) -> str:
    """Executes a shell command on the host system.

    Args:
        command: The shell command to execute.

    Returns:
        Combined stdout+stderr of the command, a placeholder message when
        there is no output, or an error string on timeout/failure.
    """
    try:
        proc = subprocess.run(
            command, shell=True, capture_output=True, text=True, timeout=30
        )
    except subprocess.TimeoutExpired:
        return "Error: Command timed out after 30 seconds."
    except Exception as exc:
        return f"Error executing command: {str(exc)}"
    combined = proc.stdout + proc.stderr
    return combined if combined else "Command executed successfully with no output."
64
+
65
+
66
def get_workspace_summary() -> str:
    """Gathers git context, branch, status, recent commits, and project documentation (like README.md or rule files) to help the agent understand the whole project."""
    # Best-effort: each section is gathered independently and failures are
    # reported inline, so one broken source never hides the others.
    summary_parts = []

    # 1. Gather Git Context
    try:
        branch = subprocess.check_output(
            ["git", "branch", "--show-current"], text=True, stderr=subprocess.STDOUT
        ).strip()
        status = subprocess.check_output(
            ["git", "status", "-s"], text=True, stderr=subprocess.STDOUT
        )
        log = subprocess.check_output(
            ["git", "log", "-n", "5", "--oneline"], text=True, stderr=subprocess.STDOUT
        )

        summary_parts.append(
            f"### Git Context\n**Branch**: {branch}\n**Status**:\n{status if status else 'Clean'}\n**Recent Commits**:\n{log}"
        )
    except Exception:
        # Any git failure (not a repo, git not installed) collapses to one line.
        summary_parts.append("### Git Context\nNot a git repository or git error.")

    # 2. Gather Directory Structure (limited to root)
    try:
        files = os.listdir(".")
        summary_parts.append(f"### Root Directory Files\n{', '.join(files)}")
    except Exception as e:
        summary_parts.append(f"### Directory Listing Error\n{e}")

    # 3. Read important docs
    # Checked in priority order; only files that actually exist are included.
    docs_to_check = [
        "README.md",
        "README",
        ".cursorrules",
        ".agentrules",
        ".agent/scratchpad.md",
        "pyproject.toml",
        "package.json",
    ]
    for doc in docs_to_check:
        if os.path.exists(doc) and os.path.isfile(doc):
            try:
                with open(doc, "r", encoding="utf-8") as f:
                    content = f.read()
                # Truncate to save tokens if massive
                if len(content) > 3000:
                    content = content[:3000] + "\n...[TRUNCATED]"
                summary_parts.append(f"### Document: {doc}\n```\n{content}\n```")
            except Exception:
                # Unreadable doc (permissions, bad encoding) is skipped silently.
                pass

    return "\n\n".join(summary_parts)
118
+
119
+
120
def search_repo(query: str, path: str = ".") -> str:
    """Searches for a specific string query across all text files in the repository.

    Args:
        query: The substring to search for.
        path: The directory path to search within (defaults to current directory '.').

    Returns:
        Matching lines in grep's ``file:line:content`` format (capped at 100),
        a no-match message, or an error string.
    """
    # grep flags: -r recursive, -n line numbers, -I skip binary files,
    # -F fixed-string match (prevents regex injection via special chars).
    excluded = (
        ".git",
        ".venv",
        "venv",
        "env",
        "__pycache__",
        "node_modules",
        ".ruff_cache",
    )
    cmd = ["grep", "-rnIF"]
    cmd.extend(f"--exclude-dir={d}" for d in excluded)
    cmd.extend([query, path])

    try:
        proc = subprocess.run(cmd, capture_output=True, text=True)
    except FileNotFoundError:
        return "Error: 'grep' is not installed or available in PATH."
    except Exception as exc:
        return f"Error executing search: {str(exc)}"

    # grep exit status: 0 = matches found, 1 = no matches, >1 = real error.
    if proc.returncode == 1:
        return f"No matches found for '{query}'"
    if proc.returncode != 0:
        return f"Error searching repository: {proc.stderr.strip()}"

    matches = proc.stdout.strip().split("\n")
    if not matches or not matches[0]:
        return f"No matches found for '{query}'"
    if len(matches) > 100:
        # Cap output to keep the tool result a reasonable size.
        return (
            "\n".join(matches[:100])
            + f"\n\n... and {len(matches) - 100} more matches."
        )
    return "\n".join(matches)
168
+
169
+
170
def ask_user(question: str) -> str:
    """Asks the user a clarifying question and returns their answer.

    Args:
        question: The question to ask the user.

    Returns:
        The user's raw answer string, or an error string if prompting fails.
    """
    try:
        # Render the question in a highlighted panel so it stands out from
        # normal agent output before blocking on input.
        console.print()
        console.print(
            Panel(
                Text(question, style="bold white"),
                border_style="yellow",
                box=box.ROUNDED,
                title=Text(" 🤔 Lambda asks ", style="bold black on bright_yellow"),
                title_align="left",
                padding=(0, 2),
            )
        )
        # Blocking read from the shared console.
        answer = Prompt.ask(
            "[bold bright_yellow] Your answer[/bold bright_yellow]",
            console=console,
        )
        return answer
    except Exception as e:
        return f"Error asking user: {str(e)}"
195
+
196
+
197
# A dictionary mapping tool names to Python functions for dynamic execution.
# Scratchpad and sub-agent registries are merged in so the agent loop can
# dispatch every tool call by name from one place.
TOOL_EXECUTORS = {
    "read_file": read_file,
    "write_file": write_file,
    "run_command": run_command,
    "search_repo": search_repo,
    "ask_user": ask_user,
    **SCRATCHPAD_EXECUTORS,
    **SUBAGENT_EXECUTORS,
}

# The list of raw Python functions for the Gemini SDK to auto-generate schemas
# (kept in sync with TOOL_EXECUTORS above).
TOOL_FUNCTIONS = [
    read_file,
    write_file,
    run_command,
    search_repo,
    ask_user,
    *SCRATCHPAD_FUNCTIONS,
    *SUBAGENT_FUNCTIONS,
]
@@ -0,0 +1,118 @@
1
+ Metadata-Version: 2.4
2
+ Name: lambda-agent
3
+ Version: 0.1.0
4
+ Summary: Lambda - A minimal AI coding agent
5
+ Author: Ayush Ranjan
6
+ License: Apache-2.0
7
+ Project-URL: Homepage, https://github.com/ayusrjn/lambda
8
+ Project-URL: Repository, https://github.com/ayusrjn/lambda.git
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Topic :: Software Development :: Code Generators
14
+ Requires-Python: >=3.10
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: google-genai>=1.71.0
18
+ Requires-Dist: python-dotenv>=1.0.0
19
+ Requires-Dist: rich>=13.7.0
20
+ Dynamic: license-file
21
+
22
+ <p align="center">
23
+ <img src="images/logo.png" alt="Lambda Logo" width="120">
24
+ </p>
25
+
26
+ <h1 align="center">Lambda Agent</h1>
27
+
28
+ <p align="center">
29
+ <strong>A minimal, function-driven AI coding assistant built for speed and simplicity.</strong>
30
+ </p>
31
+
32
+ <p align="center">
33
+ <a href="LICENSE"><img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="License"></a>
34
+ <img src="https://img.shields.io/badge/Python-3.10%2B-blue" alt="Python Version">
35
+ <img src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg" alt="PRs Welcome">
36
+ <img src="https://img.shields.io/badge/Maintained%3F-yes-green.svg" alt="Maintained">
37
+ </p>
38
+
39
+ ---
40
+
41
+ <p align="center">
42
+ <img src="images/screen.png" alt="Lambda Interface" width="auto">
43
+ </p>
44
+
45
+ ---
46
+
47
+ ## Overview
48
+
49
+ **Lambda** is a lightweight, command-line AI coding agent driven by Google's Gemini models. Unlike massive IDE extensions or bloated web setups, Lambda lives right in your terminal. It uses a ReAct (Reasoning and Acting) loop to autonomously navigate your codebase, read and write files, run shell commands, and orchestrate complex coding tasks from a single prompt.
50
+
51
+ With a beautiful UI powered by Rich, Lambda makes pair programming with AI feel fast, natural, and highly contextual.
52
+
53
+ ## Key Features
54
+
55
+ - **Autonomous Tool Execution**: Powered by Gemini's function calling, Lambda can `read_file`, `write_file`, `search_repo`, and `run_command` directly on your host machine to get things done.
56
+ - **Agentic Scratchpad**: Lambda uses a hidden local scratchpad (`.scratchpad/`) to draft implementation plans, think through complex logic, and maintain context across long execution chains.
57
+ - **Stunning CLI Experience**: Built with [Rich](https://github.com/Textualize/rich), featuring distinct conversational bubbles, syntax highlighting, active token monitoring, and beautiful live spinners.
58
+ - **Hot-Swappable Models**: Instantly switch between different Gemini models mid-conversation using the `/models` slash command.
59
+ - **Zero-Friction Configuration**: Global configurations (`~/.config/lambda-agent/config.env`) mean you can run `lambda` in *any* directory on your machine instantly.
60
+
61
+ ## Installation
62
+
63
+ Requires **Python 3.10+**. Install Lambda straight from the repository:
64
+
65
+ ```bash
66
+ git clone https://github.com/ayusrjn/lambda.git
67
+ cd lambda
68
+ pip install .
69
+ ```
70
+
71
+ *For local development and modifying the agent, use `pip install -e .` instead.*
72
+
73
+ ## Usage
74
+
75
+ Spin up the agent from any directory simply by running:
76
+
77
+ ```bash
78
+ lambda
79
+ ```
80
+
81
+ ### First-Time Setup
82
+ On your first run, Lambda will securely prompt you for your [Gemini API Key](https://aistudio.google.com/app/apikey) and model preference. This is saved to `~/.config/lambda-agent/config.env`.
83
+
84
+ *Note: You can override global settings by placing a `.env` file in your specific project directory.*
85
+
86
+ ### Built-in Slash Commands
87
+
88
+ During your interactive session, you can use the following commands:
89
+ - `/models` — Display a menu to hot-swap your active AI model (e.g., from Gemini Flash to Pro).
90
+ - `/config` — Quickly update your API key mid-session.
91
+ - `/help` — List all available slash commands.
92
+ - `exit` or `quit` — End the session and review your total token usage.
93
+
94
+ ## Under the Hood
95
+
96
+ Lambda acts autonomously using an extensible set of Python tools:
97
+ - `search_repo(query, path)`: Deep file inspection ignoring `.git`, `.venv`, and binary caches.
98
+ - `run_command(command)`: Real shell execution (with 30s timeout guards).
99
+ - `ask_user(question)`: Ability to explicitly pause and ask the human for clarification.
100
+ - `read_file`, `write_file`: Direct file manipulations.
101
+ - **Scratchpad API**: `read_scratchpad`, `write_scratchpad`, `append_scratchpad` for planning.
102
+
103
+ ## Contributing
104
+
105
+ Contributions make the open-source community an amazing place to learn and build!
106
+
107
+ 1. Fork the Project
108
+ 2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
109
+ 3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
110
+ 4. Push to the Branch (`git push origin feature/AmazingFeature`)
111
+ 5. Open a Pull Request
112
+
113
+ ## License & Attribution
114
+
115
+ Distributed under the Apache 2.0 License. See `LICENSE` for more information.
116
+
117
+ - Engine powered by [Google GenAI SDK](https://github.com/googleapis/python-genai).
118
+ - Lambda icon by [shohanur.rahman13](https://www.flaticon.com/authors/shohanur-rahman13) from [Flaticon](https://www.flaticon.com/free-icons/lambda)
@@ -0,0 +1,16 @@
1
+ lambda_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ lambda_agent/agent.py,sha256=6m3-26GdW_mtwe_tnUTlTUQmbSEwYXIcaI0EdVrDD48,13162
3
+ lambda_agent/cli_setup.py,sha256=qmP42G9kZch5e3i3vtzJRCeiNppaoJ4zpJpws_2O7OI,1495
4
+ lambda_agent/config.py,sha256=f9BjoAvpFd8nnHn_J2SmNhh3buod3ua2WHzgdDSUKIA,832
5
+ lambda_agent/context.py,sha256=DWBUb8DJ21dbhCx3syjp47tVqr_8ak0mufdjf4Ow6R4,4641
6
+ lambda_agent/main.py,sha256=dcCWZxS7XpyNLlHXrOYkaxER69xH8TAU-OolcT_T31s,10856
7
+ lambda_agent/scratchpad.py,sha256=w_lID1Ntrq-caWuFTFIhnnAnFHCoKdkZgulLE7wIiXY,4622
8
+ lambda_agent/spinner.py,sha256=06sV_GCAlhqQaCy_Qouow-HMpLWSsJNZ3ibknFzbqOM,1404
9
+ lambda_agent/subagent.py,sha256=P6FOntn7loMVl093bSm18Wxgxq99UF48bkDlV7R2IP0,9739
10
+ lambda_agent/tools.py,sha256=zOeSLsapR31forDvPjKmvKuko2ynGwxn-ZHyvGNu9do,6968
11
+ lambda_agent-0.1.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
12
+ lambda_agent-0.1.0.dist-info/METADATA,sha256=YAjYGfKnqwukJy5liRYuNOKP_IUL3U5jFX3vN83w07U,5099
13
+ lambda_agent-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
14
+ lambda_agent-0.1.0.dist-info/entry_points.txt,sha256=-VGh5epNi7PKGtyyePa75HjslMiLPj3IOS4db5r2bpg,50
15
+ lambda_agent-0.1.0.dist-info/top_level.txt,sha256=DAiCRH5PGk9CdMWklXx332bwemnUpZ9lVjHw0h3hWgg,13
16
+ lambda_agent-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ lambda = lambda_agent.main:main