poca-agent 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,5 @@
1
+ Metadata-Version: 2.4
2
+ Name: poca-agent
3
+ Version: 0.1.0
4
+ Summary: Python Open Coding Agent using DeepAgents and LangChain. Aims to be close to GH Copilot.
5
+ Author: Julius Vetter
@@ -0,0 +1,31 @@
1
+ # Python Open Coding Agent (POCA)
2
+ ---
3
+ This repository contains the source code for a simple coding agent. It uses langchain deep agents
4
+ and the huggingface inference API by default (this can be changed though).
5
+ ---
6
+ ## Prerequisites
7
+ ### Required:
8
+ - git installed
9
+ - python installed
10
+ - pip installed
11
+ ### Optional (but recommended):
12
+ - Virtual Environment
13
+
14
+ ---
15
+ ## Getting started
16
+ 1. **Clone the repository:** Inside a Folder of your choice run `git clone "https://github.com/vetterjulius/poca.git"`.
17
+ 2. **Install required Python packages:** Run `pip install -r requirements.txt`.
18
+ 3. **Configure model, provider and system prompt:** Visit `config.py` and change the constants to your model providers data (by default it uses the huggingface inference API).
19
+ 4. **Set your API key:** Inside the root `poca` directory create a file called `.env` and set the right environment variable for your model provider (for openai and compatible providers it's `OPENAI_API_KEY = your_api_key`).
20
+ 5. **Run the CLI interface:** Run `python poca/cli_interface.py run "your_prompt"`. For help run `python poca/cli_interface.py --help`. You can also use the `PocaAgent` class from the `poca_agent.py` file to integrate POCA into your own projects.
21
+ ---
22
+ ## Project Overview (relevant Python files):
23
+ - `tools.py`: Contains all tools that can be used by the agent (e.g. file manipulation, fetching webpages). You can add your own tools and/or define your own tool groups here.
24
+ - `poca_agent.py`: Contains the `PocaAgent` class. You can use this in your own projects.
25
+ - `config.py`: Different configuration options that get passed to the `PocaAgent` class as defaults (but can be overwritten in the constructor). Configure this file to match the information from your model provider.
26
+ - `cli_interface.py`: Contains the CLI wrapper for the `PocaAgent` class.
27
+ ---
28
+ ## Disclaimer
29
+ - Run the program at your own risk. It shouldn't mess anything up and it hasn't yet but I don't want to guarantee that. I recommend reading and understanding the few lines of source code in this repository before running it.
30
+ ---
31
+ ### Have a nice day, you're doing great! :)
File without changes
@@ -0,0 +1,101 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ POCA CLI - Python Open Coding Agent
4
+ """
5
+
6
+ import argparse
7
+ import sys
8
+ import os
9
+ from poca_agent import PocaAgent
10
+ import config
11
+
12
+ VERSION = "0.1.0"
13
+
14
+
15
def build_parser() -> argparse.ArgumentParser:
    """Build the top-level argument parser for the POCA CLI.

    Defines the global ``--version`` flag plus the required ``run``
    subcommand with its prompt argument and model-configuration flags.
    """
    parser = argparse.ArgumentParser(
        prog="poca",
        description="Python Open Coding Agent CLI (Copilot-style coding assistant)",
    )
    parser.add_argument("--version", action="version", version=f"POCA {VERSION}")

    subparsers = parser.add_subparsers(dest="command", required=True)

    # --- run command ---
    run_parser = subparsers.add_parser(
        "run",
        help="Run the coding agent with a prompt",
        description="Execute a prompt using the POCA agent",
    )
    run_parser.add_argument("prompt", type=str, help="Prompt to send to the agent")

    # Optional flags, declared as (flag, kwargs) pairs so they are added uniformly.
    optional_flags = [
        ("--model", dict(
            default=config.DEFAULT_MODEL,
            help=f"Model name (default: {config.DEFAULT_MODEL})",
        )),
        ("--base-url", dict(
            default=config.DEFAULT_BASE_URL,
            help=f"Model base URL (default: {config.DEFAULT_BASE_URL})",
        )),
        ("--max-tokens", dict(
            type=int,
            default=1000,
            help="Max tokens for model output",
        )),
        ("--system-prompt", dict(
            default=config.DEFAULT_SYS_PROMPT,
            help="Override system prompt",
        )),
        ("--root-dir", dict(
            default=os.getcwd(),
            help="Filesystem root directory for agent",
        )),
    ]
    for flag, kwargs in optional_flags:
        run_parser.add_argument(flag, **kwargs)

    return parser
74
+
75
+
76
def cmd_run(args: argparse.Namespace):
    """Handle the ``run`` subcommand: build a PocaAgent from the parsed
    CLI options, execute the prompt, and print whatever the agent returns."""
    agent = PocaAgent(
        system_prompt=args.system_prompt,
        model=args.model,
        base_url=args.base_url,
        max_tokens=args.max_tokens,
        root_dir=args.root_dir,
    )
    print(agent.run(args.prompt))
87
+
88
+
89
def main():
    """CLI entry point: parse arguments and dispatch to the subcommand."""
    parser = build_parser()
    args = parser.parse_args()

    if args.command != "run":
        # Defensive only: argparse enforces required=True on the subcommand,
        # so this branch is unreachable unless a new subcommand is added
        # without a matching dispatch case here.
        parser.print_help()
        sys.exit(1)

    cmd_run(args)


if __name__ == "__main__":
    main()
@@ -0,0 +1,12 @@
1
# Default configuration for PocaAgent. Every value here can be overridden
# via the PocaAgent constructor or the corresponding CLI flag.

# System prompt the agent receives on every run.
# (Fixed typo: "available to in order to" -> "available to you in order to".)
DEFAULT_SYS_PROMPT = """
You are a highly specialized coding assistant designed to help with software development tasks.
You can use all tools available to you in order to deliver high-quality code.
Always ensure that your code is well-structured and follows best practices.
If you encounter an error, provide a clear explanation and suggest possible fixes.
"""

# Model identifier as understood by the provider (e.g. "openai:gpt-4o").
DEFAULT_MODEL = "openai/gpt-oss-120b"

# OpenAI-compatible endpoint; defaults to the Hugging Face inference router.
# Any compatible router works, e.g. "https://openrouter.ai/api/v1".
DEFAULT_BASE_URL = "https://router.huggingface.co/v1"

# LangChain provider key passed to init_chat_model.
DEFAULT_MODEL_PROVIDER = "openai"
@@ -0,0 +1,46 @@
1
+ # Python Open Coding Agent using DeepAgents and LangChain
2
+ # Aims to be close to GH Copilot
3
+
4
+ import os
5
+ from pathlib import Path
6
+ from dotenv import load_dotenv
7
+ from deepagents import create_deep_agent
8
+ from langchain.chat_models import init_chat_model
9
+ from deepagents.backends import FilesystemBackend
10
+ import config
11
+
12
+ load_dotenv(Path(os.getcwd()) / ".env")
13
+
14
class PocaAgent:
    """Thin wrapper around a DeepAgents deep agent backed by a LangChain chat model.

    Builds the chat model via ``init_chat_model`` and wires it into
    ``create_deep_agent`` with a virtual filesystem backend rooted at
    ``root_dir``.
    """

    def __init__(
        self,
        system_prompt: str = config.DEFAULT_SYS_PROMPT,
        model: str = config.DEFAULT_MODEL,
        model_provider: str = config.DEFAULT_MODEL_PROVIDER,
        base_url: str = config.DEFAULT_BASE_URL,
        max_tokens: int = 1000,
        tools: list = None,
        root_dir: str = str(os.getcwd())
    ):
        """Create the underlying model and agent.

        Args:
            system_prompt: System prompt given to the agent.
            model: Model identifier understood by the provider.
            model_provider: LangChain provider key for init_chat_model.
            base_url: OpenAI-compatible endpoint URL.
            max_tokens: Cap on model output tokens.
            tools: Extra tools to expose to the agent (None means no extras).
            root_dir: Filesystem root for the agent's (virtual) file backend.
                NOTE: the default is evaluated once at import time, not per call.
        """
        # `tools=None` instead of `tools=[]`: a mutable default list would be
        # one shared object across every PocaAgent instance.
        tools = [] if tools is None else tools
        self.model = init_chat_model(
            model=model,
            model_provider=model_provider,
            base_url=base_url,
            max_tokens=max_tokens
        )
        self.agent = create_deep_agent(
            model=self.model,
            tools=tools,
            system_prompt=system_prompt,
            backend=FilesystemBackend(
                root_dir=root_dir,
                virtual_mode=True
            )
        )

    def run(self, prompt: str):
        """Send `prompt` to the agent and return the final agent state.

        Returns the raw ``invoke`` result (a state mapping containing a
        "messages" list); the original ``-> str`` annotation was inaccurate,
        so it has been dropped rather than silently changing the return value.
        """
        return self.agent.invoke({
            "messages": [{"role": "user", "content": prompt}]
        })
@@ -0,0 +1,511 @@
1
+ import os
2
+ import subprocess
3
+ from subprocess import run
4
+ from langchain.tools import tool
5
+ import glob
6
+ import re
7
+ from pathlib import Path
8
+
9
+ # Simple in-process terminal manager for background tasks
10
+ _terminals = {}
11
+
12
+ # Track last commands run (synchronous or background) so tools can return them
13
+ _last_commands = []
14
+
15
+ # Store a terminal selection (best-effort placeholder); VS Code selection not directly accessible
16
+ _terminal_selection = ""
17
+
18
+
19
+ @tool("Terminal", description="Runs shell commands")
20
+ def run_in_terminal(command: str) -> str:
21
+ "Runs shell commands"
22
+ try:
23
+ # record last command
24
+ try:
25
+ _last_commands.append(command)
26
+ except Exception:
27
+ pass
28
+
29
+ result = run(command, shell=True, text=True, capture_output=True)
30
+ return result.stdout + result.stderr
31
+ except Exception as e:
32
+ return f"Error: {e}"
33
+
34
+
35
+ @tool("createAndRunTask", description="Runs a command; can start in background (returns id) or run synchronously")
36
+ def create_and_run_task(command: str, isBackground: bool = False) -> str:
37
+ "Runs a command; can start in background (returns id) or run synchronously"
38
+ try:
39
+ # record last command
40
+ try:
41
+ _last_commands.append(command)
42
+ except Exception:
43
+ pass
44
+
45
+ if isBackground:
46
+ proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
47
+ tid = str(proc.pid)
48
+ _terminals[tid] = proc
49
+ return f"Background task started with id {tid}"
50
+ else:
51
+ result = run(command, shell=True, text=True, capture_output=True)
52
+ return result.stdout + result.stderr
53
+ except Exception as e:
54
+ return f"Error: {e}"
55
+
56
+
57
+ @tool("awaitTerminal", description="Waits for a background terminal/task to finish and returns its output")
58
+ def await_terminal(term_id: str, timeout: float = None) -> str:
59
+ "Waits for a background terminal/task to finish and returns its output"
60
+ try:
61
+ proc = _terminals.get(term_id)
62
+ if not proc:
63
+ return f"No terminal with id {term_id}"
64
+ proc.wait(timeout=timeout)
65
+ out, err = proc.communicate()
66
+ # cleanup
67
+ try:
68
+ del _terminals[term_id]
69
+ except KeyError:
70
+ pass
71
+ return (out or "") + (err or "")
72
+ except subprocess.TimeoutExpired:
73
+ return f"Timeout waiting for terminal {term_id}"
74
+ except Exception as e:
75
+ return f"Error: {e}"
76
+
77
+
78
+ @tool("getTerminalOutput", description="Returns the current output status for a background terminal/task")
79
+ def get_terminal_output(term_id: str) -> str:
80
+ "Returns the current output status for a background terminal/task"
81
+ try:
82
+ proc = _terminals.get(term_id)
83
+ if not proc:
84
+ return f"No terminal with id {term_id}"
85
+ if proc.poll() is None:
86
+ return f"Terminal {term_id} is still running"
87
+ out, err = proc.communicate()
88
+ # cleanup
89
+ try:
90
+ del _terminals[term_id]
91
+ except KeyError:
92
+ pass
93
+ return (out or "") + (err or "")
94
+ except Exception as e:
95
+ return f"Error: {e}"
96
+
97
+
98
+ @tool("killTerminal", description="Kills a background terminal/task by id")
99
+ def kill_terminal(term_id: str) -> str:
100
+ "Kills a background terminal/task by id"
101
+ try:
102
+ proc = _terminals.get(term_id)
103
+ if not proc:
104
+ return f"No terminal with id {term_id}"
105
+ proc.kill()
106
+ out, err = proc.communicate()
107
+ try:
108
+ del _terminals[term_id]
109
+ except KeyError:
110
+ pass
111
+ return f"Terminal {term_id} killed.\n" + (out or "") + (err or "")
112
+ except Exception as e:
113
+ return f"Error: {e}"
114
+
115
+
116
+ @tool("terminal_last_command", description="Returns the last command run via these terminal tools")
117
+ def terminal_last_command() -> str:
118
+ "Returns the last command run via these terminal tools"
119
+ try:
120
+ if not _last_commands:
121
+ return "No terminal commands recorded yet"
122
+ return _last_commands[-1]
123
+ except Exception as e:
124
+ return f"Error: {e}"
125
+
126
+
127
+ @tool("terminal_selection", description="Returns the current terminal selection (best-effort placeholder)")
128
+ def terminal_selection() -> str:
129
+ "Returns the current terminal selection (best-effort placeholder)"
130
+ try:
131
+ # We cannot access the VS Code terminal selection from this process.
132
+ # This returns an internal stored value (empty by default).
133
+ return _terminal_selection or ""
134
+ except Exception as e:
135
+ return f"Error: {e}"
136
+
137
+
138
+ @tool("createDirectory", description="Creates directories recursively")
139
+ def create_directory(path: str) -> str:
140
+ "Creates directories recursively"
141
+ try:
142
+ os.makedirs(path, exist_ok=True)
143
+ return f"Directory {path} created."
144
+ except Exception as e:
145
+ return f"Error: {e}"
146
+
147
+
148
+ @tool("createFile", description="Creates a file with content")
149
+ def create_file(path: str, content: str) -> str:
150
+ "Creates a file with content"
151
+ try:
152
+ dirpath = os.path.dirname(path)
153
+ if dirpath:
154
+ os.makedirs(dirpath, exist_ok=True)
155
+ with open(path, "w", encoding="utf-8") as f:
156
+ f.write(content)
157
+ return f"File {path} created."
158
+ except Exception as e:
159
+ return f"Error: {e}"
160
+
161
+
162
+ @tool("file_edit_tool", description="Replace a range of lines in an existing file")
163
+ def file_edit(path: str, start_line: int, end_line: int, new_lines: list[str]) -> str:
164
+ "Replace a range of lines in an existing file"
165
+ try:
166
+ if not os.path.exists(path):
167
+ return f"Error: File {path} does not exist."
168
+
169
+ if start_line < 1 or end_line < start_line:
170
+ return "Error: invalid line range."
171
+
172
+ # Read existing file
173
+ with open(path, "r", encoding="utf-8") as f:
174
+ lines = f.readlines()
175
+
176
+ if end_line > len(lines):
177
+ return f"Error: file only has {len(lines)} lines."
178
+
179
+ # Ensure newline formatting
180
+ replacement = [line.rstrip("\n") + "\n" for line in new_lines]
181
+
182
+ # Replace range
183
+ lines[start_line - 1:end_line] = replacement
184
+
185
+ # Write back
186
+ with open(path, "w", encoding="utf-8") as f:
187
+ f.writelines(lines)
188
+
189
+ return f"Replaced lines {start_line}-{end_line} in {path}."
190
+
191
+ except Exception as e:
192
+ return f"Error: {e}"
193
+
194
+
195
+ @tool("read_file", description="Read file contents by line range (1-based inclusive).")
196
+ def read_file(path: str, start: int = None, end: int = None) -> str:
197
+ "Read file contents by line range (1-based inclusive)."
198
+ try:
199
+ if not os.path.exists(path):
200
+ return f"File not found: {path}"
201
+ with open(path, "r", encoding="utf-8", errors="replace") as f:
202
+ lines = f.readlines()
203
+
204
+ # Default to full file
205
+ total = len(lines)
206
+ if start is None:
207
+ start = 1
208
+ if end is None:
209
+ end = total
210
+
211
+ # Validate
212
+ if start < 1:
213
+ return "Invalid start line: must be >= 1"
214
+ if end < start:
215
+ return "Invalid range: end must be >= start"
216
+ if start > total:
217
+ return "Start line beyond end of file"
218
+
219
+ # Convert to 0-based indices
220
+ s = start - 1
221
+ e = min(end, total)
222
+ selected = lines[s:e]
223
+ return "".join(selected)
224
+ except Exception as e:
225
+ return f"Error reading file: {e}"
226
+
227
+
228
+ @tool("list_directory", description="Lists the content of a directory")
229
+ def list_directory(path: str = ".") -> str:
230
+ "Lists the content of a directory"
231
+ try:
232
+ p = Path(path)
233
+ if not p.exists():
234
+ return f"Path does not exist: {path}"
235
+ items = []
236
+ for child in sorted(p.iterdir()):
237
+ typ = "dir" if child.is_dir() else "file"
238
+ items.append(f"{typ}\t{child.name}")
239
+ return "\n".join(items)
240
+ except Exception as e:
241
+ return f"Error listing directory: {e}"
242
+
243
+
244
+ @tool("get_problems", description="Attempts to read problems/errors for a file (best-effort)")
245
+ def get_problems(path: str) -> str:
246
+ """Try multiple approaches to surface problems for a file.
247
+
248
+ 1) If `pyflakes` is available, run it.
249
+ 2) Otherwise try `python -m py_compile` for syntax errors.
250
+ 3) If neither yields anything, return a helpful message.
251
+ """
252
+ try:
253
+ if not os.path.exists(path):
254
+ return f"File not found: {path}"
255
+
256
+ # Try pyflakes first (good for showing lint-like issues)
257
+ try:
258
+ result = run(f'python -m pyflakes "{path}"', shell=True, text=True, capture_output=True)
259
+ out = (result.stdout or "") + (result.stderr or "")
260
+ if out.strip():
261
+ return out
262
+ except Exception:
263
+ pass
264
+
265
+ # Fallback: try to compile the file to capture syntax errors
266
+ try:
267
+ result = run(["python", "-m", "py_compile", path], text=True, capture_output=True)
268
+ # py_compile reports errors on stderr
269
+ out = (result.stdout or "") + (result.stderr or "")
270
+ if out.strip():
271
+ return out
272
+ except Exception:
273
+ pass
274
+
275
+ return "No problems found (no linters available); tried pyflakes and py_compile."
276
+ except Exception as e:
277
+ return f"Error reading problems: {e}"
278
+
279
+
280
+
281
+ @tool("semantic_search_in_codebase", description="Find relevant file chunks and symbols using a glob pattern")
282
+ def semantic_search_in_codebase(query: str, glob_pattern: str = "**/*.py", max_results: int = 25) -> str:
283
+ """Best-effort semantic-ish search: finds matching lines/chunks for `query`.
284
+
285
+ Returns top matches across files matching `glob_pattern` sorted by number of matches.
286
+ """
287
+ try:
288
+ matches = []
289
+ files = glob.glob(glob_pattern, recursive=True)
290
+ for f in files:
291
+ try:
292
+ with open(f, "r", encoding="utf-8", errors="replace") as fh:
293
+ text = fh.read()
294
+ except Exception:
295
+ continue
296
+
297
+ # simple scoring: count occurrences of query tokens
298
+ score = text.lower().count(query.lower())
299
+ if score <= 0:
300
+ # also check for symbol-like matches
301
+ if re.search(r"\b" + re.escape(query) + r"\b", text):
302
+ score = 1
303
+ else:
304
+ continue
305
+
306
+ # find up to a few contexts
307
+ contexts = []
308
+ for m in re.finditer(re.escape(query), text, flags=re.IGNORECASE):
309
+ start = max(0, m.start() - 200)
310
+ end = min(len(text), m.end() + 200)
311
+ contexts.append(text[start:end])
312
+ if len(contexts) >= 3:
313
+ break
314
+
315
+ matches.append((score, f, contexts))
316
+
317
+ matches.sort(key=lambda x: x[0], reverse=True)
318
+ out_lines = []
319
+ for score, f, contexts in matches[:max_results]:
320
+ out_lines.append(f"File: {f} (score={score})")
321
+ for c in contexts:
322
+ snippet = c.replace("\n", "\n")
323
+ out_lines.append("---")
324
+ out_lines.append(snippet)
325
+ out_lines.append("\n")
326
+
327
+ return "\n".join(out_lines) if out_lines else "No matches found."
328
+ except Exception as e:
329
+ return f"Error during semantic search: {e}"
330
+
331
+
332
+ @tool("file_search", description="Find files by glob pattern")
333
+ def file_search(pattern: str = "**/*") -> str:
334
+ """Return newline-separated list of files matching `pattern` (glob, recursive)."""
335
+ try:
336
+ files = glob.glob(pattern, recursive=True)
337
+ if not files:
338
+ return "No files matched pattern."
339
+ return "\n".join(files)
340
+ except Exception as e:
341
+ return f"Error searching files: {e}"
342
+
343
+
344
+ @tool("text_search", description="Find text in files using a regular expression")
345
+ def text_search(pattern: str, glob_pattern: str = "**/*", ignore_case: bool = True, max_results: int = 200) -> str:
346
+ """Search files matched by `glob_pattern` for regex `pattern` and return matched lines with file:line context."""
347
+ try:
348
+ flags = re.IGNORECASE if ignore_case else 0
349
+ regex = re.compile(pattern, flags)
350
+ out = []
351
+ files = glob.glob(glob_pattern, recursive=True)
352
+ count = 0
353
+ for f in files:
354
+ # skip directories
355
+ if os.path.isdir(f):
356
+ continue
357
+ try:
358
+ with open(f, "r", encoding="utf-8", errors="replace") as fh:
359
+ for i, line in enumerate(fh, start=1):
360
+ if regex.search(line):
361
+ out.append(f"{f}:{i}: {line.rstrip()}")
362
+ count += 1
363
+ if count >= max_results:
364
+ return "\n".join(out)
365
+ except Exception:
366
+ continue
367
+
368
+ return "\n".join(out) if out else "No matches found."
369
+ except Exception as e:
370
+ return f"Error during text search: {e}"
371
+
372
+
373
+ @tool("get_usages", description="Find references/usages of a symbol in the codebase")
374
+ def get_usages(symbol: str, glob_pattern: str = "**/*.py", max_results: int = 200) -> str:
375
+ """Return occurrences of `symbol` across files matching `glob_pattern`.
376
+
377
+ This is a simple textual search (best-effort)."""
378
+ try:
379
+ token_re = re.compile(r"\b" + re.escape(symbol) + r"\b")
380
+ out = []
381
+ files = glob.glob(glob_pattern, recursive=True)
382
+ count = 0
383
+ for f in files:
384
+ if os.path.isdir(f):
385
+ continue
386
+ try:
387
+ with open(f, "r", encoding="utf-8", errors="replace") as fh:
388
+ for i, line in enumerate(fh, start=1):
389
+ if token_re.search(line):
390
+ out.append(f"{f}:{i}: {line.rstrip()}")
391
+ count += 1
392
+ if count >= max_results:
393
+ return "\n".join(out)
394
+ except Exception:
395
+ continue
396
+
397
+ return "\n".join(out) if out else "No usages found."
398
+ except Exception as e:
399
+ return f"Error finding usages: {e}"
400
+
401
+
402
+ @tool("get_diffs", description="Returns changed lines in files (uses git diff if available)")
403
+ def get_diffs(path: str = None) -> str:
404
+ """Return a git-style diff for `path` or for the repo if path is None.
405
+
406
+ Falls back to a message if git is not available or no diffs.
407
+ """
408
+ try:
409
+ # Prefer git if available
410
+ try:
411
+ if path:
412
+ cmd = f'git diff --unified=0 -- "{path}"'
413
+ else:
414
+ cmd = 'git diff --unified=0'
415
+ res = run(cmd, shell=True, text=True, capture_output=True)
416
+ out = (res.stdout or "") + (res.stderr or "")
417
+ if out.strip():
418
+ return out
419
+ except Exception:
420
+ pass
421
+
422
+ return "No git diffs found or git not available."
423
+ except Exception as e:
424
+ return f"Error getting diffs: {e}"
425
+
426
+
427
+
428
+ @tool("fetch_url", description="Fetches content from a URL (GET request)")
429
+ def fetch_url(url: str) -> str:
430
+ "Fetches content from a URL (GET request)"
431
+ try:
432
+ import requests
433
+ response = requests.get(url)
434
+ response.raise_for_status()
435
+ return response.text
436
+ except Exception as e:
437
+ return f"Error fetching URL: {e}"
438
+
439
+
440
+ @tool("search_git_repository", description="Searches for a query in a remote git repository")
441
+ def search_git_repository(repo_url: str, query: str) -> str:
442
+ "Searches for a query in a remote git repository"
443
+ try:
444
+ # Clone the repo to a temp directory
445
+ import tempfile
446
+ with tempfile.TemporaryDirectory() as tmpdir:
447
+ run(f"git clone --depth 1 {repo_url} {tmpdir}", shell=True, check=True, text=True, capture_output=True)
448
+ # Use the text_search tool to find the query in the cloned repo
449
+ return text_search(query, glob_pattern=os.path.join(tmpdir, "**/*"))
450
+ except Exception as e:
451
+ return f"Error searching git repository: {e}"
452
+
453
+
454
# Tool Groups for easier selection

# Every tool defined in this module. Fix: `file_search` was defined and
# listed in `search_tools` below but missing from `all_tools`.
all_tools = [
    run_in_terminal,
    create_and_run_task,
    await_terminal,
    get_terminal_output,
    kill_terminal,
    terminal_last_command,
    terminal_selection,
    create_directory,
    create_file,
    file_edit,
    read_file,
    list_directory,
    get_problems,
    semantic_search_in_codebase,
    text_search,
    get_usages,
    file_search,
    get_diffs,
    fetch_url,
    search_git_repository,
]

# Shell / background-process management.
terminal_tools = [
    run_in_terminal,
    create_and_run_task,
    await_terminal,
    get_terminal_output,
    kill_terminal,
    terminal_last_command,
    terminal_selection,
]

# Filesystem mutation.
write_tools = [
    create_directory,
    create_file,
    file_edit,
]

# Read-only inspection.
read_tools = [
    read_file,
    list_directory,
    get_problems,
    get_diffs,
]

# Code/content discovery.
search_tools = [
    semantic_search_in_codebase,
    text_search,
    get_usages,
    file_search,
    search_git_repository,
]

# Network access.
web_tools = [
    fetch_url,
    search_git_repository,
]
@@ -0,0 +1,5 @@
1
+ Metadata-Version: 2.4
2
+ Name: poca-agent
3
+ Version: 0.1.0
4
+ Summary: Python Open Coding Agent using DeepAgents and LangChain. Aims to be close to GH Copilot.
5
+ Author: Julius Vetter
@@ -0,0 +1,11 @@
1
+ README.md
2
+ pyproject.toml
3
+ poca/__init__.py
4
+ poca/cli_interface.py
5
+ poca/config.py
6
+ poca/poca_agent.py
7
+ poca/tools.py
8
+ poca_agent.egg-info/PKG-INFO
9
+ poca_agent.egg-info/SOURCES.txt
10
+ poca_agent.egg-info/dependency_links.txt
11
+ poca_agent.egg-info/top_level.txt
@@ -0,0 +1,9 @@
1
+ [build-system]
2
+ requires = ["setuptools"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "poca-agent"
7
+ version = "0.1.0"
8
+ description = "Python Open Coding Agent using DeepAgents and LangChain. Aims to be close to GH Copilot."
9
+ authors = [{name="Julius Vetter"}]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+