mini-swe-agent 1.16.0 (mini_swe_agent-1.16.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. mini_swe_agent-1.16.0.dist-info/METADATA +314 -0
  2. mini_swe_agent-1.16.0.dist-info/RECORD +62 -0
  3. mini_swe_agent-1.16.0.dist-info/WHEEL +5 -0
  4. mini_swe_agent-1.16.0.dist-info/entry_points.txt +5 -0
  5. mini_swe_agent-1.16.0.dist-info/licenses/LICENSE.md +21 -0
  6. mini_swe_agent-1.16.0.dist-info/top_level.txt +1 -0
  7. minisweagent/__init__.py +83 -0
  8. minisweagent/__main__.py +7 -0
  9. minisweagent/agents/__init__.py +1 -0
  10. minisweagent/agents/default.py +131 -0
  11. minisweagent/agents/interactive.py +153 -0
  12. minisweagent/agents/interactive_textual.py +450 -0
  13. minisweagent/config/README.md +10 -0
  14. minisweagent/config/__init__.py +27 -0
  15. minisweagent/config/default.yaml +157 -0
  16. minisweagent/config/extra/__init__.py +1 -0
  17. minisweagent/config/extra/swebench.yaml +230 -0
  18. minisweagent/config/extra/swebench_roulette.yaml +233 -0
  19. minisweagent/config/extra/swebench_xml.yaml +215 -0
  20. minisweagent/config/github_issue.yaml +146 -0
  21. minisweagent/config/mini.tcss +86 -0
  22. minisweagent/config/mini.yaml +158 -0
  23. minisweagent/config/mini_no_temp.yaml +158 -0
  24. minisweagent/environments/__init__.py +31 -0
  25. minisweagent/environments/docker.py +114 -0
  26. minisweagent/environments/extra/__init__.py +0 -0
  27. minisweagent/environments/extra/bubblewrap.py +112 -0
  28. minisweagent/environments/extra/swerex_docker.py +47 -0
  29. minisweagent/environments/local.py +38 -0
  30. minisweagent/environments/singularity.py +97 -0
  31. minisweagent/models/__init__.py +114 -0
  32. minisweagent/models/anthropic.py +35 -0
  33. minisweagent/models/extra/__init__.py +0 -0
  34. minisweagent/models/extra/roulette.py +61 -0
  35. minisweagent/models/litellm_model.py +100 -0
  36. minisweagent/models/litellm_response_api_model.py +80 -0
  37. minisweagent/models/openrouter_model.py +125 -0
  38. minisweagent/models/portkey_model.py +154 -0
  39. minisweagent/models/portkey_response_api_model.py +74 -0
  40. minisweagent/models/requesty_model.py +119 -0
  41. minisweagent/models/test_models.py +42 -0
  42. minisweagent/models/utils/__init__.py +0 -0
  43. minisweagent/models/utils/cache_control.py +54 -0
  44. minisweagent/models/utils/key_per_thread.py +20 -0
  45. minisweagent/models/utils/openai_utils.py +41 -0
  46. minisweagent/py.typed +0 -0
  47. minisweagent/run/__init__.py +1 -0
  48. minisweagent/run/extra/__init__.py +0 -0
  49. minisweagent/run/extra/config.py +114 -0
  50. minisweagent/run/extra/swebench.py +266 -0
  51. minisweagent/run/extra/swebench_single.py +79 -0
  52. minisweagent/run/extra/utils/__init__.py +0 -0
  53. minisweagent/run/extra/utils/batch_progress.py +178 -0
  54. minisweagent/run/github_issue.py +87 -0
  55. minisweagent/run/hello_world.py +36 -0
  56. minisweagent/run/inspector.py +212 -0
  57. minisweagent/run/mini.py +108 -0
  58. minisweagent/run/mini_extra.py +44 -0
  59. minisweagent/run/utils/__init__.py +0 -0
  60. minisweagent/run/utils/save.py +78 -0
  61. minisweagent/utils/__init__.py +0 -0
  62. minisweagent/utils/log.py +36 -0
minisweagent/models/portkey_response_api_model.py ADDED
@@ -0,0 +1,74 @@
+ import logging
+ import os
+ from dataclasses import dataclass
+
+ import litellm
+ from tenacity import (
+     before_sleep_log,
+     retry,
+     retry_if_not_exception_type,
+     stop_after_attempt,
+     wait_exponential,
+ )
+
+ from minisweagent.models import GLOBAL_MODEL_STATS
+ from minisweagent.models.portkey_model import PortkeyModel, PortkeyModelConfig
+ from minisweagent.models.utils.cache_control import set_cache_control
+ from minisweagent.models.utils.openai_utils import coerce_responses_text
+
+ logger = logging.getLogger("portkey_response_api_model")
+
+
+ @dataclass
+ class PortkeyResponseAPIModelConfig(PortkeyModelConfig):
+     pass
+
+
+ class PortkeyResponseAPIModel(PortkeyModel):
+     def __init__(self, *, config_class: type = PortkeyResponseAPIModelConfig, **kwargs):
+         super().__init__(config_class=config_class, **kwargs)
+         self._previous_response_id: str | None = None
+
+     @retry(
+         stop=stop_after_attempt(int(os.getenv("MSWEA_MODEL_RETRY_STOP_AFTER_ATTEMPT", "10"))),
+         wait=wait_exponential(multiplier=1, min=4, max=60),
+         before_sleep=before_sleep_log(logger, logging.WARNING),
+         retry=retry_if_not_exception_type((KeyboardInterrupt, TypeError, ValueError)),
+     )
+     def _query(self, messages: list[dict[str, str]], **kwargs):
+         input_messages = messages if self._previous_response_id is None else messages[-1:]
+         resp = self.client.responses.create(
+             model=self.config.model_name,
+             input=input_messages,
+             previous_response_id=self._previous_response_id,
+             **(self.config.model_kwargs | kwargs),
+         )
+         self._previous_response_id = getattr(resp, "id", None)
+         return resp
+
+     def query(self, messages: list[dict[str, str]], **kwargs) -> dict:
+         if self.config.set_cache_control:
+             messages = set_cache_control(messages, mode=self.config.set_cache_control)
+         response = self._query(messages, **kwargs)
+         text = coerce_responses_text(response)
+         try:
+             cost = litellm.cost_calculator.completion_cost(response)
+             assert cost > 0.0, f"Cost is not positive: {cost}"
+         except Exception as e:
+             if self.config.cost_tracking != "ignore_errors":
+                 raise RuntimeError(
+                     f"Error calculating cost for model {self.config.model_name}: {e}. "
+                     "You can ignore this issue from your config file with cost_tracking: 'ignore_errors' or "
+                     "globally with export MSWEA_COST_TRACKING='ignore_errors' to ignore this error. "
+                 ) from e
+             cost = 0.0
+         self.n_calls += 1
+         self.cost += cost
+         GLOBAL_MODEL_STATS.add(cost)
+         return {
+             "content": text,
+             "extra": {
+                 "response": response.model_dump() if hasattr(response, "model_dump") else {},
+                 "cost": cost,
+             },
+         }
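
The notable detail above is the response-ID chaining: once `_previous_response_id` is set, `_query` sends only `messages[-1:]` and lets the Responses API replay earlier turns server-side. A minimal usage sketch follows; the constructor arguments are assumptions, since `PortkeyModelConfig` (and the Portkey client setup) live in `portkey_model.py` rather than in this file, and the model id is illustrative:

```python
# Hypothetical sketch: `model_name` is assumed to be a PortkeyModelConfig field
# (the code above reads self.config.model_name); credentials must be configured
# as portkey_model.py expects.
model = PortkeyResponseAPIModel(model_name="gpt-5")

history = [{"role": "user", "content": "Summarize this repository."}]
first = model.query(history)  # first call: the full history is sent

history += [
    {"role": "assistant", "content": first["content"]},
    {"role": "user", "content": "Now list its entry points."},
]
# Second call: only history[-1:] is sent; the prior turns are recovered
# server-side via previous_response_id.
second = model.query(history)
```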
minisweagent/models/requesty_model.py ADDED
@@ -0,0 +1,119 @@
+ import json
+ import logging
+ import os
+ from dataclasses import asdict, dataclass, field
+ from typing import Any
+
+ import requests
+ from tenacity import (
+     before_sleep_log,
+     retry,
+     retry_if_not_exception_type,
+     stop_after_attempt,
+     wait_exponential,
+ )
+
+ from minisweagent.models import GLOBAL_MODEL_STATS
+
+ logger = logging.getLogger("requesty_model")
+
+
+ @dataclass
+ class RequestyModelConfig:
+     model_name: str
+     model_kwargs: dict[str, Any] = field(default_factory=dict)
+
+
+ class RequestyAPIError(Exception):
+     """Custom exception for Requesty API errors."""
+
+     pass
+
+
+ class RequestyAuthenticationError(Exception):
+     """Custom exception for Requesty authentication errors."""
+
+     pass
+
+
+ class RequestyRateLimitError(Exception):
+     """Custom exception for Requesty rate limit errors."""
+
+     pass
+
+
+ class RequestyModel:
+     def __init__(self, **kwargs):
+         self.config = RequestyModelConfig(**kwargs)
+         self.cost = 0.0
+         self.n_calls = 0
+         self._api_url = "https://router.requesty.ai/v1/chat/completions"
+         self._api_key = os.getenv("REQUESTY_API_KEY", "")
+
+     @retry(
+         stop=stop_after_attempt(10),
+         wait=wait_exponential(multiplier=1, min=4, max=60),
+         before_sleep=before_sleep_log(logger, logging.WARNING),
+         retry=retry_if_not_exception_type(
+             (
+                 RequestyAuthenticationError,
+                 KeyboardInterrupt,
+             )
+         ),
+     )
+     def _query(self, messages: list[dict[str, str]], **kwargs):
+         headers = {
+             "Authorization": f"Bearer {self._api_key}",
+             "Content-Type": "application/json",
+             "HTTP-Referer": "https://github.com/SWE-agent/mini-swe-agent",
+             "X-Title": "mini-swe-agent",
+         }
+
+         payload = {
+             "model": self.config.model_name,
+             "messages": messages,
+             **(self.config.model_kwargs | kwargs),
+         }
+
+         try:
+             response = requests.post(self._api_url, headers=headers, data=json.dumps(payload), timeout=60)
+             response.raise_for_status()
+             return response.json()
+         except requests.exceptions.HTTPError as e:
+             if response.status_code == 401:
+                 error_msg = "Authentication failed. You can permanently set your API key with `mini-extra config set REQUESTY_API_KEY YOUR_KEY`."
+                 raise RequestyAuthenticationError(error_msg) from e
+             elif response.status_code == 429:
+                 raise RequestyRateLimitError("Rate limit exceeded") from e
+             else:
+                 raise RequestyAPIError(f"HTTP {response.status_code}: {response.text}") from e
+         except requests.exceptions.RequestException as e:
+             raise RequestyAPIError(f"Request failed: {e}") from e
+
+     def query(self, messages: list[dict[str, str]], **kwargs) -> dict:
+         response = self._query(messages, **kwargs)
+
+         # Extract cost from usage information
+         usage = response.get("usage", {})
+         cost = usage.get("cost", 0.0)
+
+         # If cost is not available, raise an error
+         if cost == 0.0:
+             raise RequestyAPIError(
+                 f"No cost information available from Requesty API for model {self.config.model_name}. "
+                 "Cost tracking is required but not provided by the API response."
+             )
+
+         self.n_calls += 1
+         self.cost += cost
+         GLOBAL_MODEL_STATS.add(cost)
+
+         return {
+             "content": response["choices"][0]["message"]["content"] or "",
+             "extra": {
+                 "response": response,  # already is json
+             },
+         }
+
+     def get_template_vars(self) -> dict[str, Any]:
+         return asdict(self.config) | {"n_model_calls": self.n_calls, "model_cost": self.cost}
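
For context, a minimal usage sketch of the class above. It assumes `REQUESTY_API_KEY` is set in the environment before construction (the key is read once in `__init__`) and uses an illustrative model name; note that `query` raises `RequestyAPIError` whenever the response's `usage.cost` is missing or zero:

```python
# Hypothetical sketch (model name illustrative; requires a real
# REQUESTY_API_KEY in the environment).
model = RequestyModel(model_name="openai/gpt-5-mini", model_kwargs={"temperature": 0.0})
result = model.query([{"role": "user", "content": "Say hello."}])
print(result["content"])
print(model.n_calls, model.cost)  # cost is taken from the API's usage.cost field
```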
minisweagent/models/test_models.py ADDED
@@ -0,0 +1,42 @@
+ import logging
+ import time
+ from dataclasses import asdict, dataclass
+ from typing import Any
+
+ from minisweagent.models import GLOBAL_MODEL_STATS
+
+
+ @dataclass
+ class DeterministicModelConfig:
+     outputs: list[str]
+     model_name: str = "deterministic"
+     cost_per_call: float = 1.0
+
+
+ class DeterministicModel:
+     def __init__(self, **kwargs):
+         """
+         Initialize with a list of outputs to return in sequence.
+         """
+         self.config = DeterministicModelConfig(**kwargs)
+         self.current_index = -1
+         self.cost = 0.0
+         self.n_calls = 0
+
+     def query(self, messages: list[dict[str, str]], **kwargs) -> dict:
+         self.current_index += 1
+         output = self.config.outputs[self.current_index]
+         if "/sleep" in output:
+             print("SLEEPING")
+             time.sleep(float(output.split("/sleep")[1]))
+             return self.query(messages, **kwargs)
+         if "/warning" in output:
+             logging.warning(output.split("/warning")[1])
+             return self.query(messages, **kwargs)
+         self.n_calls += 1
+         self.cost += self.config.cost_per_call
+         GLOBAL_MODEL_STATS.add(self.config.cost_per_call)
+         return {"content": output}
+
+     def get_template_vars(self) -> dict[str, Any]:
+         return asdict(self.config) | {"n_model_calls": self.n_calls, "model_cost": self.cost}
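
A short sketch of how this deterministic test model behaves: outputs containing `/sleep` or `/warning` act as directives that recurse to the next output without counting as a model call, so only "real" outputs are billed:

```python
# "/sleep0.1" sleeps 0.1s and falls through to the next output, so the first
# query() returns "first reply" and only two calls are counted in total.
model = DeterministicModel(outputs=["/sleep0.1", "first reply", "second reply"], cost_per_call=0.5)
assert model.query([])["content"] == "first reply"
assert model.query([])["content"] == "second reply"
assert model.n_calls == 2 and model.cost == 1.0
```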
minisweagent/models/utils/__init__.py ADDED
File without changes
minisweagent/models/utils/cache_control.py ADDED
@@ -0,0 +1,54 @@
+ import copy
+ import warnings
+ from typing import Literal
+
+
+ def _get_content_text(entry: dict) -> str:
+     if isinstance(entry["content"], str):
+         return entry["content"]
+     assert len(entry["content"]) == 1, "Expected single message in content"
+     return entry["content"][0]["text"]
+
+
+ def _clear_cache_control(entry: dict) -> None:
+     if isinstance(entry["content"], list):
+         assert len(entry["content"]) == 1, "Expected single message in content"
+         entry["content"][0].pop("cache_control", None)
+     entry.pop("cache_control", None)
+
+
+ def _set_cache_control(entry: dict) -> None:
+     if not isinstance(entry["content"], list):
+         entry["content"] = [  # type: ignore
+             {
+                 "type": "text",
+                 "text": _get_content_text(entry),
+                 "cache_control": {"type": "ephemeral"},
+             }
+         ]
+     else:
+         entry["content"][0]["cache_control"] = {"type": "ephemeral"}
+     if entry["role"] == "tool":
+         # Workaround for weird bug
+         entry["content"][0].pop("cache_control", None)
+         entry["cache_control"] = {"type": "ephemeral"}
+
+
+ def set_cache_control(
+     messages: list[dict], *, mode: Literal["default_end"] | None = "default_end", last_n_messages_offset: int = 0
+ ) -> list[dict]:
+     """This messages processor adds manual cache control marks to the messages."""
+     # ONLY ADD TO THE LAST MESSAGE
+     if mode != "default_end":
+         raise ValueError(f"Invalid mode: {mode}")
+     if last_n_messages_offset:
+         warnings.warn("last_n_messages_offset is deprecated and will be removed in the future. It has no effect.")
+
+     messages = copy.deepcopy(messages)
+     new_messages = []
+     for i_entry, entry in enumerate(reversed(messages)):
+         _clear_cache_control(entry)
+         if i_entry == 0:
+             _set_cache_control(entry)
+         new_messages.append(entry)
+     return list(reversed(new_messages))
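
To make the `default_end` behavior concrete: the processor deep-copies the messages, strips any existing `cache_control` marks, and marks only the final message as ephemeral, converting string content to the list form along the way. A small illustrative example (message contents made up):

```python
messages = [
    {"role": "system", "content": "You are a helpful agent."},
    {"role": "user", "content": "Run the tests."},
]
marked = set_cache_control(messages)
assert marked[0]["content"] == "You are a helpful agent."  # earlier messages untouched
assert marked[-1]["content"] == [
    {"type": "text", "text": "Run the tests.", "cache_control": {"type": "ephemeral"}}
]
```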
minisweagent/models/utils/key_per_thread.py ADDED
@@ -0,0 +1,20 @@
+ """Utility for anthropic where we need different keys for different parallel
+ agents to not mess up prompt caching.
+ """
+
+ import threading
+ import warnings
+ from typing import Any
+
+ _THREADS_THAT_USED_API_KEYS: list[Any] = []
+
+
+ def get_key_per_thread(api_keys: list[Any]) -> Any:
+     """Choose key based on thread name. Returns None if no keys are available."""
+     warnings.warn("get_key_per_thread is deprecated and will be removed in the future")
+     thread_name = threading.current_thread().name
+     if thread_name not in _THREADS_THAT_USED_API_KEYS:
+         _THREADS_THAT_USED_API_KEYS.append(thread_name)
+     thread_idx = _THREADS_THAT_USED_API_KEYS.index(thread_name)
+     key_idx = thread_idx % len(api_keys)
+     return api_keys[key_idx] or None
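
The helper pins each thread to one key in order of first use, wrapping around when there are more threads than keys. A runnable sketch under made-up names (the deprecated helper is called as-is; the exact assignment depends on which thread reaches it first):

```python
# Hypothetical demo of per-thread key assignment; keys and thread names
# are illustrative.
import threading

keys = ["key-a", "key-b"]
assignments = {}

def worker():
    assignments[threading.current_thread().name] = get_key_per_thread(keys)

threads = [threading.Thread(target=worker, name=f"agent-{i}") for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(assignments)  # e.g. {"agent-0": "key-a", "agent-1": "key-b", "agent-2": "key-a", ...}
```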
minisweagent/models/utils/openai_utils.py ADDED
@@ -0,0 +1,41 @@
+ import logging
+ from typing import Any
+
+ from openai.types.responses.response_output_message import ResponseOutputMessage
+
+ logger = logging.getLogger("openai_utils")
+
+
+ def coerce_responses_text(resp: Any) -> str:
+     """Helper to normalize OpenAI Responses API result to text.
+
+     Works with both OpenAI client responses and LiteLLM/Portkey responses.
+     """
+     text = getattr(resp, "output_text", None)
+     if isinstance(text, str) and text:
+         return text
+
+     try:
+         output = []
+         for item in resp.output:
+             if isinstance(item, dict):
+                 content = item.get("content", [])
+             elif isinstance(item, ResponseOutputMessage):
+                 content = item.content
+             else:
+                 continue
+
+             for content_item in content:
+                 if isinstance(content_item, dict):
+                     text_val = content_item.get("text")
+                 elif hasattr(content_item, "text"):
+                     text_val = content_item.text
+                 else:
+                     continue
+
+                 if text_val:
+                     output.append(text_val)
+         return "\n\n".join(output) or ""
+     except (AttributeError, IndexError, TypeError):
+         logger.warning(f"Could not extract text from response: {resp}")
+         return ""
minisweagent/py.typed ADDED
File without changes
minisweagent/run/__init__.py ADDED
@@ -0,0 +1 @@
+ """Run scripts for mini-SWE-agent."""
minisweagent/run/extra/__init__.py ADDED
File without changes
minisweagent/run/extra/config.py ADDED
@@ -0,0 +1,114 @@
+ """Utility to manage the global config file.
+
+ You can also directly edit the `.env` file in the config directory.
+
+ It is located at [bold green]{global_config_file}[/bold green].
+ """
+
+ import os
+ import subprocess
+
+ from dotenv import set_key, unset_key
+ from prompt_toolkit import prompt
+ from rich.console import Console
+ from rich.rule import Rule
+ from typer import Argument, Typer
+
+ from minisweagent import global_config_file
+
+ app = Typer(
+     help=__doc__.format(global_config_file=global_config_file),  # type: ignore
+     no_args_is_help=True,
+     rich_markup_mode="rich",
+     add_completion=False,
+ )
+ console = Console(highlight=False)
+
+
+ _SETUP_HELP = """To get started, we need to set up your global config file.
+
+ You can edit it manually or use the [bold green]mini-extra config set[/bold green] or [bold green]mini-extra config edit[/bold green] commands.
+
+ This setup will ask you for your model and an API key.
+
+ Here's a few popular models and the required API keys:
+
+ [bold green]anthropic/claude-sonnet-4-5-20250929[/bold green] ([bold green]ANTHROPIC_API_KEY[/bold green])
+ [bold green]openai/gpt-5[/bold green] or [bold green]openai/gpt-5-mini[/bold green] ([bold green]OPENAI_API_KEY[/bold green])
+ [bold green]gemini/gemini-3-pro-preview[/bold green] ([bold green]GEMINI_API_KEY[/bold green])
+
+ [bold]Note: Please always include the provider (e.g., "openai/") in the model name.[/bold]
+
+ [bold yellow]You can leave any setting blank to skip it.[/bold yellow]
+
+ More information at https://mini-swe-agent.com/latest/quickstart/
+ To find the best model, check the leaderboard at https://swebench.com/
+ """
+
+
+ def configure_if_first_time():
+     if not os.getenv("MSWEA_CONFIGURED"):
+         console.print(Rule())
+         setup()
+         console.print(Rule())
+
+
+ @app.command()
+ def setup():
+     """Setup the global config file."""
+     console.print(_SETUP_HELP.format(global_config_file=global_config_file))
+     default_model = prompt(
+         "Enter your default model (e.g., anthropic/claude-sonnet-4-5-20250929): ",
+         default=os.getenv("MSWEA_MODEL_NAME", ""),
+     ).strip()
+     if default_model:
+         set_key(global_config_file, "MSWEA_MODEL_NAME", default_model)
+     console.print(
+         "[bold yellow]If you already have your API keys set as environment variables, you can ignore the next question.[/bold yellow]"
+     )
+     key_name = prompt("Enter your API key name (e.g., ANTHROPIC_API_KEY): ").strip()
+     key_value = None
+     if key_name:
+         key_value = prompt("Enter your API key value (e.g., sk-1234567890): ", default=os.getenv(key_name, "")).strip()
+         if key_value:
+             set_key(global_config_file, key_name, key_value)
+     if not key_value:
+         console.print(
+             "[bold red]API key setup not completed.[/bold red] Totally fine if you have your keys as environment variables."
+         )
+     set_key(global_config_file, "MSWEA_CONFIGURED", "true")
+     console.print(
+         "\n[bold yellow]Config finished.[/bold yellow] If you want to revisit it, run [bold green]mini-extra config setup[/bold green]."
+     )
+
+
+ @app.command()
+ def set(
+     key: str | None = Argument(None, help="The key to set"),
+     value: str | None = Argument(None, help="The value to set"),
+ ):
+     """Set a key in the global config file."""
+     if key is None:
+         key = prompt("Enter the key to set: ")
+     if value is None:
+         value = prompt(f"Enter the value for {key}: ")
+     set_key(global_config_file, key, value)
+
+
+ @app.command()
+ def unset(key: str | None = Argument(None, help="The key to unset")):
+     """Unset a key in the global config file."""
+     if key is None:
+         key = prompt("Enter the key to unset: ")
+     unset_key(global_config_file, key)
+
+
+ @app.command()
+ def edit():
+     """Edit the global config file."""
+     editor = os.getenv("EDITOR", "nano")
+     subprocess.run([editor, global_config_file])
+
+
+ if __name__ == "__main__":
+     app()
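
Since the module exposes a standard Typer app, the commands above can also be exercised programmatically, for example in tests, via Typer's `CliRunner`. This is generic Typer tooling rather than part of the diff, and note the command writes to the real global config file:

```python
# Hypothetical test sketch using Typer's standard testing helper.
from typer.testing import CliRunner

runner = CliRunner()
result = runner.invoke(app, ["set", "MSWEA_MODEL_NAME", "openai/gpt-5-mini"])
assert result.exit_code == 0
```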