openosint 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openosint/__init__.py +5 -0
- openosint/agent.py +276 -0
- openosint/cli.py +272 -0
- openosint/config.py +113 -0
- openosint/display.py +226 -0
- openosint/main.py +9 -0
- openosint/providers/__init__.py +8 -0
- openosint/providers/anthropic.py +72 -0
- openosint/providers/base.py +35 -0
- openosint/providers/ollama.py +16 -0
- openosint/providers/openai.py +88 -0
- openosint/tools/__init__.py +1 -0
- openosint/tools/breach_check.py +5 -0
- openosint/tools/breach_tools.py +118 -0
- openosint/tools/dns_tools.py +151 -0
- openosint/tools/domain_check.py +5 -0
- openosint/tools/domain_tools.py +146 -0
- openosint/tools/dork_tools.py +91 -0
- openosint/tools/email_check.py +5 -0
- openosint/tools/email_tools.py +124 -0
- openosint/tools/google_dork.py +5 -0
- openosint/tools/ip_check.py +5 -0
- openosint/tools/ip_tools.py +115 -0
- openosint/tools/metadata_check.py +5 -0
- openosint/tools/metadata_tools.py +126 -0
- openosint/tools/paste_check.py +58 -0
- openosint/tools/phone_check.py +5 -0
- openosint/tools/phone_tools.py +92 -0
- openosint/tools/registry.py +246 -0
- openosint/tools/social_check.py +5 -0
- openosint/tools/username_tools.py +200 -0
- openosint/tools/whois_check.py +10 -0
- openosint/utils/__init__.py +6 -0
- openosint/utils/display.py +5 -0
- openosint/utils/report.py +39 -0
- openosint-1.0.0.dist-info/METADATA +389 -0
- openosint-1.0.0.dist-info/RECORD +40 -0
- openosint-1.0.0.dist-info/WHEEL +4 -0
- openosint-1.0.0.dist-info/entry_points.txt +2 -0
- openosint-1.0.0.dist-info/licenses/LICENSE +21 -0
openosint/__init__.py
ADDED
openosint/agent.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
"""OpenOSINT AI agent — Anthropic native tool use loop."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
import anthropic
|
|
11
|
+
|
|
12
|
+
from .config import Config
|
|
13
|
+
from .display import Display
|
|
14
|
+
from .tools.registry import TOOL_DEFINITIONS, execute_tool
|
|
15
|
+
|
|
16
|
+
SYSTEM_PROMPT = """You are OpenOSINT, an elite AI-powered Open Source Intelligence agent built for
|
|
17
|
+
security researchers, journalists, and investigators.
|
|
18
|
+
|
|
19
|
+
Your mission: conduct thorough, methodical OSINT investigations using only publicly available
|
|
20
|
+
information.
|
|
21
|
+
|
|
22
|
+
## Investigation Protocol
|
|
23
|
+
|
|
24
|
+
When given a target, follow this sequence:
|
|
25
|
+
|
|
26
|
+
1. **Identify** — Determine what type of target this is (email, username, domain, IP, phone,
|
|
27
|
+
person name, or compound).
|
|
28
|
+
2. **Pivot** — Start with the most specific tool for the target type, then pivot on findings:
|
|
29
|
+
- An email → check_email → extract domain → check_domain → check username variants
|
|
30
|
+
→ check_username
|
|
31
|
+
- A domain → check_domain + dns_lookup (TXT for SPF/DMARC) + whois_lookup → check IPs found
|
|
32
|
+
- A username → check_username → note platforms found → check email patterns → generate_dorks
|
|
33
|
+
- An IP → check_ip → reverse DNS → check that domain
|
|
34
|
+
- A phone → check_phone → generate_dorks
|
|
35
|
+
3. **Cross-reference** — Use results from one tool to inform calls to other tools.
|
|
36
|
+
4. **Dorks** — Always generate_dorks as part of every investigation.
|
|
37
|
+
5. **Breach check** — Always attempt check_breach for any email found (even if no HIBP key,
|
|
38
|
+
the model will note it).
|
|
39
|
+
6. **Report** — After exhausting relevant tools, compile the final intelligence report.
|
|
40
|
+
|
|
41
|
+
## Final Report Format
|
|
42
|
+
|
|
43
|
+
Always end with a structured report using this exact markdown structure:
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
## Target Overview
|
|
48
|
+
Brief description of the target and investigation scope.
|
|
49
|
+
|
|
50
|
+
## Digital Footprint
|
|
51
|
+
Summary of online presence found.
|
|
52
|
+
|
|
53
|
+
## Account Discovery
|
|
54
|
+
List of confirmed accounts/profiles with URLs.
|
|
55
|
+
|
|
56
|
+
## Breach Exposure
|
|
57
|
+
Breach and paste findings.
|
|
58
|
+
|
|
59
|
+
## Technical Infrastructure
|
|
60
|
+
Domain, DNS, IP, SSL findings (if applicable).
|
|
61
|
+
|
|
62
|
+
## OSINT Assessment
|
|
63
|
+
Confidence levels, data quality, and key findings summary.
|
|
64
|
+
|
|
65
|
+
## Recommended Next Steps
|
|
66
|
+
3–5 specific, actionable next investigation steps.
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
## Principles
|
|
71
|
+
- Note confidence (HIGH / MEDIUM / LOW) for key findings.
|
|
72
|
+
- Distinguish confirmed vs inferred data.
|
|
73
|
+
- Flag sensitive findings (GPS coordinates, PII) appropriately.
|
|
74
|
+
- Be concise but complete — this report may be read by lawyers, editors, or executives.
|
|
75
|
+
- You operate within legal and ethical bounds: public data only, no deception.
|
|
76
|
+
"""
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class OpenOSINTAgent:
    """Provider-agnostic tool-use loop that drives an OSINT investigation.

    Speaks the native Anthropic Messages API, or any OpenAI-compatible
    chat-completions endpoint (OpenAI itself, or Ollama via its /v1 shim).
    """

    def __init__(self, config: Config, display: Display) -> None:
        self.config = config
        self.display = display
        self._setup_client()
        # Conversation history, in the active provider's message format.
        self.messages: list[dict[str, Any]] = []
        # Audit trail of every tool invocation: {"tool", "input", "result"}.
        self.investigation_log: list[dict[str, Any]] = []

    def _setup_client(self) -> None:
        """Instantiate the SDK client for the configured provider on self.client."""
        if self.config.provider == "anthropic":
            self.client: Any = anthropic.Anthropic(api_key=self.config.anthropic_api_key)
        elif self.config.provider == "openai":
            import openai as _openai

            self.client = _openai.OpenAI(api_key=self.config.openai_api_key)
        elif self.config.provider == "ollama":
            import openai as _openai

            # Ollama serves an OpenAI-compatible API under /v1; any non-empty
            # api_key satisfies the SDK — the server ignores it.
            self.client = _openai.OpenAI(
                base_url=f"{self.config.ollama_base_url}/v1",
                api_key="ollama",
            )
        else:
            raise ValueError(f"Unknown provider: {self.config.provider!r}")

    def investigate(self, target: str) -> str:
        """Run a full OSINT investigation on the target. Returns the final report."""
        self.messages = [
            {
                "role": "user",
                "content": (
                    f"Investigate this target: {target}\n\n"
                    "Conduct a thorough OSINT investigation using all relevant tools. "
                    "Follow the investigation protocol, cross-reference findings, and "
                    "compile a complete intelligence report."
                ),
            }
        ]
        self.investigation_log = []
        return self._run_loop()

    def chat(self, user_message: str) -> str:
        """Send a message in an ongoing investigation session."""
        self.messages.append({"role": "user", "content": user_message})
        return self._run_loop()

    def reset(self) -> None:
        """Discard the conversation history and the tool audit log."""
        self.messages = []
        self.investigation_log = []

    def _run_loop(self) -> str:
        """Drive model round trips until the model stops requesting tools.

        BUG FIX: the handlers previously recursed back into _run_loop(), so the
        for-loop never advanced past iteration 0 — max_iterations was never
        enforced, the iteration display always read 1, and long investigations
        grew the call stack without bound. The handlers now return None to
        request another round trip, or the final report text to finish,
        making this a genuinely bounded loop.
        """
        for iteration in range(self.config.max_iterations):
            try:
                with self.display.thinking(
                    f"[dim]Iteration {iteration + 1} — thinking...[/]"
                    if iteration > 0
                    else "Analyzing target..."
                ):
                    response = self._call_api()
            except Exception as e:
                # Translate the most common SDK failures into actionable errors.
                err = str(e)
                if "401" in err or "authentication" in err.lower() or "api_key" in err.lower():
                    raise RuntimeError(f"Authentication failed — check your API key.\n{e}") from e
                if "429" in err or "rate_limit" in err.lower():
                    raise RuntimeError(f"Rate limit hit — wait a moment and retry.\n{e}") from e
                raise

            if self.config.provider == "anthropic":
                result = self._handle_anthropic_response(response)
            else:
                result = self._handle_openai_response(response)

            if result is not None:
                return result

        return "Investigation complete (maximum iterations reached)."

    def _call_api(self) -> Any:
        """Perform one model call with the full history and tool definitions."""
        if self.config.provider == "anthropic":
            return self.client.messages.create(
                model=self.config.model,
                max_tokens=self.config.max_tokens,
                system=SYSTEM_PROMPT,
                tools=TOOL_DEFINITIONS,
                messages=self.messages,
            )
        # OpenAI / Ollama compatible path: the system prompt travels as a message.
        return self.client.chat.completions.create(
            model=self.config.model,
            max_tokens=self.config.max_tokens,
            messages=[{"role": "system", "content": SYSTEM_PROMPT}] + self.messages,
            tools=_convert_tools_for_openai(TOOL_DEFINITIONS),
            tool_choice="auto",
        )

    def _handle_anthropic_response(self, response: Any) -> str | None:
        """Process one Anthropic response.

        Returns the final report text when the model is done, or None after
        queuing tool results (meaning the loop must make another round trip).
        """
        self.messages.append({"role": "assistant", "content": response.content})

        if response.stop_reason == "tool_use":
            tool_results: list[dict[str, Any]] = []
            for block in response.content:
                if block.type != "tool_use":
                    continue
                self.display.tool_call(block.name, block.input)
                with self.display.thinking(f"Running {block.name}..."):
                    result = execute_tool(block.name, block.input, self.config)
                self.display.tool_result_brief(block.name, result)
                self.investigation_log.append(
                    {"tool": block.name, "input": block.input, "result": result}
                )
                tool_results.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": block.id,
                        # default=str keeps non-JSON values (datetimes, ...) serializable.
                        "content": json.dumps(result, default=str),
                    }
                )
            self.messages.append({"role": "user", "content": tool_results})
            return None

        # end_turn (or an unexpected stop reason): return the first text block.
        for block in response.content:
            if hasattr(block, "text"):
                return block.text
        return ""

    def _handle_openai_response(self, response: Any) -> str | None:
        """Process one OpenAI-compatible response.

        Returns the final report text when the model is done, or None after
        appending tool results (meaning the loop must make another round trip).
        """
        msg = response.choices[0].message
        assistant: dict[str, Any] = {"role": "assistant", "content": msg.content or ""}
        # Only include tool_calls when present — some OpenAI-compatible servers
        # reject assistant messages carrying "tool_calls": null on replay.
        if msg.tool_calls:
            assistant["tool_calls"] = msg.tool_calls
        self.messages.append(assistant)

        if not msg.tool_calls:
            return msg.content or ""

        for tc in msg.tool_calls:
            fn_name = tc.function.name
            fn_args = json.loads(tc.function.arguments)

            self.display.tool_call(fn_name, fn_args)
            with self.display.thinking(f"Running {fn_name}..."):
                result = execute_tool(fn_name, fn_args, self.config)
            self.display.tool_result_brief(fn_name, result)
            self.investigation_log.append({"tool": fn_name, "input": fn_args, "result": result})

            self.messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tc.id,
                    "content": json.dumps(result, default=str),
                }
            )

        return None

    def save_report(self, report_text: str, target: str) -> Path:
        """Write the report to config.reports_dir and return the file path.

        The target is reduced to a filesystem-safe slug (truncated to 50
        chars); a timestamp keeps repeated runs from colliding.
        """
        reports_dir = Path(self.config.reports_dir)
        # parents=True: reports_dir may be a nested path that doesn't exist yet.
        reports_dir.mkdir(parents=True, exist_ok=True)

        safe_target = "".join(c if c.isalnum() or c in "-_." else "_" for c in target)[:50]
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        path = reports_dir / f"report_{safe_target}_{timestamp}.md"

        header = (
            f"# OpenOSINT Investigation Report\n\n"
            f"**Target:** `{target}` \n"
            f"**Date:** {datetime.now().isoformat()} \n"
            f"**Model:** {self.config.model} \n\n---\n\n"
        )
        path.write_text(header + report_text, encoding="utf-8")
        return path
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
def _convert_tools_for_openai(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
|
266
|
+
return [
|
|
267
|
+
{
|
|
268
|
+
"type": "function",
|
|
269
|
+
"function": {
|
|
270
|
+
"name": t["name"],
|
|
271
|
+
"description": t["description"],
|
|
272
|
+
"parameters": t["input_schema"],
|
|
273
|
+
},
|
|
274
|
+
}
|
|
275
|
+
for t in tools
|
|
276
|
+
]
|
openosint/cli.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
"""OpenOSINT CLI entry point."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
import click
|
|
10
|
+
|
|
11
|
+
from . import __version__
|
|
12
|
+
from .agent import OpenOSINTAgent
|
|
13
|
+
from .config import CONFIG_FILE, PROVIDER_MODELS, Config
|
|
14
|
+
from .display import Display
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _check_disclaimer(config: "Config", display: "Display") -> None:
|
|
18
|
+
"""Show the legal disclaimer on first run; exit if not accepted."""
|
|
19
|
+
if config.disclaimer_accepted:
|
|
20
|
+
return
|
|
21
|
+
accepted = display.show_disclaimer_banner()
|
|
22
|
+
if not accepted:
|
|
23
|
+
display.warn("You must accept the disclaimer to use OpenOSINT.")
|
|
24
|
+
sys.exit(0)
|
|
25
|
+
config.disclaimer_accepted = True
|
|
26
|
+
config.save()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
HELP_TEXT = """
|
|
30
|
+
[bold bright_cyan]Commands available in interactive mode:[/]
|
|
31
|
+
|
|
32
|
+
[cyan]investigate[/] <target> Run an investigation (or just type the target)
|
|
33
|
+
[cyan]clear[/] Clear conversation history
|
|
34
|
+
[cyan]save[/] Save the last report to file
|
|
35
|
+
[cyan]help[/] Show this help
|
|
36
|
+
[cyan]quit[/] / [cyan]exit[/] Exit OpenOSINT
|
|
37
|
+
|
|
38
|
+
[bold bright_cyan]Examples:[/]
|
|
39
|
+
|
|
40
|
+
[dim]openosint ❯[/] john.doe@gmail.com
|
|
41
|
+
[dim]openosint ❯[/] investigate @johndoe
|
|
42
|
+
[dim]openosint ❯[/] 8.8.8.8
|
|
43
|
+
[dim]openosint ❯[/] example.com
|
|
44
|
+
[dim]openosint ❯[/] +1 555 867 5309
|
|
45
|
+
"""
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@click.group(invoke_without_command=True, context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(__version__, "-V", "--version", prog_name="OpenOSINT")
@click.option("--quiet", "-q", is_flag=True, help="Suppress banner")
@click.pass_context
def cli(ctx: click.Context, quiet: bool) -> None:
    """OpenOSINT — AI-powered Open Source Intelligence agent."""
    # Stash shared flags for subcommands; ensure_object returns the ctx.obj dict.
    ctx.ensure_object(dict)["quiet"] = quiet
    # Bare `openosint` (no subcommand) drops into the interactive REPL.
    if ctx.invoked_subcommand is None:
        _interactive_mode(quiet=quiet)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@cli.command()
@click.argument("target")
@click.option("--save", "-s", is_flag=True, help="Save report to file")
@click.option("--output", "-o", type=click.Path(), help="Output file path (implies --save)")
@click.option("--quiet", "-q", is_flag=True, help="Suppress banner")
def investigate(target: str, save: bool, output: Optional[str], quiet: bool) -> None:
    """Investigate a target and produce an intelligence report.

    TARGET can be an email, username, domain, IP address, or phone number.

    \b
    Examples:
      openosint investigate john@example.com
      openosint investigate example.com --save
      openosint investigate 8.8.8.8
    """
    display = Display(quiet=quiet)
    if not quiet:
        display.banner(__version__)

    # Load config, enforce the first-run disclaimer, and bail on missing keys.
    cfg = Config.load()
    _check_disclaimer(cfg, display)
    problems = cfg.validate()
    if problems:
        for err in problems:
            display.error(err)
        display.info("Run [bright_cyan]openosint config[/] to set up your API keys.")
        sys.exit(1)

    agent = OpenOSINTAgent(cfg, display)
    display.investigation_start(target)

    try:
        report = agent.investigate(target)
    except RuntimeError as e:
        display.error(str(e))
        sys.exit(1)
    except KeyboardInterrupt:
        display.print()
        display.info("Investigation interrupted.")
        sys.exit(0)

    display.final_report(report, target)

    # --output wins over --save: write to the explicit path with a short header;
    # plain --save delegates naming/header to the agent.
    if output:
        path = Path(output)
        header = f"# OpenOSINT Investigation Report\n\n**Target:** `{target}`\n\n---\n\n"
        path.write_text(header + report, encoding="utf-8")
        display.success(f"Report saved to [cyan]{path}[/]")
    elif save:
        path = agent.save_report(report, target)
        display.success(f"Report saved to [cyan]{path}[/]")
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
@cli.command()
@click.option(
    "--provider",
    type=click.Choice(["anthropic", "openai", "ollama"]),
    help="AI provider",
)
@click.option("--model", help="Model name override")
@click.option("--show", is_flag=True, help="Show current configuration")
def config(provider: Optional[str], model: Optional[str], show: bool) -> None:
    """Configure OpenOSINT: provider, model, and API keys.

    API keys are read from environment variables or .env file:

    \b
      ANTHROPIC_API_KEY  — for Anthropic (default provider)
      OPENAI_API_KEY     — for OpenAI
      HIBP_API_KEY       — for HaveIBeenPwned breach checks
      ABUSEIPDB_API_KEY  — for IP reputation checks
    """
    display = Display()
    current = Config.load()

    # No changes requested (or --show): dump the effective configuration.
    if show or not (provider or model):
        display.config_table(
            {
                "provider": current.provider,
                "model": current.model,
                "anthropic_api_key": current.anthropic_api_key,
                "openai_api_key": current.openai_api_key,
                "hibp_api_key": current.hibp_api_key,
                "abuseipdb_api_key": current.abuseipdb_api_key,
                "config_file": str(CONFIG_FILE),
                "max_tokens": current.max_tokens,
                "max_iterations": current.max_iterations,
                "save_reports": current.save_reports,
                "reports_dir": current.reports_dir,
            }
        )
        display.info("Set API keys via environment variables or [cyan].env[/] file.")
        return

    if provider:
        current.provider = provider  # type: ignore[assignment]
        # Switching provider without an explicit model resets to its default.
        if not model:
            current.model = PROVIDER_MODELS.get(provider, current.model)
        display.success(f"Provider set to [cyan]{provider}[/]")

    if model:
        current.model = model
        display.success(f"Model set to [cyan]{model}[/]")

    current.save()
    display.success(f"Configuration saved to [cyan]{CONFIG_FILE}[/]")
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
@cli.command()
def version() -> None:
    """Print version information."""
    Display().print(f"[bold bright_cyan]OpenOSINT[/] [dim]v{__version__}[/]")
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def _interactive_mode(quiet: bool = False) -> None:
    """Interactive chat-style investigation session.

    REPL loop: built-in commands (help / clear / save / quit) are matched
    case-insensitively; any other input is treated as a target on the first
    turn, or forwarded as a follow-up chat message once a session exists.
    """
    display = Display(quiet=quiet)
    if not quiet:
        display.banner(__version__)

    # Load config, enforce the first-run disclaimer, and bail on missing keys.
    config = Config.load()
    _check_disclaimer(config, display)
    errors = config.validate()
    if errors:
        for err in errors:
            display.error(err)
        display.warn("Set [cyan]ANTHROPIC_API_KEY[/] in your environment or [cyan].env[/] file.")
        display.warn("Run [cyan]openosint config --show[/] for current settings.")
        sys.exit(1)

    display.info(f"Provider: [cyan]{config.provider}[/] Model: [cyan]{config.model}[/]")
    display.info(
        "Type a target to investigate, or [cyan]help[/] for commands. "
        "[cyan]Ctrl-C[/] or [cyan]quit[/] to exit."
    )
    display.print()

    # One agent for the whole session; its history accumulates across inputs.
    agent = OpenOSINTAgent(config, display)
    last_report: Optional[str] = None  # most recent report, for the `save` command

    while True:
        try:
            user_input = display.prompt()
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D at the prompt ends the session cleanly.
            display.print()
            display.info("Goodbye.")
            break

        user_input = user_input.strip()
        if not user_input:
            continue

        # Commands are matched case-insensitively against the whole input.
        cmd = user_input.lower()

        if cmd in ("quit", "exit", "q", ":q"):
            display.info("Goodbye.")
            break

        elif cmd == "help":
            display.print(HELP_TEXT)

        elif cmd == "clear":
            agent.reset()
            display.success("Conversation cleared.")

        elif cmd == "save":
            if last_report and agent.messages:
                # Recover the original target from the seeded opening message
                # so the report file gets a meaningful name.
                target = _extract_target_from_messages(agent.messages)
                path = agent.save_report(last_report, target or "investigation")
                display.success(f"Saved to [cyan]{path}[/]")
            else:
                display.warn("Nothing to save yet.")

        else:
            # Strip "investigate " prefix if present
            target = user_input.removeprefix("investigate ").strip()

            try:
                if not agent.messages:
                    # First turn: start a fresh investigation on the target.
                    display.investigation_start(target)
                    report = agent.investigate(target)
                else:
                    # Ongoing session: forward the raw input as a chat message
                    # (the stripped `target` is only used for display/saving).
                    report = agent.chat(user_input)
            except RuntimeError as e:
                display.error(str(e))
                continue
            except KeyboardInterrupt:
                # Ctrl-C mid-investigation aborts the run but keeps the session.
                display.print()
                display.info("Investigation interrupted. Continue or type [cyan]quit[/].")
                continue

            display.final_report(report, target)
            last_report = report

            if config.save_reports:
                path = agent.save_report(report, target)
                display.info(f"Auto-saved to [cyan]{path}[/]")
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def _extract_target_from_messages(messages: list) -> Optional[str]:
|
|
264
|
+
for msg in messages:
|
|
265
|
+
if isinstance(msg, dict) and msg.get("role") == "user":
|
|
266
|
+
content = msg.get("content", "")
|
|
267
|
+
if isinstance(content, str) and "Investigate this target:" in content:
|
|
268
|
+
lines = content.split("\n")
|
|
269
|
+
for line in lines:
|
|
270
|
+
if line.startswith("Investigate this target:"):
|
|
271
|
+
return line.replace("Investigate this target:", "").strip()
|
|
272
|
+
return None
|
openosint/config.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
"""Configuration management for OpenOSINT."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Literal, Optional
|
|
10
|
+
|
|
11
|
+
from dotenv import load_dotenv
|
|
12
|
+
|
|
13
|
+
# Persisted settings live under the user's XDG-style config directory.
CONFIG_DIR = Path.home() / ".config" / "openosint"
CONFIG_FILE = CONFIG_DIR / "config.json"

# Default model per provider, used when no explicit model is configured.
PROVIDER_MODELS: dict[str, str] = {
    "anthropic": "claude-sonnet-4-20250514",
    "openai": "gpt-4o",
    "ollama": "llama3.1",
}


@dataclass
class Config:
    """Runtime configuration: provider/model choice, API keys, and limits.

    API keys are never written to disk by save(); they are sourced from the
    environment (or a .env file) on every load().
    """

    provider: Literal["anthropic", "openai", "ollama"] = "anthropic"
    model: str = ""  # empty means "use the provider default" (see __post_init__)

    anthropic_api_key: Optional[str] = None
    openai_api_key: Optional[str] = None
    hibp_api_key: Optional[str] = None
    abuseipdb_api_key: Optional[str] = None

    ollama_base_url: str = "http://localhost:11434"

    max_tokens: int = 8192
    max_iterations: int = 25  # upper bound on model round trips per investigation

    save_reports: bool = True
    reports_dir: str = "reports"

    disclaimer_accepted: bool = False

    def __post_init__(self) -> None:
        # Resolve the provider's default model when none was given.
        if not self.model:
            self.model = PROVIDER_MODELS.get(self.provider, "claude-sonnet-4-20250514")

    @classmethod
    def load(cls) -> "Config":
        """Build a Config from defaults, the config file, then the environment.

        Precedence (lowest to highest): dataclass defaults < config.json <
        environment variables. A corrupt or unreadable config file is ignored.
        """
        load_dotenv()

        config = cls()

        if CONFIG_FILE.exists():
            try:
                # Explicit encoding: config.json must not depend on the locale.
                with open(CONFIG_FILE, encoding="utf-8") as f:
                    data = json.load(f)
                for key, value in data.items():
                    if hasattr(config, key):
                        setattr(config, key, value)
            except (json.JSONDecodeError, OSError):
                # Best-effort: a broken config file falls back to defaults.
                pass

        # Environment variables always win
        if v := os.getenv("ANTHROPIC_API_KEY"):
            config.anthropic_api_key = v
        if v := os.getenv("OPENAI_API_KEY"):
            config.openai_api_key = v
        if v := os.getenv("HIBP_API_KEY"):
            config.hibp_api_key = v
        if v := os.getenv("ABUSEIPDB_API_KEY"):
            config.abuseipdb_api_key = v
        if v := os.getenv("OPENOSINT_PROVIDER"):
            config.provider = v  # type: ignore[assignment]
        if v := os.getenv("OPENOSINT_MODEL"):
            config.model = v
        if v := os.getenv("OLLAMA_BASE_URL"):
            config.ollama_base_url = v

        # Re-resolve the default model: the provider may have changed above.
        if not config.model:
            config.model = PROVIDER_MODELS.get(config.provider, "claude-sonnet-4-20250514")

        return config

    def save(self) -> None:
        """Persist non-secret settings to CONFIG_FILE (API keys are omitted)."""
        CONFIG_DIR.mkdir(parents=True, exist_ok=True)
        data = {
            "provider": self.provider,
            "model": self.model,
            "ollama_base_url": self.ollama_base_url,
            "max_tokens": self.max_tokens,
            "max_iterations": self.max_iterations,
            "save_reports": self.save_reports,
            "reports_dir": self.reports_dir,
            "disclaimer_accepted": self.disclaimer_accepted,
        }
        # Explicit encoding keeps the file readable across platforms/locales.
        with open(CONFIG_FILE, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)

    def validate(self) -> list[str]:
        """Return human-readable errors for missing required credentials.

        Ollama needs no key, so it never produces an error here.
        """
        errors: list[str] = []
        if self.provider == "anthropic" and not self.anthropic_api_key:
            errors.append("ANTHROPIC_API_KEY is not set (export it or add to .env)")
        elif self.provider == "openai" and not self.openai_api_key:
            errors.append("OPENAI_API_KEY is not set (export it or add to .env)")
        return errors

    @property
    def active_api_key(self) -> Optional[str]:
        """The API key for the selected provider (None for ollama)."""
        if self.provider == "anthropic":
            return self.anthropic_api_key
        if self.provider == "openai":
            return self.openai_api_key
        return None
|