eva-exploit 3.4__tar.gz → 3.4.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {eva_exploit-3.4 → eva_exploit-3.4.2}/PKG-INFO +1 -1
- {eva_exploit-3.4 → eva_exploit-3.4.2}/config.py +1 -1
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva.py +1 -1
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva_exploit.egg-info/PKG-INFO +1 -1
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/llm.py +184 -25
- {eva_exploit-3.4 → eva_exploit-3.4.2}/pyproject.toml +1 -1
- {eva_exploit-3.4 → eva_exploit-3.4.2}/sessions/eva_session.py +35 -4
- {eva_exploit-3.4 → eva_exploit-3.4.2}/utils/system.py +20 -35
- {eva_exploit-3.4 → eva_exploit-3.4.2}/README.md +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva_exploit.egg-info/SOURCES.txt +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva_exploit.egg-info/dependency_links.txt +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva_exploit.egg-info/entry_points.txt +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva_exploit.egg-info/requires.txt +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/eva_exploit.egg-info/top_level.txt +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/__init__.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/attack_map.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/exploit_search.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/prompt_builder.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/reporting.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/tooling.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/vuln_intel.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/modules/workflow.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/sessions/__init__.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/setup.cfg +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/utils/__init__.py +0 -0
- {eva_exploit-3.4 → eva_exploit-3.4.2}/utils/ui.py +0 -0
{eva_exploit-3.4 → eva_exploit-3.4.2}/eva.py

```diff
@@ -166,7 +166,7 @@ def cli():
     parser.add_argument("-v", "--version", action="store_true", help="Show EVA version")
     parser.add_argument("-d", "--delete", action="store_true", help="Delete stored sessions & files")
     parser.add_argument("-c", "--config", action="store_true", help="Open EVA config.py in your default editor")
-    parser.add_argument("--custom-api", action="store_true", help="Open custom API
+    parser.add_argument("--custom-api", action="store_true", help="Open the active custom API handler file")
     parser.add_argument(
         "-s",
         "--search",
```
{eva_exploit-3.4 → eva_exploit-3.4.2}/modules/llm.py

```diff
@@ -41,6 +41,7 @@ LAST_OUTPUT_CHUNK_SIZE = 2800
 OLLAMA_HTTP_CONNECT_TIMEOUT = 5
 OLLAMA_HTTP_READ_TIMEOUT = 240
 OLLAMA_RUN_TIMEOUT = 240
+OLLAMA_STREAM_POLL_DELAY = 0.01
 STREAM_HIDE_MARKERS = [
     "[:::] analysis_output:",
     "output valid json only",
```
```diff
@@ -68,6 +69,10 @@ REFUSAL_PATTERNS = [
     r"\bviolate\b.*\bguidelines\b",
 ]
 DEBUG_LOG_PATH = "/tmp/eva_query.log"
+DEFAULT_COMMAND_FALLBACK = (
+    "printf '[EVA_NOTICE] No model command inferred; collecting baseline evidence.\\n' "
+    "&& whoami && id && uname -a"
+)


 def _is_followup_analysis_request(user_msg):
```
```diff
@@ -97,6 +102,22 @@ def _build_no_output_analysis(last_output):
     )


+def _ensure_commands(commands, analysis="", last_output=""):
+    merged = _coerce_commands(commands)
+    if not merged:
+        merged = extract_commands_anywhere(str(analysis or ""))
+    if merged:
+        return _dedupe_keep_order(merged)[:3]
+
+    if "[EVA_NOTICE] Command produced no stdout/stderr" in str(last_output or ""):
+        return [
+            "printf '[EVA_NOTICE] Previous step had no visible output; collecting observable host evidence.\\n' "
+            "&& pwd && ls -la | head -n 40 && id"
+        ]
+
+    return [DEFAULT_COMMAND_FALLBACK]
+
+
 def _dedupe_keep_order(items):
     out = []
     seen = set()
```
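Every 3.4.2 response path is funneled through `_ensure_commands`, so the agent loop can no longer end up with an empty command list. A minimal standalone sketch of the same decision ladder — `coerce_commands` and `extract_commands_anywhere` below are simplified stand-ins for the real helpers elsewhere in `modules/llm.py`:

```python
DEFAULT_COMMAND_FALLBACK = "whoami && id && uname -a"  # simplified stand-in

def coerce_commands(commands):
    # Stand-in: keep only non-empty strings.
    return [c.strip() for c in (commands or []) if isinstance(c, str) and c.strip()]

def extract_commands_anywhere(text):
    # Stand-in: salvage "$ "-prefixed lines from free-form analysis text.
    return [line[2:].strip() for line in text.splitlines() if line.startswith("$ ")]

def ensure_commands(commands, analysis="", last_output=""):
    merged = coerce_commands(commands)
    if not merged:
        merged = extract_commands_anywhere(str(analysis or ""))
    if merged:
        return list(dict.fromkeys(merged))[:3]  # dedupe, keep order, cap at three
    if "no stdout/stderr" in str(last_output or ""):
        return ["pwd && ls -la | head -n 40 && id"]  # evidence-gathering fallback
    return [DEFAULT_COMMAND_FALLBACK]

# The model returned prose but no structured commands:
print(ensure_commands([], analysis="Try this first:\n$ uname -a"))
# ['uname -a']
```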
````diff
@@ -324,6 +345,12 @@ def _clean_analysis_text(text):
         parts = re.split(marker, cleaned, maxsplit=1)
         cleaned = parts[0]

+    cleaned = re.sub(
+        r"(?im)^\s*(?:\[\s*sudo\s*\]\s*password\s+for\s+\S+\s*:|password\s+for\s+\S+\s*:|sudo:\s+a\s+password\s+is\s+required)\s*$",
+        "",
+        cleaned,
+    )
+    cleaned = re.sub(r"(?im)^\s*root@[\w.-]+:[^#\n]*#\s*$", "", cleaned)
     cleaned = re.sub(r"```(?:json)?\s*\{.*?\}\s*```", "", cleaned, flags=re.DOTALL | re.IGNORECASE)
     cleaned = re.sub(r"```(?:bash|sh|shell)?\s*.*?```", "", cleaned, flags=re.DOTALL | re.IGNORECASE)
     cleaned = re.sub(r"\n{3,}", "\n\n", cleaned)
````
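The two new substitutions scrub leaked privilege-escalation artifacts (sudo password prompts, bare root shell prompts) from the model's analysis before it is displayed or stored. A quick standalone check of what they match, with made-up sample text:

```python
import re

SUDO_PROMPT_RE = r"(?im)^\s*(?:\[\s*sudo\s*\]\s*password\s+for\s+\S+\s*:|password\s+for\s+\S+\s*:|sudo:\s+a\s+password\s+is\s+required)\s*$"
ROOT_PROMPT_RE = r"(?im)^\s*root@[\w.-]+:[^#\n]*#\s*$"

sample = (
    "Privilege check complete.\n"
    "[sudo] password for alice:\n"   # leaked prompt: removed
    "root@testhost:~#\n"             # leaked shell prompt: removed
    "Next step: enumerate services.\n"
)

cleaned = re.sub(SUDO_PROMPT_RE, "", sample)
cleaned = re.sub(ROOT_PROMPT_RE, "", cleaned)
cleaned = re.sub(r"\n{3,}", "\n\n", cleaned)  # same collapse applied afterwards
print(cleaned)
# Privilege check complete.
#
# Next step: enumerate services.
```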
```diff
@@ -431,38 +458,154 @@ def _stream_visible_fragment(text, pending, suppress_output):

 def _ollama_chat(messages, on_stream_start=None):
     def _ollama_run_stream_fallback(prompt):
+        proc = None
         try:
-            proc = subprocess.
+            proc = subprocess.Popen(
                 ["ollama", "run", OLLAMA_MODEL],
-
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
                 text=True,
-
-                timeout=OLLAMA_RUN_TIMEOUT,
+                bufsize=1,
             )
-
-
-
-
-
+            raw_parts = []
+            pending = ""
+            suppress_output = False
+            stream_started = False
+            style_open = False
+            printed = False
+            started_at = time.monotonic()
+
+            if proc.stdin:
+                proc.stdin.write(prompt)
+                if not prompt.endswith("\n"):
+                    proc.stdin.write("\n")
+                proc.stdin.close()
+
+            while True:
+                if time.monotonic() - started_at > OLLAMA_RUN_TIMEOUT:
+                    proc.kill()
+                    break
+                chunk = proc.stdout.read(1) if proc.stdout else ""
+                if chunk:
+                    raw_parts.append(chunk)
+                    if on_stream_start:
+                        visible, pending, suppress_output = _stream_visible_fragment(
+                            chunk,
+                            pending,
+                            suppress_output,
+                        )
+                        if visible:
+                            if not stream_started:
+                                on_stream_start()
+                                stream_started = True
+                            if not style_open:
+                                print(Style.BRIGHT + Fore.CYAN, end="", flush=True)
+                                style_open = True
+                            print(visible, end="", flush=True)
+                            printed = True
+                    continue
+                if proc.poll() is not None:
+                    break
+                time.sleep(OLLAMA_STREAM_POLL_DELAY)
+
+            if on_stream_start and pending and not suppress_output:
+                if not stream_started:
+                    on_stream_start()
+                    stream_started = True
+                if not style_open:
+                    print(Style.BRIGHT + Fore.CYAN, end="", flush=True)
+                    style_open = True
+                print(pending, end="", flush=True)
+                printed = True
+
+            if style_open:
+                print(Style.RESET_ALL, end="", flush=True)
+            if printed:
+                print()
+
+            return "".join(raw_parts), printed
+        except OSError:
             return "", False
         except KeyboardInterrupt:
+            try:
+                proc.kill()
+            except Exception:
+                pass
             raise
-
+
+    payload = {"model": OLLAMA_MODEL, "messages": messages, "stream": True}
+    r = None
     try:
         r = requests.post(
             "http://127.0.0.1:11434/api/chat",
             json=payload,
             timeout=(OLLAMA_HTTP_CONNECT_TIMEOUT, OLLAMA_HTTP_READ_TIMEOUT),
+            stream=True,
         )
         r.raise_for_status()
-
-
-
-
+
+        raw_parts = []
+        pending = ""
+        suppress_output = False
+        stream_started = False
+        style_open = False
+        printed = False
+
+        for line in r.iter_lines(decode_unicode=True):
+            if not line:
+                continue
+            try:
+                data = json.loads(line)
+            except json.JSONDecodeError:
+                continue
+
+            chunk = data.get("message", {}).get("content", "") or data.get("response", "")
+            if not chunk:
+                continue
+
+            raw_parts.append(chunk)
+            if on_stream_start:
+                visible, pending, suppress_output = _stream_visible_fragment(
+                    chunk,
+                    pending,
+                    suppress_output,
+                )
+                if visible:
+                    if not stream_started:
+                        on_stream_start()
+                        stream_started = True
+                    if not style_open:
+                        print(Style.BRIGHT + Fore.CYAN, end="", flush=True)
+                        style_open = True
+                    print(visible, end="", flush=True)
+                    printed = True
+
+        if on_stream_start and pending and not suppress_output:
+            if not stream_started:
+                on_stream_start()
+                stream_started = True
+            if not style_open:
+                print(Style.BRIGHT + Fore.CYAN, end="", flush=True)
+                style_open = True
+            print(pending, end="", flush=True)
+            printed = True
+
+        if style_open:
+            print(Style.RESET_ALL, end="", flush=True)
+        if printed:
+            print()
+
+        raw = "".join(raw_parts)
         if raw:
-            return raw,
-    except
+            return raw, printed
+    except requests.RequestException:
         pass
+    finally:
+        try:
+            r.close()
+        except Exception:
+            pass

     prompt = messages[-1].get("content", "") if messages else ""
     return _ollama_run_stream_fallback(prompt)
```
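The HTTP path is the bigger change: instead of one blocking read, `/api/chat` is now consumed as a stream of newline-delimited JSON objects, with each `message.content` fragment passed through the marker filter before printing. Stripped of EVA's filtering and colorama styling, the transport reduces to roughly the following (assumes a local Ollama daemon; the model name is only an example):

```python
import json
import requests

# Minimal consumer for Ollama's streaming chat endpoint.
payload = {
    "model": "llama3",  # example model name
    "messages": [{"role": "user", "content": "Say hello."}],
    "stream": True,
}

with requests.post(
    "http://127.0.0.1:11434/api/chat",
    json=payload,
    stream=True,
    timeout=(5, 240),  # same connect/read split as the new constants
) as r:
    r.raise_for_status()
    for line in r.iter_lines(decode_unicode=True):
        if not line:
            continue
        data = json.loads(line)  # one JSON object per line
        chunk = data.get("message", {}).get("content", "")
        if chunk:
            print(chunk, end="", flush=True)
        if data.get("done"):
            break
print()
```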
```diff
@@ -560,8 +703,10 @@ def _query_g4f(history):
     return ""


-def _query_custom_api(history):
+def _query_custom_api(history, prompt_text=""):
     endpoint = str(getattr(config_module, "API_ENDPOINT", API_ENDPOINT) or "").strip()
+    if endpoint == "NOT_SET":
+        endpoint = ""
     handler_path = str(getattr(config_module, "CUSTOM_API_HANDLER", CUSTOM_API_HANDLER) or "").strip()

     if handler_path and handler_path != "NOT_SET":
```
```diff
@@ -576,12 +721,18 @@ def _query_custom_api(history):
         query_fn = getattr(module, "query_custom_api", None)
         if callable(query_fn):
             try:
-                result = query_fn(history=history, endpoint=endpoint)
+                result = query_fn(history=history, endpoint=endpoint, prompt=prompt_text)
             except TypeError:
                 try:
-                    result = query_fn(history, endpoint)
+                    result = query_fn(history=history, endpoint=endpoint)
                 except TypeError:
-
+                    try:
+                        result = query_fn(history, endpoint, prompt_text)
+                    except TypeError:
+                        try:
+                            result = query_fn(history, endpoint)
+                        except TypeError:
+                            result = query_fn(history)
             if isinstance(result, (dict, list)):
                 return json.dumps(result)
             if result is None:
```
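The widened cascade means third-party handlers written against any earlier signature keep working. The pattern generalizes; a compact sketch under the same idea (the handler below is a toy):

```python
def call_with_best_signature(fn, history, endpoint, prompt):
    """Try progressively older signatures until one fits.

    Note: a TypeError raised *inside* the handler also advances to the
    next attempt, exactly as in the original try/except cascade.
    """
    for attempt in (
        lambda: fn(history=history, endpoint=endpoint, prompt=prompt),
        lambda: fn(history=history, endpoint=endpoint),
        lambda: fn(history, endpoint, prompt),
        lambda: fn(history, endpoint),
        lambda: fn(history),
    ):
        try:
            return attempt()
        except TypeError:
            continue
    raise TypeError("no compatible query_custom_api signature")

def legacy_handler(history, endpoint=None):  # an old-style handler
    return f"{len(history)} messages -> {endpoint}"

print(call_with_best_signature(legacy_handler, [{"role": "user", "content": "hi"}], "http://x", "hi"))
# 1 messages -> http://x
```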
```diff
@@ -594,7 +745,8 @@ def _query_custom_api(history):
         print(Fore.RED + "⚠️ Custom API endpoint is not configured.")
         return ""

-
+    payload = {"conversation": history, "prompt": prompt_text}
+    r = requests.post(endpoint, json=payload, timeout=None)
     return r.text
```
```diff
@@ -714,9 +866,11 @@ class LLM:
             or "[EVA_NOTICE] Command produced no stdout/stderr" in str(last_output or "")
         ):
             analysis = _build_no_output_analysis(last_output)
+            commands = _ensure_commands([], analysis=analysis, last_output=last_output)
             self.history.append({"role": "user", "content": user_msg})
-
-
+            assistant_memory = analysis + "\n\nCommands:\n" + "\n".join(commands)
+            self.history.append({"role": "assistant", "content": assistant_memory})
+            return {"analysis": analysis, "commands": commands, "__streamed": False}

         system_prompt = build_system_prompt(
             _context_for_system_prompt(last_output),
```
```diff
@@ -763,7 +917,7 @@ class LLM:
         elif self.backend == "g4f":
             raw = _query_g4f(request_messages)
         elif self.backend == "api":
-            raw = _query_custom_api(request_messages)
+            raw = _query_custom_api(request_messages, prompt_text=prompt)
         elif self.backend == "gpt":
             raw = _query_openai(request_messages)
         elif self.backend == "anthropic":
```
```diff
@@ -813,7 +967,7 @@ class LLM:
             if _save_parse_debug_log(self.backend, user_msg, last_output, raw):
                 print(Style.BRIGHT + Fore.RED + f"A debug log has been saved in {DEBUG_LOG_PATH}")
             else:
-                print(Style.BRIGHT + Fore.
+                print(Style.BRIGHT + Fore.YELLOW + "Could not save debug log to /tmp/eva_query.log")
         elif not data.get("commands"):
             data["commands"] = inferred_commands
```
```diff
@@ -826,6 +980,11 @@ class LLM:
             str(data.get("analysis", "")),
         )
         data["analysis"] = _clean_analysis_text(data.get("analysis", ""))
+        data["commands"] = _ensure_commands(
+            data.get("commands"),
+            analysis=data.get("analysis", ""),
+            last_output=last_output,
+        )
         data["__streamed"] = streamed

         assistant_memory = data["analysis"]
```
{eva_exploit-3.4 → eva_exploit-3.4.2}/sessions/eva_session.py

```diff
@@ -45,6 +45,29 @@ from utils.system import (
 from utils.ui import cyber, menu, raw_input, spinner_start, spinner_stop

 ANSI_ESCAPE_RE = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]")
+CONTROL_CHARS_RE = re.compile(r"[\x00-\x08\x0b-\x1f\x7f]")
+SUDO_PASSWORD_PROMPT_RE = re.compile(
+    r"^\s*(?:\[\s*sudo\s*\]\s*password\s+for\s+\S+\s*:|password\s+for\s+\S+\s*:|sudo:\s+a\s+password\s+is\s+required)\s*$",
+    flags=re.IGNORECASE,
+)
+ROOT_PROMPT_LINE_RE = re.compile(r"^\s*root@[\w.-]+:[^#\n]*#\s*$", flags=re.IGNORECASE)
+
+
+def _sanitize_command_output(text):
+    cleaned = ANSI_ESCAPE_RE.sub("", str(text or ""))
+    cleaned = cleaned.replace("\r\n", "\n").replace("\r", "\n")
+    cleaned = CONTROL_CHARS_RE.sub("", cleaned)
+
+    out_lines = []
+    for line in cleaned.split("\n"):
+        stripped = line.strip()
+        if stripped and (SUDO_PASSWORD_PROMPT_RE.match(stripped) or ROOT_PROMPT_LINE_RE.match(stripped)):
+            continue
+        out_lines.append(line)
+
+    sanitized = "\n".join(out_lines)
+    sanitized = re.sub(r"\n{3,}", "\n\n", sanitized)
+    return sanitized


 # +-------------------------------------------+
```
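`_sanitize_command_output` is the session-side counterpart of the llm.py filters: it strips ANSI color codes, normalizes carriage returns, drops residual control characters, and removes prompt lines before output is echoed or persisted. A small demonstration on made-up noisy output:

```python
import re

ANSI_ESCAPE_RE = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]")
CONTROL_CHARS_RE = re.compile(r"[\x00-\x08\x0b-\x1f\x7f]")

# Made-up sample: colored text, a carriage-return progress line, and a stray bell.
raw = "\x1b[1;32mOK\x1b[0m scanning\rdone\x07\n[sudo] password for bob:\nreal output\n"

cleaned = ANSI_ESCAPE_RE.sub("", raw)                        # drop color codes
cleaned = cleaned.replace("\r\n", "\n").replace("\r", "\n")  # normalize CR to LF
cleaned = CONTROL_CHARS_RE.sub("", cleaned)                  # drop bells etc.
lines = [l for l in cleaned.split("\n") if not l.strip().lower().startswith("[sudo] password")]
print("\n".join(lines))
# OK scanning
# done
# real output
```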
```diff
@@ -244,13 +267,21 @@ class Eva:
             proc.stdin.flush()
             proc.stdin.close()
             for line in proc.stdout:
-
-
+                safe_line = _sanitize_command_output(line)
+                if not safe_line:
+                    continue
+                if not safe_line.endswith("\n"):
+                    safe_line += "\n"
+                print(safe_line, end="")
+                out += safe_line
             return_code = proc.wait()
         except KeyboardInterrupt:
             os.killpg(os.getpgid(proc.pid), signal.SIGINT)
             print(Fore.RED + "\n/// 🜂 Command stopped by user.")
             return_code = proc.wait()
+        out = _sanitize_command_output(out)
+        if out and not out.endswith("\n"):
+            out += "\n"
         if not out.strip():
             notice = f"[EVA_NOTICE] Command produced no stdout/stderr. exit_code={return_code if return_code is not None else 'unknown'}"
             out = notice + "\n"
```
```diff
@@ -368,7 +399,7 @@ class Eva:

         elif item["type"] == "command":
             cyber(f"EXECUTED → {item['cmd']}", color=Fore.CYAN)
-            print(item["output"] + "\n")
+            print(_sanitize_command_output(item["output"]) + "\n")

     def chat(self):
         self._render_session_header()
```
```diff
@@ -452,7 +483,7 @@ class Eva:
         if raw_search_output:
             print(raw_search_output, end="" if raw_search_output.endswith("\n") else "\n")

-        clean_output =
+        clean_output = _sanitize_command_output(raw_search_output).strip()
         if not clean_output:
             clean_output = f"[EVA_NOTICE] /search returned no output. exit_code={rc}"
```
{eva_exploit-3.4 → eva_exploit-3.4.2}/utils/system.py

```diff
@@ -70,14 +70,19 @@ def _custom_api_template(endpoint):
         "#!/usr/bin/env python3\n"
         "import requests\n\n"
         f"API_ENDPOINT = {json.dumps(target)}\n\n"
-        "def query_custom_api(history, endpoint=None):\n"
+        "def query_custom_api(history, endpoint=None, prompt=None):\n"
         "    target = endpoint or API_ENDPOINT\n"
-        "
-        "
-        "
-        "
-        "
-        "
+        "    compiled_prompt = str(prompt or \"\").strip()\n"
+        "    if not compiled_prompt:\n"
+        "        for item in reversed(history):\n"
+        "            if item.get(\"role\") == \"user\":\n"
+        "                compiled_prompt = str(item.get(\"content\", \"\")).strip()\n"
+        "                break\n"
+        "    payload = {\n"
+        "        \"prompt\": compiled_prompt,\n"
+        "        \"conversation\": history,\n"
+        "        \"session\": \"eva-session\",\n"
+        "    }\n"
         "    r = requests.post(target, json=payload, timeout=None)\n"
         "    try:\n"
         "        data = r.json()\n"
```
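Expanded to real source, the generated handler now looks roughly like this; the endpoint URL and the `response` key at the end are placeholders, since the template's tail beyond `r.json()` is not shown in this diff:

```python
#!/usr/bin/env python3
import requests

API_ENDPOINT = "http://127.0.0.1:8000/chat"  # placeholder endpoint

def query_custom_api(history, endpoint=None, prompt=None):
    target = endpoint or API_ENDPOINT
    # Prefer the compiled prompt; fall back to the last user message.
    compiled_prompt = str(prompt or "").strip()
    if not compiled_prompt:
        for item in reversed(history):
            if item.get("role") == "user":
                compiled_prompt = str(item.get("content", "")).strip()
                break
    payload = {
        "prompt": compiled_prompt,
        "conversation": history,
        "session": "eva-session",
    }
    r = requests.post(target, json=payload, timeout=None)
    try:
        data = r.json()
    except ValueError:
        return r.text
    # Assumption: the API labels its reply "response"; adapt to your server.
    return data.get("response", r.text)
```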
```diff
@@ -102,23 +107,16 @@ def _ensure_custom_api_handler_file(path, endpoint):

 def configure_custom_api(open_handler=True):
     endpoint_current = str(getattr(config_module, "API_ENDPOINT", API_ENDPOINT) or "").strip()
-    if endpoint_current == "NOT_SET":
-        endpoint_current = ""
     handler_current = str(getattr(config_module, "CUSTOM_API_HANDLER", CUSTOM_API_HANDLER) or "").strip()
     if not handler_current or handler_current == "NOT_SET":
         handler_current = str(_default_custom_api_handler_path())
+        _persist_key_to_config("CUSTOM_API_HANDLER", handler_current)
+        setattr(config_module, "CUSTOM_API_HANDLER", handler_current)

     clear()
-    cyber("CUSTOM API
-
-
-    _persist_key_to_config("API_ENDPOINT", endpoint_value)
-    setattr(config_module, "API_ENDPOINT", endpoint_value)
-
-    handler_input = input(f"Custom API handler file [{handler_current}] > ").strip()
-    handler_value = handler_input or handler_current
-    _persist_key_to_config("CUSTOM_API_HANDLER", handler_value)
-    setattr(config_module, "CUSTOM_API_HANDLER", handler_value)
+    cyber("CUSTOM API HANDLER", color=Fore.CYAN)
+    endpoint_value = endpoint_current or "NOT_SET"
+    handler_value = handler_current

     try:
         handler_path = _ensure_custom_api_handler_file(handler_value, endpoint_value)
```
```diff
@@ -385,27 +383,14 @@ def run_self_update():
         branch = _git_branch()
         pull_result = subprocess.run(
             ["git", "pull", "--tags", "origin", branch],
-            stdout=devnull,
-            stderr=devnull,
             text=True
         )
         updated = pull_result.returncode == 0
     else:
         pip_result = subprocess.run(
-
-
-
-            "pip",
-            "install",
-            "--upgrade",
-            PYPI_PACKAGE,
-            "--break-system-packages",
-            "-q",
-            ],
-            stdout=devnull,
-            stderr=devnull,
-            text=True
-        )
+            [sys.executable, "-m", "pip", "install", "--upgrade", PYPI_PACKAGE,"--break-system-packages"],
+            text=True
+        )
         updated = pip_result.returncode == 0

     print(Fore.CYAN + "Almost done . . . . ")
```
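Two things changed in the self-updater: output is no longer swallowed into `devnull`, and pip is invoked as `python -m pip` via `sys.executable`, which guarantees the upgrade lands in the interpreter actually running EVA rather than whichever `pip` is first on PATH. Standalone, the new invocation is equivalent to:

```python
import subprocess
import sys

# Upgrade via the running interpreter's own pip. Note that
# --break-system-packages needs a recent pip (PEP 668 era);
# older versions reject the flag.
result = subprocess.run(
    [sys.executable, "-m", "pip", "install", "--upgrade", "eva-exploit", "--break-system-packages"],
    text=True,
)
print("updated" if result.returncode == 0 else "pip exited nonzero")
```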