lybic-guiagents 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lybic-guiagents might be problematic; review the advisory details below before using it.

Files changed (85) hide show
  1. desktop_env/__init__.py +1 -0
  2. desktop_env/actions.py +203 -0
  3. desktop_env/controllers/__init__.py +0 -0
  4. desktop_env/controllers/python.py +471 -0
  5. desktop_env/controllers/setup.py +882 -0
  6. desktop_env/desktop_env.py +509 -0
  7. desktop_env/evaluators/__init__.py +5 -0
  8. desktop_env/evaluators/getters/__init__.py +41 -0
  9. desktop_env/evaluators/getters/calc.py +15 -0
  10. desktop_env/evaluators/getters/chrome.py +1774 -0
  11. desktop_env/evaluators/getters/file.py +154 -0
  12. desktop_env/evaluators/getters/general.py +42 -0
  13. desktop_env/evaluators/getters/gimp.py +38 -0
  14. desktop_env/evaluators/getters/impress.py +126 -0
  15. desktop_env/evaluators/getters/info.py +24 -0
  16. desktop_env/evaluators/getters/misc.py +406 -0
  17. desktop_env/evaluators/getters/replay.py +20 -0
  18. desktop_env/evaluators/getters/vlc.py +86 -0
  19. desktop_env/evaluators/getters/vscode.py +35 -0
  20. desktop_env/evaluators/metrics/__init__.py +160 -0
  21. desktop_env/evaluators/metrics/basic_os.py +68 -0
  22. desktop_env/evaluators/metrics/chrome.py +493 -0
  23. desktop_env/evaluators/metrics/docs.py +1011 -0
  24. desktop_env/evaluators/metrics/general.py +665 -0
  25. desktop_env/evaluators/metrics/gimp.py +637 -0
  26. desktop_env/evaluators/metrics/libreoffice.py +28 -0
  27. desktop_env/evaluators/metrics/others.py +92 -0
  28. desktop_env/evaluators/metrics/pdf.py +31 -0
  29. desktop_env/evaluators/metrics/slides.py +957 -0
  30. desktop_env/evaluators/metrics/table.py +585 -0
  31. desktop_env/evaluators/metrics/thunderbird.py +176 -0
  32. desktop_env/evaluators/metrics/utils.py +719 -0
  33. desktop_env/evaluators/metrics/vlc.py +524 -0
  34. desktop_env/evaluators/metrics/vscode.py +283 -0
  35. desktop_env/providers/__init__.py +35 -0
  36. desktop_env/providers/aws/__init__.py +0 -0
  37. desktop_env/providers/aws/manager.py +278 -0
  38. desktop_env/providers/aws/provider.py +186 -0
  39. desktop_env/providers/aws/provider_with_proxy.py +315 -0
  40. desktop_env/providers/aws/proxy_pool.py +193 -0
  41. desktop_env/providers/azure/__init__.py +0 -0
  42. desktop_env/providers/azure/manager.py +87 -0
  43. desktop_env/providers/azure/provider.py +207 -0
  44. desktop_env/providers/base.py +97 -0
  45. desktop_env/providers/gcp/__init__.py +0 -0
  46. desktop_env/providers/gcp/manager.py +0 -0
  47. desktop_env/providers/gcp/provider.py +0 -0
  48. desktop_env/providers/virtualbox/__init__.py +0 -0
  49. desktop_env/providers/virtualbox/manager.py +463 -0
  50. desktop_env/providers/virtualbox/provider.py +124 -0
  51. desktop_env/providers/vmware/__init__.py +0 -0
  52. desktop_env/providers/vmware/manager.py +455 -0
  53. desktop_env/providers/vmware/provider.py +105 -0
  54. gui_agents/__init__.py +0 -0
  55. gui_agents/agents/Action.py +209 -0
  56. gui_agents/agents/__init__.py +0 -0
  57. gui_agents/agents/agent_s.py +832 -0
  58. gui_agents/agents/global_state.py +610 -0
  59. gui_agents/agents/grounding.py +651 -0
  60. gui_agents/agents/hardware_interface.py +129 -0
  61. gui_agents/agents/manager.py +568 -0
  62. gui_agents/agents/translator.py +132 -0
  63. gui_agents/agents/worker.py +355 -0
  64. gui_agents/cli_app.py +560 -0
  65. gui_agents/core/__init__.py +0 -0
  66. gui_agents/core/engine.py +1496 -0
  67. gui_agents/core/knowledge.py +449 -0
  68. gui_agents/core/mllm.py +555 -0
  69. gui_agents/tools/__init__.py +0 -0
  70. gui_agents/tools/tools.py +727 -0
  71. gui_agents/unit_test/__init__.py +0 -0
  72. gui_agents/unit_test/run_tests.py +65 -0
  73. gui_agents/unit_test/test_manager.py +330 -0
  74. gui_agents/unit_test/test_worker.py +269 -0
  75. gui_agents/utils/__init__.py +0 -0
  76. gui_agents/utils/analyze_display.py +301 -0
  77. gui_agents/utils/common_utils.py +263 -0
  78. gui_agents/utils/display_viewer.py +281 -0
  79. gui_agents/utils/embedding_manager.py +53 -0
  80. gui_agents/utils/image_axis_utils.py +27 -0
  81. lybic_guiagents-0.1.0.dist-info/METADATA +416 -0
  82. lybic_guiagents-0.1.0.dist-info/RECORD +85 -0
  83. lybic_guiagents-0.1.0.dist-info/WHEEL +5 -0
  84. lybic_guiagents-0.1.0.dist-info/licenses/LICENSE +201 -0
  85. lybic_guiagents-0.1.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,301 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ Display.json analyzer - Extract and analyze execution statistics from display.json files
5
+ """
6
+
7
+ import json
8
+ import os
9
+ import glob
10
+ import re
11
+ from typing import Dict, List, Tuple
12
+
13
+
14
def extract_cost_value(cost_str: str) -> Tuple[float, str]:
    """
    Extract numeric value and currency symbol from a cost string
    (e.g. "0.000343¥" -> (0.000343, "¥")).

    Args:
        cost_str: Cost string with an optional trailing currency symbol.

    Returns:
        Tuple of (float value, currency symbol). Returns (0.0, "¥")
        when no numeric value is present.
    """
    # \d+(?:\.\d+)? requires at least one digit, so float() can never
    # receive a bare "." run; the previous [\d.]+ pattern matched "..."
    # and crashed, and its currency class listed "¥" twice.
    match = re.search(r'(\d+(?:\.\d+)?)([¥￥$€£]?)', cost_str)
    if match:
        value = float(match.group(1))
        currency = match.group(2) or "¥"  # default to ¥ if no symbol found
        return value, currency
    return 0.0, "¥"
31
+
32
+
33
def convert_currency_to_yuan(value: float, currency: str) -> float:
    """
    Convert different currencies to yuan (¥) for consistent cost calculation.

    Args:
        value: Cost value.
        currency: Currency symbol.

    Returns:
        Value converted to yuan. Unknown symbols are treated as yuan
        (rate 1.0).
    """
    # Simple static rates (use real-time rates in production).
    # The original dict listed the key "¥" twice; the second entry was
    # dead. Both the halfwidth (¥) and fullwidth (￥) yen signs are
    # mapped explicitly here.
    conversion_rates = {
        "¥": 1.0,   # CNY, halfwidth
        "￥": 1.0,  # CNY, fullwidth
        "$": 7.2,   # USD to CNY (approximate)
        "€": 7.8,   # EUR to CNY (approximate)
        "£": 9.1,   # GBP to CNY (approximate)
    }
    return value * conversion_rates.get(currency, 1.0)
55
+
56
+
57
def analyze_display_json(file_path: str) -> Dict:
    """
    Analyze a single display.json file and extract statistics.

    Args:
        file_path: Path to the display.json file.

    Returns:
        Dict with step count, duration, token totals and cost in yuan;
        empty dict when the file cannot be read or parsed.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except Exception as e:
        print(f"Error reading {file_path}: {e}")
        return {}

    totals = {
        'fast_action_count': 0,
        'total_duration': 0,
        'total_input_tokens': 0,
        'total_output_tokens': 0,
        'total_tokens': 0,
        'total_cost': 0.0,
        'currency_symbol': "¥",  # costs are always normalised to yuan
    }

    operations = data.get('operations', {})
    agent_ops = operations.get('agent', [])

    # Fast mode is marked by any 'fast_action_execution' agent entry.
    # (The original scanned agent_ops twice: once to detect the mode,
    # once to accumulate; the detection pass is now a single any().)
    is_fast_mode = any(
        op.get('operation') == 'fast_action_execution' for op in agent_ops
    )

    if is_fast_mode:
        for op in agent_ops:
            if op.get('operation') == 'fast_action_execution':
                totals['fast_action_count'] += 1
                _accumulate_tokens_and_cost(op, totals)
        totals['total_duration'] = _find_duration(
            operations, 'total_execution_time_fast')
    else:
        # Only these operation types contribute tokens/cost in normal mode.
        token_cost_operations = {
            'formulate_query', 'retrieve_narrative_experience',
            'retrieve_knowledge', 'knowledge_fusion', 'subtask_planner',
            'generated_dag', 'reflection', 'episode_summarization',
            'action_plan', 'grounding_model_response',
        }
        # Hardware operations count as steps in normal mode.
        totals['fast_action_count'] = len(operations.get('hardware', []))

        for module_operations in operations.values():
            if not isinstance(module_operations, list):
                continue
            for op in module_operations:
                if op.get('operation', '') in token_cost_operations:
                    _accumulate_tokens_and_cost(op, totals)
        totals['total_duration'] = _find_duration(
            operations, 'total_execution_time')

    return totals


def _accumulate_tokens_and_cost(operation: Dict, totals: Dict) -> None:
    """Add one operation's token counts and yuan-converted cost to totals."""
    tokens = operation.get('tokens', [0, 0, 0])
    if isinstance(tokens, list) and len(tokens) >= 3:
        totals['total_input_tokens'] += tokens[0]
        totals['total_output_tokens'] += tokens[1]
        totals['total_tokens'] += tokens[2]
    cost_value, currency = extract_cost_value(operation.get('cost', '0¥'))
    totals['total_cost'] += convert_currency_to_yuan(cost_value, currency)


def _find_duration(operations: Dict, op_name: str) -> int:
    """
    Return the duration of the first 'other' operation named op_name.

    int(float(...)) tolerates fractional durations such as "12.5",
    which a bare int(...) cast would reject with ValueError.
    """
    for op in operations.get('other', []):
        if op.get('operation') == op_name:
            return int(float(op.get('duration', 0)))
    return 0
173
+
174
+
175
def analyze_folder(folder_path: str) -> List[Dict]:
    """
    Analyze every display.json file found under a folder (recursively).

    Args:
        folder_path: Root folder to search for display.json files.

    Returns:
        One analysis dict per readable file, each tagged with its
        'file_path'. Empty list when nothing is found.
    """
    search_pattern = os.path.join(folder_path, "**", "display.json")
    found_files = glob.glob(search_pattern, recursive=True)

    if not found_files:
        print(f"No display.json files found in {folder_path}")
        return []

    print(f"Found {len(found_files)} display.json files")

    collected: List[Dict] = []
    for path in found_files:
        print(f"Analyzing: {path}")
        stats = analyze_display_json(path)
        if stats:
            stats['file_path'] = path
            collected.append(stats)

    return collected
205
+
206
+
207
def aggregate_results(results: List[Dict]) -> Dict:
    """
    Aggregate per-file statistics into one summary.

    Args:
        results: Per-file analysis dicts from analyze_display_json.

    Returns:
        Dict with summed step/token/cost totals; duration is the
        maximum (not the sum) across files. Empty dict for empty input.
    """
    if not results:
        return {}

    def summed(key: str):
        return sum(entry[key] for entry in results)

    return {
        'total_fast_actions': summed('fast_action_count'),
        # Per original behaviour: report the longest single duration.
        'total_duration': max(entry['total_duration'] for entry in results),
        'total_input_tokens': summed('total_input_tokens'),
        'total_output_tokens': summed('total_output_tokens'),
        'total_tokens': summed('total_tokens'),
        'total_cost': summed('total_cost'),
        # Currency symbol comes from the first result, defaulting to ¥.
        'currency_symbol': results[0].get('currency_symbol', '¥'),
    }
239
+
240
+
241
def format_output_line(stats: Dict) -> str:
    """
    Format statistics into a single output line.

    Args:
        stats: Statistics dictionary (per-file or aggregated).

    Returns:
        "steps, duration, (in, out, total), costSYMBOL", or
        "No data available" for an empty dict.
    """
    if not stats:
        return "No data available"

    # Aggregated dicts (from aggregate_results) carry the step count
    # under 'total_fast_actions', while per-file dicts use
    # 'fast_action_count'. The original read only 'fast_action_count',
    # so aggregated output always displayed 0 steps.
    steps = stats.get('fast_action_count', stats.get('total_fast_actions', 0))
    duration = stats.get('total_duration', 0)
    tokens = (
        stats.get('total_input_tokens', 0),
        stats.get('total_output_tokens', 0),
        stats.get('total_tokens', 0),
    )
    cost = stats.get('total_cost', 0.0)

    return f"{steps}, {duration}, {tokens}, {cost:.4f}{stats.get('currency_symbol', '¥')}"
261
+
262
+
263
def main():
    """
    Command-line entry point: analyze every display.json under the
    folder given as the first argument and print aggregated statistics.
    """
    import sys

    if len(sys.argv) < 2:
        print("Usage: python analyze_display.py <folder_path>")
        print("Example: python analyze_display.py lybicguiagents/runtime")
        return

    folder_path = sys.argv[1]
    if not os.path.exists(folder_path):
        print(f"Folder not found: {folder_path}")
        return

    # Collect per-file statistics, then fold them into one summary.
    per_file_results = analyze_folder(folder_path)
    if not per_file_results:
        print("No valid display.json files found")
        return

    aggregated_stats = aggregate_results(per_file_results)

    separator = "-" * 80
    print("\nStatistics:")
    print(separator)
    print("Steps, Duration (seconds), (Input Tokens, Output Tokens, Total Tokens), Cost")
    print(separator)
    print(format_output_line(aggregated_stats))
    print(separator)


if __name__ == "__main__":
    main()
@@ -0,0 +1,263 @@
1
+ import json
2
+ import re
3
+ from typing import List
4
+ import time
5
+ import tiktoken
6
+ import numpy as np
7
+
8
+ from typing import Tuple, List, Union, Dict
9
+
10
+ from pydantic import BaseModel, ValidationError
11
+
12
+ import pickle
13
+
14
+
15
class Node(BaseModel):
    """A single subtask node in the planning DAG."""
    # Short identifier of the subtask.
    name: str
    # Free-text description of the subtask.
    info: str
18
+
19
+
20
class Dag(BaseModel):
    """Directed acyclic graph of subtasks parsed from model output."""
    # All subtask nodes in the graph.
    nodes: List[Node]
    # Each edge is a list of Node objects — presumably a
    # [source, target] pair; confirm against the producer of the DAG.
    edges: List[List[Node]]
23
+
24
+
25
NUM_IMAGE_TOKEN = 1105  # Token cost per image, set for a 1920x1080 screen with OpenAI vision
26
+
27
def calculate_tokens(messages, num_image_token=NUM_IMAGE_TOKEN) -> Tuple[int, int]:
    """
    Estimate input/output token counts for a chat message list.

    The last message is treated as the model output; all earlier
    messages are input. Any input message with more than one content
    part is counted as carrying one image, billed at num_image_token.

    Args:
        messages: Chat messages; each has content[0]["text"].
        num_image_token: Token cost charged per input image.

    Returns:
        (input_tokens, output_tokens), where input_tokens includes
        both text and image tokens.
    """
    output_message = messages[-1]
    input_messages = messages[:-1]

    image_count = 0
    text_parts = []
    for message in input_messages:
        text_parts.append(message["content"][0]["text"] + "\n")
        if len(message["content"]) > 1:
            image_count += 1

    input_text_tokens = get_input_token_length("".join(text_parts))
    input_image_tokens = num_image_token * image_count
    output_tokens = get_input_token_length(output_message["content"][0]["text"])

    return (input_text_tokens + input_image_tokens), output_tokens
47
+
48
def parse_dag(text):
    """
    Try extracting JSON from <json>…</json> tags first;
    if not found, try ```json … ``` Markdown fences.
    If both fail, try to parse the entire text as JSON.

    Args:
        text: Raw model output expected to contain a DAG description.

    Returns:
        A validated Dag instance, or None when no parsable/valid DAG
        structure can be recovered from the text.
    """
    import logging
    logger = logging.getLogger("desktopenv.agent")

    def _extract(pattern):
        # Return the first captured group (stripped), or None if no match.
        m = re.search(pattern, text, re.DOTALL)
        return m.group(1).strip() if m else None

    # 1) look for <json>…</json>
    json_str = _extract(r"<json>(.*?)</json>")
    # 2) fallback to ```json … ```
    if json_str is None:
        json_str = _extract(r"```json\s*(.*?)\s*```")
    if json_str is None:
        # 3) try other possible code block formats (bare ``` fences)
        json_str = _extract(r"```\s*(.*?)\s*```")

    # 4) if still not found, try to parse the entire text
    if json_str is None:
        logger.warning("JSON markers not found, attempting to parse entire text")
        json_str = text.strip()

    # Log the extracted JSON string (truncated for readability)
    logger.debug(f"Extracted JSON string: {json_str[:100]}...")

    try:
        # Try to parse as JSON directly
        payload = json.loads(json_str)
    except json.JSONDecodeError as e:
        logger.error(f"JSON parsing error: {e}")

        # Try to fix common JSON format issues
        try:
            # Replace single quotes with double quotes (frequent LLM mistake)
            fixed_json = json_str.replace("'", "\"")
            payload = json.loads(fixed_json)
            logger.info("Successfully fixed JSON by replacing single quotes with double quotes")
        except json.JSONDecodeError:
            # Try to find and extract possible JSON objects
            try:
                # Look for content between the first { and the last }
                match = re.search(r"\{(.*)\}", json_str, re.DOTALL)
                if match:
                    fixed_json = "{" + match.group(1) + "}"
                    payload = json.loads(fixed_json)
                    logger.info("Successfully fixed JSON by extracting JSON object")
                else:
                    logger.error("Unable to fix JSON format")
                    return None
            except Exception:
                logger.error("All JSON fixing attempts failed")
                return None

    # Check if payload contains dag key
    if "dag" not in payload:
        logger.warning("'dag' key not found in JSON, attempting to use entire JSON object")
        # If no dag key, try to use the entire payload
        try:
            # Check if payload directly conforms to Dag structure
            if "nodes" in payload and "edges" in payload:
                return Dag(**payload)
            else:
                # Iterate through top-level keys to find possible dag structure
                for key, value in payload.items():
                    if isinstance(value, dict) and "nodes" in value and "edges" in value:
                        logger.info(f"Found DAG structure in key '{key}'")
                        return Dag(**value)

                logger.error("Could not find valid DAG structure in JSON")
                return None
        except ValidationError as e:
            logger.error(f"Data structure validation error: {e}")
            return None

    # Normal case, use value of dag key
    try:
        return Dag(**payload["dag"])
    except ValidationError as e:
        logger.error(f"DAG data structure validation error: {e}")
        return None
    except Exception as e:
        logger.error(f"Unknown error parsing DAG: {e}")
        return None
136
+
137
+
138
def parse_single_code_from_string(input_string):
    """
    Extract a single code snippet or control command from raw model output.

    Resolution order:
      1. A bare control command ("WAIT", "DONE", "FAIL").
      2. The first ```-fenced code block; a trailing control command on
         the block's last line is split off into its own entry.
      3. The first function-call expression like obj.method(...).
      4. The first non-empty line.
      5. The literal string "fail" when nothing usable is found.
    """
    input_string = input_string.strip()
    commands = ["WAIT", "DONE", "FAIL"]
    if input_string in commands:
        return input_string

    fenced_blocks = re.findall(r"```(?:\w+\s+)?(.*?)```", input_string, re.DOTALL)
    extracted = []
    for block in fenced_blocks:
        block = block.strip()
        block_lines = block.split("\n")
        if block in commands:
            extracted.append(block)
        elif block_lines[-1] in commands:
            # Keep the code body and the trailing command as separate entries.
            if len(block_lines) > 1:
                extracted.append("\n".join(block_lines[:-1]))
            extracted.append(block_lines[-1])
        else:
            extracted.append(block)
    if extracted:
        return extracted[0]

    # Fall back to a function call with balanced parentheses and quotes.
    call_match = re.search(r"(\w+\.\w+\((?:[^()]*|\([^()]*\))*\))", input_string)
    if call_match:
        return call_match.group(1)

    non_empty = [line.strip() for line in input_string.splitlines() if line.strip()]
    return non_empty[0] if non_empty else "fail"
167
+
168
+
169
def get_input_token_length(input_string):
    """Return the number of GPT-4 tokens in input_string (via tiktoken)."""
    encoder = tiktoken.encoding_for_model("gpt-4")
    return len(encoder.encode(input_string))
173
+
174
+
175
def sanitize_code(code):
    """
    Promote the first double-quoted string in multi-line code to a
    triple-quoted string, so embedded newlines remain valid Python.

    Single-line input is returned unchanged.
    """
    if "\n" in code:
        # Capture the outermost double-quoted text (may span lines).
        quoted_spans = re.findall(r'(".*?")', code, flags=re.DOTALL)
        if quoted_spans:
            first_span = quoted_spans[0]
            # Re-wrap only the first occurrence in triple quotes.
            code = code.replace(first_span, f'"""{first_span[1:-1]}"""', 1)
    return code
186
+
187
+
188
def extract_first_agent_function(code_string):
    """
    Return the first agent.<method>(...) call found in code_string,
    or None when no such call exists. Quoted arguments may safely
    contain parentheses.
    """
    agent_call = re.search(
        r'agent\.[a-zA-Z_]+\((?:[^()\'"]|\'[^\']*\'|"[^"]*")*\)',
        code_string,
    )
    return agent_call.group(0) if agent_call else None
197
+
198
+
199
def load_knowledge_base(kb_path: str) -> Dict:
    """
    Load a JSON knowledge base from disk.

    Args:
        kb_path: Path to the JSON file.

    Returns:
        Parsed dict, or {} when the file is missing or unparsable
        (the error is printed, not raised).
    """
    try:
        with open(kb_path, "r") as kb_file:
            contents = json.load(kb_file)
    except Exception as exc:
        print(f"Error loading knowledge base: {exc}")
        return {}
    return contents
206
+
207
+
208
def clean_empty_embeddings(embeddings: Dict) -> Dict:
    """
    Remove unusable entries from an embeddings dict, in place.

    An entry is removed when its value is an empty or scalar array,
    a string starting with 'Error:', or a non-empty list whose first
    element is such an error string.

    Returns:
        The same dict object, with bad keys deleted.
    """
    def _is_bad(value) -> bool:
        if isinstance(value, str) and value.startswith('Error:'):
            return True
        if (isinstance(value, list) and value
                and isinstance(value[0], str) and value[0].startswith('Error:')):
            return True
        as_array = np.array(value)
        return as_array.size == 0 or as_array.shape == ()

    doomed = [key for key, value in embeddings.items() if _is_bad(value)]
    for key in doomed:
        del embeddings[key]
    return embeddings
219
+
220
+
221
def load_embeddings(embeddings_path: str) -> Dict:
    """
    Load a pickled embeddings dict and strip unusable entries.

    Args:
        embeddings_path: Path to the pickle file.

    Returns:
        Cleaned embeddings dict, or {} when the file is missing,
        empty, or unreadable (a notice is printed, not raised).
    """
    try:
        with open(embeddings_path, "rb") as emb_file:
            loaded = pickle.load(emb_file)
        return clean_empty_embeddings(loaded)
    except Exception:
        # Treat any failure (missing file, corrupt pickle) as empty.
        print(f"Empty embeddings file: {embeddings_path}")
        return {}
231
+
232
+
233
def save_embeddings(embeddings_path: str, embeddings: Dict):
    """
    Pickle an embeddings dict to disk, creating parent directories.

    Args:
        embeddings_path: Destination file path.
        embeddings: Embeddings dict to serialize.

    Errors are printed rather than raised.
    """
    try:
        import os
        parent = os.path.dirname(embeddings_path)
        # os.makedirs("") raises FileNotFoundError, so a bare filename
        # (no directory component) must skip directory creation — the
        # original call made such saves silently fail.
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(embeddings_path, "wb") as f:
            pickle.dump(embeddings, f)
    except Exception as e:
        print(f"Error saving embeddings: {e}")
241
+
242
+
243
def agent_log_to_string(agent_log: List[Dict]) -> str:
    """
    Converts a list of agent log entries into a single string for LLM
    consumption.

    Args:
        agent_log: List of dicts; each may carry 'id', 'type', 'content'.

    Returns:
        A formatted multi-line string, or a placeholder when the log
        is empty.
    """
    if not agent_log:
        return "No agent log entries yet."

    lines = ["[AGENT LOG]"]
    lines.extend(
        "[Entry {} - {}] {}".format(
            entry.get("id", "N/A"),
            entry.get("type", "N/A").capitalize(),
            entry.get("content", ""),
        )
        for entry in agent_log
    )
    return "\n".join(lines)