agent-spy-monitor 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_spy/__init__.py +6 -0
- agent_spy/cost_calculator.py +51 -0
- agent_spy/logger.py +16 -0
- agent_spy/monitor.py +111 -0
- agent_spy/old_monitor.py +91 -0
- agent_spy/utils.py +10 -0
- agent_spy/visualizer.py +49 -0
- agent_spy_monitor-0.1.0.dist-info/METADATA +70 -0
- agent_spy_monitor-0.1.0.dist-info/RECORD +12 -0
- agent_spy_monitor-0.1.0.dist-info/WHEEL +5 -0
- agent_spy_monitor-0.1.0.dist-info/licenses/LICENSE +10 -0
- agent_spy_monitor-0.1.0.dist-info/top_level.txt +1 -0
agent_spy/cost_calculator.py
ADDED
@@ -0,0 +1,51 @@
```python
# agent_spy/cost_calculator.py


class CostCalculator:
    # Define the cost per token in a constant
    COST_PER_TOKEN = 1_000_000

    # Base pricing template to avoid repetition
    BASE_PRICING = {
        "gpt-4o": (2.50, 10.00),
        "gpt-4o-mini": (0.150, 0.600),
        "gpt-4": (3.00, 6.00),
        "gpt-4o-audio-preview": (2.50, 10.00),
        "o1-mini": (3.00, 12.00),
        "o1-preview": (15.00, 60.00),
        "chatgpt-4o-latest": (5.00, 15.00),
        "gpt-4-turbo-preview": (10.00, 30.00),
        "gpt-4-32k": (60.00, 120.00),
        "gpt-4-turbo": (10.00, 30.00),
        "gpt-4-vision-preview": (10.00, 30.00),
        "gpt-3.5-turbo": (1.50, 2.00),
        "gpt-3.5-turbo-16k": (3.00, 4.00),
        "claude-instant-1.2": (0.163, 0.551),
        "claude-2": (8.00, 24.00),
        "claude-2.1": (8.00, 24.00),
        "claude-3-haiku-20240307": (0.25, 1.25),
        "claude-3-5-haiku-20241022": (1.00, 5.00),
        "claude-3-opus-20240229": (15.00, 75.00),
        "claude-3-sonnet-20240229": (3.00, 15.00),
        "claude-3-5-sonnet-20240620": (3.00, 15.00),
        "claude-3-5-sonnet-20241022": (3.00, 15.00),
        "gemini-pro": (0.5, 0.5),
        "gemini-1.0-pro": (0.5, 0.5),
        "gemini-1.5-pro": (1.25, 5.00),
        "gemini-1.5-flash": (0.075, 0.30),
    }

    def __init__(self, model: str):
        if model not in self.BASE_PRICING:
            raise ValueError(f"Model {model} not supported.")
        self.model = model

    def calculate_cost(self, input_tokens: int, output_tokens: int) -> float:
        # Retrieve the base pricing for the selected model
        input_rate, output_rate = self.BASE_PRICING[self.model]

        # Calculate the costs using the cost per token
        input_cost = input_tokens * (input_rate / self.COST_PER_TOKEN)
        output_cost = output_tokens * (output_rate / self.COST_PER_TOKEN)

        # Return the total cost
        return input_cost + output_cost
```
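The rates in `BASE_PRICING` are USD per 1M tokens, which is why `calculate_cost` divides each rate by `COST_PER_TOKEN`. A minimal usage sketch (not part of the package), assuming the wheel is installed and importable as `agent_spy`; the token counts are made-up numbers for illustration:

```python
# Illustrative only: token counts are invented for the example.
from agent_spy.cost_calculator import CostCalculator

calc = CostCalculator("gpt-4o-mini")  # unknown models raise ValueError

# 1,200 input tokens at $0.15/1M plus 350 output tokens at $0.60/1M:
# 1200 * 0.15/1e6 + 350 * 0.60/1e6 = 0.00018 + 0.00021 = $0.00039
cost = calc.calculate_cost(input_tokens=1200, output_tokens=350)
print(f"${cost:.6f}")  # $0.000390
```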
agent_spy/logger.py
ADDED
@@ -0,0 +1,16 @@
```python
# agent_spy/logger.py

import logging

class Logger:
    def __init__(self):
        logging.basicConfig(
            filename='agent_spy.log',
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger()

    def log(self, message: str):
        print(message)
        self.logger.info(message)
```
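A small sketch of the behavior above (assuming the package is installed): a single `log()` call both prints to stdout and appends an INFO record to `agent_spy.log` in the current working directory, since the filename is hard-coded in `basicConfig`.

```python
from agent_spy.logger import Logger

logger = Logger()
logger.log("Hello from agent_spy")  # printed to stdout and appended to ./agent_spy.log
```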
agent_spy/monitor.py
ADDED
@@ -0,0 +1,111 @@
```python
# agent_spy/monitor.py

import time
import os
import psutil
import threading
from .cost_calculator import CostCalculator
from .logger import Logger
from .utils import calculate_carbon_emissions
import tiktoken
from .visualizer import Visualizer

class AgentSpy:
    def __init__(self, model: str, enable_monitoring: bool = True, encoding_name: str = "cl100k_base"):
        self.model = model
        self.start_time = None
        self.end_time = None
        self.input_tokens = 0
        self.output_tokens = 0
        self.total_tokens = 0
        self.cost = 0.0
        self.cpu_usage = []
        self.memory_usage = []
        self.logger = Logger()
        self.cost_calculator = CostCalculator(model)
        self.monitoring = False
        self.monitor_thread = None
        self.carbon_emissions = 0.0
        try:
            self.tokenizer = tiktoken.encoding_for_model(model)
        except KeyError:
            self.tokenizer = tiktoken.get_encoding(encoding_name)  # Default to "cl100k_base" if model is not recognized
        self.enable_monitoring = enable_monitoring

    def start(self):
        self.logger.log("Monitoring started.")
        self.start_time = time.time()
        if self.enable_monitoring:
            self.monitoring = True
            self.monitor_thread = threading.Thread(target=self._monitor_resources)
            self.monitor_thread.start()

    def end(self):
        self.end_time = time.time()
        if self.enable_monitoring:
            self.monitoring = False
            if self.monitor_thread:
                self.monitor_thread.join()
        self.total_time = self.end_time - self.start_time
        self.cost = self.cost_calculator.calculate_cost(self.input_tokens, self.output_tokens)
        self.carbon_emissions = calculate_carbon_emissions(self.total_tokens, self.model)
        self.logger.log("Monitoring ended.")
        self.logger.log(f"Total Time: {self.total_time:.2f} seconds")
        self.logger.log(f"Input Tokens: {self.input_tokens}")
        self.logger.log(f"Output Tokens: {self.output_tokens}")
        self.logger.log(f"Total Tokens: {self.total_tokens}")
        self.logger.log(f"Cost: ${self.cost:.6f}")
        self.logger.log(f"CPU Usage: {self.cpu_usage}")
        self.logger.log(f"Memory Usage: {self.memory_usage} MB")
        self.logger.log(f"Carbon Emissions: {self.carbon_emissions:.6f} kg CO2")

    def _monitor_resources(self):
        process = psutil.Process(os.getpid())
        while self.monitoring:
            cpu = process.cpu_percent(interval=1)
            mem = process.memory_info().rss / (1024 * 1024)  # in MB
            self.cpu_usage.append(cpu)
            self.memory_usage.append(mem)
            time.sleep(1)

    def set_token_counts(self, input_text: str, output_text: str):
        self.input_tokens = self.count_tokens(input_text)
        self.output_tokens = self.count_tokens(output_text)
        self.total_tokens = self.input_tokens + self.output_tokens
        self.logger.log(f"Input Tokens: {self.input_tokens}")
        self.logger.log(f"Output Tokens: {self.output_tokens}")
        self.logger.log(f"Total Tokens: {self.total_tokens}")

    def set_token_usage_from_crew_output(self, crew_output):
        """
        Extracts token usage metrics from a CrewOutput object and sets the token counts accordingly.
        """
        if hasattr(crew_output, 'token_usage') and crew_output.token_usage:
            self.input_tokens = crew_output.token_usage.prompt_tokens
            self.output_tokens = crew_output.token_usage.completion_tokens
            self.total_tokens = crew_output.token_usage.total_tokens
            self.cost = self.cost_calculator.calculate_cost(self.input_tokens, self.output_tokens)
            self.carbon_emissions = calculate_carbon_emissions(self.total_tokens, self.model)
            self.logger.log(f"Input Tokens: {self.input_tokens}")
            self.logger.log(f"Output Tokens: {self.output_tokens}")
            self.logger.log(f"Total Tokens: {self.total_tokens}")
            self.logger.log(f"Cost: ${self.cost:.6f}")
        else:
            self.logger.log("No token usage information available in CrewOutput.")

    def count_tokens(self, text: str) -> int:
        return len(self.tokenizer.encode(text))


class AgentSpyExtended(AgentSpy):
    def __init__(self, model: str, enable_monitoring: bool = True, encoding_name: str = "cl100k_base"):
        super().__init__(model, enable_monitoring, encoding_name)
        self.visualizer = Visualizer(self)

    def visualize(self, method='cli'):
        if method == 'cli':
            self.visualizer.cli_summary()
        elif method == 'streamlit':
            self.visualizer.streamlit_dashboard()
        else:
            raise ValueError("Unsupported visualization method. Choose 'cli' or 'streamlit'.")
```
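A hedged end-to-end sketch of how `AgentSpy` appears intended to be used: wrap the model call between `start()` and `end()`, and pass the prompt and response text to `set_token_counts()`. Plain strings and a `sleep` stand in for a real LLM call here; none of this comes from the package's own documentation.

```python
# Illustrative only: the strings below stand in for a real prompt and model response.
import time
from agent_spy.monitor import AgentSpy

spy = AgentSpy("gpt-4o-mini")           # unsupported models raise ValueError
spy.start()                             # starts timing and background CPU/memory sampling

prompt = "Summarize the plot of Hamlet in two sentences."
time.sleep(2)                           # placeholder for the actual model call
response = "Prince Hamlet feigns madness to avenge his murdered father. Nearly everyone dies."

spy.set_token_counts(prompt, response)  # token counts via tiktoken
spy.end()                               # joins the monitor thread, computes cost and emissions, logs a summary
```

For CrewAI runs, `set_token_usage_from_crew_output(crew_output)` reads `prompt_tokens`, `completion_tokens`, and `total_tokens` from the crew result instead of re-tokenizing the text.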
agent_spy/old_monitor.py
ADDED
@@ -0,0 +1,91 @@
```python
# agent_spy/monitor.py

import time
import os
import psutil
import threading
from .cost_calculator import CostCalculator
from .logger import Logger
from .utils import calculate_carbon_emissions
import tiktoken
from .visualizer import Visualizer

class AgentSpy:
    def __init__(self, model: str, enable_monitoring: bool = True):
        self.model = model
        self.start_time = None
        self.end_time = None
        self.input_tokens = 0
        self.output_tokens = 0
        self.total_tokens = 0
        self.cost = 0.0
        self.cpu_usage = []
        self.memory_usage = []
        self.logger = Logger()
        self.cost_calculator = CostCalculator(model)
        self.monitoring = False
        self.monitor_thread = None
        self.carbon_emissions = 0.0
        self.tokenizer = tiktoken.get_encoding("cl100k_base")  # Adjust encoding as needed
        self.enable_monitoring = enable_monitoring

    def start(self):
        self.logger.log("Monitoring started.")
        self.start_time = time.time()
        if self.enable_monitoring:
            self.monitoring = True
            self.monitor_thread = threading.Thread(target=self._monitor_resources)
            self.monitor_thread.start()

    def end(self):
        self.end_time = time.time()
        if self.enable_monitoring:
            self.monitoring = False
            if self.monitor_thread:
                self.monitor_thread.join()
        self.total_time = self.end_time - self.start_time
        self.cost = self.cost_calculator.calculate_cost(self.input_tokens, self.output_tokens)
        self.carbon_emissions = calculate_carbon_emissions(self.total_tokens, self.model)
        self.logger.log("Monitoring ended.")
        self.logger.log(f"Total Time: {self.total_time:.2f} seconds")
        self.logger.log(f"Input Tokens: {self.input_tokens}")
        self.logger.log(f"Output Tokens: {self.output_tokens}")
        self.logger.log(f"Total Tokens: {self.total_tokens}")
        self.logger.log(f"Cost: ${self.cost:.6f}")
        self.logger.log(f"CPU Usage: {self.cpu_usage}")
        self.logger.log(f"Memory Usage: {self.memory_usage} MB")
        self.logger.log(f"Carbon Emissions: {self.carbon_emissions:.6f} kg CO2")

    def _monitor_resources(self):
        process = psutil.Process(os.getpid())
        while self.monitoring:
            cpu = process.cpu_percent(interval=1)
            mem = process.memory_info().rss / (1024 * 1024)  # in MB
            self.cpu_usage.append(cpu)
            self.memory_usage.append(mem)
            time.sleep(1)

    def set_token_counts(self, input_text: str, output_text: str):
        self.input_tokens = self.count_tokens(input_text)
        self.output_tokens = self.count_tokens(output_text)
        self.total_tokens = self.input_tokens + self.output_tokens
        self.logger.log(f"Input Tokens: {self.input_tokens}")
        self.logger.log(f"Output Tokens: {self.output_tokens}")
        self.logger.log(f"Total Tokens: {self.total_tokens}")

    def count_tokens(self, text: str) -> int:
        return len(self.tokenizer.encode(text))


class AgentSpyExtended(AgentSpy):
    def __init__(self, model: str):
        super().__init__(model)
        self.visualizer = Visualizer(self)

    def visualize(self, method='cli'):
        if method == 'cli':
            self.visualizer.cli_summary()
        elif method == 'streamlit':
            self.visualizer.streamlit_dashboard()
        else:
            raise ValueError("Unsupported visualization method. Choose 'cli' or 'streamlit'.")
```
agent_spy/utils.py
ADDED
@@ -0,0 +1,10 @@
```python
# agent_spy/utils.py

def calculate_carbon_emissions(total_tokens: int, model: str) -> float:
    # Placeholder for actual carbon calculation
    # This can be replaced with a real formula or API call
    emissions_per_token = {
        "gpt-4o": 0.0001,  # kg CO2 per token
        "gpt-4o-mini": 0.00005
    }
    return total_tokens * emissions_per_token.get(model, 0)
```
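A quick check of the placeholder math above (my numbers, not the package's): a 1,500-token run on gpt-4o is estimated at 1,500 × 0.0001 ≈ 0.15 kg CO₂, while any model missing from the lookup table silently contributes 0.

```python
from agent_spy.utils import calculate_carbon_emissions

print(calculate_carbon_emissions(1500, "gpt-4o"))    # ~0.15 kg CO2
print(calculate_carbon_emissions(1500, "claude-2"))  # 0 -- model not in the lookup table
```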
agent_spy/visualizer.py
ADDED
@@ -0,0 +1,49 @@
```python
# agent_spy/visualizer.py

import matplotlib.pyplot as plt
import streamlit as st

class Visualizer:
    def __init__(self, monitor):
        self.monitor = monitor

    def cli_summary(self):
        print("\n--- Agent Spy Summary ---")
        print(f"Total Time: {self.monitor.total_time:.2f} seconds")
        print(f"Input Tokens: {self.monitor.input_tokens}")
        print(f"Output Tokens: {self.monitor.output_tokens}")
        print(f"Total Tokens: {self.monitor.total_tokens}")
        print(f"Cost: ${self.monitor.cost:.6f}")
        avg_cpu = sum(self.monitor.cpu_usage) / len(self.monitor.cpu_usage) if self.monitor.cpu_usage else 0
        avg_mem = sum(self.monitor.memory_usage) / len(self.monitor.memory_usage) if self.monitor.memory_usage else 0
        print(f"Average CPU Usage: {avg_cpu:.2f}%")
        print(f"Average Memory Usage: {avg_mem:.2f} MB")
        print(f"Carbon Emissions: {self.monitor.carbon_emissions:.6f} kg CO2")
        print("----------------------------\n")

    def streamlit_dashboard(self):
        st.title("Agent Spy Dashboard")
        st.header("Performance Metrics")
        st.write(f"**Total Time:** {self.monitor.total_time:.2f} seconds")
        st.write(f"**Input Tokens:** {self.monitor.input_tokens}")
        st.write(f"**Output Tokens:** {self.monitor.output_tokens}")
        st.write(f"**Total Tokens:** {self.monitor.total_tokens}")
        st.write(f"**Cost:** ${self.monitor.cost:.6f}")
        st.write(f"**Carbon Emissions:** {self.monitor.carbon_emissions:.6f} kg CO2")

        st.header("Resource Utilization")
        if self.monitor.cpu_usage and self.monitor.memory_usage:
            fig, ax = plt.subplots(2, 1, figsize=(10, 6))
            ax[0].plot(self.monitor.cpu_usage, label='CPU Usage (%)', color='blue')
            ax[0].set_xlabel('Time (s)')
            ax[0].set_ylabel('CPU Usage (%)')
            ax[0].legend()

            ax[1].plot(self.monitor.memory_usage, label='Memory Usage (MB)', color='orange')
            ax[1].set_xlabel('Time (s)')
            ax[1].set_ylabel('Memory Usage (MB)')
            ax[1].legend()

            st.pyplot(fig)
        else:
            st.write("No resource usage data available.")
```
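A short sketch of how the visualizer is reached through `AgentSpyExtended` (assumptions: the package is installed, and the Streamlit path only renders when the script is launched with `streamlit run`, which the code itself does not state):

```python
from agent_spy.monitor import AgentSpyExtended

spy = AgentSpyExtended("gpt-4o")
spy.start()
# ... run the agent and record token counts here ...
spy.end()

spy.visualize("cli")          # prints the text summary from cli_summary()
# spy.visualize("streamlit")  # renders the dashboard when executed via `streamlit run`
```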
agent_spy_monitor-0.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,70 @@

Metadata-Version: 2.4
Name: agent-spy-monitor
Version: 0.1.0
Summary: An operational monitoring library for AI agent applications
Author-email: Nidhish Wakodikar <nidhishwakodikar@gmail.com>
License: MIT
Project-URL: Homepage, https://github.com/nidhishwakodikar/agent-spy-monitor
Project-URL: Repository, https://github.com/nidhishwakodikar/agent-spy-monitor
Project-URL: Issues, https://github.com/nidhishwakodikar/agent-spy-monitor/issues
Project-URL: Documentation, https://github.com/nidhishwakodikar/agent-spy-monitor#readme
Keywords: ai,agent,monitoring,tokens,cost,performance
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Monitoring
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: matplotlib>=3.5.0
Requires-Dist: psutil>=5.8.0
Requires-Dist: streamlit>=1.20.0
Requires-Dist: tiktoken>=0.4.0
Provides-Extra: test
Requires-Dist: pytest>=7.0.0; extra == "test"
Requires-Dist: pytest-cov>=4.0.0; extra == "test"
Provides-Extra: dev
Requires-Dist: black; extra == "dev"
Requires-Dist: flake8; extra == "dev"
Requires-Dist: mypy; extra == "dev"
Dynamic: license-file

## Features

1. **Token Counting**
   - Total tokens
   - Input tokens
   - Output tokens

2. **Cost Calculation**
   - Based on the token usage and model pricing

3. **Performance Metrics**
   - Time taken for each call
   - CPU and memory consumption

4. **Environmental Impact**
   - Estimated CO₂ emissions

5. **Logging**
   - Comprehensive logs of all operations

6. **Visualization**
   - CLI summaries
   - Streamlit dashboard for detailed insights

## Installation

You can install **Agent Watch** via [PyPI](https://pypi.org/project/agent-watch/) using `pip`:

```bash
pip install agent-watch
```
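Note that the README excerpt above still links to and installs Agent Watch; judging from the `Name:` field in this METADATA, installing the distribution shown in this diff would presumably be `pip install agent-spy-monitor` instead (an assumption based on the wheel name, not on anything stated in the README).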
agent_spy_monitor-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@

```
agent_spy/__init__.py,sha256=VPKLiCnT4FpHuurmIEqHDMyIPmEAJ1exhJRcmnTWNTY,170
agent_spy/cost_calculator.py,sha256=Q-xTEoJI8YWifd1kywEnCLrX_NaShGDhPSe7CfXCjY0,2001
agent_spy/logger.py,sha256=b6zx4Enpi5zhB4KT2WgNtush217-YioarTyY9HvbA2E,396
agent_spy/monitor.py,sha256=RP4TEdKyFikOmod8mwVa_ICbTd5qFL0CQbkRELiLA7c,4953
agent_spy/old_monitor.py,sha256=Um_3tw9f4aajsZwGun4-YVKj7oA2EWn2swxOX1irNkg,3640
agent_spy/utils.py,sha256=kKHKXZkWHokLza_b9FcVqrM7ZeA-ioPTEBBR4XdMvE4,384
agent_spy/visualizer.py,sha256=n2eMIntGvjuN8s9dz4jwXYUABPV1heY4j2HeRJZefHk,2260
agent_spy_monitor-0.1.0.dist-info/licenses/LICENSE,sha256=raBN8zay4QP5JYmgq8MjtDhNcnN6VMmHRA2rso99TTk,1082
agent_spy_monitor-0.1.0.dist-info/METADATA,sha256=NZRl5ndhKCN_h7-cs1qA06Tqj9VO6M_wNmK5eGMyTp0,2348
agent_spy_monitor-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
agent_spy_monitor-0.1.0.dist-info/top_level.txt,sha256=WT-r-wmnQU8bxivGmY0edi3x3-JDiL7Mcj-gpPFniiI,10
agent_spy_monitor-0.1.0.dist-info/RECORD,,
```
agent_spy_monitor-0.1.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,10 @@

MIT License

Copyright (c) [2024] [AI Anytime]

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
agent_spy_monitor-0.1.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@

```
agent_spy
```