praisonaiagents 0.0.101__tar.gz → 0.0.103__tar.gz
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/PKG-INFO +7 -3
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/__init__.py +49 -1
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/llm/__init__.py +11 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/llm/llm.py +6 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/memory/memory.py +125 -45
- praisonaiagents-0.0.103/praisonaiagents/telemetry/__init__.py +102 -0
- praisonaiagents-0.0.103/praisonaiagents/telemetry/integration.py +242 -0
- praisonaiagents-0.0.103/praisonaiagents/telemetry/telemetry.py +350 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents.egg-info/PKG-INFO +7 -3
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents.egg-info/SOURCES.txt +3 -1
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents.egg-info/requires.txt +7 -2
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/pyproject.toml +12 -5
- praisonaiagents-0.0.101/MANIFEST.in +0 -15
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/README.md +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/setup.cfg +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.101 → praisonaiagents-0.0.103}/tests/test.py +0 -0
praisonaiagents/PKG-INFO:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.101
+Version: 0.0.103
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -8,13 +8,14 @@ Requires-Dist: pydantic
 Requires-Dist: rich
 Requires-Dist: openai
 Requires-Dist: mcp>=1.6.0
+Requires-Dist: posthog>=4.0.0
 Provides-Extra: mcp
 Requires-Dist: mcp>=1.6.0; extra == "mcp"
 Requires-Dist: fastapi>=0.115.0; extra == "mcp"
 Requires-Dist: uvicorn>=0.34.0; extra == "mcp"
 Provides-Extra: memory
 Requires-Dist: chromadb>=1.0.0; extra == "memory"
-Requires-Dist: litellm>=1.
+Requires-Dist: litellm>=1.72.0; extra == "memory"
 Provides-Extra: knowledge
 Requires-Dist: mem0ai>=0.1.0; extra == "knowledge"
 Requires-Dist: chromadb>=1.0.0; extra == "knowledge"
@@ -24,11 +25,13 @@ Provides-Extra: graph
 Requires-Dist: mem0ai[graph]>=0.1.0; extra == "graph"
 Requires-Dist: chromadb>=1.0.0; extra == "graph"
 Provides-Extra: llm
-Requires-Dist: litellm>=1.
+Requires-Dist: litellm>=1.72.0; extra == "llm"
 Requires-Dist: pydantic>=2.4.2; extra == "llm"
 Provides-Extra: api
 Requires-Dist: fastapi>=0.115.0; extra == "api"
 Requires-Dist: uvicorn>=0.34.0; extra == "api"
+Provides-Extra: telemetry
+Requires-Dist: posthog>=4.0.0; extra == "telemetry"
 Provides-Extra: all
 Requires-Dist: praisonaiagents[memory]; extra == "all"
 Requires-Dist: praisonaiagents[knowledge]; extra == "all"
@@ -36,3 +39,4 @@ Requires-Dist: praisonaiagents[graph]; extra == "all"
 Requires-Dist: praisonaiagents[llm]; extra == "all"
 Requires-Dist: praisonaiagents[mcp]; extra == "all"
 Requires-Dist: praisonaiagents[api]; extra == "all"
+Requires-Dist: praisonaiagents[telemetry]; extra == "all"
```
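In short: `posthog>=4.0.0` becomes a core dependency, litellm is bumped to `>=1.72.0` in the `memory` and `llm` extras, and a new `telemetry` extra (also pulled in by `all`) is introduced. The new extra installs with `pip install "praisonaiagents[telemetry]"`.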
praisonaiagents/__init__.py:

```diff
@@ -12,6 +12,7 @@ from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
 from .mcp.mcp import MCP
 from .session import Session
+from .memory.memory import Memory
 from .guardrails import GuardrailResult, LLMGuardrail
 from .main import (
     TaskOutput,
@@ -29,9 +30,50 @@ from .main import (
     async_display_callbacks,
 )
 
+# Telemetry support (lazy loaded)
+try:
+    from .telemetry import (
+        get_telemetry,
+        enable_telemetry,
+        disable_telemetry,
+        MinimalTelemetry,
+        TelemetryCollector
+    )
+    _telemetry_available = True
+except ImportError:
+    # Telemetry not available - provide stub functions
+    _telemetry_available = False
+    def get_telemetry():
+        return None
+
+    def enable_telemetry(*args, **kwargs):
+        import logging
+        logging.warning(
+            "Telemetry not available. Install with: pip install praisonaiagents[telemetry]"
+        )
+        return None
+
+    def disable_telemetry():
+        pass
+
+    MinimalTelemetry = None
+    TelemetryCollector = None
+
 # Add Agents as an alias for PraisonAIAgents
 Agents = PraisonAIAgents
 
+# Apply telemetry auto-instrumentation after all imports
+if _telemetry_available:
+    try:
+        # Only instrument if telemetry is enabled
+        _telemetry = get_telemetry()
+        if _telemetry and _telemetry.enabled:
+            from .telemetry.integration import auto_instrument_all
+            auto_instrument_all(_telemetry)
+    except Exception:
+        # Silently fail if there are any issues
+        pass
+
 __all__ = [
     'Agent',
     'ImageAgent',
@@ -43,6 +85,7 @@ __all__ = [
     'ReflectionOutput',
     'AutoAgents',
     'Session',
+    'Memory',
     'display_interaction',
     'display_self_reflection',
     'display_instruction',
@@ -58,5 +101,10 @@ __all__ = [
     'Chunking',
     'MCP',
     'GuardrailResult',
-    'LLMGuardrail'
+    'LLMGuardrail',
+    'get_telemetry',
+    'enable_telemetry',
+    'disable_telemetry',
+    'MinimalTelemetry',
+    'TelemetryCollector'
 ]
```
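With these exports, the telemetry API is reachable from the package root whether or not the optional dependency is installed; the stub functions keep the import surface stable. A minimal usage sketch, using only the names the diff exports:

```python
from praisonaiagents import get_telemetry, disable_telemetry

# Programmatic opt-out; the stub version is a no-op when the
# telemetry extra is not installed
disable_telemetry()

# get_telemetry() returns None under the stub, so guard before
# reading instance attributes such as .enabled
telemetry = get_telemetry()
if telemetry is not None and telemetry.enabled:
    print("telemetry is active")
```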
praisonaiagents/llm/__init__.py:

```diff
@@ -1,5 +1,9 @@
 import logging
 import warnings
+import os
+
+# Disable litellm telemetry before any imports
+os.environ["LITELLM_TELEMETRY"] = "False"
 
 # Suppress all relevant logs at module level
 logging.getLogger("litellm").setLevel(logging.ERROR)
@@ -17,4 +21,11 @@ logging.basicConfig(level=logging.WARNING)
 # Import after suppressing warnings
 from .llm import LLM, LLMContextLengthExceededException
 
+# Ensure telemetry is disabled after import as well
+try:
+    import litellm
+    litellm.telemetry = False
+except ImportError:
+    pass
+
 __all__ = ["LLM", "LLMContextLengthExceededException"]
```
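The disable happens twice, presumably as belt-and-braces: `LITELLM_TELEMETRY` is set before litellm can be imported so any import-time check sees it, while `litellm.telemetry = False` is forced afterwards to cover attribute reads at call time; the `except ImportError` keeps the module importable when litellm is not installed.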
praisonaiagents/llm/llm.py:

```diff
@@ -17,6 +17,9 @@ from ..main import (
 from rich.console import Console
 from rich.live import Live
 
+# Disable litellm telemetry before any imports
+os.environ["LITELLM_TELEMETRY"] = "False"
+
 # TODO: Include in-build tool calling in LLM class
 # TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
 class LLMContextLengthExceededException(Exception):
@@ -108,6 +111,9 @@
     ):
         try:
             import litellm
+            # Disable telemetry
+            litellm.telemetry = False
+
             # Set litellm options globally
             litellm.set_verbose = False
             litellm.success_callback = []
```
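The same two-pronged disable is repeated inside what appears to be the `LLM` initializer, alongside the existing global litellm options (`set_verbose = False`, cleared success callbacks), so telemetry stays off even if litellm is reconfigured between instantiations.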
praisonaiagents/memory/memory.py:

```diff
@@ -6,6 +6,9 @@ import shutil
 from typing import Any, Dict, List, Optional, Union, Literal
 import logging
 
+# Disable litellm telemetry before any imports
+os.environ["LITELLM_TELEMETRY"] = "False"
+
 # Set up logger
 logger = logging.getLogger(__name__)
 
@@ -29,6 +32,13 @@ try:
 except ImportError:
     OPENAI_AVAILABLE = False
 
+try:
+    import litellm
+    litellm.telemetry = False  # Disable telemetry
+    LITELLM_AVAILABLE = True
+except ImportError:
+    LITELLM_AVAILABLE = False
+
 
 
 
@@ -340,14 +350,28 @@
 
         elif self.use_rag and hasattr(self, "chroma_col"):
            try:
-
-
-
-
-
-
-
-
+                if LITELLM_AVAILABLE:
+                    # Use LiteLLM for consistency with the rest of the codebase
+                    import litellm
+
+                    response = litellm.embedding(
+                        model="text-embedding-3-small",
+                        input=query
+                    )
+                    query_embedding = response.data[0]["embedding"]
+                elif OPENAI_AVAILABLE:
+                    # Fallback to OpenAI client
+                    from openai import OpenAI
+                    client = OpenAI()
+
+                    response = client.embeddings.create(
+                        input=query,
+                        model="text-embedding-3-small"
+                    )
+                    query_embedding = response.data[0].embedding
+                else:
+                    self._log_verbose("Neither litellm nor openai available for embeddings", logging.WARNING)
+                    return []
 
                 resp = self.chroma_col.query(
                     query_embeddings=[query_embedding],
@@ -464,19 +488,39 @@
         # Store in vector database if enabled
         if self.use_rag and hasattr(self, "chroma_col"):
             try:
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if LITELLM_AVAILABLE:
+                    # Use LiteLLM for consistency with the rest of the codebase
+                    import litellm
+
+                    logger.info("Getting embeddings from LiteLLM...")
+                    logger.debug(f"Embedding input text: {text}")
+
+                    response = litellm.embedding(
+                        model="text-embedding-3-small",
+                        input=text
+                    )
+                    embedding = response.data[0]["embedding"]
+                    logger.info("Successfully got embeddings from LiteLLM")
+                    logger.debug(f"Received embedding of length: {len(embedding)}")
+
+                elif OPENAI_AVAILABLE:
+                    # Fallback to OpenAI client
+                    from openai import OpenAI
+                    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+                    logger.info("Getting embeddings from OpenAI...")
+                    logger.debug(f"Embedding input text: {text}")
+
+                    response = client.embeddings.create(
+                        input=text,
+                        model="text-embedding-3-small"
+                    )
+                    embedding = response.data[0].embedding
+                    logger.info("Successfully got embeddings from OpenAI")
+                    logger.debug(f"Received embedding of length: {len(embedding)}")
+                else:
+                    logger.warning("Neither litellm nor openai available for embeddings")
+                    return
 
                 # Sanitize metadata for ChromaDB
                 sanitized_metadata = self._sanitize_metadata(metadata)
@@ -527,15 +571,28 @@
 
         elif self.use_rag and hasattr(self, "chroma_col"):
             try:
-
-
-
-
-
-
-
-
-
+                if LITELLM_AVAILABLE:
+                    # Use LiteLLM for consistency with the rest of the codebase
+                    import litellm
+
+                    response = litellm.embedding(
+                        model="text-embedding-3-small",
+                        input=query
+                    )
+                    query_embedding = response.data[0]["embedding"]
+                elif OPENAI_AVAILABLE:
+                    # Fallback to OpenAI client
+                    from openai import OpenAI
+                    client = OpenAI()
+
+                    response = client.embeddings.create(
+                        input=query,
+                        model="text-embedding-3-small"
+                    )
+                    query_embedding = response.data[0].embedding
+                else:
+                    self._log_verbose("Neither litellm nor openai available for embeddings", logging.WARNING)
+                    return []
 
                 # Search ChromaDB with embedding
                 resp = self.chroma_col.query(
@@ -910,21 +967,44 @@
         """
 
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if LITELLM_AVAILABLE:
+                # Use LiteLLM for consistency with the rest of the codebase
+                import litellm
+
+                # Convert model name if it's in litellm format
+                model_name = llm or "gpt-4o-mini"
+
+                response = litellm.completion(
+                    model=model_name,
+                    messages=[{
+                        "role": "user",
+                        "content": custom_prompt or default_prompt
+                    }],
+                    response_format={"type": "json_object"},
+                    temperature=0.3
+                )
+            elif OPENAI_AVAILABLE:
+                # Fallback to OpenAI client
+                from openai import OpenAI
+                client = OpenAI()
+
+                response = client.chat.completions.create(
+                    model=llm or "gpt-4o-mini",
+                    messages=[{
+                        "role": "user",
+                        "content": custom_prompt or default_prompt
+                    }],
+                    response_format={"type": "json_object"},
+                    temperature=0.3
+                )
+            else:
+                logger.error("Neither litellm nor openai available for quality calculation")
+                return {
+                    "completeness": 0.0,
+                    "relevance": 0.0,
+                    "clarity": 0.0,
+                    "accuracy": 0.0
+                }
 
            metrics = json.loads(response.choices[0].message.content)
 
```

(The bare `-` lines above reflect removed lines whose content the diff viewer does not render.)
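All four memory.py hunks apply the same provider-fallback shape: prefer LiteLLM when importable, fall back to the OpenAI client, and degrade gracefully when neither is present. A self-contained sketch of that pattern (the `embed_text` helper is illustrative, not part of the package):

```python
import os

try:
    import litellm
    litellm.telemetry = False  # mirror the package's telemetry opt-out
    LITELLM_AVAILABLE = True
except ImportError:
    LITELLM_AVAILABLE = False

try:
    from openai import OpenAI
    OPENAI_AVAILABLE = True
except ImportError:
    OPENAI_AVAILABLE = False


def embed_text(text: str, model: str = "text-embedding-3-small") -> list:
    """Illustrative helper mirroring Memory's embedding fallback."""
    if LITELLM_AVAILABLE:
        # litellm returns embedding items that index like dicts
        response = litellm.embedding(model=model, input=text)
        return response.data[0]["embedding"]
    if OPENAI_AVAILABLE:
        # the OpenAI SDK returns typed objects with an .embedding attribute
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        response = client.embeddings.create(input=text, model=model)
        return response.data[0].embedding
    return []  # neither provider installed
```

Note the small API difference the pattern has to absorb: LiteLLM's response items are indexed as dictionaries, while the OpenAI SDK exposes an `.embedding` attribute.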
praisonaiagents/telemetry/__init__.py (new file):

```diff
@@ -0,0 +1,102 @@
+"""
+PraisonAI Agents Minimal Telemetry Module
+
+This module provides anonymous usage tracking with privacy-first design.
+Telemetry is opt-out and can be disabled via environment variables:
+- PRAISONAI_TELEMETRY_DISABLED=true
+- PRAISONAI_DISABLE_TELEMETRY=true
+- DO_NOT_TRACK=true
+
+No personal data, prompts, or responses are collected.
+"""
+
+import os
+import atexit
+from typing import Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from .telemetry import MinimalTelemetry, TelemetryCollector
+
+# Import the classes for real (not just type checking)
+from .telemetry import MinimalTelemetry, TelemetryCollector
+
+__all__ = [
+    'get_telemetry',
+    'enable_telemetry',
+    'disable_telemetry',
+    'MinimalTelemetry',
+    'TelemetryCollector',  # For backward compatibility
+]
+
+
+def get_telemetry() -> 'MinimalTelemetry':
+    """Get the global telemetry instance."""
+    from .telemetry import get_telemetry as _get_telemetry
+    return _get_telemetry()
+
+
+def enable_telemetry():
+    """Enable telemetry (if not disabled by environment)."""
+    from .telemetry import enable_telemetry as _enable_telemetry
+    _enable_telemetry()
+
+
+def disable_telemetry():
+    """Disable telemetry."""
+    from .telemetry import disable_telemetry as _disable_telemetry
+    _disable_telemetry()
+
+
+# Auto-instrumentation and cleanup setup
+_initialized = False
+_atexit_registered = False
+
+def _ensure_atexit():
+    """Ensure atexit handler is registered."""
+    global _atexit_registered
+    if _atexit_registered:
+        return
+
+    # Check if telemetry should be disabled
+    telemetry_disabled = any([
+        os.environ.get('PRAISONAI_TELEMETRY_DISABLED', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('PRAISONAI_DISABLE_TELEMETRY', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('DO_NOT_TRACK', '').lower() in ('true', '1', 'yes'),
+    ])
+
+    if not telemetry_disabled:
+        # Register atexit handler to flush telemetry on exit
+        atexit.register(lambda: get_telemetry().flush())
+        _atexit_registered = True
+
+def _initialize_telemetry():
+    """Initialize telemetry with auto-instrumentation and cleanup."""
+    global _initialized
+    if _initialized:
+        return
+
+    # Ensure atexit is registered
+    _ensure_atexit()
+
+    # Check if telemetry should be disabled
+    telemetry_disabled = any([
+        os.environ.get('PRAISONAI_TELEMETRY_DISABLED', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('PRAISONAI_DISABLE_TELEMETRY', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('DO_NOT_TRACK', '').lower() in ('true', '1', 'yes'),
+    ])
+
+    if not telemetry_disabled:
+        try:
+            # Defer the actual instrumentation to avoid circular imports
+            # This will be called when get_telemetry() is first accessed
+            _initialized = True
+        except Exception:
+            # Silently fail if there are any issues
+            pass
+
+
+# No need for lazy auto-instrumentation here since main __init__.py handles it
+
+
+# Initialize atexit handler early
+_ensure_atexit()
```