pygeai 0.6.0b11__py3-none-any.whl → 0.6.0b12__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- pygeai/_docs/source/content/ai_lab/cli.rst +4 -4
- pygeai/_docs/source/content/ai_lab/models.rst +169 -35
- pygeai/_docs/source/content/ai_lab/runner.rst +2 -2
- pygeai/_docs/source/content/ai_lab/spec.rst +9 -9
- pygeai/_docs/source/content/ai_lab/usage.rst +34 -34
- pygeai/_docs/source/content/ai_lab.rst +1 -1
- pygeai/_docs/source/content/analytics.rst +598 -0
- pygeai/_docs/source/content/api_reference/chat.rst +428 -2
- pygeai/_docs/source/content/api_reference/embeddings.rst +1 -1
- pygeai/_docs/source/content/api_reference/project.rst +184 -0
- pygeai/_docs/source/content/api_reference/rag.rst +2 -2
- pygeai/_docs/source/content/authentication.rst +295 -0
- pygeai/_docs/source/content/cli.rst +79 -2
- pygeai/_docs/source/content/debugger.rst +1 -1
- pygeai/_docs/source/content/migration.rst +19 -2
- pygeai/_docs/source/index.rst +2 -0
- pygeai/_docs/source/pygeai.analytics.rst +53 -0
- pygeai/_docs/source/pygeai.cli.commands.rst +8 -0
- pygeai/_docs/source/pygeai.rst +1 -0
- pygeai/_docs/source/pygeai.tests.analytics.rst +45 -0
- pygeai/_docs/source/pygeai.tests.auth.rst +8 -0
- pygeai/_docs/source/pygeai.tests.rst +1 -1
- pygeai/analytics/__init__.py +0 -0
- pygeai/analytics/clients.py +505 -0
- pygeai/analytics/endpoints.py +35 -0
- pygeai/analytics/managers.py +606 -0
- pygeai/analytics/mappers.py +207 -0
- pygeai/analytics/responses.py +240 -0
- pygeai/cli/commands/analytics.py +525 -0
- pygeai/cli/commands/base.py +16 -0
- pygeai/cli/commands/common.py +28 -24
- pygeai/cli/commands/migrate.py +75 -6
- pygeai/cli/commands/organization.py +265 -0
- pygeai/cli/commands/validators.py +144 -1
- pygeai/cli/error_handler.py +41 -6
- pygeai/cli/geai.py +99 -16
- pygeai/cli/parsers.py +75 -31
- pygeai/cli/texts/help.py +75 -6
- pygeai/core/base/clients.py +18 -4
- pygeai/core/base/session.py +46 -7
- pygeai/core/common/config.py +25 -2
- pygeai/core/common/exceptions.py +64 -1
- pygeai/core/services/rest.py +20 -2
- pygeai/evaluation/clients.py +5 -3
- pygeai/lab/agents/clients.py +3 -3
- pygeai/lab/agents/endpoints.py +2 -2
- pygeai/lab/agents/mappers.py +50 -2
- pygeai/lab/clients.py +5 -2
- pygeai/lab/managers.py +7 -9
- pygeai/lab/models.py +70 -2
- pygeai/lab/tools/clients.py +1 -59
- pygeai/migration/__init__.py +3 -1
- pygeai/migration/strategies.py +72 -3
- pygeai/organization/clients.py +110 -1
- pygeai/organization/endpoints.py +11 -7
- pygeai/organization/managers.py +134 -2
- pygeai/organization/mappers.py +28 -2
- pygeai/organization/responses.py +11 -1
- pygeai/tests/analytics/__init__.py +0 -0
- pygeai/tests/analytics/test_clients.py +86 -0
- pygeai/tests/analytics/test_managers.py +94 -0
- pygeai/tests/analytics/test_mappers.py +84 -0
- pygeai/tests/analytics/test_responses.py +73 -0
- pygeai/tests/auth/test_oauth.py +172 -0
- pygeai/tests/cli/commands/test_migrate.py +14 -1
- pygeai/tests/cli/commands/test_organization.py +69 -1
- pygeai/tests/cli/test_error_handler.py +4 -4
- pygeai/tests/cli/test_geai_driver.py +1 -1
- pygeai/tests/lab/agents/test_mappers.py +128 -1
- pygeai/tests/lab/test_models.py +2 -0
- pygeai/tests/lab/tools/test_clients.py +2 -31
- pygeai/tests/organization/test_clients.py +180 -1
- pygeai/tests/organization/test_managers.py +40 -0
- pygeai/tests/snippets/analytics/__init__.py +0 -0
- pygeai/tests/snippets/analytics/get_agent_usage_per_user.py +16 -0
- pygeai/tests/snippets/analytics/get_agents_created_and_modified.py +11 -0
- pygeai/tests/snippets/analytics/get_average_cost_per_request.py +10 -0
- pygeai/tests/snippets/analytics/get_overall_error_rate.py +10 -0
- pygeai/tests/snippets/analytics/get_top_10_agents_by_requests.py +12 -0
- pygeai/tests/snippets/analytics/get_total_active_users.py +10 -0
- pygeai/tests/snippets/analytics/get_total_cost.py +10 -0
- pygeai/tests/snippets/analytics/get_total_requests_per_day.py +12 -0
- pygeai/tests/snippets/analytics/get_total_tokens.py +12 -0
- pygeai/tests/snippets/chat/get_response_complete_example.py +67 -0
- pygeai/tests/snippets/chat/get_response_with_instructions.py +19 -0
- pygeai/tests/snippets/chat/get_response_with_metadata.py +24 -0
- pygeai/tests/snippets/chat/get_response_with_parallel_tools.py +58 -0
- pygeai/tests/snippets/chat/get_response_with_reasoning.py +21 -0
- pygeai/tests/snippets/chat/get_response_with_store.py +38 -0
- pygeai/tests/snippets/chat/get_response_with_truncation.py +24 -0
- pygeai/tests/snippets/lab/agents/create_agent_with_permissions.py +39 -0
- pygeai/tests/snippets/lab/agents/create_agent_with_properties.py +46 -0
- pygeai/tests/snippets/lab/agents/get_agent_with_new_fields.py +62 -0
- pygeai/tests/snippets/lab/agents/update_agent_properties.py +50 -0
- pygeai/tests/snippets/organization/add_project_member.py +10 -0
- pygeai/tests/snippets/organization/add_project_member_batch.py +44 -0
- {pygeai-0.6.0b11.dist-info → pygeai-0.6.0b12.dist-info}/METADATA +1 -1
- {pygeai-0.6.0b11.dist-info → pygeai-0.6.0b12.dist-info}/RECORD +102 -92
- pygeai/_docs/source/pygeai.tests.snippets.assistants.data_analyst.rst +0 -37
- pygeai/_docs/source/pygeai.tests.snippets.assistants.rag.rst +0 -85
- pygeai/_docs/source/pygeai.tests.snippets.assistants.rst +0 -78
- pygeai/_docs/source/pygeai.tests.snippets.auth.rst +0 -10
- pygeai/_docs/source/pygeai.tests.snippets.chat.rst +0 -125
- pygeai/_docs/source/pygeai.tests.snippets.dbg.rst +0 -45
- pygeai/_docs/source/pygeai.tests.snippets.embeddings.rst +0 -61
- pygeai/_docs/source/pygeai.tests.snippets.evaluation.dataset.rst +0 -197
- pygeai/_docs/source/pygeai.tests.snippets.evaluation.plan.rst +0 -133
- pygeai/_docs/source/pygeai.tests.snippets.evaluation.result.rst +0 -37
- pygeai/_docs/source/pygeai.tests.snippets.evaluation.rst +0 -20
- pygeai/_docs/source/pygeai.tests.snippets.extras.rst +0 -37
- pygeai/_docs/source/pygeai.tests.snippets.files.rst +0 -53
- pygeai/_docs/source/pygeai.tests.snippets.gam.rst +0 -21
- pygeai/_docs/source/pygeai.tests.snippets.lab.agents.rst +0 -93
- pygeai/_docs/source/pygeai.tests.snippets.lab.processes.jobs.rst +0 -21
- pygeai/_docs/source/pygeai.tests.snippets.lab.processes.kbs.rst +0 -45
- pygeai/_docs/source/pygeai.tests.snippets.lab.processes.rst +0 -46
- pygeai/_docs/source/pygeai.tests.snippets.lab.rst +0 -82
- pygeai/_docs/source/pygeai.tests.snippets.lab.samples.rst +0 -21
- pygeai/_docs/source/pygeai.tests.snippets.lab.strategies.rst +0 -45
- pygeai/_docs/source/pygeai.tests.snippets.lab.tools.rst +0 -85
- pygeai/_docs/source/pygeai.tests.snippets.lab.use_cases.rst +0 -117
- pygeai/_docs/source/pygeai.tests.snippets.migrate.rst +0 -10
- pygeai/_docs/source/pygeai.tests.snippets.organization.rst +0 -109
- pygeai/_docs/source/pygeai.tests.snippets.rag.rst +0 -85
- pygeai/_docs/source/pygeai.tests.snippets.rerank.rst +0 -21
- pygeai/_docs/source/pygeai.tests.snippets.rst +0 -32
- pygeai/_docs/source/pygeai.tests.snippets.secrets.rst +0 -10
- pygeai/_docs/source/pygeai.tests.snippets.usage_limit.rst +0 -77
- {pygeai-0.6.0b11.dist-info → pygeai-0.6.0b12.dist-info}/WHEEL +0 -0
- {pygeai-0.6.0b11.dist-info → pygeai-0.6.0b12.dist-info}/entry_points.txt +0 -0
- {pygeai-0.6.0b11.dist-info → pygeai-0.6.0b12.dist-info}/licenses/LICENSE +0 -0
- {pygeai-0.6.0b11.dist-info → pygeai-0.6.0b12.dist-info}/top_level.txt +0 -0

pygeai/tests/snippets/chat/get_response_complete_example.py
@@ -0,0 +1,67 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Comprehensive usage of get_response with all major parameters
+client = ChatClient()
+
+model = "openai/gpt-4-turbo-preview"
+input_text = "Analyze the benefits of renewable energy and suggest implementation strategies"
+
+# Define a tool for additional data retrieval
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_energy_stats",
+            "description": "Get statistics about renewable energy adoption",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "country": {
+                        "type": "string",
+                        "description": "Country name"
+                    },
+                    "year": {
+                        "type": "integer",
+                        "description": "Year for statistics"
+                    }
+                },
+                "required": ["country"]
+            }
+        }
+    }
+]
+
+# Comprehensive configuration
+response = client.get_response(
+    model=model,
+    input=input_text,
+    instructions="You are an energy policy expert. Provide detailed, data-driven analysis with actionable recommendations.",
+    tools=tools,
+    tool_choice="auto",  # Let the model decide when to use tools
+    parallel_tool_calls=True,
+    temperature=0.7,
+    max_output_tokens=2000,
+    top_p=0.9,
+    metadata={
+        "request_id": "energy_analysis_001",
+        "department": "sustainability",
+        "priority": "high"
+    },
+    user="analyst_john_doe",
+    reasoning={
+        "effort": "high"  # Request thorough reasoning
+    },
+    truncation="auto",
+    store=True  # Store for future reference
+)
+
+print("=== Comprehensive Response ===")
+print(response)
+
+# Extract key information if response is a dict
+if isinstance(response, dict):
+    print("\n=== Response Metadata ===")
+    if "usage" in response:
+        print(f"Tokens used: {response['usage']}")
+    if "model" in response:
+        print(f"Model used: {response['model']}")

pygeai/tests/snippets/chat/get_response_with_instructions.py
@@ -0,0 +1,19 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Using get_response with custom instructions
+client = ChatClient()
+
+model = "openai/gpt-4-turbo-preview"
+input_text = "Tell me about machine learning"
+
+# Using instructions to customize the model's behavior
+response = client.get_response(
+    model=model,
+    input=input_text,
+    instructions="You are an expert educator who explains complex topics in simple terms suitable for beginners. Always use analogies and examples.",
+    temperature=0.8,
+    max_output_tokens=1500
+)
+
+print("Response with custom instructions:")
+print(response)

pygeai/tests/snippets/chat/get_response_with_metadata.py
@@ -0,0 +1,24 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Using get_response with metadata and user identification
+client = ChatClient()
+
+model = "openai/gpt-4-turbo-preview"
+input_text = "What are the benefits of using AI in healthcare?"
+
+# Using metadata to pass additional context
+response = client.get_response(
+    model=model,
+    input=input_text,
+    metadata={
+        "request_id": "req_12345",
+        "department": "healthcare",
+        "priority": "high"
+    },
+    user="user_jane_doe",  # User identifier for tracking
+    temperature=0.7,
+    max_output_tokens=1500
+)
+
+print("Response with metadata:")
+print(response)

pygeai/tests/snippets/chat/get_response_with_parallel_tools.py
@@ -0,0 +1,58 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Using get_response with parallel tool calls
+client = ChatClient()
+
+model = "openai/gpt-4-turbo-preview"
+input_text = "What's the weather in New York and what time is it in Tokyo?"
+
+# Define tools
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_weather",
+            "description": "Get current weather for a location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "City name"
+                    }
+                },
+                "required": ["location"]
+            }
+        }
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "get_time",
+            "description": "Get current time for a location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "City name"
+                    }
+                },
+                "required": ["location"]
+            }
+        }
+    }
+]
+
+# Enable parallel tool calls to execute both tools simultaneously
+response = client.get_response(
+    model=model,
+    input=input_text,
+    tools=tools,
+    parallel_tool_calls=True,  # Allow multiple tools to be called in parallel
+    temperature=0.7,
+    max_output_tokens=1000
+)
+
+print("Response with parallel tool calls:")
+print(response)

pygeai/tests/snippets/chat/get_response_with_reasoning.py
@@ -0,0 +1,21 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Using get_response with reasoning parameter
+client = ChatClient()
+
+model = "openai/o1-preview"
+input_text = "Explain the concept of quantum entanglement in simple terms"
+
+# Using reasoning parameter to control reasoning effort
+response = client.get_response(
+    model=model,
+    input=input_text,
+    reasoning={
+        "type": "default",  # Can be "default", "high", "medium", "low"
+        "effort": "medium"
+    },
+    max_output_tokens=2000
+)
+
+print("Response with reasoning:")
+print(response)

pygeai/tests/snippets/chat/get_response_with_store.py
@@ -0,0 +1,38 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Using get_response with store parameter to persist conversation
+client = ChatClient()
+
+model = "openai/gpt-4-turbo-preview"
+input_text = "Remember that my favorite color is blue. What did I just tell you?"
+
+# Use store parameter to enable conversation storage
+response = client.get_response(
+    model=model,
+    input=input_text,
+    store=True,  # Store this conversation for future reference
+    user="user_123",  # Associate with specific user
+    metadata={
+        "session_id": "session_abc",
+        "conversation_type": "preference_learning"
+    },
+    temperature=0.7,
+    max_output_tokens=500
+)
+
+print("Response with conversation storage:")
+print(response)
+
+# Follow-up request can reference stored context
+follow_up = client.get_response(
+    model=model,
+    input="What's my favorite color?",
+    store=True,
+    user="user_123",
+    metadata={
+        "session_id": "session_abc"
+    }
+)
+
+print("\nFollow-up response:")
+print(follow_up)

pygeai/tests/snippets/chat/get_response_with_truncation.py
@@ -0,0 +1,24 @@
+from pygeai.chat.clients import ChatClient
+
+# Example: Using get_response with truncation strategy
+client = ChatClient()
+
+model = "openai/gpt-4-turbo-preview"
+input_text = """
+This is a very long input text that might exceed the context window.
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod
+tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
+quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
+""" * 100  # Simulate long input
+
+# Using truncation to handle long inputs
+response = client.get_response(
+    model=model,
+    input=input_text,
+    truncation="auto",  # Options: "auto", "disabled"
+    max_output_tokens=1000,
+    temperature=0.7
+)
+
+print("Response with truncation handling:")
+print(response)

pygeai/tests/snippets/lab/agents/create_agent_with_permissions.py
@@ -0,0 +1,39 @@
+from pygeai.lab.managers import AILabManager
+from pygeai.lab.models import Agent, AgentData, Prompt, LlmConfig, Model, Permission
+
+# Create an agent with permissions configured
+agent = Agent(
+    name="Agent_With_Permissions",
+    description="Agent demonstrating permissions configuration",
+    access_scope="private",
+    is_draft=True,
+    sharing_scope="organization",
+    permissions=Permission(
+        allow_chat_sharing=True,
+        allow_external_execution=False
+    ),
+    agent_data=AgentData(
+        prompt=Prompt(
+            instructions="You are a collaborative assistant that can be shared within the organization",
+            inputs=["user_query"]
+        ),
+        llm_config=LlmConfig(max_tokens=2000),
+        strategy_name="Dynamic Prompting",
+        models=[Model(name="gpt-4-turbo-preview")]
+    )
+)
+
+manager = AILabManager()
+result = manager.create_agent(agent=agent, automatic_publish=False)
+
+print(f"Created agent: {result.name}")
+print(f"Agent ID: {result.id}")
+print(f"Sharing scope: {result.sharing_scope}")
+if result.permissions:
+    print(f"\nPermissions:")
+    print(f" - Allow chat sharing: {result.permissions.allow_chat_sharing}")
+    print(f" - Allow external execution: {result.permissions.allow_external_execution}")
+if result.effective_permissions:
+    print(f"\nEffective Permissions:")
+    print(f" - Allow chat sharing: {result.effective_permissions.allow_chat_sharing}")
+    print(f" - Allow external execution: {result.effective_permissions.allow_external_execution}")

pygeai/tests/snippets/lab/agents/create_agent_with_properties.py
@@ -0,0 +1,46 @@
+from pygeai.lab.managers import AILabManager
+from pygeai.lab.models import Agent, AgentData, Prompt, LlmConfig, Model, Property
+
+# Create an agent with properties
+agent = Agent(
+    name="Agent_With_Properties",
+    description="Agent demonstrating the properties field",
+    access_scope="private",
+    is_draft=True,
+    agent_data=AgentData(
+        prompt=Prompt(
+            instructions="You are a helpful assistant with custom properties",
+            inputs=["user_query"]
+        ),
+        llm_config=LlmConfig(max_tokens=2000),
+        strategy_name="Dynamic Prompting",
+        models=[Model(name="gpt-4-turbo-preview")],
+        properties=[
+            Property(
+                data_type="string",
+                key="environment",
+                value="production"
+            ),
+            Property(
+                data_type="number",
+                key="max_retries",
+                value="3"
+            ),
+            Property(
+                data_type="boolean",
+                key="enable_logging",
+                value="true"
+            )
+        ]
+    )
+)
+
+manager = AILabManager()
+result = manager.create_agent(agent=agent, automatic_publish=False)
+
+print(f"Created agent with {len(result.agent_data.properties) if result.agent_data and result.agent_data.properties else 0} properties")
+print(f"Agent ID: {result.id}")
+if result.agent_data and result.agent_data.properties:
+    print("\nProperties:")
+    for prop in result.agent_data.properties:
+        print(f" - {prop.key} ({prop.data_type}): {prop.value}")

pygeai/tests/snippets/lab/agents/get_agent_with_new_fields.py
@@ -0,0 +1,62 @@
+from pygeai.lab.managers import AILabManager
+
+# Get an agent and inspect all new fields
+manager = AILabManager()
+
+# First, list agents to get an ID
+agents = manager.get_agent_list()
+if len(agents) == 0:
+    print("No agents found. Please create an agent first.")
+    exit(1)
+
+# Get the first agent
+agent_id = agents[0].id
+agent = manager.get_agent(agent_id)
+
+print(f"Agent: {agent.name} (ID: {agent.id})")
+print(f"Status: {agent.status}")
+print(f"Is Draft: {agent.is_draft}")
+print(f"\n=== New Fields ===")
+
+# Sharing scope
+print(f"\nSharing Scope: {agent.sharing_scope}")
+
+# Permissions
+if agent.permissions:
+    print(f"\nPermissions:")
+    print(f" - Allow chat sharing: {agent.permissions.allow_chat_sharing}")
+    print(f" - Allow external execution: {agent.permissions.allow_external_execution}")
+else:
+    print(f"\nPermissions: None")
+
+# Effective permissions
+if agent.effective_permissions:
+    print(f"\nEffective Permissions:")
+    print(f" - Allow chat sharing: {agent.effective_permissions.allow_chat_sharing}")
+    print(f" - Allow external execution: {agent.effective_permissions.allow_external_execution}")
+else:
+    print(f"\nEffective Permissions: None")
+
+# Agent data properties
+if agent.agent_data:
+    print(f"\nAgent Data:")
+    print(f" - Strategy Name: {agent.agent_data.strategy_name}")
+
+    if agent.agent_data.properties:
+        print(f" - Properties ({len(agent.agent_data.properties)}):")
+        for prop in agent.agent_data.properties:
+            print(f" * {prop.key} ({prop.data_type}): {prop.value}")
+    else:
+        print(f" - Properties: None")
+
+    if agent.agent_data.models:
+        print(f" - Models: {len(agent.agent_data.models)} configured")
+    else:
+        print(f" - Models: None")
+
+    if agent.agent_data.resource_pools:
+        print(f" - Resource Pools: {len(agent.agent_data.resource_pools)} configured")
+    else:
+        print(f" - Resource Pools: None")
+else:
+    print(f"\nAgent Data: None")

pygeai/tests/snippets/lab/agents/update_agent_properties.py
@@ -0,0 +1,50 @@
+from pygeai.lab.managers import AILabManager
+from pygeai.lab.models import Property
+
+# Update an agent's properties
+manager = AILabManager()
+
+# Get an existing agent
+agents = manager.get_agent_list()
+if len(agents) == 0:
+    print("No agents found. Please create an agent first.")
+    exit(1)
+
+agent = manager.get_agent(agents[0].id)
+print(f"Updating agent: {agent.name}")
+
+# Update agent data properties
+if agent.agent_data:
+    # Add or update properties
+    agent.agent_data.properties = [
+        Property(
+            data_type="string",
+            key="version",
+            value="2.0.0"
+        ),
+        Property(
+            data_type="string",
+            key="region",
+            value="us-east-1"
+        ),
+        Property(
+            data_type="boolean",
+            key="auto_scale",
+            value="true"
+        )
+    ]
+
+    # Update the agent
+    updated_agent = manager.update_agent(agent)
+
+    print(f"\nAgent updated successfully!")
+    print(f"Agent ID: {updated_agent.id}")
+
+    if updated_agent.agent_data and updated_agent.agent_data.properties:
+        print(f"\nUpdated Properties ({len(updated_agent.agent_data.properties)}):")
+        for prop in updated_agent.agent_data.properties:
+            print(f" - {prop.key} ({prop.data_type}): {prop.value}")
+    else:
+        print("\nNo properties found after update")
+else:
+    print("Agent has no agent_data, cannot update properties")

pygeai/tests/snippets/organization/add_project_member.py
@@ -0,0 +1,10 @@
+from pygeai.organization.managers import OrganizationManager
+
+manager = OrganizationManager()
+
+response = manager.add_project_member(
+    project_id="1956c032-3c66-4435-acb8-6a06e52f819f",
+    user_email="newuser@example.com",
+    roles=["Project member", "Project administrator"]
+)
+print(f"response: {response}")

pygeai/tests/snippets/organization/add_project_member_batch.py
@@ -0,0 +1,44 @@
+import csv
+from pygeai.organization.clients import OrganizationClient
+
+client = OrganizationClient()
+
+batch_file = "project_members.csv"
+
+successful = 0
+failed = 0
+errors = []
+
+with open(batch_file, 'r') as f:
+    csv_reader = csv.reader(f)
+    for line_num, row in enumerate(csv_reader, start=1):
+        if len(row) < 3:
+            error_msg = f"Line {line_num}: Invalid format - expected at least 3 columns (project_id, email, role1, ...)"
+            errors.append(error_msg)
+            failed += 1
+            continue
+
+        project_id = row[0].strip()
+        email = row[1].strip()
+        roles = [r.strip() for r in row[2:] if r.strip()]
+
+        if not (project_id and email and roles):
+            error_msg = f"Line {line_num}: Missing required fields"
+            errors.append(error_msg)
+            failed += 1
+            continue
+
+        try:
+            result = client.add_project_member(project_id, email, roles)
+            print(f"Successfully added {email} to project {project_id}")
+            successful += 1
+        except Exception as e:
+            error_msg = f"Line {line_num}: Failed to add {email} to project {project_id}: {str(e)}"
+            errors.append(error_msg)
+            failed += 1
+
+print(f"\nBatch processing complete: {successful} successful, {failed} failed")
+if errors:
+    print("\nErrors:")
+    for error in errors:
+        print(f" - {error}")