pygeai 0.6.0b3__py3-none-any.whl → 0.6.0b6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygeai/_docs/Makefile +20 -0
- pygeai/_docs/make.bat +35 -0
- pygeai/_docs/source/conf.py +45 -0
- pygeai/_docs/source/content/ai_lab/cli.rst +747 -0
- pygeai/_docs/source/content/ai_lab/models.rst +1600 -0
- pygeai/_docs/source/content/ai_lab/runner.rst +253 -0
- pygeai/_docs/source/content/ai_lab/spec.rst +431 -0
- pygeai/_docs/source/content/ai_lab/usage.rst +1011 -0
- pygeai/_docs/source/content/ai_lab.rst +102 -0
- pygeai/_docs/source/content/api_reference/chat.rst +328 -0
- pygeai/_docs/source/content/api_reference/embeddings.rst +124 -0
- pygeai/_docs/source/content/api_reference/project.rst +552 -0
- pygeai/_docs/source/content/api_reference/rag.rst +710 -0
- pygeai/_docs/source/content/api_reference.rst +46 -0
- pygeai/_docs/source/content/chat_gui.rst +121 -0
- pygeai/_docs/source/content/cli.rst +126 -0
- pygeai/_docs/source/content/debugger.rst +188 -0
- pygeai/_docs/source/content/intro.rst +67 -0
- pygeai/_docs/source/content/modules.rst +7 -0
- pygeai/_docs/source/content/quickstart.rst +143 -0
- pygeai/_docs/source/content/samples.rst +394 -0
- pygeai/_docs/source/index.rst +21 -0
- pygeai/_docs/source/modules.rst +7 -0
- pygeai/_docs/source/pygeai.admin.rst +29 -0
- pygeai/_docs/source/pygeai.assistant.data.rst +21 -0
- pygeai/_docs/source/pygeai.assistant.data_analyst.rst +29 -0
- pygeai/_docs/source/pygeai.assistant.rag.rst +53 -0
- pygeai/_docs/source/pygeai.assistant.rst +55 -0
- pygeai/_docs/source/pygeai.chat.rst +69 -0
- pygeai/_docs/source/pygeai.cli.commands.flows.rst +10 -0
- pygeai/_docs/source/pygeai.cli.commands.lab.rst +53 -0
- pygeai/_docs/source/pygeai.cli.commands.rst +198 -0
- pygeai/_docs/source/pygeai.cli.rst +54 -0
- pygeai/_docs/source/pygeai.cli.texts.rst +21 -0
- pygeai/_docs/source/pygeai.core.base.rst +53 -0
- pygeai/_docs/source/pygeai.core.common.rst +37 -0
- pygeai/_docs/source/pygeai.core.embeddings.rst +61 -0
- pygeai/_docs/source/pygeai.core.feedback.rst +37 -0
- pygeai/_docs/source/pygeai.core.files.rst +61 -0
- pygeai/_docs/source/pygeai.core.llm.rst +29 -0
- pygeai/_docs/source/pygeai.core.plugins.rst +37 -0
- pygeai/_docs/source/pygeai.core.rerank.rst +53 -0
- pygeai/_docs/source/pygeai.core.rst +63 -0
- pygeai/_docs/source/pygeai.core.secrets.rst +29 -0
- pygeai/_docs/source/pygeai.core.services.llm.rst +29 -0
- pygeai/_docs/source/pygeai.core.services.rst +37 -0
- pygeai/_docs/source/pygeai.core.utils.rst +21 -0
- pygeai/_docs/source/pygeai.dbg.rst +21 -0
- pygeai/_docs/source/pygeai.evaluation.dataset.rst +29 -0
- pygeai/_docs/source/pygeai.evaluation.plan.rst +29 -0
- pygeai/_docs/source/pygeai.evaluation.result.rst +29 -0
- pygeai/_docs/source/pygeai.evaluation.rst +31 -0
- pygeai/_docs/source/pygeai.flows.rst +29 -0
- pygeai/_docs/source/pygeai.gam.rst +29 -0
- pygeai/_docs/source/pygeai.health.rst +29 -0
- pygeai/_docs/source/pygeai.lab.agents.rst +37 -0
- pygeai/_docs/source/pygeai.lab.processes.rst +37 -0
- pygeai/_docs/source/pygeai.lab.rst +65 -0
- pygeai/_docs/source/pygeai.lab.spec.rst +29 -0
- pygeai/_docs/source/pygeai.lab.strategies.rst +37 -0
- pygeai/_docs/source/pygeai.lab.tools.rst +37 -0
- pygeai/_docs/source/pygeai.man.man1.rst +10 -0
- pygeai/_docs/source/pygeai.man.rst +18 -0
- pygeai/_docs/source/pygeai.migration.rst +29 -0
- pygeai/_docs/source/pygeai.organization.limits.rst +45 -0
- pygeai/_docs/source/pygeai.organization.rst +61 -0
- pygeai/_docs/source/pygeai.proxy.rst +53 -0
- pygeai/_docs/source/pygeai.rst +33 -0
- pygeai/_docs/source/pygeai.tests.admin.rst +21 -0
- pygeai/_docs/source/pygeai.tests.assistants.rag.rst +37 -0
- pygeai/_docs/source/pygeai.tests.assistants.rst +45 -0
- pygeai/_docs/source/pygeai.tests.chat.rst +45 -0
- pygeai/_docs/source/pygeai.tests.cli.commands.lab.rst +37 -0
- pygeai/_docs/source/pygeai.tests.cli.commands.rst +149 -0
- pygeai/_docs/source/pygeai.tests.cli.docker.rst +10 -0
- pygeai/_docs/source/pygeai.tests.cli.rst +30 -0
- pygeai/_docs/source/pygeai.tests.core.base.data.rst +29 -0
- pygeai/_docs/source/pygeai.tests.core.base.rst +37 -0
- pygeai/_docs/source/pygeai.tests.core.common.data.rst +10 -0
- pygeai/_docs/source/pygeai.tests.core.common.rst +37 -0
- pygeai/_docs/source/pygeai.tests.core.embeddings.rst +21 -0
- pygeai/_docs/source/pygeai.tests.core.feedback.rst +21 -0
- pygeai/_docs/source/pygeai.tests.core.files.rst +45 -0
- pygeai/_docs/source/pygeai.tests.core.llm.rst +21 -0
- pygeai/_docs/source/pygeai.tests.core.rerank.rst +37 -0
- pygeai/_docs/source/pygeai.tests.core.rst +38 -0
- pygeai/_docs/source/pygeai.tests.core.secrets.rst +21 -0
- pygeai/_docs/source/pygeai.tests.core.services.rst +21 -0
- pygeai/_docs/source/pygeai.tests.core.utils.rst +21 -0
- pygeai/_docs/source/pygeai.tests.dbg.rst +21 -0
- pygeai/_docs/source/pygeai.tests.gam.rst +21 -0
- pygeai/_docs/source/pygeai.tests.health.rst +21 -0
- pygeai/_docs/source/pygeai.tests.integration.assistants.rag.rst +21 -0
- pygeai/_docs/source/pygeai.tests.integration.assistants.rst +18 -0
- pygeai/_docs/source/pygeai.tests.integration.chat.rst +21 -0
- pygeai/_docs/source/pygeai.tests.integration.lab.agents.rst +69 -0
- pygeai/_docs/source/pygeai.tests.integration.lab.processes.rst +69 -0
- pygeai/_docs/source/pygeai.tests.integration.lab.reasoning_strategies.rst +37 -0
- pygeai/_docs/source/pygeai.tests.integration.lab.rst +21 -0
- pygeai/_docs/source/pygeai.tests.integration.lab.tools.rst +77 -0
- pygeai/_docs/source/pygeai.tests.integration.rst +20 -0
- pygeai/_docs/source/pygeai.tests.lab.agents.rst +29 -0
- pygeai/_docs/source/pygeai.tests.lab.processes.rst +29 -0
- pygeai/_docs/source/pygeai.tests.lab.rst +49 -0
- pygeai/_docs/source/pygeai.tests.lab.spec.rst +29 -0
- pygeai/_docs/source/pygeai.tests.lab.strategies.rst +29 -0
- pygeai/_docs/source/pygeai.tests.lab.tools.rst +29 -0
- pygeai/_docs/source/pygeai.tests.migration.rst +29 -0
- pygeai/_docs/source/pygeai.tests.organization.limits.rst +29 -0
- pygeai/_docs/source/pygeai.tests.organization.rst +45 -0
- pygeai/_docs/source/pygeai.tests.proxy.rst +61 -0
- pygeai/_docs/source/pygeai.tests.rst +31 -0
- pygeai/_docs/source/pygeai.tests.snippets.assistants.data_analyst.rst +37 -0
- pygeai/_docs/source/pygeai.tests.snippets.assistants.rag.rst +85 -0
- pygeai/_docs/source/pygeai.tests.snippets.assistants.rst +78 -0
- pygeai/_docs/source/pygeai.tests.snippets.chat.rst +85 -0
- pygeai/_docs/source/pygeai.tests.snippets.embeddings.rst +21 -0
- pygeai/_docs/source/pygeai.tests.snippets.evaluation.rst +10 -0
- pygeai/_docs/source/pygeai.tests.snippets.extras.rst +37 -0
- pygeai/_docs/source/pygeai.tests.snippets.files.rst +53 -0
- pygeai/_docs/source/pygeai.tests.snippets.gam.rst +21 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.agents.rst +93 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.processes.jobs.rst +21 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.processes.kbs.rst +45 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.processes.rst +46 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.rst +82 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.samples.rst +21 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.strategies.rst +45 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.tools.rst +85 -0
- pygeai/_docs/source/pygeai.tests.snippets.lab.use_cases.rst +117 -0
- pygeai/_docs/source/pygeai.tests.snippets.migrate.rst +10 -0
- pygeai/_docs/source/pygeai.tests.snippets.organization.rst +69 -0
- pygeai/_docs/source/pygeai.tests.snippets.rag.rst +85 -0
- pygeai/_docs/source/pygeai.tests.snippets.rerank.rst +21 -0
- pygeai/_docs/source/pygeai.tests.snippets.rst +30 -0
- pygeai/_docs/source/pygeai.tests.snippets.secrets.rst +10 -0
- pygeai/_docs/source/pygeai.tests.snippets.usage_limit.rst +77 -0
- pygeai/cli/commands/base.py +9 -9
- pygeai/cli/commands/docs.py +2 -2
- {pygeai-0.6.0b3.dist-info → pygeai-0.6.0b6.dist-info}/METADATA +1 -1
- {pygeai-0.6.0b3.dist-info → pygeai-0.6.0b6.dist-info}/RECORD +145 -8
- {pygeai-0.6.0b3.dist-info → pygeai-0.6.0b6.dist-info}/WHEEL +0 -0
- {pygeai-0.6.0b3.dist-info → pygeai-0.6.0b6.dist-info}/entry_points.txt +0 -0
- {pygeai-0.6.0b3.dist-info → pygeai-0.6.0b6.dist-info}/licenses/LICENSE +0 -0
- {pygeai-0.6.0b3.dist-info → pygeai-0.6.0b6.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
The Lab
|
|
2
|
+
=======
|
|
3
|
+
|
|
4
|
+
The Globant Enterprise AI Lab is a comprehensive framework designed to create, manage, and orchestrate autonomous AI agents capable of addressing complex tasks with minimal human intervention. It provides a structured environment for defining agents, their associated tools, reasoning strategies, and workflows, all integrated within a cohesive ecosystem. The PyGEAI SDK serves as the primary interface for developers to interact with the Lab, offering a Python-native experience through the `lab` module, which enables seamless management of the Lab’s resources and operations.
|
|
5
|
+
|
|
6
|
+
Overview
|
|
7
|
+
--------
|
|
8
|
+
|
|
9
|
+
The Globant Enterprise AI Lab enables the creation of intelligent AI agents, from collaborative co-pilots to fully
|
|
10
|
+
autonomous systems, capable of executing intricate tasks. Its modular design ensures flexibility, allowing developers
|
|
11
|
+
to define agent behaviors, orchestrate collaborative workflows, and manage knowledge artifacts. The PyGEAI SDK
|
|
12
|
+
streamlines these processes by providing an intuitive, Python-centric interface that abstracts the Lab’s underlying
|
|
13
|
+
APIs, making it accessible to developers familiar with Python conventions.
|
|
14
|
+
|
|
15
|
+
The Lab’s core modules are:
|
|
16
|
+
|
|
17
|
+
- **Agents & Tools Repository**: A centralized hub for defining and managing agents and their resources, such as skills, tools, and external API integrations.
|
|
18
|
+
- **Agentic Flows**: A system for creating workflows that combine tasks, agents, and knowledge artifacts to achieve broader objectives.
|
|
19
|
+
- **Knowledge Base**: A repository for storing and organizing artifacts (e.g., documents, data outputs) that agents consume or produce during workflows.
|
|
20
|
+
- **Agent Runtime**: The execution environment where agents perform tasks, interact with artifacts, and respond to events within defined workflows.
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
Interacting with the Lab via PyGEAI SDK
|
|
24
|
+
---------------------------------------
|
|
25
|
+
|
|
26
|
+
The PyGEAI SDK’s `lab` module provides a streamlined interface for developers to engage with the Globant Enterprise AI Lab. Designed to align with Python conventions, it offers a command-line tool that facilitates interaction with the Lab’s resources, including agents, tools, reasoning strategies, processes, tasks, and runtime instances. The `lab` module supports a range of operations, ensuring developers can efficiently manage the Lab’s ecosystem.
|
|
27
|
+
|
|
28
|
+
Managing Agents
~~~~~~~~~~~~~~~
|
|
29
|
+
|
|
30
|
+
The `lab` module enables developers to define and manage AI agents within the Lab. Agents are entities configured with specific prompts, language models, and operational parameters to perform designated tasks. Through the `lab` module, developers can create agents with custom attributes, update their configurations, retrieve details, list available agents, publish revisions, share agents via links, or remove them as needed. This functionality allows for precise control over agent lifecycle and behavior within the Lab’s environment.
|
|
31
|
+
|
|
32
|
+
Configuring Tools
~~~~~~~~~~~~~~~~~
|
|
33
|
+
|
|
34
|
+
Tools extend agent capabilities by providing access to external APIs, built-in functions, or custom logic. The `lab` module supports the creation and management of tools, allowing developers to define tools with specific scopes (e.g., API-based or external), configure their parameters, and control their accessibility. Developers can list tools, retrieve tool details, update configurations, publish revisions, set parameters, or delete tools, ensuring tools are seamlessly integrated into the Lab’s workflows.
|
|
35
|
+
|
|
36
|
+
Defining Reasoning Strategies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
37
|
+
|
|
38
|
+
Reasoning strategies guide how agents process information and make decisions. The `lab` module allows developers to create and manage these strategies, specifying system prompts and access scopes to tailor agent reasoning. Developers can list available strategies, retrieve details, update configurations, and ensure strategies align with project requirements, enhancing agent performance within the Lab.
|
|
39
|
+
|
|
40
|
+
Orchestrating Processes
~~~~~~~~~~~~~~~~~~~~~~~
|
|
41
|
+
|
|
42
|
+
Processes in the Lab define workflows that combine agents, tasks, and knowledge artifacts to achieve complex objectives. The `lab` module facilitates process management by enabling developers to create processes, define their structure (including activities, signals, and sequence flows), and update configurations. Developers can list processes, retrieve details, publish revisions, or delete processes, providing full control over workflow orchestration within the Lab.
|
|
43
|
+
|
|
44
|
+
Managing Tasks
~~~~~~~~~~~~~~
|
|
45
|
+
|
|
46
|
+
Tasks are individual units of work within processes, assigned to agents for execution. The `lab` module supports task creation, allowing developers to specify task prompts, artifact types, and descriptions. Developers can list tasks, retrieve task details, update configurations, publish revisions, or delete tasks, ensuring tasks are effectively integrated into the Lab’s workflows.
|
|
47
|
+
|
|
48
|
+
Controlling Runtime Instances
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
49
|
+
|
|
50
|
+
The Lab’s runtime environment executes processes, where agents perform tasks and interact with artifacts. The `lab` module provides commands to manage runtime instances, enabling developers to start process instances, monitor their progress, retrieve instance details, access execution history, send signals to influence workflow, or abort instances as needed. This ensures dynamic control over the Lab’s operational execution.
|
|
51
|
+
|
|
52
|
+
Running Agents with the Runner
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
53
|
+
|
|
54
|
+
The `Runner` class in the `lab` module provides a direct interface for executing agent tasks asynchronously within the Lab’s runtime environment. It allows developers to run agents with flexible input formats—strings, `ChatMessage`, or `ChatMessageList`—and customizable LLM settings, enabling tailored interactions for testing or production use. The `Runner` simplifies agent execution by handling message processing and LLM configuration, returning a `ProviderResponse` object containing the agent’s response and metadata.
|
|
55
|
+
|
|
56
|
+
SDK Tools and Utilities
|
|
57
|
+
-----------------------
|
|
58
|
+
|
|
59
|
+
The PyGEAI SDK provides robust programmatic interfaces for interacting with the Globant Enterprise AI Lab, enabling developers to manage agents, tools, reasoning strategies, processes, tasks, and runtime instances directly within Python applications. Beyond the command-line interface, the SDK offers a high-level manager and low-level client classes, designed to integrate seamlessly into development workflows with structured, object-oriented access or flexible JSON-based interactions.
|
|
60
|
+
|
|
61
|
+
High-Level Interface: AILabManager
|
|
62
|
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
63
|
+
The AILabManager class serves as the primary high-level interface, offering a Pythonic, object-oriented approach to managing the Lab’s resources. It abstracts the underlying API complexity, mapping responses to structured Python objects such as Agent, Tool, ReasoningStrategy, AgenticProcess, Task, and ProcessInstance. This allows developers to work with strongly typed models, ensuring clarity and reducing errors when creating, updating, retrieving, or deleting Lab entities.
|
|
64
|
+
|
|
65
|
+
- Agent Management: Create, update, retrieve, list, publish, share, or delete agents using methods like create_agent, update_agent, get_agent, and delete_agent. Agents are represented as Agent objects, encapsulating properties like name, prompts, and LLM configurations.
|
|
66
|
+
- Tool Management: Define and manage tools with methods such as create_tool, update_tool, get_tool, list_tools, publish_tool_revision, and delete_tool. Tools are modeled as Tool objects, supporting API-based or custom configurations with parameters (ToolParameter).
|
|
67
|
+
- Reasoning Strategies: Configure agent reasoning with create_reasoning_strategy, update_reasoning_strategy, get_reasoning_strategy, and list_reasoning_strategies. Strategies are represented as ReasoningStrategy objects, defining system prompts and access scopes.
|
|
68
|
+
- Process Orchestration: Manage workflows through create_process, update_process, get_process, list_processes, publish_process_revision, and delete_process. Processes are encapsulated as AgenticProcess objects, detailing activities, signals, and sequence flows.
|
|
69
|
+
- Task Management: Create and manage tasks with create_task, update_task, get_task, list_tasks, publish_task_revision, and delete_task. Tasks are modeled as Task objects, specifying prompts and artifact types.
|
|
70
|
+
- Runtime Control: Start, monitor, and control process instances using start_instance, get_instance, list_process_instances, get_instance_history, send_user_signal, and abort_instance. Instances are represented as ProcessInstance objects, with execution details and thread information accessible via get_thread_information.
|
|
71
|
+
|
|
72
|
+
The AILabManager is initialized with an API key, base URL, and optional alias, providing a unified entry point for all Lab operations. Its methods handle error mapping (ErrorListResponse) and response validation, making it ideal for rapid development and integration into larger applications.
|
|
73
|
+
|
|
74
|
+
Low-Level Interface: Client Classes
|
|
75
|
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
76
|
+
For developers requiring fine-grained control or preferring to work directly with JSON responses, the SDK includes low-level client classes: AgentClient, ToolClient, ReasoningStrategyClient, and AgenticProcessClient. These clients interact with the Lab’s APIs without mapping responses to Python objects, returning raw JSON or text for maximum flexibility.
|
|
77
|
+
|
|
78
|
+
- AgentClient: Supports operations like create_agent, update_agent, get_agent, list_agents, publish_agent_revision, create_sharing_link, and delete_agent. It handles agent-specific API endpoints, passing parameters like project ID, agent name, prompts, and LLM configurations as dictionaries.
|
|
79
|
+
- ToolClient: Provides methods such as create_tool, update_tool, get_tool, list_tools, publish_tool_revision, get_parameter, set_parameter, and delete_tool. It manages tool configurations, including OpenAPI specifications and parameter lists, with validation for scopes and access levels.
|
|
80
|
+
- ReasoningStrategyClient: Includes create_reasoning_strategy, update_reasoning_strategy, get_reasoning_strategy, and list_reasoning_strategies, allowing direct manipulation of strategy definitions like system prompts and localized descriptions.
|
|
81
|
+
- AgenticProcessClient: Offers comprehensive process and task management with methods like create_process, update_process, get_process, list_processes, publish_process_revision, delete_process, create_task, update_task, get_task, list_tasks, publish_task_revision, delete_task, start_instance, get_instance, list_process_instances, get_instance_history, get_thread_information, send_user_signal, and abort_instance. It handles complex process structures and runtime operations in JSON format.
|
|
82
|
+
|
|
83
|
+
Each client is initialized with an API key and base URL, using a BaseClient for HTTP requests. They provide direct access to the Lab’s endpoints, enabling custom parsing or integration with external systems where object mapping is unnecessary.
|
|
84
|
+
|
|
85
|
+
Integration and Flexibility
|
|
86
|
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
87
|
+
Both the AILabManager and client classes are installable via pip install pygeai and support cross-platform development. The high-level AILabManager is suited for structured applications requiring type safety and ease of use, while the low-level clients cater to scenarios demanding raw API responses or custom workflows. Developers can combine these interfaces within the same project, leveraging AILabManager for rapid prototyping and clients for specialized tasks.
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
PyGEAI SDK - Lab components
|
|
91
|
+
---------------------------
|
|
92
|
+
|
|
93
|
+
.. toctree::
|
|
94
|
+
:maxdepth: 2
|
|
95
|
+
:caption: Contents:
|
|
96
|
+
|
|
97
|
+
ai_lab/models
|
|
98
|
+
ai_lab/runner
|
|
99
|
+
ai_lab/usage
|
|
100
|
+
ai_lab/cli
|
|
101
|
+
ai_lab/spec
|
|
102
|
+
|
|
@@ -0,0 +1,328 @@
|
|
|
1
|
+
Chat
|
|
2
|
+
====
|
|
3
|
+
|
|
4
|
+
Chat Completion
|
|
5
|
+
~~~~~~~~~~~~~~~
|
|
6
|
+
|
|
7
|
+
The GEAI SDK provides functionality to interact with the Globant Enterprise AI chat system, allowing users to generate chat completions using specified models and parameters. This can be achieved through the command line interface, the low-level service layer (ChatClient), or the high-level service layer (ChatManager). The `stream` parameter, which enables streaming responses, is supported in the command line and low-level service layer but not in the high-level service layer.
|
|
8
|
+
|
|
9
|
+
Command Line
|
|
10
|
+
^^^^^^^^^^^^
|
|
11
|
+
|
|
12
|
+
The `geai chat completion` command generates a chat completion based on the provided model and messages. Various flags allow customization of the response, such as streaming, temperature, and maximum tokens.
|
|
13
|
+
|
|
14
|
+
.. code-block:: shell
|
|
15
|
+
|
|
16
|
+
geai chat completion \
|
|
17
|
+
--model "saia:assistant:Welcome data Assistant 3" \
|
|
18
|
+
--messages '[{"role": "user", "content": "Hi, welcome to Globant Enterprise AI!!"}]' \
|
|
19
|
+
--temperature 0.7 \
|
|
20
|
+
--max-tokens 1000 \
|
|
21
|
+
--stream 1
|
|
22
|
+
|
|
23
|
+
To use a different API key alias for authentication:
|
|
24
|
+
|
|
25
|
+
.. code-block:: shell
|
|
26
|
+
|
|
27
|
+
geai --alias admin chat completion \
|
|
28
|
+
--model "saia:assistant:Welcome data Assistant 3" \
|
|
29
|
+
--messages '[{"role": "user", "content": "What is Globant Enterprise AI?"}]' \
|
|
30
|
+
--temperature 0.5 \
|
|
31
|
+
--max-tokens 500
|
|
32
|
+
|
|
33
|
+
For a non-streaming response with additional parameters like frequency and presence penalties:
|
|
34
|
+
|
|
35
|
+
.. code-block:: shell
|
|
36
|
+
|
|
37
|
+
geai chat completion \
|
|
38
|
+
--model "saia:assistant:Welcome data Assistant 3" \
|
|
39
|
+
--messages '[{"role": "user", "content": "Can you explain AI solutions offered by Globant?"}]' \
|
|
40
|
+
--temperature 0.6 \
|
|
41
|
+
--max-tokens 800 \
|
|
42
|
+
--frequency-penalty 0.1 \
|
|
43
|
+
--presence-penalty 0.2 \
|
|
44
|
+
--stream 0
|
|
45
|
+
|
|
46
|
+
Using tools and tool choice to fetch weather data:
|
|
47
|
+
|
|
48
|
+
.. code-block:: shell
|
|
49
|
+
|
|
50
|
+
geai chat completion \
|
|
51
|
+
--model "saia:assistant:Welcome data Assistant 3" \
|
|
52
|
+
--messages '[{"role": "user", "content": "Please get the current weather for San Francisco."}]' \
|
|
53
|
+
--temperature 0.6 \
|
|
54
|
+
--max-tokens 800 \
|
|
55
|
+
--tools '[{"name": "get_weather", "description": "Fetches the current weather for a given location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "City name"}}, "required": ["location"]}, "strict": true}]' \
|
|
56
|
+
--tool-choice '{"type": "function", "function": {"name": "get_weather"}}' \
|
|
57
|
+
--stream 1
|
|
58
|
+
|
|
59
|
+
Low Level Service Layer
|
|
60
|
+
^^^^^^^^^^^^^^^^^^^^^^^
|
|
61
|
+
|
|
62
|
+
The `ChatClient` class provides a low-level interface to generate chat completions. It supports both streaming and non-streaming responses and allows fine-grained control over parameters.
|
|
63
|
+
|
|
64
|
+
.. code-block:: python
|
|
65
|
+
|
|
66
|
+
from pygeai.chat.clients import ChatClient
|
|
67
|
+
|
|
68
|
+
client = ChatClient()
|
|
69
|
+
|
|
70
|
+
response = client.chat_completion(
|
|
71
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
72
|
+
messages=[{"role": "user", "content": "What is Globant Enterprise AI?"}],
|
|
73
|
+
temperature=0.5,
|
|
74
|
+
max_tokens=500,
|
|
75
|
+
stream=False
|
|
76
|
+
)
|
|
77
|
+
print(response)
|
|
78
|
+
|
|
79
|
+
Streaming response with tools:
|
|
80
|
+
|
|
81
|
+
.. code-block:: python
|
|
82
|
+
|
|
83
|
+
from pygeai.chat.clients import ChatClient
|
|
84
|
+
|
|
85
|
+
client = ChatClient()
|
|
86
|
+
|
|
87
|
+
llm_settings = {
|
|
88
|
+
"temperature": 0.6,
|
|
89
|
+
"max_tokens": 800,
|
|
90
|
+
"frequency_penalty": 0.1,
|
|
91
|
+
"presence_penalty": 0.2
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
messages = [{"role": "user", "content": "Please get the current weather for San Francisco."}]
|
|
95
|
+
|
|
96
|
+
tools = [
|
|
97
|
+
{
|
|
98
|
+
"name": "get_weather",
|
|
99
|
+
"description": "Fetches the current weather for a given location",
|
|
100
|
+
"parameters": {
|
|
101
|
+
"type": "object",
|
|
102
|
+
"properties": {"location": {"type": "string", "description": "City name"}},
|
|
103
|
+
"required": ["location"]
|
|
104
|
+
},
|
|
105
|
+
"strict": True
|
|
106
|
+
}
|
|
107
|
+
]
|
|
108
|
+
|
|
109
|
+
tool_choice = {"type": "function", "function": {"name": "get_weather"}}
|
|
110
|
+
|
|
111
|
+
response = client.chat_completion(
|
|
112
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
113
|
+
messages=messages,
|
|
114
|
+
stream=True,
|
|
115
|
+
tools=tools,
|
|
116
|
+
tool_choice=tool_choice,
|
|
117
|
+
**llm_settings
|
|
118
|
+
)
|
|
119
|
+
|
|
120
|
+
for chunk in response:
|
|
121
|
+
print(chunk, end="")
|
|
122
|
+
|
|
123
|
+
Using variables and thread ID:
|
|
124
|
+
|
|
125
|
+
.. code-block:: python
|
|
126
|
+
|
|
127
|
+
from pygeai.chat.clients import ChatClient
|
|
128
|
+
|
|
129
|
+
client = ChatClient()
|
|
130
|
+
|
|
131
|
+
response = client.chat_completion(
|
|
132
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
133
|
+
messages=[
|
|
134
|
+
{"role": "system", "content": "You are a helpful assistant for Globant Enterprise AI."},
|
|
135
|
+
{"role": "user", "content": "What AI solutions does Globant offer?"}
|
|
136
|
+
],
|
|
137
|
+
temperature=0.8,
|
|
138
|
+
max_tokens=2000,
|
|
139
|
+
presence_penalty=0.1,
|
|
140
|
+
thread_id="thread_123e4567-e89b-12d3-a456-426614174000",
|
|
141
|
+
variables=[{"key": "user_region", "value": "North America"}, {"key": "industry", "value": "Technology"}],
|
|
142
|
+
stream=False
|
|
143
|
+
)
|
|
144
|
+
print(response)
|
|
145
|
+
|
|
146
|
+
High Level Service Layer
|
|
147
|
+
^^^^^^^^^^^^^^^^^^^^^^^^
|
|
148
|
+
|
|
149
|
+
The `ChatManager` class provides a high-level interface for generating chat completions. It does not support streaming responses but simplifies the process by using structured models like `ChatMessageList` and `LlmSettings`.
|
|
150
|
+
|
|
151
|
+
.. code-block:: python
|
|
152
|
+
|
|
153
|
+
from pygeai.chat.managers import ChatManager
|
|
154
|
+
from pygeai.core.models import LlmSettings, ChatMessageList, ChatMessage
|
|
155
|
+
|
|
156
|
+
manager = ChatManager()
|
|
157
|
+
|
|
158
|
+
llm_settings = LlmSettings(
|
|
159
|
+
temperature=0.5,
|
|
160
|
+
max_tokens=500,
|
|
161
|
+
frequency_penalty=0.2
|
|
162
|
+
)
|
|
163
|
+
|
|
164
|
+
messages = ChatMessageList(
|
|
165
|
+
messages=[ChatMessage(role="user", content="Can you explain what Globant Enterprise AI does?")]
|
|
166
|
+
)
|
|
167
|
+
|
|
168
|
+
response = manager.chat_completion(
|
|
169
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
170
|
+
messages=messages,
|
|
171
|
+
llm_settings=llm_settings
|
|
172
|
+
)
|
|
173
|
+
print(response)
|
|
174
|
+
|
|
175
|
+
Using tools to check weather and send an email:
|
|
176
|
+
|
|
177
|
+
.. code-block:: python
|
|
178
|
+
|
|
179
|
+
from pygeai.chat.managers import ChatManager
|
|
180
|
+
from pygeai.core.models import LlmSettings, ChatMessageList, ChatMessage, ChatTool, ChatToolList
|
|
181
|
+
|
|
182
|
+
manager = ChatManager()
|
|
183
|
+
|
|
184
|
+
llm_settings = LlmSettings(
|
|
185
|
+
temperature=0.7,
|
|
186
|
+
max_tokens=1000,
|
|
187
|
+
frequency_penalty=0.3,
|
|
188
|
+
presence_penalty=0.2
|
|
189
|
+
)
|
|
190
|
+
|
|
191
|
+
messages = ChatMessageList(
|
|
192
|
+
messages=[ChatMessage(role="user", content="Can you check the weather for New York and send an email summary?")]
|
|
193
|
+
)
|
|
194
|
+
|
|
195
|
+
tools = ChatToolList(
|
|
196
|
+
variables=[
|
|
197
|
+
ChatTool(
|
|
198
|
+
name="get_weather",
|
|
199
|
+
description="Fetches the current weather for a given location",
|
|
200
|
+
parameters={
|
|
201
|
+
"type": "object",
|
|
202
|
+
"properties": {"location": {"type": "string", "description": "City name"}},
|
|
203
|
+
"required": ["location"]
|
|
204
|
+
},
|
|
205
|
+
strict=True
|
|
206
|
+
),
|
|
207
|
+
ChatTool(
|
|
208
|
+
name="send_email",
|
|
209
|
+
description="Sends an email to a recipient with a subject and body",
|
|
210
|
+
parameters={
|
|
211
|
+
"type": "object",
|
|
212
|
+
"properties": {
|
|
213
|
+
"recipient": {"type": "string", "description": "Email address"},
|
|
214
|
+
"subject": {"type": "string", "description": "Email subject"},
|
|
215
|
+
"body": {"type": "string", "description": "Email content"}
|
|
216
|
+
},
|
|
217
|
+
"required": ["recipient", "subject", "body"]
|
|
218
|
+
},
|
|
219
|
+
strict=False
|
|
220
|
+
)
|
|
221
|
+
]
|
|
222
|
+
)
|
|
223
|
+
|
|
224
|
+
response = manager.chat_completion(
|
|
225
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
226
|
+
messages=messages,
|
|
227
|
+
llm_settings=llm_settings,
|
|
228
|
+
tools=tools
|
|
229
|
+
)
|
|
230
|
+
print(response)
|
|
231
|
+
|
|
232
|
+
With variables and thread ID:
|
|
233
|
+
|
|
234
|
+
.. code-block:: python
|
|
235
|
+
|
|
236
|
+
from pygeai.chat.managers import ChatManager
|
|
237
|
+
from pygeai.core.models import LlmSettings, ChatMessageList, ChatMessage, ChatVariable, ChatVariableList
|
|
238
|
+
|
|
239
|
+
manager = ChatManager()
|
|
240
|
+
|
|
241
|
+
llm_settings = LlmSettings(
|
|
242
|
+
temperature=0.8,
|
|
243
|
+
max_tokens=2000,
|
|
244
|
+
presence_penalty=0.1
|
|
245
|
+
)
|
|
246
|
+
|
|
247
|
+
messages = ChatMessageList(
|
|
248
|
+
messages=[
|
|
249
|
+
ChatMessage(role="system", content="You are a helpful assistant for Globant Enterprise AI."),
|
|
250
|
+
ChatMessage(role="user", content="What AI solutions does Globant offer?")
|
|
251
|
+
]
|
|
252
|
+
)
|
|
253
|
+
|
|
254
|
+
variables = ChatVariableList(
|
|
255
|
+
variables=[
|
|
256
|
+
ChatVariable(key="user_region", value="North America"),
|
|
257
|
+
ChatVariable(key="industry", value="Technology")
|
|
258
|
+
]
|
|
259
|
+
)
|
|
260
|
+
|
|
261
|
+
response = manager.chat_completion(
|
|
262
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
263
|
+
messages=messages,
|
|
264
|
+
llm_settings=llm_settings,
|
|
265
|
+
thread_id="thread_123e4567-e89b-12d3-a456-426614174000",
|
|
266
|
+
variables=variables
|
|
267
|
+
)
|
|
268
|
+
print(response)
|
|
269
|
+
|
|
270
|
+
With tool choice:
|
|
271
|
+
|
|
272
|
+
.. code-block:: python
|
|
273
|
+
|
|
274
|
+
from pygeai.chat.managers import ChatManager
|
|
275
|
+
from pygeai.core.models import LlmSettings, ChatMessageList, ChatMessage, ChatTool, ChatToolList, ToolChoice, ToolChoiceObject, ToolChoiceFunction
|
|
276
|
+
|
|
277
|
+
manager = ChatManager()
|
|
278
|
+
|
|
279
|
+
llm_settings = LlmSettings(
|
|
280
|
+
temperature=0.6,
|
|
281
|
+
max_tokens=800,
|
|
282
|
+
frequency_penalty=0.1,
|
|
283
|
+
presence_penalty=0.2
|
|
284
|
+
)
|
|
285
|
+
|
|
286
|
+
messages = ChatMessageList(
|
|
287
|
+
messages=[ChatMessage(role="user", content="Please get the current weather for San Francisco.")]
|
|
288
|
+
)
|
|
289
|
+
|
|
290
|
+
tools = ChatToolList(
|
|
291
|
+
tools=[
|
|
292
|
+
ChatTool(
|
|
293
|
+
name="get_weather",
|
|
294
|
+
description="Fetches the current weather for a given location",
|
|
295
|
+
parameters={
|
|
296
|
+
"type": "object",
|
|
297
|
+
"properties": {"location": {"type": "string", "description": "City name"}},
|
|
298
|
+
"required": ["location"]
|
|
299
|
+
},
|
|
300
|
+
strict=True
|
|
301
|
+
),
|
|
302
|
+
ChatTool(
|
|
303
|
+
name="send_notification",
|
|
304
|
+
description="Sends a notification with a message",
|
|
305
|
+
parameters={
|
|
306
|
+
"type": "object",
|
|
307
|
+
"properties": {"message": {"type": "string", "description": "Notification content"}},
|
|
308
|
+
"required": ["message"]
|
|
309
|
+
},
|
|
310
|
+
strict=False
|
|
311
|
+
)
|
|
312
|
+
]
|
|
313
|
+
)
|
|
314
|
+
|
|
315
|
+
tool_choice = ToolChoice(
|
|
316
|
+
value=ToolChoiceObject(
|
|
317
|
+
function=ToolChoiceFunction(name="get_weather")
|
|
318
|
+
)
|
|
319
|
+
)
|
|
320
|
+
|
|
321
|
+
response = manager.chat_completion(
|
|
322
|
+
model="saia:assistant:Welcome data Assistant 3",
|
|
323
|
+
messages=messages,
|
|
324
|
+
llm_settings=llm_settings,
|
|
325
|
+
tool_choice=tool_choice,
|
|
326
|
+
tools=tools
|
|
327
|
+
)
|
|
328
|
+
print(response)
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
Embeddings
|
|
2
|
+
==========
|
|
3
|
+
|
|
4
|
+
The API Reference enables you to generate embeddings from various input types, including text and images. You can leverage different LLM providers and their respective models for this purpose.
|
|
5
|
+
|
|
6
|
+
* Generate embeddings: Generates embeddings for a given list of inputs using a specified LLM model.
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
Generate embeddings
|
|
10
|
+
~~~~~~~~~~~~~~~~~~~
|
|
11
|
+
|
|
12
|
+
Generates embeddings from different input types using `PyGEAI </pygeai>`_. It can interact with several LLM providers and their respective models for embedding generation.
|
|
13
|
+
|
|
14
|
+
To achieve this, you have three options:
|
|
15
|
+
|
|
16
|
+
* `Command Line </docs/source/content/api_reference.rst#command-line>`_
|
|
17
|
+
* `Low-Level Service Layer </docs/source/content/api_reference.rst#low-level-service-layer>`_
|
|
18
|
+
* `High-Level Service Layer </docs/source/content/api_reference.rst#high-level-service-layer>`_
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
Command line
|
|
22
|
+
^^^^^^^^^^^^
|
|
23
|
+
|
|
24
|
+
Use the following command to generate embeddings:
|
|
25
|
+
|
|
26
|
+
.. code-block:: shell
|
|
27
|
+
|
|
28
|
+
geai emb generate \
|
|
29
|
+
-i "<your_text_input>" \
|
|
30
|
+
-i "<your_image_input>" \
|
|
31
|
+
-m "<provider>/<model_name>"
|
|
32
|
+
|
|
33
|
+
Replace the placeholders with your desired values:
|
|
34
|
+
|
|
35
|
+
* `<your_text_input>`: The text you want to generate an embedding for. For example: `"Help me with Globant Enterprise AI."`
|
|
36
|
+
* `<your_image_input>`: The image data, encoded appropriately (e.g., base64). For example: `"image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAEElEQVR4nGK6HcwNCAAA//8DTgE8HuxwEQAAAABJRU5ErkJggg=="`
|
|
37
|
+
* `<provider>/<model_name>`: The provider and model to use for embedding generation. For example: `"awsbedrock/amazon.titan-embed-text-v1"`
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
Low level service layer
|
|
41
|
+
^^^^^^^^^^^^^^^^^^^^^^^
|
|
42
|
+
|
|
43
|
+
Use the following code snippet to generate embeddings using the low-level service layer:
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
.. code-block:: python
|
|
47
|
+
|
|
48
|
+
from pygeai.core.embeddings.clients import EmbeddingsClient
|
|
49
|
+
from pygeai.core.services.llm.model import Model
|
|
50
|
+
from pygeai.core.services.llm.providers import Provider
|
|
51
|
+
|
|
52
|
+
client = EmbeddingsClient()
|
|
53
|
+
|
|
54
|
+
inputs = [
|
|
55
|
+
"<your_text_input>",
|
|
56
|
+
"<your_image_input>"
|
|
57
|
+
]
|
|
58
|
+
|
|
59
|
+
embeddings = client.generate_embeddings(
|
|
60
|
+
input_list=inputs,
|
|
61
|
+
model=f"{Provider.<provider>}/{Model.<provider>.<model_name>}",
|
|
62
|
+
encoding_format=None,
|
|
63
|
+
dimensions=None,
|
|
64
|
+
user=None,
|
|
65
|
+
input_type=None,
|
|
66
|
+
timeout=600,
|
|
67
|
+
cache=False
|
|
68
|
+
)
|
|
69
|
+
|
|
70
|
+
print(embeddings)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
Replace the placeholders with your desired values:
|
|
74
|
+
|
|
75
|
+
* `<your_text_input>`: Text you want to generate an embedding for. For example: `"Help me with Globant Enterprise AI"`
|
|
76
|
+
* `<your_image_input>`: Image data, encoded appropriately (e.g., base64). For example: `"image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAEElEQVR4nGK6HcwNCAAA//8DTgE8HuxwEQAAAABJRU5ErkJggg=="`
|
|
77
|
+
* `<provider>`: LLM provider. For example: `AWS_BEDROCK`
|
|
78
|
+
* `<model_name>`: Specific model from the provider. For example: `AMAZON_TITAN_EMBED_TEXT_V1`
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
High level service layer
|
|
82
|
+
^^^^^^^^^^^^^^^^^^^^^^^^
|
|
83
|
+
|
|
84
|
+
Use the following code snippet to generate embeddings using the high-level service layer:
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
.. code-block:: python
|
|
88
|
+
|
|
89
|
+
from pygeai.core.embeddings.managers import EmbeddingsManager
|
|
90
|
+
from pygeai.core.embeddings.models import EmbeddingConfiguration
|
|
91
|
+
from pygeai.core.services.llm.model import Model
|
|
92
|
+
from pygeai.core.services.llm.providers import Provider
|
|
93
|
+
|
|
94
|
+
manager = EmbeddingsManager()
|
|
95
|
+
|
|
96
|
+
inputs = [
|
|
97
|
+
"<your_text_input>",
|
|
98
|
+
"<your_image_input>"
|
|
99
|
+
]
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
configuration = EmbeddingConfiguration(
|
|
103
|
+
inputs=inputs,
|
|
104
|
+
model=f"{Provider.<provider>}/{Model.<provider>.<model_name>}",
|
|
105
|
+
encoding_format=None,
|
|
106
|
+
dimensions=None,
|
|
107
|
+
user=None,
|
|
108
|
+
input_type=None,
|
|
109
|
+
timeout=600,
|
|
110
|
+
cache=False
|
|
111
|
+
)
|
|
112
|
+
|
|
113
|
+
embeddings = manager.generate_embeddings(configuration)
|
|
114
|
+
print(embeddings)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
Replace the placeholders with your desired values:
|
|
118
|
+
|
|
119
|
+
* `<your_text_input>`: Text you want to generate an embedding for. For example: `"Help me with Globant Enterprise AI"`
|
|
120
|
+
* `<your_image_input>`: Image data, encoded appropriately (e.g., base64). For example: `"image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAEElEQVR4nGK6HcwNCAAA//8DTgE8HuxwEQAAAABJRU5ErkJggg=="`
|
|
121
|
+
* `<provider>`: LLM provider. For example: `AWS_BEDROCK`
|
|
122
|
+
* `<model_name>`: Specific model from the provider. For example: `AMAZON_TITAN_EMBED_TEXT_V1`
|
|
123
|
+
|
|
124
|
+
|