pygeai 0.6.0b7__py3-none-any.whl → 0.6.0b10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178)
  1. pygeai/_docs/source/conf.py +78 -6
  2. pygeai/_docs/source/content/api_reference/embeddings.rst +31 -1
  3. pygeai/_docs/source/content/api_reference/evaluation.rst +590 -0
  4. pygeai/_docs/source/content/api_reference/feedback.rst +237 -0
  5. pygeai/_docs/source/content/api_reference/files.rst +592 -0
  6. pygeai/_docs/source/content/api_reference/gam.rst +401 -0
  7. pygeai/_docs/source/content/api_reference/proxy.rst +318 -0
  8. pygeai/_docs/source/content/api_reference/secrets.rst +495 -0
  9. pygeai/_docs/source/content/api_reference/usage_limits.rst +390 -0
  10. pygeai/_docs/source/content/api_reference.rst +7 -0
  11. pygeai/_docs/source/content/debugger.rst +376 -83
  12. pygeai/_docs/source/content/migration.rst +528 -0
  13. pygeai/_docs/source/content/modules.rst +1 -1
  14. pygeai/_docs/source/pygeai.cli.rst +8 -0
  15. pygeai/_docs/source/pygeai.tests.cli.rst +16 -0
  16. pygeai/_docs/source/pygeai.tests.core.embeddings.rst +16 -0
  17. pygeai/_docs/source/pygeai.tests.snippets.chat.rst +40 -0
  18. pygeai/_docs/source/pygeai.tests.snippets.dbg.rst +45 -0
  19. pygeai/_docs/source/pygeai.tests.snippets.embeddings.rst +40 -0
  20. pygeai/_docs/source/pygeai.tests.snippets.evaluation.dataset.rst +197 -0
  21. pygeai/_docs/source/pygeai.tests.snippets.evaluation.plan.rst +133 -0
  22. pygeai/_docs/source/pygeai.tests.snippets.evaluation.result.rst +37 -0
  23. pygeai/_docs/source/pygeai.tests.snippets.evaluation.rst +10 -0
  24. pygeai/_docs/source/pygeai.tests.snippets.rst +1 -0
  25. pygeai/admin/clients.py +5 -0
  26. pygeai/assistant/clients.py +7 -0
  27. pygeai/assistant/data_analyst/clients.py +2 -0
  28. pygeai/assistant/rag/clients.py +11 -0
  29. pygeai/chat/clients.py +191 -25
  30. pygeai/chat/endpoints.py +2 -1
  31. pygeai/cli/commands/chat.py +227 -1
  32. pygeai/cli/commands/embeddings.py +56 -8
  33. pygeai/cli/commands/migrate.py +994 -434
  34. pygeai/cli/error_handler.py +116 -0
  35. pygeai/cli/geai.py +28 -10
  36. pygeai/cli/parsers.py +8 -2
  37. pygeai/core/base/clients.py +3 -1
  38. pygeai/core/common/exceptions.py +11 -10
  39. pygeai/core/embeddings/__init__.py +19 -0
  40. pygeai/core/embeddings/clients.py +17 -2
  41. pygeai/core/embeddings/mappers.py +16 -2
  42. pygeai/core/embeddings/responses.py +9 -2
  43. pygeai/core/feedback/clients.py +1 -0
  44. pygeai/core/files/clients.py +5 -7
  45. pygeai/core/files/managers.py +42 -0
  46. pygeai/core/llm/clients.py +4 -0
  47. pygeai/core/plugins/clients.py +1 -0
  48. pygeai/core/rerank/clients.py +1 -0
  49. pygeai/core/secrets/clients.py +6 -0
  50. pygeai/core/services/rest.py +1 -1
  51. pygeai/dbg/__init__.py +3 -0
  52. pygeai/dbg/debugger.py +565 -70
  53. pygeai/evaluation/clients.py +1 -1
  54. pygeai/evaluation/dataset/clients.py +45 -44
  55. pygeai/evaluation/plan/clients.py +27 -26
  56. pygeai/evaluation/result/clients.py +37 -5
  57. pygeai/gam/clients.py +4 -0
  58. pygeai/health/clients.py +1 -0
  59. pygeai/lab/agents/clients.py +8 -1
  60. pygeai/lab/models.py +3 -3
  61. pygeai/lab/processes/clients.py +21 -0
  62. pygeai/lab/strategies/clients.py +4 -0
  63. pygeai/lab/tools/clients.py +1 -0
  64. pygeai/migration/__init__.py +31 -0
  65. pygeai/migration/strategies.py +404 -155
  66. pygeai/migration/tools.py +170 -3
  67. pygeai/organization/clients.py +13 -0
  68. pygeai/organization/limits/clients.py +15 -0
  69. pygeai/proxy/clients.py +3 -1
  70. pygeai/tests/admin/test_clients.py +16 -11
  71. pygeai/tests/assistants/rag/test_clients.py +35 -23
  72. pygeai/tests/assistants/test_clients.py +22 -15
  73. pygeai/tests/auth/test_clients.py +14 -6
  74. pygeai/tests/chat/test_clients.py +211 -1
  75. pygeai/tests/cli/commands/test_embeddings.py +32 -9
  76. pygeai/tests/cli/commands/test_evaluation.py +7 -0
  77. pygeai/tests/cli/commands/test_migrate.py +112 -243
  78. pygeai/tests/cli/test_error_handler.py +225 -0
  79. pygeai/tests/cli/test_geai_driver.py +154 -0
  80. pygeai/tests/cli/test_parsers.py +5 -5
  81. pygeai/tests/core/embeddings/test_clients.py +144 -0
  82. pygeai/tests/core/embeddings/test_managers.py +171 -0
  83. pygeai/tests/core/embeddings/test_mappers.py +142 -0
  84. pygeai/tests/core/feedback/test_clients.py +2 -0
  85. pygeai/tests/core/files/test_clients.py +1 -0
  86. pygeai/tests/core/llm/test_clients.py +14 -9
  87. pygeai/tests/core/plugins/test_clients.py +5 -3
  88. pygeai/tests/core/rerank/test_clients.py +1 -0
  89. pygeai/tests/core/secrets/test_clients.py +19 -13
  90. pygeai/tests/dbg/test_debugger.py +453 -75
  91. pygeai/tests/evaluation/dataset/test_clients.py +3 -1
  92. pygeai/tests/evaluation/plan/test_clients.py +4 -2
  93. pygeai/tests/evaluation/result/test_clients.py +7 -5
  94. pygeai/tests/gam/test_clients.py +1 -1
  95. pygeai/tests/health/test_clients.py +1 -0
  96. pygeai/tests/lab/agents/test_clients.py +9 -0
  97. pygeai/tests/lab/processes/test_clients.py +36 -0
  98. pygeai/tests/lab/processes/test_mappers.py +3 -0
  99. pygeai/tests/lab/strategies/test_clients.py +14 -9
  100. pygeai/tests/migration/test_strategies.py +45 -218
  101. pygeai/tests/migration/test_tools.py +133 -9
  102. pygeai/tests/organization/limits/test_clients.py +17 -0
  103. pygeai/tests/organization/test_clients.py +22 -0
  104. pygeai/tests/proxy/test_clients.py +2 -0
  105. pygeai/tests/proxy/test_integration.py +1 -0
  106. pygeai/tests/snippets/chat/chat_completion_with_reasoning_effort.py +18 -0
  107. pygeai/tests/snippets/chat/get_response.py +15 -0
  108. pygeai/tests/snippets/chat/get_response_streaming.py +20 -0
  109. pygeai/tests/snippets/chat/get_response_with_files.py +16 -0
  110. pygeai/tests/snippets/chat/get_response_with_tools.py +36 -0
  111. pygeai/tests/snippets/dbg/__init__.py +0 -0
  112. pygeai/tests/snippets/dbg/basic_debugging.py +32 -0
  113. pygeai/tests/snippets/dbg/breakpoint_management.py +48 -0
  114. pygeai/tests/snippets/dbg/stack_navigation.py +45 -0
  115. pygeai/tests/snippets/dbg/stepping_example.py +40 -0
  116. pygeai/tests/snippets/embeddings/cache_example.py +31 -0
  117. pygeai/tests/snippets/embeddings/cohere_example.py +41 -0
  118. pygeai/tests/snippets/embeddings/openai_base64_example.py +27 -0
  119. pygeai/tests/snippets/embeddings/openai_example.py +30 -0
  120. pygeai/tests/snippets/embeddings/similarity_example.py +42 -0
  121. pygeai/tests/snippets/evaluation/dataset/__init__.py +0 -0
  122. pygeai/tests/snippets/evaluation/dataset/complete_workflow_example.py +195 -0
  123. pygeai/tests/snippets/evaluation/dataset/create_dataset.py +26 -0
  124. pygeai/tests/snippets/evaluation/dataset/create_dataset_from_file.py +11 -0
  125. pygeai/tests/snippets/evaluation/dataset/create_dataset_row.py +17 -0
  126. pygeai/tests/snippets/evaluation/dataset/create_expected_source.py +18 -0
  127. pygeai/tests/snippets/evaluation/dataset/create_filter_variable.py +19 -0
  128. pygeai/tests/snippets/evaluation/dataset/delete_dataset.py +9 -0
  129. pygeai/tests/snippets/evaluation/dataset/delete_dataset_row.py +10 -0
  130. pygeai/tests/snippets/evaluation/dataset/delete_expected_source.py +15 -0
  131. pygeai/tests/snippets/evaluation/dataset/delete_filter_variable.py +15 -0
  132. pygeai/tests/snippets/evaluation/dataset/get_dataset.py +9 -0
  133. pygeai/tests/snippets/evaluation/dataset/get_dataset_row.py +10 -0
  134. pygeai/tests/snippets/evaluation/dataset/get_expected_source.py +15 -0
  135. pygeai/tests/snippets/evaluation/dataset/get_filter_variable.py +15 -0
  136. pygeai/tests/snippets/evaluation/dataset/list_dataset_rows.py +9 -0
  137. pygeai/tests/snippets/evaluation/dataset/list_datasets.py +6 -0
  138. pygeai/tests/snippets/evaluation/dataset/list_expected_sources.py +10 -0
  139. pygeai/tests/snippets/evaluation/dataset/list_filter_variables.py +10 -0
  140. pygeai/tests/snippets/evaluation/dataset/update_dataset.py +15 -0
  141. pygeai/tests/snippets/evaluation/dataset/update_dataset_row.py +20 -0
  142. pygeai/tests/snippets/evaluation/dataset/update_expected_source.py +18 -0
  143. pygeai/tests/snippets/evaluation/dataset/update_filter_variable.py +19 -0
  144. pygeai/tests/snippets/evaluation/dataset/upload_dataset_rows_file.py +10 -0
  145. pygeai/tests/snippets/evaluation/plan/__init__.py +0 -0
  146. pygeai/tests/snippets/evaluation/plan/add_plan_system_metric.py +13 -0
  147. pygeai/tests/snippets/evaluation/plan/complete_workflow_example.py +136 -0
  148. pygeai/tests/snippets/evaluation/plan/create_evaluation_plan.py +24 -0
  149. pygeai/tests/snippets/evaluation/plan/create_rag_evaluation_plan.py +22 -0
  150. pygeai/tests/snippets/evaluation/plan/delete_evaluation_plan.py +9 -0
  151. pygeai/tests/snippets/evaluation/plan/delete_plan_system_metric.py +13 -0
  152. pygeai/tests/snippets/evaluation/plan/execute_evaluation_plan.py +11 -0
  153. pygeai/tests/snippets/evaluation/plan/get_evaluation_plan.py +9 -0
  154. pygeai/tests/snippets/evaluation/plan/get_plan_system_metric.py +13 -0
  155. pygeai/tests/snippets/evaluation/plan/get_system_metric.py +9 -0
  156. pygeai/tests/snippets/evaluation/plan/list_evaluation_plans.py +7 -0
  157. pygeai/tests/snippets/evaluation/plan/list_plan_system_metrics.py +9 -0
  158. pygeai/tests/snippets/evaluation/plan/list_system_metrics.py +7 -0
  159. pygeai/tests/snippets/evaluation/plan/update_evaluation_plan.py +22 -0
  160. pygeai/tests/snippets/evaluation/plan/update_plan_system_metric.py +14 -0
  161. pygeai/tests/snippets/evaluation/result/__init__.py +0 -0
  162. pygeai/tests/snippets/evaluation/result/complete_workflow_example.py +150 -0
  163. pygeai/tests/snippets/evaluation/result/get_evaluation_result.py +26 -0
  164. pygeai/tests/snippets/evaluation/result/list_evaluation_results.py +17 -0
  165. pygeai/tests/snippets/migrate/__init__.py +45 -0
  166. pygeai/tests/snippets/migrate/agent_migration.py +110 -0
  167. pygeai/tests/snippets/migrate/assistant_migration.py +64 -0
  168. pygeai/tests/snippets/migrate/orchestrator_examples.py +179 -0
  169. pygeai/tests/snippets/migrate/process_migration.py +64 -0
  170. pygeai/tests/snippets/migrate/project_migration.py +42 -0
  171. pygeai/tests/snippets/migrate/tool_migration.py +64 -0
  172. pygeai/tests/snippets/organization/create_project.py +2 -2
  173. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b10.dist-info}/METADATA +1 -1
  174. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b10.dist-info}/RECORD +178 -96
  175. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b10.dist-info}/WHEEL +0 -0
  176. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b10.dist-info}/entry_points.txt +0 -0
  177. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b10.dist-info}/licenses/LICENSE +0 -0
  178. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b10.dist-info}/top_level.txt +0 -0
pygeai/chat/clients.py CHANGED
@@ -1,12 +1,14 @@
 import json
 from json import JSONDecodeError
+from pathlib import Path
 from typing import List, Dict, Optional, Union, Generator
 
 from pygeai import logger
-from pygeai.chat.endpoints import CHAT_V1, GENERATE_IMAGE_V1
+from pygeai.chat.endpoints import CHAT_V1, GENERATE_IMAGE_V1, RESPONSES_V1
 from pygeai.core.base.clients import BaseClient
 from pygeai.core.common.exceptions import InvalidAPIResponseException
 from pygeai.core.utils.validators import validate_status_code
+from pygeai.core.utils.parsers import parse_json_response
 
 
 class ChatClient(BaseClient):
@@ -15,8 +17,8 @@ class ChatClient(BaseClient):
         response = self.api_service.post(
             endpoint=CHAT_V1
         )
-        result = response.json()
-        return result
+        validate_status_code(response)
+        return parse_json_response(response, "chat")
 
     def chat_completion(
         self,
@@ -40,7 +42,8 @@ class ChatClient(BaseClient):
         stream_options: Optional[Dict] = None,
         store: Optional[bool] = None,
         metadata: Optional[Dict] = None,
-        user: Optional[str] = None
+        user: Optional[str] = None,
+        reasoning_effort: Optional[str] = None
     ) -> Union[dict, str, Generator[str, None, None]]:
         """
         Generates a chat completion response using the specified model and parameters.
@@ -50,8 +53,8 @@
         :param messages: List[Dict[str, str]] - A list of messages to include in the chat completion. Each message should be a dictionary with
             the following structure:
             {
-                "role": "string",  # Possible values: "user", "system", "assistant", or others supported by the model
-                "content": "string"  # The content of the message
+                "role": "string",
+                "content": "string"
             } (Required)
         :param stream: bool - Whether the response should be streamed. Possible values:
             - False: Returns the complete response as a dictionary or string (default).
@@ -78,6 +81,9 @@
         :param store: Optional[bool] - Whether to store the output for model distillation or evals. (Optional)
         :param metadata: Optional[Dict] - Up to 16 key-value pairs to attach to the object. (Optional)
         :param user: Optional[str] - A unique identifier for the end-user to monitor abuse. (Optional)
+        :param reasoning_effort: Optional[str] - Controls the depth of reasoning applied by supported models.
+            Possible values: "low", "medium", "high". Supported by OpenAI models from version 5,
+            Claude models from version 4.1, and Gemini models from version 2.0. (Optional)
         :return: Union[dict, str, Generator[str, None, None]] - For non-streaming (stream=False), returns a dictionary containing the chat completion
             result or a string if JSON decoding fails. For streaming (stream=True), returns a generator yielding content strings extracted from the
             streaming response chunks.
@@ -87,19 +93,16 @@
             'messages': messages,
             'stream': stream
         }
-        if temperature:
+        if temperature is not None:
             data['temperature'] = temperature
 
         if max_tokens:
             data['max_completion_tokens'] = max_tokens
 
-        if thread_id:
-            data['threadId'] = thread_id
-
-        if frequency_penalty:
+        if frequency_penalty is not None:
             data['frequency_penalty'] = frequency_penalty
 
-        if presence_penalty:
+        if presence_penalty is not None:
             data['presence_penalty'] = presence_penalty
 
         if variables is not None and any(variables):
@@ -141,6 +144,9 @@
         if user is not None:
             data['user'] = user
 
+        if reasoning_effort is not None:
+            data['reasoning_effort'] = reasoning_effort
+
         headers = {}
         if thread_id:
             headers["saia-conversation-id"] = thread_id
@@ -160,12 +166,13 @@
             data=data,
             headers=headers
         )
-        try:
-            result = response.json()
-            logger.debug(f"Chat completion result: {result}")
-            return result
-        except JSONDecodeError as e:
-            raise InvalidAPIResponseException(f"Unable to process chat request: {response.text}")
+        validate_status_code(response)
+
+        result = parse_json_response(response, "process chat request")
+
+        logger.debug(f"Chat completion result: {result}")
+
+        return result
 
     def stream_chat_generator(self, response) -> Generator[str, None, None]:
         """
@@ -195,6 +202,34 @@
         except Exception as e:
             raise InvalidAPIResponseException(f"Unable to process streaming chat response: {e}")
 
+    def stream_response_generator(self, response) -> Generator[str, None, None]:
+        """
+        Processes a streaming response from the Responses API and yields content strings.
+
+        :param response: The streaming response from the API.
+        :return: Generator[str, None, None] - Yields content strings extracted from streaming chunks.
+        """
+        try:
+            for line in response:
+                if line.startswith("data:"):
+                    chunk = line[5:].strip()
+                    if chunk == "[DONE]":
+                        break
+                    try:
+                        json_data = json.loads(chunk)
+                        if (
+                            json_data.get("choices")
+                            and len(json_data["choices"]) > 0
+                            and "delta" in json_data["choices"][0]
+                            and "content" in json_data["choices"][0]["delta"]
+                        ):
+                            content = json_data["choices"][0]["delta"]["content"]
+                            yield content
+                    except JSONDecodeError as e:
+                        continue
+        except Exception as e:
+            raise InvalidAPIResponseException(f"Unable to process streaming response: {e}")
+
     def generate_image(
         self,
         model: str,
@@ -212,7 +247,7 @@
         :param n: int - Number of images to generate (1-10, depending on the model). (Required)
         :param quality: str - Rendering quality, e.g., "high". (Required)
         :param size: str - Image dimensions, e.g., "1024x1024". (Required)
-        :param aspect_ratio: Optional[str] - Relationship between images width and height, e.g., "1:1", "9:16", "16:9", "3:4", "4:3". (Optional)
+        :param aspect_ratio: Optional[str] - Relationship between image's width and height, e.g., "1:1", "9:16", "16:9", "3:4", "4:3". (Optional)
         :return: dict - The API response containing the generated image data.
         :raises InvalidAPIResponseException: If the API response cannot be processed.
         """
@@ -234,9 +269,140 @@
             data=data
         )
 
-        try:
-            result = response.json()
-            logger.debug(f"Image generation result: {result}")
-            return result
-        except JSONDecodeError as e:
-            raise InvalidAPIResponseException(f"Unable to process image generation request: {response.text}")
+        validate_status_code(response)
+
+        result = parse_json_response(response, "generate image")
+        logger.debug(f"Image generation result: {result}")
+        return result
+
+    def get_response(
+        self,
+        model: str,
+        input: str,
+        files: Optional[List[str]] = None,
+        tools: Optional[List[Dict]] = None,
+        tool_choice: Optional[Union[str, Dict]] = None,
+        temperature: Optional[float] = None,
+        max_output_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        metadata: Optional[Dict] = None,
+        user: Optional[str] = None,
+        instructions: Optional[str] = None,
+        reasoning: Optional[Dict] = None,
+        truncation: Optional[str] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        store: Optional[bool] = None,
+        stream: bool = False
+    ) -> Union[dict, str, Generator[str, None, None]]:
+        """
+        Generates a response using the Responses API with support for images and PDF files.
+
+        :param model: str - The model specification, e.g., "openai/o1-pro". (Required)
+        :param input: str - The user input text. (Required)
+        :param files: Optional[List[str]] - List of file paths (images or PDFs) to include in the request. (Optional)
+        :param tools: Optional[List[Dict]] - A list of tools (e.g., functions) the model may call. (Optional)
+        :param tool_choice: Optional[Union[str, Dict]] - Controls which tool is called (e.g., "none", "auto", or specific tool). (Optional)
+        :param temperature: Optional[float] - Controls the randomness of the response. (Optional)
+        :param max_output_tokens: Optional[int] - The maximum number of tokens to generate in the response. (Optional)
+        :param top_p: Optional[float] - Nucleus sampling parameter. (Optional)
+        :param metadata: Optional[Dict] - Up to 16 key-value pairs to attach to the object. (Optional)
+        :param user: Optional[str] - A unique identifier for the end-user. (Optional)
+        :param instructions: Optional[str] - Additional instructions for the model. (Optional)
+        :param reasoning: Optional[Dict] - Reasoning configuration, e.g., {"effort": "medium"}. (Optional)
+        :param truncation: Optional[str] - Truncation strategy, e.g., "disabled". (Optional)
+        :param parallel_tool_calls: Optional[bool] - Whether to enable parallel tool calls. (Optional)
+        :param store: Optional[bool] - Whether to store the output. (Optional)
+        :param stream: bool - Whether the response should be streamed. Possible values:
+            - False: Returns the complete response as a dictionary or string (default).
+            - True: Returns a generator yielding content strings as they are received. (Optional)
+        :return: Union[dict, str, Generator[str, None, None]] - For non-streaming (stream=False), returns a dictionary containing the response
+            result or a string if JSON decoding fails. For streaming (stream=True), returns a generator yielding content strings extracted from the
+            streaming response chunks.
+        :raises InvalidAPIResponseException: If the API response cannot be processed.
+        """
+        data = {
+            'model': model,
+            'input': input,
+            'stream': stream
+        }
+
+        if temperature is not None:
+            data['temperature'] = temperature
+
+        if max_output_tokens is not None:
+            data['max_output_tokens'] = max_output_tokens
+
+        if top_p is not None:
+            data['top_p'] = top_p
+
+        if tools is not None:
+            data['tools'] = tools
+
+        if tool_choice is not None:
+            data['tool_choice'] = tool_choice
+
+        if metadata is not None:
+            data['metadata'] = metadata
+
+        if user is not None:
+            data['user'] = user
+
+        if instructions is not None:
+            data['instructions'] = instructions
+
+        if reasoning is not None:
+            data['reasoning'] = reasoning
+
+        if truncation is not None:
+            data['truncation'] = truncation
+
+        if parallel_tool_calls is not None:
+            data['parallel_tool_calls'] = parallel_tool_calls
+
+        if store is not None:
+            data['store'] = store
+
+        logger.debug(f"Generating response with data: {data}")
+
+        if files:
+            if stream:
+                raise InvalidAPIResponseException("Streaming is not supported when uploading files")
+
+            file_handles = []
+            try:
+                files_dict = {}
+                for idx, file_path in enumerate(files):
+                    path = Path(file_path)
+                    if not path.is_file():
+                        raise FileNotFoundError(f"File not found: {file_path}")
+
+                    file_handle = path.open("rb")
+                    file_handles.append(file_handle)
+                    files_dict[f"file{idx}"] = file_handle
+
+                response = self.api_service.post_files_multipart(
+                    endpoint=RESPONSES_V1,
+                    data=data,
+                    files=files_dict
+                )
+            finally:
+                for fh in file_handles:
+                    fh.close()
+        else:
+            if stream:
+                response = self.api_service.stream_post(
+                    endpoint=RESPONSES_V1,
+                    data=data
+                )
+                return self.stream_response_generator(response)
+            else:
+                response = self.api_service.post(
+                    endpoint=RESPONSES_V1,
+                    data=data
+                )
+
+        validate_status_code(response)
+        result = parse_json_response(response, "get response")
+        logger.debug(f"Response result: {result}")
+
+        return result
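Annotation (not part of the diff): the new Responses API surface can be exercised directly from the SDK. A minimal sketch, assuming credentials and base URL are already configured for BaseClient, with ./report.pdf as a placeholder path:

    from pygeai.chat.clients import ChatClient

    client = ChatClient()

    # Non-streaming call with an attached file. Per the code above, files and
    # stream=True are mutually exclusive: get_response raises
    # InvalidAPIResponseException if both are set.
    result = client.get_response(
        model="openai/o1-pro",
        input="Summarize the attached document.",
        files=["./report.pdf"],          # placeholder path, not from this diff
        reasoning={"effort": "medium"},  # reasoning config per the docstring
    )
    print(result)

    # Streaming call without files yields content strings as they arrive.
    for chunk in client.get_response(model="openai/o1-pro", input="Say hello.", stream=True):
        print(chunk, end="")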
pygeai/chat/endpoints.py CHANGED
@@ -1,3 +1,4 @@
 CHAT_V1 = "/chat"  # POST Chat endpoint
 CHAT_COMPLETION_V1 = "/chat/completion"  # POST Chat completion endpoint
-GENERATE_IMAGE_V1 = "/images"  # POST Generate image
+GENERATE_IMAGE_V1 = "/images"  # POST Generate image
+RESPONSES_V1 = "/responses"  # POST Responses API endpoint
pygeai/cli/commands/chat.py CHANGED
@@ -52,6 +52,7 @@ def get_chat_completion(option_list: list):
     store = None
     metadata = None
    user = None
+    reasoning_effort = None
 
     for option_flag, option_arg in option_list:
         if option_flag.name == "model":
@@ -124,6 +125,8 @@
             raise WrongArgumentError("metadata must be a valid JSON object")
         if option_flag.name == "user":
             user = option_arg
+        if option_flag.name == "reasoning_effort":
+            reasoning_effort = option_arg
 
     messages = get_messages(message_list)
 
@@ -151,7 +154,8 @@
         stream_options=stream_options,
         store=store,
         metadata=metadata,
-        user=user
+        user=user,
+        reasoning_effort=reasoning_effort
     )
     if stream:
         Console.write_stdout("Streaming chat completion:")
@@ -295,6 +299,14 @@ chat_completion_options = [
         "Optional string identifier for the end-user to monitor abuse.",
         True
     ),
+    Option(
+        "reasoning_effort",
+        ["--reasoning-effort"],
+        "Optional string to control the depth of reasoning applied by supported models. "
+        "Possible values: 'low', 'medium', 'high'. Supported by OpenAI models from version 5, "
+        "Claude models from version 4.1, and Gemini models from version 2.0.",
+        True
+    ),
 ]
 
 
@@ -649,6 +661,211 @@ generate_image_options = [
 ]
 
 
+def get_response(option_list: list):
+    model = None
+    input_text = None
+    files = None
+    tools = None
+    tool_choice = None
+    temperature = None
+    max_output_tokens = None
+    top_p = None
+    metadata = None
+    user = None
+    instructions = None
+    reasoning = None
+    truncation = None
+    parallel_tool_calls = None
+    store = None
+    stream = False
+
+    for option_flag, option_arg in option_list:
+        if option_flag.name == "model":
+            model = option_arg
+        if option_flag.name == "input":
+            input_text = option_arg
+        if option_flag.name == "files":
+            try:
+                files = json.loads(option_arg) if option_arg else None
+                if files and not isinstance(files, list):
+                    raise WrongArgumentError("files must be a JSON array of file paths")
+            except json.JSONDecodeError:
+                raise WrongArgumentError("files must be a valid JSON array")
+        if option_flag.name == "tools":
+            try:
+                tools = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                raise WrongArgumentError("tools must be a valid JSON array")
+        if option_flag.name == "tool_choice":
+            try:
+                tool_choice = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                tool_choice = option_arg
+        if option_flag.name == "temperature":
+            temperature = float(option_arg) if option_arg is not None else None
+        if option_flag.name == "max_output_tokens":
+            max_output_tokens = int(option_arg) if option_arg is not None else None
+        if option_flag.name == "top_p":
+            top_p = float(option_arg) if option_arg else None
+        if option_flag.name == "metadata":
+            try:
+                metadata = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                raise WrongArgumentError("metadata must be a valid JSON object")
+        if option_flag.name == "user":
+            user = option_arg
+        if option_flag.name == "instructions":
+            instructions = option_arg
+        if option_flag.name == "reasoning":
+            try:
+                reasoning = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                raise WrongArgumentError("reasoning must be a valid JSON object")
+        if option_flag.name == "truncation":
+            truncation = option_arg
+        if option_flag.name == "parallel_tool_calls":
+            parallel_tool_calls = get_boolean_value(option_arg) if option_arg else None
+        if option_flag.name == "store":
+            store = get_boolean_value(option_arg) if option_arg else None
+        if option_flag.name == "stream":
+            if option_arg:
+                stream = get_boolean_value(option_arg)
+
+    if not (model and input_text):
+        raise MissingRequirementException("Cannot get response without specifying model and input")
+
+    client = ChatClient()
+    try:
+        result = client.get_response(
+            model=model,
+            input=input_text,
+            files=files,
+            tools=tools,
+            tool_choice=tool_choice,
+            temperature=temperature,
+            max_output_tokens=max_output_tokens,
+            top_p=top_p,
+            metadata=metadata,
+            user=user,
+            instructions=instructions,
+            reasoning=reasoning,
+            truncation=truncation,
+            parallel_tool_calls=parallel_tool_calls,
+            store=store,
+            stream=stream
+        )
+        if stream:
+            Console.write_stdout("Streaming response:")
+            for chunk in result:
+                Console.write_stdout(f"{chunk}", end="")
+                sys.stdout.flush()
+            Console.write_stdout()
+        else:
+            Console.write_stdout(f"Response result: \n{json.dumps(result, indent=2)}\n")
+    except Exception as e:
+        logger.error(f"Error getting response: {e}")
+        Console.write_stderr(f"Failed to get response: {e}")
+
+
+response_options = [
+    Option(
+        "model",
+        ["--model", "-m"],
+        "The model specification, e.g., 'openai/o1-pro'.",
+        True
+    ),
+    Option(
+        "input",
+        ["--input", "-i"],
+        "The user input text.",
+        True
+    ),
+    Option(
+        "files",
+        ["--files", "-f"],
+        "JSON array of file paths (images or PDFs) to include in the request, e.g., '[\"image.jpg\", \"doc.pdf\"]'.",
+        True
+    ),
+    Option(
+        "tools",
+        ["--tools"],
+        "Optional JSON array of tools (e.g., functions) the model may call.",
+        True
+    ),
+    Option(
+        "tool_choice",
+        ["--tool-choice"],
+        "Optional string (e.g., \"none\", \"auto\") or JSON object to control which tool is called.",
+        True
+    ),
+    Option(
+        "temperature",
+        ["--temperature", "--temp"],
+        "Float value to set randomness of the response (between 0 and 2).",
+        True
+    ),
+    Option(
+        "max_output_tokens",
+        ["--max-output-tokens"],
+        "Integer value to set max tokens in the output.",
+        True
+    ),
+    Option(
+        "top_p",
+        ["--top-p"],
+        "Optional float value for nucleus sampling (between 0 and 1).",
+        True
+    ),
+    Option(
+        "metadata",
+        ["--metadata"],
+        "Optional JSON object with up to 16 key-value pairs to attach to the object.",
+        True
+    ),
+    Option(
+        "user",
+        ["--user"],
+        "Optional string identifier for the end-user.",
+        True
+    ),
+    Option(
+        "instructions",
+        ["--instructions"],
+        "Optional additional instructions for the model.",
+        True
+    ),
+    Option(
+        "reasoning",
+        ["--reasoning"],
+        "Optional JSON object for reasoning configuration, e.g., {\"effort\": \"medium\"}.",
+        True
+    ),
+    Option(
+        "truncation",
+        ["--truncation"],
+        "Optional truncation strategy, e.g., \"disabled\".",
+        True
+    ),
+    Option(
+        "parallel_tool_calls",
+        ["--parallel-tool-calls"],
+        "Optional boolean to enable parallel tool calls. Possible values: 0: OFF; 1: ON",
+        True
+    ),
+    Option(
+        "store",
+        ["--store"],
+        "Optional boolean to store the output. Possible values: 0: OFF; 1: ON",
+        True
+    ),
+    Option(
+        "stream",
+        ["--stream"],
+        "Whether to stream the response. Possible values: 0: OFF; 1: ON",
+        True
+    ),
+]
+
@@ -695,5 +912,14 @@ chat_commands = [
         [],
         generate_image_options
     ),
+    Command(
+        "response",
+        ["response", "resp"],
+        "Get a response using the Responses API with support for images and PDFs",
+        get_response,
+        ArgumentsEnum.REQUIRED,
+        [],
+        response_options
+    ),
 
 ]
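Annotation (not part of the diff): the same release also threads reasoning_effort through the chat completion path. A minimal sketch of the SDK call behind the new --reasoning-effort flag; the model identifier is illustrative only:

    from pygeai.chat.clients import ChatClient

    client = ChatClient()
    result = client.chat_completion(
        model="openai/gpt-5",  # illustrative name; any model that honors reasoning_effort
        messages=[{"role": "user", "content": "Walk me through a proof sketch."}],
        reasoning_effort="high",  # "low" | "medium" | "high"; sent as data['reasoning_effort']
    )
    print(result)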
pygeai/cli/commands/embeddings.py CHANGED
@@ -1,5 +1,7 @@
+import json
 from pygeai.cli.commands import Command, Option, ArgumentsEnum
 from pygeai.cli.commands.builders import build_help_text
+from pygeai.cli.commands.common import get_boolean_value
 from pygeai.cli.texts.help import EMBEDDINGS_HELP_TEXT
 from pygeai.core.common.exceptions import MissingRequirementException, WrongArgumentError
 from pygeai.core.embeddings.clients import EmbeddingsClient
@@ -22,6 +24,7 @@ def generate_embeddings(option_list: list):
     input_type = None
     timeout = None
     cache = None
+    preview = True
     input_list = list()
 
     for option_flag, option_arg in option_list:
@@ -32,18 +35,23 @@
         if option_flag.name == "encoding_format":
             encoding_format = option_arg
         if option_flag.name == "dimensions":
-            dimensions = option_arg
+            try:
+                dimensions = int(option_arg)
+            except (ValueError, TypeError):
+                raise WrongArgumentError("dimensions must be an integer")
         if option_flag.name == "user":
             user = option_arg
         if option_flag.name == "input_type":
             input_type = option_arg
         if option_flag.name == "timeout":
-            timeout = option_arg
+            try:
+                timeout = int(option_arg)
+            except (ValueError, TypeError):
+                raise WrongArgumentError("timeout must be an integer")
         if option_flag.name == "cache":
-            if not str(option_arg).isdigit() or int(option_arg) not in [0, 1]:
-                raise WrongArgumentError("If specified, cache must be 0 or 1")
-
-            cache = bool(int(option_arg))
+            cache = get_boolean_value(option_arg)
+        if option_flag.name == "preview":
+            preview = get_boolean_value(option_arg)
 
     if not (model and any(input_list)):
         raise MissingRequirementException("Cannot generate embeddings without specifying model and at least one input")
@@ -59,7 +67,40 @@
         timeout=timeout,
         cache=cache
     )
-    Console.write_stdout(f"Embeddings detail: \n{result}")
+
+    output = {
+        "model": result.get("model"),
+        "object": result.get("object"),
+        "embeddings_count": len(result.get("data", [])),
+        "usage": result.get("usage"),
+        "data": []
+    }
+
+    for item in result.get("data", []):
+        embedding_data = item.get("embedding")
+        if isinstance(embedding_data, list):
+            embedding_info = {
+                "index": item.get("index"),
+                "dimensions": len(embedding_data),
+                "object": item.get("object")
+            }
+            if preview:
+                embedding_info["preview"] = embedding_data[:5] if len(embedding_data) > 5 else embedding_data
+            else:
+                embedding_info["embedding"] = embedding_data
+        else:
+            embedding_info = {
+                "index": item.get("index"),
+                "object": item.get("object"),
+                "format": "base64"
+            }
+            if preview:
+                embedding_info["preview"] = str(embedding_data)[:50] + "..." if len(str(embedding_data)) > 50 else embedding_data
+            else:
+                embedding_info["embedding"] = embedding_data
+        output["data"].append(embedding_info)
+
+    Console.write_stdout(json.dumps(output, indent=2))
 
 
 generate_embeddings_options = [
@@ -109,10 +150,17 @@ generate_embeddings_options = [
     Option(
         "cache",
         ["--cache"],
-        "Enable X-Saia-Cache-Enabled to cache the embeddings for the model; it applies by Organization/Project."
+        "Enable X-Saia-Cache-Enabled to cache the embeddings for the model; it applies by Organization/Project. "
         "1 to set to True and 0 to false. 0 is default",
         True
     ),
+    Option(
+        "preview",
+        ["--preview"],
+        "Control embedding display in output. 1 (default) shows a preview (first 5 values for float, 50 chars for base64). "
+        "0 shows the full embedding vector. Use 0 to get complete embeddings for further processing",
+        True
+    ),
 
 ]