pygeai 0.6.0b7__py3-none-any.whl → 0.6.0b11__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (178)
  1. pygeai/_docs/source/conf.py +78 -6
  2. pygeai/_docs/source/content/api_reference/embeddings.rst +31 -1
  3. pygeai/_docs/source/content/api_reference/evaluation.rst +590 -0
  4. pygeai/_docs/source/content/api_reference/feedback.rst +237 -0
  5. pygeai/_docs/source/content/api_reference/files.rst +592 -0
  6. pygeai/_docs/source/content/api_reference/gam.rst +401 -0
  7. pygeai/_docs/source/content/api_reference/proxy.rst +318 -0
  8. pygeai/_docs/source/content/api_reference/secrets.rst +495 -0
  9. pygeai/_docs/source/content/api_reference/usage_limits.rst +390 -0
  10. pygeai/_docs/source/content/api_reference.rst +7 -0
  11. pygeai/_docs/source/content/debugger.rst +376 -83
  12. pygeai/_docs/source/content/migration.rst +528 -0
  13. pygeai/_docs/source/content/modules.rst +1 -1
  14. pygeai/_docs/source/pygeai.cli.rst +8 -0
  15. pygeai/_docs/source/pygeai.tests.cli.rst +16 -0
  16. pygeai/_docs/source/pygeai.tests.core.embeddings.rst +16 -0
  17. pygeai/_docs/source/pygeai.tests.snippets.chat.rst +40 -0
  18. pygeai/_docs/source/pygeai.tests.snippets.dbg.rst +45 -0
  19. pygeai/_docs/source/pygeai.tests.snippets.embeddings.rst +40 -0
  20. pygeai/_docs/source/pygeai.tests.snippets.evaluation.dataset.rst +197 -0
  21. pygeai/_docs/source/pygeai.tests.snippets.evaluation.plan.rst +133 -0
  22. pygeai/_docs/source/pygeai.tests.snippets.evaluation.result.rst +37 -0
  23. pygeai/_docs/source/pygeai.tests.snippets.evaluation.rst +10 -0
  24. pygeai/_docs/source/pygeai.tests.snippets.rst +1 -0
  25. pygeai/admin/clients.py +5 -0
  26. pygeai/assistant/clients.py +7 -0
  27. pygeai/assistant/data_analyst/clients.py +2 -0
  28. pygeai/assistant/rag/clients.py +11 -0
  29. pygeai/chat/clients.py +236 -25
  30. pygeai/chat/endpoints.py +3 -1
  31. pygeai/cli/commands/chat.py +322 -1
  32. pygeai/cli/commands/embeddings.py +56 -8
  33. pygeai/cli/commands/migrate.py +994 -434
  34. pygeai/cli/error_handler.py +116 -0
  35. pygeai/cli/geai.py +28 -10
  36. pygeai/cli/parsers.py +8 -2
  37. pygeai/core/base/clients.py +3 -1
  38. pygeai/core/common/exceptions.py +11 -10
  39. pygeai/core/embeddings/__init__.py +19 -0
  40. pygeai/core/embeddings/clients.py +17 -2
  41. pygeai/core/embeddings/mappers.py +16 -2
  42. pygeai/core/embeddings/responses.py +9 -2
  43. pygeai/core/feedback/clients.py +1 -0
  44. pygeai/core/files/clients.py +5 -7
  45. pygeai/core/files/managers.py +42 -0
  46. pygeai/core/llm/clients.py +4 -0
  47. pygeai/core/plugins/clients.py +1 -0
  48. pygeai/core/rerank/clients.py +1 -0
  49. pygeai/core/secrets/clients.py +6 -0
  50. pygeai/core/services/rest.py +1 -1
  51. pygeai/dbg/__init__.py +3 -0
  52. pygeai/dbg/debugger.py +565 -70
  53. pygeai/evaluation/clients.py +1 -1
  54. pygeai/evaluation/dataset/clients.py +45 -44
  55. pygeai/evaluation/plan/clients.py +27 -26
  56. pygeai/evaluation/result/clients.py +37 -5
  57. pygeai/gam/clients.py +4 -0
  58. pygeai/health/clients.py +1 -0
  59. pygeai/lab/agents/clients.py +8 -1
  60. pygeai/lab/models.py +3 -3
  61. pygeai/lab/processes/clients.py +21 -0
  62. pygeai/lab/strategies/clients.py +4 -0
  63. pygeai/lab/tools/clients.py +1 -0
  64. pygeai/migration/__init__.py +31 -0
  65. pygeai/migration/strategies.py +404 -155
  66. pygeai/migration/tools.py +170 -3
  67. pygeai/organization/clients.py +13 -0
  68. pygeai/organization/limits/clients.py +15 -0
  69. pygeai/proxy/clients.py +3 -1
  70. pygeai/tests/admin/test_clients.py +16 -11
  71. pygeai/tests/assistants/rag/test_clients.py +35 -23
  72. pygeai/tests/assistants/test_clients.py +22 -15
  73. pygeai/tests/auth/test_clients.py +14 -6
  74. pygeai/tests/chat/test_clients.py +211 -1
  75. pygeai/tests/cli/commands/test_embeddings.py +32 -9
  76. pygeai/tests/cli/commands/test_evaluation.py +7 -0
  77. pygeai/tests/cli/commands/test_migrate.py +112 -243
  78. pygeai/tests/cli/test_error_handler.py +225 -0
  79. pygeai/tests/cli/test_geai_driver.py +154 -0
  80. pygeai/tests/cli/test_parsers.py +5 -5
  81. pygeai/tests/core/embeddings/test_clients.py +144 -0
  82. pygeai/tests/core/embeddings/test_managers.py +171 -0
  83. pygeai/tests/core/embeddings/test_mappers.py +142 -0
  84. pygeai/tests/core/feedback/test_clients.py +2 -0
  85. pygeai/tests/core/files/test_clients.py +1 -0
  86. pygeai/tests/core/llm/test_clients.py +14 -9
  87. pygeai/tests/core/plugins/test_clients.py +5 -3
  88. pygeai/tests/core/rerank/test_clients.py +1 -0
  89. pygeai/tests/core/secrets/test_clients.py +19 -13
  90. pygeai/tests/dbg/test_debugger.py +453 -75
  91. pygeai/tests/evaluation/dataset/test_clients.py +3 -1
  92. pygeai/tests/evaluation/plan/test_clients.py +4 -2
  93. pygeai/tests/evaluation/result/test_clients.py +7 -5
  94. pygeai/tests/gam/test_clients.py +1 -1
  95. pygeai/tests/health/test_clients.py +1 -0
  96. pygeai/tests/lab/agents/test_clients.py +9 -0
  97. pygeai/tests/lab/processes/test_clients.py +36 -0
  98. pygeai/tests/lab/processes/test_mappers.py +3 -0
  99. pygeai/tests/lab/strategies/test_clients.py +14 -9
  100. pygeai/tests/migration/test_strategies.py +45 -218
  101. pygeai/tests/migration/test_tools.py +133 -9
  102. pygeai/tests/organization/limits/test_clients.py +17 -0
  103. pygeai/tests/organization/test_clients.py +22 -0
  104. pygeai/tests/proxy/test_clients.py +2 -0
  105. pygeai/tests/proxy/test_integration.py +1 -0
  106. pygeai/tests/snippets/chat/chat_completion_with_reasoning_effort.py +18 -0
  107. pygeai/tests/snippets/chat/get_response.py +15 -0
  108. pygeai/tests/snippets/chat/get_response_streaming.py +20 -0
  109. pygeai/tests/snippets/chat/get_response_with_files.py +16 -0
  110. pygeai/tests/snippets/chat/get_response_with_tools.py +36 -0
  111. pygeai/tests/snippets/dbg/__init__.py +0 -0
  112. pygeai/tests/snippets/dbg/basic_debugging.py +32 -0
  113. pygeai/tests/snippets/dbg/breakpoint_management.py +48 -0
  114. pygeai/tests/snippets/dbg/stack_navigation.py +45 -0
  115. pygeai/tests/snippets/dbg/stepping_example.py +40 -0
  116. pygeai/tests/snippets/embeddings/cache_example.py +31 -0
  117. pygeai/tests/snippets/embeddings/cohere_example.py +41 -0
  118. pygeai/tests/snippets/embeddings/openai_base64_example.py +27 -0
  119. pygeai/tests/snippets/embeddings/openai_example.py +30 -0
  120. pygeai/tests/snippets/embeddings/similarity_example.py +42 -0
  121. pygeai/tests/snippets/evaluation/dataset/__init__.py +0 -0
  122. pygeai/tests/snippets/evaluation/dataset/complete_workflow_example.py +195 -0
  123. pygeai/tests/snippets/evaluation/dataset/create_dataset.py +26 -0
  124. pygeai/tests/snippets/evaluation/dataset/create_dataset_from_file.py +11 -0
  125. pygeai/tests/snippets/evaluation/dataset/create_dataset_row.py +17 -0
  126. pygeai/tests/snippets/evaluation/dataset/create_expected_source.py +18 -0
  127. pygeai/tests/snippets/evaluation/dataset/create_filter_variable.py +19 -0
  128. pygeai/tests/snippets/evaluation/dataset/delete_dataset.py +9 -0
  129. pygeai/tests/snippets/evaluation/dataset/delete_dataset_row.py +10 -0
  130. pygeai/tests/snippets/evaluation/dataset/delete_expected_source.py +15 -0
  131. pygeai/tests/snippets/evaluation/dataset/delete_filter_variable.py +15 -0
  132. pygeai/tests/snippets/evaluation/dataset/get_dataset.py +9 -0
  133. pygeai/tests/snippets/evaluation/dataset/get_dataset_row.py +10 -0
  134. pygeai/tests/snippets/evaluation/dataset/get_expected_source.py +15 -0
  135. pygeai/tests/snippets/evaluation/dataset/get_filter_variable.py +15 -0
  136. pygeai/tests/snippets/evaluation/dataset/list_dataset_rows.py +9 -0
  137. pygeai/tests/snippets/evaluation/dataset/list_datasets.py +6 -0
  138. pygeai/tests/snippets/evaluation/dataset/list_expected_sources.py +10 -0
  139. pygeai/tests/snippets/evaluation/dataset/list_filter_variables.py +10 -0
  140. pygeai/tests/snippets/evaluation/dataset/update_dataset.py +15 -0
  141. pygeai/tests/snippets/evaluation/dataset/update_dataset_row.py +20 -0
  142. pygeai/tests/snippets/evaluation/dataset/update_expected_source.py +18 -0
  143. pygeai/tests/snippets/evaluation/dataset/update_filter_variable.py +19 -0
  144. pygeai/tests/snippets/evaluation/dataset/upload_dataset_rows_file.py +10 -0
  145. pygeai/tests/snippets/evaluation/plan/__init__.py +0 -0
  146. pygeai/tests/snippets/evaluation/plan/add_plan_system_metric.py +13 -0
  147. pygeai/tests/snippets/evaluation/plan/complete_workflow_example.py +136 -0
  148. pygeai/tests/snippets/evaluation/plan/create_evaluation_plan.py +24 -0
  149. pygeai/tests/snippets/evaluation/plan/create_rag_evaluation_plan.py +22 -0
  150. pygeai/tests/snippets/evaluation/plan/delete_evaluation_plan.py +9 -0
  151. pygeai/tests/snippets/evaluation/plan/delete_plan_system_metric.py +13 -0
  152. pygeai/tests/snippets/evaluation/plan/execute_evaluation_plan.py +11 -0
  153. pygeai/tests/snippets/evaluation/plan/get_evaluation_plan.py +9 -0
  154. pygeai/tests/snippets/evaluation/plan/get_plan_system_metric.py +13 -0
  155. pygeai/tests/snippets/evaluation/plan/get_system_metric.py +9 -0
  156. pygeai/tests/snippets/evaluation/plan/list_evaluation_plans.py +7 -0
  157. pygeai/tests/snippets/evaluation/plan/list_plan_system_metrics.py +9 -0
  158. pygeai/tests/snippets/evaluation/plan/list_system_metrics.py +7 -0
  159. pygeai/tests/snippets/evaluation/plan/update_evaluation_plan.py +22 -0
  160. pygeai/tests/snippets/evaluation/plan/update_plan_system_metric.py +14 -0
  161. pygeai/tests/snippets/evaluation/result/__init__.py +0 -0
  162. pygeai/tests/snippets/evaluation/result/complete_workflow_example.py +150 -0
  163. pygeai/tests/snippets/evaluation/result/get_evaluation_result.py +26 -0
  164. pygeai/tests/snippets/evaluation/result/list_evaluation_results.py +17 -0
  165. pygeai/tests/snippets/migrate/__init__.py +45 -0
  166. pygeai/tests/snippets/migrate/agent_migration.py +110 -0
  167. pygeai/tests/snippets/migrate/assistant_migration.py +64 -0
  168. pygeai/tests/snippets/migrate/orchestrator_examples.py +179 -0
  169. pygeai/tests/snippets/migrate/process_migration.py +64 -0
  170. pygeai/tests/snippets/migrate/project_migration.py +42 -0
  171. pygeai/tests/snippets/migrate/tool_migration.py +64 -0
  172. pygeai/tests/snippets/organization/create_project.py +2 -2
  173. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/METADATA +1 -1
  174. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/RECORD +178 -96
  175. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/WHEEL +0 -0
  176. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/entry_points.txt +0 -0
  177. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/licenses/LICENSE +0 -0
  178. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/top_level.txt +0 -0
pygeai/chat/clients.py CHANGED
@@ -1,12 +1,14 @@
 import json
 from json import JSONDecodeError
+from pathlib import Path
 from typing import List, Dict, Optional, Union, Generator
 
 from pygeai import logger
-from pygeai.chat.endpoints import CHAT_V1, GENERATE_IMAGE_V1
+from pygeai.chat.endpoints import CHAT_V1, GENERATE_IMAGE_V1, EDIT_IMAGE_V1, RESPONSES_V1
 from pygeai.core.base.clients import BaseClient
 from pygeai.core.common.exceptions import InvalidAPIResponseException
 from pygeai.core.utils.validators import validate_status_code
+from pygeai.core.utils.parsers import parse_json_response
 
 
 class ChatClient(BaseClient):
@@ -15,8 +17,8 @@ class ChatClient(BaseClient):
         response = self.api_service.post(
             endpoint=CHAT_V1
         )
-        result = response.json()
-        return result
+        validate_status_code(response)
+        return parse_json_response(response, "chat")
 
     def chat_completion(
             self,
@@ -40,7 +42,8 @@ class ChatClient(BaseClient):
             stream_options: Optional[Dict] = None,
             store: Optional[bool] = None,
             metadata: Optional[Dict] = None,
-            user: Optional[str] = None
+            user: Optional[str] = None,
+            reasoning_effort: Optional[str] = None
     ) -> Union[dict, str, Generator[str, None, None]]:
         """
         Generates a chat completion response using the specified model and parameters.
@@ -50,8 +53,8 @@ class ChatClient(BaseClient):
         :param messages: List[Dict[str, str]] - A list of messages to include in the chat completion. Each message should be a dictionary with
             the following structure:
             {
-                "role": "string",  # Possible values: "user", "system", "assistant", or others supported by the model
-                "content": "string"  # The content of the message
+                "role": "string",
+                "content": "string"
             } (Required)
         :param stream: bool - Whether the response should be streamed. Possible values:
             - False: Returns the complete response as a dictionary or string (default).
@@ -78,6 +81,9 @@ class ChatClient(BaseClient):
         :param store: Optional[bool] - Whether to store the output for model distillation or evals. (Optional)
         :param metadata: Optional[Dict] - Up to 16 key-value pairs to attach to the object. (Optional)
         :param user: Optional[str] - A unique identifier for the end-user to monitor abuse. (Optional)
+        :param reasoning_effort: Optional[str] - Controls the depth of reasoning applied by supported models.
+            Possible values: "low", "medium", "high". Supported by OpenAI models from version 5,
+            Claude models from version 4.1, and Gemini models from version 2.0. (Optional)
         :return: Union[dict, str, Generator[str, None, None]] - For non-streaming (stream=False), returns a dictionary containing the chat completion
             result or a string if JSON decoding fails. For streaming (stream=True), returns a generator yielding content strings extracted from the
             streaming response chunks.
@@ -87,19 +93,16 @@ class ChatClient(BaseClient):
             'messages': messages,
             'stream': stream
         }
-        if temperature:
+        if temperature is not None:
            data['temperature'] = temperature
 
        if max_tokens:
            data['max_completion_tokens'] = max_tokens
 
-        if thread_id:
-            data['threadId'] = thread_id
-
-        if frequency_penalty:
+        if frequency_penalty is not None:
            data['frequency_penalty'] = frequency_penalty
 
-        if presence_penalty:
+        if presence_penalty is not None:
            data['presence_penalty'] = presence_penalty
 
        if variables is not None and any(variables):
@@ -141,6 +144,9 @@ class ChatClient(BaseClient):
        if user is not None:
            data['user'] = user
 
+        if reasoning_effort is not None:
+            data['reasoning_effort'] = reasoning_effort
+
        headers = {}
        if thread_id:
            headers["saia-conversation-id"] = thread_id
@@ -160,12 +166,13 @@ class ChatClient(BaseClient):
            data=data,
            headers=headers
        )
-        try:
-            result = response.json()
-            logger.debug(f"Chat completion result: {result}")
-            return result
-        except JSONDecodeError as e:
-            raise InvalidAPIResponseException(f"Unable to process chat request: {response.text}")
+        validate_status_code(response)
+
+        result = parse_json_response(response, "process chat request")
+
+        logger.debug(f"Chat completion result: {result}")
+
+        return result
 
    def stream_chat_generator(self, response) -> Generator[str, None, None]:
        """
@@ -195,6 +202,34 @@ class ChatClient(BaseClient):
        except Exception as e:
            raise InvalidAPIResponseException(f"Unable to process streaming chat response: {e}")
 
+    def stream_response_generator(self, response) -> Generator[str, None, None]:
+        """
+        Processes a streaming response from the Responses API and yields content strings.
+
+        :param response: The streaming response from the API.
+        :return: Generator[str, None, None] - Yields content strings extracted from streaming chunks.
+        """
+        try:
+            for line in response:
+                if line.startswith("data:"):
+                    chunk = line[5:].strip()
+                    if chunk == "[DONE]":
+                        break
+                    try:
+                        json_data = json.loads(chunk)
+                        if (
+                            json_data.get("choices")
+                            and len(json_data["choices"]) > 0
+                            and "delta" in json_data["choices"][0]
+                            and "content" in json_data["choices"][0]["delta"]
+                        ):
+                            content = json_data["choices"][0]["delta"]["content"]
+                            yield content
+                    except JSONDecodeError as e:
+                        continue
+        except Exception as e:
+            raise InvalidAPIResponseException(f"Unable to process streaming response: {e}")
+
    def generate_image(
            self,
            model: str,
@@ -212,7 +247,7 @@ class ChatClient(BaseClient):
        :param n: int - Number of images to generate (1-10, depending on the model). (Required)
        :param quality: str - Rendering quality, e.g., "high". (Required)
        :param size: str - Image dimensions, e.g., "1024x1024". (Required)
-        :param aspect_ratio: Optional[str] - Relationship between images width and height, e.g., "1:1", "9:16", "16:9", "3:4", "4:3". (Optional)
+        :param aspect_ratio: Optional[str] - Relationship between image's width and height, e.g., "1:1", "9:16", "16:9", "3:4", "4:3". (Optional)
        :return: dict - The API response containing the generated image data.
        :raises InvalidAPIResponseException: If the API response cannot be processed.
        """
@@ -234,9 +269,185 @@ class ChatClient(BaseClient):
            data=data
        )
 
-        try:
-            result = response.json()
-            logger.debug(f"Image generation result: {result}")
-            return result
-        except JSONDecodeError as e:
-            raise InvalidAPIResponseException(f"Unable to process image generation request: {response.text}")
+        validate_status_code(response)
+
+        result = parse_json_response(response, "generate image")
+        logger.debug(f"Image generation result: {result}")
+        return result
+
+    def edit_image(
+            self,
+            model: str,
+            prompt: str,
+            image: str,
+            size: str,
+            n: int = 1,
+            quality: Optional[str] = None
+    ) -> dict:
+        """
+        Edits an existing image based on the provided parameters.
+
+        :param model: str - The model specification for image editing, e.g., "openai/gpt-image-1". (Required)
+        :param prompt: str - Description of the desired edit, e.g., "remove background people". (Required)
+        :param image: str - URL of the image to be edited, e.g., "https://example.com/image.jpg". (Required)
+        :param size: str - Desired dimensions of the output image in pixels, e.g., "1024x1024". (Required)
+        :param n: int - Number of edited images to generate (1-10, depending on the model). Default is 1. (Optional)
+        :param quality: Optional[str] - Rendering quality, e.g., "high", "medium", "low". (Optional)
+        :return: dict - The API response containing the edited image data.
+        :raises InvalidAPIResponseException: If the API response cannot be processed.
+        """
+        data = {
+            'model': model,
+            'prompt': prompt,
+            'image': image,
+            'size': size,
+            'n': n
+        }
+
+        if quality:
+            data['quality'] = quality
+
+        logger.debug(f"Editing image with data: {data}")
+
+        response = self.api_service.post(
+            endpoint=EDIT_IMAGE_V1,
+            data=data
+        )
+
+        validate_status_code(response)
+
+        result = parse_json_response(response, "edit image")
+        logger.debug(f"Image editing result: {result}")
+        return result
+
+    def get_response(
+            self,
+            model: str,
+            input: str,
+            files: Optional[List[str]] = None,
+            tools: Optional[List[Dict]] = None,
+            tool_choice: Optional[Union[str, Dict]] = None,
+            temperature: Optional[float] = None,
+            max_output_tokens: Optional[int] = None,
+            top_p: Optional[float] = None,
+            metadata: Optional[Dict] = None,
+            user: Optional[str] = None,
+            instructions: Optional[str] = None,
+            reasoning: Optional[Dict] = None,
+            truncation: Optional[str] = None,
+            parallel_tool_calls: Optional[bool] = None,
+            store: Optional[bool] = None,
+            stream: bool = False
+    ) -> Union[dict, str, Generator[str, None, None]]:
+        """
+        Generates a response using the Responses API with support for images and PDF files.
+
+        :param model: str - The model specification, e.g., "openai/o1-pro". (Required)
+        :param input: str - The user input text. (Required)
+        :param files: Optional[List[str]] - List of file paths (images or PDFs) to include in the request. (Optional)
+        :param tools: Optional[List[Dict]] - A list of tools (e.g., functions) the model may call. (Optional)
+        :param tool_choice: Optional[Union[str, Dict]] - Controls which tool is called (e.g., "none", "auto", or specific tool). (Optional)
+        :param temperature: Optional[float] - Controls the randomness of the response. (Optional)
+        :param max_output_tokens: Optional[int] - The maximum number of tokens to generate in the response. (Optional)
+        :param top_p: Optional[float] - Nucleus sampling parameter. (Optional)
+        :param metadata: Optional[Dict] - Up to 16 key-value pairs to attach to the object. (Optional)
+        :param user: Optional[str] - A unique identifier for the end-user. (Optional)
+        :param instructions: Optional[str] - Additional instructions for the model. (Optional)
+        :param reasoning: Optional[Dict] - Reasoning configuration, e.g., {"effort": "medium"}. (Optional)
+        :param truncation: Optional[str] - Truncation strategy, e.g., "disabled". (Optional)
+        :param parallel_tool_calls: Optional[bool] - Whether to enable parallel tool calls. (Optional)
+        :param store: Optional[bool] - Whether to store the output. (Optional)
+        :param stream: bool - Whether the response should be streamed. Possible values:
+            - False: Returns the complete response as a dictionary or string (default).
+            - True: Returns a generator yielding content strings as they are received. (Optional)
+        :return: Union[dict, str, Generator[str, None, None]] - For non-streaming (stream=False), returns a dictionary containing the response
+            result or a string if JSON decoding fails. For streaming (stream=True), returns a generator yielding content strings extracted from the
+            streaming response chunks.
+        :raises InvalidAPIResponseException: If the API response cannot be processed.
+        """
+        data = {
+            'model': model,
+            'input': input,
+            'stream': stream
+        }
+
+        if temperature is not None:
+            data['temperature'] = temperature
+
+        if max_output_tokens is not None:
+            data['max_output_tokens'] = max_output_tokens
+
+        if top_p is not None:
+            data['top_p'] = top_p
+
+        if tools is not None:
+            data['tools'] = tools
+
+        if tool_choice is not None:
+            data['tool_choice'] = tool_choice
+
+        if metadata is not None:
+            data['metadata'] = metadata
+
+        if user is not None:
+            data['user'] = user
+
+        if instructions is not None:
+            data['instructions'] = instructions
+
+        if reasoning is not None:
+            data['reasoning'] = reasoning
+
+        if truncation is not None:
+            data['truncation'] = truncation
+
+        if parallel_tool_calls is not None:
+            data['parallel_tool_calls'] = parallel_tool_calls
+
+        if store is not None:
+            data['store'] = store
+
+        logger.debug(f"Generating response with data: {data}")
+
+        if files:
+            if stream:
+                raise InvalidAPIResponseException("Streaming is not supported when uploading files")
+
+            file_handles = []
+            try:
+                files_dict = {}
+                for idx, file_path in enumerate(files):
+                    path = Path(file_path)
+                    if not path.is_file():
+                        raise FileNotFoundError(f"File not found: {file_path}")
+
+                    file_handle = path.open("rb")
+                    file_handles.append(file_handle)
+                    files_dict[f"file{idx}"] = file_handle
+
+                response = self.api_service.post_files_multipart(
+                    endpoint=RESPONSES_V1,
+                    data=data,
+                    files=files_dict
+                )
+            finally:
+                for fh in file_handles:
+                    fh.close()
+        else:
+            if stream:
+                response = self.api_service.stream_post(
+                    endpoint=RESPONSES_V1,
+                    data=data
+                )
+                return self.stream_response_generator(response)
+            else:
+                response = self.api_service.post(
+                    endpoint=RESPONSES_V1,
+                    data=data
+                )
+
+        validate_status_code(response)
+        result = parse_json_response(response, "get response")
+        logger.debug(f"Response result: {result}")
+
+        return result
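For orientation, a minimal usage sketch of the ChatClient additions shown above (the reasoning_effort parameter and the new edit_image method). It assumes ChatClient() resolves credentials from the configured pygeai settings, as the snippet files under pygeai/tests/snippets/chat/ suggest; the model names and image URL are illustrative placeholders, not values taken from this diff.

from pygeai.chat.clients import ChatClient

# Assumption: credentials/base URL come from the active pygeai settings (not verified here).
client = ChatClient()

# chat_completion now accepts reasoning_effort ("low", "medium", "high") for models that support it.
completion = client.chat_completion(
    model="openai/gpt-5",  # placeholder model name
    messages=[{"role": "user", "content": "Summarize the release notes."}],
    reasoning_effort="medium",
)
print(completion)

# edit_image posts to the new /images/edits endpoint (EDIT_IMAGE_V1).
edited = client.edit_image(
    model="openai/gpt-image-1",
    prompt="remove background people",
    image="https://example.com/image.jpg",  # placeholder image URL
    size="1024x1024",
    quality="high",
)
print(edited)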
pygeai/chat/endpoints.py CHANGED
@@ -1,3 +1,5 @@
 CHAT_V1 = "/chat"  # POST Chat endpoint
 CHAT_COMPLETION_V1 = "/chat/completion"  # POST Chat completion endpoint
-GENERATE_IMAGE_V1 = "/images"  # POST Generate image
+GENERATE_IMAGE_V1 = "/images"  # POST Generate image
+EDIT_IMAGE_V1 = "/images/edits"  # POST Edit image
+RESPONSES_V1 = "/responses"  # POST Responses API endpoint
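Below is a similar sketch for the new Responses API surface (RESPONSES_V1 and ChatClient.get_response), under the same assumption that ChatClient() picks up credentials from the pygeai settings; the model name and file path are placeholders. Per the implementation above, file uploads and streaming are mutually exclusive.

from pygeai.chat.clients import ChatClient

client = ChatClient()  # assumption: credentials come from pygeai settings (not verified here)

# Non-streaming call with a local PDF attached; files are uploaded via multipart to /responses.
result = client.get_response(
    model="openai/o1-pro",  # placeholder model name
    input="Extract the totals from the attached invoice.",
    files=["./invoice.pdf"],  # placeholder path; must exist or FileNotFoundError is raised
    reasoning={"effort": "medium"},
)
print(result)

# Streaming call (no files): get_response returns a generator of content chunks
# parsed from the "data:" SSE lines by stream_response_generator.
for chunk in client.get_response(
    model="openai/o1-pro",
    input="Write a haiku about version bumps.",
    stream=True,
):
    print(chunk, end="", flush=True)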