pygeai 0.6.0b7__py3-none-any.whl → 0.6.0b11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178)
  1. pygeai/_docs/source/conf.py +78 -6
  2. pygeai/_docs/source/content/api_reference/embeddings.rst +31 -1
  3. pygeai/_docs/source/content/api_reference/evaluation.rst +590 -0
  4. pygeai/_docs/source/content/api_reference/feedback.rst +237 -0
  5. pygeai/_docs/source/content/api_reference/files.rst +592 -0
  6. pygeai/_docs/source/content/api_reference/gam.rst +401 -0
  7. pygeai/_docs/source/content/api_reference/proxy.rst +318 -0
  8. pygeai/_docs/source/content/api_reference/secrets.rst +495 -0
  9. pygeai/_docs/source/content/api_reference/usage_limits.rst +390 -0
  10. pygeai/_docs/source/content/api_reference.rst +7 -0
  11. pygeai/_docs/source/content/debugger.rst +376 -83
  12. pygeai/_docs/source/content/migration.rst +528 -0
  13. pygeai/_docs/source/content/modules.rst +1 -1
  14. pygeai/_docs/source/pygeai.cli.rst +8 -0
  15. pygeai/_docs/source/pygeai.tests.cli.rst +16 -0
  16. pygeai/_docs/source/pygeai.tests.core.embeddings.rst +16 -0
  17. pygeai/_docs/source/pygeai.tests.snippets.chat.rst +40 -0
  18. pygeai/_docs/source/pygeai.tests.snippets.dbg.rst +45 -0
  19. pygeai/_docs/source/pygeai.tests.snippets.embeddings.rst +40 -0
  20. pygeai/_docs/source/pygeai.tests.snippets.evaluation.dataset.rst +197 -0
  21. pygeai/_docs/source/pygeai.tests.snippets.evaluation.plan.rst +133 -0
  22. pygeai/_docs/source/pygeai.tests.snippets.evaluation.result.rst +37 -0
  23. pygeai/_docs/source/pygeai.tests.snippets.evaluation.rst +10 -0
  24. pygeai/_docs/source/pygeai.tests.snippets.rst +1 -0
  25. pygeai/admin/clients.py +5 -0
  26. pygeai/assistant/clients.py +7 -0
  27. pygeai/assistant/data_analyst/clients.py +2 -0
  28. pygeai/assistant/rag/clients.py +11 -0
  29. pygeai/chat/clients.py +236 -25
  30. pygeai/chat/endpoints.py +3 -1
  31. pygeai/cli/commands/chat.py +322 -1
  32. pygeai/cli/commands/embeddings.py +56 -8
  33. pygeai/cli/commands/migrate.py +994 -434
  34. pygeai/cli/error_handler.py +116 -0
  35. pygeai/cli/geai.py +28 -10
  36. pygeai/cli/parsers.py +8 -2
  37. pygeai/core/base/clients.py +3 -1
  38. pygeai/core/common/exceptions.py +11 -10
  39. pygeai/core/embeddings/__init__.py +19 -0
  40. pygeai/core/embeddings/clients.py +17 -2
  41. pygeai/core/embeddings/mappers.py +16 -2
  42. pygeai/core/embeddings/responses.py +9 -2
  43. pygeai/core/feedback/clients.py +1 -0
  44. pygeai/core/files/clients.py +5 -7
  45. pygeai/core/files/managers.py +42 -0
  46. pygeai/core/llm/clients.py +4 -0
  47. pygeai/core/plugins/clients.py +1 -0
  48. pygeai/core/rerank/clients.py +1 -0
  49. pygeai/core/secrets/clients.py +6 -0
  50. pygeai/core/services/rest.py +1 -1
  51. pygeai/dbg/__init__.py +3 -0
  52. pygeai/dbg/debugger.py +565 -70
  53. pygeai/evaluation/clients.py +1 -1
  54. pygeai/evaluation/dataset/clients.py +45 -44
  55. pygeai/evaluation/plan/clients.py +27 -26
  56. pygeai/evaluation/result/clients.py +37 -5
  57. pygeai/gam/clients.py +4 -0
  58. pygeai/health/clients.py +1 -0
  59. pygeai/lab/agents/clients.py +8 -1
  60. pygeai/lab/models.py +3 -3
  61. pygeai/lab/processes/clients.py +21 -0
  62. pygeai/lab/strategies/clients.py +4 -0
  63. pygeai/lab/tools/clients.py +1 -0
  64. pygeai/migration/__init__.py +31 -0
  65. pygeai/migration/strategies.py +404 -155
  66. pygeai/migration/tools.py +170 -3
  67. pygeai/organization/clients.py +13 -0
  68. pygeai/organization/limits/clients.py +15 -0
  69. pygeai/proxy/clients.py +3 -1
  70. pygeai/tests/admin/test_clients.py +16 -11
  71. pygeai/tests/assistants/rag/test_clients.py +35 -23
  72. pygeai/tests/assistants/test_clients.py +22 -15
  73. pygeai/tests/auth/test_clients.py +14 -6
  74. pygeai/tests/chat/test_clients.py +211 -1
  75. pygeai/tests/cli/commands/test_embeddings.py +32 -9
  76. pygeai/tests/cli/commands/test_evaluation.py +7 -0
  77. pygeai/tests/cli/commands/test_migrate.py +112 -243
  78. pygeai/tests/cli/test_error_handler.py +225 -0
  79. pygeai/tests/cli/test_geai_driver.py +154 -0
  80. pygeai/tests/cli/test_parsers.py +5 -5
  81. pygeai/tests/core/embeddings/test_clients.py +144 -0
  82. pygeai/tests/core/embeddings/test_managers.py +171 -0
  83. pygeai/tests/core/embeddings/test_mappers.py +142 -0
  84. pygeai/tests/core/feedback/test_clients.py +2 -0
  85. pygeai/tests/core/files/test_clients.py +1 -0
  86. pygeai/tests/core/llm/test_clients.py +14 -9
  87. pygeai/tests/core/plugins/test_clients.py +5 -3
  88. pygeai/tests/core/rerank/test_clients.py +1 -0
  89. pygeai/tests/core/secrets/test_clients.py +19 -13
  90. pygeai/tests/dbg/test_debugger.py +453 -75
  91. pygeai/tests/evaluation/dataset/test_clients.py +3 -1
  92. pygeai/tests/evaluation/plan/test_clients.py +4 -2
  93. pygeai/tests/evaluation/result/test_clients.py +7 -5
  94. pygeai/tests/gam/test_clients.py +1 -1
  95. pygeai/tests/health/test_clients.py +1 -0
  96. pygeai/tests/lab/agents/test_clients.py +9 -0
  97. pygeai/tests/lab/processes/test_clients.py +36 -0
  98. pygeai/tests/lab/processes/test_mappers.py +3 -0
  99. pygeai/tests/lab/strategies/test_clients.py +14 -9
  100. pygeai/tests/migration/test_strategies.py +45 -218
  101. pygeai/tests/migration/test_tools.py +133 -9
  102. pygeai/tests/organization/limits/test_clients.py +17 -0
  103. pygeai/tests/organization/test_clients.py +22 -0
  104. pygeai/tests/proxy/test_clients.py +2 -0
  105. pygeai/tests/proxy/test_integration.py +1 -0
  106. pygeai/tests/snippets/chat/chat_completion_with_reasoning_effort.py +18 -0
  107. pygeai/tests/snippets/chat/get_response.py +15 -0
  108. pygeai/tests/snippets/chat/get_response_streaming.py +20 -0
  109. pygeai/tests/snippets/chat/get_response_with_files.py +16 -0
  110. pygeai/tests/snippets/chat/get_response_with_tools.py +36 -0
  111. pygeai/tests/snippets/dbg/__init__.py +0 -0
  112. pygeai/tests/snippets/dbg/basic_debugging.py +32 -0
  113. pygeai/tests/snippets/dbg/breakpoint_management.py +48 -0
  114. pygeai/tests/snippets/dbg/stack_navigation.py +45 -0
  115. pygeai/tests/snippets/dbg/stepping_example.py +40 -0
  116. pygeai/tests/snippets/embeddings/cache_example.py +31 -0
  117. pygeai/tests/snippets/embeddings/cohere_example.py +41 -0
  118. pygeai/tests/snippets/embeddings/openai_base64_example.py +27 -0
  119. pygeai/tests/snippets/embeddings/openai_example.py +30 -0
  120. pygeai/tests/snippets/embeddings/similarity_example.py +42 -0
  121. pygeai/tests/snippets/evaluation/dataset/__init__.py +0 -0
  122. pygeai/tests/snippets/evaluation/dataset/complete_workflow_example.py +195 -0
  123. pygeai/tests/snippets/evaluation/dataset/create_dataset.py +26 -0
  124. pygeai/tests/snippets/evaluation/dataset/create_dataset_from_file.py +11 -0
  125. pygeai/tests/snippets/evaluation/dataset/create_dataset_row.py +17 -0
  126. pygeai/tests/snippets/evaluation/dataset/create_expected_source.py +18 -0
  127. pygeai/tests/snippets/evaluation/dataset/create_filter_variable.py +19 -0
  128. pygeai/tests/snippets/evaluation/dataset/delete_dataset.py +9 -0
  129. pygeai/tests/snippets/evaluation/dataset/delete_dataset_row.py +10 -0
  130. pygeai/tests/snippets/evaluation/dataset/delete_expected_source.py +15 -0
  131. pygeai/tests/snippets/evaluation/dataset/delete_filter_variable.py +15 -0
  132. pygeai/tests/snippets/evaluation/dataset/get_dataset.py +9 -0
  133. pygeai/tests/snippets/evaluation/dataset/get_dataset_row.py +10 -0
  134. pygeai/tests/snippets/evaluation/dataset/get_expected_source.py +15 -0
  135. pygeai/tests/snippets/evaluation/dataset/get_filter_variable.py +15 -0
  136. pygeai/tests/snippets/evaluation/dataset/list_dataset_rows.py +9 -0
  137. pygeai/tests/snippets/evaluation/dataset/list_datasets.py +6 -0
  138. pygeai/tests/snippets/evaluation/dataset/list_expected_sources.py +10 -0
  139. pygeai/tests/snippets/evaluation/dataset/list_filter_variables.py +10 -0
  140. pygeai/tests/snippets/evaluation/dataset/update_dataset.py +15 -0
  141. pygeai/tests/snippets/evaluation/dataset/update_dataset_row.py +20 -0
  142. pygeai/tests/snippets/evaluation/dataset/update_expected_source.py +18 -0
  143. pygeai/tests/snippets/evaluation/dataset/update_filter_variable.py +19 -0
  144. pygeai/tests/snippets/evaluation/dataset/upload_dataset_rows_file.py +10 -0
  145. pygeai/tests/snippets/evaluation/plan/__init__.py +0 -0
  146. pygeai/tests/snippets/evaluation/plan/add_plan_system_metric.py +13 -0
  147. pygeai/tests/snippets/evaluation/plan/complete_workflow_example.py +136 -0
  148. pygeai/tests/snippets/evaluation/plan/create_evaluation_plan.py +24 -0
  149. pygeai/tests/snippets/evaluation/plan/create_rag_evaluation_plan.py +22 -0
  150. pygeai/tests/snippets/evaluation/plan/delete_evaluation_plan.py +9 -0
  151. pygeai/tests/snippets/evaluation/plan/delete_plan_system_metric.py +13 -0
  152. pygeai/tests/snippets/evaluation/plan/execute_evaluation_plan.py +11 -0
  153. pygeai/tests/snippets/evaluation/plan/get_evaluation_plan.py +9 -0
  154. pygeai/tests/snippets/evaluation/plan/get_plan_system_metric.py +13 -0
  155. pygeai/tests/snippets/evaluation/plan/get_system_metric.py +9 -0
  156. pygeai/tests/snippets/evaluation/plan/list_evaluation_plans.py +7 -0
  157. pygeai/tests/snippets/evaluation/plan/list_plan_system_metrics.py +9 -0
  158. pygeai/tests/snippets/evaluation/plan/list_system_metrics.py +7 -0
  159. pygeai/tests/snippets/evaluation/plan/update_evaluation_plan.py +22 -0
  160. pygeai/tests/snippets/evaluation/plan/update_plan_system_metric.py +14 -0
  161. pygeai/tests/snippets/evaluation/result/__init__.py +0 -0
  162. pygeai/tests/snippets/evaluation/result/complete_workflow_example.py +150 -0
  163. pygeai/tests/snippets/evaluation/result/get_evaluation_result.py +26 -0
  164. pygeai/tests/snippets/evaluation/result/list_evaluation_results.py +17 -0
  165. pygeai/tests/snippets/migrate/__init__.py +45 -0
  166. pygeai/tests/snippets/migrate/agent_migration.py +110 -0
  167. pygeai/tests/snippets/migrate/assistant_migration.py +64 -0
  168. pygeai/tests/snippets/migrate/orchestrator_examples.py +179 -0
  169. pygeai/tests/snippets/migrate/process_migration.py +64 -0
  170. pygeai/tests/snippets/migrate/project_migration.py +42 -0
  171. pygeai/tests/snippets/migrate/tool_migration.py +64 -0
  172. pygeai/tests/snippets/organization/create_project.py +2 -2
  173. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/METADATA +1 -1
  174. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/RECORD +178 -96
  175. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/WHEEL +0 -0
  176. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/entry_points.txt +0 -0
  177. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/licenses/LICENSE +0 -0
  178. {pygeai-0.6.0b7.dist-info → pygeai-0.6.0b11.dist-info}/top_level.txt +0 -0
pygeai/cli/commands/chat.py
@@ -52,6 +52,7 @@ def get_chat_completion(option_list: list):
     store = None
     metadata = None
     user = None
+    reasoning_effort = None
 
     for option_flag, option_arg in option_list:
         if option_flag.name == "model":
@@ -124,6 +125,8 @@ def get_chat_completion(option_list: list):
                 raise WrongArgumentError("metadata must be a valid JSON object")
         if option_flag.name == "user":
             user = option_arg
+        if option_flag.name == "reasoning_effort":
+            reasoning_effort = option_arg
 
     messages = get_messages(message_list)
 
@@ -151,7 +154,8 @@ def get_chat_completion(option_list: list):
             stream_options=stream_options,
             store=store,
             metadata=metadata,
-            user=user
+            user=user,
+            reasoning_effort=reasoning_effort
         )
         if stream:
             Console.write_stdout("Streaming chat completion:")
@@ -295,6 +299,14 @@ chat_completion_options = [
         "Optional string identifier for the end-user to monitor abuse.",
         True
     ),
+    Option(
+        "reasoning_effort",
+        ["--reasoning-effort"],
+        "Optional string to control the depth of reasoning applied by supported models. "
+        "Possible values: 'low', 'medium', 'high'. Supported by OpenAI models from version 5, "
+        "Claude models from version 4.1, and Gemini models from version 2.0.",
+        True
+    ),
 ]
 
 
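The hunks above thread a new reasoning_effort option from the chat completion command through to the client call. A minimal Python sketch of the same parameter used directly (hedged: the import path and the chat_completion method name are inferred from pygeai/chat/clients.py and the bundled snippet chat_completion_with_reasoning_effort.py, not shown in this diff; the model name is a placeholder):

    # Sketch only: assumes ChatClient.chat_completion() forwards reasoning_effort
    # the same way the CLI wiring above does.
    from pygeai.chat.clients import ChatClient  # import path assumed

    client = ChatClient()
    result = client.chat_completion(
        model="openai/gpt-5",  # placeholder; the help text names OpenAI models from version 5
        messages=[{"role": "user", "content": "Summarize the release notes."}],
        reasoning_effort="medium",  # new in this release: 'low' | 'medium' | 'high'
    )
    print(result)
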
@@ -649,6 +661,297 @@ generate_image_options = [
 ]
 
 
+def get_edit_image(option_list: list):
+    model = None
+    prompt = None
+    image = None
+    size = None
+    n = 1
+    quality = None
+
+    for option_flag, option_arg in option_list:
+        if option_flag.name == "model":
+            model = option_arg
+        if option_flag.name == "prompt":
+            prompt = option_arg
+        if option_flag.name == "image":
+            image = option_arg
+        if option_flag.name == "size":
+            size = option_arg
+        if option_flag.name == "n":
+            try:
+                n = int(option_arg)
+                if n < 1 or n > 10:
+                    raise WrongArgumentError("n must be an integer between 1 and 10.")
+            except ValueError:
+                raise WrongArgumentError("n must be a valid integer.")
+        if option_flag.name == "quality":
+            quality = option_arg
+
+    if not (model and prompt and image and size):
+        raise MissingRequirementException("Cannot edit image without specifying model, prompt, image, and size.")
+
+    client = ChatClient()
+    try:
+        result = client.edit_image(
+            model=model,
+            prompt=prompt,
+            image=image,
+            size=size,
+            n=n,
+            quality=quality
+        )
+        Console.write_stdout(f"Image editing result: \n{result}\n")
+    except Exception as e:
+        logger.error(f"Error editing image: {e}")
+        Console.write_stderr(f"Failed to edit image: {e}")
+
+
+edit_image_options = [
+    Option(
+        "model",
+        ["--model", "-m"],
+        "The model specification for image editing, e.g., 'openai/gpt-image-1'.",
+        True
+    ),
+    Option(
+        "prompt",
+        ["--prompt", "-p"],
+        "Description of the desired edit, e.g., 'remove the ball'.",
+        True
+    ),
+    Option(
+        "image",
+        ["--image", "-img"],
+        "URL of the image to be edited, e.g., 'https://example.com/image.jpg'.",
+        True
+    ),
+    Option(
+        "size",
+        ["--size", "-s"],
+        "Desired dimensions of the output image in pixels, e.g., '1024x1024'.",
+        True
+    ),
+    Option(
+        "n",
+        ["--n"],
+        "Number of edited images to generate (1-10, depending on the model). Default is 1.",
+        True
+    ),
+    Option(
+        "quality",
+        ["--quality", "-q"],
+        "Rendering quality, e.g., 'high', 'medium', 'low'.",
+        True
+    ),
+]
+
+
+def get_response(option_list: list):
+    model = None
+    input_text = None
+    files = None
+    tools = None
+    tool_choice = None
+    temperature = None
+    max_output_tokens = None
+    top_p = None
+    metadata = None
+    user = None
+    instructions = None
+    reasoning = None
+    truncation = None
+    parallel_tool_calls = None
+    store = None
+    stream = False
+
+    for option_flag, option_arg in option_list:
+        if option_flag.name == "model":
+            model = option_arg
+        if option_flag.name == "input":
+            input_text = option_arg
+        if option_flag.name == "files":
+            try:
+                files = json.loads(option_arg) if option_arg else None
+                if files and not isinstance(files, list):
+                    raise WrongArgumentError("files must be a JSON array of file paths")
+            except json.JSONDecodeError:
+                raise WrongArgumentError("files must be a valid JSON array")
+        if option_flag.name == "tools":
+            try:
+                tools = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                raise WrongArgumentError("tools must be a valid JSON array")
+        if option_flag.name == "tool_choice":
+            try:
+                tool_choice = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                tool_choice = option_arg
+        if option_flag.name == "temperature":
+            temperature = float(option_arg) if option_arg is not None else None
+        if option_flag.name == "max_output_tokens":
+            max_output_tokens = int(option_arg) if option_arg is not None else None
+        if option_flag.name == "top_p":
+            top_p = float(option_arg) if option_arg else None
+        if option_flag.name == "metadata":
+            try:
+                metadata = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                raise WrongArgumentError("metadata must be a valid JSON object")
+        if option_flag.name == "user":
+            user = option_arg
+        if option_flag.name == "instructions":
+            instructions = option_arg
+        if option_flag.name == "reasoning":
+            try:
+                reasoning = json.loads(option_arg) if option_arg else None
+            except json.JSONDecodeError:
+                raise WrongArgumentError("reasoning must be a valid JSON object")
+        if option_flag.name == "truncation":
+            truncation = option_arg
+        if option_flag.name == "parallel_tool_calls":
+            parallel_tool_calls = get_boolean_value(option_arg) if option_arg else None
+        if option_flag.name == "store":
+            store = get_boolean_value(option_arg) if option_arg else None
+        if option_flag.name == "stream":
+            if option_arg:
+                stream = get_boolean_value(option_arg)
+
+    if not (model and input_text):
+        raise MissingRequirementException("Cannot get response without specifying model and input")
+
+    client = ChatClient()
+    try:
+        result = client.get_response(
+            model=model,
+            input=input_text,
+            files=files,
+            tools=tools,
+            tool_choice=tool_choice,
+            temperature=temperature,
+            max_output_tokens=max_output_tokens,
+            top_p=top_p,
+            metadata=metadata,
+            user=user,
+            instructions=instructions,
+            reasoning=reasoning,
+            truncation=truncation,
+            parallel_tool_calls=parallel_tool_calls,
+            store=store,
+            stream=stream
+        )
+        if stream:
+            Console.write_stdout("Streaming response:")
+            for chunk in result:
+                Console.write_stdout(f"{chunk}", end="")
+                sys.stdout.flush()
+            Console.write_stdout()
+        else:
+            Console.write_stdout(f"Response result: \n{json.dumps(result, indent=2)}\n")
+    except Exception as e:
+        logger.error(f"Error getting response: {e}")
+        Console.write_stderr(f"Failed to get response: {e}")
+
+
+response_options = [
+    Option(
+        "model",
+        ["--model", "-m"],
+        "The model specification, e.g., 'openai/o1-pro'.",
+        True
+    ),
+    Option(
+        "input",
+        ["--input", "-i"],
+        "The user input text.",
+        True
+    ),
+    Option(
+        "files",
+        ["--files", "-f"],
+        "JSON array of file paths (images or PDFs) to include in the request, e.g., '[\"image.jpg\", \"doc.pdf\"]'.",
+        True
+    ),
+    Option(
+        "tools",
+        ["--tools"],
+        "Optional JSON array of tools (e.g., functions) the model may call.",
+        True
+    ),
+    Option(
+        "tool_choice",
+        ["--tool-choice"],
+        "Optional string (e.g., \"none\", \"auto\") or JSON object to control which tool is called.",
+        True
+    ),
+    Option(
+        "temperature",
+        ["--temperature", "--temp"],
+        "Float value to set randomness of the response (between 0 and 2).",
+        True
+    ),
+    Option(
+        "max_output_tokens",
+        ["--max-output-tokens"],
+        "Integer value to set max tokens in the output.",
+        True
+    ),
+    Option(
+        "top_p",
+        ["--top-p"],
+        "Optional float value for nucleus sampling (between 0 and 1).",
+        True
+    ),
+    Option(
+        "metadata",
+        ["--metadata"],
+        "Optional JSON object with up to 16 key-value pairs to attach to the object.",
+        True
+    ),
+    Option(
+        "user",
+        ["--user"],
+        "Optional string identifier for the end-user.",
+        True
+    ),
+    Option(
+        "instructions",
+        ["--instructions"],
+        "Optional additional instructions for the model.",
+        True
+    ),
+    Option(
+        "reasoning",
+        ["--reasoning"],
+        "Optional JSON object for reasoning configuration, e.g., {\"effort\": \"medium\"}.",
+        True
+    ),
+    Option(
+        "truncation",
+        ["--truncation"],
+        "Optional truncation strategy, e.g., \"disabled\".",
+        True
+    ),
+    Option(
+        "parallel_tool_calls",
+        ["--parallel-tool-calls"],
+        "Optional boolean to enable parallel tool calls. Possible values: 0: OFF; 1: ON",
+        True
+    ),
+    Option(
+        "store",
+        ["--store"],
+        "Optional boolean to store the output. Possible values: 0: OFF; 1: ON",
+        True
+    ),
+    Option(
+        "stream",
+        ["--stream"],
+        "Whether to stream the response. Possible values: 0: OFF; 1: ON",
+        True
+    ),
+]
+
 chat_commands = [
     Command(
         "help",
@@ -695,5 +998,23 @@ chat_commands = [
         [],
         generate_image_options
     ),
+    Command(
+        "edit_image",
+        ["edit-image", "edit-img"],
+        "Edit an existing image using the specified model and parameters",
+        get_edit_image,
+        ArgumentsEnum.REQUIRED,
+        [],
+        edit_image_options
+    ),
+    Command(
+        "response",
+        ["response", "resp"],
+        "Get a response using the Responses API with support for images and PDFs",
+        get_response,
+        ArgumentsEnum.REQUIRED,
+        [],
+        response_options
+    ),
 
 ]
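Both new commands delegate to ChatClient methods whose keyword arguments are visible in the hunks above. A short usage sketch (the import path is assumed; model names, URLs, and file paths are placeholders taken from the option help text):

    from pygeai.chat.clients import ChatClient  # import path assumed

    client = ChatClient()

    # edit-image: the CLI wrapper requires model, prompt, image and size
    edited = client.edit_image(
        model="openai/gpt-image-1",
        prompt="remove the ball",
        image="https://example.com/image.jpg",
        size="1024x1024",
        n=1,
        quality="high",
    )

    # response: Responses API call; files, tools and reasoning are optional
    answer = client.get_response(
        model="openai/o1-pro",
        input="Describe the attached document.",
        files=["doc.pdf"],  # placeholder path
        reasoning={"effort": "medium"},
        stream=False,
    )
    print(edited, answer)
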
pygeai/cli/commands/embeddings.py
@@ -1,5 +1,7 @@
+import json
 from pygeai.cli.commands import Command, Option, ArgumentsEnum
 from pygeai.cli.commands.builders import build_help_text
+from pygeai.cli.commands.common import get_boolean_value
 from pygeai.cli.texts.help import EMBEDDINGS_HELP_TEXT
 from pygeai.core.common.exceptions import MissingRequirementException, WrongArgumentError
 from pygeai.core.embeddings.clients import EmbeddingsClient
@@ -22,6 +24,7 @@ def generate_embeddings(option_list: list):
     input_type = None
     timeout = None
     cache = None
+    preview = True
     input_list = list()
 
     for option_flag, option_arg in option_list:
@@ -32,18 +35,23 @@ def generate_embeddings(option_list: list):
         if option_flag.name == "encoding_format":
             encoding_format = option_arg
         if option_flag.name == "dimensions":
-            dimensions = option_arg
+            try:
+                dimensions = int(option_arg)
+            except (ValueError, TypeError):
+                raise WrongArgumentError("dimensions must be an integer")
         if option_flag.name == "user":
             user = option_arg
         if option_flag.name == "input_type":
             input_type = option_arg
         if option_flag.name == "timeout":
-            timeout = option_arg
+            try:
+                timeout = int(option_arg)
+            except (ValueError, TypeError):
+                raise WrongArgumentError("timeout must be an integer")
         if option_flag.name == "cache":
-            if not str(option_arg).isdigit() or int(option_arg) not in [0, 1]:
-                raise WrongArgumentError("If specified, cache must be 0 or 1")
-
-            cache = bool(int(option_arg))
+            cache = get_boolean_value(option_arg)
+        if option_flag.name == "preview":
+            preview = get_boolean_value(option_arg)
 
     if not (model and any(input_list)):
         raise MissingRequirementException("Cannot generate embeddings without specifying model and at least one input")
@@ -59,7 +67,40 @@
         timeout=timeout,
         cache=cache
     )
-    Console.write_stdout(f"Embeddings detail: \n{result}")
+
+    output = {
+        "model": result.get("model"),
+        "object": result.get("object"),
+        "embeddings_count": len(result.get("data", [])),
+        "usage": result.get("usage"),
+        "data": []
+    }
+
+    for item in result.get("data", []):
+        embedding_data = item.get("embedding")
+        if isinstance(embedding_data, list):
+            embedding_info = {
+                "index": item.get("index"),
+                "dimensions": len(embedding_data),
+                "object": item.get("object")
+            }
+            if preview:
+                embedding_info["preview"] = embedding_data[:5] if len(embedding_data) > 5 else embedding_data
+            else:
+                embedding_info["embedding"] = embedding_data
+        else:
+            embedding_info = {
+                "index": item.get("index"),
+                "object": item.get("object"),
+                "format": "base64"
+            }
+            if preview:
+                embedding_info["preview"] = str(embedding_data)[:50] + "..." if len(str(embedding_data)) > 50 else embedding_data
+            else:
+                embedding_info["embedding"] = embedding_data
+        output["data"].append(embedding_info)
+
+    Console.write_stdout(json.dumps(output, indent=2))
 
 
 generate_embeddings_options = [
@@ -109,10 +150,17 @@ generate_embeddings_options = [
     Option(
         "cache",
         ["--cache"],
-        "Enable X-Saia-Cache-Enabled to cache the embeddings for the model; it applies by Organization/Project."
+        "Enable X-Saia-Cache-Enabled to cache the embeddings for the model; it applies by Organization/Project. "
         "1 to set to True and 0 to false. 0 is default",
         True
     ),
+    Option(
+        "preview",
+        ["--preview"],
+        "Control embedding display in output. 1 (default) shows a preview (first 5 values for float, 50 chars for base64). "
+        "0 shows the full embedding vector. Use 0 to get complete embeddings for further processing",
+        True
+    ),
 
 ]
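The new --preview handling summarizes each embedding instead of dumping the full vector. A self-contained sketch of the same transformation applied to a mock response dict (no network call; it mirrors the CLI code above rather than invoking EmbeddingsClient):

    import json

    # Mock of the response structure the CLI iterates over.
    result = {
        "model": "openai/text-embedding-3-small",  # placeholder model name
        "object": "list",
        "usage": {"prompt_tokens": 3, "total_tokens": 3},
        "data": [
            {"index": 0, "object": "embedding",
             "embedding": [0.12, -0.03, 0.44, 0.9, -0.5, 0.07]},
        ],
    }

    preview = True  # --preview 1 (default); 0 keeps the full vector
    output = {
        "model": result.get("model"),
        "object": result.get("object"),
        "embeddings_count": len(result.get("data", [])),
        "usage": result.get("usage"),
        "data": [],
    }
    for item in result.get("data", []):
        vector = item.get("embedding")
        info = {"index": item.get("index"), "dimensions": len(vector), "object": item.get("object")}
        if preview:
            info["preview"] = vector[:5]  # first 5 values, as the CLI does for float embeddings
        else:
            info["embedding"] = vector
        output["data"].append(info)

    print(json.dumps(output, indent=2))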