lionagi 0.5.5__py3-none-any.whl → 0.6.1__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- lionagi/__init__.py +18 -24
- lionagi/{core/_class_registry.py → _class_registry.py} +51 -10
- lionagi/_errors.py +35 -0
- lionagi/libs/__init__.py +3 -0
- lionagi/libs/compress/__init__.py +3 -0
- lionagi/libs/compress/models.py +6 -2
- lionagi/libs/compress/utils.py +4 -16
- lionagi/libs/file/__init__.py +3 -0
- lionagi/libs/file/chunk.py +4 -0
- lionagi/libs/file/file_ops.py +4 -0
- lionagi/libs/file/params.py +4 -41
- lionagi/libs/file/process.py +4 -0
- lionagi/libs/file/save.py +5 -1
- lionagi/libs/{parse/flatten → nested}/flatten.py +4 -0
- lionagi/libs/{parse/nested → nested}/nfilter.py +4 -0
- lionagi/libs/{parse/nested → nested}/nget.py +6 -1
- lionagi/libs/{parse/nested → nested}/ninsert.py +5 -1
- lionagi/libs/{parse/nested → nested}/nmerge.py +4 -0
- lionagi/libs/{parse/nested → nested}/npop.py +5 -2
- lionagi/libs/{parse/nested → nested}/nset.py +6 -1
- lionagi/libs/{parse/flatten → nested}/unflatten.py +4 -0
- lionagi/libs/{parse/nested → nested}/utils.py +5 -1
- lionagi/libs/package/__init__.py +3 -0
- lionagi/libs/package/imports.py +6 -2
- lionagi/libs/package/management.py +7 -3
- lionagi/libs/package/params.py +4 -0
- lionagi/libs/package/system.py +4 -0
- lionagi/libs/parse.py +30 -0
- lionagi/libs/{parse/json → schema}/as_readable.py +10 -4
- lionagi/libs/{parse/string_parse/code_block.py → schema/extract_code_block.py} +4 -0
- lionagi/libs/{parse/string_parse/docstring.py → schema/extract_docstring.py} +4 -0
- lionagi/libs/{parse/string_parse/function_.py → schema/function_to_schema.py} +21 -9
- lionagi/libs/{parse/json/schema.py → schema/json_schema.py} +5 -1
- lionagi/libs/validate/common_field_validators.py +170 -0
- lionagi/libs/{parse/validate/keys.py → validate/fuzzy_match_keys.py} +42 -8
- lionagi/libs/{parse/validate/mapping.py → validate/fuzzy_validate_mapping.py} +41 -6
- lionagi/libs/{string_similarity/algorithms.py → validate/string_similarity.py} +115 -1
- lionagi/libs/{parse/validate/boolean.py → validate/validate_boolean.py} +42 -3
- lionagi/operations/__init__.py +13 -3
- lionagi/operations/brainstorm/__init__.py +3 -3
- lionagi/operations/brainstorm/brainstorm.py +33 -19
- lionagi/operations/brainstorm/prompt.py +4 -0
- lionagi/operations/plan/__init__.py +4 -0
- lionagi/operations/plan/plan.py +19 -16
- lionagi/operations/plan/prompt.py +4 -0
- lionagi/operations/select/__init__.py +4 -0
- lionagi/operations/select/prompt.py +4 -0
- lionagi/operations/select/select.py +2 -2
- lionagi/operations/select/utils.py +4 -4
- lionagi/{strategies → operations/strategies}/base.py +6 -2
- lionagi/{strategies → operations/strategies}/concurrent.py +8 -5
- lionagi/{strategies → operations/strategies}/concurrent_chunk.py +6 -3
- lionagi/{strategies → operations/strategies}/concurrent_sequential_chunk.py +8 -4
- lionagi/{strategies → operations/strategies}/params.py +26 -6
- lionagi/{strategies → operations/strategies}/sequential.py +6 -2
- lionagi/{strategies → operations/strategies}/sequential_chunk.py +7 -3
- lionagi/{strategies → operations/strategies}/sequential_concurrent_chunk.py +9 -4
- lionagi/{strategies → operations/strategies}/utils.py +6 -3
- lionagi/operations/types.py +13 -0
- lionagi/operations/utils.py +6 -3
- lionagi/operatives/action/function_calling.py +136 -0
- lionagi/operatives/action/manager.py +236 -0
- lionagi/operatives/action/request_response_model.py +90 -0
- lionagi/operatives/action/tool.py +141 -0
- lionagi/{protocols/operatives/action.py → operatives/action/utils.py} +52 -90
- lionagi/{core → operatives}/forms/base.py +9 -4
- lionagi/{core → operatives}/forms/form.py +8 -13
- lionagi/{core → operatives}/forms/report.py +5 -3
- lionagi/operatives/instruct/base.py +79 -0
- lionagi/operatives/instruct/instruct.py +105 -0
- lionagi/operatives/instruct/instruct_collection.py +52 -0
- lionagi/operatives/instruct/node.py +13 -0
- lionagi/{protocols/operatives → operatives/instruct}/prompts.py +0 -34
- lionagi/{protocols/operatives → operatives/instruct}/reason.py +14 -7
- lionagi/{core/models/__init__.py → operatives/manager.py} +5 -1
- lionagi/operatives/models/field_model.py +194 -0
- lionagi/operatives/models/model_params.py +307 -0
- lionagi/{core → operatives}/models/note.py +20 -28
- lionagi/{core → operatives}/models/operable_model.py +153 -71
- lionagi/{core → operatives}/models/schema_model.py +4 -3
- lionagi/{protocols/operatives → operatives}/operative.py +10 -7
- lionagi/{protocols/operatives → operatives}/step.py +67 -26
- lionagi/operatives/types.py +69 -0
- lionagi/protocols/_concepts.py +94 -0
- lionagi/protocols/adapters/adapter.py +23 -7
- lionagi/protocols/adapters/json_adapter.py +72 -14
- lionagi/protocols/adapters/pandas_/csv_adapter.py +50 -0
- lionagi/protocols/adapters/pandas_/excel_adapter.py +52 -0
- lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +31 -0
- lionagi/protocols/adapters/pandas_/pd_series_adapter.py +17 -0
- lionagi/protocols/adapters/types.py +18 -0
- lionagi/protocols/generic/element.py +460 -0
- lionagi/protocols/generic/event.py +177 -0
- lionagi/protocols/generic/log.py +237 -0
- lionagi/{core → protocols}/generic/pile.py +193 -131
- lionagi/protocols/generic/processor.py +316 -0
- lionagi/protocols/generic/progression.py +500 -0
- lionagi/protocols/graph/edge.py +166 -0
- lionagi/protocols/graph/graph.py +290 -0
- lionagi/protocols/graph/node.py +125 -0
- lionagi/protocols/mail/exchange.py +116 -0
- lionagi/protocols/mail/mail.py +25 -0
- lionagi/protocols/mail/mailbox.py +47 -0
- lionagi/protocols/mail/manager.py +168 -0
- lionagi/protocols/mail/package.py +55 -0
- lionagi/protocols/messages/action_request.py +165 -0
- lionagi/protocols/messages/action_response.py +132 -0
- lionagi/{core/communication → protocols/messages}/assistant_response.py +55 -79
- lionagi/protocols/messages/base.py +73 -0
- lionagi/protocols/messages/instruction.py +582 -0
- lionagi/protocols/messages/manager.py +429 -0
- lionagi/protocols/messages/message.py +216 -0
- lionagi/protocols/messages/system.py +115 -0
- lionagi/protocols/messages/templates/assistant_response.jinja2 +6 -0
- lionagi/{core/communication → protocols/messages}/templates/instruction_message.jinja2 +2 -2
- lionagi/protocols/types.py +96 -0
- lionagi/service/__init__.py +1 -16
- lionagi/service/endpoints/base.py +517 -0
- lionagi/service/endpoints/chat_completion.py +102 -0
- lionagi/service/endpoints/match_endpoint.py +60 -0
- lionagi/service/endpoints/rate_limited_processor.py +146 -0
- lionagi/service/endpoints/token_calculator.py +209 -0
- lionagi/service/imodel.py +263 -96
- lionagi/service/manager.py +45 -0
- lionagi/service/providers/anthropic_/messages.py +64 -0
- lionagi/service/providers/groq_/chat_completions.py +56 -0
- lionagi/service/providers/openai_/chat_completions.py +62 -0
- lionagi/service/providers/openrouter_/chat_completions.py +62 -0
- lionagi/service/providers/perplexity_/__init__.py +3 -0
- lionagi/service/providers/perplexity_/chat_completions.py +40 -0
- lionagi/service/types.py +18 -0
- lionagi/session/__init__.py +3 -0
- lionagi/session/branch.py +1287 -0
- lionagi/session/session.py +296 -0
- lionagi/settings.py +62 -118
- lionagi/utils.py +2386 -0
- lionagi/version.py +1 -1
- {lionagi-0.5.5.dist-info → lionagi-0.6.1.dist-info}/METADATA +10 -9
- lionagi-0.6.1.dist-info/RECORD +169 -0
- lionagi/core/action/action_manager.py +0 -289
- lionagi/core/action/base.py +0 -109
- lionagi/core/action/function_calling.py +0 -153
- lionagi/core/action/tool.py +0 -202
- lionagi/core/action/types.py +0 -16
- lionagi/core/communication/action_request.py +0 -163
- lionagi/core/communication/action_response.py +0 -149
- lionagi/core/communication/base_mail.py +0 -49
- lionagi/core/communication/instruction.py +0 -376
- lionagi/core/communication/message.py +0 -286
- lionagi/core/communication/message_manager.py +0 -543
- lionagi/core/communication/system.py +0 -116
- lionagi/core/communication/templates/assistant_response.jinja2 +0 -2
- lionagi/core/communication/types.py +0 -27
- lionagi/core/communication/utils.py +0 -256
- lionagi/core/forms/types.py +0 -13
- lionagi/core/generic/component.py +0 -422
- lionagi/core/generic/edge.py +0 -163
- lionagi/core/generic/element.py +0 -199
- lionagi/core/generic/graph.py +0 -377
- lionagi/core/generic/log.py +0 -151
- lionagi/core/generic/log_manager.py +0 -320
- lionagi/core/generic/node.py +0 -11
- lionagi/core/generic/progression.py +0 -395
- lionagi/core/generic/types.py +0 -23
- lionagi/core/generic/utils.py +0 -53
- lionagi/core/models/base.py +0 -28
- lionagi/core/models/field_model.py +0 -145
- lionagi/core/models/model_params.py +0 -194
- lionagi/core/models/types.py +0 -19
- lionagi/core/session/branch.py +0 -130
- lionagi/core/session/branch_mixins.py +0 -581
- lionagi/core/session/session.py +0 -163
- lionagi/core/session/types.py +0 -8
- lionagi/core/typing/__init__.py +0 -9
- lionagi/core/typing/_concepts.py +0 -173
- lionagi/core/typing/_id.py +0 -104
- lionagi/core/typing/_pydantic.py +0 -33
- lionagi/core/typing/_typing.py +0 -54
- lionagi/integrations/_services.py +0 -17
- lionagi/integrations/anthropic_/AnthropicModel.py +0 -268
- lionagi/integrations/anthropic_/AnthropicService.py +0 -127
- lionagi/integrations/anthropic_/anthropic_max_output_token_data.yaml +0 -12
- lionagi/integrations/anthropic_/anthropic_price_data.yaml +0 -34
- lionagi/integrations/anthropic_/api_endpoints/api_request.py +0 -277
- lionagi/integrations/anthropic_/api_endpoints/data_models.py +0 -40
- lionagi/integrations/anthropic_/api_endpoints/match_response.py +0 -119
- lionagi/integrations/anthropic_/api_endpoints/messages/request/message_models.py +0 -14
- lionagi/integrations/anthropic_/api_endpoints/messages/request/request_body.py +0 -74
- lionagi/integrations/anthropic_/api_endpoints/messages/response/__init__.py +0 -0
- lionagi/integrations/anthropic_/api_endpoints/messages/response/content_models.py +0 -32
- lionagi/integrations/anthropic_/api_endpoints/messages/response/response_body.py +0 -101
- lionagi/integrations/anthropic_/api_endpoints/messages/response/usage_models.py +0 -25
- lionagi/integrations/anthropic_/version.py +0 -5
- lionagi/integrations/groq_/GroqModel.py +0 -325
- lionagi/integrations/groq_/GroqService.py +0 -156
- lionagi/integrations/groq_/api_endpoints/__init__.py +0 -0
- lionagi/integrations/groq_/api_endpoints/data_models.py +0 -187
- lionagi/integrations/groq_/api_endpoints/groq_request.py +0 -288
- lionagi/integrations/groq_/api_endpoints/match_response.py +0 -106
- lionagi/integrations/groq_/api_endpoints/response_utils.py +0 -105
- lionagi/integrations/groq_/groq_max_output_token_data.yaml +0 -21
- lionagi/integrations/groq_/groq_price_data.yaml +0 -58
- lionagi/integrations/groq_/groq_rate_limits.yaml +0 -105
- lionagi/integrations/groq_/version.py +0 -5
- lionagi/integrations/litellm_/imodel.py +0 -76
- lionagi/integrations/ollama_/OllamaModel.py +0 -244
- lionagi/integrations/ollama_/OllamaService.py +0 -142
- lionagi/integrations/ollama_/api_endpoints/api_request.py +0 -179
- lionagi/integrations/ollama_/api_endpoints/chat_completion/message_models.py +0 -31
- lionagi/integrations/ollama_/api_endpoints/chat_completion/request_body.py +0 -46
- lionagi/integrations/ollama_/api_endpoints/chat_completion/response_body.py +0 -67
- lionagi/integrations/ollama_/api_endpoints/chat_completion/tool_models.py +0 -49
- lionagi/integrations/ollama_/api_endpoints/completion/__init__.py +0 -0
- lionagi/integrations/ollama_/api_endpoints/completion/request_body.py +0 -72
- lionagi/integrations/ollama_/api_endpoints/completion/response_body.py +0 -59
- lionagi/integrations/ollama_/api_endpoints/data_models.py +0 -15
- lionagi/integrations/ollama_/api_endpoints/embedding/__init__.py +0 -0
- lionagi/integrations/ollama_/api_endpoints/embedding/request_body.py +0 -33
- lionagi/integrations/ollama_/api_endpoints/embedding/response_body.py +0 -29
- lionagi/integrations/ollama_/api_endpoints/match_data_model.py +0 -62
- lionagi/integrations/ollama_/api_endpoints/match_response.py +0 -190
- lionagi/integrations/ollama_/api_endpoints/model/copy_model.py +0 -13
- lionagi/integrations/ollama_/api_endpoints/model/create_model.py +0 -28
- lionagi/integrations/ollama_/api_endpoints/model/delete_model.py +0 -11
- lionagi/integrations/ollama_/api_endpoints/model/list_model.py +0 -60
- lionagi/integrations/ollama_/api_endpoints/model/pull_model.py +0 -34
- lionagi/integrations/ollama_/api_endpoints/model/push_model.py +0 -35
- lionagi/integrations/ollama_/api_endpoints/model/show_model.py +0 -36
- lionagi/integrations/ollama_/api_endpoints/option_models.py +0 -68
- lionagi/integrations/openai_/OpenAIModel.py +0 -419
- lionagi/integrations/openai_/OpenAIService.py +0 -435
- lionagi/integrations/openai_/__init__.py +0 -0
- lionagi/integrations/openai_/api_endpoints/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/api_request.py +0 -277
- lionagi/integrations/openai_/api_endpoints/audio/__init__.py +0 -9
- lionagi/integrations/openai_/api_endpoints/audio/speech_models.py +0 -34
- lionagi/integrations/openai_/api_endpoints/audio/transcription_models.py +0 -136
- lionagi/integrations/openai_/api_endpoints/audio/translation_models.py +0 -41
- lionagi/integrations/openai_/api_endpoints/audio/types.py +0 -41
- lionagi/integrations/openai_/api_endpoints/batch/__init__.py +0 -17
- lionagi/integrations/openai_/api_endpoints/batch/batch_models.py +0 -146
- lionagi/integrations/openai_/api_endpoints/batch/cancel_batch.py +0 -7
- lionagi/integrations/openai_/api_endpoints/batch/create_batch.py +0 -26
- lionagi/integrations/openai_/api_endpoints/batch/list_batch.py +0 -37
- lionagi/integrations/openai_/api_endpoints/batch/request_object_models.py +0 -65
- lionagi/integrations/openai_/api_endpoints/batch/retrieve_batch.py +0 -7
- lionagi/integrations/openai_/api_endpoints/batch/types.py +0 -4
- lionagi/integrations/openai_/api_endpoints/chat_completions/__init__.py +0 -1
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/__init__.py +0 -39
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/message_models.py +0 -121
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/request_body.py +0 -221
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/response_format.py +0 -71
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/stream_options.py +0 -14
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_choice_models.py +0 -17
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/tool_models.py +0 -54
- lionagi/integrations/openai_/api_endpoints/chat_completions/request/types.py +0 -18
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/__init__.py +0 -0
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/choice_models.py +0 -62
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/function_models.py +0 -16
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/log_prob_models.py +0 -47
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/message_models.py +0 -25
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/response_body.py +0 -99
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/types.py +0 -8
- lionagi/integrations/openai_/api_endpoints/chat_completions/response/usage_models.py +0 -24
- lionagi/integrations/openai_/api_endpoints/chat_completions/util.py +0 -46
- lionagi/integrations/openai_/api_endpoints/data_models.py +0 -23
- lionagi/integrations/openai_/api_endpoints/embeddings/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/embeddings/request_body.py +0 -79
- lionagi/integrations/openai_/api_endpoints/embeddings/response_body.py +0 -67
- lionagi/integrations/openai_/api_endpoints/files/__init__.py +0 -11
- lionagi/integrations/openai_/api_endpoints/files/delete_file.py +0 -20
- lionagi/integrations/openai_/api_endpoints/files/file_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/files/list_files.py +0 -27
- lionagi/integrations/openai_/api_endpoints/files/retrieve_file.py +0 -9
- lionagi/integrations/openai_/api_endpoints/files/upload_file.py +0 -38
- lionagi/integrations/openai_/api_endpoints/fine_tuning/__init__.py +0 -37
- lionagi/integrations/openai_/api_endpoints/fine_tuning/cancel_jobs.py +0 -9
- lionagi/integrations/openai_/api_endpoints/fine_tuning/create_jobs.py +0 -133
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_checkpoint_models.py +0 -58
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_event_models.py +0 -31
- lionagi/integrations/openai_/api_endpoints/fine_tuning/fine_tuning_job_models.py +0 -140
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_checkpoints.py +0 -51
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_events.py +0 -42
- lionagi/integrations/openai_/api_endpoints/fine_tuning/list_fine_tuning_jobs.py +0 -31
- lionagi/integrations/openai_/api_endpoints/fine_tuning/retrieve_jobs.py +0 -9
- lionagi/integrations/openai_/api_endpoints/fine_tuning/training_format.py +0 -30
- lionagi/integrations/openai_/api_endpoints/images/__init__.py +0 -9
- lionagi/integrations/openai_/api_endpoints/images/image_edit_models.py +0 -69
- lionagi/integrations/openai_/api_endpoints/images/image_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/images/image_variation_models.py +0 -56
- lionagi/integrations/openai_/api_endpoints/images/response_body.py +0 -30
- lionagi/integrations/openai_/api_endpoints/match_data_model.py +0 -197
- lionagi/integrations/openai_/api_endpoints/match_response.py +0 -336
- lionagi/integrations/openai_/api_endpoints/models/__init__.py +0 -7
- lionagi/integrations/openai_/api_endpoints/models/delete_fine_tuned_model.py +0 -17
- lionagi/integrations/openai_/api_endpoints/models/models_models.py +0 -31
- lionagi/integrations/openai_/api_endpoints/models/retrieve_model.py +0 -9
- lionagi/integrations/openai_/api_endpoints/moderations/__init__.py +0 -3
- lionagi/integrations/openai_/api_endpoints/moderations/request_body.py +0 -20
- lionagi/integrations/openai_/api_endpoints/moderations/response_body.py +0 -139
- lionagi/integrations/openai_/api_endpoints/uploads/__init__.py +0 -19
- lionagi/integrations/openai_/api_endpoints/uploads/add_upload_part.py +0 -11
- lionagi/integrations/openai_/api_endpoints/uploads/cancel_upload.py +0 -7
- lionagi/integrations/openai_/api_endpoints/uploads/complete_upload.py +0 -18
- lionagi/integrations/openai_/api_endpoints/uploads/create_upload.py +0 -17
- lionagi/integrations/openai_/api_endpoints/uploads/uploads_models.py +0 -52
- lionagi/integrations/openai_/image_token_calculator/__init__.py +0 -0
- lionagi/integrations/openai_/image_token_calculator/image_token_calculator.py +0 -98
- lionagi/integrations/openai_/image_token_calculator/openai_image_token_data.yaml +0 -15
- lionagi/integrations/openai_/openai_max_output_token_data.yaml +0 -12
- lionagi/integrations/openai_/openai_price_data.yaml +0 -26
- lionagi/integrations/openai_/version.py +0 -1
- lionagi/integrations/pandas_/__init__.py +0 -24
- lionagi/integrations/pandas_/extend_df.py +0 -61
- lionagi/integrations/pandas_/read.py +0 -103
- lionagi/integrations/pandas_/remove_rows.py +0 -61
- lionagi/integrations/pandas_/replace_keywords.py +0 -65
- lionagi/integrations/pandas_/save.py +0 -131
- lionagi/integrations/pandas_/search_keywords.py +0 -69
- lionagi/integrations/pandas_/to_df.py +0 -196
- lionagi/integrations/pandas_/update_cells.py +0 -54
- lionagi/integrations/perplexity_/PerplexityModel.py +0 -274
- lionagi/integrations/perplexity_/PerplexityService.py +0 -118
- lionagi/integrations/perplexity_/api_endpoints/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/api_request.py +0 -171
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/request/request_body.py +0 -121
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/__init__.py +0 -0
- lionagi/integrations/perplexity_/api_endpoints/chat_completions/response/response_body.py +0 -146
- lionagi/integrations/perplexity_/api_endpoints/data_models.py +0 -63
- lionagi/integrations/perplexity_/api_endpoints/match_response.py +0 -26
- lionagi/integrations/perplexity_/perplexity_max_output_token_data.yaml +0 -3
- lionagi/integrations/perplexity_/perplexity_price_data.yaml +0 -10
- lionagi/integrations/perplexity_/version.py +0 -1
- lionagi/integrations/pydantic_/__init__.py +0 -8
- lionagi/integrations/pydantic_/break_down_annotation.py +0 -81
- lionagi/integrations/pydantic_/new_model.py +0 -208
- lionagi/libs/constants.py +0 -98
- lionagi/libs/file/path.py +0 -301
- lionagi/libs/file/types.py +0 -22
- lionagi/libs/func/__init__.py +0 -0
- lionagi/libs/func/async_calls/__init__.py +0 -24
- lionagi/libs/func/async_calls/alcall.py +0 -210
- lionagi/libs/func/async_calls/bcall.py +0 -130
- lionagi/libs/func/async_calls/mcall.py +0 -134
- lionagi/libs/func/async_calls/pcall.py +0 -149
- lionagi/libs/func/async_calls/rcall.py +0 -217
- lionagi/libs/func/async_calls/tcall.py +0 -114
- lionagi/libs/func/async_calls/ucall.py +0 -85
- lionagi/libs/func/decorators.py +0 -277
- lionagi/libs/func/lcall.py +0 -57
- lionagi/libs/func/params.py +0 -64
- lionagi/libs/func/throttle.py +0 -119
- lionagi/libs/func/types.py +0 -39
- lionagi/libs/func/utils.py +0 -96
- lionagi/libs/package/types.py +0 -26
- lionagi/libs/parse/__init__.py +0 -1
- lionagi/libs/parse/flatten/__init__.py +0 -9
- lionagi/libs/parse/flatten/params.py +0 -52
- lionagi/libs/parse/json/__init__.py +0 -27
- lionagi/libs/parse/json/extract.py +0 -102
- lionagi/libs/parse/json/parse.py +0 -179
- lionagi/libs/parse/json/to_json.py +0 -71
- lionagi/libs/parse/nested/__init__.py +0 -33
- lionagi/libs/parse/nested/to_flat_list.py +0 -64
- lionagi/libs/parse/params.py +0 -0
- lionagi/libs/parse/string_parse/__init__.py +0 -11
- lionagi/libs/parse/type_convert/__init__.py +0 -19
- lionagi/libs/parse/type_convert/params.py +0 -145
- lionagi/libs/parse/type_convert/to_dict.py +0 -333
- lionagi/libs/parse/type_convert/to_list.py +0 -186
- lionagi/libs/parse/type_convert/to_num.py +0 -358
- lionagi/libs/parse/type_convert/to_str.py +0 -195
- lionagi/libs/parse/types.py +0 -9
- lionagi/libs/parse/validate/__init__.py +0 -14
- lionagi/libs/parse/validate/params.py +0 -62
- lionagi/libs/parse/xml/__init__.py +0 -10
- lionagi/libs/parse/xml/convert.py +0 -56
- lionagi/libs/parse/xml/parser.py +0 -93
- lionagi/libs/string_similarity/__init__.py +0 -32
- lionagi/libs/string_similarity/matcher.py +0 -102
- lionagi/libs/string_similarity/utils.py +0 -15
- lionagi/libs/utils.py +0 -266
- lionagi/protocols/adapters/pandas_adapter.py +0 -96
- lionagi/protocols/configs/__init__.py +0 -0
- lionagi/protocols/configs/branch_config.py +0 -86
- lionagi/protocols/configs/id_config.py +0 -15
- lionagi/protocols/configs/imodel_config.py +0 -73
- lionagi/protocols/configs/log_config.py +0 -93
- lionagi/protocols/configs/retry_config.py +0 -29
- lionagi/protocols/configs/types.py +0 -15
- lionagi/protocols/operatives/instruct.py +0 -194
- lionagi/protocols/operatives/types.py +0 -19
- lionagi/protocols/registries/_component_registry.py +0 -23
- lionagi/protocols/registries/_pile_registry.py +0 -30
- lionagi/service/complete_request_info.py +0 -11
- lionagi/service/rate_limiter.py +0 -108
- lionagi/service/service.py +0 -41
- lionagi/service/service_match_util.py +0 -131
- lionagi/service/service_util.py +0 -72
- lionagi/service/token_calculator.py +0 -51
- lionagi/strategies/__init__.py +0 -0
- lionagi/strategies/types.py +0 -21
- lionagi-0.5.5.dist-info/RECORD +0 -374
- /lionagi/{core → libs/nested}/__init__.py +0 -0
- /lionagi/{core/action → libs/schema}/__init__.py +0 -0
- /lionagi/{core/communication → libs/validate}/__init__.py +0 -0
- /lionagi/{core/forms → operations/strategies}/__init__.py +0 -0
- /lionagi/{core/generic → operatives}/__init__.py +0 -0
- /lionagi/{core/session → operatives/action}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_ → operatives/forms}/__init__.py +0 -0
- /lionagi/{core → operatives}/forms/utils.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints → operatives/instruct}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints/messages → operatives/models}/__init__.py +0 -0
- /lionagi/{integrations → protocols/adapters/pandas_}/__init__.py +0 -0
- /lionagi/{integrations/anthropic_/api_endpoints/messages/request → protocols/generic}/__init__.py +0 -0
- /lionagi/{integrations/groq_ → protocols/graph}/__init__.py +0 -0
- /lionagi/{integrations/litellm_ → protocols/mail}/__init__.py +0 -0
- /lionagi/{integrations/ollama_ → protocols/messages}/__init__.py +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/README.md +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/action_request.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/action_response.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/system_message.jinja2 +0 -0
- /lionagi/{core/communication → protocols/messages}/templates/tool_schemas.jinja2 +0 -0
- /lionagi/{integrations/ollama_/api_endpoints → service/endpoints}/__init__.py +0 -0
- /lionagi/{integrations/ollama_/api_endpoints/chat_completion → service/providers}/__init__.py +0 -0
- /lionagi/{integrations/ollama_/api_endpoints/model → service/providers/anthropic_}/__init__.py +0 -0
- /lionagi/{integrations/perplexity_ → service/providers/groq_}/__init__.py +0 -0
- /lionagi/{protocols/operatives → service/providers/openai_}/__init__.py +0 -0
- /lionagi/{protocols/registries → service/providers/openrouter_}/__init__.py +0 -0
- {lionagi-0.5.5.dist-info → lionagi-0.6.1.dist-info}/WHEEL +0 -0
- {lionagi-0.5.5.dist-info → lionagi-0.6.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,288 +0,0 @@
|
|
1
|
-
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
-
#
|
3
|
-
# SPDX-License-Identifier: Apache-2.0
|
4
|
-
|
5
|
-
import json
|
6
|
-
import re
|
7
|
-
from collections.abc import AsyncGenerator
|
8
|
-
from os import getenv
|
9
|
-
from typing import Any
|
10
|
-
|
11
|
-
import aiohttp
|
12
|
-
from pydantic import BaseModel, Field, field_validator
|
13
|
-
|
14
|
-
from .data_models import (
|
15
|
-
GroqEndpointPathParam,
|
16
|
-
GroqEndpointQueryParam,
|
17
|
-
GroqEndpointRequestBody,
|
18
|
-
)
|
19
|
-
from .response_utils import match_response
|
20
|
-
|
21
|
-
GROQ_BASE_URL = "https://api.groq.com/openai/v1"
|
22
|
-
DEFAULT_TIMEOUT = 30 # seconds
|
23
|
-
|
24
|
-
|
25
|
-
class GroqRequest(BaseModel):
    """Configuration for a single request against the Groq HTTP API.

    Holds authentication, routing, and timeout settings; the request/stream
    methods below use these fields to issue the actual HTTP calls.
    """

    # Excluded from serialization so the key never leaks into model dumps.
    api_key: str = Field(
        description="API key for authentication", exclude=True
    )
    endpoint: str = Field(description="API endpoint")
    method: str = Field(description="HTTP method")
    content_type: str | None = Field(
        default="application/json", description="Content type for the request"
    )
    timeout: float = Field(
        default=DEFAULT_TIMEOUT,
        description="Request timeout in seconds",
        ge=0,
    )

    model_config = {
        "arbitrary_types_allowed": True,
        "extra": "allow",  # Allow extra attributes for mocking
    }
|
44
|
-
|
45
|
-
@field_validator("api_key")
|
46
|
-
@classmethod
|
47
|
-
def validate_api_key(cls, v: str) -> str:
|
48
|
-
"""Validate API key format."""
|
49
|
-
# First try to get from env if v looks like an env var
|
50
|
-
if re.match(r"^[A-Z_][A-Z0-9_]*$", v):
|
51
|
-
env_value = getenv(v)
|
52
|
-
if env_value:
|
53
|
-
v = env_value
|
54
|
-
|
55
|
-
# Validate key format
|
56
|
-
if not v.startswith(("groq_", "gsk_")):
|
57
|
-
raise ValueError("Invalid Groq API key format")
|
58
|
-
return v
|
59
|
-
|
60
|
-
def get_headers(self) -> dict[str, str]:
|
61
|
-
"""Get headers for the request."""
|
62
|
-
headers = {
|
63
|
-
"Authorization": f"Bearer {self.api_key}",
|
64
|
-
}
|
65
|
-
if self.content_type:
|
66
|
-
headers["Content-Type"] = self.content_type
|
67
|
-
return headers
|
68
|
-
|
69
|
-
def get_endpoint(
|
70
|
-
self, path_param: GroqEndpointPathParam | None = None
|
71
|
-
) -> str:
|
72
|
-
"""Get the formatted endpoint URL."""
|
73
|
-
endpoint = self.endpoint
|
74
|
-
if path_param:
|
75
|
-
try:
|
76
|
-
endpoint = endpoint.format(**path_param.model_dump())
|
77
|
-
except KeyError as e:
|
78
|
-
raise ValueError(f"Missing path parameter: {e}")
|
79
|
-
return f"{GROQ_BASE_URL}/{endpoint}"
|
80
|
-
|
81
|
-
async def invoke(
|
82
|
-
self,
|
83
|
-
request_body: GroqEndpointRequestBody | None = None,
|
84
|
-
params: GroqEndpointQueryParam | None = None,
|
85
|
-
form_data: dict[str, Any] | None = None,
|
86
|
-
path_param: GroqEndpointPathParam | None = None,
|
87
|
-
output_file: str | None = None,
|
88
|
-
with_response_header: bool = False,
|
89
|
-
parse_response: bool = True,
|
90
|
-
**kwargs,
|
91
|
-
) -> Any:
|
92
|
-
"""Make a request to the Groq API."""
|
93
|
-
url = self.get_endpoint(path_param)
|
94
|
-
headers = self.get_headers()
|
95
|
-
timeout = aiohttp.ClientTimeout(total=self.timeout)
|
96
|
-
|
97
|
-
# Convert Pydantic models to dict
|
98
|
-
json_data = (
|
99
|
-
request_body.model_dump(exclude_unset=True)
|
100
|
-
if request_body
|
101
|
-
else None
|
102
|
-
)
|
103
|
-
params = params.model_dump(exclude_unset=True) if params else None
|
104
|
-
|
105
|
-
try:
|
106
|
-
async with aiohttp.ClientSession(timeout=timeout) as session:
|
107
|
-
if form_data:
|
108
|
-
data = aiohttp.FormData()
|
109
|
-
for key, value in form_data.items():
|
110
|
-
data.add_field(key, value)
|
111
|
-
async with session.request(
|
112
|
-
self.method,
|
113
|
-
url,
|
114
|
-
headers=headers,
|
115
|
-
data=data,
|
116
|
-
params=params,
|
117
|
-
) as response:
|
118
|
-
return await self._handle_response(
|
119
|
-
response,
|
120
|
-
output_file,
|
121
|
-
with_response_header,
|
122
|
-
parse_response,
|
123
|
-
)
|
124
|
-
else:
|
125
|
-
async with session.request(
|
126
|
-
self.method,
|
127
|
-
url,
|
128
|
-
headers=headers,
|
129
|
-
json=json_data,
|
130
|
-
params=params,
|
131
|
-
) as response:
|
132
|
-
return await self._handle_response(
|
133
|
-
response,
|
134
|
-
output_file,
|
135
|
-
with_response_header,
|
136
|
-
parse_response,
|
137
|
-
)
|
138
|
-
except aiohttp.ClientError as e:
|
139
|
-
raise ConnectionError(f"Failed to connect to Groq API: {str(e)}")
|
140
|
-
except TimeoutError:
|
141
|
-
raise TimeoutError(
|
142
|
-
f"Request timed out after {self.timeout} seconds"
|
143
|
-
)
|
144
|
-
|
145
|
-
async def stream(
|
146
|
-
self,
|
147
|
-
request_body: GroqEndpointRequestBody | None = None,
|
148
|
-
output_file: str | None = None,
|
149
|
-
with_response_header: bool = False,
|
150
|
-
) -> AsyncGenerator[dict[str, Any], None]:
|
151
|
-
"""Stream response from the Groq API."""
|
152
|
-
if request_body and not getattr(request_body, "stream", False):
|
153
|
-
raise ValueError(
|
154
|
-
"Request does not support stream. "
|
155
|
-
"Only requests with stream=True are supported"
|
156
|
-
)
|
157
|
-
|
158
|
-
url = self.get_endpoint()
|
159
|
-
headers = self.get_headers()
|
160
|
-
timeout = aiohttp.ClientTimeout(total=self.timeout)
|
161
|
-
|
162
|
-
# Convert Pydantic model to dict
|
163
|
-
json_data = (
|
164
|
-
request_body.model_dump(exclude_unset=True)
|
165
|
-
if request_body
|
166
|
-
else None
|
167
|
-
)
|
168
|
-
|
169
|
-
try:
|
170
|
-
async with aiohttp.ClientSession(timeout=timeout) as session:
|
171
|
-
async with session.request(
|
172
|
-
self.method, url, headers=headers, json=json_data
|
173
|
-
) as response:
|
174
|
-
if response.status != 200:
|
175
|
-
error_text = await self._get_error_text(response)
|
176
|
-
raise aiohttp.ClientResponseError(
|
177
|
-
request_info=response.request_info,
|
178
|
-
history=response.history,
|
179
|
-
status=response.status,
|
180
|
-
message=error_text,
|
181
|
-
headers=response.headers,
|
182
|
-
)
|
183
|
-
|
184
|
-
file_handle = None
|
185
|
-
if output_file:
|
186
|
-
try:
|
187
|
-
file_handle = open(output_file, "w")
|
188
|
-
except Exception as e:
|
189
|
-
raise ValueError(
|
190
|
-
f"Failed to open output file {output_file}: {str(e)}"
|
191
|
-
)
|
192
|
-
|
193
|
-
try:
|
194
|
-
async for line in response.content:
|
195
|
-
if line:
|
196
|
-
try:
|
197
|
-
line = line.decode("utf-8").strip()
|
198
|
-
except UnicodeDecodeError:
|
199
|
-
import warnings
|
200
|
-
|
201
|
-
warnings.warn(
|
202
|
-
"Failed to decode response chunk"
|
203
|
-
)
|
204
|
-
continue
|
205
|
-
|
206
|
-
if line.startswith("data: "):
|
207
|
-
line = line[6:] # Remove "data: " prefix
|
208
|
-
|
209
|
-
if line == "[DONE]":
|
210
|
-
break
|
211
|
-
|
212
|
-
try:
|
213
|
-
chunk_data = json.loads(line)
|
214
|
-
if file_handle:
|
215
|
-
file_handle.write(
|
216
|
-
json.dumps(chunk_data) + "\n"
|
217
|
-
)
|
218
|
-
yield chunk_data
|
219
|
-
except json.JSONDecodeError:
|
220
|
-
import warnings
|
221
|
-
|
222
|
-
warnings.warn(
|
223
|
-
f"Failed to parse response chunk: {line}"
|
224
|
-
)
|
225
|
-
continue
|
226
|
-
|
227
|
-
if with_response_header:
|
228
|
-
yield dict(response.headers)
|
229
|
-
finally:
|
230
|
-
if file_handle:
|
231
|
-
file_handle.close()
|
232
|
-
|
233
|
-
except aiohttp.ClientError as e:
|
234
|
-
raise ConnectionError(f"Failed to connect to Groq API: {str(e)}")
|
235
|
-
except TimeoutError:
|
236
|
-
raise TimeoutError(
|
237
|
-
f"Stream timed out after {self.timeout} seconds"
|
238
|
-
)
|
239
|
-
|
240
|
-
async def _handle_response(
|
241
|
-
self,
|
242
|
-
response: aiohttp.ClientResponse,
|
243
|
-
output_file: str | None = None,
|
244
|
-
with_response_header: bool = False,
|
245
|
-
parse_response: bool = True,
|
246
|
-
) -> Any:
|
247
|
-
"""Handle the API response."""
|
248
|
-
if response.status != 200:
|
249
|
-
error_text = await self._get_error_text(response)
|
250
|
-
raise aiohttp.ClientResponseError(
|
251
|
-
request_info=response.request_info,
|
252
|
-
history=response.history,
|
253
|
-
status=response.status,
|
254
|
-
message=error_text,
|
255
|
-
headers=response.headers,
|
256
|
-
)
|
257
|
-
|
258
|
-
if output_file:
|
259
|
-
with open(output_file, "wb") as f:
|
260
|
-
async for chunk in response.content.iter_chunked(1024):
|
261
|
-
f.write(chunk)
|
262
|
-
return None
|
263
|
-
|
264
|
-
response_body = await response.json()
|
265
|
-
|
266
|
-
if parse_response:
|
267
|
-
response_body = match_response(self, response_body)
|
268
|
-
|
269
|
-
if with_response_header:
|
270
|
-
return response_body, dict(response.headers)
|
271
|
-
return response_body
|
272
|
-
|
273
|
-
async def _get_error_text(self, response: aiohttp.ClientResponse) -> str:
|
274
|
-
"""Extract error text from response."""
|
275
|
-
try:
|
276
|
-
error_json = await response.json()
|
277
|
-
return json.dumps(error_json)
|
278
|
-
except Exception:
|
279
|
-
try:
|
280
|
-
return await response.text()
|
281
|
-
except Exception:
|
282
|
-
return f"HTTP {response.status}"
|
283
|
-
|
284
|
-
def __repr__(self):
|
285
|
-
return (
|
286
|
-
f"GroqRequest(endpoint={self.endpoint}, method={self.method}, "
|
287
|
-
f"content_type={self.content_type})"
|
288
|
-
)
|
@@ -1,106 +0,0 @@
|
|
1
|
-
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
-
#
|
3
|
-
# SPDX-License-Identifier: Apache-2.0
|
4
|
-
|
5
|
-
from typing import Any
|
6
|
-
|
7
|
-
from .data_models import GroqAudioResponse, GroqChatCompletionResponse
|
8
|
-
from .groq_request import GroqRequest
|
9
|
-
|
10
|
-
|
11
|
-
def match_response(
|
12
|
-
request_model: GroqRequest,
|
13
|
-
response: dict[str, Any] | list[dict[str, Any]] | None,
|
14
|
-
) -> dict[str, Any] | list[dict[str, Any]] | None:
|
15
|
-
"""Match response to appropriate model and format."""
|
16
|
-
if response is None:
|
17
|
-
return None
|
18
|
-
|
19
|
-
endpoint = request_model.endpoint.split("/")[0]
|
20
|
-
|
21
|
-
# Chat completions endpoint
|
22
|
-
if endpoint == "chat":
|
23
|
-
if isinstance(response, dict):
|
24
|
-
# Single response
|
25
|
-
try:
|
26
|
-
# Validate and parse through Pydantic model
|
27
|
-
parsed = GroqChatCompletionResponse(**response)
|
28
|
-
# Return in consistent format
|
29
|
-
return {
|
30
|
-
"choices": parsed.choices,
|
31
|
-
"model": parsed.model,
|
32
|
-
"usage": parsed.usage,
|
33
|
-
}
|
34
|
-
except Exception as e:
|
35
|
-
import warnings
|
36
|
-
|
37
|
-
warnings.warn(f"Failed to parse chat response: {str(e)}")
|
38
|
-
return response
|
39
|
-
else:
|
40
|
-
# Stream response list
|
41
|
-
result = []
|
42
|
-
for chunk in response:
|
43
|
-
if not isinstance(chunk, dict):
|
44
|
-
continue
|
45
|
-
|
46
|
-
try:
|
47
|
-
if "choices" in chunk:
|
48
|
-
# Regular chunk with content
|
49
|
-
result.append(
|
50
|
-
{
|
51
|
-
"choices": [
|
52
|
-
{
|
53
|
-
"delta": {
|
54
|
-
"content": choice.get(
|
55
|
-
"delta", {}
|
56
|
-
).get("content", ""),
|
57
|
-
"role": "assistant",
|
58
|
-
}
|
59
|
-
}
|
60
|
-
for choice in chunk["choices"]
|
61
|
-
]
|
62
|
-
}
|
63
|
-
)
|
64
|
-
|
65
|
-
# Add usage if present
|
66
|
-
if "usage" in chunk:
|
67
|
-
result[-1]["usage"] = chunk["usage"]
|
68
|
-
|
69
|
-
elif "usage" in chunk:
|
70
|
-
# Final chunk with usage stats
|
71
|
-
result.append(
|
72
|
-
{
|
73
|
-
"choices": [
|
74
|
-
{
|
75
|
-
"delta": {
|
76
|
-
"content": "",
|
77
|
-
"role": "assistant",
|
78
|
-
},
|
79
|
-
"finish_reason": "stop",
|
80
|
-
}
|
81
|
-
],
|
82
|
-
"usage": chunk["usage"],
|
83
|
-
}
|
84
|
-
)
|
85
|
-
except Exception as e:
|
86
|
-
import warnings
|
87
|
-
|
88
|
-
warnings.warn(f"Failed to parse stream chunk: {str(e)}")
|
89
|
-
continue
|
90
|
-
|
91
|
-
return result
|
92
|
-
|
93
|
-
# Audio endpoints
|
94
|
-
elif endpoint == "audio":
|
95
|
-
try:
|
96
|
-
if isinstance(response, dict):
|
97
|
-
parsed = GroqAudioResponse(**response)
|
98
|
-
return {"text": parsed.text, "metadata": parsed.x_groq}
|
99
|
-
except Exception as e:
|
100
|
-
import warnings
|
101
|
-
|
102
|
-
warnings.warn(f"Failed to parse audio response: {str(e)}")
|
103
|
-
return response
|
104
|
-
|
105
|
-
# Default case
|
106
|
-
return response
|
@@ -1,105 +0,0 @@
|
|
1
|
-
# Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
|
2
|
-
#
|
3
|
-
# SPDX-License-Identifier: Apache-2.0
|
4
|
-
|
5
|
-
from typing import Any
|
6
|
-
|
7
|
-
from .data_models import GroqAudioResponse, GroqChatCompletionResponse
|
8
|
-
|
9
|
-
|
10
|
-
def match_response(
|
11
|
-
request_model: Any,
|
12
|
-
response: dict[str, Any] | list[dict[str, Any]] | None,
|
13
|
-
) -> dict[str, Any] | list[dict[str, Any]] | None:
|
14
|
-
"""Match response to appropriate model and format."""
|
15
|
-
if response is None:
|
16
|
-
return None
|
17
|
-
|
18
|
-
endpoint = request_model.endpoint.split("/")[0]
|
19
|
-
|
20
|
-
# Chat completions endpoint
|
21
|
-
if endpoint == "chat":
|
22
|
-
if isinstance(response, dict):
|
23
|
-
# Single response
|
24
|
-
try:
|
25
|
-
# Validate and parse through Pydantic model
|
26
|
-
parsed = GroqChatCompletionResponse(**response)
|
27
|
-
# Return in consistent format
|
28
|
-
return {
|
29
|
-
"choices": parsed.choices,
|
30
|
-
"model": parsed.model,
|
31
|
-
"usage": parsed.usage,
|
32
|
-
}
|
33
|
-
except Exception as e:
|
34
|
-
import warnings
|
35
|
-
|
36
|
-
warnings.warn(f"Failed to parse chat response: {str(e)}")
|
37
|
-
return response
|
38
|
-
else:
|
39
|
-
# Stream response list
|
40
|
-
result = []
|
41
|
-
for chunk in response:
|
42
|
-
if not isinstance(chunk, dict):
|
43
|
-
continue
|
44
|
-
|
45
|
-
try:
|
46
|
-
if "choices" in chunk:
|
47
|
-
# Regular chunk with content
|
48
|
-
result.append(
|
49
|
-
{
|
50
|
-
"choices": [
|
51
|
-
{
|
52
|
-
"delta": {
|
53
|
-
"content": choice.get(
|
54
|
-
"delta", {}
|
55
|
-
).get("content", ""),
|
56
|
-
"role": "assistant",
|
57
|
-
}
|
58
|
-
}
|
59
|
-
for choice in chunk["choices"]
|
60
|
-
]
|
61
|
-
}
|
62
|
-
)
|
63
|
-
|
64
|
-
# Add usage if present
|
65
|
-
if "usage" in chunk:
|
66
|
-
result[-1]["usage"] = chunk["usage"]
|
67
|
-
|
68
|
-
elif "usage" in chunk:
|
69
|
-
# Final chunk with usage stats
|
70
|
-
result.append(
|
71
|
-
{
|
72
|
-
"choices": [
|
73
|
-
{
|
74
|
-
"delta": {
|
75
|
-
"content": "",
|
76
|
-
"role": "assistant",
|
77
|
-
},
|
78
|
-
"finish_reason": "stop",
|
79
|
-
}
|
80
|
-
],
|
81
|
-
"usage": chunk["usage"],
|
82
|
-
}
|
83
|
-
)
|
84
|
-
except Exception as e:
|
85
|
-
import warnings
|
86
|
-
|
87
|
-
warnings.warn(f"Failed to parse stream chunk: {str(e)}")
|
88
|
-
continue
|
89
|
-
|
90
|
-
return result
|
91
|
-
|
92
|
-
# Audio endpoints
|
93
|
-
elif endpoint == "audio":
|
94
|
-
try:
|
95
|
-
if isinstance(response, dict):
|
96
|
-
parsed = GroqAudioResponse(**response)
|
97
|
-
return {"text": parsed.text, "metadata": parsed.x_groq}
|
98
|
-
except Exception as e:
|
99
|
-
import warnings
|
100
|
-
|
101
|
-
warnings.warn(f"Failed to parse audio response: {str(e)}")
|
102
|
-
return response
|
103
|
-
|
104
|
-
# Default case
|
105
|
-
return response
|
@@ -1,21 +0,0 @@
|
|
1
|
-
# Text Models
|
2
|
-
gemma2-9b-it: 8192
|
3
|
-
gemma-7b-it: 8192
|
4
|
-
llama3-groq-70b-8192-tool-use-preview: 8192
|
5
|
-
llama3-groq-8b-8192-tool-use-preview: 8192
|
6
|
-
llama-3.1-70b-versatile: 32768
|
7
|
-
llama-3.1-70b-specdec: 8192
|
8
|
-
llama-3.1-8b-instant: 8192
|
9
|
-
llama-3.2-1b-preview: 8192
|
10
|
-
llama-3.2-3b-preview: 8192
|
11
|
-
llama-3.2-11b-vision-preview: 8192
|
12
|
-
llama-3.2-90b-vision-preview: 8192
|
13
|
-
llama-guard-3-8b: 8192
|
14
|
-
llama3-70b-8192: 8192
|
15
|
-
llama3-8b-8192: 8192
|
16
|
-
mixtral-8x7b-32768: 32768
|
17
|
-
|
18
|
-
# Audio Models (25MB max file size)
|
19
|
-
whisper-large-v3: 448
|
20
|
-
whisper-large-v3-turbo: 448
|
21
|
-
distil-whisper-large-v3-en: 448
|
@@ -1,58 +0,0 @@
|
|
1
|
-
model:
|
2
|
-
# Text Models
|
3
|
-
gemma2-9b-it:
|
4
|
-
input_tokens: 0
|
5
|
-
output_tokens: 0
|
6
|
-
gemma-7b-it:
|
7
|
-
input_tokens: 0
|
8
|
-
output_tokens: 0
|
9
|
-
llama3-groq-70b-8192-tool-use-preview:
|
10
|
-
input_tokens: 0
|
11
|
-
output_tokens: 0
|
12
|
-
llama3-groq-8b-8192-tool-use-preview:
|
13
|
-
input_tokens: 0
|
14
|
-
output_tokens: 0
|
15
|
-
llama-3.1-70b-versatile:
|
16
|
-
input_tokens: 0
|
17
|
-
output_tokens: 0
|
18
|
-
llama-3.1-70b-specdec:
|
19
|
-
input_tokens: 0
|
20
|
-
output_tokens: 0
|
21
|
-
llama-3.1-8b-instant:
|
22
|
-
input_tokens: 0
|
23
|
-
output_tokens: 0
|
24
|
-
llama-3.2-1b-preview:
|
25
|
-
input_tokens: 0
|
26
|
-
output_tokens: 0
|
27
|
-
llama-3.2-3b-preview:
|
28
|
-
input_tokens: 0
|
29
|
-
output_tokens: 0
|
30
|
-
llama-3.2-11b-vision-preview:
|
31
|
-
input_tokens: 0
|
32
|
-
output_tokens: 0
|
33
|
-
llama-3.2-90b-vision-preview:
|
34
|
-
input_tokens: 0
|
35
|
-
output_tokens: 0
|
36
|
-
llama-guard-3-8b:
|
37
|
-
input_tokens: 0
|
38
|
-
output_tokens: 0
|
39
|
-
llama3-70b-8192:
|
40
|
-
input_tokens: 0
|
41
|
-
output_tokens: 0
|
42
|
-
llama3-8b-8192:
|
43
|
-
input_tokens: 0
|
44
|
-
output_tokens: 0
|
45
|
-
mixtral-8x7b-32768:
|
46
|
-
input_tokens: 0
|
47
|
-
output_tokens: 0
|
48
|
-
|
49
|
-
# Audio Models
|
50
|
-
whisper-large-v3:
|
51
|
-
input_tokens: 0
|
52
|
-
output_tokens: 0
|
53
|
-
whisper-large-v3-turbo:
|
54
|
-
input_tokens: 0
|
55
|
-
output_tokens: 0
|
56
|
-
distil-whisper-large-v3-en:
|
57
|
-
input_tokens: 0
|
58
|
-
output_tokens: 0
|
@@ -1,105 +0,0 @@
|
|
1
|
-
# Text Models Rate Limits
|
2
|
-
text_models:
|
3
|
-
gemma-7b-it:
|
4
|
-
requests_per_minute: 30
|
5
|
-
requests_per_day: 14400
|
6
|
-
tokens_per_minute: 15000
|
7
|
-
tokens_per_day: 500000
|
8
|
-
gemma2-9b-it:
|
9
|
-
requests_per_minute: 30
|
10
|
-
requests_per_day: 14400
|
11
|
-
tokens_per_minute: 15000
|
12
|
-
tokens_per_day: 500000
|
13
|
-
llama-3.1-70b-versatile:
|
14
|
-
requests_per_minute: 30
|
15
|
-
requests_per_day: 14400
|
16
|
-
tokens_per_minute: 6000
|
17
|
-
tokens_per_day: 200000
|
18
|
-
llama-3.1-8b-instant:
|
19
|
-
requests_per_minute: 30
|
20
|
-
requests_per_day: 14400
|
21
|
-
tokens_per_minute: 20000
|
22
|
-
tokens_per_day: 500000
|
23
|
-
llama-3.2-11b-text-preview:
|
24
|
-
requests_per_minute: 30
|
25
|
-
requests_per_day: 7000
|
26
|
-
tokens_per_minute: 7000
|
27
|
-
tokens_per_day: 500000
|
28
|
-
llama-3.2-11b-vision-preview:
|
29
|
-
requests_per_minute: 30
|
30
|
-
requests_per_day: 7000
|
31
|
-
tokens_per_minute: 7000
|
32
|
-
tokens_per_day: 500000
|
33
|
-
llama-3.2-1b-preview:
|
34
|
-
requests_per_minute: 30
|
35
|
-
requests_per_day: 7000
|
36
|
-
tokens_per_minute: 7000
|
37
|
-
tokens_per_day: 500000
|
38
|
-
llama-3.2-3b-preview:
|
39
|
-
requests_per_minute: 30
|
40
|
-
requests_per_day: 7000
|
41
|
-
tokens_per_minute: 7000
|
42
|
-
tokens_per_day: 500000
|
43
|
-
llama-3.2-90b-text-preview:
|
44
|
-
requests_per_minute: 30
|
45
|
-
requests_per_day: 7000
|
46
|
-
tokens_per_minute: 7000
|
47
|
-
tokens_per_day: 500000
|
48
|
-
llama-3.2-90b-vision-preview:
|
49
|
-
requests_per_minute: 15
|
50
|
-
requests_per_day: 3500
|
51
|
-
tokens_per_minute: 7000
|
52
|
-
tokens_per_day: 250000
|
53
|
-
llama-guard-3-8b:
|
54
|
-
requests_per_minute: 30
|
55
|
-
requests_per_day: 14400
|
56
|
-
tokens_per_minute: 15000
|
57
|
-
tokens_per_day: 500000
|
58
|
-
llama3-70b-8192:
|
59
|
-
requests_per_minute: 30
|
60
|
-
requests_per_day: 14400
|
61
|
-
tokens_per_minute: 6000
|
62
|
-
tokens_per_day: 500000
|
63
|
-
llama3-8b-8192:
|
64
|
-
requests_per_minute: 30
|
65
|
-
requests_per_day: 14400
|
66
|
-
tokens_per_minute: 30000
|
67
|
-
tokens_per_day: 500000
|
68
|
-
llama3-groq-70b-8192-tool-use-preview:
|
69
|
-
requests_per_minute: 30
|
70
|
-
requests_per_day: 14400
|
71
|
-
tokens_per_minute: 15000
|
72
|
-
tokens_per_day: 500000
|
73
|
-
llama3-groq-8b-8192-tool-use-preview:
|
74
|
-
requests_per_minute: 30
|
75
|
-
requests_per_day: 14400
|
76
|
-
tokens_per_minute: 15000
|
77
|
-
tokens_per_day: 500000
|
78
|
-
llava-v1.5-7b-4096-preview:
|
79
|
-
requests_per_minute: 30
|
80
|
-
requests_per_day: 14400
|
81
|
-
tokens_per_minute: 30000
|
82
|
-
tokens_per_day: null # No limit
|
83
|
-
mixtral-8x7b-32768:
|
84
|
-
requests_per_minute: 30
|
85
|
-
requests_per_day: 14400
|
86
|
-
tokens_per_minute: 5000
|
87
|
-
tokens_per_day: 500000
|
88
|
-
|
89
|
-
# Audio Models Rate Limits
|
90
|
-
audio_models:
|
91
|
-
distil-whisper-large-v3-en:
|
92
|
-
requests_per_minute: 20
|
93
|
-
requests_per_day: 2000
|
94
|
-
audio_seconds_per_hour: 7200
|
95
|
-
audio_seconds_per_day: 28800
|
96
|
-
whisper-large-v3:
|
97
|
-
requests_per_minute: 20
|
98
|
-
requests_per_day: 2000
|
99
|
-
audio_seconds_per_hour: 7200
|
100
|
-
audio_seconds_per_day: 28800
|
101
|
-
whisper-large-v3-turbo:
|
102
|
-
requests_per_minute: 20
|
103
|
-
requests_per_day: 2000
|
104
|
-
audio_seconds_per_hour: 7200
|
105
|
-
audio_seconds_per_day: 28800
|