frogml-core 0.0.113__py3-none-any.whl → 0.0.114__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. frogml_core/__init__.py +1 -1
  2. frogml_core/inner/di_configuration/__init__.py +0 -6
  3. {frogml_core-0.0.113.dist-info → frogml_core-0.0.114.dist-info}/METADATA +1 -1
  4. {frogml_core-0.0.113.dist-info → frogml_core-0.0.114.dist-info}/RECORD +8 -115
  5. frogml_services_mock/mocks/frogml_mocks.py +0 -11
  6. frogml_services_mock/services_mock.py +0 -48
  7. frogml_storage/__init__.py +1 -1
  8. frogml_core/clients/prompt_manager/__init__.py +0 -0
  9. frogml_core/clients/prompt_manager/model_descriptor_mapper.py +0 -196
  10. frogml_core/clients/prompt_manager/prompt_manager_client.py +0 -190
  11. frogml_core/clients/prompt_manager/prompt_proto_mapper.py +0 -264
  12. frogml_core/clients/vector_store/__init__.py +0 -2
  13. frogml_core/clients/vector_store/management_client.py +0 -127
  14. frogml_core/clients/vector_store/serving_client.py +0 -157
  15. frogml_core/clients/workspace_manager/__init__.py +0 -1
  16. frogml_core/clients/workspace_manager/client.py +0 -224
  17. frogml_core/llmops/__init__.py +0 -0
  18. frogml_core/llmops/generation/__init__.py +0 -0
  19. frogml_core/llmops/generation/_steaming.py +0 -78
  20. frogml_core/llmops/generation/base.py +0 -5
  21. frogml_core/llmops/generation/chat/__init__.py +0 -0
  22. frogml_core/llmops/generation/chat/openai/LICENSE.txt +0 -201
  23. frogml_core/llmops/generation/chat/openai/types/__init__.py +0 -0
  24. frogml_core/llmops/generation/chat/openai/types/chat/__init__.py +0 -0
  25. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion.py +0 -88
  26. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_assistant_message_param.py +0 -65
  27. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_chunk.py +0 -153
  28. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_content_part_text_param.py +0 -28
  29. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_function_call_option_param.py +0 -25
  30. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_function_message_param.py +0 -33
  31. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message.py +0 -56
  32. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message_param.py +0 -34
  33. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message_tool_call.py +0 -46
  34. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message_tool_call_param.py +0 -44
  35. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_named_tool_choice_param.py +0 -32
  36. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_role.py +0 -20
  37. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_system_message_param.py +0 -35
  38. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_token_logprob.py +0 -71
  39. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_tool_choice_option_param.py +0 -28
  40. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_tool_message_param.py +0 -31
  41. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_tool_param.py +0 -29
  42. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_user_message_param.py +0 -35
  43. frogml_core/llmops/generation/chat/openai/types/chat/completion_create_params.py +0 -279
  44. frogml_core/llmops/generation/chat/openai/types/completion_choice.py +0 -47
  45. frogml_core/llmops/generation/chat/openai/types/completion_create_params.py +0 -209
  46. frogml_core/llmops/generation/chat/openai/types/completion_usage.py +0 -30
  47. frogml_core/llmops/generation/chat/openai/types/model.py +0 -35
  48. frogml_core/llmops/generation/chat/openai/types/shared/__init__.py +0 -3
  49. frogml_core/llmops/generation/chat/openai/types/shared/error_object.py +0 -27
  50. frogml_core/llmops/generation/chat/openai/types/shared/function_definition.py +0 -49
  51. frogml_core/llmops/generation/chat/openai/types/shared/function_parameters.py +0 -20
  52. frogml_core/llmops/generation/chat/openai/types/shared_params/__init__.py +0 -2
  53. frogml_core/llmops/generation/chat/openai/types/shared_params/function_definition.py +0 -49
  54. frogml_core/llmops/generation/chat/openai/types/shared_params/function_parameters.py +0 -22
  55. frogml_core/llmops/generation/streaming.py +0 -26
  56. frogml_core/llmops/model/__init__.py +0 -0
  57. frogml_core/llmops/model/descriptor.py +0 -40
  58. frogml_core/llmops/prompt/__init__.py +0 -0
  59. frogml_core/llmops/prompt/base.py +0 -136
  60. frogml_core/llmops/prompt/chat/__init__.py +0 -0
  61. frogml_core/llmops/prompt/chat/message.py +0 -24
  62. frogml_core/llmops/prompt/chat/template.py +0 -113
  63. frogml_core/llmops/prompt/chat/value.py +0 -10
  64. frogml_core/llmops/prompt/manager.py +0 -138
  65. frogml_core/llmops/prompt/template.py +0 -24
  66. frogml_core/llmops/prompt/value.py +0 -14
  67. frogml_core/llmops/provider/__init__.py +0 -0
  68. frogml_core/llmops/provider/chat.py +0 -44
  69. frogml_core/llmops/provider/openai/__init__.py +0 -0
  70. frogml_core/llmops/provider/openai/client.py +0 -126
  71. frogml_core/llmops/provider/openai/provider.py +0 -93
  72. frogml_core/vector_store/__init__.py +0 -4
  73. frogml_core/vector_store/client.py +0 -151
  74. frogml_core/vector_store/collection.py +0 -429
  75. frogml_core/vector_store/filters.py +0 -359
  76. frogml_core/vector_store/inference_client.py +0 -105
  77. frogml_core/vector_store/rest_helpers.py +0 -81
  78. frogml_core/vector_store/utils/__init__.py +0 -0
  79. frogml_core/vector_store/utils/filter_utils.py +0 -23
  80. frogml_core/vector_store/utils/upsert_utils.py +0 -218
  81. frogml_proto/qwak/prompt/v1/prompt/prompt_manager_service_pb2.py +0 -77
  82. frogml_proto/qwak/prompt/v1/prompt/prompt_manager_service_pb2.pyi +0 -417
  83. frogml_proto/qwak/prompt/v1/prompt/prompt_manager_service_pb2_grpc.py +0 -441
  84. frogml_proto/qwak/prompt/v1/prompt/prompt_pb2.py +0 -69
  85. frogml_proto/qwak/prompt/v1/prompt/prompt_pb2.pyi +0 -415
  86. frogml_proto/qwak/prompt/v1/prompt/prompt_pb2_grpc.py +0 -4
  87. frogml_proto/qwak/vectors/v1/collection/collection_pb2.py +0 -46
  88. frogml_proto/qwak/vectors/v1/collection/collection_pb2.pyi +0 -287
  89. frogml_proto/qwak/vectors/v1/collection/collection_pb2_grpc.py +0 -4
  90. frogml_proto/qwak/vectors/v1/collection/collection_service_pb2.py +0 -60
  91. frogml_proto/qwak/vectors/v1/collection/collection_service_pb2.pyi +0 -258
  92. frogml_proto/qwak/vectors/v1/collection/collection_service_pb2_grpc.py +0 -304
  93. frogml_proto/qwak/vectors/v1/collection/event/collection_event_pb2.py +0 -28
  94. frogml_proto/qwak/vectors/v1/collection/event/collection_event_pb2.pyi +0 -41
  95. frogml_proto/qwak/vectors/v1/collection/event/collection_event_pb2_grpc.py +0 -4
  96. frogml_proto/qwak/vectors/v1/filters_pb2.py +0 -52
  97. frogml_proto/qwak/vectors/v1/filters_pb2.pyi +0 -297
  98. frogml_proto/qwak/vectors/v1/filters_pb2_grpc.py +0 -4
  99. frogml_proto/qwak/vectors/v1/vector_pb2.py +0 -38
  100. frogml_proto/qwak/vectors/v1/vector_pb2.pyi +0 -142
  101. frogml_proto/qwak/vectors/v1/vector_pb2_grpc.py +0 -4
  102. frogml_proto/qwak/vectors/v1/vector_service_pb2.py +0 -53
  103. frogml_proto/qwak/vectors/v1/vector_service_pb2.pyi +0 -243
  104. frogml_proto/qwak/vectors/v1/vector_service_pb2_grpc.py +0 -201
  105. frogml_proto/qwak/workspace/workspace_pb2.py +0 -50
  106. frogml_proto/qwak/workspace/workspace_pb2.pyi +0 -331
  107. frogml_proto/qwak/workspace/workspace_pb2_grpc.py +0 -4
  108. frogml_proto/qwak/workspace/workspace_service_pb2.py +0 -84
  109. frogml_proto/qwak/workspace/workspace_service_pb2.pyi +0 -393
  110. frogml_proto/qwak/workspace/workspace_service_pb2_grpc.py +0 -507
  111. frogml_services_mock/mocks/prompt_manager_service.py +0 -281
  112. frogml_services_mock/mocks/vector_serving_api.py +0 -159
  113. frogml_services_mock/mocks/vectors_management_api.py +0 -97
  114. frogml_services_mock/mocks/workspace_manager_service_mock.py +0 -202
  115. {frogml_core-0.0.113.dist-info → frogml_core-0.0.114.dist-info}/WHEEL +0 -0
frogml_core/llmops/prompt/chat/message.py
@@ -1,24 +0,0 @@
-from abc import ABC
-from dataclasses import dataclass, field
-
-from frogml_core.llmops.prompt.value import PromptValue
-
-
-@dataclass
-class BaseMessage(PromptValue, ABC):
-    content: str
-    role_name: str = field(
-        init=False,
-    )
-
-
-class AIMessage(BaseMessage):
-    role_name: str = "ai"
-
-
-class HumanMessage(BaseMessage):
-    role_name: str = "human"
-
-
-class SystemMessage(BaseMessage):
-    role_name: str = "system"
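Migration note: the four message primitives above were dropped in 0.0.114 with no in-package replacement. A minimal sketch of how they composed under 0.0.113, illustrative only; all names come from the deleted module above:

```python
# Illustrative only: these imports existed in frogml-core 0.0.113 and are
# removed in 0.0.114.
from frogml_core.llmops.prompt.chat.message import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)

# Each subclass fixes role_name as a class attribute (field(init=False)),
# so callers only supply content.
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="What is a wheel file?"),
    AIMessage(content="A built distribution format for Python packages."),
]
assert [m.role_name for m in messages] == ["system", "human", "ai"]
```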
frogml_core/llmops/prompt/chat/template.py
@@ -1,113 +0,0 @@
-import re
-from abc import ABC, abstractmethod
-from dataclasses import dataclass, field
-from typing import Dict, List, Tuple, Union
-
-from frogml_core.llmops.prompt.chat.message import (
-    AIMessage,
-    BaseMessage,
-    HumanMessage,
-    SystemMessage,
-)
-from frogml_core.llmops.prompt.chat.value import ChatPromptValue
-from frogml_core.llmops.prompt.template import BasePromptTemplate, StringPromptTemplate
-
-
-@dataclass
-class BaseMessagePromptTemplate(BasePromptTemplate):
-    @abstractmethod
-    def render(self, variables: Dict[str, any]) -> BaseMessage:
-        pass
-
-
-@dataclass
-class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
-    template: StringPromptTemplate = field(init=False)
-    role_name: str = field(init=False)
-
-    def __init__(self, template: str):
-        self.template = StringPromptTemplate(template=template)
-
-
-class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
-    role_name: str = "ai"
-
-    def render(self, variables: Dict[str, any]) -> BaseMessage:
-        return AIMessage(content=self.template.render(variables=variables).to_string())
-
-
-class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
-    role_name: str = "human"
-
-    def render(self, variables: Dict[str, any]) -> BaseMessage:
-        return HumanMessage(
-            content=self.template.render(variables=variables).to_string()
-        )
-
-
-class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
-    role_name: str = "system"
-
-    def render(self, variables: Dict[str, any]) -> BaseMessage:
-        return SystemMessage(
-            content=self.template.render(variables=variables).to_string()
-        )
-
-
-@dataclass
-class ChatPromptTemplate(BasePromptTemplate):
-    messages: List[Union[BaseMessage, BaseStringMessagePromptTemplate]]
-
-    def render(self, variables: Dict[str, any]) -> ChatPromptValue:
-        resulting_messages: List[BaseMessage] = list()
-
-        for message in self.messages:
-            if isinstance(message, BaseMessage):
-                resulting_messages.append(message)
-            elif isinstance(message, BaseStringMessagePromptTemplate):
-                resulting_messages.append(message.render(variables=variables))
-            else:
-                raise ValueError(
-                    f"Got unsupported message type: {repr(message)}. \n"
-                    "Supported messages are: "
-                    "AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, \n"
-                    "AIMessage, HumanMessage, SystemMessage."
-                )
-
-        return ChatPromptValue(messages=resulting_messages)
-
-    def to_messages(self) -> List[Tuple[str, str]]:
-        """
-        Useful for integration with other libraries such as Langchain.
-
-        ```
-        ChatPromptTemplate(
-            messages=[
-                SystemMessage("you are an assistant"),
-                HumanMessagePromptTemplate("{{question}}")
-            ]
-        ).to_messages()
-
-        resulting in:
-
-        [("system", "you are an assistant"),
-         ("human", "{question}")]
-        ```
-
-        """
-
-        def strip_curly(string: str) -> str:
-            return re.sub(r"\{\{\s*([\w\s]+)\s*\}\}", repl=r"{\g<1>}", string=string)
-
-        if not self.messages:
-            return []
-
-        result: List[Tuple[str, str]] = []
-
-        for msg in self.messages:
-            if isinstance(msg, BaseMessage):
-                result.append((msg.role_name, msg.content))
-            elif isinstance(msg, BaseStringMessagePromptTemplate):
-                result.append((msg.role_name, strip_curly(msg.template.template)))
-
-        return result
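The removed `ChatPromptTemplate` rendered mustache-style (`{{var}}`) templates and could flatten itself into `(role, template)` tuples for libraries such as LangChain. A usage sketch, grounded entirely in the deleted API above:

```python
from frogml_core.llmops.prompt.chat.message import SystemMessage
from frogml_core.llmops.prompt.chat.template import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)

template = ChatPromptTemplate(
    messages=[
        SystemMessage(content="You are an assistant."),
        HumanMessagePromptTemplate(template="{{question}}"),
    ]
)

# render() substitutes mustache variables and returns a ChatPromptValue;
# literal messages pass through untouched.
value = template.render(variables={"question": "What changed in 0.0.114?"})

# to_messages() yields (role, template) pairs, rewriting "{{var}}" to "{var}":
pairs = template.to_messages()
# [("system", "You are an assistant."), ("human", "{question}")]
```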
frogml_core/llmops/prompt/chat/value.py
@@ -1,10 +0,0 @@
-from dataclasses import dataclass
-from typing import List
-
-from frogml_core.llmops.prompt.chat.message import BaseMessage
-from frogml_core.llmops.prompt.value import PromptValue
-
-
-@dataclass
-class ChatPromptValue(PromptValue):
-    messages: List[BaseMessage]
frogml_core/llmops/prompt/manager.py
@@ -1,138 +0,0 @@
-from typing import Optional
-
-from frogml_proto.qwak.prompt.v1.prompt.prompt_pb2 import Prompt as ProtoPrompt
-from frogml_proto.qwak.prompt.v1.prompt.prompt_pb2 import (
-    PromptVersion as ProtoPromptVersion,
-)
-from frogml_proto.qwak.prompt.v1.prompt.prompt_pb2 import (
-    PromptVersionDefinition as ProtoPromptVersionDefinition,
-)
-from frogml_proto.qwak.prompt.v1.prompt.prompt_pb2 import (
-    PromptVersionSpec as ProtoPromptVersionSpec,
-)
-from frogml_core.clients.prompt_manager.prompt_manager_client import PromptManagerClient
-from frogml_core.clients.prompt_manager.prompt_proto_mapper import PromptProtoMapper
-from frogml_core.exceptions import FrogmlException
-from frogml_core.llmops.prompt.base import BasePrompt, ChatPrompt, RegisteredPrompt
-
-
-class PromptManager:
-    _prompt_manager_client: PromptManagerClient
-
-    def __init__(self):
-        self._prompt_manager_client = PromptManagerClient()
-
-    def register(
-        self,
-        name: str,
-        prompt: BasePrompt,
-        prompt_description: Optional[str] = None,
-        version_description: Optional[str] = None,
-    ) -> RegisteredPrompt:
-        """
-        Registers a new prompt in Qwak platform. Name must be unique
-        and conform to ^[a-z0-9](?:[-_]?[a-z0-9]+)+$
-        """
-        if not isinstance(prompt, ChatPrompt):
-            raise FrogmlException(f"Got unsupported prompt type: {prompt}")
-
-        version_spec: ProtoPromptVersionSpec = PromptProtoMapper.to_prompt_version_spec(
-            version_description=version_description,
-            prompt_template=prompt.template,
-            model_descriptor=prompt.model,
-        )
-
-        registered_prompt: ProtoPrompt = self._prompt_manager_client.create_prompt(
-            name=name, prompt_description=prompt_description, version_spec=version_spec
-        )
-
-        return PromptProtoMapper.from_prompt(
-            name=registered_prompt.name,
-            prompt_description=registered_prompt.prompt_spec.description,
-            version_description=registered_prompt.default_version_definition.version_spec.description,
-            version=registered_prompt.default_version_definition.version_number,
-            target_default_version=True,
-            prompt_version_definition=registered_prompt.default_version_definition,
-        )
-
-    def update(
-        self,
-        name: str,
-        prompt: BasePrompt,
-        version_description: Optional[str] = None,
-        set_default: bool = False,
-    ) -> RegisteredPrompt:
-        """
-        Creates a new version for an existing prompt, prompt name must already exist.
-        `set_default` set to True if this version is to become the default one immediately.
-        """
-        if not isinstance(prompt, ChatPrompt):
-            raise FrogmlException(f"Got unsupported prompt type: {prompt}")
-
-        version_spec: ProtoPromptVersionSpec = PromptProtoMapper.to_prompt_version_spec(
-            version_description=version_description,
-            prompt_template=prompt.template,
-            model_descriptor=prompt.model,
-        )
-
-        prompt_version: ProtoPromptVersion = (
-            self._prompt_manager_client.create_prompt_version(
-                name=name, version_spec=version_spec, set_default=set_default
-            )
-        )
-
-        version_for_get_request = (
-            None
-            if set_default
-            else prompt_version.prompt_version_definition.version_number
-        )
-        return self.get_prompt(name=name, version=version_for_get_request)
-
-    def set_default(self, name: str, version: int):
-        """
-        Set a version of a registered prompt named: `name`, as the default version
-        """
-        self._prompt_manager_client.set_default_prompt_version(
-            name=name, version=version
-        )
-
-    def delete_prompt(self, name: str):
-        """
-        Delete all version of a prompt, by name
-        """
-        self._prompt_manager_client.delete_prompt(name=name)
-
-    def delete_prompt_version(self, name: str, version: int):
-        """
-        Deletes a specific version of a registered prompt
-        """
-        self._prompt_manager_client.delete_prompt_version(name=name, version=version)
-
-    def get_prompt(self, name: str, version: Optional[int] = None) -> RegisteredPrompt:
-        """
-        Get a registered prompt by name. To get the default version omit the `version` param, else
-        fetch the specified version.
-        """
-
-        prompt_default_version: ProtoPrompt = (
-            self._prompt_manager_client.get_prompt_by_name(name=name)
-        )
-        prompt_version_definition: ProtoPromptVersionDefinition = (
-            prompt_default_version.default_version_definition
-        )
-        if version:
-            prompt_version: ProtoPromptVersion = (
-                self._prompt_manager_client.get_prompt_version_by_name(
-                    name=name, version=version
-                )
-            )
-            prompt_version_definition = prompt_version.prompt_version_definition
-
-        return PromptProtoMapper.from_prompt(
-            name=prompt_default_version.name,
-            prompt_description=prompt_default_version.prompt_spec.description,
-            version_description=prompt_version_definition.version_spec.description,
-            version=prompt_version_definition.version_number,
-            target_default_version=not bool(version),
-            prompt_version_definition=prompt_version_definition,
-        )
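A sketch of the removed `PromptManager` workflow. `ChatPrompt`'s constructor lives in the deleted `frogml_core/llmops/prompt/base.py`, which this excerpt does not include, so the `template=`/`model=` keywords below are assumptions inferred from the `prompt.template` and `prompt.model` attribute accesses in `manager.py` above; all `PromptManager` method signatures are grounded in the deleted code:

```python
from frogml_core.llmops.prompt.base import ChatPrompt   # removed in 0.0.114
from frogml_core.llmops.prompt.manager import PromptManager

manager = PromptManager()
chat_prompt = ChatPrompt(template=..., model=...)       # assumed kwargs, see note

registered = manager.register(
    name="support-triage",          # must match ^[a-z0-9](?:[-_]?[a-z0-9]+)+$
    prompt=chat_prompt,
    prompt_description="Routes support tickets",
    version_description="initial version",
)

# Publish a new version and make it the default immediately:
manager.update(name="support-triage", prompt=chat_prompt, set_default=True)

# Pin, fetch, or remove specific versions:
manager.set_default(name="support-triage", version=2)
default_version = manager.get_prompt(name="support-triage")     # omit version
pinned_version = manager.get_prompt(name="support-triage", version=1)
manager.delete_prompt_version(name="support-triage", version=1)
```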
frogml_core/llmops/prompt/template.py
@@ -1,24 +0,0 @@
-from abc import ABC, abstractmethod
-from dataclasses import dataclass
-from typing import Dict
-
-from frogml_core.llmops.prompt.value import PromptValue, StringPromptValue
-
-
-@dataclass
-class BasePromptTemplate(ABC):
-    @abstractmethod
-    def render(self, variables: Dict[str, any]) -> PromptValue:
-        pass
-
-
-@dataclass
-class StringPromptTemplate(BasePromptTemplate):
-    template: str
-
-    def render(self, variables: Dict[str, any]) -> StringPromptValue:
-        from chevron import renderer
-
-        return StringPromptValue(
-            text=renderer.render(template=self.template, data=variables, warn=True)
-        )
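`StringPromptTemplate` was a thin wrapper over the `chevron` mustache renderer. A small sketch, fully grounded in the deleted module above:

```python
from frogml_core.llmops.prompt.template import StringPromptTemplate

# render() delegates to chevron (mustache syntax) and wraps the result
# in a StringPromptValue.
tmpl = StringPromptTemplate(template="Hello, {{name}}!")
print(tmpl.render(variables={"name": "world"}).to_string())  # Hello, world!
```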
frogml_core/llmops/prompt/value.py
@@ -1,14 +0,0 @@
-from abc import ABC
-from dataclasses import dataclass
-
-
-class PromptValue(ABC):
-    pass
-
-
-@dataclass
-class StringPromptValue(PromptValue):
-    text: str
-
-    def to_string(self) -> str:
-        return self.text
frogml_core/llmops/provider/__init__.py
File without changes
frogml_core/llmops/provider/chat.py
@@ -1,44 +0,0 @@
-from functools import lru_cache
-from typing import Union
-
-from frogml_core.llmops.generation.chat.openai.types.chat.chat_completion import (
-    ChatCompletion,
-)
-from frogml_core.llmops.generation.streaming import ChatCompletionStream
-from frogml_core.llmops.model.descriptor import ChatModelDescriptor, OpenAIChat
-from frogml_core.llmops.prompt.chat.value import ChatPromptValue
-from frogml_core.llmops.provider.openai.provider import OpenAIProvider
-
-
-class ChatCompletionProvider:
-    @staticmethod
-    @lru_cache(maxsize=None)
-    def _get_openai_provider():
-        return OpenAIProvider()
-
-    @staticmethod
-    def invoke(
-        chat_prompt_value: ChatPromptValue,
-        chat_model_descriptor: ChatModelDescriptor,
-        stream: bool = False,
-    ) -> Union[ChatCompletion, ChatCompletionStream]:
-        if isinstance(chat_model_descriptor, OpenAIChat):
-            return ChatCompletionProvider._invoke_openai_chat(
-                chat_prompt_value=chat_prompt_value,
-                chat_model_descriptor=chat_model_descriptor,
-                stream=stream,
-            )
-        else:
-            raise ValueError("Can't invoke prompt and model combination!")
-
-    @staticmethod
-    def _invoke_openai_chat(
-        chat_prompt_value: ChatPromptValue,
-        chat_model_descriptor: OpenAIChat,
-        stream: bool = False,
-    ) -> Union[ChatCompletion, ChatCompletionStream]:
-        return ChatCompletionProvider._get_openai_provider().create_chat_completion(
-            chat_prompt_value=chat_prompt_value,
-            chat_model_descriptor=chat_model_descriptor,
-            stream=stream,
-        )
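`ChatCompletionProvider` dispatched on the descriptor type, and only `OpenAIChat` was supported in 0.0.113 (anything else raised `ValueError`). A hedged sketch of calling the removed entry point; `OpenAIChat`'s constructor is in the deleted `descriptor.py`, not shown in this excerpt, so its keyword arguments are assumptions (`model_id` at least is real, since `provider.py` below reads `d.model_id`):

```python
from frogml_core.llmops.model.descriptor import OpenAIChat       # removed in 0.0.114
from frogml_core.llmops.prompt.chat.message import SystemMessage
from frogml_core.llmops.prompt.chat.template import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from frogml_core.llmops.provider.chat import ChatCompletionProvider

value = ChatPromptTemplate(
    messages=[
        SystemMessage(content="You are terse."),
        HumanMessagePromptTemplate(template="{{question}}"),
    ]
).render(variables={"question": "Summarize this diff."})

completion = ChatCompletionProvider.invoke(
    chat_prompt_value=value,
    chat_model_descriptor=OpenAIChat(model_id="gpt-4o"),  # assumed ctor kwargs
    stream=False,  # True would return a ChatCompletionStream instead
)
```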
frogml_core/llmops/provider/openai/__init__.py
File without changes
frogml_core/llmops/provider/openai/client.py
@@ -1,126 +0,0 @@
-import json
-import os
-from typing import Dict, Iterable, List, Optional, Union
-from urllib.parse import urljoin
-
-import requests
-from dacite import Config, from_dict
-from requests import Response
-from typing_extensions import Literal
-
-from frogml_core.exceptions.frogml_external_exception import FrogmlExternalException
-from frogml_core.llmops.generation._steaming import BaseSSEDecoder  # noqa
-from frogml_core.llmops.generation.chat.openai.types.chat.chat_completion import (
-    ChatCompletion,
-)
-from frogml_core.llmops.generation.chat.openai.types.chat.chat_completion_chunk import (
-    ChatCompletionChunk,
-)
-from frogml_core.llmops.generation.chat.openai.types.chat.chat_completion_tool_choice_option_param import (
-    ChatCompletionToolChoiceOptionParam,
-)
-from frogml_core.llmops.generation.chat.openai.types.chat.chat_completion_tool_param import (
-    ChatCompletionToolParam,
-)
-from frogml_core.llmops.generation.streaming import ChatCompletionStream
-from frogml_core.utils.dict_utils import remove_none_value_keys
-
-
-class OpenAIChatCompletionStream(
-    BaseSSEDecoder[ChatCompletionChunk], ChatCompletionStream
-):
-    def __init__(self, response: requests.Response):
-        super().__init__(response=response, parse_to=ChatCompletionChunk)
-
-
-class OpenAIClient:
-    base_url: str
-
-    def __init__(self):
-        self.base_url: str = os.environ.get(
-            "_QWAK_OPEN_AI_BASE_URL", "https://api.openai.com"
-        )
-
-    def invoke_chat_completion(
-        self,
-        api_key: str,
-        model: str,
-        messages: List[Dict],
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, int]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Literal["text", "json_object"] = None,
-        seed: Optional[int] = None,
-        stop: Union[Optional[str], List[str]] = None,
-        stream: Optional[bool] = False,
-        temperature: Optional[float] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
-        tools: Iterable[ChatCompletionToolParam] = None,
-        extra_headers: Optional[Dict[str, str]] = None,
-        extra_body: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[float] = None,
-    ) -> Union[ChatCompletion, ChatCompletionStream]:
-        url: str = urljoin(self.base_url, "v1/chat/completions")
-        headers: Dict[str, str] = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {api_key}",
-        }
-        body = {
-            "messages": messages,
-            "model": model,
-            "frequency_penalty": frequency_penalty,
-            "logit_bias": logit_bias,
-            "logprobs": logprobs,
-            "max_tokens": max_tokens,
-            "n": n,
-            "presence_penalty": presence_penalty,
-            "response_format": {"type": response_format} if response_format else None,
-            "seed": seed,
-            "stop": stop,
-            "temperature": temperature,
-            "tool_choice": tool_choice if tools else None,
-            "tools": tools if tools else None,
-            "top_logprobs": top_logprobs,
-            "top_p": top_p,
-            "user": user,
-            "stream": stream if stream else None,
-        }
-        body = remove_none_value_keys(body)
-
-        if extra_headers:
-            headers.update(extra_headers)
-
-        if extra_body:
-            body.update(extra_body)
-
-        http_request_timeout_seconds: float = (
-            timeout_seconds
-            if timeout_seconds
-            else float(os.environ.get("_QWAK_OPEN_AI_TIMEOUT_SECONDS", 60.0))
-        )
-        response: Response = requests.post(
-            url=url,
-            data=json.dumps(body),
-            headers=headers,
-            stream=stream,
-            timeout=http_request_timeout_seconds,
-        )
-
-        try:
-            response.raise_for_status()
-        except requests.exceptions.HTTPError as e:
-            raise FrogmlExternalException(message=e.response.content.decode())
-        if stream:
-            return OpenAIChatCompletionStream(response=response)
-        else:
-            return from_dict(
-                data_class=ChatCompletion,
-                data=response.json(),
-                config=Config(check_types=False),
-            )
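The removed low-level client posted directly to the OpenAI REST endpoint with `requests` rather than using the `openai` SDK. A hedged sketch of a direct call against the deleted API above; the key is a placeholder, since in the library it was resolved through the integrations service (see `provider.py` below):

```python
from frogml_core.llmops.provider.openai.client import OpenAIClient

client = OpenAIClient()  # honors the _QWAK_OPEN_AI_BASE_URL override if set
result = client.invoke_chat_completion(
    api_key="sk-...",                                # placeholder key
    model="gpt-4o",
    messages=[{"role": "user", "content": "ping"}],
    temperature=0.0,
    stream=False,                                    # non-streaming path
)
# Non-streaming responses are parsed with dacite into the vendored
# ChatCompletion dataclass, which mirrors OpenAI's response shape:
print(result.choices[0].message.content)
```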
frogml_core/llmops/provider/openai/provider.py
@@ -1,93 +0,0 @@
-import random
-from typing import Dict, List, Optional, Union
-
-from frogml_core.clients.integration_management.integration_utils import (
-    IntegrationUtils,
-)
-from frogml_core.clients.integration_management.openai.openai_system_secret import (
-    OpenAIApiKeySystemSecret,
-)
-from frogml_core.exceptions import FrogmlException
-from frogml_core.llmops.generation.chat.openai.types.chat.chat_completion import (
-    ChatCompletion,
-)
-from frogml_core.llmops.generation.streaming import ChatCompletionStream
-from frogml_core.llmops.model.descriptor import OpenAIChat
-from frogml_core.llmops.prompt.chat.message import (
-    AIMessage,
-    BaseMessage,
-    HumanMessage,
-    SystemMessage,
-)
-from frogml_core.llmops.prompt.chat.value import ChatPromptValue
-from frogml_core.llmops.provider.openai.client import OpenAIClient
-
-
-class OpenAIProvider:
-    client: OpenAIClient
-
-    def __init__(self):
-        self.client = OpenAIClient()
-
-    def _get_random_openai_api_key(self) -> Optional[str]:
-        openai_api_keys: List[OpenAIApiKeySystemSecret] = (
-            IntegrationUtils().get_openai_api_keys()
-        )
-        if len(openai_api_keys) == 0:
-            return None
-
-        return random.choice(openai_api_keys).get_api_key()  # nosec
-
-    def _chat_value_to_json(self, chat_prompt_value: ChatPromptValue) -> List[Dict]:
-        return [self._map_message(m) for m in chat_prompt_value.messages]
-
-    def _map_message(self, message: BaseMessage) -> Dict[str, str]:
-        role: str
-        content: str = message.content
-
-        if isinstance(message, AIMessage):
-            role = "assistant"
-        elif isinstance(message, SystemMessage):
-            role = "system"
-        elif isinstance(message, HumanMessage):
-            role = "user"
-        else:
-            raise FrogmlException(f"Can't handle message of type: {repr(message)}")
-
-        return {"role": role, "content": content}
-
-    def create_chat_completion(
-        self,
-        chat_prompt_value: ChatPromptValue,
-        chat_model_descriptor: OpenAIChat,
-        stream: bool = False,
-    ) -> Union[ChatCompletion, ChatCompletionStream]:
-        openai_api_key: Optional[str] = self._get_random_openai_api_key()
-        if not openai_api_key:
-            raise FrogmlException(
-                "Could not find Open AI integration, Please create one."
-            )
-
-        d = chat_model_descriptor
-
-        return self.client.invoke_chat_completion(
-            stream=stream,
-            api_key=openai_api_key,
-            model=d.model_id,
-            messages=self._chat_value_to_json(chat_prompt_value),
-            frequency_penalty=d.frequency_penalty,
-            logit_bias=d.logit_bias,
-            logprobs=d.logprobs,
-            max_tokens=d.max_tokens,
-            n=d.n,
-            presence_penalty=d.presence_penalty,
-            response_format=d.response_format,
-            seed=d.seed,
-            stop=d.stop,
-            temperature=d.temperature,
-            top_logprobs=d.top_logprobs,
-            top_p=d.top_p,
-            user=d.user,
-            tool_choice=d.tool_choice,
-            tools=d.tools,
-        )
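For anyone reimplementing the removed provider, the role translation it performed (`AIMessage` to "assistant", `SystemMessage` to "system", `HumanMessage` to "user") is small enough to inline. A self-contained reproduction of just that mapping; the function name here is hypothetical, not part of the package:

```python
# Role names come from the deleted message.py; OpenAI roles from the
# isinstance chain in _map_message above.
ROLE_MAP = {"ai": "assistant", "system": "system", "human": "user"}

def to_openai_message(role_name: str, content: str) -> dict:
    """Translate a frogml (role_name, content) pair to an OpenAI chat message."""
    return {"role": ROLE_MAP[role_name], "content": content}
```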
frogml_core/vector_store/__init__.py
@@ -1,4 +0,0 @@
-from .client import VectorStoreClient
-from .collection import Collection
-
-__all__ = ["VectorStoreClient", "Collection"]