casedev 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. casedev/__init__.py +104 -0
  2. casedev/_base_client.py +1995 -0
  3. casedev/_client.py +560 -0
  4. casedev/_compat.py +219 -0
  5. casedev/_constants.py +14 -0
  6. casedev/_exceptions.py +108 -0
  7. casedev/_files.py +123 -0
  8. casedev/_models.py +857 -0
  9. casedev/_qs.py +150 -0
  10. casedev/_resource.py +43 -0
  11. casedev/_response.py +830 -0
  12. casedev/_streaming.py +333 -0
  13. casedev/_types.py +260 -0
  14. casedev/_utils/__init__.py +64 -0
  15. casedev/_utils/_compat.py +45 -0
  16. casedev/_utils/_datetime_parse.py +136 -0
  17. casedev/_utils/_logs.py +25 -0
  18. casedev/_utils/_proxy.py +65 -0
  19. casedev/_utils/_reflection.py +42 -0
  20. casedev/_utils/_resources_proxy.py +24 -0
  21. casedev/_utils/_streams.py +12 -0
  22. casedev/_utils/_sync.py +58 -0
  23. casedev/_utils/_transform.py +457 -0
  24. casedev/_utils/_typing.py +156 -0
  25. casedev/_utils/_utils.py +421 -0
  26. casedev/_version.py +4 -0
  27. casedev/lib/.keep +4 -0
  28. casedev/py.typed +0 -0
  29. casedev/resources/__init__.py +173 -0
  30. casedev/resources/actions/__init__.py +33 -0
  31. casedev/resources/actions/actions.py +102 -0
  32. casedev/resources/actions/v1.py +640 -0
  33. casedev/resources/compute/__init__.py +33 -0
  34. casedev/resources/compute/compute.py +102 -0
  35. casedev/resources/compute/v1/__init__.py +89 -0
  36. casedev/resources/compute/v1/environments.py +492 -0
  37. casedev/resources/compute/v1/functions.py +278 -0
  38. casedev/resources/compute/v1/invoke.py +216 -0
  39. casedev/resources/compute/v1/runs.py +290 -0
  40. casedev/resources/compute/v1/secrets.py +655 -0
  41. casedev/resources/compute/v1/v1.py +583 -0
  42. casedev/resources/convert/__init__.py +33 -0
  43. casedev/resources/convert/convert.py +102 -0
  44. casedev/resources/convert/v1/__init__.py +33 -0
  45. casedev/resources/convert/v1/jobs.py +254 -0
  46. casedev/resources/convert/v1/v1.py +450 -0
  47. casedev/resources/format/__init__.py +33 -0
  48. casedev/resources/format/format.py +102 -0
  49. casedev/resources/format/v1/__init__.py +33 -0
  50. casedev/resources/format/v1/templates.py +419 -0
  51. casedev/resources/format/v1/v1.py +244 -0
  52. casedev/resources/llm/__init__.py +33 -0
  53. casedev/resources/llm/llm.py +192 -0
  54. casedev/resources/llm/v1/__init__.py +33 -0
  55. casedev/resources/llm/v1/chat.py +243 -0
  56. casedev/resources/llm/v1/v1.py +317 -0
  57. casedev/resources/ocr/__init__.py +33 -0
  58. casedev/resources/ocr/ocr.py +102 -0
  59. casedev/resources/ocr/v1.py +407 -0
  60. casedev/resources/search/__init__.py +33 -0
  61. casedev/resources/search/search.py +102 -0
  62. casedev/resources/search/v1.py +1052 -0
  63. casedev/resources/templates/__init__.py +33 -0
  64. casedev/resources/templates/templates.py +102 -0
  65. casedev/resources/templates/v1.py +633 -0
  66. casedev/resources/vault/__init__.py +47 -0
  67. casedev/resources/vault/graphrag.py +256 -0
  68. casedev/resources/vault/objects.py +571 -0
  69. casedev/resources/vault/vault.py +764 -0
  70. casedev/resources/voice/__init__.py +61 -0
  71. casedev/resources/voice/streaming.py +160 -0
  72. casedev/resources/voice/transcription.py +327 -0
  73. casedev/resources/voice/v1/__init__.py +33 -0
  74. casedev/resources/voice/v1/speak.py +478 -0
  75. casedev/resources/voice/v1/v1.py +290 -0
  76. casedev/resources/voice/voice.py +166 -0
  77. casedev/resources/webhooks/__init__.py +33 -0
  78. casedev/resources/webhooks/v1.py +447 -0
  79. casedev/resources/webhooks/webhooks.py +102 -0
  80. casedev/resources/workflows/__init__.py +33 -0
  81. casedev/resources/workflows/v1.py +1053 -0
  82. casedev/resources/workflows/workflows.py +102 -0
  83. casedev/types/__init__.py +12 -0
  84. casedev/types/actions/__init__.py +8 -0
  85. casedev/types/actions/v1_create_params.py +22 -0
  86. casedev/types/actions/v1_create_response.py +33 -0
  87. casedev/types/actions/v1_execute_params.py +16 -0
  88. casedev/types/actions/v1_execute_response.py +31 -0
  89. casedev/types/compute/__init__.py +7 -0
  90. casedev/types/compute/v1/__init__.py +18 -0
  91. casedev/types/compute/v1/environment_create_params.py +12 -0
  92. casedev/types/compute/v1/environment_create_response.py +34 -0
  93. casedev/types/compute/v1/environment_delete_response.py +11 -0
  94. casedev/types/compute/v1/function_get_logs_params.py +12 -0
  95. casedev/types/compute/v1/function_list_params.py +12 -0
  96. casedev/types/compute/v1/invoke_run_params.py +21 -0
  97. casedev/types/compute/v1/invoke_run_response.py +39 -0
  98. casedev/types/compute/v1/run_list_params.py +18 -0
  99. casedev/types/compute/v1/secret_create_params.py +24 -0
  100. casedev/types/compute/v1/secret_create_response.py +24 -0
  101. casedev/types/compute/v1/secret_delete_group_params.py +18 -0
  102. casedev/types/compute/v1/secret_list_params.py +15 -0
  103. casedev/types/compute/v1/secret_retrieve_group_params.py +12 -0
  104. casedev/types/compute/v1/secret_update_group_params.py +16 -0
  105. casedev/types/compute/v1_deploy_params.py +114 -0
  106. casedev/types/compute/v1_deploy_response.py +30 -0
  107. casedev/types/compute/v1_get_usage_params.py +15 -0
  108. casedev/types/convert/__init__.py +8 -0
  109. casedev/types/convert/v1/__init__.py +3 -0
  110. casedev/types/convert/v1_process_params.py +15 -0
  111. casedev/types/convert/v1_process_response.py +19 -0
  112. casedev/types/convert/v1_webhook_params.py +32 -0
  113. casedev/types/convert/v1_webhook_response.py +13 -0
  114. casedev/types/format/__init__.py +5 -0
  115. casedev/types/format/v1/__init__.py +7 -0
  116. casedev/types/format/v1/template_create_params.py +32 -0
  117. casedev/types/format/v1/template_create_response.py +27 -0
  118. casedev/types/format/v1/template_list_params.py +12 -0
  119. casedev/types/format/v1_create_document_params.py +42 -0
  120. casedev/types/llm/__init__.py +5 -0
  121. casedev/types/llm/v1/__init__.py +6 -0
  122. casedev/types/llm/v1/chat_create_completion_params.py +42 -0
  123. casedev/types/llm/v1/chat_create_completion_response.py +49 -0
  124. casedev/types/llm/v1_create_embedding_params.py +27 -0
  125. casedev/types/ocr/__init__.py +6 -0
  126. casedev/types/ocr/v1_process_params.py +44 -0
  127. casedev/types/ocr/v1_process_response.py +32 -0
  128. casedev/types/search/__init__.py +15 -0
  129. casedev/types/search/v1_answer_params.py +45 -0
  130. casedev/types/search/v1_answer_response.py +35 -0
  131. casedev/types/search/v1_contents_params.py +42 -0
  132. casedev/types/search/v1_contents_response.py +31 -0
  133. casedev/types/search/v1_research_params.py +23 -0
  134. casedev/types/search/v1_research_response.py +20 -0
  135. casedev/types/search/v1_retrieve_research_params.py +15 -0
  136. casedev/types/search/v1_search_params.py +56 -0
  137. casedev/types/search/v1_search_response.py +38 -0
  138. casedev/types/search/v1_similar_params.py +44 -0
  139. casedev/types/search/v1_similar_response.py +33 -0
  140. casedev/types/templates/__init__.py +8 -0
  141. casedev/types/templates/v1_execute_params.py +22 -0
  142. casedev/types/templates/v1_execute_response.py +31 -0
  143. casedev/types/templates/v1_list_params.py +32 -0
  144. casedev/types/templates/v1_search_params.py +18 -0
  145. casedev/types/vault/__init__.py +6 -0
  146. casedev/types/vault/object_create_presigned_url_params.py +22 -0
  147. casedev/types/vault/object_create_presigned_url_response.py +51 -0
  148. casedev/types/vault_create_params.py +20 -0
  149. casedev/types/vault_create_response.py +36 -0
  150. casedev/types/vault_ingest_response.py +26 -0
  151. casedev/types/vault_list_response.py +40 -0
  152. casedev/types/vault_search_params.py +27 -0
  153. casedev/types/vault_search_response.py +53 -0
  154. casedev/types/vault_upload_params.py +26 -0
  155. casedev/types/vault_upload_response.py +39 -0
  156. casedev/types/voice/__init__.py +7 -0
  157. casedev/types/voice/transcription_create_params.py +36 -0
  158. casedev/types/voice/transcription_retrieve_response.py +41 -0
  159. casedev/types/voice/v1/__init__.py +6 -0
  160. casedev/types/voice/v1/speak_create_params.py +59 -0
  161. casedev/types/voice/v1/speak_stream_params.py +58 -0
  162. casedev/types/voice/v1_list_voices_params.py +36 -0
  163. casedev/types/webhooks/__init__.py +6 -0
  164. casedev/types/webhooks/v1_create_params.py +20 -0
  165. casedev/types/webhooks/v1_create_response.py +33 -0
  166. casedev/types/workflows/__init__.py +19 -0
  167. casedev/types/workflows/v1_create_params.py +32 -0
  168. casedev/types/workflows/v1_create_response.py +29 -0
  169. casedev/types/workflows/v1_delete_response.py +13 -0
  170. casedev/types/workflows/v1_deploy_response.py +20 -0
  171. casedev/types/workflows/v1_execute_params.py +12 -0
  172. casedev/types/workflows/v1_execute_response.py +22 -0
  173. casedev/types/workflows/v1_list_executions_params.py +13 -0
  174. casedev/types/workflows/v1_list_executions_response.py +27 -0
  175. casedev/types/workflows/v1_list_params.py +18 -0
  176. casedev/types/workflows/v1_list_response.py +37 -0
  177. casedev/types/workflows/v1_retrieve_execution_response.py +31 -0
  178. casedev/types/workflows/v1_retrieve_response.py +35 -0
  179. casedev/types/workflows/v1_undeploy_response.py +13 -0
  180. casedev/types/workflows/v1_update_params.py +26 -0
  181. casedev/types/workflows/v1_update_response.py +17 -0
  182. casedev-0.1.0.dist-info/METADATA +454 -0
  183. casedev-0.1.0.dist-info/RECORD +185 -0
  184. casedev-0.1.0.dist-info/WHEEL +4 -0
  185. casedev-0.1.0.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,192 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ import httpx
6
+
7
+ from .v1.v1 import (
8
+ V1Resource,
9
+ AsyncV1Resource,
10
+ V1ResourceWithRawResponse,
11
+ AsyncV1ResourceWithRawResponse,
12
+ V1ResourceWithStreamingResponse,
13
+ AsyncV1ResourceWithStreamingResponse,
14
+ )
15
+ from ..._types import Body, Query, Headers, NoneType, NotGiven, not_given
16
+ from ..._compat import cached_property
17
+ from ..._resource import SyncAPIResource, AsyncAPIResource
18
+ from ..._response import (
19
+ to_raw_response_wrapper,
20
+ to_streamed_response_wrapper,
21
+ async_to_raw_response_wrapper,
22
+ async_to_streamed_response_wrapper,
23
+ )
24
+ from ..._base_client import make_request_options
25
+
26
+ __all__ = ["LlmResource", "AsyncLlmResource"]
27
+
28
+
29
class LlmResource(SyncAPIResource):
    @cached_property
    def v1(self) -> V1Resource:
        return V1Resource(self._client)

    @cached_property
    def with_raw_response(self) -> LlmResourceWithRawResponse:
        """
        Prefix any HTTP method call with this property to get the raw
        response object back instead of the parsed content.

        For more information, see https://www.github.com/CaseMark/casedev-python#accessing-raw-response-data-eg-headers
        """
        return LlmResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> LlmResourceWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/CaseMark/casedev-python#with_streaming_response
        """
        return LlmResourceWithStreamingResponse(self)

    def get_config(
        self,
        *,
        # The arguments below let callers pass extra request parameters that are
        # not exposed as explicit keyword arguments; values given here take
        # precedence over client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Retrieves the AI Gateway configuration, including every available
        language model and its specification. The payload follows the Vercel
        AI SDK Gateway format, which makes it easy to plug into existing AI
        applications.

        Use this endpoint to:

        - Discover available language models
        - Get model specifications and pricing
        - Configure AI SDK clients
        - Build model selection interfaces
        """
        # Force a wildcard Accept header; caller-supplied headers still win.
        merged_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._get(
            "/llm/config",
            options=make_request_options(
                extra_headers=merged_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=NoneType,
        )
84
+
85
+
86
class AsyncLlmResource(AsyncAPIResource):
    @cached_property
    def v1(self) -> AsyncV1Resource:
        return AsyncV1Resource(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncLlmResourceWithRawResponse:
        """
        Prefix any HTTP method call with this property to get the raw
        response object back instead of the parsed content.

        For more information, see https://www.github.com/CaseMark/casedev-python#accessing-raw-response-data-eg-headers
        """
        return AsyncLlmResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncLlmResourceWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/CaseMark/casedev-python#with_streaming_response
        """
        return AsyncLlmResourceWithStreamingResponse(self)

    async def get_config(
        self,
        *,
        # The arguments below let callers pass extra request parameters that are
        # not exposed as explicit keyword arguments; values given here take
        # precedence over client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Retrieves the AI Gateway configuration, including every available
        language model and its specification. The payload follows the Vercel
        AI SDK Gateway format, which makes it easy to plug into existing AI
        applications.

        Use this endpoint to:

        - Discover available language models
        - Get model specifications and pricing
        - Configure AI SDK clients
        - Build model selection interfaces
        """
        # Force a wildcard Accept header; caller-supplied headers still win.
        merged_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._get(
            "/llm/config",
            options=make_request_options(
                extra_headers=merged_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=NoneType,
        )
141
+
142
+
143
class LlmResourceWithRawResponse:
    """Raw-response view of `LlmResource`: each wrapped method returns the raw HTTP response."""

    def __init__(self, llm: LlmResource) -> None:
        self._llm = llm
        self.get_config = to_raw_response_wrapper(llm.get_config)

    @cached_property
    def v1(self) -> V1ResourceWithRawResponse:
        return V1ResourceWithRawResponse(self._llm.v1)
154
+
155
+
156
class AsyncLlmResourceWithRawResponse:
    """Raw-response view of `AsyncLlmResource`: each wrapped method returns the raw HTTP response."""

    def __init__(self, llm: AsyncLlmResource) -> None:
        self._llm = llm
        self.get_config = async_to_raw_response_wrapper(llm.get_config)

    @cached_property
    def v1(self) -> AsyncV1ResourceWithRawResponse:
        return AsyncV1ResourceWithRawResponse(self._llm.v1)
167
+
168
+
169
class LlmResourceWithStreamingResponse:
    """Streaming-response view of `LlmResource`: response bodies are not read eagerly."""

    def __init__(self, llm: LlmResource) -> None:
        self._llm = llm
        self.get_config = to_streamed_response_wrapper(llm.get_config)

    @cached_property
    def v1(self) -> V1ResourceWithStreamingResponse:
        return V1ResourceWithStreamingResponse(self._llm.v1)
180
+
181
+
182
class AsyncLlmResourceWithStreamingResponse:
    """Streaming-response view of `AsyncLlmResource`: response bodies are not read eagerly."""

    def __init__(self, llm: AsyncLlmResource) -> None:
        self._llm = llm
        self.get_config = async_to_streamed_response_wrapper(llm.get_config)

    @cached_property
    def v1(self) -> AsyncV1ResourceWithStreamingResponse:
        return AsyncV1ResourceWithStreamingResponse(self._llm.v1)
@@ -0,0 +1,33 @@
1
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

# Package init for the `llm.v1` resources: re-exports the v1 and chat resource
# classes (sync/async plus their raw- and streaming-response views) so callers
# can import them from this package directly.
from .v1 import (
    V1Resource,
    AsyncV1Resource,
    V1ResourceWithRawResponse,
    AsyncV1ResourceWithRawResponse,
    V1ResourceWithStreamingResponse,
    AsyncV1ResourceWithStreamingResponse,
)
from .chat import (
    ChatResource,
    AsyncChatResource,
    ChatResourceWithRawResponse,
    AsyncChatResourceWithRawResponse,
    ChatResourceWithStreamingResponse,
    AsyncChatResourceWithStreamingResponse,
)

# Explicit public API of this package.
__all__ = [
    "ChatResource",
    "AsyncChatResource",
    "ChatResourceWithRawResponse",
    "AsyncChatResourceWithRawResponse",
    "ChatResourceWithStreamingResponse",
    "AsyncChatResourceWithStreamingResponse",
    "V1Resource",
    "AsyncV1Resource",
    "V1ResourceWithRawResponse",
    "AsyncV1ResourceWithRawResponse",
    "V1ResourceWithStreamingResponse",
    "AsyncV1ResourceWithStreamingResponse",
]
@@ -0,0 +1,243 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Iterable
6
+
7
+ import httpx
8
+
9
+ from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
10
+ from ...._utils import maybe_transform, async_maybe_transform
11
+ from ...._compat import cached_property
12
+ from ...._resource import SyncAPIResource, AsyncAPIResource
13
+ from ...._response import (
14
+ to_raw_response_wrapper,
15
+ to_streamed_response_wrapper,
16
+ async_to_raw_response_wrapper,
17
+ async_to_streamed_response_wrapper,
18
+ )
19
+ from ...._base_client import make_request_options
20
+ from ....types.llm.v1 import chat_create_completion_params
21
+ from ....types.llm.v1.chat_create_completion_response import ChatCreateCompletionResponse
22
+
23
+ __all__ = ["ChatResource", "AsyncChatResource"]
24
+
25
+
26
class ChatResource(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> ChatResourceWithRawResponse:
        """
        Prefix any HTTP method call with this property to get the raw
        response object back instead of the parsed content.

        For more information, see https://www.github.com/CaseMark/casedev-python#accessing-raw-response-data-eg-headers
        """
        return ChatResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/CaseMark/casedev-python#with_streaming_response
        """
        return ChatResourceWithStreamingResponse(self)

    def create_completion(
        self,
        *,
        messages: Iterable[chat_create_completion_params.Message],
        frequency_penalty: float | Omit = omit,
        max_tokens: int | Omit = omit,
        model: str | Omit = omit,
        presence_penalty: float | Omit = omit,
        stream: bool | Omit = omit,
        temperature: float | Omit = omit,
        top_p: float | Omit = omit,
        # The arguments below let callers pass extra request parameters that are
        # not exposed as explicit keyword arguments; values given here take
        # precedence over client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCreateCompletionResponse:
        """Create a completion for the provided prompt and parameters.

        Compatible with OpenAI's chat completions API. Supports 40+ models
        including GPT-4, Claude, Gemini, and CaseMark legal AI models.
        Includes streaming support, token counting, and usage tracking.

        Args:
          messages: List of messages comprising the conversation

          frequency_penalty: Frequency penalty parameter

          max_tokens: Maximum number of tokens to generate

          model: Model to use for completion. Defaults to casemark-core-1 if not specified

          presence_penalty: Presence penalty parameter

          stream: Whether to stream back partial progress

          temperature: Sampling temperature between 0 and 2

          top_p: Nucleus sampling parameter

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Omitted optional parameters are filtered out by maybe_transform.
        payload = {
            "messages": messages,
            "frequency_penalty": frequency_penalty,
            "max_tokens": max_tokens,
            "model": model,
            "presence_penalty": presence_penalty,
            "stream": stream,
            "temperature": temperature,
            "top_p": top_p,
        }
        return self._post(
            "/llm/v1/chat/completions",
            body=maybe_transform(payload, chat_create_completion_params.ChatCreateCompletionParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=ChatCreateCompletionResponse,
        )
116
+
117
+
118
class AsyncChatResource(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncChatResourceWithRawResponse:
        """
        Prefix any HTTP method call with this property to get the raw
        response object back instead of the parsed content.

        For more information, see https://www.github.com/CaseMark/casedev-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/CaseMark/casedev-python#with_streaming_response
        """
        return AsyncChatResourceWithStreamingResponse(self)

    async def create_completion(
        self,
        *,
        messages: Iterable[chat_create_completion_params.Message],
        frequency_penalty: float | Omit = omit,
        max_tokens: int | Omit = omit,
        model: str | Omit = omit,
        presence_penalty: float | Omit = omit,
        stream: bool | Omit = omit,
        temperature: float | Omit = omit,
        top_p: float | Omit = omit,
        # The arguments below let callers pass extra request parameters that are
        # not exposed as explicit keyword arguments; values given here take
        # precedence over client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCreateCompletionResponse:
        """Create a completion for the provided prompt and parameters.

        Compatible with OpenAI's chat completions API. Supports 40+ models
        including GPT-4, Claude, Gemini, and CaseMark legal AI models.
        Includes streaming support, token counting, and usage tracking.

        Args:
          messages: List of messages comprising the conversation

          frequency_penalty: Frequency penalty parameter

          max_tokens: Maximum number of tokens to generate

          model: Model to use for completion. Defaults to casemark-core-1 if not specified

          presence_penalty: Presence penalty parameter

          stream: Whether to stream back partial progress

          temperature: Sampling temperature between 0 and 2

          top_p: Nucleus sampling parameter

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Omitted optional parameters are filtered out by async_maybe_transform.
        payload = {
            "messages": messages,
            "frequency_penalty": frequency_penalty,
            "max_tokens": max_tokens,
            "model": model,
            "presence_penalty": presence_penalty,
            "stream": stream,
            "temperature": temperature,
            "top_p": top_p,
        }
        return await self._post(
            "/llm/v1/chat/completions",
            body=await async_maybe_transform(payload, chat_create_completion_params.ChatCreateCompletionParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=ChatCreateCompletionResponse,
        )
208
+
209
+
210
class ChatResourceWithRawResponse:
    """Raw-response view of `ChatResource`: each wrapped method returns the raw HTTP response."""

    def __init__(self, chat: ChatResource) -> None:
        self._chat = chat
        self.create_completion = to_raw_response_wrapper(chat.create_completion)
217
+
218
+
219
class AsyncChatResourceWithRawResponse:
    """Raw-response view of `AsyncChatResource`: each wrapped method returns the raw HTTP response."""

    def __init__(self, chat: AsyncChatResource) -> None:
        self._chat = chat
        self.create_completion = async_to_raw_response_wrapper(chat.create_completion)
226
+
227
+
228
class ChatResourceWithStreamingResponse:
    """Streaming-response view of `ChatResource`: response bodies are not read eagerly."""

    def __init__(self, chat: ChatResource) -> None:
        self._chat = chat
        self.create_completion = to_streamed_response_wrapper(chat.create_completion)
235
+
236
+
237
class AsyncChatResourceWithStreamingResponse:
    """Streaming-response view of `AsyncChatResource`: response bodies are not read eagerly."""

    def __init__(self, chat: AsyncChatResource) -> None:
        self._chat = chat
        self.create_completion = async_to_streamed_response_wrapper(chat.create_completion)