posthoganalytics 6.7.0__py3-none-any.whl → 7.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- posthoganalytics/__init__.py +84 -7
- posthoganalytics/ai/anthropic/__init__.py +10 -0
- posthoganalytics/ai/anthropic/anthropic.py +95 -65
- posthoganalytics/ai/anthropic/anthropic_async.py +95 -65
- posthoganalytics/ai/anthropic/anthropic_converter.py +443 -0
- posthoganalytics/ai/gemini/__init__.py +15 -1
- posthoganalytics/ai/gemini/gemini.py +66 -71
- posthoganalytics/ai/gemini/gemini_async.py +423 -0
- posthoganalytics/ai/gemini/gemini_converter.py +652 -0
- posthoganalytics/ai/langchain/callbacks.py +58 -13
- posthoganalytics/ai/openai/__init__.py +16 -1
- posthoganalytics/ai/openai/openai.py +140 -149
- posthoganalytics/ai/openai/openai_async.py +127 -82
- posthoganalytics/ai/openai/openai_converter.py +741 -0
- posthoganalytics/ai/sanitization.py +248 -0
- posthoganalytics/ai/types.py +125 -0
- posthoganalytics/ai/utils.py +339 -356
- posthoganalytics/client.py +345 -97
- posthoganalytics/contexts.py +81 -0
- posthoganalytics/exception_utils.py +250 -2
- posthoganalytics/feature_flags.py +26 -10
- posthoganalytics/flag_definition_cache.py +127 -0
- posthoganalytics/integrations/django.py +157 -19
- posthoganalytics/request.py +203 -23
- posthoganalytics/test/test_client.py +250 -22
- posthoganalytics/test/test_exception_capture.py +418 -0
- posthoganalytics/test/test_feature_flag_result.py +441 -2
- posthoganalytics/test/test_feature_flags.py +308 -104
- posthoganalytics/test/test_flag_definition_cache.py +612 -0
- posthoganalytics/test/test_module.py +0 -8
- posthoganalytics/test/test_request.py +536 -0
- posthoganalytics/test/test_utils.py +4 -1
- posthoganalytics/types.py +40 -0
- posthoganalytics/version.py +1 -1
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/METADATA +12 -12
- posthoganalytics-7.4.3.dist-info/RECORD +57 -0
- posthoganalytics-6.7.0.dist-info/RECORD +0 -49
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/WHEEL +0 -0
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/licenses/LICENSE +0 -0
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,443 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Anthropic-specific conversion utilities.
|
|
3
|
+
|
|
4
|
+
This module handles the conversion of Anthropic API responses and inputs
|
|
5
|
+
into standardized formats for PostHog tracking.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
10
|
+
|
|
11
|
+
from posthoganalytics.ai.types import (
|
|
12
|
+
FormattedContentItem,
|
|
13
|
+
FormattedFunctionCall,
|
|
14
|
+
FormattedMessage,
|
|
15
|
+
FormattedTextContent,
|
|
16
|
+
StreamingContentBlock,
|
|
17
|
+
TokenUsage,
|
|
18
|
+
ToolInProgress,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def format_anthropic_response(response: Any) -> List[FormattedMessage]:
    """
    Convert an Anthropic API response into the standardized message format.

    Args:
        response: The response object from the Anthropic API

    Returns:
        A list containing a single assistant message when the response
        carries usable content, otherwise an empty list.
    """
    if response is None:
        return []

    items: List[FormattedContentItem] = []

    # Walk the response's content blocks; unknown block types are skipped.
    for part in (response.content if hasattr(response, "content") else []):
        kind = getattr(part, "type", None)

        if kind == "text" and getattr(part, "text", None):
            items.append({"type": "text", "text": part.text})
        elif kind == "tool_use" and hasattr(part, "name") and hasattr(part, "id"):
            items.append(
                {
                    "type": "function",
                    "id": part.id,
                    "function": {
                        "name": part.name,
                        "arguments": getattr(part, "input", {}),
                    },
                }
            )

    return [{"role": "assistant", "content": items}] if items else []
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def format_anthropic_input(
    messages: List[Dict[str, Any]], system: Optional[str] = None
) -> List[FormattedMessage]:
    """
    Normalize Anthropic input messages, optionally prepending a system prompt.

    Args:
        messages: List of message dictionaries
        system: Optional system prompt to prepend

    Returns:
        List of formatted messages, system prompt first when given
    """
    prefix: List[FormattedMessage] = (
        [{"role": "system", "content": system}] if system is not None else []
    )

    # Messages are already close to the target shape; re-emit them with
    # defaults so every entry has both keys.
    body: List[FormattedMessage] = [
        {"role": entry.get("role", "user"), "content": entry.get("content", "")}
        for entry in (messages or [])
    ]

    return prefix + body
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def extract_anthropic_tools(kwargs: Dict[str, Any]) -> Optional[Any]:
    """
    Pull tool definitions out of Anthropic API keyword arguments.

    Args:
        kwargs: Keyword arguments passed to the Anthropic API

    Returns:
        The value of the ``tools`` argument, or None when absent
    """
    return kwargs.get("tools")
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def format_anthropic_streaming_content(
    content_blocks: List[StreamingContentBlock],
) -> List[FormattedContentItem]:
    """
    Convert accumulated streaming content blocks into formatted content items.

    Used by streaming handlers once all deltas for a block have arrived.
    Blocks with an unrecognized type are dropped.

    Args:
        content_blocks: Content block dictionaries gathered while streaming

    Returns:
        List of formatted content items
    """
    items: List[FormattedContentItem] = []

    for blk in content_blocks:
        kind = blk.get("type")

        if kind == "text":
            # Missing/None text collapses to the empty string.
            items.append({"type": "text", "text": blk.get("text") or ""})
        elif kind == "function":
            items.append(
                {
                    "type": "function",
                    "id": blk.get("id"),
                    "function": blk.get("function") or {},
                }
            )

    return items
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def extract_anthropic_web_search_count(response: Any) -> int:
    """
    Extract the web search count from an Anthropic response.

    Anthropic reports exact counts at
    ``usage.server_tool_use.web_search_requests``.

    Args:
        response: The response from the Anthropic API

    Returns:
        Number of web search requests, clamped to a minimum of 0
    """
    tool_use = getattr(getattr(response, "usage", None), "server_tool_use", None)

    if tool_use is None or not hasattr(tool_use, "web_search_requests"):
        return 0

    return max(0, int(tool_use.web_search_requests))
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def extract_anthropic_usage_from_response(response: Any) -> TokenUsage:
    """
    Extract token usage from a complete (non-streaming) Anthropic response.

    Args:
        response: The complete response from the Anthropic API

    Returns:
        TokenUsage with standardized counts; cache and web-search fields are
        included only when they are positive.
    """
    if not hasattr(response, "usage"):
        return TokenUsage(input_tokens=0, output_tokens=0)

    usage = response.usage
    result = TokenUsage(
        input_tokens=getattr(usage, "input_tokens", 0),
        output_tokens=getattr(usage, "output_tokens", 0),
    )

    cache_read = getattr(usage, "cache_read_input_tokens", None)
    if cache_read and cache_read > 0:
        result["cache_read_input_tokens"] = cache_read

    cache_creation = getattr(usage, "cache_creation_input_tokens", None)
    if cache_creation and cache_creation > 0:
        result["cache_creation_input_tokens"] = cache_creation

    searches = extract_anthropic_web_search_count(response)
    if searches > 0:
        result["web_search_count"] = searches

    return result
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def extract_anthropic_usage_from_event(event: Any) -> TokenUsage:
    """
    Extract usage statistics from an Anthropic streaming event.

    ``message_start`` events carry the input-side counts on
    ``event.message.usage``; later events (e.g. ``message_delta``) expose a
    top-level ``usage`` with output tokens and optional web-search counts.

    Args:
        event: Streaming event from the Anthropic API

    Returns:
        TokenUsage populated with whatever counts this event provides
    """
    usage: TokenUsage = TokenUsage()

    # Input-side counts arrive once, on the message_start event.
    if getattr(event, "type", None) == "message_start":
        if hasattr(event, "message") and hasattr(event.message, "usage"):
            start_usage = event.message.usage
            usage["input_tokens"] = getattr(start_usage, "input_tokens", 0)
            usage["cache_creation_input_tokens"] = getattr(
                start_usage, "cache_creation_input_tokens", 0
            )
            usage["cache_read_input_tokens"] = getattr(
                start_usage, "cache_read_input_tokens", 0
            )

    # Output-side counts arrive on events carrying a truthy top-level usage.
    delta_usage = getattr(event, "usage", None)
    if delta_usage:
        usage["output_tokens"] = getattr(delta_usage, "output_tokens", 0)

        server_tools = getattr(delta_usage, "server_tool_use", None)
        if hasattr(server_tools, "web_search_requests"):
            searches = int(server_tools.web_search_requests)
            if searches > 0:
                usage["web_search_count"] = searches

    return usage
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
def handle_anthropic_content_block_start(
    event: Any,
) -> Tuple[Optional[StreamingContentBlock], Optional[ToolInProgress]]:
    """
    Start tracking a new content block from an Anthropic streaming event.

    Args:
        event: Streaming event, expected to be ``content_block_start``

    Returns:
        ``(content_block, tool_in_progress)``. The second element is only set
        for tool-use blocks and aliases the returned block, so later deltas
        mutate it in place. ``(None, None)`` for anything unrecognized.
    """
    if getattr(event, "type", None) != "content_block_start":
        return None, None

    if not hasattr(event, "content_block"):
        return None, None

    started = event.content_block
    started_type = getattr(started, "type", None)

    if started_type == "text":
        return {"type": "text", "text": ""}, None

    if started_type == "tool_use":
        call_block: StreamingContentBlock = {
            "type": "function",
            "id": getattr(started, "id", ""),
            "function": {"name": getattr(started, "name", ""), "arguments": {}},
        }
        return call_block, {"block": call_block, "input_string": ""}

    return None, None
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
def handle_anthropic_text_delta(
    event: Any, current_block: Optional[StreamingContentBlock]
) -> Optional[str]:
    """
    Apply a text delta event from Anthropic streaming.

    Args:
        event: Delta event
        current_block: Current text block being accumulated, if any

    Returns:
        The delta text when the event carries one (appended to
        ``current_block`` in place when it is a text block), else None
    """
    delta = getattr(event, "delta", None)
    if delta is None or not hasattr(delta, "text"):
        return None

    piece = delta.text or ""

    if current_block is not None and current_block.get("type") == "text":
        existing = current_block.get("text")
        # A block that has never received text may hold None.
        current_block["text"] = piece if existing is None else existing + piece

    return piece
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
def handle_anthropic_tool_delta(
    event: Any,
    content_blocks: List[StreamingContentBlock],
    tools_in_progress: Dict[str, ToolInProgress],
) -> None:
    """
    Accumulate partial JSON for a tool call from a streaming delta event.

    Args:
        event: Tool delta event
        content_blocks: List of content blocks accumulated so far
        tools_in_progress: Mapping of tool id to its accumulation state

    Returns:
        None. Appends ``event.delta.partial_json`` to the matching tool's
        ``input_string`` in place.
    """
    if getattr(event, "type", None) != "content_block_delta":
        return

    delta = getattr(event, "delta", None)
    if getattr(delta, "type", None) != "input_json_delta":
        return

    if not hasattr(event, "index") or event.index >= len(content_blocks):
        return

    target = content_blocks[event.index]
    tool_id = target.get("id")

    if target.get("type") == "function" and tool_id in tools_in_progress:
        tools_in_progress[tool_id]["input_string"] += getattr(delta, "partial_json", "")
|
|
368
|
+
|
|
369
|
+
|
|
370
|
+
def finalize_anthropic_tool_input(
    event: Any,
    content_blocks: List[StreamingContentBlock],
    tools_in_progress: Dict[str, ToolInProgress],
) -> None:
    """
    Finalize a tool call's input when its content block stops.

    On a ``content_block_stop`` event, parse the JSON string accumulated for
    the tool at ``event.index`` into the block's ``function.arguments`` and
    remove the tool from the in-progress registry.

    Args:
        event: Content block stop event
        content_blocks: List of content blocks accumulated so far
        tools_in_progress: Mapping of tool id to its accumulation state

    Returns:
        None. Mutates ``content_blocks`` and ``tools_in_progress`` in place.
    """
    if not (hasattr(event, "type") and event.type == "content_block_stop"):
        return

    if not (hasattr(event, "index") and event.index < len(content_blocks)):
        return

    block = content_blocks[event.index]

    if block.get("type") != "function" or block.get("id") not in tools_in_progress:
        return

    tool = tools_in_progress[block["id"]]

    try:
        block["function"]["arguments"] = json.loads(tool["input_string"])
    except Exception:
        # Fix: the original `except (json.JSONDecodeError, Exception)` tuple was
        # redundant — Exception already covers JSONDecodeError (bugbear B014).
        # Deliberately best-effort: keep the empty arguments dict when the
        # accumulated string is not valid JSON (e.g. a truncated stream).
        pass

    # The tool is finished either way; stop tracking it.
    del tools_in_progress[block["id"]]
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
def format_anthropic_streaming_input(kwargs: Dict[str, Any]) -> Any:
    """
    Build the PostHog-tracked input for an Anthropic streaming call.

    Merges the ``system`` prompt into the message list via the shared
    provider-aware helper.

    Args:
        kwargs: Keyword arguments passed to the Anthropic API

    Returns:
        Formatted input ready for PostHog tracking
    """
    # NOTE(review): deferred import — presumably avoids a circular import
    # between utils and the provider modules; confirm before hoisting.
    from posthoganalytics.ai.utils import merge_system_prompt

    merged = merge_system_prompt(kwargs, "anthropic")
    return merged
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
def format_anthropic_streaming_output_complete(
    content_blocks: List[StreamingContentBlock], accumulated_content: str
) -> List[FormattedMessage]:
    """
    Format the final output of an Anthropic streaming response.

    Formats the accumulated content blocks; when none of them yields
    formatted content, falls back to a single text item built from the raw
    accumulated string.

    Args:
        content_blocks: Content blocks accumulated during streaming
        accumulated_content: Raw accumulated text used as the fallback

    Returns:
        Formatted messages ready for PostHog tracking
    """
    body = format_anthropic_streaming_content(content_blocks)

    if not body:
        # No recognizable blocks — fall back to the raw accumulated text.
        body = [{"type": "text", "text": accumulated_content}]

    return [{"role": "assistant", "content": body}]
|
|
from .gemini import Client
from .gemini_async import AsyncClient
from .gemini_converter import (
    format_gemini_input,
    format_gemini_response,
    extract_gemini_tools,
)


# Create a genai-like module for perfect drop-in replacement
# (presumably mirrors the `google.genai` module surface so callers can
# keep `genai.Client(...)` call sites unchanged — TODO confirm).
class _GenAI:
    Client = Client
    AsyncClient = AsyncClient


# Module-like singleton exposing Client/AsyncClient under one name.
genai = _GenAI()

# Public API of this package.
__all__ = [
    "Client",
    "AsyncClient",
    "genai",
    "format_gemini_input",
    "format_gemini_response",
    "extract_gemini_tools",
]