veadk-python 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of veadk-python might be problematic.
- veadk/agent.py +40 -8
- veadk/cli/cli_deploy.py +5 -1
- veadk/cli/cli_init.py +25 -6
- veadk/cloud/cloud_app.py +21 -6
- veadk/consts.py +33 -1
- veadk/database/database_adapter.py +88 -0
- veadk/database/kv/redis_database.py +47 -0
- veadk/database/local_database.py +22 -4
- veadk/database/relational/mysql_database.py +58 -0
- veadk/database/vector/opensearch_vector_database.py +6 -3
- veadk/database/viking/viking_database.py +72 -3
- veadk/integrations/ve_cr/__init__.py +13 -0
- veadk/integrations/ve_cr/ve_cr.py +205 -0
- veadk/integrations/ve_faas/template/cookiecutter.json +2 -1
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/clean.py +23 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/app.py +28 -2
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/requirements.txt +3 -1
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/run.sh +5 -2
- veadk/integrations/ve_faas/ve_faas.py +2 -0
- veadk/integrations/ve_faas/web_template/cookiecutter.json +17 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/__init__.py +13 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/clean.py +23 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/config.yaml.example +2 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/deploy.py +41 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/Dockerfile +23 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/app.py +123 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/init_db.py +46 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/models.py +36 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/requirements.txt +4 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/run.sh +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/static/css/style.css +368 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/static/js/admin.js +0 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/dashboard.html +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/edit_post.html +24 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/login.html +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/posts.html +53 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/base.html +45 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/index.html +29 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/post.html +14 -0
- veadk/integrations/ve_tos/ve_tos.py +238 -0
- veadk/knowledgebase/knowledgebase.py +8 -0
- veadk/runner.py +140 -34
- veadk/tools/builtin_tools/image_edit.py +236 -0
- veadk/tools/builtin_tools/image_generate.py +236 -0
- veadk/tools/builtin_tools/video_generate.py +326 -0
- veadk/tools/sandbox/browser_sandbox.py +19 -9
- veadk/tools/sandbox/code_sandbox.py +21 -11
- veadk/tools/sandbox/computer_sandbox.py +16 -9
- veadk/tracing/base_tracer.py +0 -19
- veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py +5 -0
- veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +311 -128
- veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py +20 -14
- veadk/tracing/telemetry/attributes/extractors/types.py +15 -4
- veadk/tracing/telemetry/exporters/inmemory_exporter.py +3 -0
- veadk/tracing/telemetry/opentelemetry_tracer.py +15 -6
- veadk/tracing/telemetry/telemetry.py +128 -24
- veadk/utils/misc.py +40 -0
- veadk/version.py +1 -1
- {veadk_python-0.2.4.dist-info → veadk_python-0.2.6.dist-info}/METADATA +1 -1
- {veadk_python-0.2.4.dist-info → veadk_python-0.2.6.dist-info}/RECORD +64 -37
- {veadk_python-0.2.4.dist-info → veadk_python-0.2.6.dist-info}/WHEEL +0 -0
- {veadk_python-0.2.4.dist-info → veadk_python-0.2.6.dist-info}/entry_points.txt +0 -0
- {veadk_python-0.2.4.dist-info → veadk_python-0.2.6.dist-info}/licenses/LICENSE +0 -0
- {veadk_python-0.2.4.dist-info → veadk_python-0.2.6.dist-info}/top_level.txt +0 -0
veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py:

@@ -18,6 +18,7 @@ from veadk.tracing.telemetry.attributes.extractors.types import (
     ExtractorResponse,
     LLMAttributesParams,
 )
+from veadk.utils.misc import safe_json_serialize
 
 
 def llm_gen_ai_request_model(params: LLMAttributesParams) -> ExtractorResponse:
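The `safe_json_serialize` helper imported above lands in veadk/utils/misc.py (+40 lines in this release) and replaces the bare json.dumps calls throughout this file; its body is not shown in this diff. A minimal sketch of what such a helper typically does, assuming a stringify-on-failure fallback (the implementation details are illustrative, not veadk's actual code):

    import json
    from typing import Any

    def safe_json_serialize(obj: Any) -> str:
        # Hypothetical sketch; the real helper in veadk/utils/misc.py may differ.
        try:
            # default=str stringifies values json.dumps cannot encode natively
            return json.dumps(obj, ensure_ascii=False, default=str)
        except (TypeError, ValueError):
            # e.g. circular references or non-string dict keys
            return str(obj)

This matters for tool-call arguments and tool responses, which may contain values that plain json.dumps would raise on mid-span.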
@@ -102,10 +103,11 @@ def llm_gen_ai_usage_cache_read_input_tokens(
 def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
     # a part is a message
     messages: list[dict] = []
+    idx = 0
 
     for content in params.llm_request.contents:
         if content.parts:
-            for idx, part in enumerate(content.parts):
+            for part in content.parts:
                 message = {}
                 # text part
                 if part.text:
@@ -133,13 +135,25 @@ def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
                         else "<unknown_function_name>"
                     )
                     message[f"gen_ai.prompt.{idx}.tool_calls.0.function.arguments"] = (
-                        json.dumps(part.function_call.args)
+                        safe_json_serialize(part.function_call.args)
                         if part.function_call.args
                         else json.dumps({})
                     )
+                # image
+                if part.inline_data:
+                    message[f"gen_ai.prompt.{idx}.type"] = "image_url"
+                    message[f"gen_ai.prompt.{idx}.image_url.name"] = (
+                        part.inline_data.display_name.split("/")[-1]
+                        if part.inline_data.display_name
+                        else "<unknown_image_name>"
+                    )
+                    message[f"gen_ai.prompt.{idx}.image_url.url"] = (
+                        part.inline_data.display_name
+                    )
 
                 if message:
                     messages.append(message)
+                    idx += 1
 
     return ExtractorResponse(content=messages)
 
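With the new inline_data branch, an image part in the prompt is flattened into the same attribute namespace as text and tool-call parts. For example, assuming part.inline_data.display_name == "uploads/cat.png" at idx == 1 (a made-up value), the resulting message dict would contain:

    {
        "gen_ai.prompt.1.type": "image_url",
        "gen_ai.prompt.1.image_url.name": "cat.png",
        "gen_ai.prompt.1.image_url.url": "uploads/cat.png",
    }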
@@ -168,7 +182,7 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
                         else "<unknown_function_name>"
                     )
                     message[f"gen_ai.completion.{idx}.tool_calls.0.function.arguments"] = (
-                        json.dumps(part.function_call.args)
+                        safe_json_serialize(part.function_call.args)
                         if part.function_call.args
                         else json.dumps({})
                     )
@@ -178,127 +192,241 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
     return ExtractorResponse(content=messages)
 
 
-def …
-… (24 more removed lines are not rendered in the source diff view) …
-        if content.parts:
-            if len(content.parts) == 1:
-                if content.parts[0].text:
-                    message_parts.append(
-                        {
-                            "role": content.role,
-                            "content": content.parts[0].text,
-                        }
-                    )
-                elif content.parts[0].function_response:
-                    message_parts.append(
-                        {
-                            "role": content.role,
-                            "content": str(
-                                content.parts[0].function_response.response
-                            ),
-                        }
-                    )
-            else:
-                message_part = {"role": content.role}
+def llm_gen_ai_messages(params: LLMAttributesParams) -> ExtractorResponse:
+    events = []
+
+    # system message
+    events.append(
+        {
+            "gen_ai.system.message": {
+                "role": "system",
+                "content": str(params.llm_request.config.system_instruction),
+            }
+        }
+    )
+
+    # user, tool, and assistant message
+    if params.llm_request and params.llm_request.contents:
+        for content in params.llm_request.contents:
+            if content and content.parts:
+                # content.role == "user"
+                # part.function_response -> gen_ai.tool.message
+                # not part.function_response -> gen_ai.user.message
+                # content.role == "model" -> gen_ai.assistant.message
+                if content.role == "user":
+                    user_event = {}
+                    user_event["gen_ai.user.message"] = {"role": content.role}
                     for idx, part in enumerate(content.parts):
-                        # text part
-                        if part.text:
-                            message_part[f"parts.{idx}.type"] = "text"
-                            message_part[f"parts.{idx}.content"] = part.text
-                        # function response
                         if part.function_response:
-                            message_part[f"parts.{idx}.type"] = "function"
-                            message_part[f"parts.{idx}.content"] = str(
-                                part.function_response
+                            events.append(
+                                {
+                                    "gen_ai.tool.message": {
+                                        "role": "tool",
+                                        "id": part.function_response.id,
+                                        "content": safe_json_serialize(
+                                            part.function_response.response
+                                        ),
+                                    }
+                                }
                             )
+                        else:
+                            if part.text:
+                                if len(content.parts) == 1:
+                                    user_event["gen_ai.user.message"].update(
+                                        {"content": part.text}
+                                    )
+                                else:
+                                    user_event["gen_ai.user.message"].update(
+                                        {
+                                            f"parts.{idx}.type": "text",
+                                            f"parts.{idx}.text": part.text,
+                                        },
+                                    )
+                            if part.inline_data:
+                                if len(content.parts) == 1:
+                                    # TODO(qingliang)
+                                    pass
+                                else:
+                                    user_event["gen_ai.user.message"].update(
+                                        {
+                                            f"parts.{idx}.type": "image_url",
+                                            f"parts.{idx}.image_url.name": (
+                                                part.inline_data.display_name.split(
+                                                    "/"
+                                                )[-1]
+                                                if part.inline_data.display_name
+                                                else "<unknown_image_name>"
+                                            ),
+                                            f"parts.{idx}.image_url.url": (
+                                                part.inline_data.display_name
+                                                if part.inline_data.display_name
+                                                else "<unknown_image_url>"
+                                            ),
+                                        }
+                                    )
+                    # in case of only function response
+                    if len(user_event["gen_ai.user.message"].items()) > 1:
+                        events.append(user_event)
+                elif content.role == "model":
+                    event = {}
+                    event["gen_ai.assistant.message"] = {"role": content.role}
+                    for idx, part in enumerate(content.parts):
+                        if part.text:
+                            event["gen_ai.assistant.message"].update(
+                                {
+                                    f"parts.{idx}.type": "text",
+                                    f"parts.{idx}.text": part.text,
+                                }
+                            )
+                        if part.function_call:
+                            event["gen_ai.assistant.message"].update(
+                                {
+                                    "tool_calls.0.id": str(part.function_call.id),
+                                    "tool_calls.0.type": "function",
+                                    "tool_calls.0.function.name": part.function_call.name
+                                    if part.function_call.name
+                                    else "<unknown_function_name>",
+                                    "tool_calls.0.function.arguments": safe_json_serialize(
+                                        part.function_call.args
+                                    )
+                                    if part.function_call.args
+                                    else json.dumps({}),
+                                }
+                            )
+                    events.append(event)
 
-                    message_parts.append(message_part)
-
-            if message_parts:
-                messages.extend(message_parts)
-
-    return ExtractorResponse(type="event", content=messages)
+    return ExtractorResponse(type="event_list", content=events)
 
 
-def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorResponse:
-    # a content is a message
-    messages = []
+def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
+    # return params.llm_request.stream
+    return ExtractorResponse(content=None)
 
-    # each part in each content we make it a message
-    # e.g. 2 contents and 3 parts each means 6 messages
-    for content in params.llm_request.contents:
-        if content.role == "model":
-            message_parts = []
-
-            # each part we make it a message
-            if content.parts:
-                # only one part
-                if len(content.parts) == 1:
-                    if content.parts[0].text:
-                        message_parts.append(
-                            {
-                                "role": content.role,
-                                "content": content.parts[0].text,
-                            }
-                        )
-                    elif content.parts[0].function_call:
-                        pass
-                # multiple parts
-                else:
-                    message_part = {"role": content.role}
 
-                    for idx, part in enumerate(content.parts):
-                        # parse content
-                        if part.text:
-                            message_part[f"parts.{idx}.type"] = "text"
-                            message_part[f"parts.{idx}.content"] = part.text
-                        # parse tool_calls
-                        if part.function_call:
-                            message_part["tool_calls.0.id"] = (
-                                part.function_call.id
-                                if part.function_call.id
-                                else "<unkown_function_call_id>"
-                            )
-                            message_part["tool_calls.0.type"] = "function"
-                            message_part["tool_calls.0.function.name"] = (
-                                part.function_call.name
-                                if part.function_call.name
-                                else "<unknown_function_name>"
-                            )
-                            message_part["tool_calls.0.function.arguments"] = (
-                                json.dumps(part.function_call.args)
-                                if part.function_call.args
-                                else json.dumps({})
-                            )
-                    message_parts.append(message_part)
+def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
+    return ExtractorResponse(content="chat")
 
-            if message_parts:
-                messages.extend(message_parts)
 
-    return ExtractorResponse(type="event", content=messages)
+# def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
+#     event_attributes = {
+#         "content": str(params.llm_request.config.system_instruction),
+#         "role": "system",
+#     }
+#     return ExtractorResponse(type="event", content=event_attributes)
+
+
+# def llm_gen_ai_user_message(params: LLMAttributesParams) -> ExtractorResponse:
+#     # a content is a message
+#     messages = []
+
+#     for content in params.llm_request.contents:
+#         if content.role == "user":
+#             message_parts = []
+
+#             if content.parts:
+#                 if len(content.parts) == 1:
+#                     if content.parts[0].text:
+#                         message_parts.append(
+#                             {
+#                                 "role": content.role,
+#                                 "content": content.parts[0].text,
+#                             }
+#                         )
+#                     elif content.parts[0].function_response:
+#                         message_parts.append(
+#                             {
+#                                 "role": content.role,
+#                                 "content": str(
+#                                     content.parts[0].function_response.response
+#                                 ),
+#                             }
+#                         )
+#                 else:
+#                     message_part = {"role": content.role}
+#                     for idx, part in enumerate(content.parts):
+#                         # text part
+#                         if part.text:
+#                             message_part[f"parts.{idx}.type"] = "text"
+#                             message_part[f"parts.{idx}.content"] = part.text
+#                         # function response
+#                         if part.function_response:
+#                             message_part[f"parts.{idx}.type"] = "function"
+#                             message_part[f"parts.{idx}.content"] = str(
+#                                 part.function_response
+#                             )
+#                         if part.inline_data:
+#                             message_part[f"parts.{idx}.type"] = "image_url"
+#                             message_part[f"parts.{idx}.image_url.name"] = (
+#                                 part.inline_data.display_name.split("/")[-1]
+#                             )
+#                             message_part[f"parts.{idx}.image_url.url"] = (
+#                                 part.inline_data.display_name
+#                             )
+
+#                     message_parts.append(message_part)
+
+#             if message_parts:
+#                 messages.extend(message_parts)
+
+#     return ExtractorResponse(type="event", content=messages)
+
+
+# def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorResponse:
+#     # a content is a message
+#     messages = []
+
+#     # each part in each content we make it a message
+#     # e.g. 2 contents and 3 parts each means 6 messages
+#     for content in params.llm_request.contents:
+#         if content.role == "model":
+#             message_parts = []
+
+#             # each part we make it a message
+#             if content.parts:
+#                 # only one part
+#                 if len(content.parts) == 1:
+#                     if content.parts[0].text:
+#                         message_parts.append(
+#                             {
+#                                 "role": content.role,
+#                                 "content": content.parts[0].text,
+#                             }
+#                         )
+#                     elif content.parts[0].function_call:
+#                         pass
+#                 # multiple parts
+#                 else:
+#                     message_part = {"role": content.role}
+
+#                     for idx, part in enumerate(content.parts):
+#                         # parse content
+#                         if part.text:
+#                             message_part[f"parts.{idx}.type"] = "text"
+#                             message_part[f"parts.{idx}.content"] = part.text
+#                         # parse tool_calls
+#                         if part.function_call:
+#                             message_part["tool_calls.0.id"] = (
+#                                 part.function_call.id
+#                                 if part.function_call.id
+#                                 else "<unkown_function_call_id>"
+#                             )
+#                             message_part["tool_calls.0.type"] = "function"
+#                             message_part["tool_calls.0.function.name"] = (
+#                                 part.function_call.name
+#                                 if part.function_call.name
+#                                 else "<unknown_function_name>"
+#                             )
+#                             message_part["tool_calls.0.function.arguments"] = (
+#                                 safe_json_serialize(part.function_call.args)
+#                                 if part.function_call.args
+#                                 else json.dumps({})
+#                             )
+#                     message_parts.append(message_part)
+
+#             if message_parts:
+#                 messages.extend(message_parts)
+
+#     return ExtractorResponse(type="event", content=messages)
 
 
 def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
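The net effect of replacing the three per-role event extractors with llm_gen_ai_messages: one ordered list of single-key event dicts per LLM call. For a turn with a system prompt, a user question, a tool response, and a model reply, the returned event_list would look roughly like this (all values illustrative):

    [
        {"gen_ai.system.message": {"role": "system", "content": "You are ..."}},
        {"gen_ai.user.message": {"role": "user", "content": "What is the weather?"}},
        {"gen_ai.tool.message": {"role": "tool", "id": "call_0", "content": "{...}"}},
        {"gen_ai.assistant.message": {"role": "model",
                                      "parts.0.type": "text",
                                      "parts.0.text": "It is sunny."}},
    ]

Each dict holds exactly one key-value pair; that is the invariant the new event_list handler in types.py (further down in this diff) relies on when unpacking with next(iter(event.items())).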
@@ -326,7 +454,7 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
                     else "<unknown_function_name>"
                 )
                 message["message.tool_calls.0.function.arguments"] = (
-                    json.dumps(part.function_call.args)
+                    safe_json_serialize(part.function_call.args)
                     if part.function_call.args
                     else json.dumps({})
                 )
@@ -351,7 +479,7 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
                     else "<unknown_function_name>"
                 )
                 message["message.tool_calls.0.function.arguments"] = (
-                    json.dumps(part.function_call.args)
+                    safe_json_serialize(part.function_call.args)
                     if part.function_call.args
                     else json.dumps({})
                 )
@@ -359,34 +487,89 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
     return ExtractorResponse(type="event", content=message)
 
 
+def llm_input_value(params: LLMAttributesParams) -> ExtractorResponse:
+    return ExtractorResponse(
+        content=str(params.llm_request.model_dump(exclude_none=True))
+    )
+
+
+def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
+    return ExtractorResponse(
+        content=str(params.llm_response.model_dump(exclude_none=True))
+    )
+
+
+def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
+    functions = []
+
+    for idx, (tool_name, tool_instance) in enumerate(
+        params.llm_request.tools_dict.items()
+    ):
+        functions.append(
+            {
+                f"gen_ai.request.functions.{idx}.name": tool_instance.name,
+                f"gen_ai.request.functions.{idx}.description": tool_instance.description,
+                f"gen_ai.request.functions.{idx}.parameters": str(
+                    tool_instance._get_declaration().parameters.model_dump(  # type: ignore
+                        exclude_none=True
+                    )
+                    if tool_instance._get_declaration()
+                    and tool_instance._get_declaration().parameters  # type: ignore
+                    else {}
+                ),
+            }
+        )
+
+    return ExtractorResponse(content=functions)
+
+
 LLM_ATTRIBUTES = {
-    #
+    # -> 1. attributes
+    # -> 1.1. request
     "gen_ai.request.model": llm_gen_ai_request_model,
     "gen_ai.request.type": llm_gen_ai_request_type,
     "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
     "gen_ai.request.temperature": llm_gen_ai_request_temperature,
     "gen_ai.request.top_p": llm_gen_ai_request_top_p,
-    #
+    # CozeLoop required
+    "gen_ai.request.functions": llm_gen_ai_request_functions,
+    # -> 1.2. response
     "gen_ai.response.model": llm_gen_ai_response_model,
     "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
     "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
-    #
+    # -> 1.3. streaming
     "gen_ai.is_streaming": llm_gen_ai_is_streaming,
-    #
+    # -> 1.4. span kind
     "gen_ai.operation.name": llm_gen_ai_operation_name,
-    #
-    # events
-    "gen_ai.system.message": llm_gen_ai_system_message,
-    "gen_ai.user.message": llm_gen_ai_user_message,
-    "gen_ai.assistant.message": llm_gen_ai_assistant_message,
-    "gen_ai.choice": llm_gen_ai_choice,
-    # attributes
+    # -> 1.5. inputs
     "gen_ai.prompt": llm_gen_ai_prompt,
+    # -> 1.6. outputs
     "gen_ai.completion": llm_gen_ai_completion,
-    #
+    # -> 1.7. usage
     "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
     "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
     "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
     "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
     "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
+    # -> 2. events
+    # -> 2.1. inputs
+    # In order to adapt OpenTelemetry and CozeLoop rendering,
+    # and avoid error sequence of tool-call and too-response,
+    # we use `llm_gen_ai_messages` to upload system message, user message,
+    # and assistant message together.
+    # Correct sequence: system message, user message, tool message,
+    # and assistant message.
+    "gen_ai.messages": llm_gen_ai_messages,
+    # [depracated]
+    # "gen_ai.system.message": llm_gen_ai_system_message,
+    # [depracated]
+    # "gen_ai.user.message": llm_gen_ai_user_message,
+    # [depracated]
+    # "gen_ai.assistant.message": llm_gen_ai_assistant_message,
+    # -> 2.2. outputs
+    "gen_ai.choice": llm_gen_ai_choice,
+    # [debugging]
+    # "input.value": llm_input_value,
+    # [debugging]
+    # "output.value": llm_output_value,
 }
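LLM_ATTRIBUTES is a name-to-extractor registry; the tracer side (opentelemetry_tracer.py and telemetry.py, also touched in this release) walks it and applies each ExtractorResponse to the active span. A simplified sketch of that dispatch, mirroring the ExtractorResponse handling shown in the types.py hunks below (the function name and loop here are hypothetical, not veadk's actual API, and the attribute branch is simplified relative to the real handler):

    def apply_llm_attributes(span, params) -> None:
        # Hypothetical driver loop; the real one lives in veadk's telemetry code.
        for attr_name, extractor in LLM_ATTRIBUTES.items():
            response = extractor(params)
            if response.type == "attribute" and response.content is not None:
                span.set_attribute(attr_name, response.content)
            elif response.type == "event" and isinstance(response.content, dict):
                span.add_event(attr_name, response.content)
            elif response.type == "event_list" and isinstance(response.content, list):
                for event in response.content:
                    # each event dict carries its own event name as its only key
                    key, value = next(iter(event.items()))
                    span.add_event(key, value)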
veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py:

@@ -12,12 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
-
 from veadk.tracing.telemetry.attributes.extractors.types import (
     ExtractorResponse,
     ToolAttributesParams,
 )
+from veadk.utils.misc import safe_json_serialize
 
 
 def tool_gen_ai_operation_name(params: ToolAttributesParams) -> ExtractorResponse:

@@ -26,9 +25,8 @@ def tool_gen_ai_operation_name(params: ToolAttributesParams) -> ExtractorResponse:
 
 def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
     tool_input = {
-        "id": "123",
         "role": "tool",
-        "content": json.dumps(
+        "content": safe_json_serialize(
             {
                 "name": params.tool.name,
                 "description": params.tool.description,

@@ -39,32 +37,40 @@ def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
     return ExtractorResponse(type="event", content=tool_input)
 
 
-def …
+def tool_gen_ai_tool_input(params: ToolAttributesParams) -> ExtractorResponse:
     tool_input = {
         "name": params.tool.name,
         "description": params.tool.description,
         "parameters": params.args,
     }
-    return ExtractorResponse(…)
+    return ExtractorResponse(
+        content=safe_json_serialize(tool_input) or "<unknown_tool_input>"
+    )
 
 
 def tool_gen_ai_tool_name(params: ToolAttributesParams) -> ExtractorResponse:
     return ExtractorResponse(content=params.tool.name or "<unknown_tool_name>")
 
 
-def …
-    function_response = params.function_response_event.get_function_responses()[…
+def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
+    function_response = params.function_response_event.get_function_responses()[
+        0
+    ].model_dump()
     tool_output = {
-        "id": function_response…
-        "name": function_response…
-        "response": function_response…
+        "id": function_response["id"],
+        "name": function_response["name"],
+        "response": function_response["response"],
     }
-    return ExtractorResponse(…)
+    return ExtractorResponse(
+        content=safe_json_serialize(tool_output) or "<unknown_tool_output>"
+    )
 
 
 TOOL_ATTRIBUTES = {
     "gen_ai.operation.name": tool_gen_ai_operation_name,
     "gen_ai.tool.name": tool_gen_ai_tool_name,  # TLS required
-    "…
-    "…
+    "gen_ai.tool.input": tool_gen_ai_tool_input,  # TLS required
+    "gen_ai.tool.output": tool_gen_ai_tool_output,  # TLS required
+    "cozeloop.input": tool_gen_ai_tool_input,  # CozeLoop required
+    "cozeloop.output": tool_gen_ai_tool_output,  # CozeLoop required
 }
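Because the same two extractors are registered under both the gen_ai.tool.* and cozeloop.* keys, a single tool call now yields paired span attributes, along these lines (values illustrative, payloads abbreviated):

    gen_ai.tool.name   = "get_weather"
    gen_ai.tool.input  = '{"name": "get_weather", "description": "...", "parameters": {...}}'
    gen_ai.tool.output = '{"id": "call_0", "name": "get_weather", "response": {...}}'
    cozeloop.input     = (same value as gen_ai.tool.input)
    cozeloop.output    = (same value as gen_ai.tool.output)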
veadk/tracing/telemetry/attributes/extractors/types.py:

@@ -26,13 +26,14 @@ from opentelemetry.trace.span import Span
 
 @dataclass
 class ExtractorResponse:
-    content: …
+    content: Any
 
-    type: Literal["attribute", "event"] = "attribute"
+    type: Literal["attribute", "event", "event_list"] = "attribute"
     """Type of extractor response.
 
     `attribute`: span.add_attribute(attr_name, attr_value)
     `event`: span.add_event(...)
+    `event_list`: span.add_event(...) for each event in the list
     """
 
     @staticmethod

@@ -41,7 +42,7 @@ class ExtractorResponse:
     ) -> None:
         if response.type == "attribute":
             res = response.content
-            if isinstance(res, list):
+            if isinstance(res, list):
                 for _res in res:
                     if isinstance(_res, dict):
                         for k, v in _res.items():

@@ -54,7 +55,17 @@ class ExtractorResponse:
                 span.add_event(attr_name, response.content)
             elif isinstance(response.content, list):
                 for event in response.content:
-                    span.add_event(attr_name, event)
+                    span.add_event(attr_name, event)  # type: ignore
+        elif response.type == "event_list":
+            if isinstance(response.content, list):
+                for event in response.content:
+                    if isinstance(event, dict):
+                        # we ensure this dict only have one key-value pair
+                        key, value = next(iter(event.items()))
+                        span.add_event(key, value)
+            else:
+                # Unsupported response type, discard it.
+                pass
         else:
             # Unsupported response type, discard it.
             pass