veadk-python 0.2.5__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of veadk-python might be problematic.
- veadk/agent.py +19 -7
- veadk/cli/cli_deploy.py +2 -0
- veadk/cli/cli_init.py +25 -6
- veadk/consts.py +20 -1
- veadk/database/database_adapter.py +88 -0
- veadk/database/kv/redis_database.py +47 -0
- veadk/database/local_database.py +22 -4
- veadk/database/relational/mysql_database.py +58 -0
- veadk/database/vector/opensearch_vector_database.py +6 -3
- veadk/database/viking/viking_database.py +69 -0
- veadk/integrations/ve_cr/__init__.py +13 -0
- veadk/integrations/ve_cr/ve_cr.py +205 -0
- veadk/integrations/ve_faas/template/cookiecutter.json +2 -1
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/app.py +24 -1
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/requirements.txt +3 -1
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/run.sh +0 -7
- veadk/integrations/ve_faas/ve_faas.py +2 -0
- veadk/integrations/ve_faas/web_template/cookiecutter.json +17 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/__init__.py +13 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/clean.py +23 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/config.yaml.example +2 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/deploy.py +41 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/Dockerfile +23 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/app.py +123 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/init_db.py +46 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/models.py +36 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/requirements.txt +4 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/run.sh +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/static/css/style.css +368 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/static/js/admin.js +0 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/dashboard.html +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/edit_post.html +24 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/login.html +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/posts.html +53 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/base.html +45 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/index.html +29 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/post.html +14 -0
- veadk/integrations/ve_tos/ve_tos.py +92 -30
- veadk/knowledgebase/knowledgebase.py +8 -0
- veadk/runner.py +49 -16
- veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py +5 -0
- veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +253 -129
- veadk/tracing/telemetry/attributes/extractors/types.py +15 -4
- veadk/tracing/telemetry/opentelemetry_tracer.py +11 -5
- veadk/tracing/telemetry/telemetry.py +19 -4
- veadk/version.py +1 -1
- {veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/METADATA +1 -1
- {veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/RECORD +52 -30
- {veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/WHEEL +0 -0
- {veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/entry_points.txt +0 -0
- {veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/licenses/LICENSE +0 -0
- {veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/top_level.txt +0 -0
veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py CHANGED

```diff
@@ -144,6 +144,8 @@ def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
                 message[f"gen_ai.prompt.{idx}.type"] = "image_url"
                 message[f"gen_ai.prompt.{idx}.image_url.name"] = (
                     part.inline_data.display_name.split("/")[-1]
+                    if part.inline_data.display_name
+                    else "<unknown_image_name>"
                 )
                 message[f"gen_ai.prompt.{idx}.image_url.url"] = (
                     part.inline_data.display_name
```
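The two added lines guard against `inline_data.display_name` being `None`, which would otherwise raise `AttributeError` on `.split`. A standalone sketch of the pattern (names are illustrative, not VeADK's):

```python
from typing import Optional

def image_name(display_name: Optional[str]) -> str:
    # Fall back to a placeholder instead of crashing on None.split("/").
    return display_name.split("/")[-1] if display_name else "<unknown_image_name>"

assert image_name("images/cat.png") == "cat.png"
assert image_name(None) == "<unknown_image_name>"
```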
```diff
@@ -190,135 +192,241 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
     return ExtractorResponse(content=messages)
 
 
-def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
-    # return params.llm_request.stream
-    return ExtractorResponse(content=None)
-
-
-def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
-    return ExtractorResponse(content="chat")
-
-
-def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
-    event_attributes = {
-        "content": str(params.llm_request.config.system_instruction),
-        "role": "system",
-    }
-    return ExtractorResponse(type="event", content=event_attributes)
-
+def llm_gen_ai_messages(params: LLMAttributesParams) -> ExtractorResponse:
+    events = []
 
-def llm_gen_ai_user_message(params: LLMAttributesParams) -> ExtractorResponse:
-    # a content is a message
-    messages = []
+    # system message
+    events.append(
+        {
+            "gen_ai.system.message": {
+                "role": "system",
+                "content": str(params.llm_request.config.system_instruction),
+            }
+        }
+    )
 
-    for content in params.llm_request.contents:
-        if content.role == "user":
-            message_parts = []
-
-            if content.parts:
-                if len(content.parts) == 1:
-                    if content.parts[0].text:
-                        message_parts.append(
-                            {
-                                "role": content.role,
-                                "content": content.parts[0].text,
-                            }
-                        )
-                    elif content.parts[0].function_response:
-                        message_parts.append(
-                            {
-                                "role": content.role,
-                                "content": str(
-                                    content.parts[0].function_response.response
-                                ),
-                            }
-                        )
-                else:
-                    message_part = {"role": content.role}
+    # user, tool, and assistant message
+    if params.llm_request and params.llm_request.contents:
+        for content in params.llm_request.contents:
+            if content and content.parts:
+                # content.role == "user"
+                # part.function_response -> gen_ai.tool.message
+                # not part.function_response -> gen_ai.user.message
+                # content.role == "model" -> gen_ai.assistant.message
+                if content.role == "user":
+                    user_event = {}
+                    user_event["gen_ai.user.message"] = {"role": content.role}
                     for idx, part in enumerate(content.parts):
-                        # text part
-                        if part.text:
-                            message_part[f"parts.{idx}.type"] = "text"
-                            message_part[f"parts.{idx}.content"] = part.text
-                        # function response
                         if part.function_response:
-                            message_part[f"parts.{idx}.type"] = "function"
-                            message_part[f"parts.{idx}.content"] = str(
-                                part.function_response
+                            events.append(
+                                {
+                                    "gen_ai.tool.message": {
+                                        "role": "tool",
+                                        "id": part.function_response.id,
+                                        "content": safe_json_serialize(
+                                            part.function_response.response
+                                        ),
+                                    }
+                                }
                             )
-                        if part.inline_data:
-                            message_part[f"parts.{idx}.type"] = "image_url"
-                            message_part[f"parts.{idx}.image_url.name"] = (
-                                part.inline_data.display_name.split("/")[-1]
+                        else:
+                            if part.text:
+                                if len(content.parts) == 1:
+                                    user_event["gen_ai.user.message"].update(
+                                        {"content": part.text}
+                                    )
+                                else:
+                                    user_event["gen_ai.user.message"].update(
+                                        {
+                                            f"parts.{idx}.type": "text",
+                                            f"parts.{idx}.text": part.text,
+                                        },
+                                    )
+                            if part.inline_data:
+                                if len(content.parts) == 1:
+                                    # TODO(qingliang)
+                                    pass
+                                else:
+                                    user_event["gen_ai.user.message"].update(
+                                        {
+                                            f"parts.{idx}.type": "image_url",
+                                            f"parts.{idx}.image_url.name": (
+                                                part.inline_data.display_name.split(
+                                                    "/"
+                                                )[-1]
+                                                if part.inline_data.display_name
+                                                else "<unknown_image_name>"
+                                            ),
+                                            f"parts.{idx}.image_url.url": (
+                                                part.inline_data.display_name
+                                                if part.inline_data.display_name
+                                                else "<unknown_image_url>"
+                                            ),
+                                        }
+                                    )
+                    # in case of only function response
+                    if len(user_event["gen_ai.user.message"].items()) > 1:
+                        events.append(user_event)
+                elif content.role == "model":
+                    event = {}
+                    event["gen_ai.assistant.message"] = {"role": content.role}
+                    for idx, part in enumerate(content.parts):
+                        if part.text:
+                            event["gen_ai.assistant.message"].update(
+                                {
+                                    f"parts.{idx}.type": "text",
+                                    f"parts.{idx}.text": part.text,
+                                }
                             )
-                            message_part[f"parts.{idx}.image_url.url"] = (
-                                part.inline_data.display_name
+                        if part.function_call:
+                            event["gen_ai.assistant.message"].update(
+                                {
+                                    "tool_calls.0.id": str(part.function_call.id),
+                                    "tool_calls.0.type": "function",
+                                    "tool_calls.0.function.name": part.function_call.name
+                                    if part.function_call.name
+                                    else "<unknown_function_name>",
+                                    "tool_calls.0.function.arguments": safe_json_serialize(
+                                        part.function_call.args
+                                    )
+                                    if part.function_call.args
+                                    else json.dumps({}),
+                                }
                             )
+                    events.append(event)
 
-                    message_parts.append(message_part)
-
-            if message_parts:
-                messages.extend(message_parts)
-
-    return ExtractorResponse(type="event", content=messages)
+    return ExtractorResponse(type="event_list", content=events)
 
 
-def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorResponse:
-    # a content is a message
-    messages = []
+def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
+    # return params.llm_request.stream
+    return ExtractorResponse(content=None)
 
-    # each part in each content we make it a message
-    # e.g. 2 contents and 3 parts each means 6 messages
-    for content in params.llm_request.contents:
-        if content.role == "model":
-            message_parts = []
-
-            # each part we make it a message
-            if content.parts:
-                # only one part
-                if len(content.parts) == 1:
-                    if content.parts[0].text:
-                        message_parts.append(
-                            {
-                                "role": content.role,
-                                "content": content.parts[0].text,
-                            }
-                        )
-                    elif content.parts[0].function_call:
-                        pass
-                # multiple parts
-                else:
-                    message_part = {"role": content.role}
 
-                    for idx, part in enumerate(content.parts):
-                        # parse content
-                        if part.text:
-                            message_part[f"parts.{idx}.type"] = "text"
-                            message_part[f"parts.{idx}.content"] = part.text
-                        # parse tool_calls
-                        if part.function_call:
-                            message_part["tool_calls.0.id"] = (
-                                part.function_call.id
-                                if part.function_call.id
-                                else "<unkown_function_call_id>"
-                            )
-                            message_part["tool_calls.0.type"] = "function"
-                            message_part["tool_calls.0.function.name"] = (
-                                part.function_call.name
-                                if part.function_call.name
-                                else "<unknown_function_name>"
-                            )
-                            message_part["tool_calls.0.function.arguments"] = (
-                                safe_json_serialize(part.function_call.args)
-                                if part.function_call.args
-                                else json.dumps({})
-                            )
-                            message_parts.append(message_part)
+def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
+    return ExtractorResponse(content="chat")
 
-            if message_parts:
-                messages.extend(message_parts)
 
-    return ExtractorResponse(type="event", content=messages)
+# def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
+#     event_attributes = {
+#         "content": str(params.llm_request.config.system_instruction),
+#         "role": "system",
+#     }
+#     return ExtractorResponse(type="event", content=event_attributes)
+
+
+# def llm_gen_ai_user_message(params: LLMAttributesParams) -> ExtractorResponse:
+#     # a content is a message
+#     messages = []
+
+#     for content in params.llm_request.contents:
+#         if content.role == "user":
+#             message_parts = []
+
+#             if content.parts:
+#                 if len(content.parts) == 1:
+#                     if content.parts[0].text:
+#                         message_parts.append(
+#                             {
+#                                 "role": content.role,
+#                                 "content": content.parts[0].text,
+#                             }
+#                         )
+#                     elif content.parts[0].function_response:
+#                         message_parts.append(
+#                             {
+#                                 "role": content.role,
+#                                 "content": str(
+#                                     content.parts[0].function_response.response
+#                                 ),
+#                             }
+#                         )
+#                 else:
+#                     message_part = {"role": content.role}
+#                     for idx, part in enumerate(content.parts):
+#                         # text part
+#                         if part.text:
+#                             message_part[f"parts.{idx}.type"] = "text"
+#                             message_part[f"parts.{idx}.content"] = part.text
+#                         # function response
+#                         if part.function_response:
+#                             message_part[f"parts.{idx}.type"] = "function"
+#                             message_part[f"parts.{idx}.content"] = str(
+#                                 part.function_response
+#                             )
+#                         if part.inline_data:
+#                             message_part[f"parts.{idx}.type"] = "image_url"
+#                             message_part[f"parts.{idx}.image_url.name"] = (
+#                                 part.inline_data.display_name.split("/")[-1]
+#                             )
+#                             message_part[f"parts.{idx}.image_url.url"] = (
+#                                 part.inline_data.display_name
+#                             )
+
+#                     message_parts.append(message_part)
+
+#             if message_parts:
+#                 messages.extend(message_parts)
+
+#     return ExtractorResponse(type="event", content=messages)
+
+
+# def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorResponse:
+#     # a content is a message
+#     messages = []
+
+#     # each part in each content we make it a message
+#     # e.g. 2 contents and 3 parts each means 6 messages
+#     for content in params.llm_request.contents:
+#         if content.role == "model":
+#             message_parts = []
+
+#             # each part we make it a message
+#             if content.parts:
+#                 # only one part
+#                 if len(content.parts) == 1:
+#                     if content.parts[0].text:
+#                         message_parts.append(
+#                             {
+#                                 "role": content.role,
+#                                 "content": content.parts[0].text,
+#                             }
+#                         )
+#                     elif content.parts[0].function_call:
+#                         pass
+#                 # multiple parts
+#                 else:
+#                     message_part = {"role": content.role}
+
+#                     for idx, part in enumerate(content.parts):
+#                         # parse content
+#                         if part.text:
+#                             message_part[f"parts.{idx}.type"] = "text"
+#                             message_part[f"parts.{idx}.content"] = part.text
+#                         # parse tool_calls
+#                         if part.function_call:
+#                             message_part["tool_calls.0.id"] = (
+#                                 part.function_call.id
+#                                 if part.function_call.id
+#                                 else "<unkown_function_call_id>"
+#                             )
+#                             message_part["tool_calls.0.type"] = "function"
+#                             message_part["tool_calls.0.function.name"] = (
+#                                 part.function_call.name
+#                                 if part.function_call.name
+#                                 else "<unknown_function_name>"
+#                             )
+#                             message_part["tool_calls.0.function.arguments"] = (
+#                                 safe_json_serialize(part.function_call.args)
+#                                 if part.function_call.args
+#                                 else json.dumps({})
+#                             )
+#                             message_parts.append(message_part)
+
+#             if message_parts:
+#                 messages.extend(message_parts)
+
+#     return ExtractorResponse(type="event", content=messages)
 
 
 def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
```
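The new `llm_gen_ai_messages` extractor folds system, user, tool, and assistant messages into one ordered list, one dict per message, in the order the request contents are walked. A hand-written sketch of the output shape for a typical tool round-trip (values are illustrative):

```python
# Plausible `events` list for a system + user + assistant(tool call) + tool
# exchange; each dict carries exactly one event-name -> attributes pair.
events = [
    {"gen_ai.system.message": {"role": "system", "content": "You are a helpful agent."}},
    {"gen_ai.user.message": {"role": "user", "content": "Weather in Beijing?"}},
    {
        "gen_ai.assistant.message": {
            "role": "model",
            "tool_calls.0.id": "call_0",
            "tool_calls.0.type": "function",
            "tool_calls.0.function.name": "get_weather",
            "tool_calls.0.function.arguments": '{"city": "Beijing"}',
        }
    },
    {"gen_ai.tool.message": {"role": "tool", "id": "call_0", "content": '{"temp": "21C"}'}},
]
```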
```diff
@@ -416,36 +524,52 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 LLM_ATTRIBUTES = {
-    #
+    # -> 1. attributes
+    # -> 1.1. request
     "gen_ai.request.model": llm_gen_ai_request_model,
     "gen_ai.request.type": llm_gen_ai_request_type,
     "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
     "gen_ai.request.temperature": llm_gen_ai_request_temperature,
     "gen_ai.request.top_p": llm_gen_ai_request_top_p,
+    # CozeLoop required
     "gen_ai.request.functions": llm_gen_ai_request_functions,
-    #
+    # -> 1.2. response
    "gen_ai.response.model": llm_gen_ai_response_model,
     "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
     "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
-    #
+    # -> 1.3. streaming
     "gen_ai.is_streaming": llm_gen_ai_is_streaming,
-    #
+    # -> 1.4. span kind
     "gen_ai.operation.name": llm_gen_ai_operation_name,
-    #
-    # events
-    "gen_ai.system.message": llm_gen_ai_system_message,
-    "gen_ai.user.message": llm_gen_ai_user_message,
-    "gen_ai.assistant.message": llm_gen_ai_assistant_message,
-    "gen_ai.choice": llm_gen_ai_choice,
-    # attributes
+    # -> 1.5. inputs
     "gen_ai.prompt": llm_gen_ai_prompt,
+    # -> 1.6. outputs
     "gen_ai.completion": llm_gen_ai_completion,
-    #
-    # "output.value": llm_output_value,
-    # ===== usage =====
+    # -> 1.7. usage
     "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
     "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
     "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
     "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
     "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
+    # -> 2. events
+    # -> 2.1. inputs
+    # In order to adapt OpenTelemetry and CozeLoop rendering,
+    # and avoid error sequence of tool-call and too-response,
+    # we use `llm_gen_ai_messages` to upload system message, user message,
+    # and assistant message together.
+    # Correct sequence: system message, user message, tool message,
+    # and assistant message.
+    "gen_ai.messages": llm_gen_ai_messages,
+    # [depracated]
+    # "gen_ai.system.message": llm_gen_ai_system_message,
+    # [depracated]
+    # "gen_ai.user.message": llm_gen_ai_user_message,
+    # [depracated]
+    # "gen_ai.assistant.message": llm_gen_ai_assistant_message,
+    # -> 2.2. outputs
+    "gen_ai.choice": llm_gen_ai_choice,
+    # [debugging]
+    # "input.value": llm_input_value,
+    # [debugging]
+    # "output.value": llm_output_value,
 }
```
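A minimal sketch of how a name-to-extractor registry like `LLM_ATTRIBUTES` is typically driven. The dispatch mirrors `ExtractorResponse` in the types.py hunk below, but the loop itself is an assumed illustration, not VeADK's actual driver code:

```python
def apply_llm_attributes(span, params) -> None:
    # Run every registered extractor and apply its response to the span,
    # either as a plain attribute or as one or more events.
    for name, extractor in LLM_ATTRIBUTES.items():
        response = extractor(params)
        if response.content is None:
            continue  # extractor had nothing to report
        if response.type == "attribute":
            span.set_attribute(name, response.content)
        elif response.type == "event":
            span.add_event(name, response.content)
        elif response.type == "event_list":
            # each entry holds a single event-name -> attributes pair
            for event in response.content:
                event_name, attributes = next(iter(event.items()))
                span.add_event(event_name, attributes)
```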
veadk/tracing/telemetry/attributes/extractors/types.py CHANGED

```diff
@@ -26,13 +26,14 @@ from opentelemetry.trace.span import Span
 
 @dataclass
 class ExtractorResponse:
-    content:
+    content: Any
 
-    type: Literal["attribute", "event"] = "attribute"
+    type: Literal["attribute", "event", "event_list"] = "attribute"
     """Type of extractor response.
 
     `attribute`: span.add_attribute(attr_name, attr_value)
     `event`: span.add_event(...)
+    `event_list`: span.add_event(...) for each event in the list
     """
 
     @staticmethod
@@ -41,7 +42,7 @@ class ExtractorResponse:
     ) -> None:
         if response.type == "attribute":
             res = response.content
-            if isinstance(res, list):
+            if isinstance(res, list):
                 for _res in res:
                     if isinstance(_res, dict):
                         for k, v in _res.items():
@@ -54,7 +55,17 @@ class ExtractorResponse:
                 span.add_event(attr_name, response.content)
             elif isinstance(response.content, list):
                 for event in response.content:
-                    span.add_event(attr_name, event)
+                    span.add_event(attr_name, event)  # type: ignore
+        elif response.type == "event_list":
+            if isinstance(response.content, list):
+                for event in response.content:
+                    if isinstance(event, dict):
+                        # we ensure this dict only have one key-value pair
+                        key, value = next(iter(event.items()))
+                        span.add_event(key, value)
+            else:
+                # Unsupported response type, discard it.
+                pass
         else:
             # Unsupported response type, discard it.
             pass
```
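The `event_list` branch relies on a one-pair-per-dict contract: `next(iter(event.items()))` reads only the first pair, so producers must emit exactly one event name per dict. A runnable stand-in demonstrating the new branch (not the real classes):

```python
from dataclasses import dataclass
from typing import Any, Literal

@dataclass
class Response:  # trimmed stand-in for ExtractorResponse
    content: Any
    type: Literal["attribute", "event", "event_list"] = "attribute"

class FakeSpan:  # stand-in for an OpenTelemetry span
    def add_event(self, name: str, attributes: dict) -> None:
        print(f"event={name} attrs={attributes}")

resp = Response(
    type="event_list",
    content=[
        {"gen_ai.system.message": {"role": "system", "content": "..."}},
        {"gen_ai.user.message": {"role": "user", "content": "hi"}},
    ],
)

span = FakeSpan()
if resp.type == "event_list" and isinstance(resp.content, list):
    for event in resp.content:
        if isinstance(event, dict):
            name, attrs = next(iter(event.items()))  # first (and only) pair
            span.add_event(name, attrs)
```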
veadk/tracing/telemetry/opentelemetry_tracer.py CHANGED

```diff
@@ -30,9 +30,9 @@ from typing_extensions import override
 from veadk.tracing.base_tracer import BaseTracer
 from veadk.tracing.telemetry.exporters.apmplus_exporter import APMPlusExporter
 from veadk.tracing.telemetry.exporters.base_exporter import BaseExporter
-from veadk.tracing.telemetry.exporters.inmemory_exporter import InMemoryExporter
 from veadk.tracing.telemetry.exporters.inmemory_exporter import (
     _INMEMORY_EXPORTER_INSTANCE,
+    InMemoryExporter,
 )
 from veadk.utils.logger import get_logger
 from veadk.utils.patches import patch_google_adk_telemetry
@@ -58,6 +58,8 @@ class OpentelemetryTracer(BaseModel, BaseTracer):
         description="The exporters to export spans.",
     )
 
+    # Forbid InMemoryExporter in exporters list
+    # cause we need to set custom in-memory span processor by VeADK
     @field_validator("exporters")
     @classmethod
     def forbid_inmemory_exporter(cls, v: list[BaseExporter]) -> list[BaseExporter]:
@@ -67,14 +69,18 @@ class OpentelemetryTracer(BaseModel, BaseTracer):
         return v
 
     def model_post_init(self, context: Any) -> None:
+        # Replace Google ADK tracing funcs
+        # `trace_call_llm` and `trace_tool_call`
         patch_google_adk_telemetry()
-        self._init_global_tracer_provider()
-
-        # GoogleADKInstrumentor().instrument()
 
-    def _init_global_tracer_provider(self) -> None:
+        # We save internal processors for tracing data dump
         self._processors = []
 
+        # Initialize global tracer provider to avoid VeFaaS global tracer
+        # provider conflicts
+        self._init_global_tracer_provider()
+
+    def _init_global_tracer_provider(self) -> None:
         # set provider anyway, then get global provider
         trace_api.set_tracer_provider(trace_sdk.TracerProvider())
         global_tracer_provider: TracerProvider = trace_api.get_tracer_provider()  # type: ignore
```
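The validator change follows a standard Pydantic v2 pattern: reject caller-supplied exporters of a type the class manages itself. A minimal sketch with stand-in classes (the real validator operates on `list[BaseExporter]`):

```python
from pydantic import BaseModel, field_validator

class InMemoryExporter:  # stand-in for VeADK's exporter class
    pass

class TracerConfig(BaseModel):
    exporters: list[object] = []

    @field_validator("exporters")
    @classmethod
    def forbid_inmemory_exporter(cls, v: list[object]) -> list[object]:
        # The tracer wires its own in-memory span processor, so a
        # user-supplied InMemoryExporter would duplicate it.
        if any(isinstance(e, InMemoryExporter) for e in v):
            raise ValueError("InMemoryExporter is managed by the tracer itself")
        return v

TracerConfig(exporters=["console"])              # ok
# TracerConfig(exporters=[InMemoryExporter()])   # raises ValidationError
```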
veadk/tracing/telemetry/telemetry.py CHANGED

```diff
@@ -92,10 +92,16 @@ def _set_agent_input_attribute(
                 "gen_ai.user.message",
                 {
                     f"parts.{idx}.type": "image_url",
-                    f"parts.{idx}.image_url.name": part.inline_data.display_name.split(
-                        "/"
-                    )[-1],
-                    f"parts.{idx}.image_url.url": part.inline_data.display_name,
+                    f"parts.{idx}.image_url.name": (
+                        part.inline_data.display_name.split("/")[-1]
+                        if part.inline_data.display_name
+                        else "<unknown_image_name>"
+                    ),
+                    f"parts.{idx}.image_url.url": (
+                        part.inline_data.display_name
+                        if part.inline_data.display_name
+                        else "<unknown_image_url>"
+                    ),
                 },
             )
 
@@ -221,6 +227,15 @@ def trace_call_llm(
         model_name=invocation_context.agent.model_name
         if isinstance(invocation_context.agent, Agent)
         else "",
+        call_type=(
+            span.context.trace_state.get("call_type", "")
+            if (
+                hasattr(span, "context")
+                and hasattr(span.context, "trace_state")
+                and hasattr(span.context.trace_state, "get")
+            )
+            else ""
+        ),
     )
 
     llm_attributes_mapping = ATTRIBUTES.get("llm", {})
```
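The `call_type` plumbing reads from the span's W3C trace state only when every attribute in the chain exists, so foreign spans or test doubles without a `context` degrade to an empty string. The same guarded lookup as a standalone helper (a sketch; the in-tree code inlines it):

```python
def extract_call_type(span) -> str:
    # Best-effort: spans from other SDKs may lack context/trace_state,
    # so probe each hop with hasattr before calling .get().
    if (
        hasattr(span, "context")
        and hasattr(span.context, "trace_state")
        and hasattr(span.context.trace_state, "get")
    ):
        return span.context.trace_state.get("call_type", "")
    return ""
```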
veadk/version.py CHANGED

{veadk_python-0.2.5.dist-info → veadk_python-0.2.6.dist-info}/METADATA CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: veadk-python
-Version: 0.2.5
+Version: 0.2.6
 Summary: Volcengine agent development kit, integrations with Volcengine cloud services.
 Author-email: Yaozheng Fang <fangyozheng@gmail.com>, Guodong Li <cu.eric.lee@gmail.com>, Zhi Han <sliverydayday@gmail.com>, Meng Wang <mengwangwm@gmail.com>
 License: Apache License
```