monocle-apptrace 0.3.0b6__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of monocle-apptrace might be problematic; consult the package registry page for more details.

Files changed (50)
  1. monocle_apptrace/__init__.py +1 -0
  2. monocle_apptrace/exporters/aws/s3_exporter.py +20 -6
  3. monocle_apptrace/exporters/aws/s3_exporter_opendal.py +22 -11
  4. monocle_apptrace/exporters/azure/blob_exporter.py +22 -8
  5. monocle_apptrace/exporters/azure/blob_exporter_opendal.py +23 -8
  6. monocle_apptrace/exporters/exporter_processor.py +128 -3
  7. monocle_apptrace/exporters/file_exporter.py +16 -0
  8. monocle_apptrace/exporters/monocle_exporters.py +15 -3
  9. monocle_apptrace/exporters/okahu/okahu_exporter.py +8 -6
  10. monocle_apptrace/instrumentation/__init__.py +1 -0
  11. monocle_apptrace/instrumentation/common/__init__.py +2 -0
  12. monocle_apptrace/instrumentation/common/constants.py +7 -1
  13. monocle_apptrace/instrumentation/common/instrumentor.py +105 -20
  14. monocle_apptrace/instrumentation/common/span_handler.py +46 -28
  15. monocle_apptrace/instrumentation/common/tracing.md +68 -0
  16. monocle_apptrace/instrumentation/common/utils.py +70 -26
  17. monocle_apptrace/instrumentation/common/wrapper.py +27 -23
  18. monocle_apptrace/instrumentation/common/wrapper_method.py +5 -2
  19. monocle_apptrace/instrumentation/metamodel/anthropic/__init__.py +0 -0
  20. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +64 -0
  21. monocle_apptrace/instrumentation/metamodel/anthropic/entities/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +72 -0
  23. monocle_apptrace/instrumentation/metamodel/anthropic/methods.py +24 -0
  24. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +2 -2
  25. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +2 -1
  26. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +45 -3
  27. monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +49 -0
  28. monocle_apptrace/instrumentation/metamodel/flask/methods.py +10 -1
  29. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +4 -1
  30. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +1 -4
  31. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +12 -4
  32. monocle_apptrace/instrumentation/metamodel/langchain/methods.py +6 -14
  33. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +2 -15
  34. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +9 -4
  35. monocle_apptrace/instrumentation/metamodel/openai/methods.py +16 -2
  36. monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
  37. monocle_apptrace/instrumentation/metamodel/requests/entities/http.py +51 -0
  38. monocle_apptrace/instrumentation/metamodel/requests/methods.py +2 -1
  39. monocle_apptrace/instrumentation/metamodel/teamsai/__init__.py +0 -0
  40. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +58 -0
  41. monocle_apptrace/instrumentation/metamodel/teamsai/entities/__init__.py +0 -0
  42. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/__init__.py +0 -0
  43. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +80 -0
  44. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +70 -0
  45. monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +26 -0
  46. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1.dist-info}/METADATA +2 -1
  47. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1.dist-info}/RECORD +50 -35
  48. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1.dist-info}/WHEEL +0 -0
  49. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1.dist-info}/licenses/LICENSE +0 -0
  50. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1.dist-info}/licenses/NOTICE +0 -0
@@ -13,7 +13,6 @@ LLAMAINDEX_METHODS = [
13
13
  "package": "llama_index.core.indices.base_retriever",
14
14
  "object": "BaseRetriever",
15
15
  "method": "retrieve",
16
- "span_name": "llamaindex.retrieve",
17
16
  "wrapper_method": task_wrapper,
18
17
  "output_processor": RETRIEVAL
19
18
  },
@@ -21,7 +20,6 @@ LLAMAINDEX_METHODS = [
21
20
  "package": "llama_index.core.indices.base_retriever",
22
21
  "object": "BaseRetriever",
23
22
  "method": "aretrieve",
24
- "span_name": "llamaindex.retrieve",
25
23
  "wrapper_method": atask_wrapper,
26
24
  "output_processor": RETRIEVAL
27
25
  },
@@ -29,23 +27,18 @@ LLAMAINDEX_METHODS = [
29
27
  "package": "llama_index.core.base.base_query_engine",
30
28
  "object": "BaseQueryEngine",
31
29
  "method": "query",
32
- "span_name": "llamaindex.query",
33
- "wrapper_method": task_wrapper,
34
- "span_type": "workflow"
30
+ "wrapper_method": task_wrapper
35
31
  },
36
32
  {
37
33
  "package": "llama_index.core.base.base_query_engine",
38
34
  "object": "BaseQueryEngine",
39
35
  "method": "aquery",
40
- "span_name": "llamaindex.query",
41
- "wrapper_method": atask_wrapper,
42
- "span_type": "workflow"
36
+ "wrapper_method": atask_wrapper
43
37
  },
44
38
  {
45
39
  "package": "llama_index.core.llms.custom",
46
40
  "object": "CustomLLM",
47
41
  "method": "chat",
48
- "span_name": "llamaindex.llmchat",
49
42
  "wrapper_method": task_wrapper,
50
43
  "output_processor": INFERENCE
51
44
  },
@@ -53,7 +46,6 @@ LLAMAINDEX_METHODS = [
53
46
  "package": "llama_index.core.llms.custom",
54
47
  "object": "CustomLLM",
55
48
  "method": "achat",
56
- "span_name": "llamaindex.llmchat",
57
49
  "wrapper_method": atask_wrapper,
58
50
  "output_processor": INFERENCE,
59
51
 
@@ -62,7 +54,6 @@ LLAMAINDEX_METHODS = [
62
54
  "package": "llama_index.llms.openai.base",
63
55
  "object": "OpenAI",
64
56
  "method": "chat",
65
- "span_name": "llamaindex.openai",
66
57
  "wrapper_method": task_wrapper,
67
58
  "output_processor": INFERENCE
68
59
  },
@@ -70,7 +61,6 @@ LLAMAINDEX_METHODS = [
70
61
  "package": "llama_index.llms.openai.base",
71
62
  "object": "OpenAI",
72
63
  "method": "achat",
73
- "span_name": "llamaindex.openai",
74
64
  "wrapper_method": atask_wrapper,
75
65
  "output_processor": INFERENCE
76
66
  },
@@ -78,7 +68,6 @@ LLAMAINDEX_METHODS = [
78
68
  "package": "llama_index.llms.mistralai.base",
79
69
  "object": "MistralAI",
80
70
  "method": "chat",
81
- "span_name": "llamaindex.mistralai",
82
71
  "wrapper_method": task_wrapper,
83
72
  "output_processor": INFERENCE
84
73
  },
@@ -86,7 +75,6 @@ LLAMAINDEX_METHODS = [
86
75
  "package": "llama_index.llms.mistralai.base",
87
76
  "object": "MistralAI",
88
77
  "method": "achat",
89
- "span_name": "llamaindex.mistralai",
90
78
  "wrapper_method": atask_wrapper,
91
79
  "output_processor": INFERENCE
92
80
  },
@@ -94,7 +82,6 @@ LLAMAINDEX_METHODS = [
94
82
  "package": "llama_index.core.agent",
95
83
  "object": "ReActAgent",
96
84
  "method": "chat",
97
- "span_name": "react.agent",
98
85
  "wrapper_method": task_wrapper,
99
86
  "output_processor": AGENT
100
87
  }
@@ -19,6 +19,10 @@ def extract_messages(kwargs):
19
19
  """Extract system and user messages"""
20
20
  try:
21
21
  messages = []
22
+ if 'instructions' in kwargs:
23
+ messages.append({'instructions': kwargs.get('instructions', {})})
24
+ if 'input' in kwargs:
25
+ messages.append({'input': kwargs.get('input', {})})
22
26
  if 'messages' in kwargs and len(kwargs['messages']) >0:
23
27
  for msg in kwargs['messages']:
24
28
  if msg.get('content') and msg.get('role'):
@@ -32,6 +36,8 @@ def extract_messages(kwargs):
32
36
 
33
37
  def extract_assistant_message(response):
34
38
  try:
39
+ if hasattr(response,"output_text") and len(response.output_text):
40
+ return response.output_text
35
41
  if response is not None and hasattr(response,"choices") and len(response.choices) >0:
36
42
  if hasattr(response.choices[0],"message"):
37
43
  return response.choices[0].message.content
@@ -85,10 +91,9 @@ def update_span_from_llm_response(response):
85
91
  response_metadata = response.response_metadata
86
92
  token_usage = response_metadata.get("token_usage")
87
93
  if token_usage is not None:
88
- meta_dict.update(
89
- {"completion_tokens": getattr(response.usage, "completion_tokens", None)})
90
- meta_dict.update({"prompt_tokens": getattr(response.usage, "prompt_tokens", None)})
91
- meta_dict.update({"total_tokens": getattr(response.usage, "total_tokens", None)})
94
+ meta_dict.update({"completion_tokens": getattr(token_usage,"completion_tokens",None) or getattr(token_usage,"output_tokens",None)})
95
+ meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens", None) or getattr(token_usage, "input_tokens", None)})
96
+ meta_dict.update({"total_tokens": getattr(token_usage,"total_tokens")})
92
97
  return meta_dict
93
98
 
94
99
  def extract_vector_input(vector_input: dict):
@@ -28,7 +28,6 @@ OPENAI_METHODS = [
28
28
  "object": "Embeddings",
29
29
  "method": "create",
30
30
  "wrapper_method": task_wrapper,
31
- "span_name": "openai_embeddings",
32
31
  "span_handler": "non_framework_handler",
33
32
  "output_processor": RETRIEVAL
34
33
  },
@@ -37,9 +36,24 @@ OPENAI_METHODS = [
37
36
  "object": "AsyncEmbeddings",
38
37
  "method": "create",
39
38
  "wrapper_method": atask_wrapper,
40
- "span_name": "openai_embeddings",
41
39
  "span_handler": "non_framework_handler",
42
40
  "output_processor": RETRIEVAL
41
+ },
42
+ {
43
+ "package": "openai.resources.responses",
44
+ "object": "Responses",
45
+ "method": "create",
46
+ "wrapper_method": task_wrapper,
47
+ "span_handler": "non_framework_handler",
48
+ "output_processor": INFERENCE
49
+ },
50
+ {
51
+ "package": "openai.resources.responses",
52
+ "object": "AsyncResponses",
53
+ "method": "create",
54
+ "wrapper_method": atask_wrapper,
55
+ "span_handler": "non_framework_handler",
56
+ "output_processor": INFERENCE
43
57
  }
44
58
 
45
59
  ]
@@ -2,6 +2,36 @@ import os
2
2
  from monocle_apptrace.instrumentation.metamodel.requests import allowed_urls
3
3
  from opentelemetry.propagate import inject
4
4
  from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
5
+ from monocle_apptrace.instrumentation.common.utils import add_monocle_trace_state
6
+ from urllib.parse import urlparse, ParseResult
7
+
8
+
9
+ def get_route(kwargs):
10
+ url:str = kwargs['url']
11
+ parsed_url:ParseResult = urlparse(url)
12
+ return f"{parsed_url.netloc}{parsed_url.path}"
13
+
14
+ def get_method(kwargs) -> str:
15
+ return kwargs['method'] if 'method' in kwargs else 'GET'
16
+
17
+ def get_params(kwargs) -> dict:
18
+ url:str = kwargs['url']
19
+ parsed_url:ParseResult = urlparse(url)
20
+ return parsed_url.query
21
+
22
+ def get_headers(kwargs) -> dict:
23
+ return kwargs['headers'] if 'headers' in kwargs else {}
24
+
25
+ def get_body(kwargs) -> dict:
26
+ body = {}
27
+ return body
28
+
29
+ def extract_response(result) -> str:
30
+ return result.text if hasattr(result, 'text') else str(result)
31
+
32
+ def extract_status(result) -> str:
33
+ return f"{result.status_code} {result.reason}"
34
+
5
35
 
6
36
  def request_pre_task_processor(kwargs):
7
37
  # add traceparent to the request headers in kwargs
@@ -9,6 +39,7 @@ def request_pre_task_processor(kwargs):
9
39
  headers = {}
10
40
  else:
11
41
  headers = kwargs['headers'].copy()
42
+ add_monocle_trace_state(headers)
12
43
  inject(headers)
13
44
  kwargs['headers'] = headers
14
45
 
@@ -0,0 +1,51 @@
1
+ from monocle_apptrace.instrumentation.metamodel.requests import _helper
2
+ REQUEST_HTTP_PROCESSOR = {
3
+ "type": "http.send",
4
+ "attributes": [
5
+ [
6
+ {
7
+ "_comment": "request method, request URI",
8
+ "attribute": "method",
9
+ "accessor": lambda arguments: _helper.get_method(arguments['kwargs'])
10
+ },
11
+ {
12
+ "_comment": "request method, request URI",
13
+ "attribute": "URL",
14
+ "accessor": lambda arguments: _helper.get_route(arguments['kwargs'])
15
+ }
16
+
17
+ ]
18
+ ],
19
+ "events": [
20
+ {"name": "data.input",
21
+ "attributes": [
22
+ {
23
+ "_comment": "route params",
24
+ "attribute": "http.params",
25
+ "accessor": lambda arguments: _helper.get_params(arguments['kwargs'])
26
+ },
27
+ {
28
+ "_comment": "route body",
29
+ "attribute": "body",
30
+ "accessor": lambda arguments: _helper.get_body(arguments['kwargs'])
31
+ },
32
+
33
+ ]
34
+ },
35
+ {
36
+ "name": "data.output",
37
+ "attributes": [
38
+ {
39
+ "_comment": "status from HTTP response",
40
+ "attribute": "status",
41
+ "accessor": lambda arguments: _helper.extract_status(arguments['result'])
42
+ },
43
+ {
44
+ "_comment": "this is result from LLM",
45
+ "attribute": "response",
46
+ "accessor": lambda arguments: _helper.extract_response(arguments['result'])
47
+ }
48
+ ]
49
+ }
50
+ ]
51
+ }
@@ -1,12 +1,13 @@
1
1
  from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
2
+ from monocle_apptrace.instrumentation.metamodel.requests.entities.http import REQUEST_HTTP_PROCESSOR
2
3
 
3
4
  REQUESTS_METHODS = [
4
5
  {
5
6
  "package": "requests.sessions",
6
7
  "object": "Session",
7
8
  "method": "request",
8
- "span_name": "http_requests",
9
9
  "wrapper_method": task_wrapper,
10
10
  "span_handler":"request_handler",
11
+ "output_processor": REQUEST_HTTP_PROCESSOR
11
12
  }
12
13
  ]
@@ -0,0 +1,58 @@
1
+ from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
2
+ def capture_input(arguments):
3
+ """
4
+ Captures the input from Teams AI state.
5
+ Args:
6
+ arguments (dict): Arguments containing state and context information
7
+ Returns:
8
+ str: The input message or error message
9
+ """
10
+ try:
11
+ # Get the memory object from kwargs
12
+ kwargs = arguments.get("kwargs", {})
13
+
14
+ # If memory exists, try to get the input from temp
15
+ if "memory" in kwargs:
16
+ memory = kwargs["memory"]
17
+ # Check if it's a TurnState object
18
+ if hasattr(memory, "get"):
19
+ # Use proper TurnState.get() method
20
+ temp = memory.get("temp")
21
+ if temp and hasattr(temp, "get"):
22
+ input_value = temp.get("input")
23
+ if input_value:
24
+ return str(input_value)
25
+
26
+ # Try alternative path through context if memory path fails
27
+ context = kwargs.get("context")
28
+ if hasattr(context, "activity") and hasattr(context.activity, "text"):
29
+ return str(context.activity.text)
30
+
31
+ return "No input found in memory or context"
32
+ except Exception as e:
33
+ print(f"Debug - Arguments structure: {str(arguments)}")
34
+ print(f"Debug - kwargs: {str(kwargs)}")
35
+ if "memory" in kwargs:
36
+ print(f"Debug - memory type: {type(kwargs['memory'])}")
37
+ return f"Error capturing input: {str(e)}"
38
+
39
+ def capture_prompt_info(arguments):
40
+ """Captures prompt information from ActionPlanner state"""
41
+ try:
42
+ kwargs = arguments.get("kwargs", {})
43
+ prompt = kwargs.get("prompt")
44
+
45
+ if isinstance(prompt, str):
46
+ return prompt
47
+ elif hasattr(prompt, "name"):
48
+ return prompt.name
49
+
50
+ return "No prompt information found"
51
+ except Exception as e:
52
+ return f"Error capturing prompt: {str(e)}"
53
+
54
+ def status_check(arguments):
55
+ if hasattr(arguments["result"], "error") and arguments["result"].error is not None:
56
+ error_msg:str = arguments["result"].error
57
+ error_code:str = arguments["result"].status if hasattr(arguments["result"], "status") else "unknown"
58
+ raise MonocleSpanException(f"Error: {error_code} - {error_msg}")
@@ -0,0 +1,80 @@
1
+ from monocle_apptrace.instrumentation.metamodel.teamsai import (
2
+ _helper,
3
+ )
4
+ ACTIONPLANNER_OUTPUT_PROCESSOR = {
5
+ "type": "inference",
6
+ "attributes": [
7
+ [
8
+ {
9
+ "_comment": "planner type and configuration",
10
+ "attribute": "type",
11
+ "accessor": lambda arguments: "teams.planner"
12
+ },
13
+ {
14
+ "attribute": "planner_type",
15
+ "accessor": lambda arguments: "ActionPlanner"
16
+ },
17
+ {
18
+ "attribute": "max_repair_attempts",
19
+ "accessor": lambda arguments: arguments["instance"]._options.max_repair_attempts if hasattr(arguments["instance"], "_options") else 3
20
+ }
21
+ ],
22
+ [
23
+ {
24
+ "_comment": "model configuration",
25
+ "attribute": "model",
26
+ "accessor": lambda arguments: arguments["instance"]._options.model.__class__.__name__ if hasattr(arguments["instance"], "_options") else "unknown"
27
+ },
28
+ {
29
+ "attribute": "tokenizer",
30
+ "accessor": lambda arguments: arguments["instance"]._options.tokenizer.__class__.__name__ if hasattr(arguments["instance"], "_options") else "GPTTokenizer"
31
+ }
32
+ ]
33
+ ],
34
+ "events": [
35
+ {
36
+ "name": "data.input",
37
+ "_comment": "input configuration to ActionPlanner",
38
+ "attributes": [
39
+ {
40
+ "attribute": "prompt_name",
41
+ "accessor": _helper.capture_prompt_info
42
+ },
43
+ {
44
+ "attribute": "validator",
45
+ "accessor": lambda arguments: arguments["kwargs"].get("validator").__class__.__name__ if arguments.get("kwargs", {}).get("validator") else "DefaultResponseValidator"
46
+ },
47
+ {
48
+ "attribute": "memory_type",
49
+ "accessor": lambda arguments: arguments["kwargs"].get("memory").__class__.__name__ if arguments.get("kwargs", {}).get("memory") else "unknown"
50
+ }
51
+ ]
52
+ },
53
+ {
54
+ "name": "data.output",
55
+ "_comment": "output from ActionPlanner",
56
+ "attributes": [
57
+ {
58
+ "attribute": "status",
59
+ "accessor": lambda arguments: _helper.status_check(arguments)
60
+ },
61
+ {
62
+ "attribute": "response",
63
+ "accessor": lambda arguments: arguments["result"].message.content if hasattr(arguments["result"], "message") else str(arguments["result"])
64
+ }
65
+ ]
66
+ },
67
+ {
68
+ "name": "metadata",
69
+ "attributes": [
70
+ {
71
+ "_comment": "execution metadata",
72
+ "accessor": lambda arguments: {
73
+ "latency_ms": arguments.get("latency_ms"),
74
+ "feedback_enabled": arguments["instance"]._enable_feedback_loop if hasattr(arguments["instance"], "_enable_feedback_loop") else False
75
+ }
76
+ }
77
+ ]
78
+ }
79
+ ]
80
+ }
@@ -0,0 +1,70 @@
1
+ from monocle_apptrace.instrumentation.metamodel.teamsai import (
2
+ _helper,
3
+ )
4
+ TEAMAI_OUTPUT_PROCESSOR = {
5
+ "type": "inference",
6
+ "attributes": [
7
+ [
8
+ {
9
+ "_comment": "provider type, name, deployment",
10
+ "attribute": "type",
11
+ "accessor": lambda arguments: "teams.openai"
12
+ },
13
+ {
14
+ "attribute": "provider_name",
15
+ "accessor": lambda arguments: "Microsoft Teams AI"
16
+ },
17
+ {
18
+ "attribute": "deployment",
19
+ "accessor": lambda arguments: arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
20
+ }
21
+ ],
22
+ [
23
+ {
24
+ "_comment": "LLM Model",
25
+ "attribute": "name",
26
+ "accessor": lambda arguments: arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
27
+ },
28
+ {
29
+ "attribute": "is_streaming",
30
+ "accessor": lambda arguments: arguments["instance"]._options.stream if hasattr(arguments["instance"], "_options") else False
31
+ }
32
+ ]
33
+ ],
34
+ "events": [
35
+ {
36
+ "name": "data.input",
37
+ "_comment": "input to Teams AI",
38
+ "attributes": [
39
+ {
40
+ "attribute": "input",
41
+ "accessor": _helper.capture_input
42
+ }
43
+ ]
44
+ },
45
+ {
46
+ "name": "data.output",
47
+ "_comment": "output from Teams AI",
48
+ "attributes": [
49
+ {
50
+ "attribute": "response",
51
+ "accessor": lambda arguments: arguments["result"].message.content if hasattr(arguments["result"], "message") else str(arguments["result"])
52
+ }
53
+ ]
54
+ },
55
+ {
56
+ "name": "metadata",
57
+ "attributes": [
58
+ {
59
+ "_comment": "metadata from Teams AI response",
60
+ "accessor": lambda arguments: {
61
+ "prompt_tokens": arguments["result"].get("usage", {}).get("prompt_tokens", 0),
62
+ "completion_tokens": arguments["result"].get("usage", {}).get("completion_tokens", 0),
63
+ "total_tokens": arguments["result"].get("usage", {}).get("total_tokens", 0),
64
+ "latency_ms": arguments.get("latency_ms")
65
+ }
66
+ }
67
+ ]
68
+ }
69
+ ]
70
+ }
@@ -0,0 +1,26 @@
1
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
2
+ from monocle_apptrace.instrumentation.metamodel.teamsai.entities.inference.teamsai_output_processor import (
3
+ TEAMAI_OUTPUT_PROCESSOR,
4
+ )
5
+ from monocle_apptrace.instrumentation.metamodel.teamsai.entities.inference.actionplanner_output_processor import (
6
+ ACTIONPLANNER_OUTPUT_PROCESSOR,
7
+ )
8
+
9
+ TEAMAI_METHODS =[
10
+ {
11
+ "package": "teams.ai.models.openai_model",
12
+ "object": "OpenAIModel",
13
+ "method": "complete_prompt",
14
+ "span_name": "teamsai.workflow",
15
+ "wrapper_method": atask_wrapper,
16
+ "output_processor": TEAMAI_OUTPUT_PROCESSOR
17
+ },
18
+ {
19
+ "package": "teams.ai.planners.action_planner",
20
+ "object": "ActionPlanner",
21
+ "method": "complete_prompt",
22
+ "span_name": "teamsai.workflow",
23
+ "wrapper_method": atask_wrapper,
24
+ "output_processor": ACTIONPLANNER_OUTPUT_PROCESSOR
25
+ }
26
+ ]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: monocle_apptrace
3
- Version: 0.3.0b6
3
+ Version: 0.3.1
4
4
  Summary: package with monocle genAI tracing
5
5
  Project-URL: Homepage, https://github.com/monocle2ai/monocle
6
6
  Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
@@ -20,6 +20,7 @@ Requires-Dist: boto3==1.35.19; extra == 'aws'
20
20
  Provides-Extra: azure
21
21
  Requires-Dist: azure-storage-blob==12.22.0; extra == 'azure'
22
22
  Provides-Extra: dev
23
+ Requires-Dist: anthropic==0.49.0; extra == 'dev'
23
24
  Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
24
25
  Requires-Dist: boto3==1.34.131; extra == 'dev'
25
26
  Requires-Dist: chromadb==0.4.22; extra == 'dev'