monocle-apptrace 0.3.1b1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of monocle-apptrace might be problematic; see the advisory details below for more information.

Files changed (46) hide show
  1. monocle_apptrace/exporters/aws/s3_exporter.py +3 -1
  2. monocle_apptrace/exporters/azure/blob_exporter.py +2 -2
  3. monocle_apptrace/exporters/base_exporter.py +10 -4
  4. monocle_apptrace/exporters/file_exporter.py +19 -4
  5. monocle_apptrace/exporters/monocle_exporters.py +8 -5
  6. monocle_apptrace/exporters/okahu/okahu_exporter.py +5 -2
  7. monocle_apptrace/instrumentation/common/__init__.py +1 -1
  8. monocle_apptrace/instrumentation/common/constants.py +12 -5
  9. monocle_apptrace/instrumentation/common/instrumentor.py +44 -22
  10. monocle_apptrace/instrumentation/common/span_handler.py +102 -50
  11. monocle_apptrace/instrumentation/common/tracing.md +68 -0
  12. monocle_apptrace/instrumentation/common/utils.py +114 -63
  13. monocle_apptrace/instrumentation/common/wrapper.py +202 -47
  14. monocle_apptrace/instrumentation/common/wrapper_method.py +15 -7
  15. monocle_apptrace/instrumentation/metamodel/aiohttp/__init__.py +0 -0
  16. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +66 -0
  17. monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +51 -0
  18. monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py +13 -0
  19. monocle_apptrace/instrumentation/metamodel/anthropic/methods.py +4 -2
  20. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +50 -3
  21. monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +48 -0
  22. monocle_apptrace/instrumentation/metamodel/flask/methods.py +10 -1
  23. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +17 -4
  24. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +5 -2
  25. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +8 -4
  26. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +12 -4
  27. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +1 -1
  28. monocle_apptrace/instrumentation/metamodel/langchain/methods.py +6 -14
  29. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +13 -9
  30. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +1 -1
  31. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +16 -15
  32. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +26 -5
  33. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +184 -26
  34. monocle_apptrace/instrumentation/metamodel/openai/methods.py +6 -8
  35. monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
  36. monocle_apptrace/instrumentation/metamodel/requests/entities/http.py +51 -0
  37. monocle_apptrace/instrumentation/metamodel/requests/methods.py +2 -1
  38. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +55 -5
  39. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +13 -33
  40. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +24 -20
  41. monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +54 -8
  42. {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0.dist-info}/METADATA +22 -18
  43. {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0.dist-info}/RECORD +46 -39
  44. {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0.dist-info}/WHEEL +0 -0
  45. {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0.dist-info}/licenses/LICENSE +0 -0
  46. {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0.dist-info}/licenses/NOTICE +0 -0
@@ -12,7 +12,7 @@ OPENAI_METHODS = [
12
12
  "object": "Completions",
13
13
  "method": "create",
14
14
  "wrapper_method": task_wrapper,
15
- "span_handler": "non_framework_handler",
15
+ "span_handler": "openai_handler",
16
16
  "output_processor": INFERENCE
17
17
  },
18
18
  {
@@ -20,7 +20,7 @@ OPENAI_METHODS = [
20
20
  "object": "AsyncCompletions",
21
21
  "method": "create",
22
22
  "wrapper_method": atask_wrapper,
23
- "span_handler": "non_framework_handler",
23
+ "span_handler": "openai_handler",
24
24
  "output_processor": INFERENCE
25
25
  },
26
26
  {
@@ -28,8 +28,7 @@ OPENAI_METHODS = [
28
28
  "object": "Embeddings",
29
29
  "method": "create",
30
30
  "wrapper_method": task_wrapper,
31
- "span_name": "openai_embeddings",
32
- "span_handler": "non_framework_handler",
31
+ "span_handler": "openai_handler",
33
32
  "output_processor": RETRIEVAL
34
33
  },
35
34
  {
@@ -37,8 +36,7 @@ OPENAI_METHODS = [
37
36
  "object": "AsyncEmbeddings",
38
37
  "method": "create",
39
38
  "wrapper_method": atask_wrapper,
40
- "span_name": "openai_embeddings",
41
- "span_handler": "non_framework_handler",
39
+ "span_handler": "openai_handler",
42
40
  "output_processor": RETRIEVAL
43
41
  },
44
42
  {
@@ -46,7 +44,7 @@ OPENAI_METHODS = [
46
44
  "object": "Responses",
47
45
  "method": "create",
48
46
  "wrapper_method": task_wrapper,
49
- "span_handler": "non_framework_handler",
47
+ "span_handler": "openai_handler",
50
48
  "output_processor": INFERENCE
51
49
  },
52
50
  {
@@ -54,7 +52,7 @@ OPENAI_METHODS = [
54
52
  "object": "AsyncResponses",
55
53
  "method": "create",
56
54
  "wrapper_method": atask_wrapper,
57
- "span_handler": "non_framework_handler",
55
+ "span_handler": "openai_handler",
58
56
  "output_processor": INFERENCE
59
57
  }
60
58
 
@@ -2,6 +2,36 @@ import os
2
2
  from monocle_apptrace.instrumentation.metamodel.requests import allowed_urls
3
3
  from opentelemetry.propagate import inject
4
4
  from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
5
+ from monocle_apptrace.instrumentation.common.utils import add_monocle_trace_state
6
+ from urllib.parse import urlparse, ParseResult
7
+
8
+
9
def get_route(kwargs):
    """Return host + path of the request URL (scheme, query and fragment stripped)."""
    parts: ParseResult = urlparse(kwargs['url'])
    return f"{parts.netloc}{parts.path}"
13
+
14
def get_method(kwargs) -> str:
    """HTTP verb for the call; `requests` treats a missing method as GET."""
    return kwargs.get('method', 'GET')
16
+
17
def get_params(kwargs) -> str:
    """Return the raw query string of the request URL (e.g. 'a=1&b=2').

    Returns an empty string when the URL carries no query. The value is the
    unparsed query string, not a dict, so the annotation is `str`.
    """
    parsed_url: ParseResult = urlparse(kwargs['url'])
    return parsed_url.query
21
+
22
def get_headers(kwargs) -> dict:
    """Headers passed to Session.request, or an empty dict when none were given."""
    return kwargs.get('headers', {})
24
+
25
def get_body(kwargs) -> dict:
    """Placeholder for request-body capture; always returns an empty dict.

    NOTE(review): body extraction is not implemented. `requests` passes the
    payload via the `data`/`json` kwargs — confirm whether capturing it is
    desired before wiring it in, since payloads may contain sensitive data.
    """
    return {}
28
+
29
def extract_response(result) -> str:
    """Body text of the HTTP response; fall back to str() for other objects."""
    if hasattr(result, 'text'):
        return result.text
    return str(result)
31
+
32
def extract_status(result) -> str:
    """HTTP status code of the response as a string.

    Returns an empty string when the object has no `status_code`, mirroring
    the defensive `hasattr` guard used by `extract_response` instead of
    raising AttributeError mid-span.
    """
    return f"{getattr(result, 'status_code', '')}"
34
+
5
35
 
6
36
  def request_pre_task_processor(kwargs):
7
37
  # add traceparent to the request headers in kwargs
@@ -9,6 +39,7 @@ def request_pre_task_processor(kwargs):
9
39
  headers = {}
10
40
  else:
11
41
  headers = kwargs['headers'].copy()
42
+ add_monocle_trace_state(headers)
12
43
  inject(headers)
13
44
  kwargs['headers'] = headers
14
45
 
@@ -0,0 +1,51 @@
1
from monocle_apptrace.instrumentation.metamodel.requests import _helper

# Output processor describing how to record spans for outbound HTTP calls
# made through the `requests` library (Session.request).
REQUEST_HTTP_PROCESSOR = {
    "type": "http.send",
    "attributes": [
        [
            {
                "_comment": "HTTP request method",
                "attribute": "method",
                "accessor": lambda arguments: _helper.get_method(arguments['kwargs'])
            },
            {
                "_comment": "request URI (host + path)",
                "attribute": "URL",
                "accessor": lambda arguments: _helper.get_route(arguments['kwargs'])
            }
        ]
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "query string of the request URL",
                    "attribute": "http.params",
                    "accessor": lambda arguments: _helper.get_params(arguments['kwargs'])
                },
                {
                    "_comment": "request body",
                    "attribute": "body",
                    "accessor": lambda arguments: _helper.get_body(arguments['kwargs'])
                }
            ]
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "_comment": "status code from the HTTP response",
                    "attribute": "status",
                    "accessor": lambda arguments: _helper.extract_status(arguments['result'])
                },
                {
                    "_comment": "body text of the HTTP response",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.extract_response(arguments['result'])
                }
            ]
        }
    ]
}
@@ -1,12 +1,13 @@
1
1
from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
from monocle_apptrace.instrumentation.metamodel.requests.entities.http import REQUEST_HTTP_PROCESSOR

# Instrumentation entries for the `requests` library: wrap Session.request
# synchronously and process spans with the HTTP output processor above.
REQUESTS_METHODS = [
    {
        "package": "requests.sessions",
        "object": "Session",
        "method": "request",
        "wrapper_method": task_wrapper,
        "span_handler": "request_handler",
        "output_processor": REQUEST_HTTP_PROCESSOR,
    },
]
@@ -1,4 +1,12 @@
1
1
  from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
2
+ from monocle_apptrace.instrumentation.common.utils import (
3
+ Option,
4
+ get_keys_as_tuple,
5
+ get_nested_value,
6
+ try_option,
7
+ get_exception_message,
8
+ get_exception_status_code
9
+ )
2
10
  def capture_input(arguments):
3
11
  """
4
12
  Captures the input from Teams AI state.
@@ -51,8 +59,50 @@ def capture_prompt_info(arguments):
51
59
  except Exception as e:
52
60
  return f"Error capturing prompt: {str(e)}"
53
61
 
54
- def status_check(arguments):
55
- if hasattr(arguments["result"], "error") and arguments["result"].error is not None:
56
- error_msg:str = arguments["result"].error
57
- error_code:str = arguments["result"].status if hasattr(arguments["result"], "status") else "unknown"
58
- raise MonocleSpanException(f"Error: {error_code} - {error_msg}")
62
def get_status_code(arguments):
    """Status for the span: an exception-derived code when an exception was
    raised, the result's own `status` attribute when present, else 'success'."""
    if arguments["exception"] is not None:
        return get_exception_status_code(arguments)
    result = arguments["result"]
    return result.status if hasattr(result, "status") else 'success'

def get_status(arguments):
    """Collapse the outcome to 'success' or 'error'."""
    if arguments["exception"] is None and get_status_code(arguments) == 'success':
        return 'success'
    return 'error'

def get_response(arguments) -> str:
    """Human-readable response text: the message content (or str of the
    result) on success, otherwise the exception message or the result's
    `error` field; empty string when nothing applies."""
    result = arguments["result"]
    if get_status_code(arguments) == 'success':
        if hasattr(result, "message"):
            return result.message.content
        return str(result)
    if arguments["exception"] is not None:
        return get_exception_message(arguments)
    if hasattr(result, "error"):
        return result.error
    return ""

def check_status(arguments):
    """Raise MonocleSpanException when the span did not complete successfully."""
    status = get_status_code(arguments)
    if status != 'success':
        raise MonocleSpanException(f"{status}")
97
+
98
def extract_provider_name(instance):
    """Host name of the client's base URL, or None when it cannot be read.

    NOTE(review): `instance._client.base_url` is evaluated eagerly before
    try_option wraps the getattr — confirm callers always have `_client`.
    """
    host: Option[str] = try_option(getattr, instance._client.base_url, 'host')
    return host.unwrap_or(None)


def extract_inference_endpoint(instance):
    """Full base URL of the inference client, falling back to a boto3-style
    `meta.endpoint_url` and finally to the bare provider host name."""
    endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
    if endpoint.is_none() and "meta" in instance.client.__dict__:
        endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
    return endpoint.unwrap_or(extract_provider_name(instance))
@@ -2,7 +2,7 @@ from monocle_apptrace.instrumentation.metamodel.teamsai import (
2
2
  _helper,
3
3
  )
4
4
  ACTIONPLANNER_OUTPUT_PROCESSOR = {
5
- "type": "inference",
5
+ "type": "generic",
6
6
  "attributes": [
7
7
  [
8
8
  {
@@ -28,42 +28,22 @@ ACTIONPLANNER_OUTPUT_PROCESSOR = {
28
28
  {
29
29
  "attribute": "tokenizer",
30
30
  "accessor": lambda arguments: arguments["instance"]._options.tokenizer.__class__.__name__ if hasattr(arguments["instance"], "_options") else "GPTTokenizer"
31
+ },
32
+ {
33
+ "attribute": "prompt_name",
34
+ "accessor": _helper.capture_prompt_info
35
+ },
36
+ {
37
+ "attribute": "validator",
38
+ "accessor": lambda arguments: arguments["kwargs"].get("validator").__class__.__name__ if arguments.get("kwargs", {}).get("validator") else "DefaultResponseValidator"
39
+ },
40
+ {
41
+ "attribute": "memory_type",
42
+ "accessor": lambda arguments: arguments["kwargs"].get("memory").__class__.__name__ if arguments.get("kwargs", {}).get("memory") else "unknown"
31
43
  }
32
44
  ]
33
45
  ],
34
46
  "events": [
35
- {
36
- "name": "data.input",
37
- "_comment": "input configuration to ActionPlanner",
38
- "attributes": [
39
- {
40
- "attribute": "prompt_name",
41
- "accessor": _helper.capture_prompt_info
42
- },
43
- {
44
- "attribute": "validator",
45
- "accessor": lambda arguments: arguments["kwargs"].get("validator").__class__.__name__ if arguments.get("kwargs", {}).get("validator") else "DefaultResponseValidator"
46
- },
47
- {
48
- "attribute": "memory_type",
49
- "accessor": lambda arguments: arguments["kwargs"].get("memory").__class__.__name__ if arguments.get("kwargs", {}).get("memory") else "unknown"
50
- }
51
- ]
52
- },
53
- {
54
- "name": "data.output",
55
- "_comment": "output from ActionPlanner",
56
- "attributes": [
57
- {
58
- "attribute": "status",
59
- "accessor": lambda arguments: _helper.status_check(arguments)
60
- },
61
- {
62
- "attribute": "response",
63
- "accessor": lambda arguments: arguments["result"].message.content if hasattr(arguments["result"], "message") else str(arguments["result"])
64
- }
65
- ]
66
- },
67
47
  {
68
48
  "name": "metadata",
69
49
  "attributes": [
@@ -1,22 +1,23 @@
1
1
  from monocle_apptrace.instrumentation.metamodel.teamsai import (
2
2
  _helper,
3
3
  )
4
+ from monocle_apptrace.instrumentation.common.utils import get_llm_type
4
5
  TEAMAI_OUTPUT_PROCESSOR = {
5
- "type": "inference",
6
+ "type": "inference.framework",
6
7
  "attributes": [
7
8
  [
8
9
  {
9
10
  "_comment": "provider type, name, deployment",
10
11
  "attribute": "type",
11
- "accessor": lambda arguments: "teams.openai"
12
+ "accessor": lambda arguments: 'inference.' + (get_llm_type(arguments['instance']._client) or 'generic')
12
13
  },
13
14
  {
14
15
  "attribute": "provider_name",
15
- "accessor": lambda arguments: "Microsoft Teams AI"
16
+ "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
16
17
  },
17
18
  {
18
- "attribute": "deployment",
19
- "accessor": lambda arguments: arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
19
+ "attribute": "inference_endpoint",
20
+ "accessor": lambda arguments: _helper.extract_inference_endpoint(arguments['instance'])
20
21
  }
21
22
  ],
22
23
  [
@@ -25,6 +26,11 @@ TEAMAI_OUTPUT_PROCESSOR = {
25
26
  "attribute": "name",
26
27
  "accessor": lambda arguments: arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
27
28
  },
29
+ {
30
+ "_comment": "LLM Model",
31
+ "attribute": "type",
32
+ "accessor": lambda arguments: 'model.llm.'+ arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
33
+ },
28
34
  {
29
35
  "attribute": "is_streaming",
30
36
  "accessor": lambda arguments: arguments["instance"]._options.stream if hasattr(arguments["instance"], "_options") else False
@@ -46,25 +52,23 @@ TEAMAI_OUTPUT_PROCESSOR = {
46
52
  "name": "data.output",
47
53
  "_comment": "output from Teams AI",
48
54
  "attributes": [
55
+ {
56
+ "attribute": "status",
57
+ "accessor": lambda arguments: _helper.get_status(arguments)
58
+ },
59
+ {
60
+ "attribute": "status_code",
61
+ "accessor": lambda arguments: _helper.get_status_code(arguments)
62
+ },
49
63
  {
50
64
  "attribute": "response",
51
- "accessor": lambda arguments: arguments["result"].message.content if hasattr(arguments["result"], "message") else str(arguments["result"])
52
- }
53
- ]
54
- },
55
- {
56
- "name": "metadata",
57
- "attributes": [
65
+ "accessor": lambda arguments: _helper.get_response(arguments)
66
+ },
58
67
  {
59
- "_comment": "metadata from Teams AI response",
60
- "accessor": lambda arguments: {
61
- "prompt_tokens": arguments["result"].get("usage", {}).get("prompt_tokens", 0),
62
- "completion_tokens": arguments["result"].get("usage", {}).get("completion_tokens", 0),
63
- "total_tokens": arguments["result"].get("usage", {}).get("total_tokens", 0),
64
- "latency_ms": arguments.get("latency_ms")
65
- }
68
+ "attribute": "check_status",
69
+ "accessor": lambda arguments: _helper.check_status(arguments)
66
70
  }
67
71
  ]
68
- }
72
+ },
69
73
  ]
70
74
  }
@@ -1,26 +1,72 @@
1
- from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
1
+ from monocle_apptrace.instrumentation.common.wrapper import (
2
+ ascopes_wrapper,
3
+ atask_wrapper,
4
+ task_wrapper,
5
+ ascope_wrapper,
6
+ )
2
7
  from monocle_apptrace.instrumentation.metamodel.teamsai.entities.inference.teamsai_output_processor import (
3
8
  TEAMAI_OUTPUT_PROCESSOR,
4
9
  )
5
10
  from monocle_apptrace.instrumentation.metamodel.teamsai.entities.inference.actionplanner_output_processor import (
6
11
  ACTIONPLANNER_OUTPUT_PROCESSOR,
7
- )
12
+ )
13
def get_id(args, kwargs):
    """
    Extract MS Teams scope values from the TurnContext in `kwargs`.

    Reads activity, conversation, sender, recipient and channel-data fields
    off `kwargs["context"].activity` and returns a flat mapping of
    "msteams.*" scope names to string values. Returns an empty dict when the
    context is missing, has no channel id, or the channel is not "msteams".
    Note: values are plain strings, so the annotation is dict[str, str]
    (the earlier `dict[str, dict[str:str]]` was incorrect).
    """
    scopes: dict[str, str] = {}
    context = kwargs.get("context")
    if not (context and context.activity and context.activity.channel_id):
        return scopes
    activity = context.activity
    if (activity.channel_id or "") != "msteams":
        return scopes

    scopes["msteams.activity.type"] = activity.type or ""

    if hasattr(activity, "conversation"):
        conversation = activity.conversation
        scopes["msteams.conversation.id"] = conversation.id or ""
        scopes["msteams.conversation.type"] = conversation.conversation_type or ""
        scopes["msteams.conversation.name"] = conversation.name or ""

    if hasattr(activity, "from_property"):
        sender = activity.from_property
        scopes["msteams.user.from_property.id"] = sender.id or ""
        scopes["msteams.user.from_property.name"] = sender.name or ""
        scopes["msteams.user.from_property.role"] = sender.role or ""

    if hasattr(activity, "recipient"):
        scopes["msteams.recipient.id"] = activity.recipient.id or ""

    if hasattr(activity, "channel_data"):
        channel_data = activity.channel_data
        if "tenant" in channel_data:
            scopes["msteams.channel_data.tenant.id"] = channel_data['tenant']['id'] or ""
        if "team" in channel_data:
            scopes["msteams.channel_data.team.id"] = channel_data['team']['id'] or ""
            if "name" in channel_data['team']:
                scopes["msteams.channel_data.team.name"] = channel_data['team']['name'] or ""
        if "channel" in channel_data:
            scopes["msteams.channel_data.channel.id"] = channel_data['channel']['id'] or ""
            if "name" in channel_data['channel']:
                scopes["msteams.channel_data.channel.name"] = channel_data['channel']['name'] or ""
    return scopes
8
49
 
9
# Instrumentation entries for the Teams AI SDK. The same ActionPlanner
# method appears twice: once for span output processing and once (via
# ascopes_wrapper) to attach the msteams.* scope values from get_id.
TEAMAI_METHODS = [
    {
        "package": "teams.ai.models.openai_model",
        "object": "OpenAIModel",
        "method": "complete_prompt",
        "wrapper_method": atask_wrapper,
        "output_processor": TEAMAI_OUTPUT_PROCESSOR,
    },
    {
        "package": "teams.ai.planners.action_planner",
        "object": "ActionPlanner",
        "method": "complete_prompt",
        "wrapper_method": atask_wrapper,
        "output_processor": ACTIONPLANNER_OUTPUT_PROCESSOR,
    },
    {
        "package": "teams.ai.planners.action_planner",
        "object": "ActionPlanner",
        "method": "complete_prompt",
        "scope_values": get_id,
        "wrapper_method": ascopes_wrapper,
    },
]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: monocle_apptrace
3
- Version: 0.3.1b1
3
+ Version: 0.4.0
4
4
  Summary: package with monocle genAI tracing
5
5
  Project-URL: Homepage, https://github.com/monocle2ai/monocle
6
6
  Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
@@ -16,40 +16,44 @@ Requires-Dist: opentelemetry-sdk>=1.21.0
16
16
  Requires-Dist: requests
17
17
  Requires-Dist: wrapt>=1.14.0
18
18
  Provides-Extra: aws
19
- Requires-Dist: boto3==1.35.19; extra == 'aws'
19
+ Requires-Dist: boto3==1.37.24; extra == 'aws'
20
20
  Provides-Extra: azure
21
21
  Requires-Dist: azure-storage-blob==12.22.0; extra == 'azure'
22
22
  Provides-Extra: dev
23
- Requires-Dist: anthropic==0.49.0; extra == 'dev'
23
+ Requires-Dist: anthropic-haystack; extra == 'dev'
24
+ Requires-Dist: anthropic==0.52.0; extra == 'dev'
24
25
  Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
25
- Requires-Dist: boto3==1.34.131; extra == 'dev'
26
- Requires-Dist: chromadb==0.4.22; extra == 'dev'
26
+ Requires-Dist: boto3==1.37.24; extra == 'dev'
27
+ Requires-Dist: chromadb==1.0.10; extra == 'dev'
27
28
  Requires-Dist: datasets==2.20.0; extra == 'dev'
28
29
  Requires-Dist: faiss-cpu==1.8.0; extra == 'dev'
29
30
  Requires-Dist: flask; extra == 'dev'
30
31
  Requires-Dist: haystack-ai==2.3.0; extra == 'dev'
31
32
  Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
32
- Requires-Dist: langchain-aws==0.1.10; extra == 'dev'
33
- Requires-Dist: langchain-chroma==0.1.1; extra == 'dev'
34
- Requires-Dist: langchain-community==0.2.5; extra == 'dev'
35
- Requires-Dist: langchain-mistralai==0.1.13; extra == 'dev'
36
- Requires-Dist: langchain-openai==0.1.8; extra == 'dev'
37
- Requires-Dist: langchain==0.2.5; extra == 'dev'
33
+ Requires-Dist: langchain-anthropic==0.3.13; extra == 'dev'
34
+ Requires-Dist: langchain-aws==0.2.23; extra == 'dev'
35
+ Requires-Dist: langchain-chroma==0.2.4; extra == 'dev'
36
+ Requires-Dist: langchain-community==0.3.24; extra == 'dev'
37
+ Requires-Dist: langchain-mistralai==0.2.10; extra == 'dev'
38
+ Requires-Dist: langchain-openai==0.3.18; extra == 'dev'
39
+ Requires-Dist: langchain==0.3.25; extra == 'dev'
38
40
  Requires-Dist: langchainhub==0.1.21; extra == 'dev'
39
41
  Requires-Dist: langgraph==0.2.68; extra == 'dev'
40
- Requires-Dist: llama-index-embeddings-huggingface==0.2.0; extra == 'dev'
41
- Requires-Dist: llama-index-llms-azure-openai==0.1.9; extra == 'dev'
42
- Requires-Dist: llama-index-llms-mistralai==0.1.20; extra == 'dev'
43
- Requires-Dist: llama-index-vector-stores-chroma==0.1.9; extra == 'dev'
44
- Requires-Dist: llama-index-vector-stores-opensearch==0.1.10; extra == 'dev'
45
- Requires-Dist: llama-index==0.10.30; extra == 'dev'
42
+ Requires-Dist: llama-index-embeddings-huggingface==0.5.4; extra == 'dev'
43
+ Requires-Dist: llama-index-llms-anthropic==0.6.19; extra == 'dev'
44
+ Requires-Dist: llama-index-llms-azure-openai==0.3.2; extra == 'dev'
45
+ Requires-Dist: llama-index-llms-mistralai==0.4.0; extra == 'dev'
46
+ Requires-Dist: llama-index-vector-stores-chroma==0.4.1; extra == 'dev'
47
+ Requires-Dist: llama-index-vector-stores-opensearch==0.5.4; extra == 'dev'
48
+ Requires-Dist: llama-index==0.12.37; extra == 'dev'
46
49
  Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
47
50
  Requires-Dist: numpy==1.26.4; extra == 'dev'
48
51
  Requires-Dist: opendal==0.45.14; extra == 'dev'
49
52
  Requires-Dist: opensearch-haystack==1.2.0; extra == 'dev'
50
53
  Requires-Dist: opentelemetry-instrumentation-flask; extra == 'dev'
51
54
  Requires-Dist: parameterized==0.9.0; extra == 'dev'
52
- Requires-Dist: pytest==8.0.0; extra == 'dev'
55
+ Requires-Dist: pytest-asyncio==0.26.0; extra == 'dev'
56
+ Requires-Dist: pytest==8.3.5; extra == 'dev'
53
57
  Requires-Dist: requests-aws4auth==1.2.3; extra == 'dev'
54
58
  Requires-Dist: sentence-transformers==2.6.1; extra == 'dev'
55
59
  Requires-Dist: types-requests==2.31.0.20240106; extra == 'dev'