genai-otel-instrument 0.1.7.dev0__tar.gz → 0.1.10.dev0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (207)
  1. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/CHANGELOG.md +121 -0
  2. {genai_otel_instrument-0.1.7.dev0/genai_otel_instrument.egg-info → genai_otel_instrument-0.1.10.dev0}/PKG-INFO +80 -35
  3. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/README.md +79 -34
  4. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/anthropic/example.py +1 -1
  5. genai_otel_instrument-0.1.10.dev0/examples/huggingface/example_automodel.py +89 -0
  6. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/__version__.py +3 -3
  7. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/auto_instrument.py +7 -3
  8. genai_otel_instrument-0.1.10.dev0/genai_otel/cost_enriching_exporter.py +207 -0
  9. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/cost_enrichment_processor.py +2 -3
  10. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/huggingface_instrumentor.py +178 -5
  11. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/llm_pricing.json +305 -7
  12. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0/genai_otel_instrument.egg-info}/PKG-INFO +80 -35
  13. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel_instrument.egg-info/SOURCES.txt +2 -0
  14. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/pyproject.toml +6 -0
  15. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_huggingface_instrumentor.py +26 -0
  16. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.claude/settings.local.json +0 -0
  17. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.github/workflows/README.md +0 -0
  18. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.github/workflows/pre-release-check.yml +0 -0
  19. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.github/workflows/publish.yml +0 -0
  20. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.github/workflows/test.yml +0 -0
  21. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.gitignore +0 -0
  22. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/.gitignore +0 -0
  23. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/genai_otel_instrument.iml +0 -0
  24. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/inspectionProfiles/Project_Default.xml +0 -0
  25. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/inspectionProfiles/profiles_settings.xml +0 -0
  26. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/misc.xml +0 -0
  27. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/modules.xml +0 -0
  28. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.idea/vcs.xml +0 -0
  29. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.pre-commit-config.yaml +0 -0
  30. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/.pylintrc +0 -0
  31. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/Contributing.md +0 -0
  32. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/DEVELOPMENT.md +0 -0
  33. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/LICENSE +0 -0
  34. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/MANIFEST.in +0 -0
  35. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/OTEL_SEMANTIC_COMPATIBILITY.md +0 -0
  36. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/OTEL_SEMANTIC_GAP_ANALYSIS_AND_IMPLEMENTATION_PLAN.md +0 -0
  37. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/PRE_RELEASE_CHECKLIST.md +0 -0
  38. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/TEST_COVERAGE_CHECKLIST.md +0 -0
  39. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/TROUBLESHOOTING.md +0 -0
  40. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/example_usage.py +0 -0
  41. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/README.md +0 -0
  42. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/anthropic/.env.example +0 -0
  43. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/anthropic/README.md +0 -0
  44. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/aws_bedrock/.env.example +0 -0
  45. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/aws_bedrock/README.md +0 -0
  46. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/aws_bedrock/example.py +0 -0
  47. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/azure_openai/.env.example +0 -0
  48. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/azure_openai/README.md +0 -0
  49. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/azure_openai/example.py +0 -0
  50. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/cohere/.env.example +0 -0
  51. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/cohere/README.md +0 -0
  52. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/cohere/example.py +0 -0
  53. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/.env.example +0 -0
  54. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/Dockerfile +0 -0
  55. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/OPENSEARCH_SETUP.md +0 -0
  56. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/README.md +0 -0
  57. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/app.py +0 -0
  58. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/docker-compose.yml +0 -0
  59. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/dashboards/GenAI OTel Demo Metrics-1761310525837.json +0 -0
  60. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/dashboards/GenAI OTel Demo Traces-1761321575526.json +0 -0
  61. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/dashboards/GenAI Traces - OpenSearch-1761319701624.json +0 -0
  62. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/dashboards/genai-metrics-dashboard.json +0 -0
  63. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/dashboards/genai-opensearch-traces-dashboard.json +0 -0
  64. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/dashboards/genai-traces-dashboard.json +0 -0
  65. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/provisioning/dashboards/dashboards.yml +0 -0
  66. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/provisioning/datasources/jaeger.yml +0 -0
  67. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/provisioning/datasources/opensearch.yml +0 -0
  68. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/grafana/provisioning/datasources/prometheus.yml +0 -0
  69. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/opensearch-setup.sh +0 -0
  70. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/otel-collector-config.yml +0 -0
  71. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/prometheus.yml +0 -0
  72. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/demo/requirements.txt +0 -0
  73. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/google_ai/.env.example +0 -0
  74. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/google_ai/README.md +0 -0
  75. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/google_ai/example.py +0 -0
  76. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/groq/.env.example +0 -0
  77. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/groq/README.md +0 -0
  78. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/groq/example.py +0 -0
  79. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/huggingface/.env.example +0 -0
  80. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/huggingface/README.md +0 -0
  81. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/huggingface/example.py +0 -0
  82. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/langchain/.env.example +0 -0
  83. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/langchain/README.md +0 -0
  84. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/langchain/example.py +0 -0
  85. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/litellm/example.py +0 -0
  86. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/llamaindex/.env.example +0 -0
  87. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/llamaindex/README.md +0 -0
  88. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/llamaindex/example.py +0 -0
  89. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/mistralai/.env.example +0 -0
  90. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/mistralai/README.md +0 -0
  91. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/mistralai/example.py +0 -0
  92. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/ollama/.env.example +0 -0
  93. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/ollama/README.md +0 -0
  94. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/ollama/example.py +0 -0
  95. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/openai/.env.example +0 -0
  96. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/openai/README.md +0 -0
  97. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/openai/example.py +0 -0
  98. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/phase4_session_rag_tracking.py +0 -0
  99. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/replicate/.env.example +0 -0
  100. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/replicate/README.md +0 -0
  101. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/replicate/example.py +0 -0
  102. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/smolagents/example.py +0 -0
  103. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/togetherai/.env.example +0 -0
  104. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/togetherai/README.md +0 -0
  105. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/togetherai/example.py +0 -0
  106. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/vertexai/.env.example +0 -0
  107. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/vertexai/README.md +0 -0
  108. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/examples/vertexai/example.py +0 -0
  109. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/__init__.py +0 -0
  110. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/cli.py +0 -0
  111. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/config.py +0 -0
  112. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/cost_calculator.py +0 -0
  113. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/exceptions.py +0 -0
  114. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/gpu_metrics.py +0 -0
  115. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/__init__.py +0 -0
  116. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/anthropic_instrumentor.py +0 -0
  117. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/anyscale_instrumentor.py +0 -0
  118. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/aws_bedrock_instrumentor.py +0 -0
  119. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/azure_openai_instrumentor.py +0 -0
  120. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/base.py +0 -0
  121. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/cohere_instrumentor.py +0 -0
  122. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/google_ai_instrumentor.py +0 -0
  123. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/groq_instrumentor.py +0 -0
  124. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/langchain_instrumentor.py +0 -0
  125. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/llamaindex_instrumentor.py +0 -0
  126. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/mistralai_instrumentor.py +0 -0
  127. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/ollama_instrumentor.py +0 -0
  128. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/openai_instrumentor.py +0 -0
  129. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/replicate_instrumentor.py +0 -0
  130. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/togetherai_instrumentor.py +0 -0
  131. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/instrumentors/vertexai_instrumentor.py +0 -0
  132. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/logging_config.py +0 -0
  133. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/__init__.py +0 -0
  134. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/api_instrumentor.py +0 -0
  135. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/base.py +0 -0
  136. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/database_instrumentor.py +0 -0
  137. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/kafka_instrumentor.py +0 -0
  138. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/manager.py +0 -0
  139. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/redis_instrumentor.py +0 -0
  140. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/mcp_instrumentors/vector_db_instrumentor.py +0 -0
  141. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/metrics.py +0 -0
  142. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel/py.typed +0 -0
  143. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel_instrument.egg-info/dependency_links.txt +0 -0
  144. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel_instrument.egg-info/entry_points.txt +0 -0
  145. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel_instrument.egg-info/requires.txt +0 -0
  146. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/genai_otel_instrument.egg-info/top_level.txt +0 -0
  147. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/openlit/semcov.py +0 -0
  148. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/requirements-dev.txt +0 -0
  149. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/requirements-testing.txt +0 -0
  150. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/requirements.txt +0 -0
  151. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/sample.env +0 -0
  152. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/add_ollama_pricing.py +0 -0
  153. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/simple_test.py +0 -0
  154. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_example_debug.py +0 -0
  155. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_exporter_fix.py +0 -0
  156. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_final.py +0 -0
  157. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_gpu_debug.py +0 -0
  158. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_gpu_metrics.py +0 -0
  159. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_litellm_instrumentation.py +0 -0
  160. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_ollama_cost.py +0 -0
  161. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/debug/test_ollama_span_attributes.py +0 -0
  162. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/fix_all_deps.sh +0 -0
  163. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/fix_instrumentors.py +0 -0
  164. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/test_installation.py +0 -0
  165. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/scripts/test_release.sh +0 -0
  166. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/setup.cfg +0 -0
  167. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/setup.py +0 -0
  168. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/__init__.py +0 -0
  169. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_anthropic_instrumentor.py +0 -0
  170. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_anyscale_instrumentor.py +0 -0
  171. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_aws_bedrock_instrumentor.py +0 -0
  172. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_azure_openai_instrumentor.py +0 -0
  173. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_base.py +0 -0
  174. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_cohere_instrumentor.py +0 -0
  175. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_google_ai_instrumentor.py +0 -0
  176. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_groq_instrumentor.py +0 -0
  177. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_langchain_instrumentor.py +0 -0
  178. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_litellm_instrumentor.py +0 -0
  179. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_llamaindex_instrumentor.py +0 -0
  180. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_mcp_instrumentor.py +0 -0
  181. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_mistralai_instrumentor.py +0 -0
  182. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_ollama_instrumentor.py +0 -0
  183. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_openai_instrumentor.py +0 -0
  184. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_replicate_instrumentor.py +0 -0
  185. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_smolagents_instrumentor.py +0 -0
  186. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_togetherai_instrumentor.py +0 -0
  187. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/instrumentors/test_vertexai_instrumentor.py +0 -0
  188. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_api_instrumentor.py +0 -0
  189. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_database_instrumentor.py +0 -0
  190. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_kafka_instrumentor.py +0 -0
  191. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_manager.py +0 -0
  192. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_mcp_base.py +0 -0
  193. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_redis_instrumentor.py +0 -0
  194. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/mcp_instrumentors/test_vector_db_instrumentor.py +0 -0
  195. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_auto_instrument.py +0 -0
  196. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_cli.py +0 -0
  197. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_config.py +0 -0
  198. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_cost_calculator.py +0 -0
  199. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_cost_enrichment_processor.py +0 -0
  200. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_exceptions.py +0 -0
  201. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_gpu_metrics.py +0 -0
  202. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_init.py +0 -0
  203. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_logging_config.py +0 -0
  204. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_metrics.py +0 -0
  205. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_openai_instrumentor.py +0 -0
  206. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_otel_setup.py +0 -0
  207. {genai_otel_instrument-0.1.7.dev0 → genai_otel_instrument-0.1.10.dev0}/tests/test_phase4_features.py +0 -0
@@ -6,6 +6,127 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
  ## [Unreleased]
 
+ ## [0.1.9.1] - 2025-01-29
+
+ ### Added
+
+ - **Enhanced README Documentation**
+ - Added professional project logo centered at the top of README
+ - Added landing page hero image showcasing the project overview
+ - Added comprehensive Screenshots section with 5 embedded demonstration images:
+ - OpenAI instrumentation with token usage, costs, and latency metrics
+ - Ollama (local LLM) zero-code instrumentation
+ - HuggingFace Transformers with automatic token counting
+ - SmolAgents framework with complete agent workflow tracing
+ - GPU metrics collection dashboard
+ - Added links to additional screenshots (Token Cost Breakdown, OpenSearch Dashboard)
+ - Added Demo Video section with placeholder for future video content
+ - All images follow OSS documentation standards with professional formatting
+
+ ### Changed
+
+ - **Roadmap Section Cleanup**
+ - Removed Phase 4 implementation details from roadmap (Session & User Tracking, RAG/Embedding Attributes)
+ - Phase 4 features are now fully implemented and documented in the Advanced Features section
+ - Roadmap now focuses exclusively on future releases (v0.2.0 onwards)
+
+ ### Improved
+
+ - **Comprehensive Model Pricing Database Update**
+ - Expanded pricing coverage from 145+ to 240+ models across 15+ providers
+ - **OpenAI GPT-5 Series** (4 new models):
+ - `gpt-5` - $1.25/$10 per 1M tokens
+ - `gpt-5-2025-08-07` - $1.25/$10 per 1M tokens
+ - `gpt-5-mini` - $0.25/$2 per 1M tokens
+ - `gpt-5-nano` - $0.10/$0.40 per 1M tokens
+ - **Anthropic Claude 4/3.5 Variants** (13 new models):
+ - Claude 4 Opus series: `claude-4-opus`, `claude-opus-4`, `claude-opus-4-1`, `claude-opus-4.1` - $15/$75 per 1M tokens
+ - Claude 3.5 Sonnet: `claude-3-5-sonnet-20240620`, `claude-3-5-sonnet-20241022`, `claude-sonnet-4-5`, `claude-sonnet-4-5-20250929`, `claude-3-7-sonnet` - $3/$15 per 1M tokens
+ - Claude 3.5 Haiku: `claude-3-5-haiku-20241022` - $0.80/$4 per 1M tokens
+ - Claude Haiku 4.5: `claude-haiku-4-5` - $1/$5 per 1M tokens
+ - **XAI Grok Models** (10 new models):
+ - Grok 2: `grok-2-1212`, `grok-2-vision-1212` - $2/$10 per 1M tokens
+ - Grok 3: `grok-3` - $3/$15 per 1M tokens, `grok-3-mini` - $0.30/$0.50 per 1M tokens
+ - Grok 3 Fast: `grok-3-fast` - $5/$25 per 1M tokens, `grok-3-mini-fast` - $0.60/$4 per 1M tokens
+ - Grok 4: `grok-4` - $3/$15 per 1M tokens, `grok-4-fast` - $0.20/$0.50 per 1M tokens
+ - Image models: `grok-image`, `xai-grok-image` - $0.07 per image
+ - **Google Gemini Variants** (2 new models):
+ - `gemini-2-5-flash-image` - $0.30/$30 per 1M tokens
+ - `nano-banana` - $0.30/$30 per 1M tokens
+ - **Qwen Series** (6 new models):
+ - `qwen3-next-80b-a3b-instruct` - $0.525/$2.10 per 1M tokens
+ - `qwen3-next-80b-a3b-thinking` - $0.525/$6.30 per 1M tokens
+ - `qwen3-coder-480b-a35b-instruct` - $1/$5 per 1M tokens
+ - `qwen3-max`, `qwen-qwen3-max` - $1.20/$6 per 1M tokens
+ - **Meta Llama 4 Scout & Maverick** (6 models with updated pricing):
+ - `llama-4-scout`, `llama-4-scout-17bx16e-128k`, `meta-llama/Llama-4-Scout` - $0.15/$0.50 per 1M tokens
+ - `llama-4-maverick`, `llama-4-maverick-17bx128e-128k`, `meta-llama/Llama-4-Maverick` - $0.22/$0.85 per 1M tokens
+ - **IBM Granite Models** (13 new models):
+ - Granite 3 series: `ibm-granite-3-1-8b-instruct`, `ibm-granite-3-8b-instruct`, `granite-3-8b-instruct` - $0.20/$0.20 per 1M tokens
+ - Granite 4 series: `granite-4-0-h-small`, `granite-4-0-h-tiny`, `granite-4-0-h-micro`, `granite-4-0-micro` - $0.20/$0.20 per 1M tokens
+ - Embeddings: `granite-embedding-107m-multilingual`, `granite-embedding-278m-multilingual` - $0.10/$0.10 per 1M tokens
+ - Ollama variants: `granite:3b`, `granite:8b` - $0.20/$0.20 per 1M tokens
+ - **Mistral AI Updates** (10 new models):
+ - `mistral-large-24-11`, `mistral-large-2411` - $8/$24 per 1M tokens
+ - `mistral-small-3-1`, `mistral-small-3.1` - $1/$3 per 1M tokens
+ - `mistral-medium-3`, `mistral-medium-2025` - $0.40/$2 per 1M tokens
+ - Magistral series: `magistral-small` - $1/$3, `magistral-medium` - $3/$9 per 1M tokens
+ - Codestral: `codestral-25-01`, `codestral-2501` - $1/$3 per 1M tokens
+ - **Additional Providers**:
+ - **Sarvam AI**: `sarvam-m`, `sarvamai/sarvam-m`, `sarvam-chat` - Free (Open source)
+ - **Liquid AI**: `lfm-7b`, `liquid/lfm-7b` - $0.30/$0.60 per 1M tokens
+ - **Snowflake**: `snowflake-arctic`, `snowflake-arctic-instruct` - $0.80/$2.40 per 1M tokens, `snowflake-arctic-embed-l-v2.0` - $0.05/$0.05 per 1M tokens
+ - **NVIDIA Nemotron**: `nvidia-nemotron-4-340b-instruct` - $3/$9 per 1M tokens, `nvidia-nemotron-mini` - $0.20/$0.40 per 1M tokens, `nvidia/llama-3.1-nemotron-70b-instruct` - $0.80/$0.80 per 1M tokens
+ - **ServiceNow**: `servicenow-now-assist` - $1/$3 per 1M tokens
+ - **Pricing Corrections**:
+ - `deepseek-v3.1`: Updated to $0.56/$1.68 per 1M tokens (from $1.20/$1.20)
+ - `qwen3:3b`: Renamed to `qwen3:4b` (4B parameter model)
+ - All pricing reflects official provider rates as of October 2025
+
+ ## [0.1.9] - 2025-01-27
+
+ ### Added
+
+ - **HuggingFace AutoModelForCausalLM and AutoModelForSeq2SeqLM Instrumentation**
+ - Added support for direct model usage via `AutoModelForCausalLM.generate()` and `AutoModelForSeq2SeqLM.generate()`
+ - Automatic token counting from input and output tensor shapes
+ - Cost calculation based on model parameter count (uses CostCalculator's local model pricing tiers)
+ - Span attributes: `gen_ai.system`, `gen_ai.request.model`, `gen_ai.operation.name`, token counts, costs
+ - Metrics: request counter, token counter, latency histogram, cost counter
+ - Supports generation parameters: `max_length`, `max_new_tokens`, `temperature`, `top_p`
+ - Implementation in `genai_otel/instrumentors/huggingface_instrumentor.py:184-333`
+ - Example usage in `examples/huggingface/example_automodel.py`
+ - All 443 tests pass (added 1 new test)
+
+ ### Fixed
+
+ - **CRITICAL: Cost Tracking for OpenInference Instrumentors (smolagents, litellm, mcp)**
+ - Replaced `CostEnrichmentSpanProcessor` with `CostEnrichingSpanExporter` to properly add cost attributes
+ - **Root Cause**: SpanProcessor's `on_end()` receives immutable `ReadableSpan` objects that cannot be modified
+ - **Solution**: Custom SpanExporter that enriches span data before export, creating new ReadableSpan instances with cost attributes
+ - Cost attributes now correctly appear for smolagents, litellm, and mcp spans:
+ - `gen_ai.usage.cost.total`: Total cost in USD
+ - `gen_ai.usage.cost.prompt`: Prompt tokens cost
+ - `gen_ai.usage.cost.completion`: Completion tokens cost
+ - Supports all OpenInference semantic conventions:
+ - Model name: `llm.model_name`, `gen_ai.request.model`, `embedding.model_name`
+ - Token counts: `llm.token_count.{prompt,completion}`, `gen_ai.usage.{prompt_tokens,completion_tokens}`
+ - Span kinds: `openinference.span.kind` (LLM, EMBEDDING, CHAIN, etc.)
+ - Implementation in `genai_otel/cost_enriching_exporter.py`
+ - Updated `genai_otel/auto_instrument.py` to wrap OTLP and Console exporters
+ - Model name normalization handles provider prefixes (e.g., `openai/gpt-3.5-turbo` → `gpt-3.5-turbo`)
+ - All 442 existing tests continue to pass
+
+ - **HuggingFace AutoModelForCausalLM AttributeError Fix**
+ - Fixed `AttributeError: type object 'AutoModelForCausalLM' has no attribute 'generate'`
+ - Root cause: `AutoModelForCausalLM` is a factory class; `generate()` exists on `GenerationMixin`
+ - Solution: Wrap `GenerationMixin.generate()` which all generative models inherit from
+ - This covers all model types: `AutoModelForCausalLM`, `AutoModelForSeq2SeqLM`, `GPT2LMHeadModel`, etc.
+ - Added fallback import for older transformers versions
+ - Implementation in `genai_otel/instrumentors/huggingface_instrumentor.py:184-346`
+
+ ## [0.1.7] - 2025-01-25
+
 
  ### Added
 
  - **Phase 4: Session and User Tracking (4.1)**
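
The `CostEnrichingSpanExporter` fix described in the changelog hunk above hinges on the fact that a `SpanProcessor.on_end()` callback only sees read-only spans, while an exporter controls the final serialization step and can therefore attach cost attributes. Below is a minimal sketch of that wrapping-exporter pattern; it is not the package's actual implementation, and the pricing table, attribute fallbacks, and the `_attributes` shortcut are illustrative assumptions only.

```python
from typing import Sequence

from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult

# Hypothetical (prompt, completion) rates per 1M tokens, for this sketch only.
SKETCH_PRICING = {"gpt-3.5-turbo": (0.50, 1.50)}


class CostEnrichingExporterSketch(SpanExporter):
    """Wraps a real exporter and adds gen_ai.usage.cost.* attributes before export."""

    def __init__(self, wrapped: SpanExporter) -> None:
        self._wrapped = wrapped

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        for span in spans:
            attrs = dict(span.attributes or {})
            # OpenInference and gen_ai.* conventions name these attributes differently.
            model = attrs.get("llm.model_name") or attrs.get("gen_ai.request.model")
            prompt = attrs.get("llm.token_count.prompt") or attrs.get("gen_ai.usage.prompt_tokens")
            completion = attrs.get("llm.token_count.completion") or attrs.get("gen_ai.usage.completion_tokens")
            if not model or prompt is None or completion is None:
                continue
            # Normalize provider prefixes, e.g. "openai/gpt-3.5-turbo" -> "gpt-3.5-turbo".
            rates = SKETCH_PRICING.get(str(model).split("/")[-1])
            if rates is None:
                continue
            prompt_cost = int(prompt) / 1_000_000 * rates[0]
            completion_cost = int(completion) / 1_000_000 * rates[1]
            attrs["gen_ai.usage.cost.prompt"] = prompt_cost
            attrs["gen_ai.usage.cost.completion"] = completion_cost
            attrs["gen_ai.usage.cost.total"] = prompt_cost + completion_cost
            # Processors must treat ReadableSpan as immutable; an exporter owns the
            # final serialization step, so it can rebuild the span with enriched
            # attributes (swapping the private mapping here is an illustrative shortcut).
            span._attributes = attrs
        return self._wrapped.export(spans)

    def shutdown(self) -> None:
        self._wrapped.shutdown()
```

Per the changelog entry above, the package installs its real exporter from `auto_instrument.py`, wrapping the configured OTLP or Console exporter.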
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: genai-otel-instrument
- Version: 0.1.7.dev0
+ Version: 0.1.10.dev0
  Summary: Comprehensive OpenTelemetry auto-instrumentation for LLM/GenAI applications
  Author-email: Kshitij Thakkar <kshitijthakkar@rocketmail.com>
  License: Apache-2.0
@@ -180,6 +180,12 @@ Dynamic: license-file
 
  # GenAI OpenTelemetry Auto-Instrumentation
 
+ <div align="center">
+ <img src=".github/images/Logo.jpg" alt="GenAI OpenTelemetry Instrumentation Logo" width="400"/>
+ </div>
+
+ <br/>
+
  [![PyPI version](https://badge.fury.io/py/genai-otel-instrument.svg)](https://badge.fury.io/py/genai-otel-instrument)
  [![Python Versions](https://img.shields.io/pypi/pyversions/genai-otel-instrument.svg)](https://pypi.org/project/genai-otel-instrument/)
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
@@ -200,6 +206,14 @@ Dynamic: license-file
  [![Semantic Conventions](https://img.shields.io/badge/OTel%20Semconv-GenAI%20v1.28-orange)](https://opentelemetry.io/docs/specs/semconv/gen-ai/)
  [![CI/CD](https://img.shields.io/badge/CI%2FCD-GitHub%20Actions-2088FF?logo=github-actions&logoColor=white)](https://github.com/Mandark-droid/genai_otel_instrument/actions)
 
+ ---
+
+ <div align="center">
+ <img src=".github/images/Landing_Page.jpg" alt="GenAI OpenTelemetry Instrumentation Overview" width="800"/>
+ </div>
+
+ ---
+
  Production-ready OpenTelemetry instrumentation for GenAI/LLM applications with zero-code setup.
 
  ## Features
@@ -257,7 +271,8 @@ For a more comprehensive demonstration of various LLM providers and MCP tools, r
 
  ### LLM Providers (Auto-detected)
  - **With Full Cost Tracking**: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure OpenAI, Cohere, Mistral AI, Together AI, Groq, Ollama, Vertex AI
- - **Hardware/Local Pricing**: Replicate (hardware-based $/second), HuggingFace (local execution, free)
+ - **Hardware/Local Pricing**: Replicate (hardware-based $/second), HuggingFace (local execution with estimated costs)
+ - **HuggingFace Support**: `pipeline()`, `AutoModelForCausalLM.generate()`, `AutoModelForSeq2SeqLM.generate()`, `InferenceClient` API calls
  - **Other Providers**: Anyscale
 
  ### Frameworks
@@ -288,6 +303,65 @@ The processor supports OpenInference semantic conventions:
  pip install genai-otel-instrument[openinference]
  ```
 
+ ## Screenshots
+
+ See the instrumentation in action across different LLM providers and observability backends.
+
+ ### OpenAI Instrumentation
+ Full trace capture for OpenAI API calls with token usage, costs, and latency metrics.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Traces_OpenAI.png" alt="OpenAI Traces" width="900"/>
+ </div>
+
+ ### Ollama (Local LLM) Instrumentation
+ Zero-code instrumentation for local models running on Ollama with comprehensive observability.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Traces_Ollama.png" alt="Ollama Traces" width="900"/>
+ </div>
+
+ ### HuggingFace Transformers
+ Direct instrumentation of HuggingFace Transformers with automatic token counting and cost estimation.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Trace_HuggingFace_Transformer_Models.png" alt="HuggingFace Transformer Traces" width="900"/>
+ </div>
+
+ ### SmolAgents Framework
+ Complete agent workflow tracing with tool calls, iterations, and cost breakdown.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Traces_SmolAgent_with_tool_calls.png" alt="SmolAgent Traces with Tool Calls" width="900"/>
+ </div>
+
+ ### GPU Metrics Collection
+ Real-time GPU utilization, memory, temperature, and power consumption metrics.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/GPU_Metrics.png" alt="GPU Metrics Dashboard" width="900"/>
+ </div>
+
+ ### Additional Screenshots
+
+ - **[Token Cost Breakdown](.github/images/Screenshots/Traces_SmolAgent_Token_Cost_breakdown.png)** - Detailed token usage and cost analysis for SmolAgent workflows
+ - **[OpenSearch Dashboard](.github/images/Screenshots/GENAI_OpenSearch_output.png)** - GenAI metrics visualization in OpenSearch/Kibana
+
+ ---
+
+ ## Demo Video
+
+ Watch a comprehensive walkthrough of GenAI OpenTelemetry Auto-Instrumentation in action, demonstrating setup, configuration, and real-time observability across multiple LLM providers.
+
+ <div align="center">
+
+ **🎥 [Watch Demo Video](https://youtu.be/YOUR_VIDEO_ID_HERE)**
+ *(Coming Soon)*
+
+ </div>
+
+ ---
+
  ## Cost Tracking Coverage
 
  The library includes comprehensive cost tracking with pricing data for **145+ models** across **11 providers**:
@@ -307,7 +381,10 @@ The library includes comprehensive cost tracking with pricing data for **145+ mo
 
  ### Special Pricing Models
  - **Replicate**: Hardware-based pricing ($/second of GPU/CPU time) - not token-based
- - **HuggingFace Transformers**: Local execution - no API costs
+ - **HuggingFace Transformers**: Local model execution with estimated costs based on parameter count
+ - Supports `pipeline()`, `AutoModelForCausalLM.generate()`, `AutoModelForSeq2SeqLM.generate()`
+ - Cost estimation uses GPU/compute resource pricing tiers (tiny/small/medium/large)
+ - Automatic token counting from tensor shapes
 
  ### Pricing Features
  - **Differential Pricing**: Separate rates for prompt tokens vs. completion tokens
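
The "Differential Pricing" line above means prompt and completion tokens are billed at separate rates. A tiny worked sketch of that arithmetic, using the `gpt-5` rates from the changelog ($1.25 input / $10 output per 1M tokens) and made-up token counts:

```python
PROMPT_RATE_PER_1M = 1.25       # gpt-5 input rate from the changelog above
COMPLETION_RATE_PER_1M = 10.00  # gpt-5 output rate


def estimate_cost(prompt_tokens: int, completion_tokens: int) -> float:
    """Differential pricing: prompt and completion tokens are billed at different rates."""
    return (prompt_tokens / 1_000_000) * PROMPT_RATE_PER_1M + (
        completion_tokens / 1_000_000
    ) * COMPLETION_RATE_PER_1M


# 1,200 prompt tokens and 350 completion tokens -> $0.0015 + $0.0035 = $0.0050
print(f"${estimate_cost(1200, 350):.4f}")
```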
@@ -836,38 +913,6 @@ genai_otel.instrument(
  - `gen_ai.eval.bias_categories` - Detected bias types (array)
  - `gen_ai.eval.toxicity_categories` - Toxicity categories (array)
 
- #### 📊 Enhanced OpenTelemetry Compliance
-
- Completing remaining items from [OTEL_SEMANTIC_GAP_ANALYSIS_AND_IMPLEMENTATION_PLAN.md](OTEL_SEMANTIC_GAP_ANALYSIS_AND_IMPLEMENTATION_PLAN.md):
-
- **Phase 4: Optional Enhancements (✅ COMPLETED)**
-
- All Phase 4 features are now available! See the [Advanced Features](#advanced-features) section for detailed documentation.
-
- - ✅ **Session & User Tracking** - Track sessions and users across requests with custom extractor functions
- - Configurable via `session_id_extractor` and `user_id_extractor` in `OTelConfig`
- - Automatically adds `session.id` and `user.id` span attributes
- - See [Session and User Tracking](#session-and-user-tracking) for usage examples
-
- - ✅ **RAG/Embedding Attributes** - Enhanced observability for retrieval-augmented generation
- - Helper methods: `add_embedding_attributes()` and `add_retrieval_attributes()`
- - Embedding attributes: `embedding.model_name`, `embedding.text`, `embedding.vector.dimension`
- - Retrieval attributes: `retrieval.query`, `retrieval.document_count`, `retrieval.documents.{i}.document.*`
- - See [RAG and Embedding Attributes](#rag-and-embedding-attributes) for usage examples
- - Complete example: `examples/phase4_session_rag_tracking.py`
-
- **Note on Agent Workflow Tracking:**
-
- Agent workflow observability is already provided by the OpenInference Smolagents instrumentor (included when `smolagents` is in `enabled_instrumentors`). This is not a new Phase 4 feature, but an existing capability:
-
- - `openinference.span.kind: "AGENT"` - Identifies agent spans
- - `agent.name` - Agent identifier (via OpenInference)
- - `agent.iteration` - Current iteration number (via OpenInference)
- - `agent.action` - Action taken (via OpenInference)
- - `agent.observation` - Observation received (via OpenInference)
-
- Agent tracking requires Python >= 3.10 and the `smolagents` library. See [OpenInference Integration](#openinference-optional---python-310-only) for details.
-
  #### 🔄 Migration Support
 
  **Backward Compatibility:**
@@ -1,5 +1,11 @@
  # GenAI OpenTelemetry Auto-Instrumentation
 
+ <div align="center">
+ <img src=".github/images/Logo.jpg" alt="GenAI OpenTelemetry Instrumentation Logo" width="400"/>
+ </div>
+
+ <br/>
+
  [![PyPI version](https://badge.fury.io/py/genai-otel-instrument.svg)](https://badge.fury.io/py/genai-otel-instrument)
  [![Python Versions](https://img.shields.io/pypi/pyversions/genai-otel-instrument.svg)](https://pypi.org/project/genai-otel-instrument/)
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
@@ -20,6 +26,14 @@
  [![Semantic Conventions](https://img.shields.io/badge/OTel%20Semconv-GenAI%20v1.28-orange)](https://opentelemetry.io/docs/specs/semconv/gen-ai/)
  [![CI/CD](https://img.shields.io/badge/CI%2FCD-GitHub%20Actions-2088FF?logo=github-actions&logoColor=white)](https://github.com/Mandark-droid/genai_otel_instrument/actions)
 
+ ---
+
+ <div align="center">
+ <img src=".github/images/Landing_Page.jpg" alt="GenAI OpenTelemetry Instrumentation Overview" width="800"/>
+ </div>
+
+ ---
+
  Production-ready OpenTelemetry instrumentation for GenAI/LLM applications with zero-code setup.
 
  ## Features
@@ -77,7 +91,8 @@ For a more comprehensive demonstration of various LLM providers and MCP tools, r
 
  ### LLM Providers (Auto-detected)
  - **With Full Cost Tracking**: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure OpenAI, Cohere, Mistral AI, Together AI, Groq, Ollama, Vertex AI
- - **Hardware/Local Pricing**: Replicate (hardware-based $/second), HuggingFace (local execution, free)
+ - **Hardware/Local Pricing**: Replicate (hardware-based $/second), HuggingFace (local execution with estimated costs)
+ - **HuggingFace Support**: `pipeline()`, `AutoModelForCausalLM.generate()`, `AutoModelForSeq2SeqLM.generate()`, `InferenceClient` API calls
  - **Other Providers**: Anyscale
 
  ### Frameworks
@@ -108,6 +123,65 @@ The processor supports OpenInference semantic conventions:
  pip install genai-otel-instrument[openinference]
  ```
 
+ ## Screenshots
+
+ See the instrumentation in action across different LLM providers and observability backends.
+
+ ### OpenAI Instrumentation
+ Full trace capture for OpenAI API calls with token usage, costs, and latency metrics.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Traces_OpenAI.png" alt="OpenAI Traces" width="900"/>
+ </div>
+
+ ### Ollama (Local LLM) Instrumentation
+ Zero-code instrumentation for local models running on Ollama with comprehensive observability.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Traces_Ollama.png" alt="Ollama Traces" width="900"/>
+ </div>
+
+ ### HuggingFace Transformers
+ Direct instrumentation of HuggingFace Transformers with automatic token counting and cost estimation.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Trace_HuggingFace_Transformer_Models.png" alt="HuggingFace Transformer Traces" width="900"/>
+ </div>
+
+ ### SmolAgents Framework
+ Complete agent workflow tracing with tool calls, iterations, and cost breakdown.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/Traces_SmolAgent_with_tool_calls.png" alt="SmolAgent Traces with Tool Calls" width="900"/>
+ </div>
+
+ ### GPU Metrics Collection
+ Real-time GPU utilization, memory, temperature, and power consumption metrics.
+
+ <div align="center">
+ <img src=".github/images/Screenshots/GPU_Metrics.png" alt="GPU Metrics Dashboard" width="900"/>
+ </div>
+
+ ### Additional Screenshots
+
+ - **[Token Cost Breakdown](.github/images/Screenshots/Traces_SmolAgent_Token_Cost_breakdown.png)** - Detailed token usage and cost analysis for SmolAgent workflows
+ - **[OpenSearch Dashboard](.github/images/Screenshots/GENAI_OpenSearch_output.png)** - GenAI metrics visualization in OpenSearch/Kibana
+
+ ---
+
+ ## Demo Video
+
+ Watch a comprehensive walkthrough of GenAI OpenTelemetry Auto-Instrumentation in action, demonstrating setup, configuration, and real-time observability across multiple LLM providers.
+
+ <div align="center">
+
+ **🎥 [Watch Demo Video](https://youtu.be/YOUR_VIDEO_ID_HERE)**
+ *(Coming Soon)*
+
+ </div>
+
+ ---
+
  ## Cost Tracking Coverage
 
  The library includes comprehensive cost tracking with pricing data for **145+ models** across **11 providers**:
@@ -127,7 +201,10 @@ The library includes comprehensive cost tracking with pricing data for **145+ mo
 
  ### Special Pricing Models
  - **Replicate**: Hardware-based pricing ($/second of GPU/CPU time) - not token-based
- - **HuggingFace Transformers**: Local execution - no API costs
+ - **HuggingFace Transformers**: Local model execution with estimated costs based on parameter count
+ - Supports `pipeline()`, `AutoModelForCausalLM.generate()`, `AutoModelForSeq2SeqLM.generate()`
+ - Cost estimation uses GPU/compute resource pricing tiers (tiny/small/medium/large)
+ - Automatic token counting from tensor shapes
 
  ### Pricing Features
  - **Differential Pricing**: Separate rates for prompt tokens vs. completion tokens
@@ -656,38 +733,6 @@ genai_otel.instrument(
  - `gen_ai.eval.bias_categories` - Detected bias types (array)
  - `gen_ai.eval.toxicity_categories` - Toxicity categories (array)
 
- #### 📊 Enhanced OpenTelemetry Compliance
-
- Completing remaining items from [OTEL_SEMANTIC_GAP_ANALYSIS_AND_IMPLEMENTATION_PLAN.md](OTEL_SEMANTIC_GAP_ANALYSIS_AND_IMPLEMENTATION_PLAN.md):
-
- **Phase 4: Optional Enhancements (✅ COMPLETED)**
-
- All Phase 4 features are now available! See the [Advanced Features](#advanced-features) section for detailed documentation.
-
- - ✅ **Session & User Tracking** - Track sessions and users across requests with custom extractor functions
- - Configurable via `session_id_extractor` and `user_id_extractor` in `OTelConfig`
- - Automatically adds `session.id` and `user.id` span attributes
- - See [Session and User Tracking](#session-and-user-tracking) for usage examples
-
- - ✅ **RAG/Embedding Attributes** - Enhanced observability for retrieval-augmented generation
- - Helper methods: `add_embedding_attributes()` and `add_retrieval_attributes()`
- - Embedding attributes: `embedding.model_name`, `embedding.text`, `embedding.vector.dimension`
- - Retrieval attributes: `retrieval.query`, `retrieval.document_count`, `retrieval.documents.{i}.document.*`
- - See [RAG and Embedding Attributes](#rag-and-embedding-attributes) for usage examples
- - Complete example: `examples/phase4_session_rag_tracking.py`
-
- **Note on Agent Workflow Tracking:**
-
- Agent workflow observability is already provided by the OpenInference Smolagents instrumentor (included when `smolagents` is in `enabled_instrumentors`). This is not a new Phase 4 feature, but an existing capability:
-
- - `openinference.span.kind: "AGENT"` - Identifies agent spans
- - `agent.name` - Agent identifier (via OpenInference)
- - `agent.iteration` - Current iteration number (via OpenInference)
- - `agent.action` - Action taken (via OpenInference)
- - `agent.observation` - Observation received (via OpenInference)
-
- Agent tracking requires Python >= 3.10 and the `smolagents` library. See [OpenInference Integration](#openinference-optional---python-310-only) for details.
-
  #### 🔄 Migration Support
 
  **Backward Compatibility:**
@@ -15,7 +15,7 @@ client = Anthropic()
 
  # Make a simple message request
  message = client.messages.create(
- model="claude-3-5-sonnet-20241022",
+ model="claude-haiku-4-5",
  max_tokens=150,
  messages=[{"role": "user", "content": "Explain OpenTelemetry in one sentence."}],
  )
@@ -0,0 +1,89 @@
+ """HuggingFace AutoModelForCausalLM Example with Token Counting and Cost Tracking.
+
+ This example demonstrates:
+ 1. Auto-instrumentation of AutoModelForCausalLM.generate()
+ 2. Automatic token counting (prompt + completion tokens)
+ 3. Cost calculation for local model inference
+ 4. Full observability with traces and metrics
+
+ Requirements:
+ pip install transformers torch
+ """
+
+ import genai_otel
+
+ # Auto-instrument HuggingFace Transformers
+ genai_otel.instrument()
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ print("\n" + "=" * 80)
+ print("Loading model and tokenizer...")
+ print("=" * 80 + "\n")
+
+ # Load a small model for testing (~0.6B parameters)
+ model_name = "Qwen/Qwen3-0.6B"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ print(f"Model loaded: {model_name}")
+ print(f"Model config: {model.config._name_or_path}\n")
+
+ # Prepare input
+ prompt = "The future of AI is"
+ inputs = tokenizer(prompt, return_tensors="pt")
+
+ print(f"Prompt: '{prompt}'")
+ print(f"Input tokens: {inputs['input_ids'].shape[-1]}\n")
+
+ print("=" * 80)
+ print("Generating text (instrumented)...")
+ print("=" * 80 + "\n")
+
+ # Generate text - This is automatically instrumented!
+ # The wrapper will:
+ # - Create a span with model info
+ # - Count input tokens (from input_ids.shape)
+ # - Count output tokens (from generated sequence length)
+ # - Calculate cost based on the model's parameter count (tier pricing)
+ # - Record metrics for tokens and cost
+ outputs = model.generate(
+ inputs["input_ids"],
+ max_new_tokens=50,
+ temperature=0.7,
+ do_sample=True,
+ pad_token_id=tokenizer.eos_token_id,
+ )
+
+ # Decode the generated text
+ generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ print(f"Generated text: {generated_text}\n")
+ print(f"Total output tokens: {outputs.shape[-1]}")
+ print(f"Input tokens: {inputs['input_ids'].shape[-1]}")
+ print(f"Generated (new) tokens: {outputs.shape[-1] - inputs['input_ids'].shape[-1]}\n")
+
+ print("=" * 80)
+ print("Telemetry captured:")
+ print("=" * 80)
+ print("✓ Span created: huggingface.model.generate")
+ print("✓ Attributes set:")
+ print(f" - gen_ai.system: huggingface")
+ print(f" - gen_ai.request.model: {model_name}")
+ print(f" - gen_ai.operation.name: text_generation")
+ print(f" - gen_ai.usage.prompt_tokens: {inputs['input_ids'].shape[-1]}")
+ print(f" - gen_ai.usage.completion_tokens: {outputs.shape[-1] - inputs['input_ids'].shape[-1]}")
+ print(f" - gen_ai.usage.total_tokens: {outputs.shape[-1]}")
+ print(" - gen_ai.usage.cost.total: $X.XXXXXX (estimated)")
+ print(" - gen_ai.usage.cost.prompt: $X.XXXXXX")
+ print(" - gen_ai.usage.cost.completion: $X.XXXXXX")
+ print("\n✓ Metrics recorded:")
+ print(" - gen_ai.requests counter")
+ print(" - gen_ai.client.token.usage (prompt + completion)")
+ print(" - gen_ai.client.operation.duration histogram")
+ print(" - gen_ai.usage.cost counter")
+ print("\n✓ Traces and metrics exported to OTLP endpoint!")
+ print("=" * 80)
+
+ print("\nNote: Cost is estimated based on model size (Qwen3-0.6B ≈ 0.6B params)")
+ print("Local models are free to run, but costs reflect GPU/compute resources.")
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
 
- __version__ = version = '0.1.7.dev0'
- __version_tuple__ = version_tuple = (0, 1, 7, 'dev0')
+ __version__ = version = '0.1.10.dev0'
+ __version_tuple__ = version_tuple = (0, 1, 10, 'dev0')
 
- __commit_id__ = commit_id = 'g6e54041fe'
+ __commit_id__ = commit_id = 'gb29b5b98f'
@@ -19,6 +19,7 @@ from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExport
  from .config import OTelConfig
  from .cost_calculator import CostCalculator
  from .cost_enrichment_processor import CostEnrichmentSpanProcessor
+ from .cost_enriching_exporter import CostEnrichingSpanExporter
  from .gpu_metrics import GPUMetricsCollector
  from .mcp_instrumentors import MCPInstrumentorManager
  from .metrics import (
@@ -169,14 +170,17 @@ def setup_auto_instrumentation(config: OTelConfig):
 
  set_global_textmap(TraceContextTextMapPropagator())
 
- # Add cost enrichment processor for OpenInference instrumentors
- # This enriches spans from smolagents, litellm, mcp with cost attributes
+ # Add cost enrichment processor for custom instrumentors (OpenAI, Ollama, etc.)
+ # These instrumentors set cost attributes directly, so processor is mainly for logging
+ # Also attempts to enrich OpenInference spans (smolagents, litellm, mcp), though
+ # the processor can't modify ReadableSpan - the exporter below handles that
+ cost_calculator = None
  if config.enable_cost_tracking:
  try:
  cost_calculator = CostCalculator()
  cost_processor = CostEnrichmentSpanProcessor(cost_calculator)
  tracer_provider.add_span_processor(cost_processor)
- logger.info("Cost enrichment processor added for OpenInference instrumentors")
+ logger.info("Cost enrichment processor added")
  except Exception as e:
  logger.warning(f"Failed to add cost enrichment processor: {e}", exc_info=True)