tech-hub-skills 1.2.0 → 1.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/README.md +291 -0
- package/.claude/bin/cli.js +266 -0
- package/.claude/package.json +46 -0
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/prompt_ab_tester.py +356 -0
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/prompt_template_manager.py +274 -0
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/token_cost_estimator.py +324 -0
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/document_chunker.py +336 -0
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/rag_pipeline.sql +213 -0
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/bronze_ingestion.py +337 -0
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/medallion_queries.sql +300 -0
- package/.claude/roles/data-scientist/skills/01-eda-automation/eda_generator.py +446 -0
- package/.claude/roles/system-design/skills/08-process-automation/ai_prompt_generator.py +744 -0
- package/.claude/roles/system-design/skills/08-process-automation/automation_recommender.py +688 -0
- package/.claude/roles/system-design/skills/08-process-automation/plan_generator.py +679 -0
- package/.claude/roles/system-design/skills/08-process-automation/process_analyzer.py +528 -0
- package/.claude/roles/system-design/skills/08-process-automation/process_parser.py +684 -0
- package/.claude/roles/system-design/skills/08-process-automation/role_matcher.py +615 -0
- package/.claude/skills/README.md +336 -0
- package/.claude/skills/ai-engineer.md +104 -0
- package/.claude/skills/aws.md +143 -0
- package/.claude/skills/azure.md +149 -0
- package/.claude/skills/backend-developer.md +108 -0
- package/.claude/skills/code-review.md +399 -0
- package/.claude/skills/compliance-automation.md +747 -0
- package/.claude/skills/compliance-officer.md +108 -0
- package/.claude/skills/data-engineer.md +113 -0
- package/.claude/skills/data-governance.md +102 -0
- package/.claude/skills/data-scientist.md +123 -0
- package/.claude/skills/database-admin.md +109 -0
- package/.claude/skills/devops.md +160 -0
- package/.claude/skills/docker.md +160 -0
- package/.claude/skills/enterprise-dashboard.md +613 -0
- package/.claude/skills/finops.md +184 -0
- package/.claude/skills/frontend-developer.md +108 -0
- package/.claude/skills/gcp.md +143 -0
- package/.claude/skills/ml-engineer.md +115 -0
- package/.claude/skills/mlops.md +187 -0
- package/.claude/skills/network-engineer.md +109 -0
- package/.claude/skills/optimization-advisor.md +329 -0
- package/.claude/skills/orchestrator.md +623 -0
- package/.claude/skills/platform-engineer.md +102 -0
- package/.claude/skills/process-automation.md +226 -0
- package/.claude/skills/process-changelog.md +184 -0
- package/.claude/skills/process-documentation.md +484 -0
- package/.claude/skills/process-kanban.md +324 -0
- package/.claude/skills/process-versioning.md +214 -0
- package/.claude/skills/product-designer.md +104 -0
- package/.claude/skills/project-starter.md +443 -0
- package/.claude/skills/qa-engineer.md +109 -0
- package/.claude/skills/security-architect.md +135 -0
- package/.claude/skills/sre.md +109 -0
- package/.claude/skills/system-design.md +126 -0
- package/.claude/skills/technical-writer.md +101 -0
- package/.gitattributes +2 -0
- package/GITHUB_COPILOT.md +106 -0
- package/README.md +117 -224
- package/package.json +4 -42
- package/bin/cli.js +0 -241
- /package/{LICENSE → .claude/LICENSE} +0 -0
- /package/{bin → .claude/bin}/copilot.js +0 -0
- /package/{bin → .claude/bin}/postinstall.js +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/README.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/ai-engineer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/aws.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/azure.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/backend-developer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/code-review.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/compliance-automation.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/compliance-officer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/data-engineer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/data-governance.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/data-scientist.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/database-admin.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/devops.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/docker.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/enterprise-dashboard.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/finops.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/frontend-developer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/gcp.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/ml-engineer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/mlops.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/network-engineer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/optimization-advisor.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/orchestrator.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/platform-engineer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/process-automation.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/process-changelog.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/process-documentation.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/process-kanban.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/process-versioning.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/product-designer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/project-starter.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/qa-engineer.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/security-architect.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/sre.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/system-design.md +0 -0
- /package/{tech_hub_skills/skills → .claude/commands}/technical-writer.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/01-prompt-engineering/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/02-rag-pipeline/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/03-agent-orchestration/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/04-llm-guardrails/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/05-vector-embeddings/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/06-llm-evaluation/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/01-infrastructure-fundamentals/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/02-data-factory/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/03-synapse-analytics/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/04-databricks/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/05-functions/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/06-kubernetes-service/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/07-openai-service/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/08-machine-learning/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/09-storage-adls/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/10-networking/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/11-sql-cosmos/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/azure/skills/12-event-hubs/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/code-review/skills/01-automated-code-review/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/code-review/skills/02-pr-review-workflow/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/code-review/skills/03-code-quality-gates/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/code-review/skills/04-reviewer-assignment/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/code-review/skills/05-review-analytics/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-engineer/skills/01-lakehouse-architecture/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-engineer/skills/02-etl-pipeline/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-engineer/skills/03-data-quality/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-engineer/skills/04-streaming-pipelines/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-engineer/skills/05-performance-optimization/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-governance/skills/01-data-catalog/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-governance/skills/02-data-lineage/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-governance/skills/03-data-quality-framework/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-governance/skills/04-access-control/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-governance/skills/05-master-data-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-governance/skills/06-compliance-privacy/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/01-eda-automation/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/02-statistical-modeling/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/03-feature-engineering/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/04-predictive-modeling/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/05-customer-analytics/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/06-campaign-analysis/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/07-experimentation/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/data-scientist/skills/08-data-visualization/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/01-cicd-pipeline/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/02-container-orchestration/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/03-infrastructure-as-code/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/04-gitops/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/05-environment-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/06-automated-testing/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/07-release-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/08-monitoring-alerting/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/devops/skills/09-devsecops/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/01-cost-visibility/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/02-resource-tagging/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/03-budget-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/04-reserved-instances/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/05-spot-optimization/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/06-storage-tiering/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/07-compute-rightsizing/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/finops/skills/08-chargeback/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/01-mlops-pipeline/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/02-feature-engineering/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/03-model-training/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/04-model-serving/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/05-model-monitoring/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/01-ml-pipeline-orchestration/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/02-experiment-tracking/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/03-model-registry/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/04-feature-store/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/05-model-deployment/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/06-model-observability/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/07-data-versioning/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/08-ab-testing/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/mlops/skills/09-automated-retraining/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/01-internal-developer-platform/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/02-self-service-infrastructure/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/03-slo-sli-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/04-developer-experience/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/05-incident-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/06-capacity-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/product-designer/skills/01-requirements-discovery/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/product-designer/skills/02-user-research/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/product-designer/skills/03-brainstorming-ideation/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/product-designer/skills/04-ux-design/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/product-designer/skills/05-product-market-fit/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/product-designer/skills/06-stakeholder-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/01-pii-detection/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/02-threat-modeling/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/03-infrastructure-security/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/04-iam/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/05-application-security/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/06-secrets-management/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/security-architect/skills/07-security-monitoring/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/01-architecture-patterns/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/02-requirements-engineering/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/03-scalability/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/04-high-availability/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/05-cost-optimization-design/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/06-api-design/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/07-observability-architecture/README.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/08-process-automation/PROCESS_TEMPLATE.md +0 -0
- /package/{tech_hub_skills → .claude}/roles/system-design/skills/08-process-automation/README.md +0 -0
|
@@ -0,0 +1,324 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Token Cost Estimator for Multiple LLM Providers
|
|
3
|
+
Calculate and compare costs across OpenAI, Anthropic, Google, and more.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import tiktoken
|
|
7
|
+
from typing import Dict, Optional, Tuple
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from enum import Enum
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Provider(Enum):
    """Supported LLM providers.

    Values are lowercase provider slugs; they key the tokenizer lookup in
    TokenCounter and are returned by CostEstimator._detect_provider.
    """
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
    META = "meta"
    MISTRAL = "mistral"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
class ModelPricing:
    """Pricing information for a model.

    All monetary figures are USD per 1,000 tokens; see PRICING_TABLE for
    the concrete per-model values.
    """
    input_cost_per_1k: float  # Cost per 1K input (prompt) tokens, USD
    output_cost_per_1k: float  # Cost per 1K output (completion) tokens, USD
    context_window: int  # Maximum context window in tokens
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# Pricing as of December 2025 (prices in USD)
# NOTE(review): provider list prices change frequently — verify these
# figures against each provider's published pricing page before relying
# on them for billing decisions.
PRICING_TABLE: Dict[str, ModelPricing] = {
    # OpenAI
    "gpt-4-turbo": ModelPricing(0.01, 0.03, 128000),
    "gpt-4": ModelPricing(0.03, 0.06, 8192),
    "gpt-3.5-turbo": ModelPricing(0.0005, 0.0015, 16385),
    "gpt-4o": ModelPricing(0.005, 0.015, 128000),
    "gpt-4o-mini": ModelPricing(0.00015, 0.0006, 128000),

    # Anthropic
    "claude-opus-4": ModelPricing(0.015, 0.075, 200000),
    "claude-sonnet-4": ModelPricing(0.003, 0.015, 200000),
    "claude-haiku-4": ModelPricing(0.00025, 0.00125, 200000),
    "claude-3-opus": ModelPricing(0.015, 0.075, 200000),
    "claude-3-sonnet": ModelPricing(0.003, 0.015, 200000),
    "claude-3-haiku": ModelPricing(0.00025, 0.00125, 200000),

    # Google
    "gemini-pro": ModelPricing(0.000125, 0.000375, 32760),
    "gemini-pro-vision": ModelPricing(0.000125, 0.000375, 16384),
    "gemini-ultra": ModelPricing(0.001, 0.002, 32760),
    "gemini-1.5-pro": ModelPricing(0.00125, 0.005, 1000000),
    "gemini-1.5-flash": ModelPricing(0.000125, 0.0005, 1000000),

    # Meta (via hosting providers - example pricing)
    "llama-3-70b": ModelPricing(0.0007, 0.0009, 8192),
    "llama-3-8b": ModelPricing(0.0002, 0.0002, 8192),

    # Mistral
    "mistral-large": ModelPricing(0.004, 0.012, 32000),
    "mistral-medium": ModelPricing(0.0027, 0.0081, 32000),
    "mistral-small": ModelPricing(0.001, 0.003, 32000),
}
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class TokenCounter:
    """Count tokens for different providers.

    OpenAI text is counted exactly with tiktoken's cl100k_base encoding;
    all other providers are approximated with the same encoding. In
    production, use provider-specific tokenizers for non-OpenAI models.
    """

    def __init__(self):
        self.encoders = {
            Provider.OPENAI: tiktoken.get_encoding("cl100k_base"),
            # For other providers, we approximate with cl100k_base
            # In production, use provider-specific tokenizers
        }

    def count_tokens(self, text: str, provider: Provider = Provider.OPENAI) -> int:
        """Count tokens in ``text`` for ``provider``.

        Falls back to the OpenAI encoder when no provider-specific
        encoder is registered.
        """
        encoder = self.encoders.get(provider, self.encoders[Provider.OPENAI])
        return len(encoder.encode(text))

    def count_tokens_for_messages(
        self,
        messages: list,
        model: str = "gpt-4"
    ) -> int:
        """Count tokens for chat completion messages.

        Args:
            messages: Chat messages as dicts (role/content/name keys).
            model: Model name used to select the tokenizer.

        Returns:
            Estimated prompt token count, including per-message framing
            overhead and the assistant reply primer.
        """
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            # BUGFIX: encoding_for_model raises KeyError for names tiktoken
            # does not know (e.g. non-OpenAI models); fall back to the
            # default chat encoding instead of crashing.
            encoding = tiktoken.get_encoding("cl100k_base")

        # Per-message framing overhead. The original if/else set identical
        # constants in both branches (dead branching), so a single pair of
        # constants suffices for current gpt-3.5/gpt-4 family models.
        tokens_per_message = 3
        tokens_per_name = 1

        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(str(value)))
                if key == "name":
                    num_tokens += tokens_per_name

        num_tokens += 3  # every reply is primed with assistant
        return num_tokens
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class CostEstimator:
    """Estimate costs for LLM API calls.

    Uses TokenCounter for input-token counting and PRICING_TABLE for
    per-model rates. All monetary figures are USD.
    """

    def __init__(self):
        self.token_counter = TokenCounter()

    def estimate_cost(
        self,
        input_text: str,
        model: str,
        estimated_output_tokens: int = 0,
        provider: Optional[Provider] = None
    ) -> Dict[str, float]:
        """
        Estimate the cost of an LLM API call.

        Args:
            input_text: The input prompt
            model: Model name (e.g., 'gpt-4', 'claude-3-opus')
            estimated_output_tokens: Expected output length
            provider: LLM provider (auto-detected if None)

        Returns:
            Dictionary with cost breakdown: token counts, input/output/
            total cost, context window size and utilization ratio.

        Raises:
            ValueError: If ``model`` is not present in PRICING_TABLE.
        """
        # Auto-detect provider if not specified
        if provider is None:
            provider = self._detect_provider(model)

        # Get pricing info
        pricing = PRICING_TABLE.get(model)
        if not pricing:
            raise ValueError(f"Unknown model: {model}")

        # Count input tokens
        input_tokens = self.token_counter.count_tokens(input_text, provider)

        # Calculate costs (rates are per 1K tokens)
        input_cost = (input_tokens / 1000) * pricing.input_cost_per_1k
        output_cost = (estimated_output_tokens / 1000) * pricing.output_cost_per_1k
        total_cost = input_cost + output_cost

        return {
            "input_tokens": input_tokens,
            "output_tokens": estimated_output_tokens,
            "total_tokens": input_tokens + estimated_output_tokens,
            "input_cost": input_cost,
            "output_cost": output_cost,
            "total_cost": total_cost,
            "currency": "USD",
            "model": model,
            "context_window": pricing.context_window,
            "context_utilization": (input_tokens + estimated_output_tokens) / pricing.context_window
        }

    def compare_models(
        self,
        input_text: str,
        models: list,
        estimated_output_tokens: int = 500
    ) -> Dict[str, Dict[str, float]]:
        """Compare costs across multiple models.

        Unknown models are reported as ``{"error": ...}`` entries instead
        of aborting the whole comparison.
        """
        comparisons = {}

        for model in models:
            try:
                comparisons[model] = self.estimate_cost(
                    input_text, model, estimated_output_tokens
                )
            except ValueError as e:
                comparisons[model] = {"error": str(e)}

        return comparisons

    def _detect_provider(self, model: str) -> Provider:
        """Auto-detect provider from model name (defaults to OpenAI)."""
        name = model.lower()  # lowercase once instead of per comparison
        if "gpt" in name:
            return Provider.OPENAI
        elif "claude" in name:
            return Provider.ANTHROPIC
        elif "gemini" in name:
            return Provider.GOOGLE
        elif "llama" in name:
            return Provider.META
        elif "mistral" in name:
            return Provider.MISTRAL
        else:
            return Provider.OPENAI  # Default

    def estimate_monthly_cost(
        self,
        queries_per_day: int,
        avg_input_tokens: int,
        avg_output_tokens: int,
        model: str
    ) -> Dict[str, float]:
        """Estimate monthly costs based on usage patterns.

        Args:
            queries_per_day: Expected daily query volume.
            avg_input_tokens: Average prompt size per query, in tokens.
            avg_output_tokens: Average completion size per query, in tokens.
            model: Model name (must exist in PRICING_TABLE).

        Returns:
            Daily/monthly/yearly cost projection plus per-query cost.

        Raises:
            ValueError: If ``model`` is not present in PRICING_TABLE.
        """
        pricing = PRICING_TABLE.get(model)
        if not pricing:
            raise ValueError(f"Unknown model: {model}")

        # Daily costs
        daily_input_cost = (queries_per_day * avg_input_tokens / 1000) * pricing.input_cost_per_1k
        daily_output_cost = (queries_per_day * avg_output_tokens / 1000) * pricing.output_cost_per_1k
        daily_cost = daily_input_cost + daily_output_cost

        # Monthly projection (30 days)
        monthly_cost = daily_cost * 30

        # BUGFIX: guard against ZeroDivisionError when no traffic is
        # projected (queries_per_day == 0).
        cost_per_query = daily_cost / queries_per_day if queries_per_day else 0.0

        return {
            "queries_per_day": queries_per_day,
            "daily_cost": daily_cost,
            "monthly_cost": monthly_cost,
            "yearly_cost": monthly_cost * 12,
            "cost_per_query": cost_per_query,
            "model": model
        }
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def format_cost_comparison(comparisons: Dict[str, Dict[str, float]]) -> str:
    """Format cost comparison for display.

    Models are listed cheapest-first by total cost. Models that failed
    estimation (entries containing an "error" key) are appended at the
    end of the report instead of being silently dropped, which was the
    original behavior.
    """
    lines = ["🔍 Cost Comparison", "=" * 70]

    # Sort successful estimates by total cost, cheapest first.
    sorted_models = sorted(
        ((k, v) for k, v in comparisons.items() if "error" not in v),
        key=lambda item: item[1]["total_cost"]
    )

    for model, cost in sorted_models:
        lines.append(f"\n📊 {model}")
        lines.append(f"   Input: {cost['input_tokens']:,} tokens → ${cost['input_cost']:.6f}")
        lines.append(f"   Output: {cost['output_tokens']:,} tokens → ${cost['output_cost']:.6f}")
        lines.append(f"   Total: ${cost['total_cost']:.6f}")
        lines.append(f"   Context: {cost['context_utilization']:.1%} utilized")

    # Surface failures so the caller can see which models were skipped.
    for model, info in comparisons.items():
        if "error" in info:
            lines.append(f"\n⚠️ {model}: {info['error']}")

    return "\n".join(lines)
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# Example usage
|
|
246
|
+
if __name__ == "__main__":
    # Demo: estimate a single call, compare several models, and project
    # monthly spend for a fixed workload. Requires network-free tiktoken
    # encodings to be available locally.
    estimator = CostEstimator()

    # Example prompt (marketing-analysis style, ~200 tokens)
    prompt = """Analyze this marketing campaign and provide detailed recommendations:

Campaign: Summer Sale 2025
Budget: $50,000
Duration: 30 days
Channels: Email, Social Media, Search Ads

Current Metrics (Week 1):
- Impressions: 1,200,000
- Clicks: 24,000 (CTR: 2%)
- Conversions: 480 (Conv Rate: 2%)
- Revenue: $48,000
- ROAS: 0.96

Please provide:
1. Performance analysis
2. Optimization recommendations
3. Budget reallocation strategy
4. Projected outcomes
"""

    # Single model estimate
    print("=" * 70)
    print("💰 Single Model Estimation")
    print("=" * 70)

    cost = estimator.estimate_cost(
        input_text=prompt,
        model="gpt-4o",
        estimated_output_tokens=1000
    )

    print(f"\nModel: {cost['model']}")
    print(f"Input tokens: {cost['input_tokens']:,}")
    print(f"Estimated output tokens: {cost['output_tokens']:,}")
    print(f"Total cost: ${cost['total_cost']:.4f}")
    print(f"Context utilization: {cost['context_utilization']:.1%}")

    # Compare multiple models across providers (all keys of PRICING_TABLE)
    print("\n" + "=" * 70)
    models_to_compare = [
        "gpt-4o",
        "gpt-4o-mini",
        "claude-sonnet-4",
        "claude-haiku-4",
        "gemini-1.5-pro",
        "gemini-1.5-flash"
    ]

    comparisons = estimator.compare_models(
        input_text=prompt,
        models=models_to_compare,
        estimated_output_tokens=1000
    )

    print(format_cost_comparison(comparisons))

    # Monthly cost projection for a steady 1,000 queries/day workload
    print("\n" + "=" * 70)
    print("📅 Monthly Cost Projection")
    print("=" * 70)

    monthly = estimator.estimate_monthly_cost(
        queries_per_day=1000,
        avg_input_tokens=500,
        avg_output_tokens=800,
        model="gpt-4o"
    )

    print(f"\nModel: {monthly['model']}")
    print(f"Queries per day: {monthly['queries_per_day']:,}")
    print(f"Daily cost: ${monthly['daily_cost']:.2f}")
    print(f"Monthly cost: ${monthly['monthly_cost']:.2f}")
    print(f"Yearly cost: ${monthly['yearly_cost']:,.2f}")
    print(f"Cost per query: ${monthly['cost_per_query']:.4f}")
|
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Advanced Document Chunking for RAG Systems
|
|
3
|
+
Supports semantic, recursive, and fixed-size chunking strategies.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from typing import List, Dict, Any, Optional
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from enum import Enum
|
|
9
|
+
import re
|
|
10
|
+
from langchain.text_splitter import (
|
|
11
|
+
RecursiveCharacterTextSplitter,
|
|
12
|
+
CharacterTextSplitter,
|
|
13
|
+
TokenTextSplitter
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ChunkStrategy(Enum):
    """Available chunking strategies for DocumentChunker."""
    FIXED = "fixed"  # Fixed character/token size (CharacterTextSplitter)
    SEMANTIC = "semantic"  # Semantic boundaries (paragraphs, sentences); custom logic
    RECURSIVE = "recursive"  # Recursive splitting with multiple separators
    SLIDING_WINDOW = "sliding_window"  # Overlapping windows; custom logic
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass
class Chunk:
    """A document chunk with metadata.

    ``char_count`` may now be omitted (or passed as 0); it is then
    derived from ``content`` in ``__post_init__``. Previously callers
    were forced to pass a value even though 0 was auto-filled.
    """
    content: str  # The chunk text itself
    chunk_id: str  # Unique id, formatted "<document_id>_chunk_<index>"
    document_id: str  # Id of the source document
    chunk_index: int  # 0-based position within the document
    metadata: Dict[str, Any]  # Caller-supplied metadata
    char_count: int = 0  # Length of content; auto-filled when 0
    token_count: Optional[int] = None  # Optional token count (not computed here)

    def __post_init__(self):
        # Derive the character count when the caller did not provide one.
        if self.char_count == 0:
            self.char_count = len(self.content)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class DocumentChunker:
    """Advanced document chunker with multiple strategies.

    Wraps LangChain splitters for the FIXED and RECURSIVE strategies and
    implements SEMANTIC and SLIDING_WINDOW chunking locally.
    """

    def __init__(
        self,
        strategy: ChunkStrategy = ChunkStrategy.RECURSIVE,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
        separators: Optional[List[str]] = None
    ):
        """
        Initialize document chunker.

        Args:
            strategy: Chunking strategy to use
            chunk_size: Target chunk size (characters or tokens)
            chunk_overlap: Overlap between consecutive chunks; must be
                smaller than chunk_size so chunking always advances
            separators: Custom separators for recursive splitting

        Raises:
            ValueError: If chunk_size is not positive, or chunk_overlap
                is negative or not smaller than chunk_size (such a
                configuration would make the sliding-window strategy
                loop forever).
        """
        if chunk_size <= 0:
            raise ValueError(f"chunk_size must be positive, got {chunk_size}")
        if not 0 <= chunk_overlap < chunk_size:
            raise ValueError(
                f"chunk_overlap must be in [0, chunk_size), got {chunk_overlap}"
            )

        self.strategy = strategy
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.separators = separators or ["\n\n", "\n", ". ", " ", ""]

        self._init_splitter()

    def _init_splitter(self):
        """Initialize the appropriate text splitter (None for SEMANTIC)."""
        if self.strategy == ChunkStrategy.RECURSIVE:
            self.splitter = RecursiveCharacterTextSplitter(
                chunk_size=self.chunk_size,
                chunk_overlap=self.chunk_overlap,
                separators=self.separators,
                length_function=len
            )
        elif self.strategy == ChunkStrategy.FIXED:
            self.splitter = CharacterTextSplitter(
                chunk_size=self.chunk_size,
                chunk_overlap=self.chunk_overlap,
                separator="\n"
            )
        elif self.strategy == ChunkStrategy.SEMANTIC:
            # Semantic chunking is implemented locally; no splitter needed.
            self.splitter = None
        else:
            # SLIDING_WINDOW is also implemented locally, but keep a
            # recursive splitter as a safe fallback for other values.
            self.splitter = RecursiveCharacterTextSplitter(
                chunk_size=self.chunk_size,
                chunk_overlap=self.chunk_overlap
            )

    def chunk_document(
        self,
        text: str,
        document_id: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> List[Chunk]:
        """
        Chunk a document into smaller pieces.

        Args:
            text: Document text
            document_id: Unique document identifier
            metadata: Additional metadata copied onto every chunk

        Returns:
            List of Chunk objects (empty for empty input)
        """
        # Guard: nothing to chunk (also avoids calling the splitter).
        if not text:
            return []

        metadata = metadata or {}

        if self.strategy == ChunkStrategy.SEMANTIC:
            text_chunks = self._semantic_chunking(text)
        elif self.strategy == ChunkStrategy.SLIDING_WINDOW:
            text_chunks = self._sliding_window_chunking(text)
        else:
            text_chunks = self.splitter.split_text(text)

        return [
            Chunk(
                content=chunk_text,
                chunk_id=f"{document_id}_chunk_{idx}",
                document_id=document_id,
                chunk_index=idx,
                metadata={**metadata, "strategy": self.strategy.value},
                char_count=len(chunk_text)
            )
            for idx, chunk_text in enumerate(text_chunks)
        ]

    def _semantic_chunking(self, text: str) -> List[str]:
        """
        Chunk by semantic boundaries (paragraphs with context).

        This strategy:
        1. Splits on paragraph boundaries
        2. Combines small paragraphs
        3. Splits oversized paragraphs on sentence boundaries
        """
        chunks: List[str] = []
        current_chunk: List[str] = []
        current_length = 0

        # Paragraphs are separated by blank lines.
        for para in re.split(r'\n\s*\n', text):
            para = para.strip()
            if not para:
                continue

            para_length = len(para)

            if para_length > self.chunk_size:
                # Flush whatever was accumulated, then split the big
                # paragraph on sentence boundaries.
                if current_chunk:
                    chunks.append("\n\n".join(current_chunk))
                    current_chunk = []
                    current_length = 0
                chunks.extend(self._split_large_paragraph(para))
            elif current_length + para_length > self.chunk_size:
                # Adding this paragraph would overflow: start a new chunk.
                if current_chunk:
                    chunks.append("\n\n".join(current_chunk))
                current_chunk = [para]
                current_length = para_length
            else:
                current_chunk.append(para)
                current_length += para_length + 2  # +2 for the "\n\n" joiner

        # Flush the trailing chunk.
        if current_chunk:
            chunks.append("\n\n".join(current_chunk))

        return chunks

    def _split_large_paragraph(self, para: str) -> List[str]:
        """Split one oversized paragraph on sentence boundaries."""
        pieces: List[str] = []
        sentence_buf: List[str] = []
        buf_length = 0

        for sentence in re.split(r'(?<=[.!?])\s+', para):
            sent_length = len(sentence)
            if buf_length + sent_length > self.chunk_size:
                if sentence_buf:
                    pieces.append(" ".join(sentence_buf))
                sentence_buf = [sentence]
                buf_length = sent_length
            else:
                sentence_buf.append(sentence)
                buf_length += sent_length + 1  # +1 for the joining space

        if sentence_buf:
            pieces.append(" ".join(sentence_buf))

        return pieces

    def _sliding_window_chunking(self, text: str) -> List[str]:
        """
        Create overlapping chunks with a sliding window.

        Useful for ensuring important content at chunk boundaries isn't
        lost. Guarantees forward progress (no infinite loop) and stops
        once the tail of the text has been emitted, so no chunk is a
        pure suffix of the previous one.
        """
        chunks: List[str] = []
        start = 0
        text_len = len(text)

        while start < text_len:
            end = start + self.chunk_size
            piece = text[start:end]

            # Prefer ending at a sentence boundary, but only when that
            # keeps the chunk at least half the target size.
            if end < text_len:
                last_period = piece.rfind('. ')
                if last_period > self.chunk_size // 2:
                    piece = piece[:last_period + 1]
                    end = start + last_period + 1

            piece = piece.strip()
            if piece:  # skip empty chunks produced by whitespace runs
                chunks.append(piece)

            if end >= text_len:
                # The tail has been consumed; stepping back by the
                # overlap would only re-emit part of the last chunk.
                break

            # Step forward with overlap; fall back to `end` so the loop
            # always advances even when the boundary adjustment shrank
            # the chunk below the overlap (would otherwise stall).
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def chunk_multiple_documents(
        self,
        documents: List[Dict[str, Any]]
    ) -> List[Chunk]:
        """
        Chunk multiple documents.

        Args:
            documents: List of dicts with 'id', 'text', and optional 'metadata'

        Returns:
            List of all chunks, in document order
        """
        all_chunks: List[Chunk] = []

        for doc in documents:
            all_chunks.extend(self.chunk_document(
                text=doc['text'],
                document_id=doc['id'],
                metadata=doc.get('metadata', {})
            ))

        return all_chunks

    def get_chunk_statistics(self, chunks: List[Chunk]) -> Dict[str, Any]:
        """Get size/coverage statistics about chunks ({} for no chunks)."""
        if not chunks:
            return {}

        char_counts = [c.char_count for c in chunks]

        return {
            "total_chunks": len(chunks),
            "total_characters": sum(char_counts),
            "avg_chunk_size": sum(char_counts) / len(chunks),
            "min_chunk_size": min(char_counts),
            "max_chunk_size": max(char_counts),
            "unique_documents": len(set(c.document_id for c in chunks)),
            "strategy": self.strategy.value
        }
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
# Example usage
|
|
270
|
+
if __name__ == "__main__":
    # Sample document used to demo every chunking strategy.
    sample_doc = """
Marketing Campaign Analysis Best Practices

Effective marketing campaign analysis requires a systematic approach to data collection and interpretation.

Data Collection
First, ensure you're tracking the right metrics. Common KPIs include impression count, click-through rates (CTR), conversion rates, and return on ad spend (ROAS). Use tracking pixels and UTM parameters to accurately attribute conversions.

Campaign Segmentation
Break down your analysis by campaign type, channel, audience segment, and time period. This granular view helps identify what's working and what isn't. For example, email campaigns might perform better with certain demographics, while social media ads resonate with others.

Performance Benchmarking
Compare your results against industry benchmarks and historical data. A 2% CTR might seem low in isolation, but could be excellent for your industry. Track performance over time to identify trends and seasonality.

Attribution Modeling
Understand the customer journey. Did they convert after the first touchpoint or after multiple interactions? Multi-touch attribution helps allocate credit appropriately across channels.

A/B Testing
Never stop testing. Test subject lines, ad copy, images, calls-to-action, and landing pages. Use statistical significance testing to ensure your results are valid.

Reporting and Insights
Create actionable reports that tell a story. Don't just show numbers—explain what they mean and what actions should be taken. Use visualizations to make data accessible.

Continuous Optimization
Marketing is iterative. Use insights from each campaign to improve the next one. Build a knowledge base of what works for your audience.
"""

    banner = "=" * 80
    print(banner)
    print("Document Chunking Demonstrations")
    print(banner)

    # Each strategy is run over the same document for comparison.
    demo_strategies = [
        (ChunkStrategy.RECURSIVE, "Recursive (smart boundaries)"),
        (ChunkStrategy.SEMANTIC, "Semantic (paragraph-based)"),
        (ChunkStrategy.SLIDING_WINDOW, "Sliding Window (overlapping)"),
        (ChunkStrategy.FIXED, "Fixed Size"),
    ]

    for demo_strategy, label in demo_strategies:
        print(f"\n📄 Strategy: {label}")
        print("-" * 80)

        demo_chunker = DocumentChunker(
            strategy=demo_strategy,
            chunk_size=300,
            chunk_overlap=50,
        )

        demo_chunks = demo_chunker.chunk_document(
            text=sample_doc,
            document_id="campaign_analysis_guide",
            metadata={"category": "marketing", "author": "Tech Hub"},
        )

        summary = demo_chunker.get_chunk_statistics(demo_chunks)

        print(f"Total chunks: {summary['total_chunks']}")
        print(f"Avg chunk size: {summary['avg_chunk_size']:.0f} chars")
        print(f"Size range: {summary['min_chunk_size']}-{summary['max_chunk_size']} chars")

        print(f"\nFirst chunk preview:")
        print(f"{demo_chunks[0].content[:200]}...")

        print(f"\nChunk IDs: {[c.chunk_id for c in demo_chunks]}")
|