tech-hub-skills 1.5.1 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/LICENSE +21 -21
- package/.claude/README.md +291 -291
- package/.claude/bin/cli.js +266 -266
- package/.claude/bin/copilot.js +182 -182
- package/.claude/bin/postinstall.js +42 -42
- package/.claude/commands/README.md +336 -336
- package/.claude/commands/ai-engineer.md +104 -104
- package/.claude/commands/aws.md +143 -143
- package/.claude/commands/azure.md +149 -149
- package/.claude/commands/backend-developer.md +108 -108
- package/.claude/commands/code-review.md +399 -399
- package/.claude/commands/compliance-automation.md +747 -747
- package/.claude/commands/compliance-officer.md +108 -108
- package/.claude/commands/data-engineer.md +113 -113
- package/.claude/commands/data-governance.md +102 -102
- package/.claude/commands/data-scientist.md +123 -123
- package/.claude/commands/database-admin.md +109 -109
- package/.claude/commands/devops.md +160 -160
- package/.claude/commands/docker.md +160 -160
- package/.claude/commands/enterprise-dashboard.md +613 -613
- package/.claude/commands/finops.md +184 -184
- package/.claude/commands/frontend-developer.md +108 -108
- package/.claude/commands/gcp.md +143 -143
- package/.claude/commands/ml-engineer.md +115 -115
- package/.claude/commands/mlops.md +187 -187
- package/.claude/commands/network-engineer.md +109 -109
- package/.claude/commands/optimization-advisor.md +329 -329
- package/.claude/commands/orchestrator.md +623 -623
- package/.claude/commands/platform-engineer.md +102 -102
- package/.claude/commands/process-automation.md +226 -226
- package/.claude/commands/process-changelog.md +184 -184
- package/.claude/commands/process-documentation.md +484 -484
- package/.claude/commands/process-kanban.md +324 -324
- package/.claude/commands/process-versioning.md +214 -214
- package/.claude/commands/product-designer.md +104 -104
- package/.claude/commands/project-starter.md +443 -443
- package/.claude/commands/qa-engineer.md +109 -109
- package/.claude/commands/security-architect.md +135 -135
- package/.claude/commands/sre.md +109 -109
- package/.claude/commands/system-design.md +126 -126
- package/.claude/commands/technical-writer.md +101 -101
- package/.claude/package.json +46 -46
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/README.md +252 -252
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/prompt_ab_tester.py +356 -356
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/prompt_template_manager.py +274 -274
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/token_cost_estimator.py +324 -324
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/README.md +448 -448
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/document_chunker.py +336 -336
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/rag_pipeline.sql +213 -213
- package/.claude/roles/ai-engineer/skills/03-agent-orchestration/README.md +599 -599
- package/.claude/roles/ai-engineer/skills/04-llm-guardrails/README.md +735 -735
- package/.claude/roles/ai-engineer/skills/05-vector-embeddings/README.md +711 -711
- package/.claude/roles/ai-engineer/skills/06-llm-evaluation/README.md +777 -777
- package/.claude/roles/azure/skills/01-infrastructure-fundamentals/README.md +264 -264
- package/.claude/roles/azure/skills/02-data-factory/README.md +264 -264
- package/.claude/roles/azure/skills/03-synapse-analytics/README.md +264 -264
- package/.claude/roles/azure/skills/04-databricks/README.md +264 -264
- package/.claude/roles/azure/skills/05-functions/README.md +264 -264
- package/.claude/roles/azure/skills/06-kubernetes-service/README.md +264 -264
- package/.claude/roles/azure/skills/07-openai-service/README.md +264 -264
- package/.claude/roles/azure/skills/08-machine-learning/README.md +264 -264
- package/.claude/roles/azure/skills/09-storage-adls/README.md +264 -264
- package/.claude/roles/azure/skills/10-networking/README.md +264 -264
- package/.claude/roles/azure/skills/11-sql-cosmos/README.md +264 -264
- package/.claude/roles/azure/skills/12-event-hubs/README.md +264 -264
- package/.claude/roles/code-review/skills/01-automated-code-review/README.md +394 -394
- package/.claude/roles/code-review/skills/02-pr-review-workflow/README.md +427 -427
- package/.claude/roles/code-review/skills/03-code-quality-gates/README.md +518 -518
- package/.claude/roles/code-review/skills/04-reviewer-assignment/README.md +504 -504
- package/.claude/roles/code-review/skills/05-review-analytics/README.md +540 -540
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/README.md +550 -550
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/bronze_ingestion.py +337 -337
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/medallion_queries.sql +300 -300
- package/.claude/roles/data-engineer/skills/02-etl-pipeline/README.md +580 -580
- package/.claude/roles/data-engineer/skills/03-data-quality/README.md +579 -579
- package/.claude/roles/data-engineer/skills/04-streaming-pipelines/README.md +608 -608
- package/.claude/roles/data-engineer/skills/05-performance-optimization/README.md +547 -547
- package/.claude/roles/data-governance/skills/01-data-catalog/README.md +112 -112
- package/.claude/roles/data-governance/skills/02-data-lineage/README.md +129 -129
- package/.claude/roles/data-governance/skills/03-data-quality-framework/README.md +182 -182
- package/.claude/roles/data-governance/skills/04-access-control/README.md +39 -39
- package/.claude/roles/data-governance/skills/05-master-data-management/README.md +40 -40
- package/.claude/roles/data-governance/skills/06-compliance-privacy/README.md +46 -46
- package/.claude/roles/data-scientist/skills/01-eda-automation/README.md +230 -230
- package/.claude/roles/data-scientist/skills/01-eda-automation/eda_generator.py +446 -446
- package/.claude/roles/data-scientist/skills/02-statistical-modeling/README.md +264 -264
- package/.claude/roles/data-scientist/skills/03-feature-engineering/README.md +264 -264
- package/.claude/roles/data-scientist/skills/04-predictive-modeling/README.md +264 -264
- package/.claude/roles/data-scientist/skills/05-customer-analytics/README.md +264 -264
- package/.claude/roles/data-scientist/skills/06-campaign-analysis/README.md +264 -264
- package/.claude/roles/data-scientist/skills/07-experimentation/README.md +264 -264
- package/.claude/roles/data-scientist/skills/08-data-visualization/README.md +264 -264
- package/.claude/roles/devops/skills/01-cicd-pipeline/README.md +264 -264
- package/.claude/roles/devops/skills/02-container-orchestration/README.md +264 -264
- package/.claude/roles/devops/skills/03-infrastructure-as-code/README.md +264 -264
- package/.claude/roles/devops/skills/04-gitops/README.md +264 -264
- package/.claude/roles/devops/skills/05-environment-management/README.md +264 -264
- package/.claude/roles/devops/skills/06-automated-testing/README.md +264 -264
- package/.claude/roles/devops/skills/07-release-management/README.md +264 -264
- package/.claude/roles/devops/skills/08-monitoring-alerting/README.md +264 -264
- package/.claude/roles/devops/skills/09-devsecops/README.md +265 -265
- package/.claude/roles/finops/skills/01-cost-visibility/README.md +264 -264
- package/.claude/roles/finops/skills/02-resource-tagging/README.md +264 -264
- package/.claude/roles/finops/skills/03-budget-management/README.md +264 -264
- package/.claude/roles/finops/skills/04-reserved-instances/README.md +264 -264
- package/.claude/roles/finops/skills/05-spot-optimization/README.md +264 -264
- package/.claude/roles/finops/skills/06-storage-tiering/README.md +264 -264
- package/.claude/roles/finops/skills/07-compute-rightsizing/README.md +264 -264
- package/.claude/roles/finops/skills/08-chargeback/README.md +264 -264
- package/.claude/roles/ml-engineer/skills/01-mlops-pipeline/README.md +566 -566
- package/.claude/roles/ml-engineer/skills/02-feature-engineering/README.md +655 -655
- package/.claude/roles/ml-engineer/skills/03-model-training/README.md +704 -704
- package/.claude/roles/ml-engineer/skills/04-model-serving/README.md +845 -845
- package/.claude/roles/ml-engineer/skills/05-model-monitoring/README.md +874 -874
- package/.claude/roles/mlops/skills/01-ml-pipeline-orchestration/README.md +264 -264
- package/.claude/roles/mlops/skills/02-experiment-tracking/README.md +264 -264
- package/.claude/roles/mlops/skills/03-model-registry/README.md +264 -264
- package/.claude/roles/mlops/skills/04-feature-store/README.md +264 -264
- package/.claude/roles/mlops/skills/05-model-deployment/README.md +264 -264
- package/.claude/roles/mlops/skills/06-model-observability/README.md +264 -264
- package/.claude/roles/mlops/skills/07-data-versioning/README.md +264 -264
- package/.claude/roles/mlops/skills/08-ab-testing/README.md +264 -264
- package/.claude/roles/mlops/skills/09-automated-retraining/README.md +264 -264
- package/.claude/roles/platform-engineer/skills/01-internal-developer-platform/README.md +153 -153
- package/.claude/roles/platform-engineer/skills/02-self-service-infrastructure/README.md +57 -57
- package/.claude/roles/platform-engineer/skills/03-slo-sli-management/README.md +59 -59
- package/.claude/roles/platform-engineer/skills/04-developer-experience/README.md +57 -57
- package/.claude/roles/platform-engineer/skills/05-incident-management/README.md +73 -73
- package/.claude/roles/platform-engineer/skills/06-capacity-management/README.md +59 -59
- package/.claude/roles/product-designer/skills/01-requirements-discovery/README.md +407 -407
- package/.claude/roles/product-designer/skills/02-user-research/README.md +382 -382
- package/.claude/roles/product-designer/skills/03-brainstorming-ideation/README.md +437 -437
- package/.claude/roles/product-designer/skills/04-ux-design/README.md +496 -496
- package/.claude/roles/product-designer/skills/05-product-market-fit/README.md +376 -376
- package/.claude/roles/product-designer/skills/06-stakeholder-management/README.md +412 -412
- package/.claude/roles/security-architect/skills/01-pii-detection/README.md +319 -319
- package/.claude/roles/security-architect/skills/02-threat-modeling/README.md +264 -264
- package/.claude/roles/security-architect/skills/03-infrastructure-security/README.md +264 -264
- package/.claude/roles/security-architect/skills/04-iam/README.md +264 -264
- package/.claude/roles/security-architect/skills/05-application-security/README.md +264 -264
- package/.claude/roles/security-architect/skills/06-secrets-management/README.md +264 -264
- package/.claude/roles/security-architect/skills/07-security-monitoring/README.md +264 -264
- package/.claude/roles/system-design/skills/01-architecture-patterns/README.md +337 -337
- package/.claude/roles/system-design/skills/02-requirements-engineering/README.md +264 -264
- package/.claude/roles/system-design/skills/03-scalability/README.md +264 -264
- package/.claude/roles/system-design/skills/04-high-availability/README.md +264 -264
- package/.claude/roles/system-design/skills/05-cost-optimization-design/README.md +264 -264
- package/.claude/roles/system-design/skills/06-api-design/README.md +264 -264
- package/.claude/roles/system-design/skills/07-observability-architecture/README.md +264 -264
- package/.claude/roles/system-design/skills/08-process-automation/PROCESS_TEMPLATE.md +336 -336
- package/.claude/roles/system-design/skills/08-process-automation/README.md +521 -521
- package/.claude/roles/system-design/skills/08-process-automation/ai_prompt_generator.py +744 -744
- package/.claude/roles/system-design/skills/08-process-automation/automation_recommender.py +688 -688
- package/.claude/roles/system-design/skills/08-process-automation/plan_generator.py +679 -679
- package/.claude/roles/system-design/skills/08-process-automation/process_analyzer.py +528 -528
- package/.claude/roles/system-design/skills/08-process-automation/process_parser.py +684 -684
- package/.claude/roles/system-design/skills/08-process-automation/role_matcher.py +615 -615
- package/.claude/skills/README.md +336 -336
- package/.claude/skills/ai-engineer.md +104 -104
- package/.claude/skills/aws.md +143 -143
- package/.claude/skills/azure.md +149 -149
- package/.claude/skills/backend-developer.md +108 -108
- package/.claude/skills/code-review.md +399 -399
- package/.claude/skills/compliance-automation.md +747 -747
- package/.claude/skills/compliance-officer.md +108 -108
- package/.claude/skills/data-engineer.md +113 -113
- package/.claude/skills/data-governance.md +102 -102
- package/.claude/skills/data-scientist.md +123 -123
- package/.claude/skills/database-admin.md +109 -109
- package/.claude/skills/devops.md +160 -160
- package/.claude/skills/docker.md +160 -160
- package/.claude/skills/enterprise-dashboard.md +613 -613
- package/.claude/skills/finops.md +184 -184
- package/.claude/skills/frontend-developer.md +108 -108
- package/.claude/skills/gcp.md +143 -143
- package/.claude/skills/ml-engineer.md +115 -115
- package/.claude/skills/mlops.md +187 -187
- package/.claude/skills/network-engineer.md +109 -109
- package/.claude/skills/optimization-advisor.md +329 -329
- package/.claude/skills/orchestrator.md +623 -623
- package/.claude/skills/platform-engineer.md +102 -102
- package/.claude/skills/process-automation.md +226 -226
- package/.claude/skills/process-changelog.md +184 -184
- package/.claude/skills/process-documentation.md +484 -484
- package/.claude/skills/process-kanban.md +324 -324
- package/.claude/skills/process-versioning.md +214 -214
- package/.claude/skills/product-designer.md +104 -104
- package/.claude/skills/project-starter.md +443 -443
- package/.claude/skills/qa-engineer.md +109 -109
- package/.claude/skills/security-architect.md +135 -135
- package/.claude/skills/sre.md +109 -109
- package/.claude/skills/system-design.md +126 -126
- package/.claude/skills/technical-writer.md +101 -101
- package/.gitattributes +2 -2
- package/GITHUB_COPILOT.md +106 -106
- package/README.md +192 -184
- package/package.json +16 -8
|
@@ -1,324 +1,324 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Token Cost Estimator for Multiple LLM Providers
|
|
3
|
-
Calculate and compare costs across OpenAI, Anthropic, Google, and more.
|
|
4
|
-
"""
|
|
5
|
-
|
|
6
|
-
import tiktoken
|
|
7
|
-
from typing import Dict, Optional, Tuple
|
|
8
|
-
from dataclasses import dataclass
|
|
9
|
-
from enum import Enum
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
class Provider(Enum):
    """Enumeration of the LLM vendors this estimator knows about.

    Each member's string value is the canonical lowercase vendor name,
    so ``Provider("openai")`` round-trips from configuration strings.
    """

    OPENAI = "openai"        # GPT family
    ANTHROPIC = "anthropic"  # Claude family
    GOOGLE = "google"        # Gemini family
    META = "meta"            # Llama family (via hosting providers)
    MISTRAL = "mistral"      # Mistral models
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
@dataclass
class ModelPricing:
    """Per-model pricing and capacity metadata.

    Costs are expressed in USD per 1,000 tokens.
    """

    input_cost_per_1k: float   # USD per 1K prompt tokens
    output_cost_per_1k: float  # USD per 1K completion tokens
    context_window: int        # maximum context size, in tokens


# Pricing as of December 2025 (prices in USD)
PRICING_TABLE: Dict[str, ModelPricing] = {
    # OpenAI
    "gpt-4-turbo": ModelPricing(input_cost_per_1k=0.01, output_cost_per_1k=0.03, context_window=128000),
    "gpt-4": ModelPricing(input_cost_per_1k=0.03, output_cost_per_1k=0.06, context_window=8192),
    "gpt-3.5-turbo": ModelPricing(input_cost_per_1k=0.0005, output_cost_per_1k=0.0015, context_window=16385),
    "gpt-4o": ModelPricing(input_cost_per_1k=0.005, output_cost_per_1k=0.015, context_window=128000),
    "gpt-4o-mini": ModelPricing(input_cost_per_1k=0.00015, output_cost_per_1k=0.0006, context_window=128000),

    # Anthropic
    "claude-opus-4": ModelPricing(input_cost_per_1k=0.015, output_cost_per_1k=0.075, context_window=200000),
    "claude-sonnet-4": ModelPricing(input_cost_per_1k=0.003, output_cost_per_1k=0.015, context_window=200000),
    "claude-haiku-4": ModelPricing(input_cost_per_1k=0.00025, output_cost_per_1k=0.00125, context_window=200000),
    "claude-3-opus": ModelPricing(input_cost_per_1k=0.015, output_cost_per_1k=0.075, context_window=200000),
    "claude-3-sonnet": ModelPricing(input_cost_per_1k=0.003, output_cost_per_1k=0.015, context_window=200000),
    "claude-3-haiku": ModelPricing(input_cost_per_1k=0.00025, output_cost_per_1k=0.00125, context_window=200000),

    # Google
    "gemini-pro": ModelPricing(input_cost_per_1k=0.000125, output_cost_per_1k=0.000375, context_window=32760),
    "gemini-pro-vision": ModelPricing(input_cost_per_1k=0.000125, output_cost_per_1k=0.000375, context_window=16384),
    "gemini-ultra": ModelPricing(input_cost_per_1k=0.001, output_cost_per_1k=0.002, context_window=32760),
    "gemini-1.5-pro": ModelPricing(input_cost_per_1k=0.00125, output_cost_per_1k=0.005, context_window=1000000),
    "gemini-1.5-flash": ModelPricing(input_cost_per_1k=0.000125, output_cost_per_1k=0.0005, context_window=1000000),

    # Meta (via hosting providers - example pricing)
    "llama-3-70b": ModelPricing(input_cost_per_1k=0.0007, output_cost_per_1k=0.0009, context_window=8192),
    "llama-3-8b": ModelPricing(input_cost_per_1k=0.0002, output_cost_per_1k=0.0002, context_window=8192),

    # Mistral
    "mistral-large": ModelPricing(input_cost_per_1k=0.004, output_cost_per_1k=0.012, context_window=32000),
    "mistral-medium": ModelPricing(input_cost_per_1k=0.0027, output_cost_per_1k=0.0081, context_window=32000),
    "mistral-small": ModelPricing(input_cost_per_1k=0.001, output_cost_per_1k=0.003, context_window=32000),
}
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
class TokenCounter:
    """Count tokens for different providers.

    Uses tiktoken's ``cl100k_base`` encoding as an approximation for
    every vendor; only OpenAI counts are exact.
    """

    def __init__(self):
        # cl100k_base is exact for GPT-3.5/GPT-4-era OpenAI models and a
        # reasonable approximation for the other vendors.
        self.encoders = {
            Provider.OPENAI: tiktoken.get_encoding("cl100k_base"),
            # For other providers, we approximate with cl100k_base
            # In production, use provider-specific tokenizers
        }

    def count_tokens(self, text: str, provider: Provider = Provider.OPENAI) -> int:
        """Return the number of tokens in *text* for *provider*.

        Falls back to the OpenAI encoder when no provider-specific
        encoder is registered (currently: every non-OpenAI provider).
        """
        encoder = self.encoders.get(provider, self.encoders[Provider.OPENAI])
        return len(encoder.encode(text))

    def count_tokens_for_messages(
        self,
        messages: list,
        model: str = "gpt-4"
    ) -> int:
        """Count tokens for chat completion messages.

        Args:
            messages: Chat messages as dicts (``role``/``content``/``name`` keys).
            model: Model name used to select the tokenizer.

        Returns:
            Estimated token count including per-message framing overhead.
        """
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            # tiktoken raises KeyError for model names it does not know;
            # fall back to cl100k_base instead of propagating the error.
            encoding = tiktoken.get_encoding("cl100k_base")

        # Per-message framing overhead (OpenAI chat format). The original
        # code branched on the model family but assigned identical
        # constants in both branches, so the dead branch was removed.
        tokens_per_message = 3
        tokens_per_name = 1

        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(str(value)))
                if key == "name":
                    num_tokens += tokens_per_name

        num_tokens += 3  # every reply is primed with assistant
        return num_tokens
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
class CostEstimator:
    """Estimate costs for LLM API calls.

    Combines token counting (``TokenCounter``) with the static
    ``PRICING_TABLE`` to produce per-call, comparative, and monthly
    cost breakdowns.
    """

    def __init__(self):
        self.token_counter = TokenCounter()

    def estimate_cost(
        self,
        input_text: str,
        model: str,
        estimated_output_tokens: int = 0,
        provider: Optional[Provider] = None
    ) -> Dict[str, float]:
        """
        Estimate the cost of an LLM API call.

        Args:
            input_text: The input prompt
            model: Model name (e.g., 'gpt-4', 'claude-3-opus')
            estimated_output_tokens: Expected output length
            provider: LLM provider (auto-detected if None)

        Returns:
            Dictionary with cost breakdown

        Raises:
            ValueError: If *model* is not present in ``PRICING_TABLE``.
        """
        # Auto-detect provider if not specified
        if provider is None:
            provider = self._detect_provider(model)

        # Get pricing info
        pricing = PRICING_TABLE.get(model)
        if not pricing:
            raise ValueError(f"Unknown model: {model}")

        # Count input tokens
        input_tokens = self.token_counter.count_tokens(input_text, provider)

        # Calculate costs (table prices are per 1K tokens)
        input_cost = (input_tokens / 1000) * pricing.input_cost_per_1k
        output_cost = (estimated_output_tokens / 1000) * pricing.output_cost_per_1k
        total_cost = input_cost + output_cost

        return {
            "input_tokens": input_tokens,
            "output_tokens": estimated_output_tokens,
            "total_tokens": input_tokens + estimated_output_tokens,
            "input_cost": input_cost,
            "output_cost": output_cost,
            "total_cost": total_cost,
            "currency": "USD",
            "model": model,
            "context_window": pricing.context_window,
            "context_utilization": (input_tokens + estimated_output_tokens) / pricing.context_window
        }

    def compare_models(
        self,
        input_text: str,
        models: list,
        estimated_output_tokens: int = 500
    ) -> Dict[str, Dict[str, float]]:
        """Compare costs across multiple models.

        Unknown models are reported as ``{"error": ...}`` entries rather
        than aborting the whole comparison.
        """
        comparisons = {}

        for model in models:
            try:
                cost = self.estimate_cost(input_text, model, estimated_output_tokens)
                comparisons[model] = cost
            except ValueError as e:
                comparisons[model] = {"error": str(e)}

        return comparisons

    def _detect_provider(self, model: str) -> Provider:
        """Auto-detect provider from model name (defaults to OpenAI)."""
        name = model.lower()  # lowercase once instead of per branch
        if "gpt" in name:
            return Provider.OPENAI
        elif "claude" in name:
            return Provider.ANTHROPIC
        elif "gemini" in name:
            return Provider.GOOGLE
        elif "llama" in name:
            return Provider.META
        elif "mistral" in name:
            return Provider.MISTRAL
        else:
            return Provider.OPENAI  # Default

    def estimate_monthly_cost(
        self,
        queries_per_day: int,
        avg_input_tokens: int,
        avg_output_tokens: int,
        model: str
    ) -> Dict[str, float]:
        """Estimate monthly costs based on usage patterns.

        Args:
            queries_per_day: Expected request volume (must be positive).
            avg_input_tokens: Average prompt size per request.
            avg_output_tokens: Average completion size per request.
            model: Model name to price against.

        Raises:
            ValueError: If *model* is unknown or *queries_per_day* is not
                positive (the original code raised ``ZeroDivisionError``
                for ``queries_per_day == 0`` when computing cost_per_query).
        """
        pricing = PRICING_TABLE.get(model)
        if not pricing:
            raise ValueError(f"Unknown model: {model}")

        if queries_per_day <= 0:
            # Fail fast with a clear message instead of dividing by zero
            # in the cost_per_query computation below.
            raise ValueError("queries_per_day must be a positive integer")

        # Daily costs
        daily_input_cost = (queries_per_day * avg_input_tokens / 1000) * pricing.input_cost_per_1k
        daily_output_cost = (queries_per_day * avg_output_tokens / 1000) * pricing.output_cost_per_1k
        daily_cost = daily_input_cost + daily_output_cost

        # Monthly projection (30 days)
        monthly_cost = daily_cost * 30

        return {
            "queries_per_day": queries_per_day,
            "daily_cost": daily_cost,
            "monthly_cost": monthly_cost,
            "yearly_cost": monthly_cost * 12,
            "cost_per_query": daily_cost / queries_per_day,
            "model": model
        }
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
def format_cost_comparison(comparisons: Dict[str, Dict[str, float]]) -> str:
    """Render a multi-model cost comparison as a human-readable report.

    Models are listed cheapest first (by total cost); entries carrying an
    ``"error"`` key are silently omitted from the report.
    """
    report = ["š Cost Comparison", "=" * 70]

    # Keep only successfully priced models, cheapest first.
    priced = [(name, data) for name, data in comparisons.items() if "error" not in data]
    priced.sort(key=lambda item: item[1]["total_cost"])

    for name, data in priced:
        report.append(f"\nš {name}")
        report.append(f"  Input: {data['input_tokens']:,} tokens ā ${data['input_cost']:.6f}")
        report.append(f"  Output: {data['output_tokens']:,} tokens ā ${data['output_cost']:.6f}")
        report.append(f"  Total: ${data['total_cost']:.6f}")
        report.append(f"  Context: {data['context_utilization']:.1%} utilized")

    return "\n".join(report)
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
# Example usage
# Demonstrates the three entry points: single-call estimation, multi-model
# comparison, and monthly projection. Requires the tiktoken package.
if __name__ == "__main__":
    estimator = CostEstimator()

    # Example prompt
    prompt = """Analyze this marketing campaign and provide detailed recommendations:

Campaign: Summer Sale 2025
Budget: $50,000
Duration: 30 days
Channels: Email, Social Media, Search Ads

Current Metrics (Week 1):
- Impressions: 1,200,000
- Clicks: 24,000 (CTR: 2%)
- Conversions: 480 (Conv Rate: 2%)
- Revenue: $48,000
- ROAS: 0.96

Please provide:
1. Performance analysis
2. Optimization recommendations
3. Budget reallocation strategy
4. Projected outcomes
"""

    # Single model estimate
    print("=" * 70)
    print("š° Single Model Estimation")
    print("=" * 70)

    cost = estimator.estimate_cost(
        input_text=prompt,
        model="gpt-4o",
        estimated_output_tokens=1000
    )

    print(f"\nModel: {cost['model']}")
    print(f"Input tokens: {cost['input_tokens']:,}")
    print(f"Estimated output tokens: {cost['output_tokens']:,}")
    print(f"Total cost: ${cost['total_cost']:.4f}")
    print(f"Context utilization: {cost['context_utilization']:.1%}")

    # Compare multiple models
    # NOTE(review): the model list mixes all three major vendors so the
    # comparison exercises provider auto-detection as well.
    print("\n" + "=" * 70)
    models_to_compare = [
        "gpt-4o",
        "gpt-4o-mini",
        "claude-sonnet-4",
        "claude-haiku-4",
        "gemini-1.5-pro",
        "gemini-1.5-flash"
    ]

    comparisons = estimator.compare_models(
        input_text=prompt,
        models=models_to_compare,
        estimated_output_tokens=1000
    )

    print(format_cost_comparison(comparisons))

    # Monthly cost projection
    print("\n" + "=" * 70)
    print("š Monthly Cost Projection")
    print("=" * 70)

    monthly = estimator.estimate_monthly_cost(
        queries_per_day=1000,
        avg_input_tokens=500,
        avg_output_tokens=800,
        model="gpt-4o"
    )

    print(f"\nModel: {monthly['model']}")
    print(f"Queries per day: {monthly['queries_per_day']:,}")
    print(f"Daily cost: ${monthly['daily_cost']:.2f}")
    print(f"Monthly cost: ${monthly['monthly_cost']:.2f}")
    print(f"Yearly cost: ${monthly['yearly_cost']:,.2f}")
    print(f"Cost per query: ${monthly['cost_per_query']:.4f}")
|
1
|
+
"""
|
|
2
|
+
Token Cost Estimator for Multiple LLM Providers
|
|
3
|
+
Calculate and compare costs across OpenAI, Anthropic, Google, and more.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import tiktoken
|
|
7
|
+
from typing import Dict, Optional, Tuple
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from enum import Enum
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Provider(Enum):
|
|
13
|
+
"""Supported LLM providers."""
|
|
14
|
+
OPENAI = "openai"
|
|
15
|
+
ANTHROPIC = "anthropic"
|
|
16
|
+
GOOGLE = "google"
|
|
17
|
+
META = "meta"
|
|
18
|
+
MISTRAL = "mistral"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
|
|
22
|
+
class ModelPricing:
|
|
23
|
+
"""Pricing information for a model."""
|
|
24
|
+
input_cost_per_1k: float # Cost per 1K input tokens
|
|
25
|
+
output_cost_per_1k: float # Cost per 1K output tokens
|
|
26
|
+
context_window: int # Maximum context window in tokens
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# Pricing as of December 2025 (prices in USD)
|
|
30
|
+
PRICING_TABLE: Dict[str, ModelPricing] = {
|
|
31
|
+
# OpenAI
|
|
32
|
+
"gpt-4-turbo": ModelPricing(0.01, 0.03, 128000),
|
|
33
|
+
"gpt-4": ModelPricing(0.03, 0.06, 8192),
|
|
34
|
+
"gpt-3.5-turbo": ModelPricing(0.0005, 0.0015, 16385),
|
|
35
|
+
"gpt-4o": ModelPricing(0.005, 0.015, 128000),
|
|
36
|
+
"gpt-4o-mini": ModelPricing(0.00015, 0.0006, 128000),
|
|
37
|
+
|
|
38
|
+
# Anthropic
|
|
39
|
+
"claude-opus-4": ModelPricing(0.015, 0.075, 200000),
|
|
40
|
+
"claude-sonnet-4": ModelPricing(0.003, 0.015, 200000),
|
|
41
|
+
"claude-haiku-4": ModelPricing(0.00025, 0.00125, 200000),
|
|
42
|
+
"claude-3-opus": ModelPricing(0.015, 0.075, 200000),
|
|
43
|
+
"claude-3-sonnet": ModelPricing(0.003, 0.015, 200000),
|
|
44
|
+
"claude-3-haiku": ModelPricing(0.00025, 0.00125, 200000),
|
|
45
|
+
|
|
46
|
+
# Google
|
|
47
|
+
"gemini-pro": ModelPricing(0.000125, 0.000375, 32760),
|
|
48
|
+
"gemini-pro-vision": ModelPricing(0.000125, 0.000375, 16384),
|
|
49
|
+
"gemini-ultra": ModelPricing(0.001, 0.002, 32760),
|
|
50
|
+
"gemini-1.5-pro": ModelPricing(0.00125, 0.005, 1000000),
|
|
51
|
+
"gemini-1.5-flash": ModelPricing(0.000125, 0.0005, 1000000),
|
|
52
|
+
|
|
53
|
+
# Meta (via hosting providers - example pricing)
|
|
54
|
+
"llama-3-70b": ModelPricing(0.0007, 0.0009, 8192),
|
|
55
|
+
"llama-3-8b": ModelPricing(0.0002, 0.0002, 8192),
|
|
56
|
+
|
|
57
|
+
# Mistral
|
|
58
|
+
"mistral-large": ModelPricing(0.004, 0.012, 32000),
|
|
59
|
+
"mistral-medium": ModelPricing(0.0027, 0.0081, 32000),
|
|
60
|
+
"mistral-small": ModelPricing(0.001, 0.003, 32000),
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class TokenCounter:
|
|
65
|
+
"""Count tokens for different providers."""
|
|
66
|
+
|
|
67
|
+
def __init__(self):
|
|
68
|
+
self.encoders = {
|
|
69
|
+
Provider.OPENAI: tiktoken.get_encoding("cl100k_base"),
|
|
70
|
+
# For other providers, we approximate with cl100k_base
|
|
71
|
+
# In production, use provider-specific tokenizers
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
def count_tokens(self, text: str, provider: Provider = Provider.OPENAI) -> int:
|
|
75
|
+
"""Count tokens in text."""
|
|
76
|
+
encoder = self.encoders.get(provider, self.encoders[Provider.OPENAI])
|
|
77
|
+
return len(encoder.encode(text))
|
|
78
|
+
|
|
79
|
+
def count_tokens_for_messages(
|
|
80
|
+
self,
|
|
81
|
+
messages: list,
|
|
82
|
+
model: str = "gpt-4"
|
|
83
|
+
) -> int:
|
|
84
|
+
"""Count tokens for chat completion messages."""
|
|
85
|
+
encoding = tiktoken.encoding_for_model(model)
|
|
86
|
+
|
|
87
|
+
# Token counting varies by model
|
|
88
|
+
if "gpt-4" in model or "gpt-3.5" in model:
|
|
89
|
+
tokens_per_message = 3
|
|
90
|
+
tokens_per_name = 1
|
|
91
|
+
else:
|
|
92
|
+
tokens_per_message = 3
|
|
93
|
+
tokens_per_name = 1
|
|
94
|
+
|
|
95
|
+
num_tokens = 0
|
|
96
|
+
for message in messages:
|
|
97
|
+
num_tokens += tokens_per_message
|
|
98
|
+
for key, value in message.items():
|
|
99
|
+
num_tokens += len(encoding.encode(str(value)))
|
|
100
|
+
if key == "name":
|
|
101
|
+
num_tokens += tokens_per_name
|
|
102
|
+
|
|
103
|
+
num_tokens += 3 # every reply is primed with assistant
|
|
104
|
+
return num_tokens
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class CostEstimator:
|
|
108
|
+
"""Estimate costs for LLM API calls."""
|
|
109
|
+
|
|
110
|
+
def __init__(self):
|
|
111
|
+
self.token_counter = TokenCounter()
|
|
112
|
+
|
|
113
|
+
def estimate_cost(
|
|
114
|
+
self,
|
|
115
|
+
input_text: str,
|
|
116
|
+
model: str,
|
|
117
|
+
estimated_output_tokens: int = 0,
|
|
118
|
+
provider: Optional[Provider] = None
|
|
119
|
+
) -> Dict[str, float]:
|
|
120
|
+
"""
|
|
121
|
+
Estimate the cost of an LLM API call.
|
|
122
|
+
|
|
123
|
+
Args:
|
|
124
|
+
input_text: The input prompt
|
|
125
|
+
model: Model name (e.g., 'gpt-4', 'claude-3-opus')
|
|
126
|
+
estimated_output_tokens: Expected output length
|
|
127
|
+
provider: LLM provider (auto-detected if None)
|
|
128
|
+
|
|
129
|
+
Returns:
|
|
130
|
+
Dictionary with cost breakdown
|
|
131
|
+
"""
|
|
132
|
+
# Auto-detect provider if not specified
|
|
133
|
+
if provider is None:
|
|
134
|
+
provider = self._detect_provider(model)
|
|
135
|
+
|
|
136
|
+
# Get pricing info
|
|
137
|
+
pricing = PRICING_TABLE.get(model)
|
|
138
|
+
if not pricing:
|
|
139
|
+
raise ValueError(f"Unknown model: {model}")
|
|
140
|
+
|
|
141
|
+
# Count input tokens
|
|
142
|
+
input_tokens = self.token_counter.count_tokens(input_text, provider)
|
|
143
|
+
|
|
144
|
+
# Calculate costs
|
|
145
|
+
input_cost = (input_tokens / 1000) * pricing.input_cost_per_1k
|
|
146
|
+
output_cost = (estimated_output_tokens / 1000) * pricing.output_cost_per_1k
|
|
147
|
+
total_cost = input_cost + output_cost
|
|
148
|
+
|
|
149
|
+
return {
|
|
150
|
+
"input_tokens": input_tokens,
|
|
151
|
+
"output_tokens": estimated_output_tokens,
|
|
152
|
+
"total_tokens": input_tokens + estimated_output_tokens,
|
|
153
|
+
"input_cost": input_cost,
|
|
154
|
+
"output_cost": output_cost,
|
|
155
|
+
"total_cost": total_cost,
|
|
156
|
+
"currency": "USD",
|
|
157
|
+
"model": model,
|
|
158
|
+
"context_window": pricing.context_window,
|
|
159
|
+
"context_utilization": (input_tokens + estimated_output_tokens) / pricing.context_window
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
def compare_models(
|
|
163
|
+
self,
|
|
164
|
+
input_text: str,
|
|
165
|
+
models: list,
|
|
166
|
+
estimated_output_tokens: int = 500
|
|
167
|
+
) -> Dict[str, Dict[str, float]]:
|
|
168
|
+
"""Compare costs across multiple models."""
|
|
169
|
+
comparisons = {}
|
|
170
|
+
|
|
171
|
+
for model in models:
|
|
172
|
+
try:
|
|
173
|
+
cost = self.estimate_cost(input_text, model, estimated_output_tokens)
|
|
174
|
+
comparisons[model] = cost
|
|
175
|
+
except ValueError as e:
|
|
176
|
+
comparisons[model] = {"error": str(e)}
|
|
177
|
+
|
|
178
|
+
return comparisons
|
|
179
|
+
|
|
180
|
+
def _detect_provider(self, model: str) -> Provider:
|
|
181
|
+
"""Auto-detect provider from model name."""
|
|
182
|
+
if "gpt" in model.lower():
|
|
183
|
+
return Provider.OPENAI
|
|
184
|
+
elif "claude" in model.lower():
|
|
185
|
+
return Provider.ANTHROPIC
|
|
186
|
+
elif "gemini" in model.lower():
|
|
187
|
+
return Provider.GOOGLE
|
|
188
|
+
elif "llama" in model.lower():
|
|
189
|
+
return Provider.META
|
|
190
|
+
elif "mistral" in model.lower():
|
|
191
|
+
return Provider.MISTRAL
|
|
192
|
+
else:
|
|
193
|
+
return Provider.OPENAI # Default
|
|
194
|
+
|
|
195
|
+
def estimate_monthly_cost(
|
|
196
|
+
self,
|
|
197
|
+
queries_per_day: int,
|
|
198
|
+
avg_input_tokens: int,
|
|
199
|
+
avg_output_tokens: int,
|
|
200
|
+
model: str
|
|
201
|
+
) -> Dict[str, float]:
|
|
202
|
+
"""Estimate monthly costs based on usage patterns."""
|
|
203
|
+
pricing = PRICING_TABLE.get(model)
|
|
204
|
+
if not pricing:
|
|
205
|
+
raise ValueError(f"Unknown model: {model}")
|
|
206
|
+
|
|
207
|
+
# Daily costs
|
|
208
|
+
daily_input_cost = (queries_per_day * avg_input_tokens / 1000) * pricing.input_cost_per_1k
|
|
209
|
+
daily_output_cost = (queries_per_day * avg_output_tokens / 1000) * pricing.output_cost_per_1k
|
|
210
|
+
daily_cost = daily_input_cost + daily_output_cost
|
|
211
|
+
|
|
212
|
+
# Monthly projection (30 days)
|
|
213
|
+
monthly_cost = daily_cost * 30
|
|
214
|
+
|
|
215
|
+
return {
|
|
216
|
+
"queries_per_day": queries_per_day,
|
|
217
|
+
"daily_cost": daily_cost,
|
|
218
|
+
"monthly_cost": monthly_cost,
|
|
219
|
+
"yearly_cost": monthly_cost * 12,
|
|
220
|
+
"cost_per_query": daily_cost / queries_per_day,
|
|
221
|
+
"model": model
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def format_cost_comparison(comparisons: Dict[str, Dict[str, float]]) -> str:
    """Format a model cost comparison for display.

    Successful estimates are listed cheapest-first. Models whose
    estimation failed (entries carrying an "error" key, as produced by
    CostEstimator.compare_models) are reported at the end rather than
    being silently dropped.
    """
    lines = ["š Cost Comparison", "=" * 70]

    # Partition successful estimates from failures.
    valid = [(m, c) for m, c in comparisons.items() if "error" not in c]
    failed = [(m, c) for m, c in comparisons.items() if "error" in c]

    # Sort by total cost, cheapest first.
    for model, cost in sorted(valid, key=lambda item: item[1]["total_cost"]):
        lines.append(f"\nš {model}")
        lines.append(f" Input: {cost['input_tokens']:,} tokens ā ${cost['input_cost']:.6f}")
        lines.append(f" Output: {cost['output_tokens']:,} tokens ā ${cost['output_cost']:.6f}")
        lines.append(f" Total: ${cost['total_cost']:.6f}")
        lines.append(f" Context: {cost['context_utilization']:.1%} utilized")

    # Surface failed estimates so callers notice misnamed models.
    for model, cost in failed:
        lines.append(f"\nš {model}")
        lines.append(f" Error: {cost['error']}")

    return "\n".join(lines)
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# Example usage
def _demo() -> None:
    """Run the demo: single estimate, model comparison, monthly projection."""
    estimator = CostEstimator()

    # Example prompt
    sample_prompt = """Analyze this marketing campaign and provide detailed recommendations:

Campaign: Summer Sale 2025
Budget: $50,000
Duration: 30 days
Channels: Email, Social Media, Search Ads

Current Metrics (Week 1):
- Impressions: 1,200,000
- Clicks: 24,000 (CTR: 2%)
- Conversions: 480 (Conv Rate: 2%)
- Revenue: $48,000
- ROAS: 0.96

Please provide:
1. Performance analysis
2. Optimization recommendations
3. Budget reallocation strategy
4. Projected outcomes
"""

    divider = "=" * 70

    # Single model estimate
    print(divider)
    print("š° Single Model Estimation")
    print(divider)

    single = estimator.estimate_cost(
        input_text=sample_prompt,
        model="gpt-4o",
        estimated_output_tokens=1000
    )

    print(f"\nModel: {single['model']}")
    print(f"Input tokens: {single['input_tokens']:,}")
    print(f"Estimated output tokens: {single['output_tokens']:,}")
    print(f"Total cost: ${single['total_cost']:.4f}")
    print(f"Context utilization: {single['context_utilization']:.1%}")

    # Compare multiple models
    print("\n" + divider)
    candidates = [
        "gpt-4o",
        "gpt-4o-mini",
        "claude-sonnet-4",
        "claude-haiku-4",
        "gemini-1.5-pro",
        "gemini-1.5-flash",
    ]

    comparison = estimator.compare_models(
        input_text=sample_prompt,
        models=candidates,
        estimated_output_tokens=1000
    )

    print(format_cost_comparison(comparison))

    # Monthly cost projection
    print("\n" + divider)
    print("š Monthly Cost Projection")
    print(divider)

    projection = estimator.estimate_monthly_cost(
        queries_per_day=1000,
        avg_input_tokens=500,
        avg_output_tokens=800,
        model="gpt-4o"
    )

    print(f"\nModel: {projection['model']}")
    print(f"Queries per day: {projection['queries_per_day']:,}")
    print(f"Daily cost: ${projection['daily_cost']:.2f}")
    print(f"Monthly cost: ${projection['monthly_cost']:.2f}")
    print(f"Yearly cost: ${projection['yearly_cost']:,.2f}")
    print(f"Cost per query: ${projection['cost_per_query']:.4f}")


if __name__ == "__main__":
    _demo()
|