cost-katana 1.0.0-py3-none-any.whl → 1.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cost_katana/__init__.py +16 -14
- cost_katana/cli.py +581 -119
- cost_katana/client.py +298 -75
- cost_katana/config.py +82 -85
- cost_katana/exceptions.py +19 -1
- cost_katana/models.py +110 -111
- {cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/METADATA +10 -9
- cost_katana-1.0.2.dist-info/RECORD +12 -0
- cost_katana-1.0.0.dist-info/RECORD +0 -12
- {cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/WHEEL +0 -0
- {cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/entry_points.txt +0 -0
- {cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/licenses/LICENSE +0 -0
- {cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/top_level.txt +0 -0
cost_katana/models.py
CHANGED
@@ -3,14 +3,16 @@ Generative AI Models - Simple interface similar to google-generative-ai
 """
 
 import time
-from typing import Dict, Any, Optional, List
+from typing import Dict, Any, Optional, List, Union
 from dataclasses import dataclass
 from .client import CostKatanaClient
 from .exceptions import CostKatanaError, ModelNotAvailableError
 
+
 @dataclass
 class GenerationConfig:
     """Configuration for text generation"""
+
     temperature: float = 0.7
     max_output_tokens: int = 2000
     top_p: Optional[float] = None
@@ -18,9 +20,11 @@ class GenerationConfig:
     candidate_count: int = 1
     stop_sequences: Optional[List[str]] = None
 
+
 @dataclass
 class UsageMetadata:
     """Usage metadata returned with responses"""
+
     prompt_tokens: int
     completion_tokens: int
     total_tokens: int
@@ -32,149 +36,149 @@ class UsageMetadata:
     agent_path: Optional[List[str]] = None
     risk_level: Optional[str] = None
 
+
 class GenerateContentResponse:
     """Response from generate_content method"""
 
     def __init__(self, response_data: Dict[str, Any]):
         self._data = response_data
-        self._text = response_data.get(
-            …
+        self._text = response_data.get("data", {}).get("response", "")
+
         # Extract usage metadata
-        data = response_data.get(
+        data = response_data.get("data", {})
         self.usage_metadata = UsageMetadata(
-            prompt_tokens=data.get(
-                …
+            prompt_tokens=data.get(
+                "tokenCount", 0
+            ),  # This might need adjustment based on actual response
+            completion_tokens=data.get("tokenCount", 0),
+            total_tokens=data.get("tokenCount", 0),
+            cost=data.get("cost", 0.0),
+            latency=data.get("latency", 0.0),
+            model=data.get("model", ""),
+            optimizations_applied=data.get("optimizationsApplied"),
+            cache_hit=data.get("cacheHit", False),
+            agent_path=data.get("agentPath"),
+            risk_level=data.get("riskLevel"),
         )
 
         # Store thinking/reasoning if available
-        self.thinking = data.get(
+        self.thinking = data.get("thinking")
 
     @property
     def text(self) -> str:
         """Get the response text"""
         return self._text
 
     @property
     def parts(self) -> List[Dict[str, Any]]:
         """Get response parts (for compatibility)"""
-        return [{
-            …
+        return [{"text": self._text}] if self._text else []
 
     def __str__(self) -> str:
         return self._text
 
     def __repr__(self) -> str:
         return f"GenerateContentResponse(text='{self._text[:50]}...', cost=${self.usage_metadata.cost:.4f})"
 
+
 class ChatSession:
     """A chat session for maintaining conversation context"""
 
     def __init__(
         self,
         client: CostKatanaClient,
         model_id: str,
         generation_config: Optional[GenerationConfig] = None,
-        conversation_id: str = None
+        conversation_id: Optional[str] = None,
     ):
         self.client = client
         self.model_id = model_id
         self.generation_config = generation_config or GenerationConfig()
         self.conversation_id = conversation_id
         self.history: List[Dict[str, Any]] = []
 
         # Create conversation if not provided
         if not self.conversation_id:
             try:
                 conv_response = self.client.create_conversation(
-                    title=f"Chat with {model_id}",
-                    model_id=model_id
+                    title=f"Chat with {model_id}", model_id=model_id
                 )
-                self.conversation_id = conv_response[
+                self.conversation_id = conv_response["data"]["id"]
             except Exception as e:
                 raise CostKatanaError(f"Failed to create conversation: {str(e)}")
 
-    def send_message(
-        self,
-        message: str,
-        **kwargs
-    ) -> GenerateContentResponse:
+    def send_message(self, message: str, **kwargs) -> GenerateContentResponse:
         """
         Send a message in the chat session.
 
         Args:
             message: The message to send
             **kwargs: Additional parameters to override defaults
 
         Returns:
             GenerateContentResponse with the model's reply
 
         Example:
             response = chat.send_message("What's the weather like?")
             print(response.text)
         """
         # Merge generation config with kwargs
         params = {
-            …
+            "temperature": kwargs.get("temperature", self.generation_config.temperature),
+            "max_tokens": kwargs.get("max_tokens", self.generation_config.max_output_tokens),
+            "chat_mode": kwargs.get("chat_mode", "balanced"),
+            "use_multi_agent": kwargs.get("use_multi_agent", False),
         }
 
         # Add any additional parameters
         for key, value in kwargs.items():
             if key not in params:
                 params[key] = value
 
         try:
             response_data = self.client.send_message(
                 message=message,
                 model_id=self.model_id,
                 conversation_id=self.conversation_id,
-                **params
+                **params,
             )
 
             # Add to history
-            self.history.append({
-                …
-            })
+            self.history.append({"role": "user", "content": message, "timestamp": time.time()})
+
+            response_text = response_data.get("data", {}).get("response", "")
+            self.history.append(
+                {
+                    "role": "assistant",
+                    "content": response_text,
+                    "timestamp": time.time(),
+                    "metadata": response_data.get("data", {}),
+                }
+            )
 
             return GenerateContentResponse(response_data)
 
         except Exception as e:
             if isinstance(e, CostKatanaError):
                 raise
             raise CostKatanaError(f"Failed to send message: {str(e)}")
 
     def get_history(self) -> List[Dict[str, Any]]:
         """Get the conversation history"""
+        if not self.conversation_id:
+            return self.history
+
         try:
             history_response = self.client.get_conversation_history(self.conversation_id)
-            return history_response.get(
-        except Exception
+            return history_response.get("data", [])
+        except Exception:
             # Fall back to local history if API call fails
             return self.history
 
     def clear_history(self):
         """Clear the local conversation history"""
         self.history = []
 
     def delete_conversation(self):
         """Delete the conversation from the server"""
         try:
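The hunk above rewrites `GenerateContentResponse` to read the reply and its usage metadata from the nested `data` object. A minimal sketch of what that parsing accepts, using only the keys the hunk actually reads (the payload below is illustrative; a real Cost Katana response may carry more fields):

```python
# Illustrative payload: only the keys read by GenerateContentResponse in
# the hunk above are included; real server responses may have more fields.
from cost_katana.models import GenerateContentResponse

payload = {
    "data": {
        "response": "Hello!",
        "tokenCount": 42,  # used for prompt/completion/total alike
        "cost": 0.00012,
        "latency": 0.85,
        "model": "gemini-2.0-flash",
        "cacheHit": False,
    }
}

resp = GenerateContentResponse(payload)
assert resp.text == "Hello!"
assert resp.usage_metadata.total_tokens == 42
assert resp.parts == [{"text": "Hello!"}]
```

Note that prompt, completion, and total token counts all come from the single `tokenCount` field, which the inline comment in the diff itself flags as provisional.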
@@ -184,22 +188,23 @@ class ChatSession:
         except Exception as e:
             raise CostKatanaError(f"Failed to delete conversation: {str(e)}")
 
+
 class GenerativeModel:
     """
     A generative AI model with a simple interface similar to google-generative-ai.
     All requests are routed through Cost Katana for optimization and cost management.
     """
 
     def __init__(
         self,
         client: CostKatanaClient,
         model_name: str,
         generation_config: Optional[GenerationConfig] = None,
-        **kwargs
+        **kwargs,
     ):
         """
         Initialize a generative model.
 
         Args:
             client: Cost Katana client instance
             model_name: Name of the model (e.g., 'gemini-2.0-flash', 'claude-3-sonnet')
@@ -211,16 +216,16 @@ class GenerativeModel:
         self.model_id = client.config.get_model_mapping(model_name)
         self.generation_config = generation_config or GenerationConfig()
         self.model_params = kwargs
 
         # Validate model is available
         self._validate_model()
 
     def _validate_model(self):
         """Validate that the model is available"""
         try:
             available_models = self.client.get_available_models()
-            model_ids = [model.get(
-                …
+            model_ids = [model.get("id", model.get("modelId", "")) for model in available_models]
+
             if self.model_id not in model_ids and self.model_name not in model_ids:
                 raise ModelNotAvailableError(
                     f"Model '{self.model_name}' (ID: {self.model_id}) is not available. "
@@ -231,24 +236,24 @@ class GenerativeModel:
         except Exception as e:
             # If we can't validate, log but don't fail - the model might still work
             print(f"Warning: Could not validate model availability: {e}")
 
     def generate_content(
         self,
         prompt: Union[str, List[str]],
         generation_config: Optional[GenerationConfig] = None,
-        **kwargs
+        **kwargs,
     ) -> GenerateContentResponse:
         """
         Generate content from a prompt.
 
         Args:
             prompt: Text prompt or list of prompts
             generation_config: Generation configuration (overrides instance config)
             **kwargs: Additional parameters
 
         Returns:
             GenerateContentResponse with the generated content
 
         Example:
             model = cost_katana.GenerativeModel('gemini-2.0-flash')
             response = model.generate_content("Tell me about AI")
@@ -258,53 +263,47 @@ class GenerativeModel:
         # Handle multiple prompts
         if isinstance(prompt, list):
             prompt = "\n\n".join(str(p) for p in prompt)
 
         # Use provided config or instance config
         config = generation_config or self.generation_config
 
         # Prepare parameters
         params = {
-            …
+            "temperature": kwargs.get("temperature", config.temperature),
+            "max_tokens": kwargs.get("max_tokens", config.max_output_tokens),
+            "chat_mode": kwargs.get("chat_mode", "balanced"),
+            "use_multi_agent": kwargs.get("use_multi_agent", False),
         }
 
         # Add any additional parameters from model_params or kwargs
         params.update(self.model_params)
         for key, value in kwargs.items():
             if key not in params:
                 params[key] = value
 
         try:
             response_data = self.client.send_message(
-                message=prompt,
-                model_id=self.model_id,
-                **params
+                message=prompt, model_id=self.model_id, **params
             )
 
             return GenerateContentResponse(response_data)
 
         except Exception as e:
             if isinstance(e, CostKatanaError):
                 raise
             raise CostKatanaError(f"Failed to generate content: {str(e)}")
 
-    def start_chat(
-        self,
-        history: Optional[List[Dict[str, Any]]] = None,
-        **kwargs
-    ) -> ChatSession:
+    def start_chat(self, history: Optional[List[Dict[str, Any]]] = None, **kwargs) -> ChatSession:
         """
         Start a chat session.
 
         Args:
             history: Optional conversation history
             **kwargs: Additional chat configuration
 
         Returns:
             ChatSession instance
 
         Example:
             model = cost_katana.GenerativeModel('gemini-2.0-flash')
             chat = model.start_chat()
@@ -315,15 +314,15 @@ class GenerativeModel:
             client=self.client,
             model_id=self.model_id,
             generation_config=self.generation_config,
-            **kwargs
+            **kwargs,
         )
 
         # Add history if provided
         if history:
             chat_session.history = history
 
         return chat_session
 
     def count_tokens(self, prompt: str) -> Dict[str, int]:
         """
         Count tokens in a prompt (estimated).
@@ -332,12 +331,12 @@ class GenerativeModel:
         # Simple word-based estimation - not accurate but gives an idea
         words = len(prompt.split())
         estimated_tokens = int(words * 1.3)  # Rough approximation
 
         return {
-            …
+            "total_tokens": estimated_tokens,
+            "prompt_tokens": estimated_tokens,
+            "completion_tokens": 0,
         }
 
     def __repr__(self) -> str:
-        return f"GenerativeModel(model_name='{self.model_name}', model_id='{self.model_id}')"
+        return f"GenerativeModel(model_name='{self.model_name}', model_id='{self.model_id}')"
{cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/METADATA
CHANGED
@@ -1,12 +1,12 @@
 Metadata-Version: 2.4
 Name: cost-katana
-Version: 1.0.0
+Version: 1.0.2
 Summary: Unified AI interface with cost optimization and failover
-Home-page: https://github.com/
+Home-page: https://github.com/Hypothesize-Tech/cost-katana-python
 Author: Cost Katana Team
-Author-email:
-Project-URL: Bug Reports, https://github.com/
-Project-URL: Source, https://github.com/
+Author-email: abdul@hypothesize.tech
+Project-URL: Bug Reports, https://github.com/Hypothesize-Tech/cost-katana-python/issues
+Project-URL: Source, https://github.com/Hypothesize-Tech/cost-katana-python
 Project-URL: Documentation, https://docs.costkatana.com
 Keywords: ai,machine learning,cost optimization,openai,anthropic,aws bedrock,gemini
 Classifier: Development Status :: 4 - Beta
@@ -159,7 +159,7 @@ model = ck.GenerativeModel('gemini') # Uses mapping from config
 ### Environment Variables
 
 ```bash
-export
+export API_KEY=dak_your_key_here
 export COST_KATANA_DEFAULT_MODEL=claude-3-sonnet
 ```
 
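The corrected environment-variables snippet above pairs an `API_KEY` (note the `dak_` prefix) with a default model. Below is a sketch of wiring those variables into the SDK by hand; whether 1.0.2 auto-reads `API_KEY` at import time is not shown in this diff, so passing the key explicitly is the conservative route:

```python
# Sketch: resolve the README's environment variables manually and pass
# them to the SDK; auto-detection of API_KEY is not confirmed by this diff.
import os

import cost_katana as ck

api_key = os.environ.get("API_KEY")  # e.g. dak_your_key_here
model_name = os.environ.get("COST_KATANA_DEFAULT_MODEL", "claude-3-sonnet")

ck.configure(api_key=api_key)  # configure() assumed from the package README
model = ck.GenerativeModel(model_name)
```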
@@ -412,9 +412,10 @@ class GenerateContentResponse:
 ## 🤝 Support
 
 - **Documentation**: [docs.costkatana.com](https://docs.costkatana.com)
-- **Discord Community**: [discord.gg/costkatana](https://discord.gg/
-- **Email Support**:
+- **Discord Community**: [discord.gg/costkatana](https://discord.gg/Wcwzw8wM)
+- **Email Support**: abdul@hypothesize.tech
 - **GitHub Issues**: [github.com/cost-katana/python-sdk](https://github.com/cost-katana/python-sdk)
+- **GitHub Repository**: [github.com/Hypothesize-Tech/cost-katana-python](https://github.com/Hypothesize-Tech/cost-katana-python)
 
 ## 📄 License
 
@@ -422,4 +423,4 @@ MIT License - see [LICENSE](LICENSE) for details.
 
 ---
 
-**Ready to optimize your AI costs?** Get started at [costkatana.com](https://costkatana.com)
+**Ready to optimize your AI costs?** Get started at [costkatana.com](https://costkatana.com) 🚀# cost-katana-python
cost_katana-1.0.2.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@
+cost_katana/__init__.py,sha256=K6jfpo5xwdkjcJpJOIZe3_g3MPzwbJ--SjR7X2flx-Y,1754
+cost_katana/cli.py,sha256=Xe88QKiNdB1aBnV60sSHLSvJ6YSk-BB9Ke45PSGp72Y,29404
+cost_katana/client.py,sha256=gZX_-VC2gMuOjXO5XP2-YHElAy2K09XdhZW1jebeSLM,15782
+cost_katana/config.py,sha256=5uxS_8Qqcb1FlINNMdVl1vxJ_n3FK2BHlJGpeJsoXL0,6792
+cost_katana/exceptions.py,sha256=VNwc9lpShHQkHsgpAB-w-QJLNH6XRhuUzuXmbj9I9I8,963
+cost_katana/models.py,sha256=zmPSVF8sLhSu1-o47_cq1Up3FkN6mE_Co9kaBJiCcIE,11602
+cost_katana-1.0.2.dist-info/licenses/LICENSE,sha256=P7-BNX2xxJZ11R7KpNzczN_H1KJ6R8TisirpIQZWSzw,1067
+cost_katana-1.0.2.dist-info/METADATA,sha256=_4ZwrBTDUE9SMMaGJz3EmSTs9c1JtgF1oKbuW9cVELI,12384
+cost_katana-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cost_katana-1.0.2.dist-info/entry_points.txt,sha256=vJX-F_Xy4kOoGDZr29uOxB9Iu8ZJDgi4u5NC4_XwFEA,53
+cost_katana-1.0.2.dist-info/top_level.txt,sha256=VdbCDM3Xp_40Yu73-xCGWUJRn0pPs6kc0iMU3yd59lo,12
+cost_katana-1.0.2.dist-info/RECORD,,
cost_katana-1.0.0.dist-info/RECORD
REMOVED
@@ -1,12 +0,0 @@
-cost_katana/__init__.py,sha256=ahLaLIQY5LmDMBS64qXe7gCObdRxL4HjP8UVzpPEqLY,1784
-cost_katana/cli.py,sha256=FHK7xPdyU5w3aBTEHCN-Km9j5p1HSYGCQZ10CJ62_0s,10334
-cost_katana/client.py,sha256=Shsf0LNGOsW2lh8uTOrT1XGZhXRXvAh5jZ3AYGKvaFc,7962
-cost_katana/config.py,sha256=n3bnV7o2YlY7pXyTWLnqQ3aNgE14V0mr6Ujv4ystI6U,7000
-cost_katana/exceptions.py,sha256=36JD4uykJcMOT-Zdgp4fghmoNzCQMVpao7xmupKxKgQ,944
-cost_katana/models.py,sha256=95N3ZyoGv0vfE-tl6RU2JS13QXDMmzTlUJqhksntgtk,11980
-cost_katana-1.0.0.dist-info/licenses/LICENSE,sha256=P7-BNX2xxJZ11R7KpNzczN_H1KJ6R8TisirpIQZWSzw,1067
-cost_katana-1.0.0.dist-info/METADATA,sha256=GSXAEuDPgFHbbCFSJMNJa0648NGPF1gSB91ZmFuc8Zo,12224
-cost_katana-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-cost_katana-1.0.0.dist-info/entry_points.txt,sha256=vJX-F_Xy4kOoGDZr29uOxB9Iu8ZJDgi4u5NC4_XwFEA,53
-cost_katana-1.0.0.dist-info/top_level.txt,sha256=VdbCDM3Xp_40Yu73-xCGWUJRn0pPs6kc0iMU3yd59lo,12
-cost_katana-1.0.0.dist-info/RECORD,,
{cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/WHEEL
File without changes

{cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/entry_points.txt
File without changes

{cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/licenses/LICENSE
File without changes

{cost_katana-1.0.0.dist-info → cost_katana-1.0.2.dist-info}/top_level.txt
File without changes