kalibr 1.0.28__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kalibr/__init__.py +170 -3
- kalibr/__main__.py +3 -203
- kalibr/capsule_middleware.py +108 -0
- kalibr/cli/__init__.py +5 -0
- kalibr/cli/capsule_cmd.py +174 -0
- kalibr/cli/deploy_cmd.py +114 -0
- kalibr/cli/main.py +67 -0
- kalibr/cli/run.py +200 -0
- kalibr/cli/serve.py +59 -0
- kalibr/client.py +293 -0
- kalibr/collector.py +173 -0
- kalibr/context.py +132 -0
- kalibr/cost_adapter.py +222 -0
- kalibr/decorators.py +140 -0
- kalibr/instrumentation/__init__.py +13 -0
- kalibr/instrumentation/anthropic_instr.py +282 -0
- kalibr/instrumentation/base.py +108 -0
- kalibr/instrumentation/google_instr.py +281 -0
- kalibr/instrumentation/openai_instr.py +265 -0
- kalibr/instrumentation/registry.py +153 -0
- kalibr/kalibr.py +144 -230
- kalibr/kalibr_app.py +53 -314
- kalibr/middleware/__init__.py +5 -0
- kalibr/middleware/auto_tracer.py +356 -0
- kalibr/models.py +41 -0
- kalibr/redaction.py +44 -0
- kalibr/schemas.py +116 -0
- kalibr/simple_tracer.py +255 -0
- kalibr/tokens.py +52 -0
- kalibr/trace_capsule.py +296 -0
- kalibr/trace_models.py +201 -0
- kalibr/tracer.py +354 -0
- kalibr/types.py +25 -93
- kalibr/utils.py +198 -0
- kalibr-1.1.0.dist-info/METADATA +97 -0
- kalibr-1.1.0.dist-info/RECORD +40 -0
- kalibr-1.1.0.dist-info/entry_points.txt +2 -0
- kalibr-1.1.0.dist-info/licenses/LICENSE +21 -0
- kalibr/deployment.py +0 -41
- kalibr/packager.py +0 -43
- kalibr/runtime_router.py +0 -138
- kalibr/schema_generators.py +0 -159
- kalibr/validator.py +0 -70
- kalibr-1.0.28.data/data/examples/README.md +0 -173
- kalibr-1.0.28.data/data/examples/basic_kalibr_example.py +0 -66
- kalibr-1.0.28.data/data/examples/enhanced_kalibr_example.py +0 -347
- kalibr-1.0.28.dist-info/METADATA +0 -175
- kalibr-1.0.28.dist-info/RECORD +0 -19
- kalibr-1.0.28.dist-info/entry_points.txt +0 -2
- kalibr-1.0.28.dist-info/licenses/LICENSE +0 -11
- {kalibr-1.0.28.dist-info → kalibr-1.1.0.dist-info}/WHEEL +0 -0
- {kalibr-1.0.28.dist-info → kalibr-1.1.0.dist-info}/top_level.txt +0 -0
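For readers who want to check these changes outside the registry UI, one rough way to reproduce the file-level summary above is sketched below. This is an illustrative sketch, not part of the package: it assumes network access, Python 3.9+, and that PyPI hosts `py3-none-any` wheels for both versions (the helper names here are hypothetical).

```python
# Sketch: download both kalibr wheels and list file-level additions/removals.
import subprocess, sys, zipfile
from pathlib import Path

def fetch_wheel(version: str, dest: Path) -> Path:
    """Download a single wheel with pip and return its path (assumes a .whl is published)."""
    dest.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        [sys.executable, "-m", "pip", "download", f"kalibr=={version}", "--no-deps", "-d", str(dest)],
        check=True,
    )
    return next(dest.glob("*.whl"))  # raises StopIteration if only an sdist was fetched

def member_names(whl: Path) -> set[str]:
    """Return the set of file paths stored inside the wheel archive."""
    with zipfile.ZipFile(whl) as z:
        return set(z.namelist())

old = member_names(fetch_wheel("1.0.28", Path("old")))
new = member_names(fetch_wheel("1.1.0", Path("new")))
print("removed:", sorted(old - new))
print("added:  ", sorted(new - old))
```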
kalibr-1.0.28.data/data/examples/enhanced_kalibr_example.py
DELETED
@@ -1,347 +0,0 @@
-"""
-Enhanced Kalibr App Example - App-level capabilities
-This demonstrates the new enhanced capabilities including file uploads,
-sessions, streaming, workflows, and multi-model schema generation.
-"""
-
-from kalibr import KalibrApp
-from kalibr.types import FileUpload, Session, StreamingResponse, WorkflowState, AuthenticatedUser
-import asyncio
-import json
-from datetime import datetime
-from typing import List
-
-# Create an enhanced KalibrApp instance
-app = KalibrApp(title="Enhanced Kalibr Demo", base_url="http://localhost:8000")
-
-# Basic action (compatible with original Kalibr)
-@app.action("hello", "Say hello with enhanced capabilities")
-def hello_enhanced(name: str = "World", include_timestamp: bool = False):
-    """Enhanced hello function with optional timestamp"""
-    message = f"Hello, {name}! This is Enhanced Kalibr v2.0"
-
-    response = {"message": message}
-    if include_timestamp:
-        response["timestamp"] = datetime.now().isoformat()
-
-    return response
-
-# File upload handler
-@app.file_handler("analyze_document", [".txt", ".md", ".py", ".js", ".json"])
-async def analyze_document(file: FileUpload):
-    """Analyze uploaded document and return insights"""
-    try:
-        # Decode file content
-        content = file.content.decode('utf-8')
-
-        # Basic analysis
-        lines = content.split('\n')
-        words = content.split()
-
-        # Language detection based on file extension
-        language = "text"
-        if file.filename.endswith('.py'):
-            language = "python"
-        elif file.filename.endswith('.js'):
-            language = "javascript"
-        elif file.filename.endswith('.json'):
-            language = "json"
-            try:
-                json_data = json.loads(content)
-                return {
-                    "upload_id": file.upload_id,
-                    "filename": file.filename,
-                    "analysis": {
-                        "type": "json",
-                        "valid_json": True,
-                        "keys": list(json_data.keys()) if isinstance(json_data, dict) else None,
-                        "size_bytes": file.size
-                    }
-                }
-            except json.JSONDecodeError:
-                pass
-
-        return {
-            "upload_id": file.upload_id,
-            "filename": file.filename,
-            "analysis": {
-                "language": language,
-                "line_count": len(lines),
-                "word_count": len(words),
-                "character_count": len(content),
-                "size_bytes": file.size,
-                "non_empty_lines": len([line for line in lines if line.strip()]),
-                "estimated_reading_time_minutes": len(words) / 200  # Average reading speed
-            }
-        }
-    except UnicodeDecodeError:
-        return {
-            "upload_id": file.upload_id,
-            "filename": file.filename,
-            "error": "File is not text-readable (binary file)",
-            "size_bytes": file.size
-        }
-
-# Session-aware action
-@app.session_action("save_note", "Save a note to user session")
-async def save_note(session: Session, note_title: str, note_content: str):
-    """Save a note to the user's session"""
-
-    # Initialize notes if not exists
-    if 'notes' not in session.data:
-        session.data['notes'] = []
-
-    # Create note object
-    note = {
-        "id": len(session.data['notes']) + 1,
-        "title": note_title,
-        "content": note_content,
-        "created_at": datetime.now().isoformat(),
-        "updated_at": datetime.now().isoformat()
-    }
-
-    session.data['notes'].append(note)
-    session.set('last_note_id', note['id'])
-
-    return {
-        "status": "saved",
-        "note": note,
-        "total_notes": len(session.data['notes']),
-        "session_id": session.session_id
-    }
-
-@app.session_action("get_notes", "Retrieve all notes from session")
-async def get_notes(session: Session):
-    """Get all notes from the user's session"""
-    notes = session.get('notes', [])
-
-    return {
-        "notes": notes,
-        "count": len(notes),
-        "session_id": session.session_id,
-        "last_note_id": session.get('last_note_id')
-    }
-
-# Streaming action
-@app.stream_action("count_with_progress", "Stream counting with progress updates")
-async def count_with_progress(max_count: int = 10, delay_seconds: float = 1.0):
-    """Stream counting numbers with progress indication"""
-
-    for i in range(max_count + 1):
-        progress_percent = (i / max_count) * 100
-
-        yield {
-            "count": i,
-            "max_count": max_count,
-            "progress_percent": progress_percent,
-            "message": f"Counting: {i}/{max_count}",
-            "timestamp": datetime.now().isoformat(),
-            "is_complete": (i == max_count)
-        }
-
-        if i < max_count:  # Don't delay after the last item
-            await asyncio.sleep(delay_seconds)
-
-@app.stream_action("generate_fibonacci", "Stream Fibonacci sequence")
-async def generate_fibonacci(count: int = 20, delay_seconds: float = 0.5):
-    """Generate Fibonacci sequence as a stream"""
-
-    a, b = 0, 1
-    for i in range(count):
-        yield {
-            "position": i + 1,
-            "fibonacci_number": a,
-            "sequence_so_far": f"F({i+1}) = {a}",
-            "timestamp": datetime.now().isoformat()
-        }
-
-        a, b = b, a + b
-        await asyncio.sleep(delay_seconds)
-
-# Complex workflow
-@app.workflow("process_text_analysis", "Complete text analysis workflow")
-async def text_analysis_workflow(text: str, workflow_state: WorkflowState):
-    """Multi-step text analysis workflow"""
-
-    # Step 1: Validation
-    workflow_state.step = "validation"
-    workflow_state.status = "processing"
-
-    if not text or len(text.strip()) < 10:
-        workflow_state.status = "error"
-        return {"error": "Text must be at least 10 characters long"}
-
-    await asyncio.sleep(1)  # Simulate processing time
-
-    # Step 2: Basic analysis
-    workflow_state.step = "basic_analysis"
-    workflow_state.data["validation_passed"] = True
-
-    words = text.split()
-    sentences = [s.strip() for s in text.replace('!', '.').replace('?', '.').split('.') if s.strip()]
-
-    basic_stats = {
-        "character_count": len(text),
-        "word_count": len(words),
-        "sentence_count": len(sentences),
-        "paragraph_count": len([p for p in text.split('\n\n') if p.strip()])
-    }
-
-    workflow_state.data["basic_stats"] = basic_stats
-    await asyncio.sleep(1)
-
-    # Step 3: Advanced analysis
-    workflow_state.step = "advanced_analysis"
-
-    # Word frequency
-    word_freq = {}
-    for word in words:
-        clean_word = word.lower().strip('.,!?";:')
-        word_freq[clean_word] = word_freq.get(clean_word, 0) + 1
-
-    # Top words
-    top_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]
-
-    advanced_stats = {
-        "unique_words": len(word_freq),
-        "average_word_length": sum(len(word) for word in words) / len(words) if words else 0,
-        "longest_word": max(words, key=len) if words else None,
-        "top_words": top_words,
-        "readability_score": min(100, max(0, 100 - (len(words) / len(sentences) if sentences else 1) * 2))
-    }
-
-    workflow_state.data["advanced_stats"] = advanced_stats
-    await asyncio.sleep(1)
-
-    # Step 4: Final compilation
-    workflow_state.step = "compilation"
-
-    result = {
-        "workflow_id": workflow_state.workflow_id,
-        "analysis_type": "complete_text_analysis",
-        "input_text_preview": text[:100] + "..." if len(text) > 100 else text,
-        "basic_statistics": basic_stats,
-        "advanced_statistics": advanced_stats,
-        "processing_steps": ["validation", "basic_analysis", "advanced_analysis", "compilation"],
-        "completed_at": datetime.now().isoformat()
-    }
-
-    workflow_state.step = "completed"
-    workflow_state.status = "success"
-    workflow_state.data["final_result"] = result
-
-    return result
-
-# Data processing workflow
-@app.workflow("batch_text_processor", "Process multiple texts in batch")
-async def batch_text_processor(texts: List[str], workflow_state: WorkflowState):
-    """Process multiple texts as a batch workflow"""
-
-    workflow_state.step = "initialization"
-    workflow_state.status = "processing"
-
-    if not texts or len(texts) == 0:
-        workflow_state.status = "error"
-        return {"error": "No texts provided for processing"}
-
-    results = []
-    workflow_state.data["total_texts"] = len(texts)
-
-    for i, text in enumerate(texts):
-        workflow_state.step = f"processing_text_{i+1}"
-        workflow_state.data["current_text"] = i + 1
-        workflow_state.data["progress_percent"] = ((i + 1) / len(texts)) * 100
-
-        # Process each text
-        words = text.split()
-        analysis = {
-            "text_id": i + 1,
-            "text_preview": text[:50] + "..." if len(text) > 50 else text,
-            "word_count": len(words),
-            "character_count": len(text),
-            "sentence_count": len([s for s in text.split('.') if s.strip()])
-        }
-
-        results.append(analysis)
-        await asyncio.sleep(0.5)  # Simulate processing time
-
-    # Final aggregation
-    workflow_state.step = "aggregation"
-
-    total_words = sum(r["word_count"] for r in results)
-    total_chars = sum(r["character_count"] for r in results)
-
-    final_result = {
-        "workflow_id": workflow_state.workflow_id,
-        "batch_summary": {
-            "total_texts_processed": len(results),
-            "total_words": total_words,
-            "total_characters": total_chars,
-            "average_words_per_text": total_words / len(results) if results else 0,
-            "average_chars_per_text": total_chars / len(results) if results else 0
-        },
-        "individual_results": results,
-        "completed_at": datetime.now().isoformat()
-    }
-
-    workflow_state.step = "completed"
-    workflow_state.status = "success"
-    workflow_state.data["final_result"] = final_result
-
-    return final_result
-
-# Advanced action with multiple parameters
-@app.action("advanced_search", "Perform advanced search with multiple filters")
-def advanced_search(
-    query: str,
-    category: str = "all",
-    min_score: float = 0.0,
-    max_results: int = 10,
-    include_metadata: bool = False,
-    sort_by: str = "relevance"
-):
-    """Advanced search function demonstrating complex parameter handling"""
-
-    # Simulate search results
-    mock_results = [
-        {"id": 1, "title": f"Result matching '{query}'", "score": 0.95, "category": category},
-        {"id": 2, "title": f"Another match for '{query}'", "score": 0.87, "category": category},
-        {"id": 3, "title": f"Related to '{query}'", "score": 0.73, "category": category},
-    ]
-
-    # Filter by score
-    filtered_results = [r for r in mock_results if r["score"] >= min_score]
-
-    # Limit results
-    filtered_results = filtered_results[:max_results]
-
-    # Sort results
-    if sort_by == "score":
-        filtered_results.sort(key=lambda x: x["score"], reverse=True)
-
-    response = {
-        "query": query,
-        "filters": {
-            "category": category,
-            "min_score": min_score,
-            "max_results": max_results,
-            "sort_by": sort_by
-        },
-        "results": filtered_results,
-        "result_count": len(filtered_results)
-    }
-
-    if include_metadata:
-        response["metadata"] = {
-            "search_performed_at": datetime.now().isoformat(),
-            "processing_time_ms": 45,
-            "total_available": len(mock_results)
-        }
-
-    return response
-
-# Enable authentication (optional)
-# app.enable_auth("your-secret-jwt-key-here")
-
-# The app instance is automatically discovered by the Kalibr CLI
-# To run this: kalibr serve enhanced_kalibr_example.py --app-mode
kalibr-1.0.28.dist-info/METADATA
DELETED
@@ -1,175 +0,0 @@
-Metadata-Version: 2.4
-Name: kalibr
-Version: 1.0.28
-Summary: Multi-Model MCP SDK — deploy to GPT, Claude, Gemini, Copilot from one codebase.
-Home-page: https://github.com/devonakelley/kalibr-sdk
-Author: Kalibr Team
-Author-email: team@kalibr.dev
-Requires-Python: >=3.11
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: fastapi>=0.110.1
-Requires-Dist: uvicorn>=0.25.0
-Requires-Dist: typer>=0.9.0
-Requires-Dist: pydantic>=2.6.4
-Requires-Dist: requests>=2.31.0
-Requires-Dist: aiofiles>=23.2.1
-Requires-Dist: jsonschema>=4.21.1
-Dynamic: author
-Dynamic: author-email
-Dynamic: home-page
-Dynamic: license-file
-Dynamic: requires-python
-
-# Kalibr SDK v1.0.28
-### Multi-Model AI Integration Framework
-
-**Write once. Deploy anywhere. Connect to any AI model.**
-
-Kalibr turns your Python functions into APIs that automatically work with **GPT Actions**, **Claude MCP**, **Gemini Extensions**, and **Copilot Plugins** — all from one codebase.
-It’s the unified SDK layer for building, packaging, and deploying AI-ready endpoints.
-
----
-
-## 🧠 Core Purpose
-
-Kalibr is a **multi-model integration SDK** that converts simple Python functions into fully MCP-compatible APIs for every major AI model ecosystem.
-
-> One function → Four schemas → Deploy anywhere.
-
----
-
-## ⚙️ What It Does
-
-### 1. Unified Schema Generation
-
-Kalibr automatically generates and serves schemas for:
-- `/openapi.json` → GPT Actions
-- `/mcp.json` → Claude MCP
-- `/schemas/gemini` → Gemini Extensions
-- `/schemas/copilot` → Copilot Plugins
-- `/models/supported` → List of supported integrations
-
-No manual YAML or JSON schema creation needed.
-
----
-
-### 2. Environment-Aware Base URLs
-
-Kalibr auto-detects where it's running and sets the correct base URL automatically:
-
-| Environment | Example Base URL |
-|--------------|------------------|
-| Local | `http://localhost:8000` |
-| Fly.io | `https://<app>.fly.dev` |
-| Render | `https://<app>.onrender.com` |
-| Custom | Set via `KALIBR_BASE_URL` |
-
----
-
-### 3. Deployment & Runtime Abstraction
-
-Kalibr provides a single CLI entrypoint for local and hosted runtime deployment:
-
-```bash
-kalibr serve my_app.py
-kalibr deploy my_app.py --runtime fly|render|local
-kalibr version
-```
-
-Each runtime automatically generates valid schema URLs and deployment bundles (`Dockerfile`, `fly.toml`, etc).
-
----
-
-### 4. Two Development Modes
-
-#### Simple Mode (Function-Level)
-For lightweight APIs and test integrations.
-
-```python
-from kalibr import Kalibr
-
-app = Kalibr(title="Weather API")
-
-@app.action("get_weather", "Fetch weather data")
-def get_weather(city: str):
-    return {"city": city, "temp": 72, "condition": "sunny"}
-```
-
-#### Advanced Mode (Full App)
-
-```python
-from kalibr import KalibrApp
-from kalibr.types import FileUpload, Session
-
-app = KalibrApp(title="Document API")
-
-@app.file_handler("analyze_doc", [".pdf", ".docx"])
-async def analyze_doc(file: FileUpload):
-    return {"filename": file.filename, "result": "parsed"}
-
-@app.session_action("save_data")
-async def save_data(session: Session, data: dict):
-    session.set("data", data)
-    return {"saved": True}
-```
-
-Includes:
-- Async/await support
-- File uploads
-- Session persistence
-- Streaming responses
-- Workflow scaffolding
-
----
-
-### 5. Built-in Routes
-
-| Endpoint | Purpose |
-|-----------|----------|
-| `/health` | Health + version check |
-| `/docs` | Swagger UI |
-| `/models/supported` | Shows model compatibility |
-| `/openapi.json`, `/mcp.json`, etc. | Model schemas |
-
----
-
-### 6. CLI Reference
-
-```
-kalibr serve my_app.py    # Run locally
-kalibr deploy my_app.py   # Deploy to Fly/Render
-kalibr examples           # Copy examples
-kalibr version            # Show SDK version
-```
-
----
-
-### 7. Value Proposition
-
-For developers or MCP infrastructure projects:
-
-- **Instant MCP onboarding** — one file → all model schemas
-- **Zero config** — no schema or deployment setup required
-- **Multi-runtime support** — local, Fly, Render, or custom hosts
-- **Unified interface layer** — consistent schema output for all AI platforms
-
----
-
-### 8. Upcoming Additions
-
-- Observability + tracing hooks
-- Usage metering and billing
-- Schema diffing / auto-validation
-- Multi-runtime load routing
-
----
-
-### License
-
-MIT License © 2025 Kalibr Team
-
----
-
-**Kalibr SDK** — the unified layer between AI models and the real world.
-Write once. Deploy anywhere. Integrate everything.
kalibr-1.0.28.dist-info/RECORD
DELETED
@@ -1,19 +0,0 @@
-kalibr/__init__.py,sha256=0M3sG1no7m3EjMR_rwHfoWekMxswhFPbzfM0KfjF3eU,119
-kalibr/__main__.py,sha256=nRQLefjyoy9c_-UKU1UZGcCRrypQWchJAbUlmNFPv2g,8353
-kalibr/deployment.py,sha256=GZ874FXQX1uIroTA-UM5A-pTVn2UY0NxXi39vmTowyI,1396
-kalibr/kalibr.py,sha256=yrgXVlTgadBbpnX_l7fAxxjxGp9oxcZhzGjaQPiIcpo,10469
-kalibr/kalibr_app.py,sha256=5ylagvKEsGN6T6AFr9rWHaAjdWcpKdDG7nhDPUEQAQA,13809
-kalibr/packager.py,sha256=vrwviRzZMjegbDXyjZZwA7gsqchd5V6PZAZTi5WNjyQ,1353
-kalibr/runtime_router.py,sha256=PTXMj5yt72cfzYtjWQ-mUF3t8eov9nuXXI0ScIVNMrU,4606
-kalibr/schema_generators.py,sha256=xxnY05KgHTJ7BPXMCIC-McJCCt8aIOF5s0ptCwDqs_Y,6716
-kalibr/types.py,sha256=bNmf_cOWXBmhaMVAPEp3_EdRCcdXY2pbOgOxZ1dZ0Mc,3476
-kalibr/validator.py,sha256=PezDmHG9dVINce91rdYQstJC41eZP21qEA3VDICE3C4,2333
-kalibr-1.0.28.data/data/examples/README.md,sha256=loo2nm6yfT-pqGb5uNg1VeEdOKflYzHISUHTuSltfY0,4875
-kalibr-1.0.28.data/data/examples/basic_kalibr_example.py,sha256=Kfrh-XZuJ0vwFLB_xBpdqpgpMJw2NpIx0yBsqrAqBnE,2188
-kalibr-1.0.28.data/data/examples/enhanced_kalibr_example.py,sha256=AuhTpyRUNVAJuZKRy9iydXusNkBgQ84eKNiXxsr4iUQ,11994
-kalibr-1.0.28.dist-info/licenses/LICENSE,sha256=1WLJDkrueNpHCROy9zANrK2Ar2weqZ_z88hw90UKDoc,451
-kalibr-1.0.28.dist-info/METADATA,sha256=W1stKZstce3wcTk6RDNMPy6-OtoEW0wPKsqjZIHjU_M,4443
-kalibr-1.0.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-kalibr-1.0.28.dist-info/entry_points.txt,sha256=KiJfV_BaeYdIdYniww3wnSBBqSHpRxP9BTLNwu7IjyY,48
-kalibr-1.0.28.dist-info/top_level.txt,sha256=OkloC5_IfpE4-QwI30aLIYbFZk_-ChABWF7aBGddy28,7
-kalibr-1.0.28.dist-info/RECORD,,
kalibr-1.0.28.dist-info/licenses/LICENSE
DELETED
@@ -1,11 +0,0 @@
-Kalibr Source-Available License
-
-Copyright (c) 2025 Kalibr Systems.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-for the purpose of internal evaluation, testing, or demonstration only.
-
-Commercial, production, or revenue-generating use of this software requires a valid commercial
-license from Kalibr Systems (https://kalibr.systems).
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
{kalibr-1.0.28.dist-info → kalibr-1.1.0.dist-info}/WHEEL
File without changes

{kalibr-1.0.28.dist-info → kalibr-1.1.0.dist-info}/top_level.txt
File without changes
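As an aside on the deleted RECORD shown above: each row follows the standard wheel RECORD format, `path,sha256=<unpadded urlsafe-base64 digest>,size`. A minimal sketch of checking such entries against an extracted wheel, using only the Python standard library (paths here are hypothetical), is:

```python
import base64
import csv
import hashlib
from pathlib import Path

def verify_record(extracted_wheel_dir: str, record_path: str) -> None:
    """Compare each RECORD entry (path, sha256=<digest>, size) with the files on disk."""
    root = Path(extracted_wheel_dir)
    with open(record_path, newline="") as f:
        for path, digest, size in csv.reader(f):
            if not digest:  # the RECORD file itself has empty hash and size fields
                continue
            data = (root / path).read_bytes()
            algo, _, expected = digest.partition("=")
            actual = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=").decode()
            status = "OK" if (actual == expected and len(data) == int(size)) else "MISMATCH"
            print(f"{status}  {path}")

# Example (hypothetical paths):
# verify_record("kalibr-1.0.28-extracted",
#               "kalibr-1.0.28-extracted/kalibr-1.0.28.dist-info/RECORD")
```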