kalibr 1.0.17__py3-none-any.whl → 1.0.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kalibr/__init__.py +7 -0
- kalibr/__main__.py +672 -26
- kalibr/deployment.py +26 -0
- kalibr/kalibr.py +259 -0
- kalibr/kalibr_app.py +465 -34
- kalibr/schema_generators.py +212 -13
- kalibr/types.py +106 -0
- kalibr-1.0.20.data/data/examples/README.md +173 -0
- kalibr-1.0.20.data/data/examples/basic_kalibr_example.py +66 -0
- kalibr-1.0.20.data/data/examples/enhanced_kalibr_example.py +347 -0
- kalibr-1.0.20.dist-info/METADATA +302 -0
- kalibr-1.0.20.dist-info/RECORD +16 -0
- kalibr-1.0.17.dist-info/METADATA +0 -120
- kalibr-1.0.17.dist-info/RECORD +0 -10
- {kalibr-1.0.17.dist-info → kalibr-1.0.20.dist-info}/WHEEL +0 -0
- {kalibr-1.0.17.dist-info → kalibr-1.0.20.dist-info}/entry_points.txt +0 -0
- /kalibr-1.0.17.dist-info/licenses/LICENSE.txt → /kalibr-1.0.20.dist-info/licenses/LICENSE +0 -0
- {kalibr-1.0.17.dist-info → kalibr-1.0.20.dist-info}/top_level.txt +0 -0
kalibr/schema_generators.py
CHANGED
@@ -1,13 +1,212 @@
+"""
+Multi-model schema generators for different AI platforms
+"""
+from typing import Dict, Any, List
+from abc import ABC, abstractmethod
+
+class BaseSchemaGenerator(ABC):
+    """Base class for AI model schema generators"""
+
+    @abstractmethod
+    def generate_schema(self, actions: Dict, base_url: str) -> Dict[str, Any]:
+        """Generate schema for the specific AI model"""
+        pass
+
+class MCPSchemaGenerator(BaseSchemaGenerator):
+    """Claude MCP schema generator"""
+
+    def generate_schema(self, actions: Dict, base_url: str) -> Dict[str, Any]:
+        tools = []
+        for action_name, action_data in actions.items():
+            properties = {}
+            required = []
+
+            # Construct the input schema for the tool
+            for param_name, param_info in action_data["params"].items():
+                properties[param_name] = {"type": param_info["type"]}
+                if param_info["required"]:
+                    required.append(param_name)
+
+            tools.append({
+                "name": action_name,
+                "description": action_data["description"],
+                "input_schema": {
+                    "type": "object",
+                    "properties": properties,
+                    "required": required
+                },
+                "server": {
+                    "url": f"{base_url}/proxy/{action_name}"
+                }
+            })
+
+        return {
+            "mcp": "1.0",
+            "name": "kalibr-enhanced",
+            "tools": tools
+        }
+
+class OpenAPISchemaGenerator(BaseSchemaGenerator):
+    """GPT Actions OpenAPI schema generator"""
+
+    def generate_schema(self, actions: Dict, base_url: str) -> Dict[str, Any]:
+        paths = {}
+
+        for action_name, action_data in actions.items():
+            properties = {}
+            required = []
+
+            for param_name, param_info in action_data["params"].items():
+                properties[param_name] = {"type": param_info["type"]}
+                if param_info["required"]:
+                    required.append(param_name)
+
+            paths[f"/proxy/{action_name}"] = {
+                "post": {
+                    "summary": action_data["description"],
+                    "operationId": action_name,
+                    "requestBody": {
+                        "required": True,
+                        "content": {
+                            "application/json": {
+                                "schema": {
+                                    "type": "object",
+                                    "properties": properties,
+                                    "required": required
+                                }
+                            }
+                        }
+                    },
+                    "responses": {
+                        "200": {
+                            "description": "Successful response",
+                            "content": {
+                                "application/json": {
+                                    "schema": {"type": "object"}
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+
+        return {
+            "openapi": "3.0.0",
+            "info": {
+                "title": "Kalibr Enhanced API",
+                "version": "2.0.0",
+                "description": "Enhanced Kalibr API with app-level capabilities"
+            },
+            "servers": [{"url": base_url}],
+            "paths": paths
+        }
+
+class GeminiSchemaGenerator(BaseSchemaGenerator):
+    """Google Gemini Extensions schema generator"""
+
+    def generate_schema(self, actions: Dict, base_url: str) -> Dict[str, Any]:
+        functions = []
+
+        for action_name, action_data in actions.items():
+            parameters = {
+                "type": "object",
+                "properties": {},
+                "required": []
+            }
+
+            for param_name, param_info in action_data["params"].items():
+                parameters["properties"][param_name] = {
+                    "type": param_info["type"],
+                    "description": f"Parameter {param_name}"
+                }
+                if param_info["required"]:
+                    parameters["required"].append(param_name)
+
+            functions.append({
+                "name": action_name,
+                "description": action_data["description"],
+                "parameters": parameters,
+                "server": {
+                    "url": f"{base_url}/proxy/{action_name}"
+                }
+            })
+
+        return {
+            "gemini_extension": "1.0",
+            "name": "kalibr_enhanced",
+            "description": "Enhanced Kalibr API for Gemini integration",
+            "functions": functions
+        }
+
+class CopilotSchemaGenerator(BaseSchemaGenerator):
+    """Microsoft Copilot plugin schema generator"""
+
+    def generate_schema(self, actions: Dict, base_url: str) -> Dict[str, Any]:
+        apis = []
+
+        for action_name, action_data in actions.items():
+            request_schema = {
+                "type": "object",
+                "properties": {},
+                "required": []
+            }
+
+            for param_name, param_info in action_data["params"].items():
+                request_schema["properties"][param_name] = {
+                    "type": param_info["type"]
+                }
+                if param_info["required"]:
+                    request_schema["required"].append(param_name)
+
+            apis.append({
+                "name": action_name,
+                "description": action_data["description"],
+                "url": f"{base_url}/proxy/{action_name}",
+                "method": "POST",
+                "request_schema": request_schema,
+                "response_schema": {
+                    "type": "object",
+                    "description": "API response"
+                }
+            })
+
+        return {
+            "schema_version": "v1",
+            "name_for_model": "kalibr_enhanced",
+            "name_for_human": "Enhanced Kalibr API",
+            "description_for_model": "Enhanced Kalibr API with advanced capabilities",
+            "description_for_human": "API for advanced AI model integrations",
+            "auth": {
+                "type": "none"
+            },
+            "api": {
+                "type": "openapi",
+                "url": f"{base_url}/openapi.json"
+            },
+            "apis": apis
+        }
+
+class CustomModelSchemaGenerator(BaseSchemaGenerator):
+    """Extensible generator for future AI models"""
+
+    def __init__(self, model_name: str, schema_format: str):
+        self.model_name = model_name
+        self.schema_format = schema_format
+
+    def generate_schema(self, actions: Dict, base_url: str) -> Dict[str, Any]:
+        # Generic schema format that can be customized
+        return {
+            "model": self.model_name,
+            "format": self.schema_format,
+            "version": "2.0.0",
+            "base_url": base_url,
+            "actions": [
+                {
+                    "name": name,
+                    "description": data["description"],
+                    "parameters": data["params"],
+                    "endpoint": f"{base_url}/proxy/{name}"
+                }
+                for name, data in actions.items()
+            ]
+        }
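For orientation, a minimal sketch of how these generators can be driven outside the server. The shape of the `actions` dict (a `description` plus a `params` map of `{"type", "required"}` entries) is inferred from how the generators above read it; the action name and parameters here are hypothetical.

```python
import json

from kalibr.schema_generators import MCPSchemaGenerator, OpenAPISchemaGenerator

# Hypothetical action registry, shaped the way the generators above consume it:
# each action has a description and a params map of {"type": ..., "required": ...}.
actions = {
    "greet": {
        "description": "Greet someone with a personalized message",
        "params": {
            "name": {"type": "string", "required": True},
            "greeting": {"type": "string", "required": False},
        },
    },
}

base_url = "http://localhost:8000"

mcp_schema = MCPSchemaGenerator().generate_schema(actions, base_url)
openapi_schema = OpenAPISchemaGenerator().generate_schema(actions, base_url)

print(json.dumps(mcp_schema, indent=2))      # tools list in Claude MCP form
print(json.dumps(openapi_schema, indent=2))  # OpenAPI 3.0 document for GPT Actions
```

Both calls return plain dicts, which the enhanced app can then serve from endpoints such as `/mcp.json` and `/openapi.json` as described in the bundled examples README below.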
kalibr/types.py
ADDED
@@ -0,0 +1,106 @@
+"""
+Enhanced data types for Kalibr app-level framework
+"""
+from pydantic import BaseModel, Field
+from typing import Optional, Dict, Any, List, Union, AsyncGenerator
+from datetime import datetime
+import uuid
+import io
+
+class FileUpload(BaseModel):
+    """Enhanced file upload handling for AI model integrations"""
+    filename: str
+    content_type: str
+    size: int
+    content: bytes
+    upload_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    uploaded_at: datetime = Field(default_factory=datetime.now)
+
+    class Config:
+        arbitrary_types_allowed = True
+
+class ImageData(BaseModel):
+    """Image data type for AI vision capabilities"""
+    filename: str
+    content_type: str
+    width: Optional[int] = None
+    height: Optional[int] = None
+    format: str  # jpeg, png, webp, etc.
+    content: bytes
+    image_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+
+    class Config:
+        arbitrary_types_allowed = True
+
+class TableData(BaseModel):
+    """Structured table data for AI analysis"""
+    headers: List[str]
+    rows: List[List[Any]]
+    table_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    metadata: Optional[Dict[str, Any]] = None
+
+class StreamingResponse(BaseModel):
+    """Base class for streaming responses"""
+    chunk_id: str
+    content: Any
+    is_final: bool = False
+    timestamp: datetime = Field(default_factory=datetime.now)
+
+class Session(BaseModel):
+    """Session management for stateful interactions"""
+    session_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    user_id: Optional[str] = None
+    created_at: datetime = Field(default_factory=datetime.now)
+    last_accessed: datetime = Field(default_factory=datetime.now)
+    data: Dict[str, Any] = Field(default_factory=dict)
+    expires_at: Optional[datetime] = None
+
+    def get(self, key: str, default=None):
+        """Get session data"""
+        return self.data.get(key, default)
+
+    def set(self, key: str, value: Any):
+        """Set session data"""
+        self.data[key] = value
+        self.last_accessed = datetime.now()
+
+    def delete(self, key: str):
+        """Delete session data"""
+        if key in self.data:
+            del self.data[key]
+
+class AuthenticatedUser(BaseModel):
+    """Authenticated user context"""
+    user_id: str
+    username: str
+    email: Optional[str] = None
+    roles: List[str] = Field(default_factory=list)
+    permissions: List[str] = Field(default_factory=list)
+    auth_method: str  # "jwt", "oauth", "api_key", etc.
+
+class FileDownload(BaseModel):
+    """File download response"""
+    filename: str
+    content_type: str
+    content: bytes
+
+    class Config:
+        arbitrary_types_allowed = True
+
+class AnalysisResult(BaseModel):
+    """Generic analysis result structure"""
+    result_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    status: str  # "success", "error", "pending"
+    data: Dict[str, Any] = Field(default_factory=dict)
+    created_at: datetime = Field(default_factory=datetime.now)
+    processing_time: Optional[float] = None
+    metadata: Optional[Dict[str, Any]] = None
+
+class WorkflowState(BaseModel):
+    """Workflow state management"""
+    workflow_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    step: str
+    status: str
+    data: Dict[str, Any] = Field(default_factory=dict)
+    created_at: datetime = Field(default_factory=datetime.now)
+    updated_at: datetime = Field(default_factory=datetime.now)
kalibr-1.0.20.data/data/examples/README.md
ADDED
@@ -0,0 +1,173 @@
+# Enhanced Kalibr SDK Examples
+
+This directory contains examples demonstrating both the original function-level Kalibr capabilities and the new enhanced app-level features.
+
+## Examples Included
+
+### 1. Basic Kalibr Example (`basic_kalibr_example.py`)
+Demonstrates the original Kalibr SDK capabilities:
+- Simple function decoration with `@sdk.action()`
+- Basic parameter handling and type inference
+- Compatible with GPT Actions and Claude MCP
+- Simple API endpoints
+
+**Features shown:**
+- Text processing functions
+- Mathematical calculations
+- Email validation
+- Text statistics
+
+**To run:**
+```bash
+kalibr serve basic_kalibr_example.py
+```
+
+**Test endpoints:**
+- `POST /proxy/greet` - Greeting function
+- `POST /proxy/calculate` - Basic calculator
+- `POST /proxy/validate_email` - Email validation
+- `POST /proxy/text_stats` - Text analysis
+
+### 2. Enhanced Kalibr Example (`enhanced_kalibr_example.py`)
+Demonstrates the new enhanced app-level capabilities:
+- File upload handling
+- Session management
+- Streaming responses
+- Complex workflows
+- Multi-model schema generation
+
+**Features shown:**
+- File upload and analysis
+- Session-based note taking
+- Real-time streaming data
+- Multi-step workflows
+- Advanced parameter handling
+
+**To run:**
+```bash
+kalibr serve enhanced_kalibr_example.py --app-mode
+```
+
+**Test endpoints:**
+- `POST /upload/analyze_document` - File upload analysis
+- `POST /session/save_note` - Session-based note saving
+- `GET /stream/count_with_progress` - Streaming counter
+- `POST /workflow/process_text_analysis` - Complex text workflow
+
+## Multi-Model Integration
+
+Both examples automatically generate schemas for multiple AI models:
+
+### Available Schema Endpoints:
+- **Claude MCP**: `/mcp.json`
+- **GPT Actions**: `/openapi.json`
+- **Gemini Extensions**: `/schemas/gemini`
+- **Microsoft Copilot**: `/schemas/copilot`
+
+### Management Endpoints:
+- **Health Check**: `/health`
+- **Supported Models**: `/models/supported`
+- **API Documentation**: `/docs`
+
+## Usage Examples
+
+### Basic Function Call:
+```bash
+curl -X POST http://localhost:8000/proxy/greet \
+  -H "Content-Type: application/json" \
+  -d '{"name": "Alice", "greeting": "Hi"}'
+```
+
+### File Upload:
+```bash
+curl -X POST http://localhost:8000/upload/analyze_document \
+  -F "file=@example.txt"
+```
+
+### Session Management:
+```bash
+# Save a note (creates session)
+curl -X POST http://localhost:8000/session/save_note \
+  -H "Content-Type: application/json" \
+  -d '{"note_title": "My Note", "note_content": "This is a test note"}'
+
+# Get notes (use session ID from previous response)
+curl -X POST http://localhost:8000/session/get_notes \
+  -H "Content-Type: application/json" \
+  -H "x-session-id: <session-id-here>" \
+  -d '{}'
+```
+
+### Streaming Data:
+```bash
+curl "http://localhost:8000/stream/count_with_progress?max_count=5&delay_seconds=1"
+```
+
+### Complex Workflow:
+```bash
+curl -X POST http://localhost:8000/workflow/process_text_analysis \
+  -H "Content-Type: application/json" \
+  -d '{"text": "This is a sample text for analysis. It contains multiple sentences and words for testing the workflow capabilities."}'
+```
+
+## Integration with AI Models
+
+### GPT Actions Setup:
+1. Copy the OpenAPI schema from `/openapi.json`
+2. Create a new GPT Action in ChatGPT
+3. Paste the schema and set the base URL
+
+### Claude MCP Setup:
+1. Add the MCP server configuration:
+```json
+{
+  "mcp": {
+    "servers": {
+      "kalibr": {
+        "command": "curl",
+        "args": ["http://localhost:8000/mcp.json"]
+      }
+    }
+  }
+}
+```
+
+### Gemini Extensions:
+1. Use the schema from `/schemas/gemini`
+2. Configure according to Gemini's extension documentation
+
+### Microsoft Copilot:
+1. Use the schema from `/schemas/copilot`
+2. Follow Microsoft's plugin development guidelines
+
+## Advanced Features
+
+### Authentication (Optional):
+Uncomment the authentication line in the enhanced example:
+```python
+app.enable_auth("your-secret-jwt-key-here")
+```
+
+### Custom Schema Generation:
+The framework supports extensible schema generation for future AI models through the `CustomModelSchemaGenerator` class.
+
+### Error Handling:
+All endpoints include comprehensive error handling with meaningful error messages.
+
+### Type Safety:
+Full support for Python type hints with automatic schema generation.
+
+## Development Notes
+
+- The enhanced framework is backward compatible with original Kalibr apps
+- Session data is stored in memory (use external storage for production)
+- File uploads are handled in memory (implement persistent storage as needed)
+- Streaming uses Server-Sent Events (SSE) format
+- All examples include proper async/await handling where needed
+
+## Next Steps
+
+1. Try the examples with different AI models
+2. Modify the examples to fit your specific use case
+3. Explore the source code in `/app/backend/kalibr/` for advanced customization
+4. Build your own enhanced Kalibr applications!
kalibr-1.0.20.data/data/examples/basic_kalibr_example.py
ADDED
@@ -0,0 +1,66 @@
+"""
+Basic Kalibr SDK Example - Function-level API integration
+This demonstrates the original function-level capabilities of Kalibr.
+"""
+
+from kalibr import Kalibr
+
+# Create a basic Kalibr instance
+sdk = Kalibr(title="Basic Kalibr Demo", base_url="http://localhost:8000")
+
+@sdk.action("greet", "Greet someone with a personalized message")
+def greet_user(name: str, greeting: str = "Hello"):
+    """Simple greeting function"""
+    return {"message": f"{greeting}, {name}! Welcome to Kalibr SDK."}
+
+@sdk.action("calculate", "Perform basic mathematical operations")
+def calculate(operation: str, a: float, b: float):
+    """Basic calculator functionality"""
+    operations = {
+        "add": a + b,
+        "subtract": a - b,
+        "multiply": a * b,
+        "divide": a / b if b != 0 else None
+    }
+
+    result = operations.get(operation)
+    if result is None:
+        return {"error": f"Invalid operation '{operation}' or division by zero"}
+
+    return {
+        "operation": operation,
+        "operands": [a, b],
+        "result": result
+    }
+
+@sdk.action("validate_email", "Check if an email address is valid")
+def validate_email(email: str):
+    """Simple email validation"""
+    import re
+
+    pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
+    is_valid = bool(re.match(pattern, email))
+
+    return {
+        "email": email,
+        "is_valid": is_valid,
+        "message": "Valid email address" if is_valid else "Invalid email format"
+    }
+
+@sdk.action("text_stats", "Get statistics about a text string")
+def text_statistics(text: str):
+    """Analyze text and return statistics"""
+    words = text.split()
+    sentences = text.split('.') + text.split('!') + text.split('?')
+    sentences = [s.strip() for s in sentences if s.strip()]
+
+    return {
+        "character_count": len(text),
+        "word_count": len(words),
+        "sentence_count": len(sentences),
+        "average_word_length": sum(len(word) for word in words) / len(words) if words else 0,
+        "longest_word": max(words, key=len) if words else None
+    }
+
+# The SDK instance is automatically discovered by the Kalibr CLI
+# To run this: kalibr serve basic_kalibr_example.py