@chaprola/mcp-server 1.4.3 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -164,6 +164,40 @@ server.resource("endpoints", "chaprola://endpoints", { description: "Chaprola AP
164
164
  server.resource("auth", "chaprola://auth", { description: "Chaprola authentication reference — API key model, BAA flow, credential recovery", mimeType: "text/markdown" }, async () => ({
165
165
  contents: [{ uri: "chaprola://auth", mimeType: "text/markdown", text: readRef("auth.md") }],
166
166
  }));
167
+ // --- Modular Resources (Tier 1 + Tier 2) ---
168
+ server.resource("quickstart", "chaprola://quickstart", { description: "Chaprola quickstart — auth, base URL, core workflow, and index of all feature references. READ THIS FIRST.", mimeType: "text/markdown" }, async () => ({
169
+ contents: [{ uri: "chaprola://quickstart", mimeType: "text/markdown", text: readRef("quickstart.md") }],
170
+ }));
171
+ server.resource("ref-import", "chaprola://ref/import", { description: "Import, export, list, download — all data I/O endpoints", mimeType: "text/markdown" }, async () => ({
172
+ contents: [{ uri: "chaprola://ref/import", mimeType: "text/markdown", text: readRef("ref-import.md") }],
173
+ }));
174
+ server.resource("ref-query", "chaprola://ref/query", { description: "Query, sort, index, merge, record CRUD — data operations", mimeType: "text/markdown" }, async () => ({
175
+ contents: [{ uri: "chaprola://ref/query", mimeType: "text/markdown", text: readRef("ref-query.md") }],
176
+ }));
177
+ server.resource("ref-pivot", "chaprola://ref/pivot", { description: "Pivot tables (GROUP BY) — row, column, aggregate functions", mimeType: "text/markdown" }, async () => ({
178
+ contents: [{ uri: "chaprola://ref/pivot", mimeType: "text/markdown", text: readRef("ref-pivot.md") }],
179
+ }));
180
+ server.resource("ref-mercury", "chaprola://ref/mercury", { description: "Mercury weighted scoring — rank records by multiple criteria", mimeType: "text/markdown" }, async () => ({
181
+ contents: [{ uri: "chaprola://ref/mercury", mimeType: "text/markdown", text: readRef("ref-mercury.md") }],
182
+ }));
183
+ server.resource("ref-programs", "chaprola://ref/programs", { description: "Chaprola .CS language — MOVE, LET, GET, PUT, SEEK, IF, GOTO, PRINT, secondary files, params", mimeType: "text/markdown" }, async () => ({
184
+ contents: [{ uri: "chaprola://ref/programs", mimeType: "text/markdown", text: readRef("ref-programs.md") }],
185
+ }));
186
+ server.resource("ref-huldra", "chaprola://ref/huldra", { description: "HULDRA nonlinear optimization — parameter fitting, model catalog", mimeType: "text/markdown" }, async () => ({
187
+ contents: [{ uri: "chaprola://ref/huldra", mimeType: "text/markdown", text: readRef("ref-huldra.md") }],
188
+ }));
189
+ server.resource("ref-deploy", "chaprola://ref/deploy", { description: "Static app deployment to chaprola.org/apps/", mimeType: "text/markdown" }, async () => ({
190
+ contents: [{ uri: "chaprola://ref/deploy", mimeType: "text/markdown", text: readRef("ref-deploy.md") }],
191
+ }));
192
+ server.resource("ref-email", "chaprola://ref/email", { description: "Email system — inbox, read, send, forward, attachments", mimeType: "text/markdown" }, async () => ({
193
+ contents: [{ uri: "chaprola://ref/email", mimeType: "text/markdown", text: readRef("ref-email.md") }],
194
+ }));
195
+ server.resource("ref-gotchas", "chaprola://ref/gotchas", { description: "Common Chaprola mistakes — language, API, and secondary file pitfalls", mimeType: "text/markdown" }, async () => ({
196
+ contents: [{ uri: "chaprola://ref/gotchas", mimeType: "text/markdown", text: readRef("ref-gotchas.md") }],
197
+ }));
198
+ server.resource("ref-auth", "chaprola://ref/auth", { description: "Authentication details — registration, login, BAA, MCP env vars", mimeType: "text/markdown" }, async () => ({
199
+ contents: [{ uri: "chaprola://ref/auth", mimeType: "text/markdown", text: readRef("ref-auth.md") }],
200
+ }));
167
201
  // --- MCP Prompts ---
168
202
  server.prompt("chaprola-guide", "Essential guide for working with Chaprola. Read this before writing any Chaprola source code.", async () => ({
169
203
  messages: [{
@@ -695,6 +729,14 @@ server.tool("chaprola_email_delete", "Delete a specific email from your mailbox"
695
729
  const res = await authedFetch("/email/delete", { address: username, message_id });
696
730
  return textResult(res);
697
731
  }));
732
+ server.tool("chaprola_email_forward", "Forward an email (with attachments) to another address. Reads the original email from your mailbox, includes all attachments, and sends via Resend.", {
733
+ message_id: z.string().describe("Message ID of the email to forward"),
734
+ to: z.string().describe("Destination email address"),
735
+ }, async ({ message_id, to }) => withBaaCheck(async () => {
736
+ const { username } = getCredentials();
737
+ const res = await authedFetch("/email/forward", { from: username, message_id, to });
738
+ return textResult(res);
739
+ }));
698
740
  // --- Search ---
699
741
  server.tool("chaprola_search", "Search the web via Brave Search API. Returns titles, URLs, and snippets. Optional AI-grounded summary. Rate limit: 10/day per user", {
700
742
  query: z.string().describe("Search query string"),
@@ -784,6 +826,57 @@ server.tool("chaprola_consolidate", "Merge a .MRG file into its parent .DA, prod
784
826
  const res = await authedFetch("/consolidate", { userid: username, project, file });
785
827
  return textResult(res);
786
828
  }));
829
+ // --- Site Keys ---
830
+ server.tool("chaprola_create_site_key", "Create an origin-locked site key for frontend JavaScript. Site keys are restricted to specific origins and endpoints, safe to embed in public code.", {
831
+ label: z.string().describe("Human-readable label for this key (e.g., 'poll-frontend')"),
832
+ allowed_origins: z.array(z.string()).describe("HTTPS URL patterns where this key works (e.g., ['https://chaprola.org/apps/poll/*']). Wildcards allowed at end."),
833
+ allowed_endpoints: z.array(z.string()).describe("API endpoints this key can call (e.g., ['/query', '/insert-record', '/report']). Security-sensitive endpoints like /export, /import, /compile are always denied."),
834
+ }, async ({ label, allowed_origins, allowed_endpoints }) => {
835
+ const { username } = getCredentials();
836
+ const res = await authedFetch("/create-site-key", {
837
+ userid: username,
838
+ label,
839
+ allowed_origins,
840
+ allowed_endpoints,
841
+ });
842
+ return textResult(res);
843
+ });
844
+ server.tool("chaprola_list_site_keys", "List all site keys for the authenticated user. Shows label, allowed origins/endpoints, and creation date.", {}, async () => {
845
+ const { username } = getCredentials();
846
+ const res = await authedFetch("/list-site-keys", { userid: username });
847
+ return textResult(res);
848
+ });
849
+ server.tool("chaprola_delete_site_key", "Delete a site key. Requires the full site key value (site_...).", {
850
+ site_key: z.string().describe("The full site key to delete (starts with site_)"),
851
+ }, async ({ site_key }) => {
852
+ const { username } = getCredentials();
853
+ const res = await authedFetch("/delete-site-key", { userid: username, site_key });
854
+ return textResult(res);
855
+ });
856
+ // --- App Hosting tools ---
857
+ server.tool("chaprola_app_deploy", "Deploy a static web app (HTML/JS/CSS) to chaprola.org/apps/{userid}/{project}/. Returns a presigned upload URL for a .zip or .tar.gz archive.", {
858
+ project: z.string().describe("Project name for the app"),
859
+ }, async ({ project }) => withBaaCheck(async () => {
860
+ const { username } = getCredentials();
861
+ const res = await authedFetch("/app/deploy", { userid: username, project });
862
+ return textResult(res);
863
+ }));
864
+ server.tool("chaprola_app_deploy_process", "Process a staged app archive and deploy files to chaprola.org/apps/{userid}/{project}/", {
865
+ project: z.string().describe("Project name for the app"),
866
+ staging_key: z.string().describe("Staging key returned by chaprola_app_deploy"),
867
+ }, async ({ project, staging_key }) => withBaaCheck(async () => {
868
+ const { username } = getCredentials();
869
+ const res = await authedFetch("/app/deploy/process", { userid: username, project, staging_key });
870
+ return textResult(res);
871
+ }));
872
+ server.tool("chaprola_app_upload", "Get a presigned URL to upload a single file to a deployed app at chaprola.org/apps/{userid}/{project}/{path}", {
873
+ project: z.string().describe("Project name for the app"),
874
+ path: z.string().describe("File path within the app (e.g. 'css/app.css', 'index.html')"),
875
+ }, async ({ project, path }) => withBaaCheck(async () => {
876
+ const { username } = getCredentials();
877
+ const res = await authedFetch("/app/upload", { userid: username, project, path });
878
+ return textResult(res);
879
+ }));
787
880
  // --- Start server ---
788
881
  async function main() {
789
882
  const transport = new StdioServerTransport();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@chaprola/mcp-server",
3
- "version": "1.4.3",
3
+ "version": "1.6.0",
4
4
  "description": "MCP server for Chaprola — agent-first data platform. Gives AI agents 46 tools for structured data storage, record CRUD, querying, schema inspection, web search, URL fetching, scheduled jobs, and execution via plain HTTP.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -13,6 +13,30 @@ POST /compile {userid, project, name: "REPORT", source: "...", primary_format: "
13
13
  POST /run {userid, project, name: "REPORT", primary_file: "STAFF", record: 1}
14
14
  ```
15
15
 
16
+ ## R-Variable Ranges
17
+
18
+ | Range | Purpose | Safe for DEFINE VARIABLE? |
19
+ |-------|---------|--------------------------|
20
+ | R1–R20 | HULDRA elements (parameters) | No — HULDRA overwrites these |
21
+ | R21–R40 | HULDRA objectives (error metrics) | No — HULDRA reads these |
22
+ | R41–R50 | Scratch space | **Yes — always use R41–R50 for DEFINE VARIABLE** |
23
+
24
+ For non-HULDRA programs, R1–R40 are technically available but using R41–R50 is a good habit.
25
+
26
+ ## PRINT: Output from U Buffer
27
+
28
+ ```
29
+ PRINT 0 — output the ENTIRE U buffer contents, then clear it
30
+ PRINT N — output exactly N characters from U buffer (no clear)
31
+ ```
32
+
33
+ Use `PRINT N` when you've placed data at specific positions and want clean output without trailing garbage. Use `PRINT 0` for quick output of everything.
34
+
35
+ ```chaprola
36
+ MOVE "Hello" U.1 5
37
+ PRINT 5 // Outputs "Hello" — exactly 5 chars, no trailing spaces
38
+ ```
39
+
16
40
  ## Hello World (no data file)
17
41
 
18
42
  ```chaprola
@@ -58,7 +82,25 @@ READ match // load matched secondary record
58
82
  MOVE S.dept_name U.12 15 // now accessible
59
83
  ```
60
84
 
61
- Compile with: `primary_format: "EMPLOYEES", secondary_format: "DEPARTMENTS"`
85
+ Compile with both formats so the compiler resolves fields from both files:
86
+ ```bash
87
+ POST /compile {
88
+ userid, project, name: "REPORT",
89
+ source: "...",
90
+ primary_format: "EMPLOYEES",
91
+ secondary_format: "DEPARTMENTS"
92
+ }
93
+ ```
94
+
95
+ ## Comparing Two Memory Locations
96
+
97
+ IF EQUAL compares a literal to a location. To compare two memory locations, copy both to U buffer:
98
+
99
+ ```chaprola
100
+ MOVE PARAM.poll_id U.200 12
101
+ MOVE P.poll_id U.180 12
102
+ IF EQUAL U.200 U.180 12 GOTO 200 // match — jump to handler
103
+ ```
62
104
 
63
105
  ## Read-Modify-Write (UPDATE)
64
106
 
@@ -152,6 +194,19 @@ POST /query {
152
194
  }
153
195
  ```
154
196
 
197
+ For simple aggregation without a cross-tabulation column, set `column` to empty string:
198
+ ```bash
199
+ # Count records per department (no cross-tab)
200
+ POST /query {
201
+ userid, project, file: "STAFF",
202
+ pivot: {
203
+ row: "department",
204
+ column: "",
205
+ values: [{field: "department", function: "count"}]
206
+ }
207
+ }
208
+ ```
209
+
155
210
  Supported aggregate functions: `count`, `sum`, `avg`, `min`, `max`, `stddev`.
156
211
 
157
212
  ## PUT Format Codes
@@ -165,6 +220,19 @@ Supported aggregate functions: `count`, `sum`, `avg`, `min`, `max`, `stddev`.
165
220
 
166
221
  Syntax: `PUT R1 INTO U.30 10 D 2` (R-var, location, width, format, decimals)
167
222
 
223
+ ## Common Field Widths
224
+
225
+ | Data type | Chars | Example |
226
+ |-----------|-------|---------|
227
+ | ISO datetime | 20 | `2026-03-28T14:30:00Z` |
228
+ | UUID | 36 | `550e8400-e29b-41d4-a716-446655440000` |
229
+ | Email | 50 | `user@example.com` |
230
+ | Short ID | 8–12 | `poll_001` |
231
+ | Dollar amount | 10 | `$1,234.56` |
232
+ | Phone | 15 | `+1-555-123-4567` |
233
+
234
+ Use these when sizing MOVE lengths and U buffer positions.
235
+
168
236
  ## Memory Regions
169
237
 
170
238
  | Prefix | Description |
@@ -48,9 +48,9 @@ Auth: `Authorization: Bearer chp_your_api_key` on all protected endpoints.
48
48
  ### Query & Data Operations
49
49
  | Endpoint | Body | Response |
50
50
  |----------|------|----------|
51
- | `POST /query` | `{userid, project, file, where?, select?, aggregate?, order_by?, limit?, join?, pivot?, mercury?}` | `{records, total}` |
51
+ | `POST /query` | `{userid, project, file, where?: [{field, op, value}], select?, aggregate?, order_by?, limit?, join?, pivot?, mercury?}` | `{records, total}` |
52
52
  | `POST /sort` | `{userid, project, file, sort_by}` | `{status: "ok"}` |
53
- | `POST /index` | `{userid, project, file, field}` | `{status: "ok"}` |
53
+ | `POST /index` | `{userid, project, file, key_fields: ["field1", "field2"], output: "INDEXNAME"}` | `{status: "ok"}` |
54
54
  | `POST /merge` | `{userid, project, file_a, file_b, output, key}` | `{status: "ok"}` |
55
55
 
56
56
  ### Optimization (HULDRA)
@@ -82,6 +82,6 @@ Auth: `Authorization: Bearer chp_your_api_key` on all protected endpoints.
82
82
  ## Key Rules
83
83
 
84
84
  - `userid` in every request body must match the authenticated user (403 if not)
85
- - API keys never expire. Login generates a new key and invalidates the old one
85
+ - API keys expire after 90 days. Login generates a new key (old keys remain valid until expiration)
86
86
  - BAA only required for PHI data. Non-PHI data works without signing a BAA
87
87
  - All `.DA` files expire after 90 days by default. Set `expires_in_days` on import to override (up to 36500 days)
@@ -109,3 +109,32 @@ All outbound emails are AI-screened. Blocked emails return 403.
109
109
 
110
110
  ### PHI in email
111
111
  Emails containing PHI identifiers (names, SSNs, dates of birth, etc.) are blocked by the content moderator.
112
+
113
+ ## Concurrency Model
114
+
115
+ ### Last-write-wins is intentional, not a missing feature
116
+
117
+ Chaprola uses **last-write-wins** instead of ACID transactions. This is a deliberate architectural choice, not a gap.
118
+
119
+ **Why last-write-wins is often the right model:**
120
+
121
+ ACID transactions solve one problem: what happens when two writers hit the same record simultaneously? Record locking, deadlock detection, lock wait queues, transaction logs, and rollback mechanisms all exist to answer that question.
122
+
123
+ For most real-world data, the answer is: they don't. Application data is typically **partitioned by owner** — a user's settings, a user's records, a user's event history. One writer per partition. No contention. In these workloads, transaction machinery is pure overhead solving a problem that doesn't exist.
124
+
125
+ Last-write-wins eliminates that overhead entirely. No lock acquisition. No lock wait. No deadlock detection. Every write goes straight through.
126
+
127
+ **What Chaprola does provide:**
128
+
129
+ - **Single-object atomicity**: Each S3 `put_object` either completes or fails — no partial writes within a single data file.
130
+ - **Consolidation dirty-bit checks**: The `/consolidate` endpoint verifies the merge file wasn't modified during the operation. If it was, consolidation aborts.
131
+ - **11-nines durability**: S3's 99.999999999% durability guarantee means your data survives once written.
132
+
133
+ **What Chaprola does not provide:**
134
+
135
+ - **Multi-object atomicity**: If you need "write record A and record B or neither," keep that logic in a relational database. Chaprola writes each object independently.
136
+ - **Read-modify-write isolation**: Two agents reading the same record, modifying it, and writing back will result in last-write-wins. If you need compare-and-swap semantics, Chaprola is not the right tool for that specific operation.
137
+
138
+ **When to use a relational database instead:**
139
+
140
+ When multiple concurrent writers update the same record — financial ledgers, inventory counters, auction bidding. If your application has true multi-writer contention on shared records, use PostgreSQL or similar for that workload. But most application data is owner-partitioned, and for that, last-write-wins is the lighter, faster, correct choice.
@@ -0,0 +1,30 @@
1
+ # Chaprola Quickstart
2
+
3
+ Base URL: `https://api.chaprola.org`
4
+ Auth: `Authorization: Bearer chp_your_api_key` on all protected endpoints.
5
+ Every request body's `userid` must match the authenticated username.
6
+
7
+ ## Core Workflow
8
+
9
+ ```bash
10
+ POST /import {userid, project, name: "STAFF", data: [{name: "Alice", salary: 95000}, ...]}
11
+ POST /compile {userid, project, name: "REPORT", source: "...", primary_format: "STAFF"}
12
+ POST /run {userid, project, name: "REPORT", primary_file: "STAFF", record: 1}
13
+ ```
14
+
15
+ ## Available References
16
+
17
+ Read only what your app needs:
18
+
19
+ | Resource | When to read |
20
+ |----------|-------------|
21
+ | `chaprola://ref/import` | Importing data, exporting, listing files |
22
+ | `chaprola://ref/query` | Filtering, sorting, indexing, joining data |
23
+ | `chaprola://ref/pivot` | GROUP BY / pivot tables |
24
+ | `chaprola://ref/mercury` | Weighted scoring / ranking |
25
+ | `chaprola://ref/programs` | Writing .CS source code (MOVE, LET, GET, PUT, SEEK, IF, GOTO, PRINT) |
26
+ | `chaprola://ref/huldra` | Nonlinear optimization / parameter fitting |
27
+ | `chaprola://ref/deploy` | Deploying static web apps to chaprola.org/apps/ |
28
+ | `chaprola://ref/email` | Sending/receiving email via @chaprola.org |
29
+ | `chaprola://ref/gotchas` | Common mistakes to avoid |
30
+ | `chaprola://ref/auth` | Registration, login, BAA, credentials |
@@ -0,0 +1,25 @@
1
+ # Authentication
2
+
3
+ ## API Key Model
4
+ All protected requests: `Authorization: Bearer chp_your_api_key`
5
+ Keys: `chp_` + 64 hex chars. Expire after 90 days.
6
+
7
+ ## Registration & Login
8
+ ```bash
9
+ POST /register {"username": "my-agent", "passcode": "16-chars-minimum-passcode"}
10
+ → {"api_key": "chp_..."}
11
+
12
+ POST /login {"username": "my-agent", "passcode": "..."}
13
+ → {"api_key": "chp_..."} # old keys remain valid until expiration
14
+ ```
15
+ - Passcode: 16-128 chars. Username: 3-40 chars, alphanumeric + hyphens/underscores.
16
+ - Rate limits: auth 5 rps, data 20 rps.
17
+
18
+ ## BAA
19
+ Only needed for PHI. Sign once: `POST /baa-text` → human reviews → `POST /sign-baa {userid, signatory_name}`.
20
+
21
+ ## MCP Environment Variables
22
+ | Variable | Description |
23
+ |----------|-------------|
24
+ | `CHAPROLA_USERNAME` | Your registered username |
25
+ | `CHAPROLA_API_KEY` | Your API key (`chp_...`) |
@@ -0,0 +1,24 @@
1
+ # App Deployment
2
+
3
+ Deploy static web apps (HTML/JS/CSS) to `https://chaprola.org/apps/{userid}/{project}/`.
4
+
5
+ ## Flow
6
+ ```bash
7
+ # 1. Get presigned upload URL
8
+ POST /app/deploy {userid, project} → {upload_url, staging_key, max_size_mb: 50}
9
+
10
+ # 2. Upload .zip or .tar.gz to upload_url
11
+ PUT upload_url --data-binary @app.zip
12
+
13
+ # 3. Extract and deploy
14
+ POST /app/deploy/process {userid, project, staging_key} → {url, files_deployed}
15
+ ```
16
+
17
+ ## Single file upload
18
+ ```bash
19
+ POST /app/upload {userid, project, path: "css/app.css"} → {upload_url, content_type}
20
+ PUT upload_url --data-binary @app.css
21
+ ```
22
+
23
+ Max 500 files per app. Blocked extensions: .php, .py, .sh, .exe.
24
+ Content-Type set automatically from extension.
@@ -0,0 +1,20 @@
1
+ # Email (@chaprola.org)
2
+
3
+ Every account gets `{username}@chaprola.org`.
4
+
5
+ ## Endpoints
6
+ - `POST /email/inbox {address, limit?, before?}` → `{emails: [...], total}`
7
+ - `POST /email/read {address, message_id}` → `{email: {from, to, subject, text, html, attachments}}`
8
+ - `POST /email/send {from, to, subject, text, html?}` → `{status, message_id}`
9
+ - `POST /email/delete {address, message_id}` → `{status}`
10
+ - `POST /email/forward {from, message_id, to}` → `{status, resend_message_id}`
11
+
12
+ ## Attachments
13
+ `/email/read` returns presigned download URLs (1hr expiry) for each attachment.
14
+ `/email/forward` re-attaches stored attachments automatically.
15
+
16
+ ## Rate limits
17
+ 20 emails/day per user, 3 emails/minute. Outbound emails are AI content-moderated (blocks spam/PHI).
18
+
19
+ ## Delegates
20
+ Agents can send as another user if listed in their delegate file. E.g., tawni can send `"from": "charles"`.
@@ -0,0 +1,23 @@
1
+ # Gotchas
2
+
3
+ ## Language
4
+ - **No parentheses in LET.** `LET result = price * qty` only. For `price * (qty + bonus)`: use `LET temp = qty + bonus` then `LET result = price * temp`.
5
+ - **IF EQUAL compares literal to location.** To compare two locations, copy both to U buffer first.
6
+ - **MOVE length must match field width.** If `name` is 8 chars wide, `MOVE P.name U.1 20` bleeds into adjacent fields.
7
+ - **DEFINE VARIABLE names must not collide with field names.** If format has `balance`, don't `DEFINE VARIABLE balance R41`.
8
+ - **R-variables are 64-bit floats.** `7 / 2 = 3.5`. Use PUT with `I` format for integer display.
9
+ - **FIND returns 0 on no match.** Always check `IF match EQ 0` before READ.
10
+ - **PRINT 0 clears the U buffer.** No need to manually clear between prints.
11
+ - **Statement numbers are labels, not line numbers.** Only number GOTO/CALL targets.
12
+
13
+ ## API
14
+ - **userid must match authenticated user.** 403 on mismatch.
15
+ - **Login does not invalidate old keys.** Old keys remain valid until their 90-day expiration; still save the new key immediately.
16
+ - **Async for large datasets.** `/run` with `async: true` for >100K records (API Gateway 30s timeout).
17
+ - **secondary_format is a string**, not an array.
18
+ - **Data files expire.** Default 90 days. Override with `expires_in_days` on import.
19
+ - **API keys expire after 90 days.** Re-login to get a new key.
20
+
21
+ ## Secondary Files
22
+ - **One at a time.** CLOSE before opening another.
23
+ - **CLOSE flushes writes.** Always CLOSE before END if you wrote to the secondary file.
@@ -0,0 +1,78 @@
1
+ # HULDRA — Nonlinear Optimization
2
+
3
+ HULDRA finds optimal parameter values for a mathematical model by minimizing the difference between predictions and observed data.
4
+
5
+ ## R-Variable Interface
6
+
7
+ | Range | Purpose | Who sets it |
8
+ |-------|---------|-------------|
9
+ | R1–R20 | Elements (parameters to optimize) | HULDRA sets before each run |
10
+ | R21–R40 | Objectives (error metrics) | Your program computes these |
11
+ | R41–R50 | Scratch space | Your program's temp variables |
12
+
13
+ ## POST /optimize
14
+ ```json
15
+ {
16
+ "userid": "...", "project": "fit", "program": "SALFIT", "primary_file": "EMP",
17
+ "elements": [
18
+ {"index": 1, "label": "slope", "start": 5000, "min": 0, "max": 20000, "delta": 10},
19
+ {"index": 2, "label": "base", "start": 40000, "min": 0, "max": 100000, "delta": 100}
20
+ ],
21
+ "objectives": [
22
+ {"index": 1, "label": "SSR", "goal": 0.0, "weight": 1.0}
23
+ ],
24
+ "max_iterations": 100,
25
+ "async_exec": true
26
+ }
27
+ ```
28
+
29
+ Async: `POST /optimize/status {userid, project, job_id}`
30
+
31
+ ## Delta Guidance
32
+ - Dollar amounts: `0.01` to `1.0`
33
+ - Rates/percentages: `0.001` to `0.01`
34
+ - Counts: `0.1` to `1.0`
35
+
36
+ ## Performance
37
+ HULDRA runs your program `1 + 2 × N_elements` times per iteration. Lambda timeout: 900s. Sample large datasets (200-500 records) before optimizing.
38
+
39
+ ## Complete Example: Linear Fit
40
+ `salary = R1 × years_exp + R2`
41
+
42
+ ```chaprola
43
+ DEFINE VARIABLE REC R41
44
+ DEFINE VARIABLE YRS R42
45
+ DEFINE VARIABLE SAL R43
46
+ DEFINE VARIABLE PRED R44
47
+ DEFINE VARIABLE RESID R45
48
+ DEFINE VARIABLE SSR R46
49
+
50
+ LET SSR = 0 // MUST initialize to 0
51
+ LET REC = 1
52
+ 100 SEEK REC
53
+ IF EOF GOTO 200
54
+ GET YRS FROM P.years_exp
55
+ GET SAL FROM P.salary
56
+ LET PRED = R1 * YRS
57
+ LET PRED = PRED + R2
58
+ LET RESID = PRED - SAL
59
+ LET RESID = RESID * RESID
60
+ LET SSR = SSR + RESID
61
+ LET REC = REC + 1
62
+ GOTO 100
63
+ 200 LET R21 = SSR
64
+ END
65
+ ```
66
+
67
+ ## Model Catalog
68
+
69
+ | Model | Formula | When to use |
70
+ |-------|---------|-------------|
71
+ | Linear | `y = R1*x + R2` | Proportional relationships |
72
+ | Multi-linear | `y = R1*x1 + R2*x2 + R3` | Multiple factors |
73
+ | Quadratic | `y = R1*x^2 + R2*x + R3` | Accelerating curves |
74
+ | Exponential | `y = R1 * EXP(R2*x)` | Compound growth |
75
+ | Exp. decay | `y = R1 * EXP(-R2*x) + R3` | Decay, cooling |
76
+ | Power law | `y = R1 * POW(x, R2)` | Scaling laws |
77
+ | Logarithmic | `y = R1 * LOG(x) + R2` | Diminishing returns |
78
+ | Logistic | `y = R1 / (1 + EXP(-R2*(x-R3)))` | S-curves, saturation |
@@ -0,0 +1,34 @@
1
+ # Import / Export / Files
2
+
3
+ ## POST /import
4
+ `{userid, project, name, data: [{...}, ...], format?, expires_in_days?, force?}`
5
+ Returns: `{records, fields, record_length, format_file, data_file}`
6
+
7
+ ```bash
8
+ POST /import {userid, project, name: "STAFF", data: [{"name": "Alice", "salary": 95000}]}
9
+ ```
10
+
11
+ Field widths auto-sized from longest value. Default expiry: 90 days. Override with `expires_in_days`.
12
+
13
+ ## Large File Upload (presigned URL)
14
+ ```bash
15
+ POST /import-url {userid, project, name} → {upload_url, staging_key}
16
+ # PUT your JSON to upload_url
17
+ POST /import-process {userid, project, name, staging_key} → same as /import
18
+ ```
19
+
20
+ ## POST /import-download
21
+ `{userid, project, name, url, instructions?, max_rows?}`
22
+ Imports directly from URL. Supports: CSV, TSV, JSON, NDJSON, Parquet, Excel (.xlsx/.xls).
23
+ Optional `instructions` for AI schema inference. Max 1M records.
24
+
25
+ ## POST /export
26
+ `{userid, project, name, format?}` → `{data: [...records]}`
27
+ Optional `format: "fhir"` for FHIR JSON reconstruction.
28
+
29
+ ## POST /list
30
+ `{userid, project, pattern?}` → `{files: [...], total}`
31
+
32
+ ## POST /download
33
+ `{userid, project, file, type}` → `{download_url, expires_in, size_bytes}`
34
+ Type: `data`, `format`, `source`, `proc`, `output`.
@@ -0,0 +1,23 @@
1
+ # Mercury (Weighted Scoring)
2
+
3
+ Add `mercury` to `/query` to score and rank records by weighted criteria.
4
+
5
+ ```json
6
+ POST /query {
7
+ "userid": "...", "project": "...", "file": "CANDIDATES",
8
+ "mercury": {
9
+ "scores": [
10
+ {"field": "experience_years", "weight": 0.4, "direction": "higher_better"},
11
+ {"field": "interview_score", "weight": 0.35, "direction": "higher_better"},
12
+ {"field": "salary_ask", "weight": 0.25, "direction": "lower_better"}
13
+ ]
14
+ },
15
+ "limit": 10
16
+ }
17
+ ```
18
+
19
+ Returns records ranked by composite score (0-100). Each record includes `_mercury_score` and per-field `_mercury_{field}` component scores.
20
+
21
+ Fields are normalized min-max within the dataset. `direction` controls whether higher or lower raw values score better. Weights must sum to 1.0.
22
+
23
+ Combine with `where` to pre-filter before scoring.
@@ -0,0 +1,32 @@
1
+ # Pivot (GROUP BY)
2
+
3
+ Chaprola's pivot IS GROUP BY. Add `pivot` to `/query`.
4
+
5
+ ## Simple aggregation (no cross-tab)
6
+ ```json
7
+ POST /query {
8
+ "userid": "...", "project": "...", "file": "STAFF",
9
+ "pivot": {
10
+ "row": "department",
11
+ "column": "",
12
+ "values": [
13
+ {"field": "department", "function": "count"},
14
+ {"field": "salary", "function": "avg"}
15
+ ]
16
+ }
17
+ }
18
+ ```
19
+ SQL equivalent: `SELECT department, COUNT(*), AVG(salary) FROM staff GROUP BY department`
20
+
21
+ ## Cross-tabulation
22
+ ```json
23
+ "pivot": {
24
+ "row": "department",
25
+ "column": "year",
26
+ "values": [{"field": "revenue", "function": "sum"}],
27
+ "totals": true
28
+ }
29
+ ```
30
+ SQL equivalent: `SELECT department, year, SUM(revenue) FROM sales GROUP BY department, year`
31
+
32
+ Supported functions: `count`, `sum`, `avg`, `min`, `max`, `stddev`.
@@ -0,0 +1,85 @@
1
+ # Chaprola Programs (.CS Source)
2
+
3
+ ## Compile & Run
4
+ ```bash
5
+ POST /compile {userid, project, name: "REPORT", source: "...", primary_format: "STAFF", secondary_format?: "DEPTS"}
6
+ POST /run {userid, project, name: "REPORT", primary_file: "STAFF", record: 1, async?: true, nophi?: true}
7
+ POST /run/status {userid, project, job_id} # poll async jobs
8
+ POST /publish {userid, project, name, primary_file, acl?: "public|authenticated|owner|token"}
9
+ ```
10
+
11
+ ## Memory Regions
12
+
13
+ | Prefix | Description |
14
+ |--------|-------------|
15
+ | `P` | Primary data file (current record) |
16
+ | `S` | Secondary data file (current record) |
17
+ | `U` | User buffer (output scratch) |
18
+ | `X` | System text (date, time, filenames) |
19
+
20
+ ## Language Essentials
21
+
22
+ ```chaprola
23
+ // Loop through records
24
+ DEFINE VARIABLE rec R41
25
+ LET rec = 1
26
+ 100 SEEK rec
27
+ IF EOF GOTO 900
28
+ MOVE P.name U.1 20 // copy field to output buffer
29
+ GET sal FROM P.salary // numeric field → R variable
30
+ PUT sal INTO U.22 10 D 2 // R variable → formatted output
31
+ PRINT 0 // output full U buffer, clear it
32
+ LET rec = rec + 1
33
+ GOTO 100
34
+ 900 END
35
+ ```
36
+
37
+ - `PRINT 0` — output entire U buffer and clear. `PRINT N` — output exactly N chars.
38
+ - `MOVE BLANKS U.1 80` — clear a region. `MOVE "literal" U.1 7` — move literal.
39
+ - `IF EQUAL "text" U.50 4 GOTO 200` — compare literal to memory location.
40
+ - `U.name` — named positions (auto-allocated by compiler): `MOVE P.name U.name 20`
41
+ - `DEFINE VARIABLE counter R41` — alias R-variable. **Use R41-R50** (R1-R40 reserved for HULDRA).
42
+
43
+ ## PUT Format Codes
44
+
45
+ | Code | Description | Example |
46
+ |------|-------------|---------|
47
+ | `D` | Dollar with commas | `$1,234.56` |
48
+ | `F` | Fixed decimal | `1234.56` |
49
+ | `I` | Integer (right-justified) | ` 1234` |
50
+ | `E` | Scientific notation | `1.23E+03` |
51
+
52
+ Syntax: `PUT R41 INTO U.30 10 D 2` — (R-var, location, width, format, decimals)
53
+
54
+ ## Math
55
+
56
+ ```chaprola
57
+ LET R42 = R41 + 1 // one operation per LET
58
+ LET R43 = EXP R41 // also: LOG, SQRT, ABS
59
+ LET R44 = POW R41 R42 // R41^R42
60
+ ```
61
+
62
+ ## Secondary Files (FIND/JOIN)
63
+
64
+ ```chaprola
65
+ OPEN "DEPARTMENTS" 0 // open secondary file
66
+ FIND match FROM S.dept_code 3 USING P.dept_code
67
+ IF match EQ 0 GOTO 200 // 0 = no match
68
+ READ match // load matched record
69
+ MOVE S.dept_name U.30 15
70
+ WRITE match // write back if modified
71
+ CLOSE // flush + close
72
+ ```
73
+
74
+ Compile with: `secondary_format: "DEPARTMENTS"`
75
+
76
+ ## Parameterized Reports (PARAM.name)
77
+ ```chaprola
78
+ MOVE PARAM.deck U.1 20 // string param → U buffer
79
+ LET lvl = PARAM.level // numeric param → R variable
80
+ ```
81
+ Publish, then call: `POST /report?userid=X&project=Y&name=Z&deck=kanji&level=3`
82
+ Discover params: `POST /report/params {userid, project, name}`
83
+
84
+ ## Common Field Widths
85
+ ISO datetime: 20, UUID: 36, email: 50, short ID: 8-12, dollar: 10, phone: 15.
@@ -0,0 +1,40 @@
1
+ # Query / Sort / Index / Merge
2
+
3
+ ## POST /query
4
+ ```json
5
+ {
6
+ "userid": "...", "project": "...", "file": "STAFF",
7
+ "where": [{"field": "salary", "op": "gt", "value": 80000}],
8
+ "where_logic": "and",
9
+ "select": ["name", "salary"],
10
+ "aggregate": [{"field": "salary", "function": "avg"}],
11
+ "order_by": "salary desc",
12
+ "limit": 100,
13
+ "offset": 0
14
+ }
15
+ ```
16
+ Returns: `{records: [...], total, fields}`
17
+
18
+ WHERE operators: `eq`, `ne`, `gt`, `ge`, `lt`, `le`, `between`, `contains`, `starts_with`, `ends_with`
19
+
20
+ ## JOIN
21
+ Add `join` to /query:
22
+ ```json
23
+ "join": {"file": "DEPARTMENTS", "on": {"left": "dept_id", "right": "dept_id"}, "type": "inner"}
24
+ ```
25
+ Types: `inner`, `left`, `right`, `full`. Optional `pre_sorted: true` for merge join.
26
+
27
+ ## POST /sort
28
+ `{userid, project, file, sort_by: "salary desc"}`
29
+
30
+ ## POST /index
31
+ `{userid, project, file, key_fields: ["dept_id"], output: "STAFF_BY_DEPT"}`
32
+
33
+ ## POST /merge
34
+ `{userid, project, file_a, file_b, output, key}`
35
+
36
+ ## Record CRUD
37
+ - `POST /insert-record {userid, project, file, record: {field: "value"}}`
38
+ - `POST /update-record {userid, project, file, where: [...], set: {field: "value"}}`
39
+ - `POST /delete-record {userid, project, file, where: [...]}`
40
+ - `POST /consolidate {userid, project, file}` — merge .MRG into .DA