@lerna-labs/hydra-sdk 1.0.0-beta.13 → 1.0.0-beta.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,159 @@
1
+ # @lerna-labs/hydra-sdk
2
+
3
+ Core TypeScript SDK for managing [Cardano Hydra](https://hydra.family/) Heads — connect, open, transact, and close Hydra Heads with a high-level API.
4
+
5
+ > **Beta** — APIs may change between releases. Currently at `1.0.0-beta.x`.
6
+
7
+ ## Installation
8
+
9
+ ```bash
10
+ npm install @lerna-labs/hydra-sdk
11
+ ```
12
+
13
+ ## Environment Variables
14
+
15
+ The SDK reads configuration from environment variables at the point of use (never at import time):
16
+
17
+ | Variable | Required | Description |
18
+ |----------|----------|-------------|
19
+ | `BLOCKFROST_API_KEY` | Yes (Wrangler) | Blockfrost project ID for L1 chain queries |
20
+ | `HYDRA_API_URL` | Yes (UTxO queries) | Hydra node HTTP API endpoint (e.g. `http://localhost:4001`) |
21
+ | `HYDRA_WS_URL` | Yes (Wrangler) | Hydra node WebSocket endpoint (e.g. `ws://localhost:4001`) |
22
+ | `HYDRA_ADMIN_KEY_FILE` | One required | Path to a Cardano `.sk` signing key file |
23
+ | `HYDRA_ADMIN_CARDANO_PK` | One required | Cardano private key hex string (fallback if no key file) |
24
+
25
+ ## Usage
26
+
27
+ ### Wrangler — Head Lifecycle Management
28
+
29
+ The `Wrangler` class manages the full Hydra Head lifecycle: connecting, opening, monitoring, and closing.
30
+
31
+ ```typescript
32
+ import { Wrangler } from "@lerna-labs/hydra-sdk";
33
+
34
+ const wrangler = new Wrangler();
35
+
36
+ // Connect to the Hydra node with automatic retry
37
+ await wrangler.connect();
38
+
39
+ // Open the Head (commits UTxO from L1 tx)
40
+ await wrangler.waitForHeadOpen({ txHash: "abc123...", txIndex: 0 });
41
+
42
+ // Monitor status
43
+ const status = await wrangler.getHeadStatus();
44
+ wrangler.onStatusChange((status) => console.log("Status:", status));
45
+
46
+ // Close and finalize
47
+ await wrangler.waitForHeadClose();
48
+
49
+ // Disconnect when done
50
+ await wrangler.disconnect();
51
+ ```
52
+
53
+ ### UTxO Queries
54
+
55
+ ```typescript
56
+ import { getUtxoSet, queryUtxoByAddress } from "@lerna-labs/hydra-sdk";
57
+
58
+ // Get all UTxOs in the Hydra Head
59
+ const utxos = await getUtxoSet();
60
+
61
+ // Query UTxOs for a specific address
62
+ const myUtxos = await queryUtxoByAddress("addr_test1...");
63
+ ```
64
+
65
+ ### Wallet & Admin
66
+
67
+ ```typescript
68
+ import { getAdmin, createMultisigAddress } from "@lerna-labs/hydra-sdk";
69
+
70
+ // Get a MeshWallet instance from env-configured signing key
71
+ const adminWallet = await getAdmin();
72
+
73
+ // Create a multisig address from two participant addresses
74
+ const { address, scriptCbor, scriptHash } = createMultisigAddress(
75
+ "addr_test1...",
76
+ "addr_test2...",
77
+ );
78
+ ```
79
+
80
+ ### Signature Verification
81
+
82
+ ```typescript
83
+ import { verifySignature } from "@lerna-labs/hydra-sdk";
84
+
85
+ const { isValid, sigMeta, pubKeyHex } = verifySignature(
86
+ signature,
87
+ message,
88
+ signingAddress,
89
+ signatureKey,
90
+ );
91
+ ```
92
+
93
+ ### Transaction Submission
94
+
95
+ ```typescript
96
+ import { submitTx } from "@lerna-labs/hydra-sdk";
97
+
98
+ const response = await submitTx(submitEndpoint, cborPayload, txId);
99
+ ```
100
+
101
+ ### Config Helpers
102
+
103
+ ```typescript
104
+ import { requireEnv, optionalEnv } from "@lerna-labs/hydra-sdk";
105
+
106
+ // Throws with a clear message if missing
107
+ const apiKey = requireEnv("BLOCKFROST_API_KEY");
108
+
109
+ // Returns fallback if not set
110
+ const url = optionalEnv("HYDRA_API_URL", "http://localhost:4001");
111
+ ```
112
+
113
+ ### Utilities
114
+
115
+ ```typescript
116
+ import { chunkString, bufferToHex, bufferToAscii } from "@lerna-labs/hydra-sdk";
117
+
118
+ const chunks = chunkString("abcdef", 2); // ["ab", "cd", "ef"]
119
+ ```
120
+
121
+ ## API Reference
122
+
123
+ ### Functions
124
+
125
+ | Export | Description |
126
+ |--------|-------------|
127
+ | `getAdmin()` | Create a `MeshWallet` from env-configured signing key |
128
+ | `createMultisigAddress(addr1, addr2, networkId?, scriptType?)` | Build a multisig address from two participant addresses |
129
+ | `createNativeScript(addr, opts?)` | Build a native script policy — bare `sig` by default, or time-bound `all:[sig, before]` when `opts.invalidHereafter` is set |
130
+ | `getUtxoSet()` | Fetch all UTxOs in the Hydra Head |
131
+ | `queryUtxoByAddress(address)` | Fetch UTxOs for a specific address |
132
+ | `submitTx(endpoint, payload, id)` | Submit a signed transaction to the Hydra node |
133
+ | `verifySignature(signature, message, address, key)` | Verify a CIP-8 message signature |
134
+ | `requireEnv(name)` | Read a required environment variable (throws if missing) |
135
+ | `optionalEnv(name, fallback)` | Read an optional environment variable with fallback |
136
+ | `chunkString(str, size)` | Split a string into fixed-size chunks |
137
+ | `bufferToHex(buffer)` | Convert a buffer to a hex string |
138
+ | `bufferToAscii(buffer)` | Convert a buffer to an ASCII string |
139
+
140
+ ### Classes
141
+
142
+ | Export | Description |
143
+ |--------|-------------|
144
+ | `Wrangler` | Hydra Head lifecycle manager — connect, open, monitor, close |
145
+
146
+ ### Types
147
+
148
+ | Export | Description |
149
+ |--------|-------------|
150
+ | `CommitArgs` | Arguments for committing UTxOs when opening a Head |
151
+ | `HeadStatus` | Hydra Head status identifier |
152
+ | `HydraMessage` | Typed Hydra protocol message |
153
+ | `HydraWsMessage` | Raw WebSocket message from Hydra node |
154
+ | `ParsedUtxo` | Parsed UTxO with address, value, and datum |
155
+ | `ServerOutput` | Hydra node server output message type |
156
+
157
+ ## License
158
+
159
+ [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
package/dist/cache/disk-cache.d.ts ADDED
@@ -0,0 +1,37 @@
1
export interface DiskCacheConfig {
    /** Root directory for all cache storage (e.g. "/ipfs-staging"). */
    stagingDir: string;
    /**
     * Subdirectory name for the full document payloads.
     * @default "documents"
     */
    documentsSubdir?: string;
    /**
     * Subdirectory name for the latest-entry lookup files.
     * @default "latest"
     */
    latestSubdir?: string;
}
/**
 * A generic disk-backed in-memory cache keyed by a string identifier.
 *
 * Each entry is persisted to disk in two places:
 * - `documents/` — the full payload (intended for IPFS pinning)
 * - `latest/` — a lightweight lookup file keyed by id (for fast rehydration)
 *
 * On startup, call {@link rehydrate} to rebuild the in-memory `Map` from the
 * `latest/` directory so the cache survives service restarts.
 *
 * @typeParam E - The shape of a cache entry (must include a string `id` field
 * used as the Map key and the latest/ filename).
 */
export declare function createDiskCache<E extends object>(config: DiskCacheConfig,
/** Extract the cache key from an entry. */
keyFn: (entry: E) => string): {
    /** Rebuild the in-memory cache from `latest/`; resolves to the entry count. */
    rehydrate: () => Promise<number>;
    /** Persist an entry and its full payload; resolves to the document file path. */
    put: (entry: E, filename: string, fullPayload: unknown) => Promise<string>;
    /** Look up an entry by key in the in-memory cache only (no disk access). */
    get: (key: string) => E | undefined;
    /** All entries currently held in memory. */
    getAll: () => E[];
    /** Absolute path of the documents directory (for IPFS pinning). */
    getDocumentsDir: () => string;
};
export type DiskCache<E extends object> = ReturnType<typeof createDiskCache<E>>;
package/dist/cache/disk-cache.js ADDED
@@ -0,0 +1,84 @@
1
+ import fs from 'node:fs/promises';
2
+ import path from 'node:path';
3
/**
 * A generic disk-backed in-memory cache keyed by a string identifier.
 *
 * Each entry is persisted to disk in two places:
 * - `documents/` — the full payload (intended for IPFS pinning)
 * - `latest/` — a lightweight lookup file keyed by id (for fast rehydration)
 *
 * On startup, call {@link rehydrate} to rebuild the in-memory `Map` from the
 * `latest/` directory so the cache survives service restarts.
 *
 * @typeParam E - The shape of a cache entry (must include a string `id` field
 * used as the Map key and the latest/ filename).
 */
export function createDiskCache(config,
/** Extract the cache key from an entry. */
keyFn) {
    const docsDir = path.join(config.stagingDir, config.documentsSubdir ?? 'documents');
    const latestDir = path.join(config.stagingDir, config.latestSubdir ?? 'latest');
    const cache = new Map();
    /** Ensure the storage directories exist (idempotent). */
    async function ensureDirs() {
        await fs.mkdir(docsDir, { recursive: true });
        await fs.mkdir(latestDir, { recursive: true });
    }
    /**
     * Rebuild the in-memory cache from the `latest/` directory on disk.
     * Call once before the service starts accepting requests.
     *
     * @returns The number of entries loaded.
     */
    async function rehydrate() {
        await ensureDirs();
        const files = await fs.readdir(latestDir);
        // Read all lookup files concurrently instead of one await per loop
        // iteration. Insertion below follows readdir order, so the resulting
        // Map contents match the previous sequential behavior.
        const loaded = await Promise.all(files.map(async (file) => {
            if (!file.endsWith('.json'))
                return null;
            try {
                const raw = await fs.readFile(path.join(latestDir, file), 'utf-8');
                return { entry: JSON.parse(raw) };
            }
            catch {
                return null; // Skip corrupt / unparseable files
            }
        }));
        let count = 0;
        for (const item of loaded) {
            if (item === null)
                continue;
            try {
                cache.set(keyFn(item.entry), item.entry);
                count++;
            }
            catch {
                // A throwing keyFn previously skipped the file; keep that behavior.
            }
        }
        return count;
    }
    /**
     * Store an entry in both the in-memory cache and on disk.
     *
     * @param entry - The cache entry.
     * @param filename - Filename for the full payload in `documents/`.
     * @param fullPayload - The complete payload to persist (may differ from
     * the lightweight entry stored in `latest/`).
     * @returns Absolute path to the written document file.
     */
    async function put(entry, filename, fullPayload) {
        await ensureDirs();
        const key = keyFn(entry);
        cache.set(key, entry);
        const docPath = path.join(docsDir, filename);
        const latestPath = path.join(latestDir, `${key}.json`);
        // The two files are independent, so write them concurrently.
        await Promise.all([
            fs.writeFile(docPath, JSON.stringify(fullPayload, null, 2)),
            fs.writeFile(latestPath, JSON.stringify(entry)),
        ]);
        return docPath;
    }
    /** Retrieve an entry by key from the in-memory cache. */
    function get(key) {
        return cache.get(key);
    }
    /** Return all cached entries. */
    function getAll() {
        return Array.from(cache.values());
    }
    /** Return the absolute path to the documents directory (for IPFS pinning). */
    function getDocumentsDir() {
        return docsDir;
    }
    return { rehydrate, put, get, getAll, getDocumentsDir };
}
package/dist/config.d.ts ADDED
@@ -0,0 +1,4 @@
1
+ /** Read a required environment variable or throw with a clear message. */
2
+ export declare function requireEnv(name: string): string;
3
+ /** Read an optional environment variable with a fallback default. */
4
+ export declare function optionalEnv(name: string, fallback: string): string;
package/dist/config.js ADDED
@@ -0,0 +1,12 @@
1
/**
 * Read a required environment variable.
 *
 * An empty string counts as missing, matching the truthiness check
 * callers rely on for fail-fast configuration.
 *
 * @param name - Environment variable name.
 * @returns The variable's non-empty value.
 * @throws Error naming the missing variable.
 */
export function requireEnv(name) {
    const value = process.env[name];
    if (value) {
        return value;
    }
    throw new Error(`Missing required environment variable: ${name}`);
}
9
/**
 * Read an optional environment variable with a fallback default.
 *
 * The fallback is used when the variable is unset *or* empty
 * (truthiness, not nullish, check — mirrors {@link requireEnv}).
 *
 * @param name - Environment variable name.
 * @param fallback - Value returned when the variable is unset or empty.
 */
export function optionalEnv(name, fallback) {
    const value = process.env[name];
    return value ? value : fallback;
}
package/dist/hydra/messages.d.ts ADDED
@@ -0,0 +1,31 @@
1
/**
 * Re-exported Hydra WebSocket message types from `@meshsdk/hydra`.
 *
 * `ServerOutput` is a discriminated union (on `.tag`) of all possible messages
 * the Hydra node can send over its WebSocket API. Use `Extract` to narrow:
 *
 * @example
 * ```ts
 * function handleOpen(msg: HydraMessage<"HeadIsOpen">) {
 *   console.log(msg.headId, msg.utxo);
 * }
 * ```
 */
export type { ClientInput, ClientMessage, ConnectionState, ServerOutput } from '@meshsdk/hydra';
import type { ClientMessage, ServerOutput } from '@meshsdk/hydra';
/** Any message received via the Hydra WebSocket (server output or client echo). */
export type HydraWsMessage = ServerOutput | ClientMessage;
/**
 * Extract a specific message type from the `ServerOutput` union by its `tag`.
 *
 * @example
 * ```ts
 * type Greetings = HydraMessage<"Greetings">;
 * // { tag: "Greetings"; me: { vkey: string }; headStatus: HeadStatus; ... }
 * ```
 */
export type HydraMessage<T extends ServerOutput['tag']> = Extract<ServerOutput, {
    tag: T;
}>;
/** Possible Hydra head states as reported in the `Greetings` message. */
export type HeadStatus = 'Idle' | 'Initializing' | 'Open' | 'Closed' | 'FanoutPossible' | 'Final';
package/dist/hydra/messages.js ADDED
@@ -0,0 +1 @@
1
+ export {};
package/dist/hydra/utxo.d.ts CHANGED
@@ -1,4 +1,5 @@
1
- interface ParsedUtxo {
1
+ /** A UTxO entry parsed from the Hydra snapshot. */
2
+ export interface ParsedUtxo {
2
3
  tx_hash: string;
3
4
  output_index: number;
4
5
  address: string;
@@ -7,9 +8,18 @@ interface ParsedUtxo {
7
8
  quantity: string;
8
9
  }[];
9
10
  }
11
+ /**
12
+ * Fetch the full UTxO set from the Hydra head snapshot.
13
+ *
14
+ * Reads `HYDRA_API_URL` from the environment.
15
+ *
16
+ * @returns All UTxOs currently held in the Hydra head.
17
+ */
10
18
  export declare function getUtxoSet(): Promise<ParsedUtxo[]>;
11
19
  /**
12
- * Fetches the full UTxO snapshot and filters by address.
20
+ * Fetch UTxOs belonging to a specific address from the Hydra head snapshot.
21
+ *
22
+ * @param address - Bech32 address to filter by.
23
+ * @returns UTxOs matching the given address.
13
24
  */
14
25
  export declare function queryUtxoByAddress(address: string): Promise<ParsedUtxo[]>;
15
- export {};
package/dist/hydra/utxo.js CHANGED
@@ -1,16 +1,21 @@
1
1
  import axios from 'axios';
2
+ import { requireEnv } from '../config.js';
3
+ /**
4
+ * Fetch the full UTxO set from the Hydra head snapshot.
5
+ *
6
+ * Reads `HYDRA_API_URL` from the environment.
7
+ *
8
+ * @returns All UTxOs currently held in the Hydra head.
9
+ */
2
10
  export async function getUtxoSet() {
3
- const baseUrl = process.env.HYDRA_API_URL;
4
- if (!baseUrl) {
5
- throw new Error("HYDRA_API_URL is not defined in the environment variables!");
6
- }
11
+ const baseUrl = requireEnv('HYDRA_API_URL');
7
12
  const url = `${baseUrl}/snapshot/utxo`;
8
13
  try {
9
14
  const response = await axios.get(url);
10
15
  const data = response.data;
11
16
  const UtxoSet = [];
12
17
  for (const [txKey, utxo] of Object.entries(data)) {
13
- const [tx_hash, index_str] = txKey.split("#");
18
+ const [tx_hash, index_str] = txKey.split('#');
14
19
  const output_index = parseInt(index_str, 10);
15
20
  const amount = Object.entries(utxo.value).map(([unit, quantity]) => ({
16
21
  unit,
@@ -26,27 +31,23 @@ export async function getUtxoSet() {
26
31
  return UtxoSet;
27
32
  }
28
33
  catch (error) {
29
- console.error("Error fetching the Hydra Ledger?", error);
34
+ console.error('Error fetching the Hydra Ledger?', error);
30
35
  throw error;
31
36
  }
32
37
  }
33
38
  /**
34
- * Fetches the full UTxO snapshot and filters by address.
39
+ * Fetch UTxOs belonging to a specific address from the Hydra head snapshot.
40
+ *
41
+ * @param address - Bech32 address to filter by.
42
+ * @returns UTxOs matching the given address.
35
43
  */
36
44
  export async function queryUtxoByAddress(address) {
37
- console.log(`Querying UTxO by Address: ${address}`);
38
- try {
39
- const result = [];
40
- const data = await getUtxoSet();
41
- for (const utxo of data) {
42
- if (utxo.address === address) {
43
- result.push(utxo);
44
- }
45
+ const result = [];
46
+ const data = await getUtxoSet();
47
+ for (const utxo of data) {
48
+ if (utxo.address === address) {
49
+ result.push(utxo);
45
50
  }
46
- return result;
47
- }
48
- catch (error) {
49
- console.error('Failed to fetch or parse UTxO snapshot:', error.message);
50
- throw error;
51
51
  }
52
+ return result;
52
53
  }
package/dist/index.d.ts CHANGED
@@ -1,7 +1,14 @@
1
+ export type { DiskCache, DiskCacheConfig } from './cache/disk-cache.js';
2
+ export { createDiskCache } from './cache/disk-cache.js';
3
+ export { optionalEnv, requireEnv } from './config.js';
4
+ export type { HeadStatus, HydraMessage, HydraWsMessage, ServerOutput } from './hydra/messages.js';
5
+ export type { ParsedUtxo } from './hydra/utxo.js';
6
+ export { getUtxoSet, queryUtxoByAddress } from './hydra/utxo.js';
7
+ export type { IpfsClient, IpfsConfig, PinResult } from './ipfs/ipfs.js';
8
+ export { createIpfsClient } from './ipfs/ipfs.js';
1
9
  export { getAdmin } from './mesh/get-admin.js';
2
10
  export { createMultisigAddress, createNativeScript } from './mesh/native-script.js';
3
- export { Wrangler } from './mesh/wrangler.js';
4
- export { getUtxoSet, queryUtxoByAddress } from './hydra/utxo.js';
11
+ export { CommitArgs, UTxORef, Wrangler } from './mesh/wrangler.js';
5
12
  export { submitTx } from './tx3/submit-tx.js';
6
- export { bufferToHex, bufferToAscii, verifySignature } from './utils/verify-signature.js';
7
13
  export { chunkString } from './utils/chunk-string.js';
14
+ export { bufferToAscii, bufferToHex, verifySignature } from './utils/verify-signature.js';
package/dist/index.js CHANGED
@@ -1,7 +1,10 @@
1
+ export { createDiskCache } from './cache/disk-cache.js';
2
+ export { optionalEnv, requireEnv } from './config.js';
3
+ export { getUtxoSet, queryUtxoByAddress } from './hydra/utxo.js';
4
+ export { createIpfsClient } from './ipfs/ipfs.js';
1
5
  export { getAdmin } from './mesh/get-admin.js';
2
6
  export { createMultisigAddress, createNativeScript } from './mesh/native-script.js';
3
7
  export { Wrangler } from './mesh/wrangler.js';
4
- export { getUtxoSet, queryUtxoByAddress } from './hydra/utxo.js';
5
8
  export { submitTx } from './tx3/submit-tx.js';
6
- export { bufferToHex, bufferToAscii, verifySignature } from './utils/verify-signature.js';
7
9
  export { chunkString } from './utils/chunk-string.js';
10
+ export { bufferToAscii, bufferToHex, verifySignature } from './utils/verify-signature.js';
package/dist/ipfs/ipfs.d.ts ADDED
@@ -0,0 +1,22 @@
1
export interface IpfsConfig {
    /** Base URL for the Kubo HTTP API (e.g. "http://localhost:5001"). */
    apiUrl: string;
}
export interface PinResult {
    /** The content-identifier returned by the IPFS node. */
    cid: string;
    /** Size in bytes as reported by the IPFS node. */
    size: number;
}
/**
 * Create an IPFS client bound to the given Kubo API endpoint.
 *
 * All operations go through the Kubo HTTP RPC, so the only requirement
 * is a reachable Kubo node — no additional IPFS libraries are needed.
 */
export declare function createIpfsClient(config: IpfsConfig): {
    /** Pin a single JSON payload; resolves to its CID and reported size. */
    pinJson: (filename: string, payload: unknown) => Promise<PinResult>;
    /** Pin every regular file in a local directory, wrapped in an IPFS directory. */
    pinDirectory: (dirPath: string) => Promise<PinResult>;
    /** Fetch and JSON-parse a payload by CID. */
    fetchJson: <T = unknown>(cid: string) => Promise<T>;
};
export type IpfsClient = ReturnType<typeof createIpfsClient>;
package/dist/ipfs/ipfs.js ADDED
@@ -0,0 +1,77 @@
1
+ import fs from 'node:fs/promises';
2
+ import path from 'node:path';
3
+ /**
4
+ * Create an IPFS client bound to the given Kubo API endpoint.
5
+ *
6
+ * All operations go through the Kubo HTTP RPC, so the only requirement
7
+ * is a reachable Kubo node — no additional IPFS libraries are needed.
8
+ */
9
+ export function createIpfsClient(config) {
10
+ const { apiUrl } = config;
11
+ /**
12
+ * Pin a single JSON-serialisable payload and return its CID.
13
+ *
14
+ * @param filename - Filename used inside the IPFS object.
15
+ * @param payload - Any JSON-serialisable value.
16
+ */
17
+ async function pinJson(filename, payload) {
18
+ const body = JSON.stringify(payload, null, 2);
19
+ const form = new FormData();
20
+ form.append('file', new Blob([body], { type: 'application/json' }), filename);
21
+ const res = await fetch(`${apiUrl}/api/v0/add?pin=true`, {
22
+ method: 'POST',
23
+ body: form,
24
+ });
25
+ if (!res.ok) {
26
+ throw new Error(`IPFS pin failed: ${res.status} ${await res.text()}`);
27
+ }
28
+ const json = (await res.json());
29
+ return { cid: json.Hash, size: Number.parseInt(json.Size, 10) };
30
+ }
31
+ /**
32
+ * Pin every file in a directory and wrap them in an IPFS directory object.
33
+ *
34
+ * @param dirPath - Absolute path to a local directory.
35
+ * @returns The CID of the wrapping IPFS directory.
36
+ */
37
+ async function pinDirectory(dirPath) {
38
+ const entries = await fs.readdir(dirPath);
39
+ const form = new FormData();
40
+ for (const entry of entries) {
41
+ const filePath = path.join(dirPath, entry);
42
+ const stat = await fs.stat(filePath);
43
+ if (!stat.isFile())
44
+ continue;
45
+ const content = await fs.readFile(filePath, 'utf-8');
46
+ form.append('file', new Blob([content]), entry);
47
+ }
48
+ const res = await fetch(`${apiUrl}/api/v0/add?pin=true&wrap-with-directory=true&recursive=true`, {
49
+ method: 'POST',
50
+ body: form,
51
+ });
52
+ if (!res.ok) {
53
+ throw new Error(`IPFS directory pin failed: ${res.status} ${await res.text()}`);
54
+ }
55
+ // Kubo returns one JSON object per line (ndjson). The last line is the
56
+ // wrapping directory entry.
57
+ const text = await res.text();
58
+ const lines = text.trim().split('\n');
59
+ const last = JSON.parse(lines[lines.length - 1]);
60
+ return { cid: last.Hash, size: Number.parseInt(last.Size, 10) };
61
+ }
62
+ /**
63
+ * Fetch a JSON payload from IPFS by CID.
64
+ *
65
+ * @param cid - The content identifier to retrieve.
66
+ */
67
+ async function fetchJson(cid) {
68
+ const res = await fetch(`${apiUrl}/api/v0/cat?arg=${cid}`, {
69
+ method: 'POST',
70
+ });
71
+ if (!res.ok) {
72
+ throw new Error(`IPFS fetch failed: ${res.status} ${await res.text()}`);
73
+ }
74
+ return (await res.json());
75
+ }
76
+ return { pinJson, pinDirectory, fetchJson };
77
+ }
package/dist/mesh/get-admin.d.ts CHANGED
@@ -1,2 +1,11 @@
1
1
import { MeshWallet } from '@meshsdk/core';
/**
 * Create and initialize a MeshWallet for the Hydra head admin.
 *
 * Reads the Cardano signing key from `HYDRA_ADMIN_KEY_FILE` (preferred)
 * or falls back to `HYDRA_ADMIN_CARDANO_PK`. Network is selected via
 * `HYDRA_NETWORK` (negative values are clamped to `0`, i.e. testnet).
 *
 * @returns An initialized MeshWallet ready for signing transactions.
 * @throws If no signing key is available or the wallet fails to initialize.
 */
export declare function getAdmin(): Promise<MeshWallet>;
package/dist/mesh/get-admin.js CHANGED
@@ -1,10 +1,31 @@
1
- import { MeshWallet, } from '@meshsdk/core';
1
+ import { readFileSync } from 'node:fs';
2
+ import { MeshWallet } from '@meshsdk/core';
3
+ import { optionalEnv } from '../config.js';
4
+ /**
5
+ * Create and initialize a MeshWallet for the Hydra head admin.
6
+ *
7
+ * Reads the Cardano signing key from `HYDRA_ADMIN_KEY_FILE` (preferred)
8
+ * or falls back to `HYDRA_ADMIN_CARDANO_PK`. Network is selected via `HYDRA_NETWORK`.
9
+ *
10
+ * @returns An initialized MeshWallet ready for signing transactions.
11
+ * @throws If no signing key is available or the wallet fails to initialize.
12
+ */
2
13
  export async function getAdmin() {
3
- const keyCborHex = process.env.HYDRA_ADMIN_CARDANO_PK || null;
14
+ let keyCborHex = null;
15
+ // Preferred: read the instance's cardano.sk file directly (no secrets in .env)
16
+ const keyFile = process.env.HYDRA_ADMIN_KEY_FILE;
17
+ if (keyFile) {
18
+ const content = JSON.parse(readFileSync(keyFile, 'utf-8'));
19
+ keyCborHex = content.cborHex ?? content.key?.cborHex ?? null;
20
+ }
21
+ // Fallback: cborHex passed via env var (backward compatible)
22
+ if (!keyCborHex) {
23
+ keyCborHex = process.env.HYDRA_ADMIN_CARDANO_PK || null;
24
+ }
4
25
  if (!keyCborHex) {
5
- throw new Error('Admin signing key is not defined!');
26
+ throw new Error('Cardano signing key not found. Set HYDRA_ADMIN_KEY_FILE to the instance cardano.sk path, or HYDRA_ADMIN_CARDANO_PK to its cborHex.');
6
27
  }
7
- let networkId = parseInt(process.env.HYDRA_NETWORK || '0', 10);
28
+ let networkId = parseInt(optionalEnv('HYDRA_NETWORK', '0'), 10);
8
29
  if (networkId < 0) {
9
30
  networkId = 0;
10
31
  }
package/dist/mesh/native-script.d.ts CHANGED
@@ -1,9 +1,11 @@
1
1
  /**
2
- * Create a multisig address using 'any' policy (AND logic)
3
- * @param address1 The first address to include in the script (staking or payment)
4
- * @param address2 The second address to include in the script (staking or payment)
5
- * @param networkId 0 = testnet, 1 = mainnet
6
- * @param scriptType
2
+ * Create a multisig script address from two Cardano addresses.
3
+ *
4
+ * @param address1 - First address (bech32) to include in the native script.
5
+ * @param address2 - Second address (bech32) to include in the native script.
6
+ * @param networkId - `0` for testnet, `1` for mainnet.
7
+ * @param scriptType - `"any"` requires one signature, `"all"` requires both.
8
+ * @returns The script address, serialized CBOR, and script hash.
7
9
  */
8
10
  export declare function createMultisigAddress(address1: string, address2: string, networkId?: number, scriptType?: 'any' | 'all'): {
9
11
  address: string;
@@ -11,9 +13,24 @@ export declare function createMultisigAddress(address1: string, address2: string
11
13
  scriptHash?: string;
12
14
  };
13
15
  /**
14
- * Create a Native Script policy for minting tokens
16
+ * Create a native script policy for minting tokens.
17
+ *
18
+ * Without options, produces a bare `sig(keyHash)` script suitable for
19
+ * in-head voter tokens that must remain burnable by the admin.
20
+ *
21
+ * When `invalidHereafter` is provided, produces a compound time-bound
22
+ * script: `all: [sig(keyHash), before(slot)]` — each ballot gets its
23
+ * own time-bound policy.
24
+ *
25
+ * @param address - Address (bech32) whose key hash is the required signer.
26
+ * @param opts.invalidHereafter - Slot after which minting is no longer possible.
27
+ * @param opts.networkId - `0` for testnet (default), `1` for mainnet.
28
+ * @returns The script address, serialized CBOR, and script hash.
15
29
  */
16
- export declare function createNativeScript(address1: string, networkId?: number, scriptType?: 'any' | 'all', invalidBefore?: number | null, invalidHereafter?: number | null): {
30
+ export declare function createNativeScript(address: string, opts?: {
31
+ invalidHereafter?: number;
32
+ networkId?: number;
33
+ }): {
17
34
  address: string;
18
35
  scriptCbor?: string;
19
36
  scriptHash?: string;