wayfind 2.0.48 → 2.0.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -356,6 +356,59 @@ async function detect() {
356
356
 
357
357
  const OPENAI_EMBEDDINGS_URL = 'https://api.openai.com/v1/embeddings';
358
358
  const DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small';
359
+ const LOCAL_EMBEDDING_MODEL = 'Xenova/all-MiniLM-L6-v2';
360
+
361
+ // Cached pipeline instance — expensive to initialize, reuse across calls.
362
+ let _localPipeline = null;
363
+
364
+ /**
365
+ * Try to generate an embedding using the local ONNX model (@xenova/transformers).
366
+ * Returns null if the package is not installed or the model fails to load.
367
+ * Downloads the model (~80MB) on first use into the transformers cache.
368
+ * @param {string} text
369
+ * @returns {Promise<number[]|null>}
370
+ */
371
+ async function generateEmbeddingLocal(text) {
372
+ try {
373
+ if (!_localPipeline) {
374
+ // Dynamic require — optional dep, may not be installed
375
+ const { pipeline, env } = require('@xenova/transformers');
376
+ // NOTE(review): allowLocalModels=false disables local model files (forces hub fetch) —
+ // it does not suppress progress output; confirm this guard's intent
377
+ if (!process.stdout.isTTY) {
378
+ env.allowLocalModels = false;
379
+ }
380
+ process.stderr.write('[wayfind] Loading local embedding model (first use — may take a moment)...\n');
381
+ _localPipeline = await pipeline('feature-extraction', LOCAL_EMBEDDING_MODEL);
382
+ }
383
+ const output = await _localPipeline(text, { pooling: 'mean', normalize: true });
384
+ return Array.from(output.data);
385
+ } catch (_) {
386
+ return null;
387
+ }
388
+ }
389
+
390
+ /**
391
+ * Detect which embedding provider is active based on env vars and installed packages.
392
+ * Returns an object describing the provider so callers can surface this to users.
393
+ * @returns {{ provider: string, model: string|null, requiresKey: boolean, available: boolean }}
394
+ */
395
+ function getEmbeddingProviderInfo() {
396
+ if (isSimulation()) {
397
+ return { provider: 'simulation', model: 'fake-1536d', requiresKey: false, available: true };
398
+ }
399
+ if (process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT) {
400
+ const hasKey = !!process.env.AZURE_OPENAI_EMBEDDING_KEY;
401
+ return { provider: 'azure', model: process.env.AZURE_OPENAI_EMBEDDING_DEPLOYMENT || 'text-embedding-3-small', requiresKey: true, available: hasKey };
402
+ }
403
+ if (process.env.OPENAI_API_KEY) {
404
+ return { provider: 'openai', model: DEFAULT_EMBEDDING_MODEL, requiresKey: true, available: true };
405
+ }
406
+ try {
407
+ require.resolve('@xenova/transformers');
408
+ return { provider: 'local', model: LOCAL_EMBEDDING_MODEL, requiresKey: false, available: true };
409
+ } catch (_) {}
410
+ return { provider: 'none', model: null, requiresKey: false, available: false };
411
+ }
359
412
 
360
413
  /**
361
414
  * Generate an embedding vector for the given text.
@@ -392,7 +445,14 @@ async function generateEmbedding(text, options = {}) {
392
445
  const apiKeyEnv = options.apiKeyEnv || 'OPENAI_API_KEY';
393
446
  const apiKey = process.env[apiKeyEnv];
394
447
  if (!apiKey) {
395
- throw new Error(`Embeddings: Missing API key. Set ${apiKeyEnv} environment variable.`);
448
+ // No cloud key — try local model before failing
449
+ const localVec = await generateEmbeddingLocal(text);
450
+ if (localVec !== null) return localVec;
451
+ throw new Error(
452
+ 'Embeddings: No provider configured.\n' +
453
+ ' Option 1 (cloud): set OPENAI_API_KEY or AZURE_OPENAI_EMBEDDING_ENDPOINT\n' +
454
+ ' Option 2 (local, no key): npm install -g @xenova/transformers'
455
+ );
396
456
  }
397
457
 
398
458
  const baseUrl = options.baseUrl || OPENAI_EMBEDDINGS_URL.replace('/embeddings', '');
@@ -477,5 +537,6 @@ module.exports = {
477
537
  call,
478
538
  detect,
479
539
  generateEmbedding,
540
+ getEmbeddingProviderInfo,
480
541
  isSimulation,
481
542
  };
@@ -426,7 +426,7 @@ async function indexJournals(options = {}) {
426
426
  const storePath = options.storePath || resolveStorePath();
427
427
  const doEmbeddings = options.embeddings !== undefined
428
428
  ? options.embeddings
429
- : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation());
429
+ : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation() || llm.getEmbeddingProviderInfo().available);
430
430
 
431
431
  if (!journalDir || !storePath) {
432
432
  throw new Error('Journal directory and store path are required.');
@@ -546,11 +546,13 @@ async function indexJournals(options = {}) {
546
546
  stats.entryCount = Object.keys(finalEntries).length;
547
547
 
548
548
  // Save
549
+ const embeddingModel = doEmbeddings ? llm.getEmbeddingProviderInfo().model : null;
549
550
  const index = {
550
551
  version: INDEX_VERSION,
551
552
  lastUpdated: Date.now(),
552
553
  entryCount: stats.entryCount,
553
554
  entries: finalEntries,
555
+ ...(embeddingModel ? { embedding_model: embeddingModel } : {}),
554
556
  };
555
557
 
556
558
  backend.saveIndex(index);
@@ -563,6 +565,7 @@ async function indexJournals(options = {}) {
563
565
  entry_count: stats.entryCount,
564
566
  new_entries: stats.newEntries,
565
567
  has_embeddings: doEmbeddings,
568
+ embedding_model: embeddingModel,
566
569
  });
567
570
 
568
571
  return stats;
@@ -1375,7 +1378,7 @@ async function indexConversations(options = {}) {
1375
1378
  const storePath = options.storePath || resolveStorePath();
1376
1379
  const doEmbeddings = options.embeddings !== undefined
1377
1380
  ? options.embeddings
1378
- : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation());
1381
+ : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation() || llm.getEmbeddingProviderInfo().available);
1379
1382
 
1380
1383
  if (!projectsDir || !storePath) {
1381
1384
  throw new Error('Projects directory and store path are required.');
@@ -1566,6 +1569,10 @@ async function indexConversations(options = {}) {
1566
1569
 
1567
1570
  // Save everything
1568
1571
  existingIndex.entryCount = Object.keys(existingIndex.entries).length;
1572
+ if (doEmbeddings) {
1573
+ const model = llm.getEmbeddingProviderInfo().model;
1574
+ if (model) existingIndex.embedding_model = model;
1575
+ }
1569
1576
  backend.saveIndex(existingIndex);
1570
1577
  if (doEmbeddings) {
1571
1578
  backend.saveEmbeddings(existingEmbeddings);
@@ -1801,7 +1808,7 @@ async function indexSignals(options = {}) {
1801
1808
  const storePath = options.storePath || resolveStorePath();
1802
1809
  const doEmbeddings = options.embeddings !== undefined
1803
1810
  ? options.embeddings
1804
- : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation());
1811
+ : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation() || llm.getEmbeddingProviderInfo().available);
1805
1812
 
1806
1813
  if (!signalsDir || !storePath) {
1807
1814
  throw new Error('Signals directory and store path are required.');
@@ -2076,6 +2083,10 @@ async function indexSignals(options = {}) {
2076
2083
 
2077
2084
  // Save
2078
2085
  existingIndex.entryCount = Object.keys(existingIndex.entries).length;
2086
+ if (doEmbeddings) {
2087
+ const model = llm.getEmbeddingProviderInfo().model;
2088
+ if (model) existingIndex.embedding_model = model;
2089
+ }
2079
2090
  backend.saveIndex(existingIndex);
2080
2091
  if (doEmbeddings) {
2081
2092
  backend.saveEmbeddings(existingEmbeddings);
@@ -2376,6 +2387,12 @@ module.exports = {
2376
2387
  saveIndex: (storePath, index) => getBackend(storePath || resolveStorePath()).saveIndex(index),
2377
2388
  loadEmbeddings: (storePath) => getBackend(storePath || resolveStorePath()).loadEmbeddings(),
2378
2389
  saveEmbeddings: (storePath, embeddings) => getBackend(storePath || resolveStorePath()).saveEmbeddings(embeddings),
2390
+ getStoredEmbeddingModel: (storePath) => {
2391
+ try {
2392
+ const idx = getBackend(storePath || resolveStorePath()).loadIndex();
2393
+ return idx ? (idx.embedding_model || null) : null;
2394
+ } catch { return null; }
2395
+ },
2379
2396
  loadConversationIndex: (storePath) => getBackend(storePath || resolveStorePath()).loadConversationIndex(),
2380
2397
  saveConversationIndex: (storePath, convIndex) => getBackend(storePath || resolveStorePath()).saveConversationIndex(convIndex),
2381
2398
 
@@ -59,6 +59,10 @@ function splitAndLabel(content, startId) {
59
59
  * @param {Object} llmConfig - LLM config for the scoring call
60
60
  * @returns {Promise<Array<{id: number, [personaId]: number}>|null>} Scores array or null on failure
61
61
  */
62
+ // Maximum characters to send to the scoring LLM in a single call.
63
+ // Beyond this, scoring is skipped and budget truncation handles content selection.
64
+ const SCORE_MAX_CHARS = 40000;
65
+
62
66
  async function scoreItems(signalContent, journalContent, personas, llmConfig) {
63
67
  const signalResult = splitAndLabel(signalContent, 0);
64
68
  const journalResult = splitAndLabel(journalContent, signalResult.items.length);
@@ -66,6 +70,11 @@ async function scoreItems(signalContent, journalContent, personas, llmConfig) {
66
70
  const totalItems = signalResult.items.length + journalResult.items.length;
67
71
  if (totalItems === 0) return null;
68
72
 
73
+ // Skip scoring if content is too large for a reliable single LLM call.
74
+ // The token budget step will handle content selection without scoring.
75
+ const totalChars = (signalResult.labeled || '').length + (journalResult.labeled || '').length;
76
+ if (totalChars > SCORE_MAX_CHARS) return null;
77
+
69
78
  const systemPrompt = buildScoringPrompt(personas);
70
79
 
71
80
  const userParts = [];
@@ -38,6 +38,7 @@ const digest = require('./digest');
38
38
  const slack = require('./slack');
39
39
  const slackBot = require('./slack-bot');
40
40
  const contentStore = require('./content-store');
41
+ const llm = require('./connectors/llm');
41
42
  const rebuildStatus = require('./rebuild-status');
42
43
  const telemetry = require('./telemetry');
43
44
 
@@ -425,7 +426,7 @@ async function teamJoin(args) {
425
426
  console.log(` Semantic search: available | ${containerEndpoint}`);
426
427
  } else {
427
428
  console.log(` Semantic search: not configured`);
428
- console.log(` Ask your team admin: wayfind deploy set-endpoint ${teamId} <url>`);
429
+ console.log(` Ask your team admin: wayfind deploy set-endpoint <url> --team ${teamId}`);
429
430
  }
430
431
  if (keyReady) {
431
432
  console.log(` Search API key: ready — rotates daily, committed to team repo`);
@@ -1053,12 +1054,26 @@ async function runDigest(args) {
1053
1054
  process.exit(1);
1054
1055
  }
1055
1056
 
1057
+ // Sanitize configured paths — connectors.json may have been written from inside a container
1058
+ // with paths like /home/node/... or /data/... that don't exist on the host. Fall back to
1059
+ // local defaults for any path that doesn't resolve on this machine.
1060
+ const digestConfig = { ...config.digest };
1061
+ if (digestConfig.store_path && !fs.existsSync(digestConfig.store_path)) {
1062
+ digestConfig.store_path = contentStore.resolveStorePath();
1063
+ }
1064
+ if (digestConfig.journal_dir && !fs.existsSync(digestConfig.journal_dir)) {
1065
+ digestConfig.journal_dir = contentStore.DEFAULT_JOURNAL_DIR;
1066
+ }
1067
+ if (digestConfig.signals_dir && !fs.existsSync(digestConfig.signals_dir)) {
1068
+ digestConfig.signals_dir = contentStore.resolveSignalsDir();
1069
+ }
1070
+
1056
1071
  // Generate digests
1057
1072
  console.log(`Generating digests for: ${personaIds.join(', ')}`);
1058
1073
  console.log(`Period: ${sinceDate} to today`);
1059
1074
  console.log('');
1060
1075
 
1061
- const result = await digest.generateDigest(config.digest, personaIds, sinceDate, (progress) => {
1076
+ const result = await digest.generateDigest(digestConfig, personaIds, sinceDate, (progress) => {
1062
1077
  if (progress.phase === 'start') {
1063
1078
  process.stdout.write(` ${progress.personaId} (${progress.index + 1}/${progress.total})... `);
1064
1079
  } else if (progress.phase === 'done') {
@@ -1296,7 +1311,20 @@ async function runReindex(args) {
1296
1311
  const force = args.includes('--force');
1297
1312
 
1298
1313
  if (force) {
1299
- console.log('Force mode: clearing content store for full reindex...');
1314
+ // Warn if stored embeddings used a different model than the current provider
1315
+ const storedModel = contentStore.getStoredEmbeddingModel();
1316
+ const currentProvider = llm.getEmbeddingProviderInfo();
1317
+ if (storedModel && currentProvider.model && storedModel !== currentProvider.model) {
1318
+ console.log(`⚠️ Embedding model mismatch:`);
1319
+ console.log(` Stored embeddings: ${storedModel}`);
1320
+ console.log(` Current provider: ${currentProvider.model} (${currentProvider.provider})`);
1321
+ console.log(` All embeddings will be cleared and regenerated with the current provider.`);
1322
+ console.log(` Entries without embeddings will fall back to full-text search until reindexed.`);
1323
+ } else if (storedModel) {
1324
+ console.log(`Force mode: clearing content store — will regenerate ${storedModel} embeddings...`);
1325
+ } else {
1326
+ console.log('Force mode: clearing content store for full reindex...');
1327
+ }
1300
1328
  try {
1301
1329
  const backend = contentStore.getBackend();
1302
1330
  const emptyIndex = { version: contentStore.INDEX_VERSION, entries: {}, lastUpdated: Date.now(), entryCount: 0 };
@@ -2139,6 +2167,15 @@ function journalSync(args) {
2139
2167
  teamFiles[teamId].push({ file, srcPath: path.join(journalDir, file) });
2140
2168
  }
2141
2169
 
2170
+ // Always update member version stamp for all registered teams, even if no files to sync.
2171
+ // This ensures the stamp stays current regardless of whether journals are flowing.
2172
+ if (config.teams) {
2173
+ for (const teamId of Object.keys(config.teams)) {
2174
+ const teamPath = getTeamContextPath(teamId);
2175
+ if (teamPath) stampMemberVersion(teamPath);
2176
+ }
2177
+ }
2178
+
2142
2179
  if (Object.keys(teamFiles).length === 0) {
2143
2180
  console.log('No journal files to sync.');
2144
2181
  return;
@@ -3558,7 +3595,7 @@ function contextSync() {
3558
3595
  *
3559
3596
  * @param {string[]} args - CLI arguments (--quiet suppresses output)
3560
3597
  */
3561
- function contextPull(args) {
3598
+ async function contextPull(args) {
3562
3599
  const quiet = args.includes('--quiet');
3563
3600
  const background = args.includes('--background');
3564
3601
  const log = quiet ? () => {} : console.log;
@@ -3591,6 +3628,16 @@ function contextPull(args) {
3591
3628
  log('[wayfind] Pulled latest team-context');
3592
3629
  // Mark success — doctor checks this to warn on prolonged failures
3593
3630
  try { fs.writeFileSync(markerFile, new Date().toISOString()); } catch {}
3631
+ // Index any new team journals into the local content store
3632
+ const journalsDir = path.join(teamPath, 'journals');
3633
+ if (fs.existsSync(journalsDir)) {
3634
+ try {
3635
+ const stats = await contentStore.indexJournals({ journalDir: journalsDir });
3636
+ if (!quiet && stats.newEntries > 0) {
3637
+ log(`[wayfind] Indexed ${stats.newEntries} new team journal entries`);
3638
+ }
3639
+ } catch (_) {}
3640
+ }
3594
3641
  } else if (result.error && result.error.code === 'ETIMEDOUT') {
3595
3642
  log('[wayfind] Team-context pull timed out — using local state');
3596
3643
  } else {
@@ -4033,7 +4080,8 @@ function deployTeamInit(teamId, { port } = {}) {
4033
4080
  let composeContent = fs.readFileSync(templatePath, 'utf8');
4034
4081
  composeContent = composeContent
4035
4082
  .replace(/container_name: wayfind/, `container_name: ${containerName}`)
4036
- .replace(/- "3141:3141"/, `- "${assignedPort}:3141"`);
4083
+ .replace(/- "3141:3141"/, `- "${assignedPort}:3141"`)
4084
+ .replace(/^(services:)/m, `name: ${containerName}\n\n$1`);
4037
4085
 
4038
4086
  // Inject Docker label for discovery
4039
4087
  composeContent = composeContent.replace(
@@ -4824,7 +4872,7 @@ async function indexJournalsIfAvailable() {
4824
4872
  console.log('No journal files found — skipping index.');
4825
4873
  return;
4826
4874
  }
4827
- const hasEmbeddingKey = !!process.env.OPENAI_API_KEY;
4875
+ const hasEmbeddingKey = !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT);
4828
4876
  console.log(`Indexing ${entries.length} journal files from ${journalDir}${hasEmbeddingKey ? ' (with embeddings)' : ''}...`);
4829
4877
  try {
4830
4878
  const stats = await contentStore.indexJournals({
@@ -5380,12 +5428,48 @@ async function runContainerDoctor() {
5380
5428
  console.log(' Run: wayfind reindex');
5381
5429
  issues++;
5382
5430
  }
5431
+
5432
+ // Embedding provider + model mismatch check
5433
+ try {
5434
+ const providerInfo = llm.getEmbeddingProviderInfo();
5435
+ const storedModel = contentStore.getStoredEmbeddingModel(storePath);
5436
+ if (!providerInfo.available) {
5437
+ warn('Embedding provider: none configured — semantic search unavailable (full-text only)');
5438
+ console.log(' Option 1 (cloud): set OPENAI_API_KEY');
5439
+ console.log(' Option 2 (local, no key): npm install -g @xenova/transformers');
5440
+ issues++;
5441
+ } else {
5442
+ const label = providerInfo.provider === 'local'
5443
+ ? `local (${providerInfo.model}) — no API key required`
5444
+ : `${providerInfo.provider} (${providerInfo.model})`;
5445
+ pass(`Embedding provider: ${label}`);
5446
+ if (storedModel && providerInfo.model && storedModel !== providerInfo.model) {
5447
+ warn(`Embedding model mismatch: stored=${storedModel}, current=${providerInfo.model}`);
5448
+ console.log(' Semantic search results may be degraded — stored embeddings are from a different model.');
5449
+ console.log(' Fix: wayfind reindex --force (clears and regenerates all embeddings)');
5450
+ issues++;
5451
+ }
5452
+ }
5453
+ } catch (_) {}
5383
5454
  } catch (e) {
5384
5455
  warn(`Embedding coverage: error — ${e.message}`);
5385
5456
  issues++;
5386
5457
  }
5387
5458
  }
5388
5459
 
5460
+ // ── Embedding provider (standalone, when no entries yet) ───────────────────
5461
+ if (entryCount === 0) {
5462
+ try {
5463
+ const providerInfo = llm.getEmbeddingProviderInfo();
5464
+ if (!providerInfo.available) {
5465
+ warn('Embedding provider: none — semantic search will not be available');
5466
+ console.log(' Option 1 (cloud): set OPENAI_API_KEY');
5467
+ console.log(' Option 2 (local, no key): npm install -g @xenova/transformers');
5468
+ issues++;
5469
+ }
5470
+ } catch (_) {}
5471
+ }
5472
+
5389
5473
  // 4. Signal freshness — are there signal files from today?
5390
5474
  const signalsDir = path.join(EFFECTIVE_DIR, 'signals');
5391
5475
  const today = new Date().toISOString().slice(0, 10); // YYYY-MM-DD
@@ -5901,6 +5985,21 @@ const COMMANDS = {
5901
5985
  // Also sync public-staging docs if they exist
5902
5986
  const publicDocsDir = path.join(sourceRoot, 'public-staging', 'docs');
5903
5987
 
5988
+ // Keep plugin.json version in sync with package.json before syncing
5989
+ const pluginJsonPath = path.join(sourceRoot, 'plugin', '.claude-plugin', 'plugin.json');
5990
+ const pkgJsonPath = path.join(sourceRoot, 'package.json');
5991
+ if (fs.existsSync(pluginJsonPath) && fs.existsSync(pkgJsonPath)) {
5992
+ try {
5993
+ const pluginJson = JSON.parse(fs.readFileSync(pluginJsonPath, 'utf8'));
5994
+ const pkgJson = JSON.parse(fs.readFileSync(pkgJsonPath, 'utf8'));
5995
+ if (pluginJson.version !== pkgJson.version) {
5996
+ pluginJson.version = pkgJson.version;
5997
+ fs.writeFileSync(pluginJsonPath, JSON.stringify(pluginJson, null, 2) + '\n');
5998
+ console.log(`Updated plugin.json version to ${pkgJson.version}`);
5999
+ }
6000
+ } catch {}
6001
+ }
6002
+
5904
6003
  console.log('Syncing files...');
5905
6004
  for (const item of syncItems) {
5906
6005
  const isDir = item.endsWith('/');
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wayfind",
3
- "version": "2.0.48",
3
+ "version": "2.0.50",
4
4
  "description": "Team decision trail for AI-assisted development. The connective tissue between product, engineering, and strategy.",
5
5
  "bin": {
6
6
  "wayfind": "./bin/team-context.js",
@@ -54,6 +54,7 @@
54
54
  "posthog-node": "^5.28.0"
55
55
  },
56
56
  "optionalDependencies": {
57
- "better-sqlite3": "^11.0.0"
57
+ "better-sqlite3": "^11.0.0",
58
+ "@xenova/transformers": "^2.17.2"
58
59
  }
59
60
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wayfind",
3
- "version": "2.0.20",
3
+ "version": "2.0.49",
4
4
  "description": "Team decision trail for AI-assisted development. Session memory, decision journals, and team digests.",
5
5
  "author": {
6
6
  "name": "Wayfind",
@@ -9,6 +9,14 @@
9
9
  "homepage": "https://github.com/usewayfind/wayfind",
10
10
  "repository": "https://github.com/usewayfind/wayfind",
11
11
  "license": "Apache-2.0",
12
- "keywords": ["team", "context", "memory", "decisions", "digest", "journal", "session"],
12
+ "keywords": [
13
+ "team",
14
+ "context",
15
+ "memory",
16
+ "decisions",
17
+ "digest",
18
+ "journal",
19
+ "session"
20
+ ],
13
21
  "skills": "./skills/"
14
22
  }
@@ -37,7 +37,8 @@ LAST_RUN_FILE="$HOME/.claude/team-context/.last-reindex"
37
37
  if [ -f "$LAST_RUN_FILE" ]; then
38
38
  CHANGED=$(find "$HOME/.claude/projects" -name "*.jsonl" -newer "$LAST_RUN_FILE" -print -quit 2>/dev/null)
39
39
  if [ -z "$CHANGED" ]; then
40
- # No conversation files changed — skip expensive reindex, just sync journals
40
+ # No conversation files changed — skip expensive reindex, just split and sync journals
41
+ $WAYFIND journal split 2>/dev/null
41
42
  $WAYFIND journal sync 2>/dev/null &
42
43
  exit 0
43
44
  fi
@@ -50,5 +51,6 @@ $WAYFIND reindex --conversations-only --export --detect-shifts --write-stats 2>/
50
51
  mkdir -p "$HOME/.claude/team-context"
51
52
  touch "$LAST_RUN_FILE"
52
53
 
53
- # Sync authored journals to team-context repo (backgrounded)
54
+ # Split any unsuffixed journals, then sync to team-context repo (sync backgrounded)
55
+ $WAYFIND journal split 2>/dev/null
54
56
  $WAYFIND journal sync 2>/dev/null &
@@ -45,6 +45,53 @@ Check if `.claude/wayfind.json` already exists in the repo.
45
45
 
46
46
  **Verify `.gitignore` coverage:** `.claude/wayfind.json` must be gitignored. Step 3 already includes it in the required entries — confirm this is still the case. If someone removed it, Step 3 will restore it.
47
47
 
48
+ ## Step 1.7: Embedding provider (first-time only)
49
+
50
+ Read `~/.claude/team-context/context.json`. Check for an `embedding_provider` field.
51
+
52
+ **If `embedding_provider` is already set:** Skip this step silently.
53
+
54
+ **If not set:** Present the following choice to the user:
55
+
56
+ ```
57
+ Wayfind uses embeddings for semantic search (e.g. "find the auth refactor discussion").
58
+
59
+ Choose your embedding provider:
60
+
61
+ 1. Local model (recommended for getting started)
62
+ - No API key needed
63
+ - ~80MB download on first use, cached after that
64
+ - Works offline
65
+ - Good quality for most queries
66
+
67
+ 2. OpenAI (higher quality)
68
+ - Requires OPENAI_API_KEY
69
+ - ~$0/month at normal usage
70
+ - Best retrieval quality
71
+
72
+ 3. Azure OpenAI
73
+ - Requires AZURE_OPENAI_EMBEDDING_ENDPOINT + key
74
+ - For enterprise deployments
75
+
76
+ ⚠️ Switching providers later requires reindexing your content store.
77
+ Run: wayfind reindex --force
78
+ Embeddings are model-specific — mixing models breaks semantic search.
79
+
80
+ Which provider? [1/2/3, default: 1]
81
+ ```
82
+
83
+ Wait for their answer (default to 1 if they press enter). Write their choice to `~/.claude/team-context/context.json` as:
84
+
85
+ ```json
86
+ { "embedding_provider": "local" } // for choice 1
87
+ { "embedding_provider": "openai" } // for choice 2
88
+ { "embedding_provider": "azure" } // for choice 3
89
+ ```
90
+
91
+ (Merge into existing context.json — do not overwrite other fields.)
92
+
93
+ Report: "Embedding provider set to: <name>"
94
+
48
95
  ## Step 2: Create state files (if missing)
49
96
 
50
97
  This repo uses TWO state files with different visibility:
@@ -45,6 +45,38 @@ Before starting, verify:
45
45
 
46
46
  If any prerequisite fails, tell the user what's needed and stop.
47
47
 
48
+ ## Step 0.5: Embedding Provider
49
+
50
+ Read `~/.claude/team-context/context.json`. Check for an `embedding_provider` field.
51
+
52
+ **If already set:** Report the current provider and skip this step.
53
+
54
+ **If not set:** Present this choice:
55
+
56
+ ```
57
+ Wayfind uses embeddings for semantic search across your team's decision trail.
58
+
59
+ Choose your embedding provider:
60
+
61
+ 1. Local model (recommended for getting started)
62
+ - No API key needed, works offline
63
+ - ~80MB download on first use, cached after
64
+ - Good quality for most queries
65
+
66
+ 2. OpenAI (higher quality)
67
+ - Requires OPENAI_API_KEY (~$0/month at normal usage)
68
+
69
+ 3. Azure OpenAI
70
+ - Requires AZURE_OPENAI_EMBEDDING_ENDPOINT + key
71
+
72
+ ⚠️ Switching providers later requires reindexing: wayfind reindex --force
73
+ Embeddings are model-specific — changing models after indexing breaks semantic search.
74
+
75
+ Which provider? [1/2/3, default: 1]
76
+ ```
77
+
78
+ Write choice to `~/.claude/team-context/context.json` as `embedding_provider: "local"|"openai"|"azure"`.
79
+
48
80
  ## Step 1: Team Context Repo
49
81
 
50
82
  This repo holds shared journals, strategy state, digest archives, and the GitHub