wayfind 2.0.49 → 2.0.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -356,6 +356,59 @@ async function detect() {
356
356
 
357
357
  const OPENAI_EMBEDDINGS_URL = 'https://api.openai.com/v1/embeddings';
358
358
  const DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small';
359
+ const LOCAL_EMBEDDING_MODEL = 'Xenova/all-MiniLM-L6-v2';
360
+
361
+ // Cached pipeline instance — expensive to initialize, reuse across calls.
362
+ let _localPipeline = null;
363
+
364
+ /**
365
+ * Try to generate an embedding using the local ONNX model (@xenova/transformers).
366
+ * Returns null if the package is not installed or the model fails to load.
367
+ * Downloads the model (~80MB) on first use into the transformers cache.
368
+ * @param {string} text
369
+ * @returns {Promise<number[]|null>}
370
+ */
371
+ async function generateEmbeddingLocal(text) {
372
+ try {
373
+ if (!_localPipeline) {
374
+ // Dynamic require — optional dep, may not be installed
375
+ const { pipeline, env } = require('@xenova/transformers');
376
+ // Suppress progress output in non-interactive contexts
377
+ if (!process.stdout.isTTY) {
378
+ env.allowLocalModels = false;
379
+ }
380
+ process.stderr.write('[wayfind] Loading local embedding model (first use — may take a moment)...\n');
381
+ _localPipeline = await pipeline('feature-extraction', LOCAL_EMBEDDING_MODEL);
382
+ }
383
+ const output = await _localPipeline(text, { pooling: 'mean', normalize: true });
384
+ return Array.from(output.data);
385
+ } catch (_) {
386
+ return null;
387
+ }
388
+ }
389
+
390
+ /**
391
+ * Detect which embedding provider is active based on env vars and installed packages.
392
+ * Returns an object describing the provider so callers can surface this to users.
393
+ * @returns {{ provider: string, model: string, requiresKey: boolean, available: boolean }}
394
+ */
395
+ function getEmbeddingProviderInfo() {
396
+ if (isSimulation()) {
397
+ return { provider: 'simulation', model: 'fake-1536d', requiresKey: false, available: true };
398
+ }
399
+ if (process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT) {
400
+ const hasKey = !!process.env.AZURE_OPENAI_EMBEDDING_KEY;
401
+ return { provider: 'azure', model: process.env.AZURE_OPENAI_EMBEDDING_DEPLOYMENT || 'text-embedding-3-small', requiresKey: true, available: hasKey };
402
+ }
403
+ if (process.env.OPENAI_API_KEY) {
404
+ return { provider: 'openai', model: DEFAULT_EMBEDDING_MODEL, requiresKey: true, available: true };
405
+ }
406
+ try {
407
+ require.resolve('@xenova/transformers');
408
+ return { provider: 'local', model: LOCAL_EMBEDDING_MODEL, requiresKey: false, available: true };
409
+ } catch (_) {}
410
+ return { provider: 'none', model: null, requiresKey: false, available: false };
411
+ }
359
412
 
360
413
  /**
361
414
  * Generate an embedding vector for the given text.
@@ -392,7 +445,14 @@ async function generateEmbedding(text, options = {}) {
392
445
  const apiKeyEnv = options.apiKeyEnv || 'OPENAI_API_KEY';
393
446
  const apiKey = process.env[apiKeyEnv];
394
447
  if (!apiKey) {
395
- throw new Error(`Embeddings: Missing API key. Set ${apiKeyEnv} environment variable.`);
448
+ // No cloud key — try local model before failing
449
+ const localVec = await generateEmbeddingLocal(text);
450
+ if (localVec !== null) return localVec;
451
+ throw new Error(
452
+ 'Embeddings: No provider configured.\n' +
453
+ ' Option 1 (cloud): set OPENAI_API_KEY or AZURE_OPENAI_EMBEDDING_ENDPOINT\n' +
454
+ ' Option 2 (local, no key): npm install -g @xenova/transformers'
455
+ );
396
456
  }
397
457
 
398
458
  const baseUrl = options.baseUrl || OPENAI_EMBEDDINGS_URL.replace('/embeddings', '');
@@ -477,5 +537,6 @@ module.exports = {
477
537
  call,
478
538
  detect,
479
539
  generateEmbedding,
540
+ getEmbeddingProviderInfo,
480
541
  isSimulation,
481
542
  };
@@ -426,7 +426,7 @@ async function indexJournals(options = {}) {
426
426
  const storePath = options.storePath || resolveStorePath();
427
427
  const doEmbeddings = options.embeddings !== undefined
428
428
  ? options.embeddings
429
- : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation());
429
+ : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation() || llm.getEmbeddingProviderInfo().available);
430
430
 
431
431
  if (!journalDir || !storePath) {
432
432
  throw new Error('Journal directory and store path are required.');
@@ -440,7 +440,26 @@ async function indexJournals(options = {}) {
440
440
  const backend = getBackend(storePath);
441
441
  const existingIndex = backend.loadIndex();
442
442
  const existingEntries = existingIndex ? existingIndex.entries : {};
443
- const existingEmbeddings = doEmbeddings ? backend.loadEmbeddings() : {};
443
+
444
+ // Detect embedding model mismatch — if the stored model differs from the current
445
+ // provider, treat all entries as needing re-embedding so the index stays consistent.
446
+ // A silent auto-migration is better than silently returning garbage similarity scores.
447
+ const currentProviderInfo = llm.getEmbeddingProviderInfo();
448
+ const storedEmbeddingModel = existingIndex ? existingIndex.embedding_model : null;
449
+ const modelChanged = doEmbeddings && storedEmbeddingModel && currentProviderInfo.model &&
450
+ storedEmbeddingModel !== currentProviderInfo.model;
451
+ if (modelChanged) {
452
+ process.stderr.write(
453
+ `[wayfind] Embedding model changed: ${storedEmbeddingModel} → ${currentProviderInfo.model}\n` +
454
+ `[wayfind] Re-embedding all entries for consistent search results...\n`
455
+ );
456
+ // Mark all existing entries as needing re-embedding
457
+ for (const entry of Object.values(existingEntries)) {
458
+ entry.hasEmbedding = false;
459
+ }
460
+ }
461
+
462
+ const existingEmbeddings = doEmbeddings ? (modelChanged ? {} : backend.loadEmbeddings()) : {};
444
463
 
445
464
  // Parse all journal files
446
465
  const files = fs.readdirSync(journalDir).filter(f => DATE_FILE_RE.test(f)).sort();
@@ -546,11 +565,13 @@ async function indexJournals(options = {}) {
546
565
  stats.entryCount = Object.keys(finalEntries).length;
547
566
 
548
567
  // Save
568
+ const embeddingModel = doEmbeddings ? llm.getEmbeddingProviderInfo().model : null;
549
569
  const index = {
550
570
  version: INDEX_VERSION,
551
571
  lastUpdated: Date.now(),
552
572
  entryCount: stats.entryCount,
553
573
  entries: finalEntries,
574
+ ...(embeddingModel ? { embedding_model: embeddingModel } : {}),
554
575
  };
555
576
 
556
577
  backend.saveIndex(index);
@@ -563,6 +584,7 @@ async function indexJournals(options = {}) {
563
584
  entry_count: stats.entryCount,
564
585
  new_entries: stats.newEntries,
565
586
  has_embeddings: doEmbeddings,
587
+ embedding_model: embeddingModel,
566
588
  });
567
589
 
568
590
  return stats;
@@ -1375,7 +1397,7 @@ async function indexConversations(options = {}) {
1375
1397
  const storePath = options.storePath || resolveStorePath();
1376
1398
  const doEmbeddings = options.embeddings !== undefined
1377
1399
  ? options.embeddings
1378
- : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation());
1400
+ : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation() || llm.getEmbeddingProviderInfo().available);
1379
1401
 
1380
1402
  if (!projectsDir || !storePath) {
1381
1403
  throw new Error('Projects directory and store path are required.');
@@ -1397,9 +1419,24 @@ async function indexConversations(options = {}) {
1397
1419
  // Load existing indexes
1398
1420
  const backend = getBackend(storePath);
1399
1421
  const existingIndex = backend.loadIndex() || { version: INDEX_VERSION, entries: {}, lastUpdated: Date.now(), entryCount: 0 };
1400
- const existingEmbeddings = doEmbeddings ? backend.loadEmbeddings() : {};
1401
1422
  const convIndex = backend.loadConversationIndex();
1402
1423
 
1424
+ // Auto-migrate embeddings if model changed
1425
+ const _convProviderInfo = llm.getEmbeddingProviderInfo();
1426
+ const _convStoredModel = existingIndex.embedding_model || null;
1427
+ const _convModelChanged = doEmbeddings && _convStoredModel && _convProviderInfo.model &&
1428
+ _convStoredModel !== _convProviderInfo.model;
1429
+ if (_convModelChanged) {
1430
+ process.stderr.write(
1431
+ `[wayfind] Embedding model changed: ${_convStoredModel} → ${_convProviderInfo.model}\n` +
1432
+ `[wayfind] Re-embedding conversation entries...\n`
1433
+ );
1434
+ for (const entry of Object.values(existingIndex.entries)) {
1435
+ entry.hasEmbedding = false;
1436
+ }
1437
+ }
1438
+ const existingEmbeddings = doEmbeddings ? (_convModelChanged ? {} : backend.loadEmbeddings()) : {};
1439
+
1403
1440
  // Compute since cutoff
1404
1441
  let sinceCutoff = 0;
1405
1442
  if (options.since) {
@@ -1566,6 +1603,10 @@ async function indexConversations(options = {}) {
1566
1603
 
1567
1604
  // Save everything
1568
1605
  existingIndex.entryCount = Object.keys(existingIndex.entries).length;
1606
+ if (doEmbeddings) {
1607
+ const model = llm.getEmbeddingProviderInfo().model;
1608
+ if (model) existingIndex.embedding_model = model;
1609
+ }
1569
1610
  backend.saveIndex(existingIndex);
1570
1611
  if (doEmbeddings) {
1571
1612
  backend.saveEmbeddings(existingEmbeddings);
@@ -1801,7 +1842,7 @@ async function indexSignals(options = {}) {
1801
1842
  const storePath = options.storePath || resolveStorePath();
1802
1843
  const doEmbeddings = options.embeddings !== undefined
1803
1844
  ? options.embeddings
1804
- : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation());
1845
+ : !!(process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_EMBEDDING_ENDPOINT || llm.isSimulation() || llm.getEmbeddingProviderInfo().available);
1805
1846
 
1806
1847
  if (!signalsDir || !storePath) {
1807
1848
  throw new Error('Signals directory and store path are required.');
@@ -1814,7 +1855,18 @@ async function indexSignals(options = {}) {
1814
1855
  // Load existing index (contains journal + conversation entries too)
1815
1856
  const backend = getBackend(storePath);
1816
1857
  const existingIndex = backend.loadIndex() || { version: INDEX_VERSION, entries: {}, lastUpdated: Date.now(), entryCount: 0 };
1817
- const existingEmbeddings = doEmbeddings ? backend.loadEmbeddings() : {};
1858
+
1859
+ // Auto-migrate embeddings if model changed
1860
+ const _sigProviderInfo = llm.getEmbeddingProviderInfo();
1861
+ const _sigStoredModel = existingIndex.embedding_model || null;
1862
+ const _sigModelChanged = doEmbeddings && _sigStoredModel && _sigProviderInfo.model &&
1863
+ _sigStoredModel !== _sigProviderInfo.model;
1864
+ if (_sigModelChanged) {
1865
+ for (const entry of Object.values(existingIndex.entries)) {
1866
+ entry.hasEmbedding = false;
1867
+ }
1868
+ }
1869
+ const existingEmbeddings = doEmbeddings ? (_sigModelChanged ? {} : backend.loadEmbeddings()) : {};
1818
1870
 
1819
1871
  const stats = { fileCount: 0, newEntries: 0, updatedEntries: 0, skippedEntries: 0 };
1820
1872
 
@@ -2076,6 +2128,10 @@ async function indexSignals(options = {}) {
2076
2128
 
2077
2129
  // Save
2078
2130
  existingIndex.entryCount = Object.keys(existingIndex.entries).length;
2131
+ if (doEmbeddings) {
2132
+ const model = llm.getEmbeddingProviderInfo().model;
2133
+ if (model) existingIndex.embedding_model = model;
2134
+ }
2079
2135
  backend.saveIndex(existingIndex);
2080
2136
  if (doEmbeddings) {
2081
2137
  backend.saveEmbeddings(existingEmbeddings);
@@ -2376,6 +2432,12 @@ module.exports = {
2376
2432
  saveIndex: (storePath, index) => getBackend(storePath || resolveStorePath()).saveIndex(index),
2377
2433
  loadEmbeddings: (storePath) => getBackend(storePath || resolveStorePath()).loadEmbeddings(),
2378
2434
  saveEmbeddings: (storePath, embeddings) => getBackend(storePath || resolveStorePath()).saveEmbeddings(embeddings),
2435
+ getStoredEmbeddingModel: (storePath) => {
2436
+ try {
2437
+ const idx = getBackend(storePath || resolveStorePath()).loadIndex();
2438
+ return idx ? (idx.embedding_model || null) : null;
2439
+ } catch { return null; }
2440
+ },
2379
2441
  loadConversationIndex: (storePath) => getBackend(storePath || resolveStorePath()).loadConversationIndex(),
2380
2442
  saveConversationIndex: (storePath, convIndex) => getBackend(storePath || resolveStorePath()).saveConversationIndex(convIndex),
2381
2443
 
@@ -38,6 +38,7 @@ const digest = require('./digest');
38
38
  const slack = require('./slack');
39
39
  const slackBot = require('./slack-bot');
40
40
  const contentStore = require('./content-store');
41
+ const llm = require('./connectors/llm');
41
42
  const rebuildStatus = require('./rebuild-status');
42
43
  const telemetry = require('./telemetry');
43
44
 
@@ -1310,7 +1311,20 @@ async function runReindex(args) {
1310
1311
  const force = args.includes('--force');
1311
1312
 
1312
1313
  if (force) {
1313
- console.log('Force mode: clearing content store for full reindex...');
1314
+ // Warn if stored embeddings used a different model than the current provider
1315
+ const storedModel = contentStore.getStoredEmbeddingModel();
1316
+ const currentProvider = llm.getEmbeddingProviderInfo();
1317
+ if (storedModel && currentProvider.model && storedModel !== currentProvider.model) {
1318
+ console.log(`⚠️ Embedding model mismatch:`);
1319
+ console.log(` Stored embeddings: ${storedModel}`);
1320
+ console.log(` Current provider: ${currentProvider.model} (${currentProvider.provider})`);
1321
+ console.log(` All embeddings will be cleared and regenerated with the current provider.`);
1322
+ console.log(` Entries without embeddings will fall back to full-text search until reindexed.`);
1323
+ } else if (storedModel) {
1324
+ console.log(`Force mode: clearing content store — will regenerate ${storedModel} embeddings...`);
1325
+ } else {
1326
+ console.log('Force mode: clearing content store for full reindex...');
1327
+ }
1314
1328
  try {
1315
1329
  const backend = contentStore.getBackend();
1316
1330
  const emptyIndex = { version: contentStore.INDEX_VERSION, entries: {}, lastUpdated: Date.now(), entryCount: 0 };
@@ -3581,7 +3595,7 @@ function contextSync() {
3581
3595
  *
3582
3596
  * @param {string[]} args - CLI arguments (--quiet suppresses output)
3583
3597
  */
3584
- function contextPull(args) {
3598
+ async function contextPull(args) {
3585
3599
  const quiet = args.includes('--quiet');
3586
3600
  const background = args.includes('--background');
3587
3601
  const log = quiet ? () => {} : console.log;
@@ -3614,6 +3628,16 @@ function contextPull(args) {
3614
3628
  log('[wayfind] Pulled latest team-context');
3615
3629
  // Mark success — doctor checks this to warn on prolonged failures
3616
3630
  try { fs.writeFileSync(markerFile, new Date().toISOString()); } catch {}
3631
+ // Index any new team journals into the local content store
3632
+ const journalsDir = path.join(teamPath, 'journals');
3633
+ if (fs.existsSync(journalsDir)) {
3634
+ try {
3635
+ const stats = await contentStore.indexJournals({ journalDir: journalsDir });
3636
+ if (!quiet && stats.newEntries > 0) {
3637
+ log(`[wayfind] Indexed ${stats.newEntries} new team journal entries`);
3638
+ }
3639
+ } catch (_) {}
3640
+ }
3617
3641
  } else if (result.error && result.error.code === 'ETIMEDOUT') {
3618
3642
  log('[wayfind] Team-context pull timed out — using local state');
3619
3643
  } else {
@@ -5404,12 +5428,48 @@ async function runContainerDoctor() {
5404
5428
  console.log(' Run: wayfind reindex');
5405
5429
  issues++;
5406
5430
  }
5431
+
5432
+ // Embedding provider + model mismatch check
5433
+ try {
5434
+ const providerInfo = llm.getEmbeddingProviderInfo();
5435
+ const storedModel = contentStore.getStoredEmbeddingModel(storePath);
5436
+ if (!providerInfo.available) {
5437
+ warn('Embedding provider: none configured — semantic search unavailable (full-text only)');
5438
+ console.log(' Option 1 (cloud): set OPENAI_API_KEY');
5439
+ console.log(' Option 2 (local, no key): npm install -g @xenova/transformers');
5440
+ issues++;
5441
+ } else {
5442
+ const label = providerInfo.provider === 'local'
5443
+ ? `local (${providerInfo.model}) — no API key required`
5444
+ : `${providerInfo.provider} (${providerInfo.model})`;
5445
+ pass(`Embedding provider: ${label}`);
5446
+ if (storedModel && providerInfo.model && storedModel !== providerInfo.model) {
5447
+ warn(`Embedding model mismatch: stored=${storedModel}, current=${providerInfo.model}`);
5448
+ console.log(' Semantic search results may be degraded — stored embeddings are from a different model.');
5449
+ console.log(' Fix: wayfind reindex --force (clears and regenerates all embeddings)');
5450
+ issues++;
5451
+ }
5452
+ }
5453
+ } catch (_) {}
5407
5454
  } catch (e) {
5408
5455
  warn(`Embedding coverage: error — ${e.message}`);
5409
5456
  issues++;
5410
5457
  }
5411
5458
  }
5412
5459
 
5460
+ // ── Embedding provider (standalone, when no entries yet) ───────────────────
5461
+ if (entryCount === 0) {
5462
+ try {
5463
+ const providerInfo = llm.getEmbeddingProviderInfo();
5464
+ if (!providerInfo.available) {
5465
+ warn('Embedding provider: none — semantic search will not be available');
5466
+ console.log(' Option 1 (cloud): set OPENAI_API_KEY');
5467
+ console.log(' Option 2 (local, no key): npm install -g @xenova/transformers');
5468
+ issues++;
5469
+ }
5470
+ } catch (_) {}
5471
+ }
5472
+
5413
5473
  // 4. Signal freshness — are there signal files from today?
5414
5474
  const signalsDir = path.join(EFFECTIVE_DIR, 'signals');
5415
5475
  const today = new Date().toISOString().slice(0, 10); // YYYY-MM-DD
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wayfind",
3
- "version": "2.0.49",
3
+ "version": "2.0.51",
4
4
  "description": "Team decision trail for AI-assisted development. The connective tissue between product, engineering, and strategy.",
5
5
  "bin": {
6
6
  "wayfind": "./bin/team-context.js",
@@ -54,6 +54,7 @@
54
54
  "posthog-node": "^5.28.0"
55
55
  },
56
56
  "optionalDependencies": {
57
- "better-sqlite3": "^11.0.0"
57
+ "better-sqlite3": "^11.0.0",
58
+ "@xenova/transformers": "^2.17.2"
58
59
  }
59
60
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wayfind",
3
- "version": "2.0.48",
3
+ "version": "2.0.49",
4
4
  "description": "Team decision trail for AI-assisted development. Session memory, decision journals, and team digests.",
5
5
  "author": {
6
6
  "name": "Wayfind",
@@ -9,6 +9,14 @@
9
9
  "homepage": "https://github.com/usewayfind/wayfind",
10
10
  "repository": "https://github.com/usewayfind/wayfind",
11
11
  "license": "Apache-2.0",
12
- "keywords": ["team", "context", "memory", "decisions", "digest", "journal", "session"],
12
+ "keywords": [
13
+ "team",
14
+ "context",
15
+ "memory",
16
+ "decisions",
17
+ "digest",
18
+ "journal",
19
+ "session"
20
+ ],
13
21
  "skills": "./skills/"
14
22
  }
@@ -45,6 +45,53 @@ Check if `.claude/wayfind.json` already exists in the repo.
45
45
 
46
46
  **Verify `.gitignore` coverage:** `.claude/wayfind.json` must be gitignored. Step 3 already includes it in the required entries — confirm this is still the case. If someone removed it, Step 3 will restore it.
47
47
 
48
+ ## Step 1.7: Embedding provider (first-time only)
49
+
50
+ Read `~/.claude/team-context/context.json`. Check for an `embedding_provider` field.
51
+
52
+ **If `embedding_provider` is already set:** Skip this step silently.
53
+
54
+ **If not set:** Present the following choice to the user:
55
+
56
+ ```
57
+ Wayfind uses embeddings for semantic search (e.g. "find the auth refactor discussion").
58
+
59
+ Choose your embedding provider:
60
+
61
+ 1. Local model (recommended for getting started)
62
+ - No API key needed
63
+ - ~80MB download on first use, cached after that
64
+ - Works offline
65
+ - Good quality for most queries
66
+
67
+ 2. OpenAI (higher quality)
68
+ - Requires OPENAI_API_KEY
69
+ - ~$0/month at normal usage
70
+ - Best retrieval quality
71
+
72
+ 3. Azure OpenAI
73
+ - Requires AZURE_OPENAI_EMBEDDING_ENDPOINT + key
74
+ - For enterprise deployments
75
+
76
+ ⚠️ Switching providers later requires reindexing your content store.
77
+ Run: wayfind reindex --force
78
+ Embeddings are model-specific — mixing models breaks semantic search.
79
+
80
+ Which provider? [1/2/3, default: 1]
81
+ ```
82
+
83
+ Wait for their answer (default to 1 if they press enter). Write their choice to `~/.claude/team-context/context.json` as:
84
+
85
+ ```json
86
+ { "embedding_provider": "local" } // for choice 1
87
+ { "embedding_provider": "openai" } // for choice 2
88
+ { "embedding_provider": "azure" } // for choice 3
89
+ ```
90
+
91
+ (Merge into existing context.json — do not overwrite other fields.)
92
+
93
+ Report: "Embedding provider set to: <name>"
94
+
48
95
  ## Step 2: Create state files (if missing)
49
96
 
50
97
  This repo uses TWO state files with different visibility:
@@ -45,6 +45,38 @@ Before starting, verify:
45
45
 
46
46
  If any prerequisite fails, tell the user what's needed and stop.
47
47
 
48
+ ## Step 0.5: Embedding Provider
49
+
50
+ Read `~/.claude/team-context/context.json`. Check for an `embedding_provider` field.
51
+
52
+ **If already set:** Report the current provider and skip this step.
53
+
54
+ **If not set:** Present this choice:
55
+
56
+ ```
57
+ Wayfind uses embeddings for semantic search across your team's decision trail.
58
+
59
+ Choose your embedding provider:
60
+
61
+ 1. Local model (recommended for getting started)
62
+ - No API key needed, works offline
63
+ - ~80MB download on first use, cached after
64
+ - Good quality for most queries
65
+
66
+ 2. OpenAI (higher quality)
67
+ - Requires OPENAI_API_KEY (~$0/month at normal usage)
68
+
69
+ 3. Azure OpenAI
70
+ - Requires AZURE_OPENAI_EMBEDDING_ENDPOINT + key
71
+
72
+ ⚠️ Switching providers later requires reindexing: wayfind reindex --force
73
+ Embeddings are model-specific — changing models after indexing breaks semantic search.
74
+
75
+ Which provider? [1/2/3, default: 1]
76
+ ```
77
+
78
+ Write choice to `~/.claude/team-context/context.json` as `embedding_provider: "local"|"openai"|"azure"`.
79
+
48
80
  ## Step 1: Team Context Repo
49
81
 
50
82
  This repo holds shared journals, strategy state, digest archives, and the GitHub