@equinor/fusion-framework-cli 14.2.5 → 14.2.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,3 @@
1
1
  // Generated by genversion.
2
- export const version = '14.2.5';
2
+ export const version = '14.2.7';
3
3
  //# sourceMappingURL=version.js.map
@@ -1,4 +1,4 @@
1
- import { ModulesConfigurator, type ModulesInstance } from '@equinor/fusion-framework-module';
1
+ import { ModulesConfigurator, type ModulesInstance, type AnyModule } from '@equinor/fusion-framework-module';
2
2
  import { type MsalNodeModule } from '@equinor/fusion-framework-module-msal-node';
3
3
  import { type HttpModule } from '@equinor/fusion-framework-module-http';
4
4
  import { type ServiceDiscoveryModule } from '@equinor/fusion-framework-module-service-discovery';
@@ -7,7 +7,10 @@ type Modules = [MsalNodeModule, HttpModule, ServiceDiscoveryModule];
7
7
  * Type representing the initialized Fusion Framework instance.
8
8
  * This is a composition of the modules defined in the Modules tuple.
9
9
  */
10
- export type FusionFramework = ModulesInstance<Modules>;
10
+ export type FusionFramework<TExtra extends Array<AnyModule> = []> = ModulesInstance<[
11
+ ...Modules,
12
+ ...TExtra
13
+ ]>;
11
14
  /**
12
15
  * Enum for supported Fusion environments.
13
16
  * Used to select the correct environment for service discovery and authentication.
@@ -81,9 +84,33 @@ export declare const configureFramework: (config: FusionFrameworkSettings) => Mo
81
84
  * based on the supplied configuration. It supports multiple authentication modes
82
85
  * and allows customization of service discovery endpoints and scopes.
83
86
  *
87
+ * When no `configure` callback is provided (or `TExtra` defaults to `[]`) the
88
+ * return type is {@link FusionFramework}. Pass a typed `configure` callback to
89
+ * extend the instance with additional modules:
90
+ *
91
+ * @template TExtra - Tuple of additional module types added via `configure`.
92
+ * Defaults to `[]`, which means the return type is exactly {@link FusionFramework}.
93
+ *
84
94
  * @param config - The settings for framework initialization.
85
- * @returns A promise resolving to the initialized Fusion Framework instance.
95
+ * @param configure - Optional callback to add or further configure modules.
96
+ * Receives a configurator already set up with the base {@link Modules}.
97
+ * @returns A promise resolving to the initialized Fusion Framework instance,
98
+ * typed as `ModulesInstance<[...Modules, ...TExtra]>`.
86
99
  * @throws Will throw if required authentication parameters are missing.
100
+ *
101
+ * @example Basic usage (returns FusionFramework)
102
+ * ```typescript
103
+ * const framework = await initializeFramework({ env, auth });
104
+ * ```
105
+ *
106
+ * @example With extra modules
107
+ * ```typescript
108
+ * const framework = await initializeFramework<[AiModule]>(
109
+ * { env, auth },
110
+ * (configurator) => enableAI(configurator),
111
+ * );
112
+ * framework.ai.useModel('gpt-4.1');
113
+ * ```
87
114
  */
88
- export declare const initializeFramework: (config: FusionFrameworkSettings) => Promise<FusionFramework>;
115
+ export declare const initializeFramework: <TExtra extends Array<AnyModule> = []>(config: FusionFrameworkSettings, configure?: (configurator: ModulesConfigurator<[...Modules, ...TExtra]>) => void) => Promise<FusionFramework<TExtra>>;
89
116
  export {};
@@ -1 +1 @@
1
- export declare const version = "14.2.5";
1
+ export declare const version = "14.2.7";
@@ -15,107 +15,72 @@
15
15
 
16
16
  ---
17
17
 
18
- The Fusion Framework CLI provides powerful AI commands for interacting with Large Language Models (LLMs), generating document embeddings, and performing semantic search. These commands integrate with Azure OpenAI and Azure Cognitive Search to enable intelligent codebase understanding and Q&A capabilities.
18
+ The Fusion Framework CLI provides AI commands for interacting with Large Language Models (LLMs), generating document embeddings, and performing semantic search. These commands integrate with the Fusion AI service via automatic service discovery and MSAL authentication.
19
19
 
20
20
  ## Overview
21
21
 
22
22
  The `ai` command group includes the following subcommands:
23
23
 
24
- - **`ai chat`** - Interactive chat with AI models using vector store context retrieval
24
+ - **`ai chat`** - Interactive chat with AI models
25
25
  - **`ai index add`** - Add documents to the search index by generating embeddings from source files
26
26
  - **`ai index search`** - Search the vector store to validate embeddings and retrieve relevant documents
27
27
  - **`ai index remove`** - Remove documents from the search index by source path or OData filter
28
+ - **`ai index embed`** - Embed a single text string and print the resulting vector (for testing)
28
29
 
29
30
  ## Prerequisites
30
31
 
31
32
  Before using the AI commands, you need:
32
33
 
33
- 1. **Azure OpenAI Service** with:
34
- - Chat model deployment (e.g., GPT-4, GPT-3.5-turbo)
35
- - Embedding model deployment (e.g., text-embedding-ada-002)
34
+ 1. **Access to the Fusion AI service** — the CLI resolves the service endpoint automatically from Fusion service discovery.
35
+ 2. **Azure AD credentials** — the CLI authenticates via MSAL. On first use it will launch an interactive browser login; subsequent runs use the cached token silently.
36
36
 
37
- 2. **Azure Cognitive Search** with:
38
- - A search service instance
39
- - A search index configured for vector search
37
+ ## Authentication
40
38
 
41
- 3. **Configuration** - Via environment variables (`.env` file for local development or GitHub Variables/Secrets for CI/CD)
39
+ The CLI supports three authentication modes, tried in this order:
42
40
 
43
- ## Configuration
41
+ 1. **Static token** (`--token` / `FUSION_TOKEN`) — bypasses MSAL entirely. Useful for CI/CD with pre-obtained tokens.
42
+ 2. **Silent MSAL** — uses cached credentials from a previous interactive login.
43
+ 3. **Interactive login** — if no cached credentials exist, the CLI automatically spawns `ffc auth login` (opens a browser) and retries.
44
44
 
45
- ### Environment Variables
45
+ ### Environment variables
46
46
 
47
- All AI commands are configured via environment variables. For local development, use a `.env` file in your project root. For CI/CD, use GitHub Variables and Secrets.
48
-
49
- #### Local Development (.env file)
50
-
51
- Create a `.env` file in your project root:
52
-
53
- ```bash
54
- # Azure OpenAI Configuration
55
- AZURE_OPENAI_API_KEY=your-api-key
56
- AZURE_OPENAI_API_VERSION=2024-02-15-preview
57
- AZURE_OPENAI_INSTANCE_NAME=your-instance-name
58
- AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=gpt-4
59
- AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-ada-002
60
-
61
- # Azure Cognitive Search Configuration
62
- AZURE_SEARCH_ENDPOINT=https://your-search.search.windows.net
63
- AZURE_SEARCH_API_KEY=your-search-api-key
64
- AZURE_SEARCH_INDEX_NAME=your-index-name
65
- ```
66
-
67
- **Note:** Add `.env` to your `.gitignore` to keep credentials secure.
68
-
69
- #### CI/CD (GitHub Actions)
70
-
71
- For GitHub Actions workflows, configure:
72
- - **Secrets** (Settings → Secrets and variables → Actions → Secrets): For sensitive data
73
- - `AZURE_OPENAI_API_KEY`
74
- - `AZURE_SEARCH_API_KEY`
75
- - **Variables** (Settings → Secrets and variables → Actions → Variables): For non-sensitive configuration
76
- - `AZURE_OPENAI_API_VERSION`
77
- - `AZURE_OPENAI_INSTANCE_NAME`
78
- - `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME`
79
- - `AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`
80
- - `AZURE_SEARCH_ENDPOINT`
81
- - `AZURE_SEARCH_INDEX_NAME`
47
+ | Flag | Environment variable | Default | Description |
48
+ |---|---|---|---|
49
+ | `--env` | `FUSION_ENV` | `ci` | Fusion environment for service discovery |
50
+ | `--token` | `FUSION_TOKEN` | — | Explicit bearer token (skips MSAL auth) |
51
+ | `--tenant-id` | `FUSION_TENANT_ID` | Equinor tenant | Azure AD tenant ID |
52
+ | `--client-id` | `FUSION_CLIENT_ID` | Fusion default | Azure AD application client ID |
53
+ | `--chat-model` | `FUSION_AI_CHAT_MODEL` | `gpt-5.1-chat` | Azure OpenAI chat model deployment |
54
+ | `--embed-model` | `FUSION_AI_EMBED_MODEL` | `text-embedding-3-large` | Azure OpenAI embedding model deployment |
55
+ | `--index-name` | `FUSION_AI_INDEX_NAME` | — | Azure AI Search index name |
82
56
 
83
57
  ### Configuration File
84
58
 
85
- For the `ai index add` command, you can create a `fusion-ai.config.ts` file in your project root:
59
+ For the `ai index add` command, you can create a `fusion-ai.config.ts` file in your project root to control which files are indexed and how metadata is processed:
86
60
 
87
61
  ```typescript
88
- import { configureFusionAI } from '@equinor/fusion-framework-cli-plugin-ai-index';
89
- import type { FusionAIConfigWithIndex } from '@equinor/fusion-framework-cli-plugin-ai-index';
90
-
91
- export default configureFusionAI((): FusionAIConfigWithIndex => {
92
- return {
93
- // Index-specific configuration
94
- index: {
95
- // File patterns to match for processing
96
- patterns: [
97
- 'packages/**/src/**/*.{ts,tsx}',
98
- 'packages/**/docs/**/*.md',
99
- 'packages/**/README.md',
100
- ],
101
- // Embedding generation configuration
102
- embedding: {
103
- // Size of text chunks for embedding
104
- chunkSize: 1000,
105
- // Overlap between chunks to maintain context
106
- chunkOverlap: 200,
107
- },
108
- // Metadata processing configuration
109
- metadata: {
110
- // Optional: Custom metadata processor
111
- // attributeProcessor: (metadata, document) => {
112
- // // Transform or filter metadata attributes
113
- // return metadata;
114
- // },
62
+ export default {
63
+ index: {
64
+ name: 'my-index', // Index name (overridden by --index-name)
65
+ model: 'text-embedding-3-large', // Embedding model (overridden by --embed-model)
66
+ patterns: [
67
+ 'packages/**/src/**/*.{ts,tsx}',
68
+ 'packages/**/docs/**/*.md',
69
+ 'packages/**/README.md',
70
+ ],
71
+ metadata: {
72
+ resolvePackage: true, // Extract package.json metadata
73
+ attributeProcessor: (metadata, document) => {
74
+ // Add custom tags based on file path
75
+ metadata.tags ??= [];
76
+ if (document.metadata.source.includes('cookbooks/')) {
77
+ metadata.tags.push('cookbook');
78
+ }
79
+ return metadata;
115
80
  },
116
81
  },
117
- };
118
- });
82
+ },
83
+ };
119
84
  ```
120
85
 
121
86
  ## Commands
@@ -142,11 +107,9 @@ ffc ai chat [options]
142
107
 
143
108
  | Option | Description | Default |
144
109
  |--------|-------------|---------|
145
- | `--context-limit <number>` | Max context documents to retrieve | `5` |
146
110
  | `--history-limit <number>` | Max messages in conversation history | `20` |
147
111
  | `--verbose` | Enable verbose output | `false` |
148
-
149
- **Note:** Azure configuration (API keys, endpoints, etc.) is provided via environment variables (`.env` file or GitHub Variables/Secrets), not command-line options.
112
+ | `--debug` | Enable debug mode (sets `OPENAI_LOG=debug`, implies `--verbose`) | `false` |
150
113
 
151
114
  #### Interactive Commands
152
115
 
@@ -163,22 +126,21 @@ While in chat mode, you can use these special commands:
163
126
  # Start interactive chat with default settings
164
127
  ffc ai chat
165
128
 
166
- # Increase context retrieval limit for more comprehensive responses
167
- ffc ai chat --context-limit 10
168
-
169
129
  # Increase conversation history limit for longer sessions
170
130
  ffc ai chat --history-limit 100
171
131
 
172
132
  # Enable verbose output for debugging
173
133
  ffc ai chat --verbose
134
+
135
+ # Enable full debug mode (OPENAI_LOG=debug)
136
+ ffc ai chat --debug
174
137
  ```
175
138
 
176
139
  #### How It Works
177
140
 
178
- 1. **Context Retrieval**: When you ask a question, the system automatically searches the vector store for relevant documents
179
- 2. **Message Formatting**: Retrieved context is included in the system message to provide the AI with relevant information
180
- 3. **Streaming Response**: The AI response streams in real-time for immediate feedback
181
- 4. **History Management**: Conversation history is automatically compressed when it reaches 10 messages to maintain context while reducing token usage
141
+ 1. **Message Formatting**: User messages and conversation history are sent to the chat model
142
+ 2. **Streaming Response**: The AI response streams in real-time for immediate feedback
143
+ 3. **History Management**: Conversation history is automatically compressed when it reaches 10 messages using AI summarisation to maintain context while reducing token usage
182
144
 
183
145
  ### `ai index add`
184
146
 
@@ -210,8 +172,6 @@ ffc ai index add [options] [glob-patterns...]
210
172
  | `--base-ref <ref>` | Git reference to compare against | `HEAD~1` |
211
173
  | `--clean` | Delete all existing documents before processing | `false` |
212
174
 
213
- **Note:** Azure configuration (API keys, endpoints, etc.) is provided via environment variables (`.env` file or GitHub Variables/Secrets), not command-line options.
214
-
215
175
  #### Examples
216
176
 
217
177
  ```bash
@@ -296,8 +256,6 @@ ffc ai index search <query> [options]
296
256
  | `--raw` | Output raw metadata without normalization | `false` |
297
257
  | `--verbose` | Enable verbose output | `false` |
298
258
 
299
- **Note:** Azure configuration (API keys, endpoints, etc.) is provided via environment variables (`.env` file or GitHub Variables/Secrets), not command-line options.
300
-
301
259
  #### Examples
302
260
 
303
261
  ```bash
@@ -381,8 +339,6 @@ ffc ai index remove [options] [source-paths...]
381
339
  | `--filter <expression>` | Raw OData filter expression for advanced selection | - |
382
340
  | `--dry-run` | Preview what would be removed without deleting anything | `false` |
383
341
 
384
- **Note:** Azure configuration (API keys, endpoints, etc.) is provided via environment variables (`.env` file or GitHub Variables/Secrets), not command-line options.
385
-
386
342
  #### Examples
387
343
 
388
344
  ```bash
@@ -477,12 +433,11 @@ Access via `metadata/attributes/any(a: a/key eq '...' and a/value eq '...')`:
477
433
 
478
434
  ### Initial Setup
479
435
 
480
- 1. **Create `.env` file** in your project root with Azure configuration:
436
+ 1. **Authenticate** with your Equinor / Fusion identity:
481
437
  ```bash
482
- # See Configuration section above for all required variables
483
- AZURE_OPENAI_API_KEY=your-key
484
- AZURE_OPENAI_INSTANCE_NAME=your-instance
485
- # ... other variables
438
+ # Interactive browser login (opens a browser window)
439
+ ffc ai chat
440
+ # The CLI will prompt you to sign in on first use
486
441
  ```
487
442
 
488
443
  2. **Create configuration file** (optional):
@@ -548,13 +503,10 @@ jobs:
548
503
  run: |
549
504
  ffc ai index add --diff
550
505
  env:
551
- AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
552
- AZURE_OPENAI_API_VERSION: ${{ vars.AZURE_OPENAI_API_VERSION }}
553
- AZURE_OPENAI_INSTANCE_NAME: ${{ vars.AZURE_OPENAI_INSTANCE_NAME }}
554
- AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }}
555
- AZURE_SEARCH_ENDPOINT: ${{ vars.AZURE_SEARCH_ENDPOINT }}
556
- AZURE_SEARCH_API_KEY: ${{ secrets.AZURE_SEARCH_API_KEY }}
557
- AZURE_SEARCH_INDEX_NAME: ${{ vars.AZURE_SEARCH_INDEX_NAME }}
506
+ FUSION_TOKEN: ${{ secrets.FUSION_TOKEN }}
507
+ FUSION_ENV: ${{ vars.FUSION_ENV }}
508
+ FUSION_AI_EMBED_MODEL: ${{ vars.FUSION_AI_EMBED_MODEL }}
509
+ FUSION_AI_INDEX_NAME: ${{ vars.FUSION_AI_INDEX_NAME }}
558
510
  ```
559
511
 
560
512
  #### Advanced Workflow with SHA Tracking
@@ -606,13 +558,10 @@ jobs:
606
558
  ffc ai index add
607
559
  fi
608
560
  env:
609
- AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
610
- AZURE_OPENAI_API_VERSION: ${{ vars.AZURE_OPENAI_API_VERSION }}
611
- AZURE_OPENAI_INSTANCE_NAME: ${{ vars.AZURE_OPENAI_INSTANCE_NAME }}
612
- AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }}
613
- AZURE_SEARCH_API_KEY: ${{ secrets.AZURE_SEARCH_API_KEY }}
614
- AZURE_SEARCH_ENDPOINT: ${{ vars.AZURE_SEARCH_ENDPOINT }}
615
- AZURE_SEARCH_INDEX_NAME: ${{ vars.AZURE_SEARCH_INDEX_NAME }}
561
+ FUSION_TOKEN: ${{ secrets.FUSION_TOKEN }}
562
+ FUSION_ENV: ${{ vars.FUSION_ENV }}
563
+ FUSION_AI_EMBED_MODEL: ${{ vars.FUSION_AI_EMBED_MODEL }}
564
+ FUSION_AI_INDEX_NAME: ${{ vars.FUSION_AI_INDEX_NAME }}
616
565
 
617
566
  - name: Save current SHA
618
567
  run: echo ${{ github.sha }} > .index-base-ref
@@ -633,40 +582,31 @@ jobs:
633
582
  - More efficient than comparing against a fixed branch reference
634
583
 
635
584
  **GitHub Secrets and Variables:**
636
- - **Secrets** (Settings → Secrets and variables → Actions → Secrets): Store sensitive data like API keys
637
- - `AZURE_OPENAI_API_KEY`
638
- - `AZURE_SEARCH_API_KEY`
585
+ - **Secrets** (Settings → Secrets and variables → Actions → Secrets): Store sensitive data
586
+ - `FUSION_TOKEN` — a pre-obtained bearer token for the Fusion service
639
587
  - **Variables** (Settings → Secrets and variables → Actions → Variables): Store non-sensitive configuration
640
- - `AZURE_OPENAI_API_VERSION`
641
- - `AZURE_OPENAI_INSTANCE_NAME`
642
- - `AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME`
643
- - `AZURE_SEARCH_ENDPOINT`
644
- - `AZURE_SEARCH_INDEX_NAME`
588
+ - `FUSION_ENV` — e.g. `ci` or `fprd`
589
+ - `FUSION_AI_EMBED_MODEL` — embedding model deployment name
590
+ - `FUSION_AI_INDEX_NAME` — target search index name
645
591
 
646
592
  ## Troubleshooting
647
593
 
648
594
  ### Common Issues
649
595
 
650
- **Error: "API key is required"**
651
- - Ensure environment variables are set in your `.env` file
652
- - Verify the `.env` file is in your project root
653
- - Check that variable names match exactly (case-sensitive)
654
- - For CI/CD, verify GitHub Secrets and Variables are configured
596
+ **Error: "Authentication failed"**
597
+ - Ensure you have a valid Equinor identity and can sign in via the browser
598
+ - If using `--token`, verify the token is still valid (tokens expire)
599
+ - For CI/CD, verify `FUSION_TOKEN` is set in GitHub Secrets
655
600
 
656
- **Error: "Azure Search index name is required"**
657
- - Ensure `AZURE_SEARCH_INDEX_NAME` is set
658
- - Verify the index exists in your Azure Search service
601
+ **Error: "Index name is required"**
602
+ - Pass `--index-name <name>` or set `FUSION_AI_INDEX_NAME`
603
+ - Verify the index exists in the Fusion AI Search service
659
604
 
660
605
  **No results from search**
661
606
  - Verify embeddings have been generated: `ffc ai index add --dry-run ./src`
662
607
  - Check that the index contains documents
663
608
  - Try a broader search query
664
609
 
665
- **Chat not retrieving context**
666
- - Verify vector store is configured correctly in your `.env` file
667
- - Check that embeddings exist in the index
668
- - Ensure `AZURE_SEARCH_INDEX_NAME` is set correctly
669
-
670
610
  **Embeddings command processes no files**
671
611
  - Check file patterns in config or command arguments
672
612
  - Verify files match the patterns (use `--dry-run` to debug)
@@ -674,26 +614,26 @@ jobs:
674
614
 
675
615
  ### Debug Mode
676
616
 
677
- Use the `--verbose` flag for detailed output:
617
+ Use the `--debug` flag for detailed output:
678
618
 
679
619
  ```bash
680
- ffc ai chat --verbose
681
- ffc ai index search "query" --verbose
620
+ ffc ai chat --debug
621
+ ffc ai index search "query" --debug
682
622
  ```
683
623
 
684
624
  ## Best Practices
685
625
 
686
- 1. **Use environment variables** for sensitive credentials
626
+ 1. **Use `--token` or `FUSION_TOKEN` in CI/CD** to avoid interactive auth
687
627
  2. **Start with `--dry-run`** when testing new configurations
688
628
  3. **Use `--diff` in CI/CD** to only process changed files
689
629
  4. **Regular re-indexing** with `--clean` to keep index fresh
690
- 5. **Monitor token usage** - embeddings and chat consume API tokens
630
+ 5. **Monitor token usage** — embeddings and chat consume API tokens
691
631
  6. **Test search queries** before relying on chat context retrieval
692
632
  7. **Keep configuration files** in version control (without secrets)
693
633
 
694
634
  ## Additional Resources
695
635
 
636
+ - [Fusion Framework Documentation](https://github.com/equinor/fusion-framework)
696
637
  - [Azure OpenAI Documentation](https://learn.microsoft.com/azure/ai-services/openai/)
697
- - [Azure Cognitive Search Documentation](https://learn.microsoft.com/azure/search/)
698
- - [LangChain Documentation](https://js.langchain.com/) (used internally for RAG)
638
+ - [Azure AI Search Documentation](https://learn.microsoft.com/azure/search/)
699
639
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@equinor/fusion-framework-cli",
3
- "version": "14.2.5",
3
+ "version": "14.2.7",
4
4
  "keywords": [
5
5
  "Fusion",
6
6
  "Fusion Framework",
@@ -111,11 +111,11 @@
111
111
  "vite": "^8.0.0",
112
112
  "vite-tsconfig-paths": "^6.0.4",
113
113
  "zod": "^4.3.6",
114
- "@equinor/fusion-framework-dev-portal": "5.1.6",
115
- "@equinor/fusion-framework-module-msal-node": "4.0.2",
116
- "@equinor/fusion-imports": "2.0.0",
114
+ "@equinor/fusion-framework-dev-server": "2.0.4",
115
+ "@equinor/fusion-framework-dev-portal": "5.1.7",
116
+ "@equinor/fusion-framework-module-msal-node": "4.1.0",
117
117
  "@equinor/fusion-framework-vite-plugin-raw-imports": "2.0.0",
118
- "@equinor/fusion-framework-dev-server": "2.0.3"
118
+ "@equinor/fusion-imports": "2.0.0"
119
119
  },
120
120
  "devDependencies": {
121
121
  "@rollup/plugin-commonjs": "^29.0.0",
@@ -141,8 +141,8 @@
141
141
  "@equinor/fusion-framework-module": "6.0.0",
142
142
  "@equinor/fusion-framework-module-app": "8.0.1",
143
143
  "@equinor/fusion-framework-module-http": "8.0.0",
144
- "@equinor/fusion-framework-react-router": "1.2.3",
145
- "@equinor/fusion-framework-module-service-discovery": "10.0.0"
144
+ "@equinor/fusion-framework-module-service-discovery": "10.0.0",
145
+ "@equinor/fusion-framework-react-router": "2.0.1"
146
146
  },
147
147
  "peerDependenciesMeta": {
148
148
  "typescript": {