@mono-labs/cli 0.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -0
- package/bin/haste.js +2 -0
- package/index.js +3 -0
- package/lib/app.js +28 -0
- package/lib/commands/build/index.js +71 -0
- package/lib/commands/build-process/index.js +412 -0
- package/lib/commands/deploy/index.js +44 -0
- package/lib/commands/destroy.js +27 -0
- package/lib/commands/dev/dev-editor.js +265 -0
- package/lib/commands/dev/index.js +22 -0
- package/lib/commands/dev/ngrok.js +88 -0
- package/lib/commands/generate/generateSeed.js +224 -0
- package/lib/commands/generate/index.js +30 -0
- package/lib/commands/init/index.js +37 -0
- package/lib/commands/loadFromRoot.js +38 -0
- package/lib/commands/prune/index.js +12 -0
- package/lib/commands/prune/prune.js +48 -0
- package/lib/commands/reset.js +31 -0
- package/lib/commands/seed/import.js +31 -0
- package/lib/commands/seed/index.js +12 -0
- package/lib/commands/squash/index.js +8 -0
- package/lib/commands/squash/squash.js +150 -0
- package/lib/commands/submit/index.js +38 -0
- package/lib/commands/test/index.js +251 -0
- package/lib/commands/update/eas.js +39 -0
- package/lib/commands/update/index.js +92 -0
- package/lib/config.js +4 -0
- package/lib/index.js +19 -0
- package/lib/migrations/index.js +2 -0
- package/lib/migrations/v0.js +3 -0
- package/lib/migrations/v1.js +4 -0
- package/package.json +43 -0
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
import blessed from 'blessed'
|
|
2
|
+
import { spawn } from 'child_process'
|
|
3
|
+
import 'dotenv/config'
|
|
4
|
+
import inquirer from 'inquirer'
|
|
5
|
+
import { env } from 'process'
|
|
6
|
+
import readline from 'readline'
|
|
7
|
+
import treeKill from 'tree-kill'
|
|
8
|
+
|
|
9
|
+
import { generateEnvValues } from '../../app.js'
|
|
10
|
+
import { STAGING_URL } from '../../config.js'
|
|
11
|
+
import { getNgrokUrl, updateNgrokUrl } from './ngrok.js'
|
|
12
|
+
|
|
13
|
+
// EXPO_FORCE_PROD = true;
|
|
14
|
+
// EXPO_UNSTABLE_ATLAS = false;
|
|
15
|
+
|
|
16
|
+
// Import the runDevCommand function
|
|
17
|
+
// ANSI escape sequences used to colorize per-service log output.
// `reset` is appended after every colored message to restore the
// terminal's default color (see `write` below).
const colors = {
  red: '\x1b[31m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  white: '\x1b[37m',
  blue: '\x1b[34m',
  magenta: '\x1b[35m',
  cyan: '\x1b[36m',
  reset: '\x1b[0m',
}
|
|
27
|
+
|
|
28
|
+
// Names of long-running services: every entry in the module-level
// `devServices` map whose `continue` flag is truthy. These are the
// services that get started (and restarted) by `startService`.
function getContinuedServices() {
  const names = []
  for (const name of Object.keys(devServices)) {
    if (devServices[name].continue) names.push(name)
  }
  return names
}
|
|
32
|
+
|
|
33
|
+
// Names of one-shot "prep" services: everything in the module-level
// `devServices` map NOT flagged `continue` (e.g. `docker compose up -d`),
// which run once before the long-running services start.
function getPrepServices() {
  return Object.keys(devServices).filter((name) => !devServices[name].continue)
}
|
|
37
|
+
|
|
38
|
+
// Declarative registry of local dev services.
// Per-service fields:
//   command  — shell command passed to spawn(..., { shell: true })
//   key      — hotkey label (informational)
//   color    — ANSI color for this service's log lines (see `write`)
//   continue — truthy: long-running, restarted on exit; falsy: one-shot prep
//   icon     — emoji used in exit/status messages
//   stdio    — optional stdio override for spawn
const devServicesRoot = {
  docker: {
    command: 'docker compose up -d',
    key: '?',
    icon: '🐳',
    stdio: 'ignore',
  },
  backend: {
    command: 'yarn backend server',
    key: 'b',
    color: colors.yellow,
    continue: true,
    icon: '🦾',
  },
  // Disabled service entry (kept for reference):
  // app: {
  //   command: 'yarn workspace app expo start -c --tunnel --dev-client',
  //   key: 'a',
  //   color: colors.white,
  //   continue: true,
  //   icon: '📱',
  //   stdio: ['inherit', 'inherit', 'inherit'],
  // },
  dynamo: {
    command: 'yarn backend dynamodb-admin -p 8082 --dynamo-endpoint=http://localhost:8000',
    key: 'd',
    continue: true,
    icon: '📦',
  },
}

// Derived map: same entries as devServicesRoot plus a `name` field equal
// to the registry key, so services can self-describe in log messages.
const devServices = {}
Object.keys(devServicesRoot).forEach((key) => {
  const service = devServicesRoot[key]
  devServices[key] = {
    ...service,
    name: key,
  }
})
// Live ChildProcess handles, keyed by service name (used by shutdown).
const childProcesses = {}
// Global gate for the auto-restart logic in startService.
let allowRestart = true

// NOTE(review): totalRetries is never referenced anywhere in this file —
// looks like dead code or used by a chunk not shown here; confirm.
const totalRetries = 5
|
|
80
|
+
|
|
81
|
+
/**
 * Spawn one long-running dev service and wire it into the supervisor.
 *
 * The child is recorded in the module-level `childProcesses` map and handed
 * to `childManager` with a restart callback: when the process exits and the
 * service is flagged `continue` (and global restarts are allowed, and it is
 * not the backend), it is respawned after a 2 s delay with the same args.
 *
 * @param {string} key       service name in `devServices`.
 * @param {boolean} forceProd carried through unchanged on restart.
 * @param {string} ngrokUrl   carried through unchanged on restart.
 * @param {boolean} stage     carried through unchanged on restart.
 * @param {object} envObj     environment passed to the child process.
 */
function startService(key, forceProd, ngrokUrl, stage, envObj) {
  const service = devServices[key]
  const restartable = service.continue

  const child = spawn(service.command, {
    stdio: service.stdio ? service.stdio : ['ignore', 'pipe', 'pipe'],
    shell: true,
    env: {
      ...envObj,
    },
  })

  childProcesses[key] = child

  const respawn = () => {
    if (restartable && allowRestart && key !== 'backend') {
      setTimeout(() => startService(key, forceProd, ngrokUrl, stage, envObj), 2000)
    }
  }
  childManager(child, service, false, respawn)
}
|
|
100
|
+
|
|
101
|
+
// Emit one line to stdout in the given ANSI color, then reset the color.
const write = (color, message) => {
  const line = color + message + colors.reset + '\n'
  process.stdout.write(line)
}
|
|
104
|
+
|
|
105
|
+
// Per-service flags read in childManager's 'exit' handler to suppress the
// restart callback after an intentional shutdown.
// NOTE(review): no code in this file ever sets a key on this object — it is
// either dead state or populated elsewhere; confirm before relying on it.
const serviceSigInt = {}
|
|
106
|
+
|
|
107
|
+
/**
 * Attach output forwarding and exit handling to a spawned service child.
 *
 * @param {ChildProcess} child   process returned by spawn().
 * @param {object} service       devServices entry (name/color/icon).
 * @param {boolean} nowrite      when true, do not forward stdout/stderr.
 * @param {Function} [restartCallback] invoked after the child exits
 *   (unless a shutdown was flagged in serviceSigInt).
 */
function childManager(child, service, nowrite = false, restartCallback = undefined) {
  const color = service.color || undefined
  // Forward a chunk of child output in the service's color; services with
  // no color configured are silently dropped here.
  const writeToBox = (data) => {
    if (color) write(color, data.toString())
  }

  if (!nowrite) {
    child.stdout?.on('data', writeToBox)
    child.stderr?.on('data', writeToBox)
  }

  // FIXME: ChildProcess never emits a 'sigint' event (valid events are
  // 'exit', 'close', 'error', ...), so this handler is dead code and the
  // restart callback here can never fire through this path.
  child.on('sigint', (code) => {
    console.log('sigint')
    console.log(`\n${service.icon || '🔚'} ${service.name || 'Service'} exited with code ${code}`)
    if (restartCallback) restartCallback()
  })

  // Real exit path: announce the exit and trigger the restart callback,
  // unless this service was deliberately shut down (serviceSigInt flag).
  child.on('exit', (code) => {
    if (!serviceSigInt[service.name] && restartCallback) {
      console.log(
        `\n${service.icon || '🔚'} ${service.name || 'Service'} exited with code ${code}\n`
      )
      restartCallback()
    }
  })
}
|
|
133
|
+
|
|
134
|
+
/**
 * Orchestrate the local dev environment.
 *
 * Flow: (1) optionally prompt for which services to run; (2) unless running
 * against prod/staging, run one-shot "prep" services and poll until an
 * ngrok tunnel URL is available; (3) build the child environment and, after
 * a delay, start the long-running services; (4) install SIGINT/SIGTERM
 * handlers that tree-kill the children.
 *
 * @param {boolean} _forceProd  use prod config (forced true when stage is).
 * @param {boolean} useAtlas    forwarded to generateEnvValues.
 * @param {string[]} [argServices] preselected service names (skips prompt).
 * @param {boolean} stage       connect to the staging environment.
 */
export async function dev(_forceProd, useAtlas, argServices, stage) {
  // Staging implies prod-style config.
  const forceProd = stage === true ? true : _forceProd
  let acceptedServices = argServices || undefined
  // No explicit service list and not staging: ask interactively,
  // defaulting to all known services.
  if (acceptedServices === undefined && !stage) {
    const { acceptedServices: services } = await inquirer.prompt([
      {
        type: 'checkbox',
        name: 'acceptedServices',
        message: 'Select services to run:',
        choices: Object.keys(devServices).map((key) => ({
          name: key,
          value: key,
        })),
        default: Object.keys(devServices).map((key) => key),
      },
    ])

    acceptedServices = services
  }

  let ngrokUrl = ''
  if (!forceProd && !stage) {
    // Run one-shot prep services (e.g. docker compose up) before anything
    // long-running starts.
    let envObj = generateEnvValues(forceProd)
    getPrepServices().forEach((key) => {
      const { command, stdio } = devServices[key]
      // NOTE(review): `stdio` is destructured but never used here — the
      // spawn below hard-codes its own stdio triple; confirm intent.
      if (acceptedServices.includes(key)) {
        console.log(`Running command for service ${key}: ${command}`)
        const child = spawn(command, {
          stdio: ['pipe', 'inherit', 'pipe'], // Read from terminal, but capture output
          shell: true,
          env: {
            ...envObj,
          },
        })
        // FIXME: ChildProcess has no 'sigint' event — only the 'exit'
        // listener below can ever fire.
        if (key === 'app') {
          child.on('sigint', () => {
            console.log('SIGINT received for app service')
          })
          child.on('exit', () => {
            console.log('exit received for app service')
          })
        }

        childProcesses[key] = child
        childManager(child, devServices[key], true)
      }
    })

    // Poll the local ngrok API until a tunnel URL is available.
    while (!ngrokUrl) {
      try {
        ngrokUrl = (await getNgrokUrl()) + '/'
      } catch (e) {
        console.log('Ngrok failed to start. Retrying in 2 seconds...')
        console.log(e)
        await new Promise((res) => setTimeout(res, 2000)) // Delay before retry
      }
    }
  }

  // Environment handed to every long-running child.
  let envObj = generateEnvValues(forceProd, ngrokUrl, useAtlas)
  if (stage) {
    envObj.EXPO_PUBLIC_API_URL = `${STAGING_URL}`
    envObj.ApiUrl = `${STAGING_URL}`
    envObj.EXPO_FORCE_PROD = 'true'
  } else {
    const publicUrl = process.env.EXPO_PUBLIC_API_URL || `${STAGING_URL}`
    envObj.EXPO_PUBLIC_API_URL = publicUrl
    envObj.ApiUrl = `${STAGING_URL}`
  }

  // Give prep services a head start in local mode (5 s), minimal delay
  // otherwise, then launch the long-running services.
  setTimeout(
    () => {
      console.log('ngrokUrl', ngrokUrl)
      console.log('envObj', envObj)
      getContinuedServices().forEach((key) => {
        if (stage && key === 'app') {
          startService(key, forceProd, ngrokUrl, stage, envObj)
        } else {
          if (!stage && acceptedServices.includes(key)) {
            startService(key, forceProd, ngrokUrl, stage, envObj)
          }
        }
      })
    },
    !forceProd ? 5000 : 100
  )

  // Tree-kill all restartable children (except docker) on shutdown.
  async function shutdown() {
    console.log('\n🛑 Shutting down all services...')
    for (const [key, child] of Object.entries(childProcesses)) {
      if (
        child &&
        child.pid &&
        !child.killed &&
        devServices[key].continue &&
        !['docker'].includes(key)
      ) {
        console.log(`→ Killing service: ${key}`)
        await new Promise((resolve) => {
          treeKill(child.pid, 'SIGTERM', (err) => {
            if (!err) {
              console.log(`✅ ${key} has been tree-killed.`)
            }
            resolve()
          })
        })
      }
      // FIXME: three bugs make this branch dead/broken:
      //  (1) ' docker' has a leading space, so it never matches key 'docker';
      //  (2) `command` is not defined in this scope (would throw if reached);
      //  (3) 'docker-compose down' is not a valid `stdio` value — it was
      //      presumably meant to be the spawned command.
      if (key === ' docker') {
        spawn(command, {
          stdio: 'docker-compose down', // Read from terminal, but capture output
          shell: true,
          env: {
            ...envObj,
          },
        })
      }
    }
  }

  process.on('SIGINT', () => {
    shutdown().then(() => process.exit(0))
  })

  process.on('SIGTERM', () => {
    shutdown().then(() => process.exit(0))
  })

  // Exit signal
  process.on('exit', () => {
    console.log('👋 Process exiting...')
  })
}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { program } from '../../app.js'
|
|
2
|
+
import { dev } from './dev-editor.js'
|
|
3
|
+
|
|
4
|
+
// Register the `dev2` subcommand: runs the local dev environment.
// NOTE(review): with no `.argument()` declared, commander passes (options,
// command) to the action — so `str` here is the parsed options object,
// which is consistent with how it is used below.
program
  .command('dev2')
  .description('Run local dev environment')
  .option('-d, --dev', 'Deploy to dev environment')
  // NOTE(review): this description ("Region to deploy to") looks
  // copy-pasted from another option; --atlas is passed to dev() as the
  // useAtlas flag.
  .option('-a, --atlas', 'Region to deploy to')
  .option('--app', 'Runs just the native app')
  .option('--host', 'Runs just the backend host app')
  .option('--stage', 'Connect to staging environment')
  .action(async (str, options) => {
    let services = undefined
    console.log('str', str)
    // --app / --host narrow the service list; --host wins when both given.
    if (str.app || str.host) {
      if (str.app) services = ['app']
      if (str.host) services = ['backend']
    }
    const stage = str.stage || false
    console.log(str.dev || false)
    dev(str.dev || false, str.atlas || false, services, stage)
  })
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import fs from 'fs'
|
|
2
|
+
import http from 'http'
|
|
3
|
+
import https from 'https'
|
|
4
|
+
|
|
5
|
+
// Path to the .env file updated by updateEnvFile.
// NOTE(review): this is resolved relative to the process cwd, not this
// module's location — it is only correct when the CLI is invoked from one
// directory below the .env; confirm the intended working directory.
const envFilePath = '../.env'
|
|
7
|
+
|
|
8
|
+
// Fetch the ngrok public URL from localhost:4040 (ngrok web interface)
/**
 * Resolve the first tunnel's public_url reported by the local ngrok API
 * (GET http://localhost:4040/api/tunnels).
 *
 * @returns {Promise<string>} the tunnel's public URL.
 * @throws {Error} (as rejection) when the request fails, the response body
 *   is not JSON, or no tunnel exposes a public_url.
 */
export function getNgrokUrl() {
  return new Promise((resolve, reject) => {
    const options = {
      hostname: 'localhost',
      port: 4040,
      path: '/api/tunnels',
      method: 'GET',
    }

    // port is always 4040 today, so this always picks http; the ternary is
    // kept so the options block can be repointed at an https endpoint.
    const req = (options.port === 443 ? https : http).request(options, (res) => {
      let data = ''
      res.on('data', (chunk) => {
        data += chunk
      })

      res.on('end', () => {
        try {
          const jsonResponse = JSON.parse(data)
          const publicUrl = jsonResponse.tunnels && jsonResponse.tunnels[0]?.public_url
          if (publicUrl) {
            resolve(publicUrl)
          } else {
            // Reject with Error instances (not bare strings) so callers get
            // a stack trace and `instanceof Error` checks behave.
            reject(new Error('Could not find public URL in the response'))
          }
        } catch (err) {
          reject(new Error('Error parsing JSON response', { cause: err }))
        }
      })
    })

    req.on('error', (error) => {
      reject(error)
    })

    req.end()
  })
}
|
|
46
|
+
|
|
47
|
+
// Rewrite the EXPO_PRIVATE_API_URL entry in the .env file (replacing the
// existing line, or appending one if absent). All failures are logged and
// swallowed — this is a best-effort background update.
function updateEnvFile(ngrokUrl) {
  console.log('Updating .env file with ngrok URL:', ngrokUrl)
  console.log('Env file path:', envFilePath)
  fs.readFile(envFilePath, 'utf8', (err, data) => {
    if (err) {
      console.error('Error reading .env file:', err)
      return
    }

    console.log('data', data)

    const entry = `EXPO_PRIVATE_API_URL=${ngrokUrl}/`
    const newData = data.includes('EXPO_PRIVATE_API_URL=')
      ? data.replace(/EXPO_PRIVATE_API_URL=.*/, entry) // replace existing line
      : `${data}\n${entry}` // no entry yet: append

    console.log('newData', newData)

    fs.writeFile(envFilePath, newData, 'utf8', (writeErr) => {
      if (writeErr) {
        console.error('Error writing to .env file:', writeErr)
      } else {
        console.log(`NGROK_URL has been updated to: ${ngrokUrl}`)
      }
    })
  })
}
|
|
80
|
+
// Main entry: fetch the current ngrok tunnel URL and persist it into the
// .env file. Errors from either step are logged, never thrown.
export async function updateNgrokUrl() {
  try {
    const url = await getNgrokUrl()
    updateEnvFile(url)
  } catch (error) {
    console.error('Error:', error)
  }
}
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
import fs from 'fs'
|
|
2
|
+
// Initialize the DynamoDB client
|
|
3
|
+
|
|
4
|
+
import { readFileSync } from 'fs'
|
|
5
|
+
import path from 'path'
|
|
6
|
+
|
|
7
|
+
import { DynamoDBClient, ScanCommand } from '@aws-sdk/client-dynamodb'
|
|
8
|
+
import { marshall, unmarshall } from '@aws-sdk/util-dynamodb'
|
|
9
|
+
|
|
10
|
+
import { join } from 'node:path';
|
|
11
|
+
// Project metadata read from the package.json at the invocation cwd; used
// below to build fully-qualified DynamoDB table names.
const packageJSON = JSON.parse(readFileSync(join(process.cwd(), 'package.json'), 'utf8'));

// NOTE(review): leftover debug log (says "Deploy" in the seed generator).
console.log('Deploy command loaded')

const awsObject = packageJSON['aws'] || {}
const projectName = packageJSON['name'] || 'project'

// NOTE(review): awsProfile is never referenced in this file — confirm
// whether it is dead or intended for the DynamoDBClient below.
const awsProfile = awsObject['profile'] || 'default'

// TODO: Fix Copy Issues
// Seed output folder: cleared before each export and written to by
// exportDynamoTable (which hard-codes this path via dirPath).
const dirPath = './docker/seed' // Folder path to delete files from
|
|
22
|
+
|
|
23
|
+
// Delete every regular file directly inside `dir`. Subdirectories (and
// anything inside them) are left untouched.
function deleteFilesInDir(dir) {
  for (const entry of fs.readdirSync(dir)) {
    const entryPath = path.join(dir, entry)
    const isRegularFile = fs.statSync(entryPath).isFile()
    if (isRegularFile) {
      fs.unlinkSync(entryPath)
      console.log(`Deleted file: ${entryPath}`)
    }
  }
}
|
|
42
|
+
|
|
43
|
+
// Function to scan the DynamoDB table and generate the desired JSON format
/**
 * Scan `tablename` (following LastEvaluatedKey pagination) and return
 * `{ [tablename]: [{ PutRequest: { Item } }, ...] }` suitable for a
 * BatchWriteItem seed file.
 *
 * @param {string} tablename table to scan.
 * @param {DynamoDBClient} client configured DynamoDB client.
 * @param {boolean} [profilesOnly=false] for tables whose name contains
 *   'Database', keep only items whose SK contains 'PROFILE'.
 * @returns {Promise<object>} formatted export. On scan failure this now
 *   returns `{ [tablename]: [] }` instead of `undefined`, which previously
 *   crashed the caller's Object.keys pass over the Promise.all results.
 */
async function generateTableExport(tablename, client, profilesOnly = false) {
  let params = {
    TableName: tablename,
  }

  // Accumulates every item across pages.
  let allItems = []
  let lastEvaluatedKey = null

  // Paginate: keep scanning while DynamoDB reports more pages.
  do {
    if (lastEvaluatedKey) {
      params.ExclusiveStartKey = lastEvaluatedKey
    }

    try {
      console.log('params', params)
      const data = await client.send(new ScanCommand(params))
      // Items can be absent on an empty page; concat(undefined) would
      // otherwise insert a literal `undefined` element.
      allItems = allItems.concat(data.Items || [])
      lastEvaluatedKey = data.LastEvaluatedKey // pagination cursor
    } catch (error) {
      console.error('Error scanning DynamoDB table:', error)
      // Keep the result shape stable for callers even on failure.
      return { [tablename]: [] }
    }
  } while (lastEvaluatedKey)

  // Format the data into the BatchWrite PutRequest structure.
  const formattedData = {
    [tablename]: allItems
      .filter(
        (item) =>
          !profilesOnly ||
          !tablename.includes('Database') ||
          unmarshall(item)['SK'].includes('PROFILE'),
      )
      .map((item) => {
        const formattedItem = unmarshall(item) // DynamoDB wire format -> JS object
        return {
          PutRequest: {
            Item: marshall(formattedItem), // JS object -> DynamoDB wire format
          },
        }
      }),
  }
  return formattedData
}
|
|
94
|
+
/**
 * Export one or more DynamoDB tables to chunked JSON seed files.
 *
 * Pipeline: clear the seed folder, scan each table (generateTableExport),
 * merge the per-table results, rename tables via `dbRewrites`, split each
 * table's items into batches of 20 (BatchWriteItem limit is 25), and write
 * one `<strOut>/dynamodb-seed-<n>.json` file per batch.
 *
 * @param {string[]} tables table names to scan.
 * @param {DynamoDBClient} client configured DynamoDB client.
 * @param {object} dbRewrites map of source table name -> output table name.
 * @param {boolean} [profilesOnly=false] forwarded to generateTableExport.
 * @param {string} [strOut='./docker/seed'] output folder for seed files.
 */
async function exportDynamoTable(
  tables,
  client,
  dbRewrites,
  profilesOnly = false,
  strOut = './docker/seed',
) {
  // FIXME: clears the module-level dirPath, ignoring the strOut parameter —
  // a non-default strOut would still wipe ./docker/seed.
  deleteFilesInDir(dirPath)
  let output = await Promise.all(
    tables.map(async (tableName) => await generateTableExport(tableName, client, profilesOnly)),
  )
  const fileName = `${strOut}/dynamodb-seed`

  // Flatten the array of { tableName: items } objects into one map.
  // NOTE(review): .map is used purely for side effects here (forEach would
  // express the intent); the returned { value } wrappers are discarded.
  const outputRes = {}
  output.map((item) => {
    const keys = Object.keys(item)
    console.log(keys)

    return keys.map((key) => {
      const value = item[key].filter((item) => item !== undefined)
      outputRes[key] = value
      return { value }
    })
  })

  output = outputRes

  const fileObject = {}
  const dbObject = {}
  // Apply table-name rewrites, dropping empty tables.
  Object.keys(output).forEach((key) => {
    console.log('key', key)
    const value = output[key]
    console.log('value', value.length)
    if (value.length > 0) {
      console.log('dbRewrites', dbRewrites)
      console.log('key', key)
      const dbKey = dbRewrites[key] || key
      console.log('dbKey', dbKey)
      dbObject[dbKey] = value
    }
  })
  console.log('dbObject', dbObject)

  // Global file counter across all tables.
  let countTotal = 0

  // Chunk each table's items into files of at most numOfItems entries.
  // NOTE(review): both branches of the inner `if` compute the same slice in
  // effect (slice clamps its end index), so the branch is redundant.
  Object.keys(dbObject).forEach((key) => {
    console.log(key)
    let currentPosition = 0
    const numOfItems = 20
    const putItems = dbObject[key]
    while (currentPosition < putItems.length) {
      if (dbObject[key].length > numOfItems) {
        const result = putItems.slice(currentPosition, currentPosition + numOfItems)
        fileObject[`${fileName}-${countTotal}`] = { [key]: result }
        currentPosition += numOfItems
        countTotal += 1
      } else {
        const result = putItems.slice(currentPosition, putItems.length)
        fileObject[`${fileName}-${countTotal}`] = { [key]: result }
        currentPosition += numOfItems
        countTotal += 1
      }
    }
  })

  // Write one JSON file per chunk.
  Object.keys(fileObject).forEach((key) => {
    console.log('Writing to file: ', key)
    fs.writeFileSync(`${key}.json`, JSON.stringify(fileObject[key], null, 2))
  })

  console.log(`Export complete. Data written to ${fileName}`)
}
|
|
166
|
+
/**
 * Ensure `dirname` exists, creating intermediate path segments as needed.
 *
 * Uses `mkdirSync(..., { recursive: true })`, which is idempotent: an
 * already-existing directory is not an error. The previous
 * existsSync-then-mkdir pair was racy (TOCTOU) and threw on nested paths
 * whose parents were missing.
 *
 * @param {string} dirname directory path to create.
 */
export function createDirIfNotExists(dirname) {
  fs.mkdirSync(dirname, { recursive: true })
}
|
|
171
|
+
|
|
172
|
+
// Entry point wired to the `generate` CLI command.

/**
 * Build table-name rewrite rules and a DynamoDB client, then kick off the
 * seed export via exportDynamoTable.
 *
 * @param {string[]} newTables replacement table name fragments (by index).
 * @param {string} owner       owner suffix of the source tables.
 * @param {string} [altOwner='dev'] owner suffix used in rewritten names.
 * @param {string[]} rewriteDb per-index rewrite targets; 'MainDB' maps the
 *   live table name straight to the local table name.
 * @param {boolean} [live=false] scan live AWS tables instead of local.
 * @param {string} [region='us-east-2'] AWS region.
 * @param {boolean} [profilesOnly=false] forwarded to the export.
 */
export function exportTable(
  newTables,
  owner,
  altOwner = 'dev',
  rewriteDb,
  live = false,
  region = 'us-east-2',
  profilesOnly = false,
) {
  createDirIfNotExists(dirPath)
  // Source table base names are fixed by mode; the newTables argument only
  // affects the rewrite targets below.
  const tables = live ? ['MainDatabase'] : ['MainDB']
  const dbRewrites = {}
  // NOTE(review): dbOg is populated but never read — appears to be dead.
  const dbOg = {}
  tables.map((table, index) => (dbOg[table] = newTables[index] || ''))
  tables.map((table, index) => {
    const rewriteDbIndex = rewriteDb[index]
    if (rewriteDbIndex === 'MainDB') {
      // Map the fully-qualified live name straight to the bare local name.
      dbRewrites[`${projectName}-infra-${table}-${owner}`] = `${rewriteDbIndex || table}`
    } else {
      // NOTE(review): replace(x, y) on the identical string is just
      // "newTables[index] || tables[index]" — the .replace is a no-op shell.
      const newTable = tables[index].replace(tables[index], newTables[index] || tables[index])
      dbRewrites[`${projectName}-infra-${table}-${owner}`] =
        `${projectName}-infra-${newTable || table}-${altOwner || owner}`
    }
  })

  // Tables actually scanned: bare local name, or fully-qualified live names.
  let dbTables = ['MainDB']

  if (live) {
    dbTables = tables.map((table) => {
      return `${projectName}-infra-${table}-${owner}`
    })
  }

  let client = undefined
  if (live) {
    client = new DynamoDBClient({
      region: region,
    })
  } else {
    console.log('LOCAL')
    // Local DynamoDB: fixed endpoint and fake credentials.
    client = new DynamoDBClient({
      region: region,
      endpoint: 'http://localhost:8000', // The default local DynamoDB endpoint
      credentials: {
        accessKeyId: 'fakeAccessKeyId', // Use fake credentials for local DynamoDB
        secretAccessKey: 'fakeSecretAccessKey',
      },
    })
  }
  // FIXME: async exportDynamoTable is not awaited or .catch()'d — errors
  // surface as unhandled rejections and callers cannot observe completion.
  exportDynamoTable(dbTables, client, dbRewrites, profilesOnly)
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import { program } from '../../app.js'
|
|
2
|
+
import { exportTable } from './generateSeed.js'
|
|
3
|
+
|
|
4
|
+
// Register the `generate` subcommand: exports DynamoDB data into
// ./docker/seed as BatchWrite-shaped JSON seed files (via exportTable).
program
  .command('generate')
  .description('Generate seed data in ./docker/seed')
  .argument('[<string>]', 'Tables to generateFrom')
  .option('-o, --owner <owner>', 'Owner of the tables')
  .option('-n, --newowner <newowner>', 'New owner of the tables')
  .option('-p, --live', 'Pull from live')
  .option('-r, --region <region>', 'Region to deploy to')
  .option('-d, --db <db>', 'Databases to rewrite to')
  .option('--profiles', 'Profiles only seed generation')
  .action(async (str, options) => {
    const owner = options.owner || 'dev'
    const profilesOnly = options.profiles || false
    // NOTE(review): ''.split(',') yields [''] — with no argument, tables is
    // a single empty string, not an empty list; confirm exportTable copes.
    const tables = (str || '').split(',')
    let nameRedirect = []
    if (options.db) nameRedirect = options.db.split(',')

    exportTable(
      tables,
      owner,
      options.newowner,
      nameRedirect,
      options.live,
      options.region,
      profilesOnly,
    )
  })
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import assert from 'assert'
|
|
2
|
+
import { spawn } from 'child_process'
|
|
3
|
+
import { readFileSync } from 'fs'
|
|
4
|
+
|
|
5
|
+
import { program } from '../../app.js'
|
|
6
|
+
|
|
7
|
+
import { join } from 'node:path';
|
|
8
|
+
// Project metadata from the invocation cwd's package.json.
const packageJSON = JSON.parse(readFileSync(join(process.cwd(), 'package.json'), 'utf8'));

// NOTE(review): leftover debug log (says "Deploy" in the init command).
console.log('Deploy command loaded')

const awsObject = packageJSON['aws'] || {}

// NOTE(review): accountId is never used in this file.
const accountId = process.env.CDK_DEPLOY_ACCOUNT
const awsProfile = process.env.CDK_DEPLOY_PROFILE || 'default'

// Register the `init` subcommand: runs `yarn workspace infra cdk bootstrap`.
program
  .command('init')
  .description('Execute cdk deploy command')
  .action((str, options) => {
    // FIXME(review): no .argument() is declared, so commander passes
    // (options, command) here — `str` is the options object (always truthy,
    // so owner is never 'dev') and `options` is the Command instance whose
    // `.region` is undefined. Confirm and declare an argument if intended.
    const owner = str || 'dev'
    const region = options.region || 'us-east-2'
    console.log(`Deploying to ${owner} environment`)
    const command = `workspace infra cdk bootstrap`
    const child = spawn('yarn', [`${command}`], {
      stdio: 'inherit',
      shell: true, // required if using shell-style commands or cross-platform support
      // NOTE(review): `env` REPLACES the inherited environment — PATH is not
      // included, which can prevent `yarn` from being found. Consider
      // { ...process.env, AWS_PROFILE: awsProfile }.
      env: {
        AWS_PROFILE: awsProfile,
      },
    })

    // Propagate the child's exit code to the CLI process.
    child.on('exit', (code) => {
      console.log(`Process exited with code ${code}`)
      process.exit(code ?? 0)
    })
  })
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import fs from 'fs'
|
|
2
|
+
import path from 'path'
|
|
3
|
+
|
|
4
|
+
// Absolute path of `myfile.json` under the current working directory.
export function getRootDirectory() {
  const fileName = 'myfile.json'
  return path.join(process.cwd(), fileName)
}
|
|
7
|
+
|
|
8
|
+
// Read and parse the package.json located in the current working directory.
// Throws if the file is missing or not valid JSON.
export function getRootJson() {
  const jsonPath = path.join(process.cwd(), 'package.json')
  return JSON.parse(fs.readFileSync(jsonPath, 'utf-8'))
}
|
|
15
|
+
|
|
16
|
+
// Absolute paths of every entry inside ./.haste (relative to the current
// working directory), or [] when the directory does not exist.
export function getHasteFiles() {
  const hasteDir = path.join(process.cwd(), '.haste')

  if (!fs.existsSync(hasteDir)) {
    return []
  }

  const names = fs.readdirSync(hasteDir)
  return names.map((name) => path.join(hasteDir, name))
}
|
|
26
|
+
|
|
27
|
+
// Load every file returned by getHasteFiles() as JSON, keyed by its file
// name with the .json extension removed.
export function getHasteConfig() {
  const hasteConfig = {}
  for (const file of getHasteFiles()) {
    const key = path.basename(file).replace('.json', '')
    const raw = fs.readFileSync(file, 'utf-8')
    console.log('file content', file, raw)
    hasteConfig[key] = JSON.parse(raw)
  }
  return hasteConfig
}
|