devskill 2.0.2 → 2.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -48,10 +48,17 @@ npx skills add vuluu2k/skills --skill='pinia-options'
48
48
  ```
49
49
 
50
50
  ### Option 3: Interactive CLI
51
- We've also built an elegant, interactive CLI for advanced management (requires cloning this repo):
51
+ We've also built an elegant, interactive CLI for advanced management:
52
52
 
53
53
  ```bash
54
+ # Open interactive menu
55
+ npx devskill
56
+
57
+ # Option 1: Install entire collections of skills
54
58
  npx devskill install
59
+
60
+ # Option 2: Add specific individual skills (use Space to select)
61
+ npx devskill add
55
62
  ```
56
63
 
57
64
  ---
package/README.vn.md CHANGED
@@ -27,9 +27,17 @@ Bằng cách cung cấp cho AI các tài liệu `SKILL.md` chuyên dụng, bạn
27
27
 
28
28
  ## 📦 Cài đặt & Bắt đầu nhanh
29
29
 
30
- Quên việc copy thủ công đi. Chúng tôi đã xây dựng một CLI tương tác thanh lịch để đưa kiến thức trực tiếpChạy CLI trong dự án của bạn:
30
+ Quên việc copy thủ công đi. Chúng tôi đã xây dựng một CLI tương tác thanh lịch để đưa kiến thức trực tiếp vào repo của bạn.
31
+
31
32
  ```bash
33
+ # Mở menu tương tác chính
34
+ npx devskill
35
+
36
+ # Tuỳ chọn 1: Cài đặt toàn bộ một nhóm (collection) skills
32
37
  npx devskill install
38
+
39
+ # Tuỳ chọn 2: Thêm các skills đơn lẻ (chọn bằng phím Space)
40
+ npx devskill add
33
41
  ```
34
42
 
35
43
  > **Mẹo:** Bạn cũng có thể cài đặt mọi skill trên toàn hệ thống bằng công cụ chính thức:
package/meta.ts CHANGED
@@ -118,6 +118,10 @@ export const collections: Record<string, string[]> = {
118
118
  'builderx_api': [
119
119
  'builderx_api-schemas',
120
120
  'builderx_api-controllers',
121
- 'builderx_api-contexts'
121
+ 'builderx_api-contexts',
122
+ 'builderx_api-kafka',
123
+ 'builderx_api-redis',
124
+ 'builderx_api-rabbitmq',
125
+ 'builderx_api-mongodb',
122
126
  ]
123
127
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "devskill",
3
- "version": "2.0.2",
3
+ "version": "2.0.4",
4
4
  "type": "module",
5
5
  "bin": {
6
6
  "devskill": "bin/devskill.js"
package/scripts/cli.ts CHANGED
@@ -97,9 +97,9 @@ async function initSubmodules(skipPrompt = false) {
97
97
  const shouldRemove = skipPrompt
98
98
  ? true
99
99
  : await p.confirm({
100
- message: 'Remove these extra submodules?',
101
- initialValue: true,
102
- })
100
+ message: 'Remove these extra submodules?',
101
+ initialValue: true,
102
+ })
103
103
 
104
104
  if (p.isCancel(shouldRemove)) {
105
105
  p.cancel('Cancelled')
@@ -130,14 +130,14 @@ async function initSubmodules(skipPrompt = false) {
130
130
  const selected = skipPrompt
131
131
  ? newProjects
132
132
  : await p.multiselect({
133
- message: 'Select projects to initialize',
134
- options: newProjects.map(project => ({
135
- value: project,
136
- label: `${project.name} (${project.type})`,
137
- hint: project.url,
138
- })),
139
- initialValues: newProjects,
140
- })
133
+ message: 'Select projects to initialize',
134
+ options: newProjects.map(project => ({
135
+ value: project,
136
+ label: `${project.name} (${project.type})`,
137
+ hint: project.url,
138
+ })),
139
+ initialValues: newProjects,
140
+ })
141
141
 
142
142
  if (p.isCancel(selected)) {
143
143
  p.cancel('Cancelled')
@@ -332,9 +332,9 @@ async function cleanup(skipPrompt = false) {
332
332
  const shouldRemove = skipPrompt
333
333
  ? true
334
334
  : await p.confirm({
335
- message: 'Remove these extra skills?',
336
- initialValue: false,
337
- })
335
+ message: 'Remove these extra skills?',
336
+ initialValue: false,
337
+ })
338
338
 
339
339
  if (p.isCancel(shouldRemove)) {
340
340
  p.cancel('Cancelled')
@@ -366,7 +366,7 @@ async function cleanup(skipPrompt = false) {
366
366
 
367
367
  async function installSkills() {
368
368
  const spinner = p.spinner()
369
-
369
+
370
370
  const allCollections: Record<string, string[]> = { ...collections }
371
371
  for (const [vendorName, config] of Object.entries(vendors)) {
372
372
  const vendorConfig = config as VendorConfig
@@ -379,7 +379,7 @@ async function installSkills() {
379
379
  }
380
380
 
381
381
  const selectedCollectionNames = await p.multiselect({
382
- message: 'Select collections to install',
382
+ message: 'Select collections to install (Space to select, Enter to confirm)',
383
383
  options: Object.keys(allCollections).map(name => ({
384
384
  value: name,
385
385
  label: name,
@@ -412,21 +412,45 @@ async function installSkills() {
412
412
  return
413
413
  }
414
414
 
415
- const skillsDirName = await p.text({
416
- message: 'Name of the skills directory inside target project?',
417
- initialValue: 'skills',
418
- placeholder: 'skills'
415
+ const toolChoice = await p.select({
416
+ message: 'Which AI tool are you installing these skills for?',
417
+ options: [
418
+ { value: 'skills', label: 'Generic (skills/)', hint: 'Default skills directory' },
419
+ { value: '.cursor/skills', label: 'Cursor (.cursor/skills)' },
420
+ { value: '.windsurf/skills', label: 'Windsurf (.windsurf/skills)' },
421
+ { value: '.claude/skills', label: 'Claude Desktop (.claude/skills)' },
422
+ { value: '.claudecode/skills', label: 'Claude Code (.claudecode/skills)' },
423
+ { value: '.agents/skills', label: 'Antigravity (.agents/skills)' },
424
+ { value: '.vscode/skills', label: 'VSCode / Copilot (.vscode/skills)' },
425
+ { value: 'custom', label: 'Custom path...' }
426
+ ]
419
427
  })
420
428
 
421
- if (p.isCancel(skillsDirName)) {
429
+ if (p.isCancel(toolChoice)) {
422
430
  p.cancel('Cancelled')
423
431
  return
424
432
  }
425
433
 
434
+ let skillsDirName = toolChoice as string
435
+
436
+ if (toolChoice === 'custom') {
437
+ const customDir = await p.text({
438
+ message: 'Enter custom skills directory path:',
439
+ initialValue: 'skills',
440
+ placeholder: 'skills'
441
+ })
442
+
443
+ if (p.isCancel(customDir)) {
444
+ p.cancel('Cancelled')
445
+ return
446
+ }
447
+ skillsDirName = customDir as string
448
+ }
449
+
426
450
  const selectedSkills = Array.from(new Set(
427
451
  (selectedCollectionNames as string[]).flatMap(name => allCollections[name])
428
452
  ))
429
- const targetDir = join(targetProject as string, skillsDirName as string)
453
+ const targetDir = join(targetProject as string, skillsDirName)
430
454
 
431
455
  if (!existsSync(targetDir)) {
432
456
  mkdirSync(targetDir, { recursive: true })
@@ -446,9 +470,129 @@ async function installSkills() {
446
470
  if (existsSync(destPath)) {
447
471
  rmSync(destPath, { recursive: true })
448
472
  }
449
-
473
+
450
474
  mkdirSync(destPath, { recursive: true })
451
-
475
+
476
+ const files = readdirSync(sourcePath, { recursive: true, withFileTypes: true })
477
+ for (const file of files) {
478
+ if (file.isFile()) {
479
+ const fullPath = join(file.parentPath, file.name)
480
+ const relativePath = fullPath.replace(sourcePath, '')
481
+ const dp = join(destPath, relativePath)
482
+ const dd = dirname(dp)
483
+ if (!existsSync(dd)) {
484
+ mkdirSync(dd, { recursive: true })
485
+ }
486
+ cpSync(fullPath, dp)
487
+ }
488
+ }
489
+ successCount++;
490
+ }
491
+
492
+ spinner.stop(`Installed ${successCount}/${selectedSkills.length} skills to target project`)
493
+ }
494
+
495
+ async function installSpecificSkills() {
496
+ const spinner = p.spinner()
497
+
498
+ const allSkills = getExistingSkillNames()
499
+
500
+ if (allSkills.length === 0) {
501
+ p.log.warn('No skills found in skills directory. Try running init or sync first.')
502
+ return
503
+ }
504
+
505
+ const selectedSkills = await p.multiselect({
506
+ message: 'Select specific skills to install (Space to select, Enter to confirm)',
507
+ options: allSkills.map(name => ({
508
+ value: name,
509
+ label: name
510
+ }))
511
+ })
512
+
513
+ if (p.isCancel(selectedSkills)) {
514
+ p.cancel('Cancelled')
515
+ return
516
+ }
517
+
518
+ if (selectedSkills.length === 0) {
519
+ p.log.warn('No skills selected')
520
+ return
521
+ }
522
+
523
+ const targetProject = await p.text({
524
+ message: 'Enter target project directory path (relative or absolute)',
525
+ initialValue: process.cwd(),
526
+ placeholder: process.cwd(),
527
+ validate: (value) => {
528
+ if (!value) return 'Path is required'
529
+ if (!existsSync(value)) return 'Directory does not exist'
530
+ }
531
+ })
532
+
533
+ if (p.isCancel(targetProject)) {
534
+ p.cancel('Cancelled')
535
+ return
536
+ }
537
+
538
+ const toolChoice = await p.select({
539
+ message: 'Which AI tool are you installing these skills for?',
540
+ options: [
541
+ { value: 'skills', label: 'Generic (skills/)', hint: 'Default skills directory' },
542
+ { value: '.cursor/skills', label: 'Cursor (.cursor/skills)' },
543
+ { value: '.windsurf/rules', label: 'Windsurf (.windsurf/rules)' },
544
+ { value: '.claude/skills', label: 'Claude Desktop (.claude/skills)' },
545
+ { value: '.claudecode/skills', label: 'Claude Code (.claudecode/skills)' },
546
+ { value: '.agents/skills', label: 'Antigravity (.agents/skills)' },
547
+ { value: '.vscode/skills', label: 'VSCode / Copilot (.vscode/skills)' },
548
+ { value: 'custom', label: 'Custom path...' }
549
+ ]
550
+ })
551
+
552
+ if (p.isCancel(toolChoice)) {
553
+ p.cancel('Cancelled')
554
+ return
555
+ }
556
+
557
+ let skillsDirName = toolChoice as string
558
+
559
+ if (toolChoice === 'custom') {
560
+ const customDir = await p.text({
561
+ message: 'Enter custom skills directory path:',
562
+ initialValue: 'skills',
563
+ placeholder: 'skills'
564
+ })
565
+
566
+ if (p.isCancel(customDir)) {
567
+ p.cancel('Cancelled')
568
+ return
569
+ }
570
+ skillsDirName = customDir as string
571
+ }
572
+
573
+ const targetDir = join(targetProject as string, skillsDirName)
574
+
575
+ if (!existsSync(targetDir)) {
576
+ mkdirSync(targetDir, { recursive: true })
577
+ }
578
+
579
+ spinner.start(`Installing ${selectedSkills.length} skills to ${targetDir}...`)
580
+
581
+ let successCount = 0;
582
+ for (const skill of selectedSkills as string[]) {
583
+ const sourcePath = join(root, 'skills', skill)
584
+ if (!existsSync(sourcePath)) {
585
+ p.log.warn(`Skill not found: ${skill}`)
586
+ continue
587
+ }
588
+
589
+ const destPath = join(targetDir, skill)
590
+ if (existsSync(destPath)) {
591
+ rmSync(destPath, { recursive: true })
592
+ }
593
+
594
+ mkdirSync(destPath, { recursive: true })
595
+
452
596
  const files = readdirSync(sourcePath, { recursive: true, withFileTypes: true })
453
597
  for (const file of files) {
454
598
  if (file.isFile()) {
@@ -502,15 +646,22 @@ async function main() {
502
646
  }
503
647
 
504
648
  if (command === 'install') {
505
- p.intro('Skills Manager - Install')
649
+ p.intro('Skills Manager - Install Groups')
506
650
  await installSkills()
507
651
  p.outro('Done')
508
652
  return
509
653
  }
510
654
 
655
+ if (command === 'add') {
656
+ p.intro('Skills Manager - Add Specific Skills')
657
+ await installSpecificSkills()
658
+ p.outro('Done')
659
+ return
660
+ }
661
+
511
662
  if (skipPrompt) {
512
663
  p.log.error('Command required when using -y flag')
513
- p.log.info('Available commands: install, init, sync, check, cleanup')
664
+ p.log.info('Available commands: install, add, init, sync, check, cleanup')
514
665
  process.exit(1)
515
666
  }
516
667
 
@@ -519,7 +670,8 @@ async function main() {
519
670
  const action = await p.select({
520
671
  message: 'What would you like to do?',
521
672
  options: [
522
- { value: 'install', label: 'Install collections', hint: 'Copy skill collections to a local project' },
673
+ { value: 'install', label: 'Install collections', hint: 'Copy entire skill collections to a local project' },
674
+ { value: 'add', label: 'Add specific skills', hint: 'Choose individual skills to add to your project' },
523
675
  { value: 'sync', label: 'Sync submodules', hint: 'Pull latest and sync Type 2 skills' },
524
676
  { value: 'init', label: 'Init submodules', hint: 'Add new submodules from meta.ts' },
525
677
  { value: 'check', label: 'Check updates', hint: 'See available updates' },
@@ -536,6 +688,9 @@ async function main() {
536
688
  case 'install':
537
689
  await installSkills()
538
690
  break
691
+ case 'add':
692
+ await installSpecificSkills()
693
+ break
539
694
  case 'init':
540
695
  await initSubmodules()
541
696
  break
@@ -0,0 +1,175 @@
1
+ ---
2
+ description: Patterns for using Kafka (:brod), creating producers, consumers, and offset management in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API Kafka Skill
6
+
7
+ The `builderx_api` project uses the `:brod` Erlang package for interacting with Kafka. The central coordination module is `Kafka` (`lib/kafka/kafka.ex`), which starts the `:brod` client and registers producers and consumers under its supervisor.
8
+
9
+ ## 1. Creating a New Consumer
10
+
11
+ When you need to consume data from a new Kafka topic, follow the pattern established in `Kafka.QuestConsumer`. You need a GenServer that subscribes to `:brod` and relies on Redis to manage consumer offsets.
12
+
13
+ ### Example Structure (`lib/kafka/my_new_consumer.ex`):
14
+
15
+ ```elixir
16
+ defmodule Kafka.MyNewConsumer do
17
+ use GenServer
18
+ import Record, only: [defrecord: 2, extract: 2]
19
+
20
+ alias BuilderxApi.{Tools}
21
+
22
+ @topic "my.new.kafka.topic"
23
+ @prefetch_count 5
24
+
25
+ defmodule State do
26
+ @enforce_keys [:consumer_pid, :partition]
27
+ defstruct consumer_pid: nil, partition: nil
28
+ end
29
+
30
+ defmodule KafkaMessage do
31
+ @enforce_keys [:offset, :key, :value, :ts]
32
+ defstruct offset: nil, key: nil, value: nil, ts: nil
33
+ end
34
+
35
+ defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl")
36
+ defrecord :kafka_message_set, extract(:kafka_message_set, from_lib: "brod/include/brod.hrl")
37
+ defrecord :kafka_fetch_error, extract(:kafka_fetch_error, from_lib: "brod/include/brod.hrl")
38
+
39
+ def start_link(opts) do
40
+ GenServer.start_link(__MODULE__, opts)
41
+ end
42
+
43
+ def init({client_id, partition}) do
44
+ consumer_config = [
45
+ prefetch_count: @prefetch_count,
46
+ max_bytes: @prefetch_count * 1024, # 1KB
47
+ max_wait_time: 0
48
+ ]
49
+
50
+ :ok = :brod.start_consumer(client_id, @topic, consumer_config)
51
+
52
+ {:ok, consumer_pid} = :brod.subscribe(client_id, self(), @topic, partition, consumer_config)
53
+
54
+ # Trigger the first fetch manually from Redis offset
55
+ Process.send(self(), {:fetch_message}, [])
56
+
57
+ {:ok, %State{consumer_pid: consumer_pid, partition: partition}}
58
+ end
59
+
60
+ # Receive fetched messages
61
+ def handle_info(
62
+ {consumer_pid, kafka_message_set(messages: msgs)},
63
+ %State{consumer_pid: consumer_pid, partition: partition} = state
64
+ ) do
65
+ msgs = Enum.map(msgs, &kafka_message_to_struct(&1))
66
+
67
+ Enum.each(msgs, fn msg ->
68
+ # Decode and process your message here
69
+ try do
70
+ parsed = Jason.decode!(msg.value)
71
+ # process parsed data...
72
+ rescue _ -> nil
73
+ end
74
+ end)
75
+
76
+ # Acknowledge messages and update Redis offset
77
+ for msg <- msgs do
78
+ key = "kafka_topic:#{@topic}:#{partition}"
79
+ Redis.PubSub.set(key, msg.offset)
80
+ :brod.consume_ack(consumer_pid, msg.offset)
81
+ end
82
+
83
+ {:noreply, state}
84
+ end
85
+
86
+ # Handle fetch errors
87
+ def handle_info({_pid, kafka_fetch_error()} = error, state) do
88
+ Logger.error("KAFKA: my_consumer fetch error #{inspect(error)}")
89
+ {:noreply, state}
90
+ end
91
+
92
+ # Fetch initially using the offset saved in Redis
93
+ def handle_info({:fetch_message}, %State{partition: partition} = state) do
94
+ host = System.get_env("KAFKA1_HOST")
95
+ port = System.get_env("KAFKA1_PORT") |> String.to_integer()
96
+ bootstrapEndpoints = [{host, port}]
97
+
98
+ key = "kafka_topic:#{@topic}:#{partition}"
99
+ {:ok, offset} = Redis.PubSub.get(key)
100
+ offset = Tools.to_int(offset)
101
+
102
+ with {:ok, batch} <- :brod.fetch(bootstrapEndpoints, @topic, partition, offset, %{}) do
103
+ {latest_offset, msgs} = batch
104
+
105
+ Enum.each(msgs, fn msg ->
106
+ msg = kafka_message_to_struct(msg)
107
+ # Often we just resend the message, or process it immediately.
108
+ # This behaves as a synchronization mechanism on start.
109
+ end)
110
+
111
+ Redis.PubSub.set(key, latest_offset)
112
+ end
113
+
114
+ {:noreply, state}
115
+ end
116
+
117
+ defp kafka_message_to_struct(kafka_message(offset: offset, key: key, value: value, ts: ts)) do
118
+ %KafkaMessage{
119
+ offset: offset,
120
+ key: key,
121
+ value: value,
122
+ ts: DateTime.from_unix!(ts, :millisecond)
123
+ }
124
+ end
125
+ end
126
+ ```
127
+
128
+ ## 2. Registering the Consumer
129
+
130
+ Consumer registration is done in `lib/kafka/kafka.ex` based on the hostname, to support distinct consumer groups and parallel partition processing.
131
+
132
+ 1. Ensure the `:brod` client has the client_id `:kafka_client`.
133
+ 2. Find the correct worker hostname conditions (e.g., `publish-consumer-01`, `publish-consumer-02`).
134
+ 3. Note how the *second argument* often denotes the Kafka **partition** index. For `publish-consumer-01`, it's usually `0`; for `publish-consumer-02`, it's `1`, etc.
135
+
136
+ ```elixir
137
+ "publish-consumer-01" ->
138
+ [
139
+ {KafkaProducer, {:kafka_client}},
140
+ {Kafka.QuestConsumer, {:kafka_client, 0}},
141
+ {Kafka.MyNewConsumer, {:kafka_client, 0}} # <--- Add your consumer for partition 0
142
+ ]
143
+
144
+ "publish-consumer-02" ->
145
+ [
146
+ {KafkaProducer, {:kafka_client}},
147
+ {Kafka.QuestConsumer, {:kafka_client, 1}},
148
+ {Kafka.MyNewConsumer, {:kafka_client, 1}} # <--- Add your consumer for partition 1
149
+ ]
150
+ ```
151
+
152
+ ## 3. Publishing Messages
153
+
154
+ All topics you wish to publish to must be registered in `@topics` in `lib/kafka/kafka_producer.ex`.
155
+
156
+ In `lib/kafka/kafka_producer.ex`:
157
+ ```elixir
158
+ @topics [
159
+ "store.cache.agg_products",
160
+ "store.queuing.questdb",
161
+ "my.new.kafka.topic" # <--- Register new topic
162
+ ]
163
+ ```
164
+
165
+ To publish a message, use `KafkaProducer.publish/4`:
166
+
167
+ ```elixir
168
+ topic = "my.new.kafka.topic"
169
+ partition = 0 # Generally, you need to manage partition distribution (e.g., hash the key)
170
+ key = "your_message_key"
171
+ message = Jason.encode!(%{hello: "world"})
172
+
173
+ # Send synchronously
174
+ :ok = KafkaProducer.publish(topic, partition, key, message)
175
+ ```
@@ -0,0 +1,93 @@
1
+ ---
2
+ description: Patterns for using MongoDB driver and dynamic collections in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API MongoDB Skill
6
+
7
+ The `builderx_api` project integrates MongoDB via the `mongodb_driver` alongside its primary Postgres (Citus) database. This is used extensively for the *Dynamic Database Collections* feature in `BuilderxApi.DBCollections.DBCollections` (`lib/builderx_api/db_collections/db_collections.ex`).
8
+
9
+ In this pattern, metadata about the data models (schema) is stored in Postgres (`DBCollection`), but the actual records are physically stored in MongoDB (`MongoRepo`) using a single `records` table separated by `table_name` and `site_id`.
10
+
11
+ ## 1. Interacting with MongoDB Collections
12
+
13
+ You should generally not interact with `MongoRepo` directly unless you are inside the `builderx_api/db_collections/...` scope.
14
+
15
+ Instead, use `DBCollections`:
16
+
17
+ ### Checking if a record exists
18
+ ```elixir
19
+ filters = %{"slug" => "my-record"}
20
+
21
+ # conn must have assigns for customer, account, or is_check_record_creator as required
22
+ DBCollections.exists_record(table_name, filters, db_collection_struct, conn)
23
+ # => {:ok, true | false}
24
+ ```
25
+
26
+ ### Querying records
27
+ Retrieves customized results based on dynamic schemas.
28
+
29
+ ```elixir
30
+ select = %{"id" => 1, "name" => 1}
31
+ filters = %{"status" => "active"}
32
+ limit = 10
33
+ skip = 0
34
+ sort = %{"inserted_at" => -1} # Use 1 for ASC, -1 for DESC
35
+ populate = [] # Populate relations if any references are configured
36
+ params = %{"site_id" => "site_uuid"}
37
+
38
+ DBCollections.query_record(
39
+ table_name,
40
+ select,
41
+ filters,
42
+ sort,
43
+ limit,
44
+ skip,
45
+ populate,
46
+ params,
47
+ conn
48
+ )
49
+ # => List of normalized maps
50
+ ```
51
+
52
+ ### Inserting records
53
+ ```elixir
54
+ # attrs is a list of map: [%{"field_name" => "name", "field_value" => "Record 1"}]
55
+ # Note that we use a custom key format for dynamic mapping.
56
+
57
+ {:ok, inserted_record} = DBCollections.insert_record(table_name, attrs, params, conn)
58
+ ```
59
+
60
+ ## 2. Using `MongoRepo` directly
61
+
62
+ The `BuilderxApi.MongoRepo` is an abstraction over `:mongo` (the `mongodb_driver` pool).
63
+ For some administrative actions, it is called directly:
64
+
65
+ ```elixir
66
+ alias BuilderxApi.MongoRepo
67
+
68
+ table = "records"
69
+
70
+ # Find
71
+ records = MongoRepo.find(table, %{"site_id" => site_id, "table_name" => "users"})
72
+
73
+ # Find one
74
+ record = MongoRepo.find_one(table, %{"_id" => id})
75
+
76
+ # Update Many
77
+ MongoRepo.update_many(
78
+ table,
79
+ %{"site_id" => site_id, "table_name" => "users"},
80
+ %{"$unset" => %{"webcmscol_removed_field" => ""}}
81
+ )
82
+
83
+ # Insert Many
84
+ MongoRepo.insert_many(table, list_of_maps)
85
+
86
+ # Delete Many
87
+ MongoRepo.delete_many(table, %{"site_id" => site_id, "table_name" => "users"})
88
+ ```
89
+
90
+ ### Important Patterns
91
+ - `webcmscol_`: The system prepends `webcmscol_` to column names stored in MongoDB to prevent clashes with system variables like `_id`, `site_id`, `table_name`. You will see operations map/unmap this prefix (`DBUtils.sanitize_column_name/1`).
92
+ - **Caching Counts**: Because counting documents in Mongo can become a bottleneck, the total document count per site collection is cached in Redis: `db_collection_records::{site_id}`.
93
+ - All MongoDB records share the `records` collection but are differentiated by standard root fields: `"site_id"` and `"table_name"`.
@@ -0,0 +1,169 @@
1
+ ---
2
+ description: Patterns for using RabbitMQ (AMQP), creating consumers/workers, and publishing messages in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API RabbitMQ Skill
6
+
7
+ The `builderx_api` project uses the `amqp` package for interacting with RabbitMQ. All RabbitMQ operations orbit around the central `Rabbit` (`lib/rabbit/rabbit.ex`) module, which handles connection pooling, establishing connection channels automatically, and distributing work to supervisors.
8
+
9
+ ## 1. Creating a New Consumer (Worker)
10
+
11
+ When you need to create a new background worker to consume events from RabbitMQ, you should structure it following the existing consumer patterns (e.g., `OrderConsumer`, `IndexingConsumer`). This includes subscribing to a queue, setting prefetch count, configuring dead letter queues for errors, and optionally configuring wait queues for retry logic.
12
+
13
+ ### Example Structure (`lib/rabbit/my_new_consumer.ex`):
14
+
15
+ ```elixir
16
+ defmodule Rabbit.MyNewConsumer do
17
+ require Logger
18
+ use GenServer
19
+ use AMQP
20
+
21
+ alias BuilderxApi.Tools
22
+ alias Rabbit
23
+ alias Worker.MainWorker
24
+
25
+ @queue_base "my_new_queue_name"
26
+ @storecake_v2_exchange "storecake_v2_ex"
27
+ @storecake_v2_exchange_deadletter "storecake_v2_ex_deadletter"
28
+ @sync_queue_error "my_new_queue_error"
29
+ @prefetch_count 20
30
+
31
+ # Client API
32
+ def start_link() do
33
+ GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
34
+ end
35
+
36
+ def child_spec(_args) do
37
+ %{
38
+ id: __MODULE__,
39
+ start: {__MODULE__, :start_link, []}
40
+ }
41
+ end
42
+
43
+ def channel_available(chan) do
44
+ GenServer.cast(__MODULE__, {:channel_available, chan})
45
+ end
46
+
47
+ def consumer_tag() do
48
+ {:ok, hostname} = :inet.gethostname()
49
+ "#{hostname}-my-new-consumer"
50
+ end
51
+
52
+ # Server Callbacks
53
+ def init(:ok) do
54
+ # Request a channel once the gen server is initialized
55
+ Rabbit.request_channel(__MODULE__)
56
+ {:ok, nil}
57
+ end
58
+
59
+ def publish(payload) do
60
+ GenServer.cast(__MODULE__, {:publish, payload})
61
+ end
62
+
63
+ def handle_cast({:publish, payload}, channel) do
64
+ # When publishing directly through this consumer's channel
65
+ message = Jason.decode!(payload)
66
+ queue = get_queue(message)
67
+
68
+ AMQP.Basic.publish(channel, @storecake_v2_exchange, queue, payload, persistent: true)
69
+
70
+ {:noreply, channel}
71
+ end
72
+
73
+ def handle_cast({:channel_available, channel}, _state) do
74
+ Logger.info("CHANNEL_AVAILABLE FOR MY NEW CONSUMER")
75
+
76
+ Basic.qos(channel, prefetch_count: @prefetch_count)
77
+
78
+ Queue.declare(channel, @queue_base,
79
+ durable: true,
80
+ arguments: [
81
+ {"x-dead-letter-exchange", :longstr, @storecake_v2_exchange_deadletter},
82
+ {"x-dead-letter-routing-key", :longstr, @sync_queue_error}
83
+ ]
84
+ )
85
+
86
+ # Note: If implementing retries with delayed messages, declare wait queues here
87
+ # and bind them as in OrderConsumer.ex.
88
+
89
+ Queue.bind(channel, @queue_base, @storecake_v2_exchange, routing_key: @queue_base)
90
+ Queue.bind(channel, @sync_queue_error, @storecake_v2_exchange, routing_key: @sync_queue_error)
91
+
92
+ {:ok, _consumer_tag} = Basic.consume(channel, @queue_base, self(), consumer_tag: consumer_tag())
93
+
94
+ {:noreply, channel}
95
+ end
96
+
97
+ # Basic AMQP handlers...
98
+ def handle_info({:basic_consume_ok, %{consumer_tag: _consumer_tag}}, chan), do: {:noreply, chan}
99
+ def handle_info({:basic_cancel, %{consumer_tag: _consumer_tag}}, chan), do: {:stop, :normal, chan}
100
+ def handle_info({:basic_cancel_ok, %{consumer_tag: _consumer_tag}}, chan), do: {:noreply, chan}
101
+
102
+ def handle_info({:basic_deliver, payload, %{delivery_tag: tag, redelivered: redelivered}}, chan) do
103
+ spawn(fn -> consume(chan, tag, redelivered, payload) end)
104
+ {:noreply, chan}
105
+ end
106
+
107
+ def consume(chan, tag, _redelivered, payload) do
108
+ try do
109
+ # Pass data to your worker implementation
110
+ MainWorker.assign(Jason.decode!(payload))
111
+ rescue
112
+ e -> on_error(payload, e, __STACKTRACE__)
113
+ after
114
+ AMQP.Basic.ack(chan, tag)
115
+ end
116
+ end
117
+
118
+ def on_error(message, exception, stacktrace \\ []) do
119
+ Logger.error("Error consuming message: #{inspect(exception)}")
120
+ # Trigger retry logic if applicable using RabbitMq delayed messages
121
+ end
122
+
123
+ def get_queue(_payload) do
124
+ @queue_base
125
+ end
126
+ end
127
+ ```
128
+
129
+ ## 2. Registering the Worker
130
+
131
+ After creating the worker, **you MUST register it in the `Rabbit` supervisor** (`lib/rabbit/rabbit.ex`), otherwise it will not start and not consume any queues.
132
+
133
+ Open `lib/rabbit/rabbit.ex` and:
134
+
135
+ 1. Alias your new consumer at the top:
136
+ ```elixir
137
+ alias Rabbit.{
138
+ ProductConsumer,
139
+ OrderConsumer,
140
+ # ...
141
+ MyNewConsumer
142
+ }
143
+ ```
144
+ 2. In the `init(:ok)` function, append it to the `children` list for the specific worker hostnames (e.g., `store-worker-01`, `store-worker-02`).
145
+
146
+ ```elixir
147
+ "store-worker-01" ->
148
+ [
149
+ {ProductConsumer, []},
150
+ {OrderConsumer, []},
151
+ # ... existing consumers ...
152
+ {MyNewConsumer, []} # <------ Add here
153
+ ]
154
+ ```
155
+
156
+ ## 3. Publishing Messages
157
+
158
+ To publish a message from any location in the application without needing a specific consumer channel or `handle_cast` call, use the general `Rabbit.publish_message/2` helper provided in `Rabbit`:
159
+
160
+ ```elixir
161
+ message_payload = %WorkerMessage{
162
+ action: "sync_something_new",
163
+ turn: 0,
164
+ data: %{id: 123, status: "pending"}
165
+ }
166
+
167
+ # The queue name should match a binding routing key in your consumer setup
168
+ Rabbit.publish_message(message_payload, "my_new_queue_name")
169
+ ```
@@ -0,0 +1,93 @@
1
+ ---
2
+ description: Patterns for using Redis caching, PubSub, and Poolboy in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API Redis Skill
6
+
7
+ The `builderx_api` project uses the `redix` library combined with Erlang's `:poolboy` for connection pooling. Standard usages revolve entirely around the `Redis.PubSub` module (`lib/redis/redis_pubsub.ex`).
8
+
9
+ ## 1. General Redis Commands
10
+
11
+ The `Redis.PubSub` module exposes wrapper functions for common Redis operations. Under the hood, they use `Redix.command/2` within a `:poolboy.transaction/2` call targeting the `:redis_poolex` pool.
12
+
13
+ ### Keys and Strings
14
+ ```elixir
15
+ # GET a key
16
+ {:ok, value} = Redis.PubSub.get("my_key")
17
+
18
+ # SET a key
19
+ {:ok, "OK"} = Redis.PubSub.set("my_key", "value")
20
+
21
+ # SET a key with expiration (in seconds)
22
+ {:ok, "OK"} = Redis.PubSub.set("my_key", "value", 3600)
23
+
24
+ # Delete keys
25
+ {:ok, deleted_count} = Redis.PubSub.del("my_key")
26
+ {:ok, deleted_count} = Redis.PubSub.del(["key1", "key2"])
27
+
28
+ # Expire an existing key
29
+ {:ok, 1} = Redis.PubSub.expire("my_key", 60)
30
+ ```
31
+
32
+ ### Counters
33
+ ```elixir
34
+ # Increment by 1
35
+ {:ok, new_val} = Redis.PubSub.incr("visits")
36
+
37
+ # Increment by N
38
+ {:ok, new_val} = Redis.PubSub.incr("visits", 5)
39
+ ```
40
+
41
+ ### Hash Maps
42
+ ```elixir
43
+ # Increment a field inside a hash
44
+ {:ok, new_val} = Redis.PubSub.hincrby("user:123", "login_count", 1)
45
+
46
+ # Get a field from a hash
47
+ {:ok, value} = Redis.PubSub.hget("user:123", "name")
48
+
49
+ # Get entire hash
50
+ {:ok, list_of_pairs} = Redis.PubSub.hgetall("user:123")
51
+ ```
52
+
53
+ ### Sets
54
+ ```elixir
55
+ # Add to Set
56
+ {:ok, added_count} = Redis.PubSub.sadd("my_set", "item1")
57
+ {:ok, added_count} = Redis.PubSub.sadd("my_set", ["item2", "item3"])
58
+
59
+ # Remove from Set
60
+ {:ok, removed_count} = Redis.PubSub.srem("my_set", "item1")
61
+ {:ok, removed_count} = Redis.PubSub.srem("my_set", ["item2", "item3"])
62
+
63
+ # Get all members
64
+ {:ok, members} = Redis.PubSub.smembers("my_set")
65
+ ```
66
+
67
+ ## 2. Transactions
68
+
69
+ You can execute Redis commands transactionally via `MULTI` and `EXEC` using the wrappers:
70
+
71
+ ```elixir
72
+ Redis.PubSub.transaction() # Sends MULTI
73
+ Redis.PubSub.set("key1", "val1")
74
+ Redis.PubSub.set("key2", "val2")
75
+ Redis.PubSub.commit() # Sends EXEC
76
+ ```
77
+
78
+ *Note: Each wrapper dispatches its command through the shared `:poolboy` pool as a separate checkout, so the `MULTI` … `EXEC` sequence is not pinned to a single Redis connection and concurrent callers may interleave commands into your transaction. Be cautious when using Redis transactions through the global pool; ensure your design guarantees the entire sequence runs on one connection.*
79
+
80
+ ## 3. PubSub Features
81
+
82
+ Through `:redis_pubsub` pool, builderx_api can support pub-sub channels.
83
+
84
+ ```elixir
85
+ # To subscribe the current process to a channel
86
+ Redis.PubSub.subscribe("chat_room_1", self())
87
+
88
+ # To unsubscribe
89
+ Redis.PubSub.unsubscribe("chat_room_1", self())
90
+
91
+ # To publish to a channel
92
+ {:ok, subscribers_received} = Redis.PubSub.publish("chat_room_1", "Hello World!")
93
+ ```