devskill 2.0.3 → 2.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/.claude/settings.local.json +11 -0
  2. package/LICENSE +21 -0
  3. package/README.md +4 -1
  4. package/landing/AGENTS.md +5 -0
  5. package/landing/CLAUDE.md +1 -0
  6. package/landing/README.md +36 -0
  7. package/landing/app/[locale]/layout.tsx +58 -0
  8. package/landing/app/[locale]/page.tsx +291 -0
  9. package/landing/app/globals.css +129 -0
  10. package/landing/app/icon.tsx +41 -0
  11. package/landing/app/opengraph-image.tsx +69 -0
  12. package/landing/components/ui/accordion.tsx +74 -0
  13. package/landing/components/ui/badge.tsx +52 -0
  14. package/landing/components/ui/button.tsx +60 -0
  15. package/landing/components/ui/card.tsx +103 -0
  16. package/landing/components/ui/copy-button.tsx +60 -0
  17. package/landing/components/ui/navigation-menu.tsx +168 -0
  18. package/landing/components.json +25 -0
  19. package/landing/eslint.config.mjs +18 -0
  20. package/landing/i18n/request.ts +17 -0
  21. package/landing/i18n/routing.ts +17 -0
  22. package/landing/lib/utils.ts +6 -0
  23. package/landing/messages/en.json +32 -0
  24. package/landing/messages/vi.json +32 -0
  25. package/landing/middleware.ts +9 -0
  26. package/landing/next.config.ts +10 -0
  27. package/landing/package-lock.json +10540 -0
  28. package/landing/package.json +35 -0
  29. package/landing/postcss.config.mjs +7 -0
  30. package/landing/public/file.svg +1 -0
  31. package/landing/public/globe.svg +1 -0
  32. package/landing/public/next.svg +1 -0
  33. package/landing/public/vercel.svg +1 -0
  34. package/landing/public/window.svg +1 -0
  35. package/landing/tsconfig.json +34 -0
  36. package/meta.ts +5 -1
  37. package/package.json +7 -1
  38. package/skills/builderx_api-kafka/SKILL.md +175 -0
  39. package/skills/builderx_api-mongodb/SKILL.md +93 -0
  40. package/skills/builderx_api-rabbitmq/SKILL.md +169 -0
  41. package/skills/builderx_api-redis/SKILL.md +93 -0
@@ -0,0 +1,35 @@
1
+ {
2
+ "name": "landing",
3
+ "version": "0.1.0",
4
+ "private": true,
5
+ "scripts": {
6
+ "dev": "next dev",
7
+ "build": "next build",
8
+ "start": "next start",
9
+ "lint": "eslint"
10
+ },
11
+ "dependencies": {
12
+ "@base-ui/react": "^1.3.0",
13
+ "class-variance-authority": "^0.7.1",
14
+ "clsx": "^2.1.1",
15
+ "framer-motion": "^12.38.0",
16
+ "lucide-react": "^1.0.1",
17
+ "next": "^16.2.1",
18
+ "next-intl": "^4.8.3",
19
+ "react": "^19.2.4",
20
+ "react-dom": "^19.2.4",
21
+ "shadcn": "^4.1.0",
22
+ "tailwind-merge": "^3.5.0",
23
+ "tw-animate-css": "^1.4.0"
24
+ },
25
+ "devDependencies": {
26
+ "@tailwindcss/postcss": "^4.2.2",
27
+ "@types/node": "^20",
28
+ "@types/react": "^19",
29
+ "@types/react-dom": "^19",
30
+ "eslint": "^9",
31
+ "eslint-config-next": "16.2.1",
32
+ "tailwindcss": "^4.2.2",
33
+ "typescript": "^5"
34
+ }
35
+ }
@@ -0,0 +1,7 @@
1
+ const config = {
2
+ plugins: {
3
+ "@tailwindcss/postcss": {},
4
+ },
5
+ };
6
+
7
+ export default config;
@@ -0,0 +1 @@
1
+ <svg fill="none" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg"><path d="M14.5 13.5V5.41a1 1 0 0 0-.3-.7L9.8.29A1 1 0 0 0 9.08 0H1.5v13.5A2.5 2.5 0 0 0 4 16h8a2.5 2.5 0 0 0 2.5-2.5m-1.5 0v-7H8v-5H3v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1M9.5 5V2.12L12.38 5zM5.13 5h-.62v1.25h2.12V5zm-.62 3h7.12v1.25H4.5zm.62 3h-.62v1.25h7.12V11z" clip-rule="evenodd" fill="#666" fill-rule="evenodd"/></svg>
@@ -0,0 +1 @@
1
+ <svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><g clip-path="url(#a)"><path fill-rule="evenodd" clip-rule="evenodd" d="M10.27 14.1a6.5 6.5 0 0 0 3.67-3.45q-1.24.21-2.7.34-.31 1.83-.97 3.1M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16m.48-1.52a7 7 0 0 1-.96 0H7.5a4 4 0 0 1-.84-1.32q-.38-.89-.63-2.08a40 40 0 0 0 3.92 0q-.25 1.2-.63 2.08a4 4 0 0 1-.84 1.31zm2.94-4.76q1.66-.15 2.95-.43a7 7 0 0 0 0-2.58q-1.3-.27-2.95-.43a18 18 0 0 1 0 3.44m-1.27-3.54a17 17 0 0 1 0 3.64 39 39 0 0 1-4.3 0 17 17 0 0 1 0-3.64 39 39 0 0 1 4.3 0m1.1-1.17q1.45.13 2.69.34a6.5 6.5 0 0 0-3.67-3.44q.65 1.26.98 3.1M8.48 1.5l.01.02q.41.37.84 1.31.38.89.63 2.08a40 40 0 0 0-3.92 0q.25-1.2.63-2.08a4 4 0 0 1 .85-1.32 7 7 0 0 1 .96 0m-2.75.4a6.5 6.5 0 0 0-3.67 3.44 29 29 0 0 1 2.7-.34q.31-1.83.97-3.1M4.58 6.28q-1.66.16-2.95.43a7 7 0 0 0 0 2.58q1.3.27 2.95.43a18 18 0 0 1 0-3.44m.17 4.71q-1.45-.12-2.69-.34a6.5 6.5 0 0 0 3.67 3.44q-.65-1.27-.98-3.1" fill="#666"/></g><defs><clipPath id="a"><path fill="#fff" d="M0 0h16v16H0z"/></clipPath></defs></svg>
@@ -0,0 +1 @@
1
+ <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg>
@@ -0,0 +1 @@
1
+ <svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1155 1000"><path d="m577.3 0 577.4 1000H0z" fill="#fff"/></svg>
@@ -0,0 +1 @@
1
+ <svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path fill-rule="evenodd" clip-rule="evenodd" d="M1.5 2.5h13v10a1 1 0 0 1-1 1h-11a1 1 0 0 1-1-1zM0 1h16v11.5a2.5 2.5 0 0 1-2.5 2.5h-11A2.5 2.5 0 0 1 0 12.5zm3.75 4.5a.75.75 0 1 0 0-1.5.75.75 0 0 0 0 1.5M7 4.75a.75.75 0 1 1-1.5 0 .75.75 0 0 1 1.5 0m1.75.75a.75.75 0 1 0 0-1.5.75.75 0 0 0 0 1.5" fill="#666"/></svg>
@@ -0,0 +1,34 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2017",
4
+ "lib": ["dom", "dom.iterable", "esnext"],
5
+ "allowJs": true,
6
+ "skipLibCheck": true,
7
+ "strict": true,
8
+ "noEmit": true,
9
+ "esModuleInterop": true,
10
+ "module": "esnext",
11
+ "moduleResolution": "bundler",
12
+ "resolveJsonModule": true,
13
+ "isolatedModules": true,
14
+ "jsx": "react-jsx",
15
+ "incremental": true,
16
+ "plugins": [
17
+ {
18
+ "name": "next"
19
+ }
20
+ ],
21
+ "paths": {
22
+ "@/*": ["./*"]
23
+ }
24
+ },
25
+ "include": [
26
+ "next-env.d.ts",
27
+ "**/*.ts",
28
+ "**/*.tsx",
29
+ ".next/types/**/*.ts",
30
+ ".next/dev/types/**/*.ts",
31
+ "**/*.mts"
32
+ ],
33
+ "exclude": ["node_modules"]
34
+ }
package/meta.ts CHANGED
@@ -118,6 +118,10 @@ export const collections: Record<string, string[]> = {
118
118
  'builderx_api': [
119
119
  'builderx_api-schemas',
120
120
  'builderx_api-controllers',
121
- 'builderx_api-contexts'
121
+ 'builderx_api-contexts',
122
+ 'builderx_api-kafka',
123
+ 'builderx_api-redis',
124
+ 'builderx_api-rabbitmq',
125
+ 'builderx_api-mongodb',
122
126
  ]
123
127
  }
package/package.json CHANGED
@@ -1,6 +1,12 @@
1
1
  {
2
2
  "name": "devskill",
3
- "version": "2.0.3",
3
+ "version": "2.0.5",
4
+ "description": "Equip Cursor, Windsurf, Cline, Antigravity, Claude Code, Codex, GitHub Copilot and other AI Agents with expert programming superpowers via a single interactive prompt.",
5
+ "homepage": "https://vskill.vercel.app",
6
+ "repository": {
7
+ "type": "git",
8
+ "url": "https://github.com/vuluu2k/skills.git"
9
+ },
4
10
  "type": "module",
5
11
  "bin": {
6
12
  "devskill": "bin/devskill.js"
@@ -0,0 +1,175 @@
1
+ ---
2
+ description: Patterns for using Kafka (:brod), creating producers, consumers, and offset management in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API Kafka Skill
6
+
7
+ The `builderx_api` project uses the `:brod` Erlang package for interacting with Kafka. The central coordination module is `Kafka` (`lib/kafka/kafka.ex`), which starts the `:brod` client and registers producers and consumers under its supervisor.
8
+
9
+ ## 1. Creating a New Consumer
10
+
11
+ When you need to consume data from a new Kafka topic, follow the pattern established in `Kafka.QuestConsumer`. You need a GenServer that subscribes to `:brod` and relies on Redis to manage consumer offsets.
12
+
13
+ ### Example Structure (`lib/kafka/my_new_consumer.ex`):
14
+
15
+ ```elixir
16
+ defmodule Kafka.MyNewConsumer do
17
+ use GenServer
18
+ import Record, only: [defrecord: 2, extract: 2]
19
+
20
+ alias BuilderxApi.{Tools}
21
+
22
+ @topic "my.new.kafka.topic"
23
+ @prefetch_count 5
24
+
25
+ defmodule State do
26
+ @enforce_keys [:consumer_pid, :partition]
27
+ defstruct consumer_pid: nil, partition: nil
28
+ end
29
+
30
+ defmodule KafkaMessage do
31
+ @enforce_keys [:offset, :key, :value, :ts]
32
+ defstruct offset: nil, key: nil, value: nil, ts: nil
33
+ end
34
+
35
+ defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl")
36
+ defrecord :kafka_message_set, extract(:kafka_message_set, from_lib: "brod/include/brod.hrl")
37
+ defrecord :kafka_fetch_error, extract(:kafka_fetch_error, from_lib: "brod/include/brod.hrl")
38
+
39
+ def start_link(opts) do
40
+ GenServer.start_link(__MODULE__, opts)
41
+ end
42
+
43
+ def init({client_id, partition}) do
44
+ consumer_config = [
45
+ prefetch_count: @prefetch_count,
46
+ max_bytes: @prefetch_count * 1024, # 1KB per message
47
+ max_wait_time: 0
48
+ ]
49
+
50
+ :ok = :brod.start_consumer(client_id, @topic, consumer_config)
51
+
52
+ {:ok, consumer_pid} = :brod.subscribe(client_id, self(), @topic, partition, consumer_config)
53
+
54
+ # Trigger the first fetch manually from Redis offset
55
+ Process.send(self(), {:fetch_message}, [])
56
+
57
+ {:ok, %State{consumer_pid: consumer_pid, partition: partition}}
58
+ end
59
+
60
+ # Receive fetched messages
61
+ def handle_info(
62
+ {consumer_pid, kafka_message_set(messages: msgs)},
63
+ %State{consumer_pid: consumer_pid, partition: partition} = state
64
+ ) do
65
+ msgs = Enum.map(msgs, &kafka_message_to_struct(&1))
66
+
67
+ Enum.each(msgs, fn msg ->
68
+ # Decode and process your message here
69
+ try do
70
+ parsed = Jason.decode!(msg.value)
71
+ # process parsed data...
72
+ rescue _ -> nil
73
+ end
74
+ end)
75
+
76
+ # Acknowledge messages and update Redis offset
77
+ for msg <- msgs do
78
+ key = "kafka_topic:#{@topic}:#{partition}"
79
+ Redis.PubSub.set(key, msg.offset)
80
+ :brod.consume_ack(consumer_pid, msg.offset)
81
+ end
82
+
83
+ {:noreply, state}
84
+ end
85
+
86
+ # Handle fetch errors
87
+ def handle_info({_pid, kafka_fetch_error()} = error, state) do
88
+ Logger.error("KAFKA: my_consumer fetch error #{inspect(error)}")
89
+ {:noreply, state}
90
+ end
91
+
92
+ # Fetch initially using the offset saved in Redis
93
+ def handle_info({:fetch_message}, %State{partition: partition} = state) do
94
+ host = System.get_env("KAFKA1_HOST")
95
+ port = System.get_env("KAFKA1_PORT") |> String.to_integer()
96
+ bootstrapEndpoints = [{host, port}]
97
+
98
+ key = "kafka_topic:#{@topic}:#{partition}"
99
+ {:ok, offset} = Redis.PubSub.get(key)
100
+ offset = Tools.to_int(offset)
101
+
102
+ with {:ok, batch} <- :brod.fetch(bootstrapEndpoints, @topic, partition, offset, %{}) do
103
+ {latest_offset, msgs} = batch
104
+
105
+ Enum.each(msgs, fn msg ->
106
+ msg = kafka_message_to_struct(msg)
107
+ # Often we just resend the message, or process it immediately.
108
+ # This behaves as a synchronization mechanism on start.
109
+ end)
110
+
111
+ Redis.PubSub.set(key, latest_offset)
112
+ end
113
+
114
+ {:noreply, state}
115
+ end
116
+
117
+ defp kafka_message_to_struct(kafka_message(offset: offset, key: key, value: value, ts: ts)) do
118
+ %KafkaMessage{
119
+ offset: offset,
120
+ key: key,
121
+ value: value,
122
+ ts: DateTime.from_unix!(ts, :millisecond)
123
+ }
124
+ end
125
+ end
126
+ ```
127
+
128
+ ## 2. Registering the Consumer
129
+
130
+ Consumer registration is done in `lib/kafka/kafka.ex` based on the hostname, to support distinct consumer groups and parallel partition processing.
131
+
132
+ 1. Ensure the `:brod` client has the client_id `:kafka_client`.
133
+ 2. Find the correct worker hostname conditions (e.g., `publish-consumer-01`, `publish-consumer-02`).
134
+ 3. Note how the *second argument* often denotes the Kafka **partition** index. For `publish-consumer-01`, it's usually `0`; for `publish-consumer-02`, it's `1`, etc.
135
+
136
+ ```elixir
137
+ "publish-consumer-01" ->
138
+ [
139
+ {KafkaProducer, {:kafka_client}},
140
+ {Kafka.QuestConsumer, {:kafka_client, 0}},
141
+ {Kafka.MyNewConsumer, {:kafka_client, 0}} # <--- Add your consumer for partition 0
142
+ ]
143
+
144
+ "publish-consumer-02" ->
145
+ [
146
+ {KafkaProducer, {:kafka_client}},
147
+ {Kafka.QuestConsumer, {:kafka_client, 1}},
148
+ {Kafka.MyNewConsumer, {:kafka_client, 1}} # <--- Add your consumer for partition 1
149
+ ]
150
+ ```
151
+
152
+ ## 3. Publishing Messages
153
+
154
+ All topics you wish to publish to must be registered in `@topics` in `lib/kafka/kafka_producer.ex`.
155
+
156
+ In `lib/kafka/kafka_producer.ex`:
157
+ ```elixir
158
+ @topics [
159
+ "store.cache.agg_products",
160
+ "store.queuing.questdb",
161
+ "my.new.kafka.topic" # <--- Register new topic
162
+ ]
163
+ ```
164
+
165
+ To publish a message, use `KafkaProducer.publish/4`:
166
+
167
+ ```elixir
168
+ topic = "my.new.kafka.topic"
169
+ partition = 0 # Generally, you need to manage partition distribution (e.g., hash the key)
170
+ key = "your_message_key"
171
+ message = Jason.encode!(%{hello: "world"})
172
+
173
+ # Send synchronously
174
+ :ok = KafkaProducer.publish(topic, partition, key, message)
175
+ ```
@@ -0,0 +1,93 @@
1
+ ---
2
+ description: Patterns for using MongoDB driver and dynamic collections in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API MongoDB Skill
6
+
7
+ The `builderx_api` project integrates MongoDB via the `mongodb_driver` alongside its primary Postgres (Citus) database. This is used extensively for the *Dynamic Database Collections* feature in `BuilderxApi.DBCollections.DBCollections` (`lib/builderx_api/db_collections/db_collections.ex`).
8
+
9
+ In this pattern, metadata about the data models (schema) is stored in Postgres (`DBCollection`), but the actual records are physically stored in MongoDB (`MongoRepo`) using a single `records` table separated by `table_name` and `site_id`.
10
+
11
+ ## 1. Interacting with MongoDB Collections
12
+
13
+ You should generally not interact with `MongoRepo` directly unless you are inside the `builderx_api/db_collections/...` scope.
14
+
15
+ Instead, use `DBCollections`:
16
+
17
+ ### Checking if a record exists
18
+ ```elixir
19
+ filters = %{"slug" => "my-record"}
20
+
21
+ # conn must have assigns for customer, account, or is_check_record_creator as required
22
+ DBCollections.exists_record(table_name, filters, db_collection_struct, conn)
23
+ # => {:ok, true | false}
24
+ ```
25
+
26
+ ### Querying records
27
+ Retrieves customized results based on dynamic schemas.
28
+
29
+ ```elixir
30
+ select = %{"id" => 1, "name" => 1}
31
+ filters = %{"status" => "active"}
32
+ limit = 10
33
+ skip = 0
34
+ sort = %{"inserted_at" => -1} # Use 1 for ASC, -1 for DESC
35
+ populate = [] # Populate relations if any references are configured
36
+ params = %{"site_id" => "site_uuid"}
37
+
38
+ DBCollections.query_record(
39
+ table_name,
40
+ select,
41
+ filters,
42
+ sort,
43
+ limit,
44
+ skip,
45
+ populate,
46
+ params,
47
+ conn
48
+ )
49
+ # => List of normalized maps
50
+ ```
51
+
52
+ ### Inserting records
53
+ ```elixir
54
+ # attrs is a list of map: [%{"field_name" => "name", "field_value" => "Record 1"}]
55
+ # Note that we use a custom key format for dynamic mapping.
56
+
57
+ {:ok, inserted_record} = DBCollections.insert_record(table_name, attrs, params, conn)
58
+ ```
59
+
60
+ ## 2. Using `MongoRepo` directly
61
+
62
+ The `BuilderxApi.MongoRepo` is an abstraction over `:mongo` (the `mongodb_driver` pool).
63
+ For some administrative actions, it is called directly:
64
+
65
+ ```elixir
66
+ alias BuilderxApi.MongoRepo
67
+
68
+ table = "records"
69
+
70
+ # Find
71
+ records = MongoRepo.find(table, %{"site_id" => site_id, "table_name" => "users"})
72
+
73
+ # Find one
74
+ record = MongoRepo.find_one(table, %{"_id" => id})
75
+
76
+ # Update Many
77
+ MongoRepo.update_many(
78
+ table,
79
+ %{"site_id" => site_id, "table_name" => "users"},
80
+ %{"$unset" => %{"webcmscol_removed_field" => ""}}
81
+ )
82
+
83
+ # Insert Many
84
+ MongoRepo.insert_many(table, list_of_maps)
85
+
86
+ # Delete Many
87
+ MongoRepo.delete_many(table, %{"site_id" => site_id, "table_name" => "users"})
88
+ ```
89
+
90
+ ### Important Patterns
91
+ - `webcmscol_`: The system prepends `webcmscol_` to column names stored in MongoDB to prevent clashes with system variables like `_id`, `site_id`, `table_name`. You will see operations map/unmap this prefix (`DBUtils.sanitize_column_name/1`).
92
+ - **Caching Counts**: Because counting documents in Mongo can become a bottleneck as collections grow, the total document count per site collection is cached in Redis: `db_collection_records::{site_id}`.
93
+ - All MongoDB records share the `records` collection but are differentiated by standard root fields: `"site_id"` and `"table_name"`.
@@ -0,0 +1,169 @@
1
+ ---
2
+ description: Patterns for using RabbitMQ (AMQP), creating consumers/workers, and publishing messages in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API RabbitMQ Skill
6
+
7
+ The `builderx_api` project uses the `amqp` package for interacting with RabbitMQ. All RabbitMQ operations are coordinated by the central `Rabbit` (`lib/rabbit/rabbit.ex`) module, which handles connection pooling, establishing connection channels automatically, and distributing work to supervisors.
8
+
9
+ ## 1. Creating a New Consumer (Worker)
10
+
11
+ When you need to create a new background worker to consume events from RabbitMQ, you should structure it following the existing consumer patterns (e.g., `OrderConsumer`, `IndexingConsumer`). This includes subscribing to a queue, setting prefetch count, configuring dead letter queues for errors, and optionally configuring wait queues for retry logic.
12
+
13
+ ### Example Structure (`lib/rabbit/my_new_consumer.ex`):
14
+
15
+ ```elixir
16
+ defmodule Rabbit.MyNewConsumer do
17
+ require Logger
18
+ use GenServer
19
+ use AMQP
20
+
21
+ alias BuilderxApi.Tools
22
+ alias Rabbit
23
+ alias Worker.MainWorker
24
+
25
+ @queue_base "my_new_queue_name"
26
+ @storecake_v2_exchange "storecake_v2_ex"
27
+ @storecake_v2_exchange_deadletter "storecake_v2_ex_deadletter"
28
+ @sync_queue_error "my_new_queue_error"
29
+ @prefetch_count 20
30
+
31
+ # Client API
32
+ def start_link() do
33
+ GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
34
+ end
35
+
36
+ def child_spec(_args) do
37
+ %{
38
+ id: __MODULE__,
39
+ start: {__MODULE__, :start_link, []}
40
+ }
41
+ end
42
+
43
+ def channel_available(chan) do
44
+ GenServer.cast(__MODULE__, {:channel_available, chan})
45
+ end
46
+
47
+ def consumer_tag() do
48
+ {:ok, hostname} = :inet.gethostname()
49
+ "#{hostname}-my-new-consumer"
50
+ end
51
+
52
+ # Server Callbacks
53
+ def init(:ok) do
54
+ # Request a channel once the gen server is initialized
55
+ Rabbit.request_channel(__MODULE__)
56
+ {:ok, nil}
57
+ end
58
+
59
+ def publish(payload) do
60
+ GenServer.cast(__MODULE__, {:publish, payload})
61
+ end
62
+
63
+ def handle_cast({:publish, payload}, channel) do
64
+ # When publishing directly through this consumer's channel
65
+ message = Jason.decode!(payload)
66
+ queue = get_queue(message)
67
+
68
+ AMQP.Basic.publish(channel, @storecake_v2_exchange, queue, payload, persistent: true)
69
+
70
+ {:noreply, channel}
71
+ end
72
+
73
+ def handle_cast({:channel_available, channel}, _state) do
74
+ Logger.info("CHANNEL_AVAILABLE FOR MY NEW CONSUMER")
75
+
76
+ Basic.qos(channel, prefetch_count: @prefetch_count)
77
+
78
+ Queue.declare(channel, @queue_base,
79
+ durable: true,
80
+ arguments: [
81
+ {"x-dead-letter-exchange", :longstr, @storecake_v2_exchange_deadletter},
82
+ {"x-dead-letter-routing-key", :longstr, @sync_queue_error}
83
+ ]
84
+ )
85
+
86
+ # Note: If implementing retries with delayed messages, declare wait queues here
87
+ # and bind them as in OrderConsumer.ex.
88
+
89
+ Queue.bind(channel, @queue_base, @storecake_v2_exchange, routing_key: @queue_base)
90
+ Queue.bind(channel, @sync_queue_error, @storecake_v2_exchange, routing_key: @sync_queue_error)
91
+
92
+ {:ok, _consumer_tag} = Basic.consume(channel, @queue_base, self(), consumer_tag: consumer_tag())
93
+
94
+ {:noreply, channel}
95
+ end
96
+
97
+ # Basic AMQP handlers...
98
+ def handle_info({:basic_consume_ok, %{consumer_tag: _consumer_tag}}, chan), do: {:noreply, chan}
99
+ def handle_info({:basic_cancel, %{consumer_tag: _consumer_tag}}, chan), do: {:stop, :normal, chan}
100
+ def handle_info({:basic_cancel_ok, %{consumer_tag: _consumer_tag}}, chan), do: {:noreply, chan}
101
+
102
+ def handle_info({:basic_deliver, payload, %{delivery_tag: tag, redelivered: redelivered}}, chan) do
103
+ spawn(fn -> consume(chan, tag, redelivered, payload) end)
104
+ {:noreply, chan}
105
+ end
106
+
107
+ def consume(chan, tag, _redelivered, payload) do
108
+ try do
109
+ # Pass data to your worker implementation
110
+ MainWorker.assign(Jason.decode!(payload))
111
+ rescue
112
+ e -> on_error(payload, e, __STACKTRACE__)
113
+ after
114
+ AMQP.Basic.ack(chan, tag)
115
+ end
116
+ end
117
+
118
+ def on_error(message, exception, stacktrace \\ []) do
119
+ Logger.error("Error consuming message: #{inspect(exception)}")
120
+ # Trigger retry logic if applicable using RabbitMQ delayed messages
121
+ end
122
+
123
+ def get_queue(_payload) do
124
+ @queue_base
125
+ end
126
+ end
127
+ ```
128
+
129
+ ## 2. Registering the Worker
130
+
131
+ After creating the worker, **you MUST register it in the `Rabbit` supervisor** (`lib/rabbit/rabbit.ex`), otherwise it will not start and not consume any queues.
132
+
133
+ Open `lib/rabbit/rabbit.ex` and:
134
+
135
+ 1. Alias your new consumer at the top:
136
+ ```elixir
137
+ alias Rabbit.{
138
+ ProductConsumer,
139
+ OrderConsumer,
140
+ # ...
141
+ MyNewConsumer
142
+ }
143
+ ```
144
+ 2. In the `init(:ok)` function, append it to the `children` list for the specific worker hostnames (e.g., `store-worker-01`, `store-worker-02`).
145
+
146
+ ```elixir
147
+ "store-worker-01" ->
148
+ [
149
+ {ProductConsumer, []},
150
+ {OrderConsumer, []},
151
+ # ... existing consumers ...
152
+ {MyNewConsumer, []} # <------ Add here
153
+ ]
154
+ ```
155
+
156
+ ## 3. Publishing Messages
157
+
158
+ To publish a message from any location in the application without needing a specific consumer channel or `handle_cast` call, use the general `Rabbit.publish_message/2` helper provided in `Rabbit`:
159
+
160
+ ```elixir
161
+ message_payload = %WorkerMessage{
162
+ action: "sync_something_new",
163
+ turn: 0,
164
+ data: %{id: 123, status: "pending"}
165
+ }
166
+
167
+ # The queue name should match a binding routing key in your consumer setup
168
+ Rabbit.publish_message(message_payload, "my_new_queue_name")
169
+ ```
@@ -0,0 +1,93 @@
1
+ ---
2
+ description: Patterns for using Redis caching, PubSub, and Poolboy in BuilderX API
3
+ ---
4
+
5
+ # BuilderX API Redis Skill
6
+
7
+ The `builderx_api` project uses the `redix` library combined with Erlang's `:poolboy` for connection pooling. Standard usages revolve entirely around the `Redis.PubSub` module (`lib/redis/redis_pubsub.ex`).
8
+
9
+ ## 1. General Redis Commands
10
+
11
+ The `Redis.PubSub` module exposes wrapper functions for common Redis operations. Under the hood, they use `Redix.command/2` within a `:poolboy.transaction/2` call targeting the `:redis_poolex` pool.
12
+
13
+ ### Keys and Strings
14
+ ```elixir
15
+ # GET a key
16
+ {:ok, value} = Redis.PubSub.get("my_key")
17
+
18
+ # SET a key
19
+ {:ok, "OK"} = Redis.PubSub.set("my_key", "value")
20
+
21
+ # SET a key with expiration (in seconds)
22
+ {:ok, "OK"} = Redis.PubSub.set("my_key", "value", 3600)
23
+
24
+ # Delete keys
25
+ {:ok, deleted_count} = Redis.PubSub.del("my_key")
26
+ {:ok, deleted_count} = Redis.PubSub.del(["key1", "key2"])
27
+
28
+ # Expire an existing key
29
+ {:ok, 1} = Redis.PubSub.expire("my_key", 60)
30
+ ```
31
+
32
+ ### Counters
33
+ ```elixir
34
+ # Increment by 1
35
+ {:ok, new_val} = Redis.PubSub.incr("visits")
36
+
37
+ # Increment by N
38
+ {:ok, new_val} = Redis.PubSub.incr("visits", 5)
39
+ ```
40
+
41
+ ### Hash Maps
42
+ ```elixir
43
+ # Increment a field inside a hash
44
+ {:ok, new_val} = Redis.PubSub.hincrby("user:123", "login_count", 1)
45
+
46
+ # Get a field from a hash
47
+ {:ok, value} = Redis.PubSub.hget("user:123", "name")
48
+
49
+ # Get entire hash
50
+ {:ok, list_of_pairs} = Redis.PubSub.hgetall("user:123")
51
+ ```
52
+
53
+ ### Sets
54
+ ```elixir
55
+ # Add to Set
56
+ {:ok, added_count} = Redis.PubSub.sadd("my_set", "item1")
57
+ {:ok, added_count} = Redis.PubSub.sadd("my_set", ["item2", "item3"])
58
+
59
+ # Remove from Set
60
+ {:ok, removed_count} = Redis.PubSub.srem("my_set", "item1")
61
+ {:ok, removed_count} = Redis.PubSub.srem("my_set", ["item2", "item3"])
62
+
63
+ # Get all members
64
+ {:ok, members} = Redis.PubSub.smembers("my_set")
65
+ ```
66
+
67
+ ## 2. Transactions
68
+
69
+ You can execute Redis commands transactionally via `MULTI` and `EXEC` using the wrappers:
70
+
71
+ ```elixir
72
+ Redis.PubSub.transaction() # Sends MULTI
73
+ Redis.PubSub.set("key1", "val1")
74
+ Redis.PubSub.set("key2", "val2")
75
+ Redis.PubSub.commit() # Sends EXEC
76
+ ```
77
+
78
+ *Note: Each wrapper call checks out a connection from the shared pool, so the `MULTI` and `EXEC` commands are not guaranteed to run on the same connection as the commands between them. Be cautious when using Redis transactions through the global pool, and ensure your pipeline design accounts for this concurrency hazard.*
79
+
80
+ ## 3. PubSub Features
81
+
82
+ Through `:redis_pubsub` pool, builderx_api can support pub-sub channels.
83
+
84
+ ```elixir
85
+ # To subscribe the current process to a channel
86
+ Redis.PubSub.subscribe("chat_room_1", self())
87
+
88
+ # To unsubscribe
89
+ Redis.PubSub.unsubscribe("chat_room_1", self())
90
+
91
+ # To publish to a channel
92
+ {:ok, subscribers_received} = Redis.PubSub.publish("chat_room_1", "Hello World!")
93
+ ```