@vira-ui/cli 0.3.2-alpha → 0.4.0-alpha
This diff shows the changes between package versions that have been publicly released to their registry. It is provided for informational purposes only and reflects the content of those versions as they appear in the public registry.
- package/dist/go/appYaml.js +34 -0
- package/dist/go/backendEnvExample.js +21 -0
- package/dist/go/backendReadme.js +18 -0
- package/dist/go/channelHelpers.js +29 -0
- package/dist/go/configGo.js +262 -0
- package/dist/go/dbGo.js +47 -0
- package/dist/go/dbYaml.js +11 -0
- package/dist/go/dockerCompose.js +38 -0
- package/dist/go/dockerComposeProd.js +54 -0
- package/dist/go/dockerfile.js +19 -0
- package/dist/go/eventHandlerTemplate.js +34 -0
- package/dist/go/eventsAPI.js +414 -0
- package/dist/go/goMod.js +20 -0
- package/dist/go/kafkaGo.js +71 -0
- package/dist/go/kafkaYaml.js +10 -0
- package/dist/go/kanbanHandlers.js +221 -0
- package/dist/go/mainGo.js +527 -0
- package/dist/go/readme.js +14 -0
- package/dist/go/redisGo.js +35 -0
- package/dist/go/redisYaml.js +8 -0
- package/dist/go/registryGo.js +47 -0
- package/dist/go/sqlcYaml.js +17 -0
- package/dist/go/stateStore.js +119 -0
- package/dist/go/typesGo.js +15 -0
- package/dist/go/useViraState.js +160 -0
- package/dist/go/useViraStream.js +167 -0
- package/dist/index.js +608 -200
- package/dist/react/appTsx.js +52 -0
- package/dist/react/envExample.js +7 -0
- package/dist/react/envLocal.js +5 -0
- package/dist/react/indexCss.js +22 -0
- package/dist/react/indexHtml.js +16 -0
- package/dist/react/kanbanAppTsx.js +34 -0
- package/dist/react/kanbanBoard.js +63 -0
- package/dist/react/kanbanCard.js +65 -0
- package/dist/react/kanbanColumn.js +67 -0
- package/dist/react/kanbanModels.js +37 -0
- package/dist/react/kanbanService.js +119 -0
- package/dist/react/mainTsx.js +16 -0
- package/dist/react/tsconfig.js +25 -0
- package/dist/react/viteConfig.js +31 -0
- package/package.json +3 -4

package/dist/go/mainGo.js
@@ -0,0 +1,527 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.mainGo = void 0;
+exports.mainGo = `package main
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "os/signal"
+    "strconv"
+    "strings"
+    "sync"
+    "syscall"
+    "time"
+
+    "github.com/go-chi/chi/v5"
+    "github.com/go-chi/chi/v5/middleware"
+    "github.com/go-chi/cors"
+    "github.com/gorilla/websocket"
+    "github.com/google/uuid"
+    "github.com/rs/zerolog"
+
+    "vira-engine-backend/internal/cache"
+    "vira-engine-backend/internal/config"
+    "vira-engine-backend/internal/db"
+    "vira-engine-backend/internal/events"
+)
+
+type ctxKey string
+
+const (
+    ctxReqID  ctxKey = "reqID"
+    ctxUserID ctxKey = "userID"
+)
+
+type healthResponse struct {
+    Status string \`json:"status"\`
+    Time   string \`json:"time"\`
+    DB     string \`json:"db"\`
+    Redis  string \`json:"redis"\`
+    Kafka  string \`json:"kafka"\`
+}
+
+// wsHub wraps events.Hub with WebSocket connection management.
+type wsHub struct {
+    *events.Hub
+    mu          sync.Mutex
+    clients     map[*websocket.Conn]bool
+    subs        map[string]map[*websocket.Conn]bool
+    sessions    map[*websocket.Conn]string
+    sessionSubs map[string][]string
+}
+
+func newWSHub(eventHub *events.Hub) *wsHub {
+    return &wsHub{
+        Hub:         eventHub,
+        clients:     make(map[*websocket.Conn]bool),
+        subs:        make(map[string]map[*websocket.Conn]bool),
+        sessions:    make(map[*websocket.Conn]string),
+        sessionSubs: make(map[string][]string),
+    }
+}
+
+func (h *wsHub) add(c *websocket.Conn) {
+    h.mu.Lock()
+    h.clients[c] = true
+    h.mu.Unlock()
+}
+
+func (h *wsHub) remove(c *websocket.Conn) {
+    h.mu.Lock()
+    delete(h.clients, c)
+    for ch, set := range h.subs {
+        delete(set, c)
+        if len(set) == 0 {
+            delete(h.subs, ch)
+        }
+    }
+    delete(h.sessions, c)
+    h.mu.Unlock()
+    _ = c.Close()
+}
+
+func (h *wsHub) setSession(c *websocket.Conn, session string) {
+    h.mu.Lock()
+    h.sessions[c] = session
+    // Restore previous subscriptions if session exists
+    if subs, ok := h.sessionSubs[session]; ok {
+        for _, ch := range subs {
+            if _, exists := h.subs[ch]; !exists {
+                h.subs[ch] = make(map[*websocket.Conn]bool)
+            }
+            h.subs[ch][c] = true
+        }
+    }
+    h.mu.Unlock()
+}
+
+func (h *wsHub) subscribe(c *websocket.Conn, channel string) {
+    h.mu.Lock()
+    if _, ok := h.subs[channel]; !ok {
+        h.subs[channel] = make(map[*websocket.Conn]bool)
+    }
+    h.subs[channel][c] = true
+    session := h.sessions[c]
+    if session != "" {
+        // Persist subscription for session
+        subs := h.sessionSubs[session]
+        found := false
+        for _, ch := range subs {
+            if ch == channel {
+                found = true
+                break
+            }
+        }
+        if !found {
+            h.sessionSubs[session] = append(subs, channel)
+        }
+    }
+    h.mu.Unlock()
+
+    // Send snapshot if available
+    if snap, version, ok := h.Snapshot(channel); ok {
+        msg := events.WSMessage{
+            Type:      "update",
+            Channel:   channel,
+            Data:      snap,
+            VersionNo: version,
+            Ts:        time.Now().UnixMilli(),
+        }
+        raw, _ := json.Marshal(msg)
+        _ = c.WriteMessage(websocket.TextMessage, raw)
+    }
+}
+
+func (h *wsHub) unsubscribe(c *websocket.Conn, channel string) {
+    h.mu.Lock()
+    if set, ok := h.subs[channel]; ok {
+        delete(set, c)
+        if len(set) == 0 {
+            delete(h.subs, channel)
+        }
+    }
+    h.mu.Unlock()
+}
+
+func (h *wsHub) broadcast(channel string, raw json.RawMessage) {
+    h.mu.Lock()
+    targets := h.subs[channel]
+    h.mu.Unlock()
+
+    if len(targets) == 0 {
+        return
+    }
+
+    for c := range targets {
+        if err := c.WriteMessage(websocket.TextMessage, raw); err != nil {
+            _ = c.Close()
+            h.remove(c)
+        }
+    }
+}
+
+func handleDemoEvent(ctx context.Context, hub events.EventEmitter, conn *websocket.Conn, msg events.WSMessage) {
+    var payload map[string]any
+    if len(msg.Data) > 0 {
+        _ = json.Unmarshal(msg.Data, &payload)
+    }
+    ch := "demo"
+    if v, ok := payload["channel"].(string); ok && v != "" {
+        ch = v
+    }
+    hub.Update(ch, payload)
+}
+
+func init() {
+    events.Register("demo.echo", handleDemoEvent)
+    // Kanban handlers are auto-registered via registry_kanban.go (generated by CLI)
+}
+
+var upgrader = websocket.Upgrader{
+    CheckOrigin: func(r *http.Request) bool { return true },
+}
+
+func main() {
+    cfg := config.Load("config/app.yaml")
+    logger := config.NewLogger(cfg)
+    eventHub := events.NewHub()
+    hub := newWSHub(eventHub)
+    // Connect Hub's broadcast to wsHub's broadcast
+    events.SetBroadcaster(hub.broadcast)
+    // Apply state settings
+    if strings.ToLower(cfg.State.DiffMode) == "patch" {
+        eventHub.SetDiffMode(events.DiffModePatch)
+    }
+    if cfg.State.MaxHistory > 0 {
+        eventHub.SetHistoryLimit(cfg.State.MaxHistory)
+    }
+    eventHub.ApplyRegistry() // Apply auto-registered handlers
+
+    r := chi.NewRouter()
+    r.Use(middleware.RequestID)
+    r.Use(middleware.RealIP)
+    r.Use(middleware.Recoverer)
+    r.Use(cors.Handler(cors.Options{
+        AllowedOrigins:   []string{"*"},
+        AllowedMethods:   []string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"},
+        AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type", "X-User-ID", "X-Requested-With"},
+        ExposedHeaders:   []string{"Link"},
+        AllowCredentials: false,
+        MaxAge:           300,
+    }))
+    r.Use(requestContext(logger))
+    r.Use(httpLogger(logger))
+
+    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+    defer stop()
+
+    pool, err := db.NewPool(ctx, cfg, logger)
+    if err != nil {
+        logger.Fatal().Err(err).Msg("failed to init db pool")
+    }
+    defer pool.Close()
+
+    redisClient, err := cache.NewRedisClient(ctx, cfg, logger)
+    if err != nil {
+        logger.Fatal().Err(err).Msg("failed to init redis client")
+    }
+    defer redisClient.Close()
+
+    // Configure persistence
+    if strings.ToLower(cfg.State.Persist) == "redis" {
+        eventHub.SetStore(events.NewRedisStore(redisClient))
+        logger.Info().Msg("state persistence enabled: redis")
+    }
+    if cfg.State.TTLSec > 0 {
+        eventHub.SetTTL(cfg.State.TTLSec)
+    }
+
+    kafkaClient, err := events.NewKafka(cfg, logger)
+    if err != nil {
+        logger.Fatal().Err(err).Msg("failed to init kafka client")
+    }
+    defer kafkaClient.Close()
+
+    r.Get("/healthz", func(w http.ResponseWriter, r *http.Request) {
+        dbStatus := "ok"
+        if err := pool.Ping(r.Context()); err != nil {
+            dbStatus = fmt.Sprintf("error: %v", err)
+        }
+        redisStatus := "ok"
+        if _, err := redisClient.Ping(r.Context()).Result(); err != nil {
+            redisStatus = fmt.Sprintf("error: %v", err)
+        }
+        kafkaStatus := "ok"
+        if err := kafkaClient.Ping(r.Context()); err != nil {
+            kafkaStatus = fmt.Sprintf("error: %v", err)
+        }
+        respondJSON(w, http.StatusOK, healthResponse{
+            Status: "ok",
+            Time:   time.Now().UTC().Format(time.RFC3339),
+            DB:     dbStatus,
+            Redis:  redisStatus,
+            Kafka:  kafkaStatus,
+        })
+    })
+
+    // Debug: current state snapshot
+    r.Get("/api/state/{channel}", func(w http.ResponseWriter, r *http.Request) {
+        ch := chi.URLParam(r, "channel")
+        if ch == "" {
+            respondJSON(w, http.StatusBadRequest, map[string]string{"error": "channel required"})
+            return
+        }
+        snap, ver, ok := eventHub.Snapshot(ch)
+        if !ok {
+            respondJSON(w, http.StatusNotFound, map[string]string{"error": "not found"})
+            return
+        }
+        respondJSON(w, http.StatusOK, map[string]any{
+            "channel":   ch,
+            "versionNo": ver,
+            "data":      json.RawMessage(snap),
+        })
+    })
+
+    // Debug: replay from version
+    r.Get("/api/replay/{channel}", func(w http.ResponseWriter, r *http.Request) {
+        ch := chi.URLParam(r, "channel")
+        if ch == "" {
+            respondJSON(w, http.StatusBadRequest, map[string]string{"error": "channel required"})
+            return
+        }
+        fromStr := r.URL.Query().Get("from")
+        var from int64 = 0
+        if fromStr != "" {
+            if v, err := strconv.ParseInt(fromStr, 10, 64); err == nil {
+                from = v
+            }
+        }
+        snapshots := eventHub.Replay(ch, from)
+        respondJSON(w, http.StatusOK, map[string]any{
+            "channel":   ch,
+            "from":      from,
+            "snapshots": snapshots,
+        })
+    })
+
+    r.Get("/ws", func(w http.ResponseWriter, r *http.Request) {
+        conn, err := upgrader.Upgrade(w, r, nil)
+        if err != nil {
+            logger.Error().Err(err).Msg("upgrade ws")
+            return
+        }
+        hub.add(conn)
+
+        go func(c *websocket.Conn) {
+            pingInterval := 15 * time.Second
+            ping := time.NewTicker(pingInterval)
+            defer ping.Stop()
+            defer hub.remove(c)
+            for {
+                select {
+                case <-ping.C:
+                    _ = c.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf("{\"type\":\"ping\",\"ts\":%d}", time.Now().UnixMilli())))
+                default:
+                    _, msg, err := c.ReadMessage()
+                    if err != nil {
+                        return
+                    }
+                    var m events.WSMessage
+                    if err := json.Unmarshal(msg, &m); err != nil {
+                        logger.Warn().Err(err).Msg("ws decode")
+                        continue
+                    }
+                    switch m.Type {
+                    case "sub":
+                        for _, ch := range m.Channels {
+                            if ch == "" {
+                                continue
+                            }
+                            hub.subscribe(c, ch)
+                        }
+                        ack, _ := json.Marshal(events.WSMessage{Type: "sub_ack", Channels: m.Channels})
+                        _ = c.WriteMessage(websocket.TextMessage, ack)
+                    case "unsub":
+                        for _, ch := range m.Channels {
+                            if ch == "" {
+                                continue
+                            }
+                            hub.unsubscribe(c, ch)
+                        }
+                        ack, _ := json.Marshal(events.WSMessage{Type: "unsub_ack", Channels: m.Channels})
+                        _ = c.WriteMessage(websocket.TextMessage, ack)
+                    case "handshake":
+                        // Validate protocol version
+                        if m.Version != "" && m.Version != events.ProtocolVersion() {
+                            errMsg, _ := json.Marshal(events.WSMessage{
+                                Type:    "error",
+                                Code:    "version_mismatch",
+                                Message: fmt.Sprintf("Protocol version mismatch: client=%s, server=%s", m.Version, events.ProtocolVersion()),
+                                Retry:   false,
+                            })
+                            _ = c.WriteMessage(websocket.TextMessage, errMsg)
+                            c.Close()
+                            return
+                        }
+                        // Auth token check
+                        if cfg.Auth.Token != "" && m.Auth != cfg.Auth.Token {
+                            errMsg, _ := json.Marshal(events.WSMessage{
+                                Type:    "error",
+                                Code:    "unauthorized",
+                                Message: "Invalid auth token",
+                                Retry:   false,
+                            })
+                            _ = c.WriteMessage(websocket.TextMessage, errMsg)
+                            c.Close()
+                            return
+                        }
+                        session := m.Session
+                        if session == "" {
+                            session = uuid.NewString()
+                        }
+                        hub.setSession(c, session)
+                        ack, _ := json.Marshal(events.WSMessage{
+                            Type:     "ack",
+                            Session:  session,
+                            Interval: pingInterval.Milliseconds(),
+                            Version:  events.ProtocolVersion(),
+                            Ts:       time.Now().UnixMilli(),
+                        })
+                        _ = c.WriteMessage(websocket.TextMessage, ack)
+                    case "event":
+                        if m.Name != "" {
+                            // Check idempotency (msgId dedup)
+                            if m.MsgID != "" && eventHub.CheckMsgID(m.MsgID) {
+                                // Duplicate message, ignore
+                                continue
+                            }
+                            if handler, ok := eventHub.Get(m.Name); ok {
+                                handler(context.Background(), eventHub, c, m)
+                            }
+                        }
+                    case "update":
+                        if m.Channel != "" {
+                            // Check idempotency
+                            if m.MsgID != "" && eventHub.CheckMsgID(m.MsgID) {
+                                continue
+                            }
+                            var payload any
+                            if len(m.Data) > 0 {
+                                _ = json.Unmarshal(m.Data, &payload)
+                            }
+                            eventHub.Update(m.Channel, payload)
+                        }
+                    case "diff":
+                        if m.Channel != "" {
+                            // Check idempotency
+                            if m.MsgID != "" && eventHub.CheckMsgID(m.MsgID) {
+                                continue
+                            }
+                            var patch any
+                            if len(m.Patch) > 0 {
+                                _ = json.Unmarshal(m.Patch, &patch)
+                            }
+                            eventHub.Diff(m.Channel, patch)
+                        }
+                    case "ping":
+                        pong, _ := json.Marshal(events.WSMessage{Type: "pong", Ts: time.Now().UnixMilli()})
+                        _ = c.WriteMessage(websocket.TextMessage, pong)
+                    default:
+                        // ignore unknown
+                    }
+                }
+            }
+        }(conn)
+    })
+
+    r.Post("/api/demo", func(w http.ResponseWriter, r *http.Request) {
+        var payload map[string]any
+        if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
+            respondJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid json"})
+            return
+        }
+        ch := "demo"
+        if v, ok := payload["channel"].(string); ok && v != "" {
+            ch = v
+        }
+        eventHub.Emit(ch, payload)
+        respondJSON(w, http.StatusOK, map[string]string{"status": "ok"})
+    })
+
+    srv := &http.Server{
+        Addr:    httpAddr(cfg.HTTP.Port),
+        Handler: r,
+    }
+
+    logger.Info().Str("addr", srv.Addr).Msg("Vira Engine stub API listening")
+    go func() {
+        if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+            logger.Fatal().Err(err).Msg("server error")
+        }
+    }()
+
+    <-ctx.Done()
+    logger.Info().Msg("shutting down...")
+    shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    defer cancel()
+    if err := srv.Shutdown(shutdownCtx); err != nil {
+        logger.Fatal().Err(err).Msg("server shutdown error")
+    }
+}
+
+func requestContext(logger zerolog.Logger) func(next http.Handler) http.Handler {
+    return func(next http.Handler) http.Handler {
+        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            reqID := middleware.GetReqID(r.Context())
+            userID := r.Header.Get("X-User-ID")
+
+            ctx := context.WithValue(r.Context(), ctxReqID, reqID)
+            if userID != "" {
+                ctx = context.WithValue(ctx, ctxUserID, userID)
+            }
+
+            next.ServeHTTP(w, r.WithContext(ctx))
+        })
+    }
+}
+
+func httpLogger(logger zerolog.Logger) func(next http.Handler) http.Handler {
+    return func(next http.Handler) http.Handler {
+        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            started := time.Now()
+            ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
+
+            next.ServeHTTP(ww, r)
+
+            evt := logger.Info()
+            if reqID, ok := r.Context().Value(ctxReqID).(string); ok && reqID != "" {
+                evt = evt.Str("reqID", reqID)
+            }
+            if userID, ok := r.Context().Value(ctxUserID).(string); ok && userID != "" {
+                evt = evt.Str("userID", userID)
+            }
+
+            evt.
+                Str("method", r.Method).
+                Str("path", r.URL.Path).
+                Int("status", ww.Status()).
+                Dur("latency", time.Since(started)).
+                Msg("http_request")
+        })
+    }
+}
+
+func respondJSON(w http.ResponseWriter, status int, payload any) {
+    w.Header().Set("Content-Type", "application/json")
+    w.WriteHeader(status)
+    _ = json.NewEncoder(w).Encode(payload)
+}
+
+func httpAddr(port int) string {
+    return ":" + strconv.Itoa(port)
+}
+`;
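
The generated main.go speaks a small JSON protocol over /ws: a "handshake" (optionally carrying a protocol version, an auth token, and a reusable session id), "sub"/"unsub" with a list of channels, client-sent "event"/"update"/"diff" messages with optional msgId de-duplication, and "ping"/"pong" keep-alives. The sketch below is a minimal Go client for that flow; it assumes the backend listens on localhost:8080 (the real port comes from cfg.HTTP.Port in config/app.yaml) and that events.WSMessage marshals to the lower-case keys the server itself emits ("type", "ts", and so on); the actual struct tags live in the events package generated from eventsAPI.js, which is not shown in this hunk.

package main

import (
    "log"

    "github.com/gorilla/websocket"
)

func main() {
    // Assumed local address; adjust to the port configured in config/app.yaml.
    conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/ws", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Handshake: the server answers with an "ack" carrying the session id,
    // the ping interval, and the protocol version.
    _ = conn.WriteJSON(map[string]any{"type": "handshake"})

    // Subscribe: the server answers with "sub_ack" and, if it already holds
    // state for the channel, an immediate "update" snapshot.
    _ = conn.WriteJSON(map[string]any{"type": "sub", "channels": []string{"demo"}})

    // Log everything the server pushes (acks, snapshots, pings, updates).
    for {
        var msg map[string]any
        if err := conn.ReadJSON(&msg); err != nil {
            log.Fatal(err)
        }
        log.Printf("%v: %v", msg["type"], msg)
    }
}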

package/dist/go/readme.js
@@ -0,0 +1,14 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.readme = void 0;
+exports.readme = `# Vira Engine Monorepo (scaffold)
+
+Structure:
+- frontend/ — Vite + Vira UI application
+- backend/ — Go API (stub)
+- ui/ — Vira UI package/showcases (vite)
+- cli/ — CLI extensions/plugins
+- plugins/ — integrations
+- migrations/ — SQL/Go migrations
+- deploy/ — docker-compose/devops artifacts
+`;

package/dist/go/redisGo.js
@@ -0,0 +1,35 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.redisGo = void 0;
+exports.redisGo = `package cache
+
+import (
+    "context"
+    "fmt"
+    "time"
+
+    "github.com/redis/go-redis/v9"
+    "github.com/rs/zerolog"
+
+    "vira-engine-backend/internal/config"
+)
+
+func NewRedisClient(ctx context.Context, cfg config.Config, logger zerolog.Logger) (*redis.Client, error) {
+    addr := fmt.Sprintf("%s:%d", cfg.Redis.Host, cfg.Redis.Port)
+    client := redis.NewClient(&redis.Options{
+        Addr:         addr,
+        Password:     cfg.Redis.Password,
+        DB:           cfg.Redis.DB,
+        ReadTimeout:  3 * time.Second,
+        WriteTimeout: 3 * time.Second,
+        DialTimeout:  3 * time.Second,
+    })
+
+    if _, err := client.Ping(ctx).Result(); err != nil {
+        return nil, fmt.Errorf("ping redis: %w", err)
+    }
+
+    logger.Info().Str("redis.addr", addr).Int("redis.db", cfg.Redis.DB).Msg("redis ready")
+    return client, nil
+}
+`;

package/dist/go/registryGo.js
@@ -0,0 +1,47 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.registryGo = void 0;
+exports.registryGo = `package events
+
+// Registry holds all registered event handlers.
+var Registry = make(map[string]EventHandler)
+
+// Register registers an event handler by name.
+func Register(name string, handler EventHandler) {
+    Registry[name] = handler
+}
+
+// RegisterAll registers multiple handlers at once.
+func RegisterAll(handlers map[string]EventHandler) {
+    for name, handler := range handlers {
+        Registry[name] = handler
+    }
+}
+
+// Get returns a handler by name.
+func Get(name string) (EventHandler, bool) {
+    handler, ok := Registry[name]
+    return handler, ok
+}
+
+// ApplyRegistry registers all handlers from Registry into a Hub.
+func (h *Hub) ApplyRegistry() {
+    h.mu.Lock()
+    defer h.mu.Unlock()
+    for name, handler := range Registry {
+        h.events[name] = handler
+    }
+}
+
+// Example handler registration (auto-generated by CLI):
+// func init() {
+//     Register("demo.echo", handleDemoEvent)
+//     Register("task.update", handleTaskUpdate)
+//     Register("user.setStatus", handleUserStatus)
+// }
+`;
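
registry.go is the hook that generated files such as registry_kanban.go (referenced from main.go) use to add handlers without editing main.go: each file registers its handlers in an init function, and main.go later copies the registry into the hub via ApplyRegistry. The sketch below is a hand-written handler along the same lines; it assumes EventHandler has the signature used by handleDemoEvent in main.go (context.Context, EventEmitter, *websocket.Conn, WSMessage), and the "task.move" event name and "kanban.board" channel are purely illustrative.

// registry_custom.go (hypothetical file in the same events package)
package events

import (
    "context"
    "encoding/json"

    "github.com/gorilla/websocket"
)

// handleTaskMove decodes the incoming payload and publishes it as the new
// state of a channel, mirroring handleDemoEvent in main.go.
func handleTaskMove(ctx context.Context, hub EventEmitter, conn *websocket.Conn, msg WSMessage) {
    var payload map[string]any
    if len(msg.Data) > 0 {
        _ = json.Unmarshal(msg.Data, &payload)
    }
    hub.Update("kanban.board", payload)
}

func init() {
    Register("task.move", handleTaskMove)
}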

package/dist/go/sqlcYaml.js
@@ -0,0 +1,17 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.sqlcYaml = void 0;
+exports.sqlcYaml = `version: "2"
+sql:
+  - engine: "postgresql"
+    schema: "migrations"
+    queries: "queries"
+    gen:
+      go:
+        package: "gen"
+        out: "internal/db/gen"
+        sql_package: "pgx/v5"
+        emit_interface: true
+        emit_db_tags: true
+        emit_empty_slices: true
+`;
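
Because emit_interface is set, running sqlc generate against this config writes a "gen" package under internal/db/gen with a Queries struct, a Querier interface, and one method per annotated query in the queries/ directory (the queries themselves are not part of this package). The sketch below shows the general shape of sqlc-generated usage; GetTask is a hypothetical query (it would come from a "-- name: GetTask :one" annotation), and only sqlc's standard New constructor and DBTX interface are assumed.

package example

import (
    "context"

    "vira-engine-backend/internal/db/gen" // output of sqlc generate, not checked in by this diff
)

// fetchOne sketches how sqlc-generated code is typically called; the pgx pool
// returned by db.NewPool is expected to satisfy the generated DBTX interface.
func fetchOne(ctx context.Context, db gen.DBTX) error {
    q := gen.New(db)
    task, err := q.GetTask(ctx, "some-task-id") // hypothetical query method
    if err != nil {
        return err
    }
    _ = task
    return nil
}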