@elgap/edukaai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (306):
  1. package/.output/nitro.json +15 -0
  2. package/.output/public/_nuxt/BHDzAs85.js +1 -0
  3. package/.output/public/_nuxt/BJzbfdL_.js +1 -0
  4. package/.output/public/_nuxt/BKGLlKXk.js +6 -0
  5. package/.output/public/_nuxt/BLuXzJk3.js +1 -0
  6. package/.output/public/_nuxt/BNOzzLI3.js +1 -0
  7. package/.output/public/_nuxt/BO5GMwXh.js +1 -0
  8. package/.output/public/_nuxt/BQRbsFGy.js +1 -0
  9. package/.output/public/_nuxt/Be4MVdOg.js +1 -0
  10. package/.output/public/_nuxt/Bu4d3Z1T.js +1 -0
  11. package/.output/public/_nuxt/BwpBBNv4.js +1 -0
  12. package/.output/public/_nuxt/C1Og9-6n.js +97 -0
  13. package/.output/public/_nuxt/C3CjAhtj.js +1 -0
  14. package/.output/public/_nuxt/C6E3Ty3z.js +1 -0
  15. package/.output/public/_nuxt/COct4B42.js +1 -0
  16. package/.output/public/_nuxt/CQ6g2dtj.js +1 -0
  17. package/.output/public/_nuxt/CURSW5TV.js +246 -0
  18. package/.output/public/_nuxt/CW57JW4s.js +1 -0
  19. package/.output/public/_nuxt/Cg2fM61o.js +18 -0
  20. package/.output/public/_nuxt/CgGOeWta.js +1 -0
  21. package/.output/public/_nuxt/CoyYbT7u.js +1 -0
  22. package/.output/public/_nuxt/D11MHbGc.js +133 -0
  23. package/.output/public/_nuxt/D24dz0S8.js +1 -0
  24. package/.output/public/_nuxt/D3EFPr8x.js +1 -0
  25. package/.output/public/_nuxt/Dvt4ZHil.js +1 -0
  26. package/.output/public/_nuxt/builds/latest.json +1 -0
  27. package/.output/public/_nuxt/builds/meta/ab87ba0e-6d0d-4466-8b12-00d44f8fb9d3.json +1 -0
  28. package/.output/public/_nuxt/default.C1K1-g4D.css +1 -0
  29. package/.output/public/_nuxt/entry.CH8k-Mga.css +1 -0
  30. package/.output/public/_nuxt/error-404.C-Ezrlz-.css +1 -0
  31. package/.output/public/_nuxt/error-500.DBWf9FGj.css +1 -0
  32. package/.output/public/_nuxt/first-training.wFDD65zm.css +1 -0
  33. package/.output/public/_nuxt/import.cUKYdCrq.css +1 -0
  34. package/.output/public/_nuxt/index.D-2VHyHA.css +1 -0
  35. package/.output/public/_nuxt/index.D4J_vHmL.css +1 -0
  36. package/.output/public/_nuxt/mPJeLIz7.js +1 -0
  37. package/.output/server/chunks/_/error-500.mjs +19 -0
  38. package/.output/server/chunks/_/error-500.mjs.map +1 -0
  39. package/.output/server/chunks/build/ExampleCard-styles.BUOJhEg0.mjs +8 -0
  40. package/.output/server/chunks/build/ExampleCard-styles.BUOJhEg0.mjs.map +1 -0
  41. package/.output/server/chunks/build/ExampleForm-Bcpl0CfL.mjs +238 -0
  42. package/.output/server/chunks/build/ExampleForm-Bcpl0CfL.mjs.map +1 -0
  43. package/.output/server/chunks/build/ImportPreview-styles.CFmMl5Ok.mjs +8 -0
  44. package/.output/server/chunks/build/ImportPreview-styles.CFmMl5Ok.mjs.map +1 -0
  45. package/.output/server/chunks/build/_id_-BUSGcL-H.mjs +113 -0
  46. package/.output/server/chunks/build/_id_-BUSGcL-H.mjs.map +1 -0
  47. package/.output/server/chunks/build/axolotl-C-EXGgM8.mjs +112 -0
  48. package/.output/server/chunks/build/axolotl-C-EXGgM8.mjs.map +1 -0
  49. package/.output/server/chunks/build/best-practices-DsadorHb.mjs +38 -0
  50. package/.output/server/chunks/build/best-practices-DsadorHb.mjs.map +1 -0
  51. package/.output/server/chunks/build/client.precomputed.mjs +4 -0
  52. package/.output/server/chunks/build/client.precomputed.mjs.map +1 -0
  53. package/.output/server/chunks/build/default-DPkqqHOE.mjs +354 -0
  54. package/.output/server/chunks/build/default-DPkqqHOE.mjs.map +1 -0
  55. package/.output/server/chunks/build/default-styles.DsqVVS7k.mjs +8 -0
  56. package/.output/server/chunks/build/default-styles.DsqVVS7k.mjs.map +1 -0
  57. package/.output/server/chunks/build/error-404-K4UfZNck.mjs +97 -0
  58. package/.output/server/chunks/build/error-404-K4UfZNck.mjs.map +1 -0
  59. package/.output/server/chunks/build/error-404-styles.DuDrf-v0.mjs +8 -0
  60. package/.output/server/chunks/build/error-404-styles.DuDrf-v0.mjs.map +1 -0
  61. package/.output/server/chunks/build/error-500-CA7TEPNg.mjs +79 -0
  62. package/.output/server/chunks/build/error-500-CA7TEPNg.mjs.map +1 -0
  63. package/.output/server/chunks/build/error-500-styles.8IYEHzz6.mjs +8 -0
  64. package/.output/server/chunks/build/error-500-styles.8IYEHzz6.mjs.map +1 -0
  65. package/.output/server/chunks/build/examples-BHK8MDrs.mjs +134 -0
  66. package/.output/server/chunks/build/examples-BHK8MDrs.mjs.map +1 -0
  67. package/.output/server/chunks/build/export-BzxFqqfP.mjs +233 -0
  68. package/.output/server/chunks/build/export-BzxFqqfP.mjs.map +1 -0
  69. package/.output/server/chunks/build/faq-DKch73dS.mjs +112 -0
  70. package/.output/server/chunks/build/faq-DKch73dS.mjs.map +1 -0
  71. package/.output/server/chunks/build/field-guide-qbukT8F0.mjs +38 -0
  72. package/.output/server/chunks/build/field-guide-qbukT8F0.mjs.map +1 -0
  73. package/.output/server/chunks/build/first-training-BbEOBcEW.mjs +343 -0
  74. package/.output/server/chunks/build/first-training-BbEOBcEW.mjs.map +1 -0
  75. package/.output/server/chunks/build/first-training-styles.BzOa_KRD.mjs +8 -0
  76. package/.output/server/chunks/build/first-training-styles.BzOa_KRD.mjs.map +1 -0
  77. package/.output/server/chunks/build/glossary-CxSGCJoH.mjs +397 -0
  78. package/.output/server/chunks/build/glossary-CxSGCJoH.mjs.map +1 -0
  79. package/.output/server/chunks/build/huggingface-TSmyUzZU.mjs +128 -0
  80. package/.output/server/chunks/build/huggingface-TSmyUzZU.mjs.map +1 -0
  81. package/.output/server/chunks/build/import-C0kYhRv9.mjs +247 -0
  82. package/.output/server/chunks/build/import-C0kYhRv9.mjs.map +1 -0
  83. package/.output/server/chunks/build/index-C1ntBEWd.mjs +235 -0
  84. package/.output/server/chunks/build/index-C1ntBEWd.mjs.map +1 -0
  85. package/.output/server/chunks/build/index-D3CxihcX.mjs +320 -0
  86. package/.output/server/chunks/build/index-D3CxihcX.mjs.map +1 -0
  87. package/.output/server/chunks/build/index-DgKa1cy0.mjs +360 -0
  88. package/.output/server/chunks/build/index-DgKa1cy0.mjs.map +1 -0
  89. package/.output/server/chunks/build/index-cG54gaKX.mjs +513 -0
  90. package/.output/server/chunks/build/index-cG54gaKX.mjs.map +1 -0
  91. package/.output/server/chunks/build/index-styles.BX2SZiiS.mjs +8 -0
  92. package/.output/server/chunks/build/index-styles.BX2SZiiS.mjs.map +1 -0
  93. package/.output/server/chunks/build/llm-training-DIqc0eiM.mjs +91 -0
  94. package/.output/server/chunks/build/llm-training-DIqc0eiM.mjs.map +1 -0
  95. package/.output/server/chunks/build/new-b3338aLF.mjs +92 -0
  96. package/.output/server/chunks/build/new-b3338aLF.mjs.map +1 -0
  97. package/.output/server/chunks/build/nuxt-link-Ceyd90PQ.mjs +290 -0
  98. package/.output/server/chunks/build/nuxt-link-Ceyd90PQ.mjs.map +1 -0
  99. package/.output/server/chunks/build/sample-datasets-CVLWMQUA.mjs +45 -0
  100. package/.output/server/chunks/build/sample-datasets-CVLWMQUA.mjs.map +1 -0
  101. package/.output/server/chunks/build/server.mjs +1979 -0
  102. package/.output/server/chunks/build/server.mjs.map +1 -0
  103. package/.output/server/chunks/build/styles.mjs +20 -0
  104. package/.output/server/chunks/build/styles.mjs.map +1 -0
  105. package/.output/server/chunks/build/test-router-BYM6Cpst.mjs +105 -0
  106. package/.output/server/chunks/build/test-router-BYM6Cpst.mjs.map +1 -0
  107. package/.output/server/chunks/nitro/nitro.mjs +5800 -0
  108. package/.output/server/chunks/nitro/nitro.mjs.map +1 -0
  109. package/.output/server/chunks/routes/renderer.mjs +481 -0
  110. package/.output/server/chunks/routes/renderer.mjs.map +1 -0
  111. package/.output/server/chunks/virtual/_virtual_spa-template.mjs +4 -0
  112. package/.output/server/chunks/virtual/_virtual_spa-template.mjs.map +1 -0
  113. package/.output/server/index.mjs +12 -0
  114. package/.output/server/index.mjs.map +1 -0
  115. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/api.js +2 -0
  116. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/app.js +2 -0
  117. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/component.js +2 -0
  118. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/context.js +2 -0
  119. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/hooks.js +2 -0
  120. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/index.js +22 -0
  121. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/api/util.js +2 -0
  122. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/const.js +5 -0
  123. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/env.js +17 -0
  124. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/index.js +45 -0
  125. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/plugin.js +2 -0
  126. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/proxy.js +111 -0
  127. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/lib/cjs/time.js +28 -0
  128. package/.output/server/node_modules/.nitro/@vue/devtools-api@6.6.4/package.json +37 -0
  129. package/.output/server/node_modules/.nitro/@vue/devtools-api@7.7.9/dist/index.cjs +42 -0
  130. package/.output/server/node_modules/.nitro/@vue/devtools-api@7.7.9/package.json +32 -0
  131. package/.output/server/node_modules/.nitro/hookable@5.5.3/dist/index.cjs +299 -0
  132. package/.output/server/node_modules/.nitro/hookable@5.5.3/package.json +49 -0
  133. package/.output/server/node_modules/.nitro/hookable@6.0.1/dist/index.mjs +266 -0
  134. package/.output/server/node_modules/.nitro/hookable@6.0.1/package.json +52 -0
  135. package/.output/server/node_modules/.nitro/perfect-debounce@1.0.0/dist/index.cjs +59 -0
  136. package/.output/server/node_modules/.nitro/perfect-debounce@1.0.0/package.json +44 -0
  137. package/.output/server/node_modules/.nitro/perfect-debounce@2.1.0/dist/index.mjs +89 -0
  138. package/.output/server/node_modules/.nitro/perfect-debounce@2.1.0/package.json +41 -0
  139. package/.output/server/node_modules/@babel/parser/lib/index.js +14582 -0
  140. package/.output/server/node_modules/@babel/parser/package.json +50 -0
  141. package/.output/server/node_modules/@iconify/utils/lib/colors/index.js +292 -0
  142. package/.output/server/node_modules/@iconify/utils/lib/colors/keywords.js +702 -0
  143. package/.output/server/node_modules/@iconify/utils/lib/css/common.js +76 -0
  144. package/.output/server/node_modules/@iconify/utils/lib/css/format.js +40 -0
  145. package/.output/server/node_modules/@iconify/utils/lib/css/icon.js +52 -0
  146. package/.output/server/node_modules/@iconify/utils/lib/css/icons.js +133 -0
  147. package/.output/server/node_modules/@iconify/utils/lib/customisations/bool.js +20 -0
  148. package/.output/server/node_modules/@iconify/utils/lib/customisations/defaults.js +15 -0
  149. package/.output/server/node_modules/@iconify/utils/lib/customisations/flip.js +18 -0
  150. package/.output/server/node_modules/@iconify/utils/lib/customisations/merge.js +18 -0
  151. package/.output/server/node_modules/@iconify/utils/lib/customisations/rotate.js +31 -0
  152. package/.output/server/node_modules/@iconify/utils/lib/emoji/cleanup.js +80 -0
  153. package/.output/server/node_modules/@iconify/utils/lib/emoji/convert.js +102 -0
  154. package/.output/server/node_modules/@iconify/utils/lib/emoji/data.js +29 -0
  155. package/.output/server/node_modules/@iconify/utils/lib/emoji/format.js +60 -0
  156. package/.output/server/node_modules/@iconify/utils/lib/emoji/parse.js +50 -0
  157. package/.output/server/node_modules/@iconify/utils/lib/emoji/regex/base.js +204 -0
  158. package/.output/server/node_modules/@iconify/utils/lib/emoji/regex/create.js +35 -0
  159. package/.output/server/node_modules/@iconify/utils/lib/emoji/regex/numbers.js +134 -0
  160. package/.output/server/node_modules/@iconify/utils/lib/emoji/regex/similar.js +167 -0
  161. package/.output/server/node_modules/@iconify/utils/lib/emoji/regex/tree.js +81 -0
  162. package/.output/server/node_modules/@iconify/utils/lib/emoji/replace/find.js +94 -0
  163. package/.output/server/node_modules/@iconify/utils/lib/emoji/replace/replace.js +28 -0
  164. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/components.js +78 -0
  165. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/missing.js +68 -0
  166. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/name.js +47 -0
  167. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/parse.js +105 -0
  168. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/similar.js +38 -0
  169. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/tree.js +94 -0
  170. package/.output/server/node_modules/@iconify/utils/lib/emoji/test/variations.js +64 -0
  171. package/.output/server/node_modules/@iconify/utils/lib/icon/defaults.js +26 -0
  172. package/.output/server/node_modules/@iconify/utils/lib/icon/merge.js +18 -0
  173. package/.output/server/node_modules/@iconify/utils/lib/icon/name.js +58 -0
  174. package/.output/server/node_modules/@iconify/utils/lib/icon/square.js +34 -0
  175. package/.output/server/node_modules/@iconify/utils/lib/icon/transformations.js +13 -0
  176. package/.output/server/node_modules/@iconify/utils/lib/icon-set/convert-info.js +126 -0
  177. package/.output/server/node_modules/@iconify/utils/lib/icon-set/expand.js +21 -0
  178. package/.output/server/node_modules/@iconify/utils/lib/icon-set/get-icon.js +27 -0
  179. package/.output/server/node_modules/@iconify/utils/lib/icon-set/get-icons.js +38 -0
  180. package/.output/server/node_modules/@iconify/utils/lib/icon-set/minify.js +93 -0
  181. package/.output/server/node_modules/@iconify/utils/lib/icon-set/parse.js +48 -0
  182. package/.output/server/node_modules/@iconify/utils/lib/icon-set/tree.js +24 -0
  183. package/.output/server/node_modules/@iconify/utils/lib/icon-set/validate-basic.js +44 -0
  184. package/.output/server/node_modules/@iconify/utils/lib/icon-set/validate.js +125 -0
  185. package/.output/server/node_modules/@iconify/utils/lib/index.js +53 -0
  186. package/.output/server/node_modules/@iconify/utils/lib/loader/custom.js +32 -0
  187. package/.output/server/node_modules/@iconify/utils/lib/loader/loader.js +28 -0
  188. package/.output/server/node_modules/@iconify/utils/lib/loader/modern.js +42 -0
  189. package/.output/server/node_modules/@iconify/utils/lib/loader/utils.js +63 -0
  190. package/.output/server/node_modules/@iconify/utils/lib/misc/objects.js +27 -0
  191. package/.output/server/node_modules/@iconify/utils/lib/misc/strings.js +27 -0
  192. package/.output/server/node_modules/@iconify/utils/lib/misc/title.js +10 -0
  193. package/.output/server/node_modules/@iconify/utils/lib/svg/build.js +115 -0
  194. package/.output/server/node_modules/@iconify/utils/lib/svg/defs.js +32 -0
  195. package/.output/server/node_modules/@iconify/utils/lib/svg/encode-svg-for-css.js +15 -0
  196. package/.output/server/node_modules/@iconify/utils/lib/svg/html.js +10 -0
  197. package/.output/server/node_modules/@iconify/utils/lib/svg/id.js +42 -0
  198. package/.output/server/node_modules/@iconify/utils/lib/svg/inner-html.js +23 -0
  199. package/.output/server/node_modules/@iconify/utils/lib/svg/parse.js +69 -0
  200. package/.output/server/node_modules/@iconify/utils/lib/svg/pretty.js +55 -0
  201. package/.output/server/node_modules/@iconify/utils/lib/svg/size.js +28 -0
  202. package/.output/server/node_modules/@iconify/utils/lib/svg/trim.js +8 -0
  203. package/.output/server/node_modules/@iconify/utils/lib/svg/url.js +23 -0
  204. package/.output/server/node_modules/@iconify/utils/lib/svg/viewbox.js +9 -0
  205. package/.output/server/node_modules/@iconify/utils/package.json +118 -0
  206. package/.output/server/node_modules/@iconify/vue/dist/iconify.mjs +1893 -0
  207. package/.output/server/node_modules/@iconify/vue/package.json +64 -0
  208. package/.output/server/node_modules/@vue/compiler-core/dist/compiler-core.cjs.prod.js +6763 -0
  209. package/.output/server/node_modules/@vue/compiler-core/package.json +58 -0
  210. package/.output/server/node_modules/@vue/compiler-dom/dist/compiler-dom.cjs.prod.js +689 -0
  211. package/.output/server/node_modules/@vue/compiler-dom/package.json +57 -0
  212. package/.output/server/node_modules/@vue/compiler-ssr/dist/compiler-ssr.cjs.js +1413 -0
  213. package/.output/server/node_modules/@vue/compiler-ssr/package.json +34 -0
  214. package/.output/server/node_modules/@vue/devtools-kit/dist/index.cjs +6850 -0
  215. package/.output/server/node_modules/@vue/devtools-kit/package.json +44 -0
  216. package/.output/server/node_modules/@vue/devtools-shared/dist/index.cjs +378 -0
  217. package/.output/server/node_modules/@vue/devtools-shared/package.json +34 -0
  218. package/.output/server/node_modules/@vue/reactivity/dist/reactivity.cjs.prod.js +1870 -0
  219. package/.output/server/node_modules/@vue/reactivity/package.json +55 -0
  220. package/.output/server/node_modules/@vue/runtime-core/dist/runtime-core.cjs.prod.js +6810 -0
  221. package/.output/server/node_modules/@vue/runtime-core/package.json +52 -0
  222. package/.output/server/node_modules/@vue/runtime-dom/dist/runtime-dom.cjs.prod.js +1750 -0
  223. package/.output/server/node_modules/@vue/runtime-dom/package.json +60 -0
  224. package/.output/server/node_modules/@vue/server-renderer/dist/server-renderer.cjs.prod.js +883 -0
  225. package/.output/server/node_modules/@vue/server-renderer/package.json +55 -0
  226. package/.output/server/node_modules/@vue/shared/dist/shared.cjs.prod.js +604 -0
  227. package/.output/server/node_modules/@vue/shared/package.json +47 -0
  228. package/.output/server/node_modules/birpc/dist/index.cjs +296 -0
  229. package/.output/server/node_modules/birpc/package.json +57 -0
  230. package/.output/server/node_modules/consola/dist/chunks/prompt.mjs +280 -0
  231. package/.output/server/node_modules/consola/dist/core.mjs +512 -0
  232. package/.output/server/node_modules/consola/dist/index.mjs +651 -0
  233. package/.output/server/node_modules/consola/dist/shared/consola.DRwqZj3T.mjs +72 -0
  234. package/.output/server/node_modules/consola/dist/shared/consola.DXBYu-KD.mjs +288 -0
  235. package/.output/server/node_modules/consola/package.json +136 -0
  236. package/.output/server/node_modules/devalue/index.js +4 -0
  237. package/.output/server/node_modules/devalue/package.json +37 -0
  238. package/.output/server/node_modules/devalue/src/base64.js +110 -0
  239. package/.output/server/node_modules/devalue/src/constants.js +7 -0
  240. package/.output/server/node_modules/devalue/src/parse.js +246 -0
  241. package/.output/server/node_modules/devalue/src/stringify.js +350 -0
  242. package/.output/server/node_modules/devalue/src/uneval.js +490 -0
  243. package/.output/server/node_modules/devalue/src/utils.js +148 -0
  244. package/.output/server/node_modules/entities/dist/commonjs/decode-codepoint.js +77 -0
  245. package/.output/server/node_modules/entities/dist/commonjs/decode.js +568 -0
  246. package/.output/server/node_modules/entities/dist/commonjs/generated/decode-data-html.js +7 -0
  247. package/.output/server/node_modules/entities/dist/commonjs/generated/decode-data-xml.js +7 -0
  248. package/.output/server/node_modules/entities/dist/commonjs/internal/bin-trie-flags.js +21 -0
  249. package/.output/server/node_modules/entities/dist/commonjs/internal/decode-shared.js +31 -0
  250. package/.output/server/node_modules/entities/dist/commonjs/package.json +3 -0
  251. package/.output/server/node_modules/entities/package.json +120 -0
  252. package/.output/server/node_modules/estree-walker/dist/umd/estree-walker.js +344 -0
  253. package/.output/server/node_modules/estree-walker/package.json +37 -0
  254. package/.output/server/node_modules/pinia/dist/pinia.prod.cjs +719 -0
  255. package/.output/server/node_modules/pinia/package.json +94 -0
  256. package/.output/server/node_modules/source-map-js/lib/array-set.js +121 -0
  257. package/.output/server/node_modules/source-map-js/lib/base64-vlq.js +140 -0
  258. package/.output/server/node_modules/source-map-js/lib/base64.js +67 -0
  259. package/.output/server/node_modules/source-map-js/lib/binary-search.js +111 -0
  260. package/.output/server/node_modules/source-map-js/lib/mapping-list.js +79 -0
  261. package/.output/server/node_modules/source-map-js/lib/quick-sort.js +132 -0
  262. package/.output/server/node_modules/source-map-js/lib/source-map-consumer.js +1188 -0
  263. package/.output/server/node_modules/source-map-js/lib/source-map-generator.js +444 -0
  264. package/.output/server/node_modules/source-map-js/lib/source-node.js +413 -0
  265. package/.output/server/node_modules/source-map-js/lib/util.js +594 -0
  266. package/.output/server/node_modules/source-map-js/package.json +71 -0
  267. package/.output/server/node_modules/source-map-js/source-map.js +8 -0
  268. package/.output/server/node_modules/tailwindcss/dist/chunk-X4GG3EDV.mjs +1 -0
  269. package/.output/server/node_modules/tailwindcss/dist/colors.mjs +1 -0
  270. package/.output/server/node_modules/tailwindcss/package.json +89 -0
  271. package/.output/server/node_modules/ufo/dist/index.mjs +645 -0
  272. package/.output/server/node_modules/ufo/package.json +48 -0
  273. package/.output/server/node_modules/unhead/dist/index.mjs +9 -0
  274. package/.output/server/node_modules/unhead/dist/parser.mjs +508 -0
  275. package/.output/server/node_modules/unhead/dist/plugins.mjs +101 -0
  276. package/.output/server/node_modules/unhead/dist/scripts.mjs +30 -0
  277. package/.output/server/node_modules/unhead/dist/server.mjs +180 -0
  278. package/.output/server/node_modules/unhead/dist/shared/unhead.BPM0-cfG.mjs +44 -0
  279. package/.output/server/node_modules/unhead/dist/shared/unhead.BYvz9V1x.mjs +43 -0
  280. package/.output/server/node_modules/unhead/dist/shared/unhead.BnoAbrHA.mjs +269 -0
  281. package/.output/server/node_modules/unhead/dist/shared/unhead.CApf5sj3.mjs +148 -0
  282. package/.output/server/node_modules/unhead/dist/shared/unhead.CbpEuj3y.mjs +71 -0
  283. package/.output/server/node_modules/unhead/dist/shared/unhead.DQc16pHI.mjs +196 -0
  284. package/.output/server/node_modules/unhead/dist/shared/unhead.D_nrZZPH.mjs +182 -0
  285. package/.output/server/node_modules/unhead/dist/shared/unhead.ckV6dpEQ.mjs +166 -0
  286. package/.output/server/node_modules/unhead/dist/shared/unhead.fVVqDC1O.mjs +203 -0
  287. package/.output/server/node_modules/unhead/dist/shared/unhead.yem5I2v_.mjs +38 -0
  288. package/.output/server/node_modules/unhead/dist/utils.mjs +5 -0
  289. package/.output/server/node_modules/unhead/package.json +105 -0
  290. package/.output/server/node_modules/vue/dist/vue.cjs.js +80 -0
  291. package/.output/server/node_modules/vue/dist/vue.cjs.prod.js +66 -0
  292. package/.output/server/node_modules/vue/index.js +7 -0
  293. package/.output/server/node_modules/vue/index.mjs +1 -0
  294. package/.output/server/node_modules/vue/package.json +112 -0
  295. package/.output/server/node_modules/vue/server-renderer/index.mjs +1 -0
  296. package/.output/server/node_modules/vue-bundle-renderer/dist/runtime.mjs +301 -0
  297. package/.output/server/node_modules/vue-bundle-renderer/package.json +55 -0
  298. package/.output/server/node_modules/vue-router/dist/devtools-EWN81iOl.mjs +1220 -0
  299. package/.output/server/node_modules/vue-router/dist/vue-router.mjs +1557 -0
  300. package/.output/server/node_modules/vue-router/package.json +153 -0
  301. package/.output/server/node_modules/vue-router/vue-router.node.mjs +2 -0
  302. package/.output/server/package.json +37 -0
  303. package/LICENSE +21 -0
  304. package/README.md +132 -0
  305. package/bin/cli.js +75 -0
  306. package/package.json +71 -0
@@ -0,0 +1,91 @@
1
+ import { _ as __nuxt_component_0 } from './nuxt-link-Ceyd90PQ.mjs';
2
+ import { mergeProps, withCtx, createTextVNode, useSSRContext } from 'vue';
3
+ import { ssrRenderAttrs, ssrRenderComponent } from 'vue/server-renderer';
4
+ import '../nitro/nitro.mjs';
5
+ import 'node:http';
6
+ import 'node:https';
7
+ import 'node:events';
8
+ import 'node:buffer';
9
+ import 'node:fs';
10
+ import 'node:path';
11
+ import 'node:crypto';
12
+ import 'node:url';
13
+ import '@iconify/utils';
14
+ import 'consola';
15
+ import './server.mjs';
16
+ import 'pinia';
17
+ import 'vue-router';
18
+ import 'tailwindcss/colors';
19
+ import '@iconify/vue';
20
+ import '../routes/renderer.mjs';
21
+ import 'vue-bundle-renderer/runtime';
22
+ import 'unhead/server';
23
+ import 'devalue';
24
+ import 'unhead/utils';
25
+
26
+ const _sfc_main = {
27
+ __name: "llm-training",
28
+ __ssrInlineRender: true,
29
+ setup(__props) {
30
+ return (_ctx, _push, _parent, _attrs) => {
31
+ const _component_NuxtLink = __nuxt_component_0;
32
+ _push(`<div${ssrRenderAttrs(mergeProps({ class: "max-w-4xl mx-auto pb-20" }, _attrs))}><div class="text-sm text-gray-500 mb-6">`);
33
+ _push(ssrRenderComponent(_component_NuxtLink, {
34
+ to: "/help",
35
+ class: "hover:text-blue-600"
36
+ }, {
37
+ default: withCtx((_, _push2, _parent2, _scopeId) => {
38
+ if (_push2) {
39
+ _push2(`← Back to Guide`);
40
+ } else {
41
+ return [
42
+ createTextVNode("← Back to Guide")
43
+ ];
44
+ }
45
+ }),
46
+ _: 1
47
+ }, _parent));
48
+ _push(`</div><div class="mb-12"><div class="flex items-center gap-3 mb-4"><span class="text-4xl">🧠</span><h1 class="text-4xl font-bold">LLM Training Explained</h1></div><p class="text-xl text-gray-600"> A technical deep-dive into how Large Language Models actually work, explained for developers who want to understand the magic behind AI. </p></div><div class="card mb-8 bg-gradient-to-br from-blue-50 to-purple-50"><h2 class="text-xl font-semibold mb-4">📖 The Story Begins...</h2><p class="text-gray-700 leading-relaxed mb-4"> Imagine you&#39;re texting with a friend who&#39;s incredibly good at predicting what you&#39;re going to say next. Not because they&#39;re psychic, but because they&#39;ve read every book, article, and conversation in existence. </p><p class="text-gray-700 leading-relaxed"> That&#39;s essentially what an LLM is — a statistical prediction machine that learned patterns from trillions of words. But how does it actually work under the hood? Let&#39;s dive into the technical magic, step by step. </p></div><section id="what-is-llm" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">1️⃣</span><h2 class="text-2xl font-bold">What is an LLM, Actually?</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">The Next Token Prediction Machine</h3><p class="text-gray-700 mb-4 leading-relaxed"> At its core, an LLM (Large Language Model) is doing one thing and one thing only: <strong>predicting the next token</strong>. 
</p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-3">🎯 The Core Task</h4><div class="space-y-3"><div class="flex items-center gap-3"><span class="text-2xl">📜</span><div class="flex-1"><p class="font-mono text-sm bg-white p-2 rounded border"> &quot;The capital of France is&quot; </p></div></div><div class="text-center"><svg class="w-8 h-8 mx-auto text-blue-500" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 14l-7 7m0 0l-7-7m7 7V3"></path></svg></div><div class="flex items-center gap-3"><span class="text-2xl">🤖</span><div class="flex-1"><p class="font-mono text-sm bg-green-50 p-2 rounded border border-green-200"> &quot;Paris&quot; <span class="text-green-600">(probability: 99.9%)</span></p></div></div></div></div><p class="text-gray-700 mb-4 leading-relaxed"> That&#39;s it. The entire &quot;intelligence&quot; of ChatGPT, Claude, or any LLM comes from doing this one task extremely well, billions of times over. </p><div class="bg-blue-50 p-4 rounded-lg mb-6"><h4 class="font-medium text-blue-900 mb-2">💡 Why This Works</h4><p class="text-sm text-blue-800"> If you can predict &quot;Paris&quot; after &quot;The capital of France is&quot;, and you can predict &quot;def&quot; after &quot;class MyClass:&quot;, and you can predict &quot;sincerely&quot; after &quot;Yours&quot;, then you&#39;ve learned the patterns of language, facts about the world, programming syntax, and letter-writing etiquette — all from next-token prediction. 
</p></div><h3 class="text-lg font-semibold mb-4 text-blue-700">The Autoregressive Loop</h3><p class="text-gray-700 mb-4 leading-relaxed"> Here&#39;s the clever part: once the model predicts &quot;Paris&quot;, it adds that to the context and predicts the NEXT token: </p><div class="bg-gray-50 p-4 rounded-xl mb-6"><div class="space-y-2 font-mono text-sm"><p><strong>Step 1:</strong> &quot;The capital of France is&quot; → predicts &quot;Paris&quot;</p><p><strong>Step 2:</strong> &quot;The capital of France is Paris&quot; → predicts &quot;,&quot;</p><p><strong>Step 3:</strong> &quot;The capital of France is Paris,&quot; → predicts &quot;a&quot;</p><p><strong>Step 4:</strong> &quot;The capital of France is Paris, a&quot; → predicts &quot;city&quot;</p><p><strong>Step 5:</strong> ...continues until it predicts an &quot;end&quot; token</p></div></div><p class="text-gray-700 leading-relaxed"> This is called <strong>autoregressive generation</strong> — the model feeds its own predictions back as input to generate the next part. This is how it writes essays, answers questions, or generates code one piece at a time. </p></div></section><section id="tokens" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">2️⃣</span><h2 class="text-2xl font-bold">Tokens: The Building Blocks</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">Not Words, Not Characters</h3><p class="text-gray-700 mb-4 leading-relaxed"> LLMs don&#39;t actually work with words or characters. They work with <strong>tokens</strong> — pieces of text that are somewhere in between. 
</p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">How Tokenization Works</h4><div class="space-y-4"><div><p class="text-sm text-gray-500 mb-1">Original text:</p><p class="font-mono text-lg">&quot;ChatGPT is amazing!&quot;</p></div><div class="text-center"><span class="text-2xl">↓ Tokenized ↓</span></div><div class="flex flex-wrap gap-2"><span class="px-3 py-2 bg-blue-100 rounded-lg font-mono text-sm">Chat</span><span class="px-3 py-2 bg-blue-100 rounded-lg font-mono text-sm">G</span><span class="px-3 py-2 bg-blue-100 rounded-lg font-mono text-sm">PT</span><span class="px-3 py-2 bg-green-100 rounded-lg font-mono text-sm">is</span><span class="px-3 py-2 bg-purple-100 rounded-lg font-mono text-sm">amazing</span><span class="px-3 py-2 bg-red-100 rounded-lg font-mono text-sm">!</span></div><p class="text-sm text-gray-600 mt-2">6 tokens total</p></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">Why Tokens?</h3><div class="grid grid-cols-1 md:grid-cols-2 gap-4 mb-6"><div class="p-4 bg-green-50 rounded-lg"><h4 class="font-medium text-green-800 mb-2">✅ Efficient</h4><p class="text-sm text-gray-700"> Common words like &quot;the&quot;, &quot;and&quot;, &quot;is&quot; are single tokens. Rare words get broken into subword pieces. This gives a good balance between vocabulary size and sequence length. </p></div><div class="p-4 bg-blue-50 rounded-lg"><h4 class="font-medium text-blue-800 mb-2">✅ Handles Any Text</h4><p class="text-sm text-gray-700"> By breaking unknown words into pieces (like &quot;unbelievable&quot; → &quot;un&quot; + &quot;believable&quot;), the model can handle words it&#39;s never seen before. 
</p></div></div><div class="bg-yellow-50 p-4 rounded-lg mb-6"><h4 class="font-medium text-yellow-800 mb-2">📊 Token Count Examples</h4><table class="w-full text-sm"><thead><tr class="border-b"><th class="text-left py-2">Text</th><th class="text-right py-2">Tokens</th></tr></thead><tbody><tr class="border-b"><td class="py-2">&quot;Hello&quot;</td><td class="text-right">1</td></tr><tr class="border-b"><td class="py-2">&quot;Hello world&quot;</td><td class="text-right">2</td></tr><tr class="border-b"><td class="py-2">&quot;The quick brown fox&quot;</td><td class="text-right">4</td></tr><tr class="border-b"><td class="py-2">&quot;uncharacteristically&quot;</td><td class="text-right">4 (broken into pieces)</td></tr><tr><td class="py-2">A full paragraph (~100 words)</td><td class="text-right">~130-150 tokens</td></tr></tbody></table></div><div class="bg-purple-50 p-4 rounded-lg"><h4 class="font-medium text-purple-800 mb-2">💡 Why This Matters for You</h4><p class="text-sm text-gray-700"> When you&#39;re charged by the token, or when your model has a &quot;context window&quot; of 4096 tokens, you&#39;re not being limited by words or characters — you&#39;re being limited by these token pieces. That&#39;s why a 100-word paragraph might be 130 tokens, not 100. </p></div></div></section><section id="neural-networks" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">3️⃣</span><h2 class="text-2xl font-bold">Neural Networks: Pattern Recognizers</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">The Pattern Recognition Machine</h3><p class="text-gray-700 mb-4 leading-relaxed"> At the heart of an LLM is a <strong>neural network</strong> — a massive system of interconnected nodes that learns patterns from data. Think of it like a giant sieve that filters information, learning which patterns are important. 
</p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">How a Neural Network Works (Simplified)</h4><div class="space-y-4"><div class="flex items-center gap-4"><div class="w-20 text-right text-sm text-gray-500">Input</div><div class="flex-1 p-3 bg-blue-100 rounded-lg"><p class="font-mono text-sm">Token IDs: [15496, 11, 616, 329, 11406]</p><p class="text-xs text-gray-600">(&quot;The cat sat...&quot;)</p></div></div><div class="text-center text-2xl">↓</div><div class="flex items-center gap-4"><div class="w-20 text-right text-sm text-gray-500">Processing</div><div class="flex-1 p-4 bg-purple-100 rounded-lg"><p class="font-medium text-sm mb-2">Millions of mathematical operations</p><p class="text-xs text-gray-600">Matrix multiplications, activations, transformations through layers</p></div></div><div class="text-center text-2xl">↓</div><div class="flex items-center gap-4"><div class="w-20 text-right text-sm text-gray-500">Output</div><div class="flex-1 p-3 bg-green-100 rounded-lg"><p class="font-mono text-sm">Probability distribution over all tokens</p><p class="text-xs text-gray-600">&quot;on&quot; = 45%, &quot;down&quot; = 30%, &quot;there&quot; = 15%, ...</p></div></div></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">The Key Idea: Learning from Examples</h3><p class="text-gray-700 mb-4 leading-relaxed"> The network doesn&#39;t &quot;know&quot; anything initially. It starts with random values (weights). 
During training, it sees millions of examples like: </p><div class="bg-gray-50 p-4 rounded-xl mb-6"><div class="space-y-2 text-sm"><p><strong>Input:</strong> &quot;The cat sat on the&quot;</p><p><strong>Expected Output:</strong> &quot;mat&quot;</p><p><strong>Network&#39;s Guess:</strong> &quot;floor&quot; (wrong!)</p><p class="text-red-600">→ Adjust weights slightly to do better next time</p></div></div><p class="text-gray-700 mb-4 leading-relaxed"> After seeing this pattern millions of times across different contexts, the network learns: </p><ul class="list-disc list-inside text-gray-700 space-y-2 mb-6 ml-4"><li>Cats often sit on things</li><li>&quot;Mat&quot; commonly follows &quot;sat on the&quot;</li><li>Grammar patterns (articles, prepositions, word order)</li><li>World knowledge (cats are pets, mats are for sitting)</li></ul><div class="bg-green-50 p-4 rounded-lg"><h4 class="font-medium text-green-800 mb-2">🧠 The Emergence of &quot;Understanding&quot;</h4><p class="text-sm text-gray-700"> Notice how the model doesn&#39;t have a &quot;cat database&quot; or a &quot;mat definition.&quot; It just learned statistical patterns. Yet from these patterns, complex behaviors emerge — answering questions, writing code, reasoning through problems. This emergent complexity is what makes LLMs so powerful (and surprising). </p></div></div></section><section id="transformers" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">4️⃣</span><h2 class="text-2xl font-bold">The Transformer Architecture</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">&quot;Attention Is All You Need&quot;</h3><p class="text-gray-700 mb-4 leading-relaxed"> In 2017, Google researchers published a paper with that title. It revolutionized AI. The key insight: <strong>attention mechanisms</strong> allow models to focus on relevant parts of the input when making predictions. 
</p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">The Attention Analogy</h4><p class="text-gray-700 mb-4"> Imagine you&#39;re reading a long paragraph and encounter the word &quot;it&quot; at the end: </p><div class="bg-white p-4 rounded border"><p class="mb-3"> &quot;The computer was old and slow. The user tried to run a new program on <span class="bg-yellow-200 px-1 font-bold">it</span>, but...&quot; </p><p class="text-sm text-gray-600"> To understand what &quot;it&quot; refers to, your brain looks back and <strong>attends to</strong> the most relevant words: &quot;computer&quot;, &quot;old&quot;, &quot;slow&quot;. You don&#39;t equally consider every word — you focus on what matters. </p></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">Self-Attention Mechanism</h3><p class="text-gray-700 mb-4 leading-relaxed"> The transformer uses <strong>self-attention</strong> to let every token &quot;look at&quot; every other token and decide which ones are important for understanding its meaning. 
</p><div class="bg-blue-50 p-6 rounded-xl mb-6"><h4 class="font-medium text-blue-900 mb-4">How It Works</h4><div class="space-y-4"><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">1</div><div><h5 class="font-medium">Query, Key, Value</h5><p class="text-sm text-gray-700 mb-2"> For each token, the model creates three vectors: </p><ul class="list-disc list-inside ml-4 space-y-1 text-sm text-gray-700"><li><strong>Query:</strong> &quot;What am I looking for?&quot;</li><li><strong>Key:</strong> &quot;What do I contain?&quot;</li><li><strong>Value:</strong> &quot;What information do I have?&quot;</li></ul></div></div><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">2</div><div><h5 class="font-medium">Compute Attention Scores</h5><p class="text-sm text-gray-700"> Each token&#39;s Query is compared to every other token&#39;s Key. High match = high attention weight. The model learns which relationships matter. </p></div></div><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">3</div><div><h5 class="font-medium">Weighted Sum</h5><p class="text-sm text-gray-700"> Each token&#39;s new representation becomes a weighted combination of all tokens&#39; Values, weighted by attention scores. </p></div></div></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">Multi-Head Attention</h3><p class="text-gray-700 mb-4 leading-relaxed"> The model doesn&#39;t just do this once — it runs multiple &quot;attention heads&quot; in parallel. 
Each head can learn different types of relationships: </p><div class="grid grid-cols-1 md:grid-cols-2 gap-4 mb-6"><div class="p-4 bg-green-50 rounded-lg"><h5 class="font-medium text-green-800 mb-2">Head A: Syntax</h5><p class="text-sm text-gray-700">Learns grammatical relationships — subjects match with verbs, pronouns match with nouns.</p></div><div class="p-4 bg-purple-50 rounded-lg"><h5 class="font-medium text-purple-800 mb-2">Head B: Semantics</h5><p class="text-sm text-gray-700">Learns meaning relationships — &quot;king&quot; relates to &quot;queen&quot;, &quot;Paris&quot; relates to &quot;France&quot;.</p></div><div class="p-4 bg-blue-50 rounded-lg"><h5 class="font-medium text-blue-800 mb-2">Head C: Long-Range</h5><p class="text-sm text-gray-700">Learns connections across long distances in text — a character introduced in paragraph 1 mentioned again in paragraph 5.</p></div><div class="p-4 bg-orange-50 rounded-lg"><h5 class="font-medium text-orange-800 mb-2">Head D: Context</h5><p class="text-sm text-gray-700">Learns task-specific patterns — in code, variable definitions match with usages.</p></div></div><div class="bg-gray-50 p-4 rounded-xl mb-6"><h4 class="font-medium mb-3">Visual: Attention Pattern</h4><div class="bg-white p-4 rounded border"><p class="mb-4 font-mono text-sm">&quot;The animal didn&#39;t cross the street because it was too tired.&quot;</p><p class="text-sm text-gray-700 mb-2"> When processing &quot;it&quot;, the model&#39;s attention might look like: </p><div class="flex flex-wrap gap-2"><span class="px-2 py-1 bg-red-100 rounded text-xs">The (5%)</span><span class="px-2 py-1 bg-green-200 rounded text-xs font-bold">animal (75%)</span><span class="px-2 py-1 bg-red-100 rounded text-xs">didn&#39;t (2%)</span><span class="px-2 py-1 bg-red-100 rounded text-xs">cross (3%)</span><span class="px-2 py-1 bg-red-100 rounded text-xs">the (5%)</span><span class="px-2 py-1 bg-yellow-200 rounded text-xs">street (5%)</span><span class="px-2 py-1 bg-red-100 
rounded text-xs">because (2%)</span><span class="px-2 py-1 bg-yellow-200 rounded text-xs">tired (3%)</span></div><p class="text-xs text-gray-500 mt-3"> The model learns &quot;it&quot; refers to &quot;animal&quot; (75%), with some attention to &quot;tired&quot; (3%) to understand context. </p></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">The Transformer Stack</h3><p class="text-gray-700 mb-4 leading-relaxed"> A modern LLM like GPT-4 or Llama has dozens of these attention layers stacked on top of each other. Each layer refines the understanding: </p><div class="bg-gray-50 p-4 rounded-xl"><div class="space-y-3"><div class="flex items-center gap-3"><div class="w-24 text-right text-xs text-gray-500">Layer 1</div><div class="flex-1 h-8 bg-blue-200 rounded flex items-center px-3"><span class="text-xs">Basic word relationships, syntax</span></div></div><div class="flex items-center gap-3"><div class="w-24 text-right text-xs text-gray-500">Layer 12</div><div class="flex-1 h-8 bg-blue-300 rounded flex items-center px-3"><span class="text-xs">Phrase meanings, local context</span></div></div><div class="flex items-center gap-3"><div class="w-24 text-right text-xs text-gray-500">Layer 24</div><div class="flex-1 h-8 bg-blue-400 rounded flex items-center px-3"><span class="text-xs text-white">Sentences, reasoning steps</span></div></div><div class="flex items-center gap-3"><div class="w-24 text-right text-xs text-gray-500">Layer 48</div><div class="flex-1 h-8 bg-blue-600 rounded flex items-center px-3"><span class="text-xs text-white">High-level concepts, document structure</span></div></div><div class="flex items-center gap-3"><div class="w-24 text-right text-xs text-gray-500">Final</div><div class="flex-1 h-8 bg-green-500 rounded flex items-center px-3"><span class="text-xs text-white">Next token prediction</span></div></div></div><p class="text-xs text-gray-500 mt-4"> Lower layers handle local patterns (words, grammar). 
Higher layers handle global patterns (meaning, reasoning, context). </p></div></div></section><section id="training-process" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">5️⃣</span><h2 class="text-2xl font-bold">How Training Actually Works</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">From Random to Brilliant</h3><p class="text-gray-700 mb-4 leading-relaxed"> Training an LLM is like teaching a student who starts knowing nothing. You show them examples, correct their mistakes, and gradually they improve. </p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">The Training Loop</h4><div class="space-y-4"><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">1</div><div class="flex-1"><h5 class="font-medium">Feed Input</h5><p class="text-sm text-gray-700"> Give the model a sequence: &quot;The capital of France is&quot; </p></div></div><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">2</div><div class="flex-1"><h5 class="font-medium">Make Prediction</h5><p class="text-sm text-gray-700"> Model runs through layers and guesses: &quot;Paris&quot; (or maybe &quot;London&quot; if it&#39;s early in training) </p></div></div><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">3</div><div class="flex-1"><h5 class="font-medium">Compare to Truth</h5><p class="text-sm text-gray-700"> We know the answer should be &quot;Paris&quot;. Calculate how wrong the model was. 
</p></div></div><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">4</div><div class="flex-1"><h5 class="font-medium">Adjust Weights</h5><p class="text-sm text-gray-700"> Use calculus (backpropagation) to figure out which weights to tweak so the model does better next time. </p></div></div><div class="flex items-start gap-3"><div class="w-8 h-8 bg-blue-500 rounded-full flex items-center justify-center text-white font-bold text-sm flex-shrink-0">5</div><div class="flex-1"><h5 class="font-medium">Repeat Billions of Times</h5><p class="text-sm text-gray-700"> Do this for trillions of tokens across the entire internet. Gradually, the model gets better. </p></div></div></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">Key Concepts</h3><div class="grid grid-cols-1 md:grid-cols-2 gap-4 mb-6"><div class="p-4 bg-blue-50 rounded-lg"><h5 class="font-medium text-blue-800 mb-2">Loss Function</h5><p class="text-sm text-gray-700"> A mathematical measure of &quot;how wrong&quot; the model was. Lower loss = better predictions. Training tries to minimize this. </p></div><div class="p-4 bg-green-50 rounded-lg"><h5 class="font-medium text-green-800 mb-2">Learning Rate</h5><p class="text-sm text-gray-700"> How big of adjustments to make. Too big = unstable. Too small = slow. Like turning the steering wheel when driving. </p></div><div class="p-4 bg-purple-50 rounded-lg"><h5 class="font-medium text-purple-800 mb-2">Epochs</h5><p class="text-sm text-gray-700"> How many times the model sees the entire dataset. More epochs = more learning, but too many = overfitting. </p></div><div class="p-4 bg-orange-50 rounded-lg"><h5 class="font-medium text-orange-800 mb-2">Batch Size</h5><p class="text-sm text-gray-700"> How many examples to process before updating weights. Larger batches = more stable but need more memory. 
</p></div></div><div class="bg-yellow-50 p-4 rounded-lg"><h4 class="font-medium text-yellow-800 mb-2">⚠️ Why This Takes So Long</h4><p class="text-sm text-gray-700"> GPT-3 was trained on ~500 billion tokens. That&#39;s like reading the entire written works of humanity hundreds of times. Each token requires running billions of mathematical operations through the network. Even with thousands of GPUs, this takes weeks or months. That&#39;s why pre-trained models are so valuable — you&#39;re leveraging weeks of computation! </p></div></div></section><section id="fine-tuning-deep" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">6️⃣</span><h2 class="text-2xl font-bold">Fine-Tuning: Teaching the Specialist</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">Why Fine-Tune?</h3><p class="text-gray-700 mb-4 leading-relaxed"> A pre-trained model knows general language and facts, but it doesn&#39;t know YOUR specific domain. Fine-tuning is like giving it specialized training. </p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">The Analogy: Medical School</h4><div class="space-y-3"><div class="flex items-start gap-3"><span class="text-2xl">📚</span><div><h5 class="font-medium">Pre-training = College</h5><p class="text-sm text-gray-700"> The model learns general knowledge, critical thinking, and how to communicate. Like a college graduate who knows a bit about everything. </p></div></div><div class="text-center text-2xl">↓</div><div class="flex items-start gap-3"><span class="text-2xl">🎓</span><div><h5 class="font-medium">Fine-tuning = Medical School</h5><p class="text-sm text-gray-700"> Now you give them specialized training. Thousands of examples of medical cases, diagnoses, patient interactions. They become a doctor. 
</p></div></div></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">How Fine-Tuning Works</h3><p class="text-gray-700 mb-4 leading-relaxed"> Instead of training from scratch (which takes weeks and costs millions), you start with a pre-trained model and continue training on your specific dataset. This is much faster because: </p><ul class="list-disc list-inside text-gray-700 space-y-2 mb-6 ml-4"><li>The model already knows language, grammar, and general facts</li><li>You only need to teach it your specific domain</li><li>Training takes hours or days, not weeks</li><li>Costs hundreds of dollars, not millions</li><li>Needs hundreds or thousands of examples, not billions</li></ul><div class="bg-green-50 p-4 rounded-lg mb-6"><h4 class="font-medium text-green-800 mb-2">✨ What Changes During Fine-Tuning?</h4><p class="text-sm text-gray-700 mb-3"> The model&#39;s weights adjust to better predict your specific examples. It learns: </p><ul class="list-disc list-inside text-sm text-gray-700 space-y-1 ml-4"><li>Your terminology and jargon</li><li>Your preferred response style and tone</li><li>Patterns specific to your domain</li><li>How to format responses the way you want</li><li>Your specific knowledge base</li></ul></div><div class="bg-blue-50 p-4 rounded-lg"><h4 class="font-medium text-blue-800 mb-2">🎯 Why Your Dataset Matters</h4><p class="text-sm text-gray-700"> Every example in your dataset is teaching the model: &quot;When you see this kind of input, produce this kind of output.&quot; The quality and diversity of your examples directly determines the quality of your fine-tuned model. That&#39;s why edukaAI focuses so much on helping you create great examples — they become the training signal that shapes your AI&#39;s behavior. 
</p></div></div></section><section id="model-sizes" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">7️⃣</span><h2 class="text-2xl font-bold">Understanding Model Sizes</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">What Does &quot;7B&quot; Mean?</h3><p class="text-gray-700 mb-4 leading-relaxed"> When you see &quot;Llama 2 7B&quot; or &quot;GPT-3 175B&quot;, the &quot;B&quot; stands for <strong>billion parameters</strong>. Think of parameters as the &quot;knobs&quot; or &quot;dials&quot; inside the neural network that get adjusted during training. More parameters = more capacity to learn, but also more compute needed. </p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">The Parameter Scale</h4><div class="space-y-4"><div class="flex items-center gap-4"><div class="w-32 text-right"><span class="text-2xl">🌱</span></div><div class="flex-1 p-3 bg-green-100 rounded-lg"><h5 class="font-medium">Small (1B - 7B)</h5><p class="text-sm text-gray-700"><strong>Examples:</strong> TinyLlama, Phi-2, Llama 2 7B<br><strong>Good for:</strong> Testing, edge devices, simple tasks<br><strong>Hardware:</strong> Runs on consumer GPUs (RTX 3060)<br><strong>Speed:</strong> Very fast, low latency </p></div></div><div class="flex items-center gap-4"><div class="w-32 text-right"><span class="text-2xl">🌳</span></div><div class="flex-1 p-3 bg-blue-100 rounded-lg"><h5 class="font-medium">Medium (13B - 30B)</h5><p class="text-sm text-gray-700"><strong>Examples:</strong> Llama 2 13B, CodeLlama 13B, Mistral 7B (punches above its weight!)<br><strong>Good for:</strong> Production use, most practical applications<br><strong>Hardware:</strong> RTX 3090, RTX 4090, or cloud A10G<br><strong>Speed:</strong> Good balance of quality and speed </p></div></div><div class="flex items-center gap-4"><div class="w-32 text-right"><span class="text-2xl">🏔️</span></div><div class="flex-1 p-3 bg-purple-100 rounded-lg"><h5 
class="font-medium">Large (70B - 175B)</h5><p class="text-sm text-gray-700"><strong>Examples:</strong> Llama 2 70B, GPT-3, Claude 2<br><strong>Good for:</strong> Complex reasoning, research, maximum capability<br><strong>Hardware:</strong> Multiple GPUs, A100s, or API access only<br><strong>Speed:</strong> Slower but smartest </p></div></div></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">Bigger Isn&#39;t Always Better</h3><p class="text-gray-700 mb-4 leading-relaxed"> It&#39;s tempting to think &quot;bigger model = better,&quot; but that&#39;s not always true. A well-trained 13B model can outperform a poorly-trained 70B model on specific tasks. Plus, bigger models have downsides: </p><div class="grid grid-cols-1 md:grid-cols-2 gap-4 mb-6"><div class="p-4 bg-red-50 rounded-lg border border-red-200"><h5 class="font-medium text-red-800 mb-2">❌ Large Model Problems</h5><ul class="text-sm text-gray-700 space-y-1"><li>Higher inference costs (more $ per request)</li><li>Slower responses</li><li>Requires expensive hardware</li><li>Higher energy consumption</li><li>Harder to deploy on edge devices</li></ul></div><div class="p-4 bg-green-50 rounded-lg border border-green-200"><h5 class="font-medium text-green-800 mb-2">✅ Right-Size Benefits</h5><ul class="text-sm text-gray-700 space-y-1"><li>Faster responses = better UX</li><li>Lower costs = scalable</li><li>Runs on affordable hardware</li><li>Easier to fine-tune</li><li>Can deploy anywhere</li></ul></div></div><div class="bg-yellow-50 p-4 rounded-lg"><h4 class="font-medium text-yellow-800 mb-2">💡 The Sweet Spot for Beginners</h4><p class="text-sm text-yellow-700"> For your first fine-tuning project, we recommend starting with <strong>7B-13B models</strong>. They&#39;re big enough to learn your domain well, small enough to train affordably, and can run on consumer hardware. Once you master these, you can experiment with larger models. 
</p></div></div></section><section id="quantization" class="mb-12"><div class="flex items-center gap-3 mb-6"><span class="text-3xl">8️⃣</span><h2 class="text-2xl font-bold">Quantization: Making Models Smaller</h2></div><div class="card"><h3 class="text-lg font-semibold mb-4 text-blue-700">The Magic of Model Compression</h3><p class="text-gray-700 mb-4 leading-relaxed"> Remember those billions of parameters? Each one is stored as a number (usually 16 or 32 bits). <strong>Quantization</strong> is a technique that reduces the precision of these numbers, making the model smaller and faster while keeping most of its intelligence. Think of it like compressing an MP3 — smaller file, same song. </p><div class="bg-gray-50 p-6 rounded-xl mb-6"><h4 class="font-medium mb-4">How It Works (The Simple Version)</h4><div class="space-y-4"><div class="p-4 bg-white rounded-lg border"><h5 class="font-medium mb-2">Normal (FP16) - 16-bit precision</h5><p class="font-mono text-xs text-gray-600 mb-2"> Weight value: 0.3847265849234712 </p><p class="text-sm text-gray-700"> Very precise, but takes 16 bits to store. A 7B model needs ~14GB RAM. </p></div><div class="text-center text-2xl">↓ Quantize ↓</div><div class="p-4 bg-green-50 rounded-lg border border-green-200"><h5 class="font-medium text-green-800 mb-2">Quantized (INT8) - 8-bit precision</h5><p class="font-mono text-xs text-gray-600 mb-2"> Weight value: 0.38 </p><p class="text-sm text-gray-700"> Less precise, but only 8 bits. Same 7B model now needs ~7GB RAM — <strong>half the size!</strong></p></div><div class="text-center text-2xl">↓ Quantize More ↓</div><div class="p-4 bg-blue-50 rounded-lg border border-blue-200"><h5 class="font-medium text-blue-800 mb-2">Highly Quantized (INT4) - 4-bit precision</h5><p class="font-mono text-xs text-gray-600 mb-2"> Weight value: 0.4 </p><p class="text-sm text-gray-700"> Even less precise, only 4 bits. 
Same 7B model now needs ~3.5GB RAM — <strong>quarter the size!</strong></p></div></div></div><h3 class="text-lg font-semibold mb-4 text-blue-700">Common Quantization Formats</h3><div class="overflow-x-auto"><table class="w-full text-sm mb-6"><thead><tr class="border-b"><th class="text-left py-2">Format</th><th class="text-left py-2">Bits</th><th class="text-left py-2">Size (7B model)</th><th class="text-left py-2">Quality Loss</th><th class="text-left py-2">Use Case</th></tr></thead><tbody><tr class="border-b"><td class="py-2 font-medium">FP16</td><td class="py-2">16</td><td class="py-2">~14 GB</td><td class="py-2 text-green-600">None</td><td class="py-2">Training, max quality</td></tr><tr class="border-b"><td class="py-2 font-medium">INT8</td><td class="py-2">8</td><td class="py-2">~7 GB</td><td class="py-2 text-yellow-600">Minimal</td><td class="py-2">Production inference</td></tr><tr class="border-b"><td class="py-2 font-medium">INT4 (Q4)</td><td class="py-2">4</td><td class="py-2">~3.5 GB</td><td class="py-2 text-orange-600">Small</td><td class="py-2">Consumer hardware</td></tr><tr><td class="py-2 font-medium">INT4 (Q2/Q3)</td><td class="py-2">2-3</td><td class="py-2">~2-2.5 GB</td><td class="py-2 text-red-600">Noticeable</td><td class="py-2">Edge devices, testing</td></tr></tbody></table></div><h3 class="text-lg font-semibold mb-4 text-blue-700">When to Use Quantization</h3><div class="grid grid-cols-1 md:grid-cols-2 gap-4 mb-6"><div class="p-4 bg-green-50 rounded-lg"><h5 class="font-medium text-green-800 mb-2">✅ Quantize When:</h5><ul class="text-sm text-gray-700 space-y-1"><li>Running inference (generating responses)</li><li>Deploying to consumer hardware</li><li>API cost reduction is important</li><li>Mobile/edge device deployment</li><li>Speed is critical</li></ul></div><div class="p-4 bg-yellow-50 rounded-lg"><h5 class="font-medium text-yellow-800 mb-2">⚠️ Don&#39;t Quantize When:</h5><ul class="text-sm text-gray-700 space-y-1"><li>Training/fine-tuning 
(use FP16)</li><li>Maximum accuracy is required</li><li>Complex reasoning tasks</li><li>Medical/legal applications</li><li>You have plenty of GPU memory</li></ul></div></div><div class="bg-purple-50 p-4 rounded-lg mb-6"><h4 class="font-medium text-purple-800 mb-2">🎯 Practical Example</h4><p class="text-sm text-gray-700 mb-3"> Let&#39;s say you want to run Llama 2 13B on your laptop: </p><div class="space-y-2 text-sm"><div class="flex justify-between items-center p-2 bg-white rounded"><span>Full precision (FP16):</span><span class="font-mono text-red-600">~26 GB RAM ❌</span></div><div class="flex justify-between items-center p-2 bg-white rounded"><span>8-bit quantized:</span><span class="font-mono text-yellow-600">~13 GB RAM ⚠️</span></div><div class="flex justify-between items-center p-2 bg-white rounded"><span>4-bit quantized (Q4):</span><span class="font-mono text-green-600">~6.5 GB RAM ✅</span></div></div><p class="text-sm text-gray-700 mt-3"><strong>Result:</strong> By quantizing to 4-bit, you can run a 13B model on a laptop with 8GB VRAM (like an RTX 3070) with minimal quality loss! </p></div><div class="bg-blue-50 p-4 rounded-lg"><h4 class="font-medium text-blue-800 mb-2">🔧 Tools for Quantization</h4><p class="text-sm text-gray-700 mb-2"> Popular tools for quantizing models: </p><ul class="text-sm text-gray-700 space-y-1"><li><strong>llama.cpp</strong> — Most popular, supports GGUF format</li><li><strong>AutoGPTQ</strong> — Easy quantization for HuggingFace models</li><li><strong>BitsAndBytes</strong> — 8-bit quantization for training</li><li><strong>ExLlama</strong> — Fast inference for 4-bit models</li></ul><p class="text-sm text-gray-700 mt-2"><strong>Good news:</strong> Many pre-quantized models are already available on HuggingFace — just download and use! 
</p></div></div></section><div class="card bg-gradient-to-br from-blue-50 to-purple-50 mb-12"><h2 class="text-xl font-semibold mb-4">🎓 What You Now Understand</h2><div class="grid grid-cols-1 md:grid-cols-2 gap-4 text-sm"><div class="p-3 bg-white rounded-lg"><strong>LLMs are next-token predictors</strong><p class="text-gray-600 mt-1">They predict one token at a time, feeding predictions back as input.</p></div><div class="p-3 bg-white rounded-lg"><strong>Tokens are the building blocks</strong><p class="text-gray-600 mt-1">Not words or characters, but pieces somewhere in between.</p></div><div class="p-3 bg-white rounded-lg"><strong>Neural networks learn patterns</strong><p class="text-gray-600 mt-1">They adjust millions of weights to get better at predictions.</p></div><div class="p-3 bg-white rounded-lg"><strong>Attention finds relationships</strong><p class="text-gray-600 mt-1">Lets tokens focus on other relevant tokens in the context.</p></div><div class="p-3 bg-white rounded-lg"><strong>Training is iterative correction</strong><p class="text-gray-600 mt-1">Show example, predict, compare to truth, adjust, repeat billions of times.</p></div><div class="p-3 bg-white rounded-lg"><strong>Fine-tuning specializes the model</strong><p class="text-gray-600 mt-1">Start with general knowledge, train on your specific examples.</p></div><div class="p-3 bg-white rounded-lg"><strong>Model size matters (but not too much)</strong><p class="text-gray-600 mt-1">7B-13B is the sweet spot. Bigger = smarter but slower and costlier.</p></div><div class="p-3 bg-white rounded-lg"><strong>Quantization makes models practical</strong><p class="text-gray-600 mt-1">Compress 16-bit weights to 4-bit. Run big models on consumer hardware!</p></div></div></div><div class="text-center"><h2 class="text-2xl font-bold mb-4">Ready to Apply This Knowledge?</h2><p class="text-gray-600 mb-6">Now you understand how LLMs work. Time to build your dataset!</p><div class="flex justify-center gap-4">`);
49
+ _push(ssrRenderComponent(_component_NuxtLink, {
50
+ to: "/examples/new",
51
+ class: "btn-primary"
52
+ }, {
53
+ default: withCtx((_, _push2, _parent2, _scopeId) => {
54
+ if (_push2) {
55
+ _push2(` Create Your First Example `);
56
+ } else {
57
+ return [
58
+ createTextVNode(" Create Your First Example ")
59
+ ];
60
+ }
61
+ }),
62
+ _: 1
63
+ }, _parent));
64
+ _push(ssrRenderComponent(_component_NuxtLink, {
65
+ to: "/help",
66
+ class: "btn-secondary"
67
+ }, {
68
+ default: withCtx((_, _push2, _parent2, _scopeId) => {
69
+ if (_push2) {
70
+ _push2(` Back to Guide `);
71
+ } else {
72
+ return [
73
+ createTextVNode(" Back to Guide ")
74
+ ];
75
+ }
76
+ }),
77
+ _: 1
78
+ }, _parent));
79
+ _push(`</div></div><div class="mt-12 pt-8 border-t text-sm text-gray-500"><h3 class="font-semibold mb-2">Further Reading</h3><ul class="space-y-1"><li><a href="https://arxiv.org/abs/1706.03762" target="_blank" class="text-blue-600 hover:underline"> &quot;Attention Is All You Need&quot; — Vaswani et al. (2017) </a><span class="text-gray-400">— The original transformer paper</span></li><li><a href="https://jalammar.github.io/illustrated-transformer/" target="_blank" class="text-blue-600 hover:underline"> The Illustrated Transformer — Jay Alammar </a><span class="text-gray-400">— Visual guide to how transformers work</span></li><li><a href="https://d2l.ai/chapter_attention-mechanisms/index.html" target="_blank" class="text-blue-600 hover:underline"> Dive into Deep Learning — Attention Mechanisms </a><span class="text-gray-400">— Technical deep dive</span></li></ul></div></div>`);
80
+ };
81
+ }
82
+ };
83
+ const _sfc_setup = _sfc_main.setup;
84
+ _sfc_main.setup = (props, ctx) => {
85
+ const ssrContext = useSSRContext();
86
+ (ssrContext.modules || (ssrContext.modules = /* @__PURE__ */ new Set())).add("pages/help/llm-training.vue");
87
+ return _sfc_setup ? _sfc_setup(props, ctx) : void 0;
88
+ };
89
+
90
+ export { _sfc_main as default };
91
+ //# sourceMappingURL=llm-training-DIqc0eiM.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-training-DIqc0eiM.mjs","sources":["../../../../node_modules/.cache/nuxt/.nuxt/dist/server/_nuxt/llm-training-DIqc0eiM.js"],"names":[],"mappings":"","x_google_ignoreList":[0]}
@@ -0,0 +1,92 @@
1
+ import { _ as __nuxt_component_1 } from './ExampleForm-Bcpl0CfL.mjs';
2
+ import { mergeProps, useSSRContext } from 'vue';
3
+ import { ssrRenderAttrs, ssrRenderComponent } from 'vue/server-renderer';
4
+ import { a as useRouter } from './server.mjs';
5
+ import 'vue-router';
6
+ import '../nitro/nitro.mjs';
7
+ import 'node:http';
8
+ import 'node:https';
9
+ import 'node:events';
10
+ import 'node:buffer';
11
+ import 'node:fs';
12
+ import 'node:path';
13
+ import 'node:crypto';
14
+ import 'node:url';
15
+ import '@iconify/utils';
16
+ import 'consola';
17
+ import 'pinia';
18
+ import 'tailwindcss/colors';
19
+ import '@iconify/vue';
20
+ import '../routes/renderer.mjs';
21
+ import 'vue-bundle-renderer/runtime';
22
+ import 'unhead/server';
23
+ import 'devalue';
24
+ import 'unhead/utils';
25
+
26
+ const _sfc_main = {
27
+ __name: "new",
28
+ __ssrInlineRender: true,
29
+ setup(__props) {
30
+ const router = useRouter();
31
+ const handleSubmit = async (formData) => {
32
+ try {
33
+ const response = await $fetch("/api/examples", {
34
+ method: "POST",
35
+ body: {
36
+ ...formData,
37
+ status: "approved"
38
+ }
39
+ });
40
+ if (response.success) {
41
+ alert("Example created successfully!");
42
+ router.push("/examples");
43
+ }
44
+ } catch (error) {
45
+ console.error("Error creating example:", error);
46
+ alert("Failed to create example. Please try again.");
47
+ }
48
+ };
49
+ const handleSaveDraft = async (formData) => {
50
+ try {
51
+ const response = await $fetch("/api/examples", {
52
+ method: "POST",
53
+ body: {
54
+ ...formData,
55
+ status: "draft"
56
+ }
57
+ });
58
+ if (response.success) {
59
+ alert("Draft saved!");
60
+ router.push("/examples");
61
+ }
62
+ } catch (error) {
63
+ console.error("Error saving draft:", error);
64
+ alert("Failed to save draft. Please try again.");
65
+ }
66
+ };
67
+ const handleCancel = () => {
68
+ if (confirm("Are you sure you want to cancel? Any unsaved changes will be lost.")) {
69
+ router.push("/examples");
70
+ }
71
+ };
72
+ return (_ctx, _push, _parent, _attrs) => {
73
+ const _component_ExampleForm = __nuxt_component_1;
74
+ _push(`<div${ssrRenderAttrs(mergeProps({ class: "max-w-4xl mx-auto" }, _attrs))}><div class="mb-6"><h1 class="text-2xl font-bold mb-2">Create New Training Example</h1><p class="text-gray-600">Add a new example to your dataset. All fields help you build a better training dataset.</p></div>`);
75
+ _push(ssrRenderComponent(_component_ExampleForm, {
76
+ onSubmit: handleSubmit,
77
+ onSaveDraft: handleSaveDraft,
78
+ onCancel: handleCancel
79
+ }, null, _parent));
80
+ _push(`</div>`);
81
+ };
82
+ }
83
+ };
84
+ const _sfc_setup = _sfc_main.setup;
85
+ _sfc_main.setup = (props, ctx) => {
86
+ const ssrContext = useSSRContext();
87
+ (ssrContext.modules || (ssrContext.modules = /* @__PURE__ */ new Set())).add("pages/examples/new.vue");
88
+ return _sfc_setup ? _sfc_setup(props, ctx) : void 0;
89
+ };
90
+
91
+ export { _sfc_main as default };
92
+ //# sourceMappingURL=new-b3338aLF.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"new-b3338aLF.mjs","sources":["../../../../node_modules/.cache/nuxt/.nuxt/dist/server/_nuxt/new-b3338aLF.js"],"names":[],"mappings":"","x_google_ignoreList":[0]}
@@ -0,0 +1,290 @@
1
+ import { defineComponent, shallowRef, h, resolveComponent, computed } from 'vue';
2
+ import { z as parseQuery, o as hasProtocol, j as joinURL, A as withTrailingSlash, B as withoutTrailingSlash } from '../nitro/nitro.mjs';
3
+ import { a as useRouter, h as encodeRoutePath, r as resolveRouteObject, n as navigateTo, c as useNuxtApp, g as useRuntimeConfig, i as nuxtLinkDefaults } from './server.mjs';
4
+
5
+ const firstNonUndefined = (...args) => args.find((arg) => arg !== void 0);
6
+ // @__NO_SIDE_EFFECTS__
7
+ function defineNuxtLink(options) {
8
+ const componentName = options.componentName || "NuxtLink";
9
+ function isHashLinkWithoutHashMode(link) {
10
+ return typeof link === "string" && link.startsWith("#");
11
+ }
12
+ function resolveTrailingSlashBehavior(to, resolve, trailingSlash) {
13
+ const effectiveTrailingSlash = trailingSlash ?? options.trailingSlash;
14
+ if (!to || effectiveTrailingSlash !== "append" && effectiveTrailingSlash !== "remove") {
15
+ return to;
16
+ }
17
+ if (typeof to === "string") {
18
+ return applyTrailingSlashBehavior(to, effectiveTrailingSlash);
19
+ }
20
+ const path = "path" in to && to.path !== void 0 ? to.path : resolve(to).path;
21
+ const resolvedPath = {
22
+ ...to,
23
+ name: void 0,
24
+ // named routes would otherwise always override trailing slash behavior
25
+ path: applyTrailingSlashBehavior(path, effectiveTrailingSlash)
26
+ };
27
+ return resolvedPath;
28
+ }
29
+ function useNuxtLink(props) {
30
+ const router = useRouter();
31
+ const config = useRuntimeConfig();
32
+ const hasTarget = computed(() => !!props.target && props.target !== "_self");
33
+ const isAbsoluteUrl = computed(() => {
34
+ const path = props.to || props.href || "";
35
+ return typeof path === "string" && hasProtocol(path, { acceptRelative: true });
36
+ });
37
+ const builtinRouterLink = resolveComponent("RouterLink");
38
+ const useBuiltinLink = builtinRouterLink && typeof builtinRouterLink !== "string" ? builtinRouterLink.useLink : void 0;
39
+ const isExternal = computed(() => {
40
+ if (props.external) {
41
+ return true;
42
+ }
43
+ const path = props.to || props.href || "";
44
+ if (typeof path === "object") {
45
+ return false;
46
+ }
47
+ return path === "" || isAbsoluteUrl.value;
48
+ });
49
+ const to = computed(() => {
50
+ const path = props.to || props.href || "";
51
+ if (isExternal.value) {
52
+ return path;
53
+ }
54
+ return resolveTrailingSlashBehavior(path, router.resolve, props.trailingSlash);
55
+ });
56
+ const link = isExternal.value ? void 0 : useBuiltinLink?.({ ...props, to });
57
+ const href = computed(() => {
58
+ const effectiveTrailingSlash = props.trailingSlash ?? options.trailingSlash;
59
+ if (!to.value || isAbsoluteUrl.value || isHashLinkWithoutHashMode(to.value)) {
60
+ return to.value;
61
+ }
62
+ if (isExternal.value) {
63
+ const path = typeof to.value === "object" && "path" in to.value ? resolveRouteObject(to.value) : to.value;
64
+ const href2 = typeof path === "object" ? router.resolve(path).href : path;
65
+ return applyTrailingSlashBehavior(href2, effectiveTrailingSlash);
66
+ }
67
+ if (typeof to.value === "object") {
68
+ return router.resolve(to.value)?.href ?? null;
69
+ }
70
+ return applyTrailingSlashBehavior(joinURL(config.app.baseURL, to.value), effectiveTrailingSlash);
71
+ });
72
+ return {
73
+ to,
74
+ hasTarget,
75
+ isAbsoluteUrl,
76
+ isExternal,
77
+ //
78
+ href,
79
+ isActive: link?.isActive ?? computed(() => to.value === router.currentRoute.value.path),
80
+ isExactActive: link?.isExactActive ?? computed(() => to.value === router.currentRoute.value.path),
81
+ route: link?.route ?? computed(() => router.resolve(to.value)),
82
+ async navigate(_e) {
83
+ await navigateTo(href.value, { replace: props.replace, external: isExternal.value || hasTarget.value });
84
+ }
85
+ };
86
+ }
87
+ return defineComponent({
88
+ name: componentName,
89
+ props: {
90
+ // Routing
91
+ to: {
92
+ type: [String, Object],
93
+ default: void 0,
94
+ required: false
95
+ },
96
+ href: {
97
+ type: [String, Object],
98
+ default: void 0,
99
+ required: false
100
+ },
101
+ // Attributes
102
+ target: {
103
+ type: String,
104
+ default: void 0,
105
+ required: false
106
+ },
107
+ rel: {
108
+ type: String,
109
+ default: void 0,
110
+ required: false
111
+ },
112
+ noRel: {
113
+ type: Boolean,
114
+ default: void 0,
115
+ required: false
116
+ },
117
+ // Prefetching
118
+ prefetch: {
119
+ type: Boolean,
120
+ default: void 0,
121
+ required: false
122
+ },
123
+ prefetchOn: {
124
+ type: [String, Object],
125
+ default: void 0,
126
+ required: false
127
+ },
128
+ noPrefetch: {
129
+ type: Boolean,
130
+ default: void 0,
131
+ required: false
132
+ },
133
+ // Styling
134
+ activeClass: {
135
+ type: String,
136
+ default: void 0,
137
+ required: false
138
+ },
139
+ exactActiveClass: {
140
+ type: String,
141
+ default: void 0,
142
+ required: false
143
+ },
144
+ prefetchedClass: {
145
+ type: String,
146
+ default: void 0,
147
+ required: false
148
+ },
149
+ // Vue Router's `<RouterLink>` additional props
150
+ replace: {
151
+ type: Boolean,
152
+ default: void 0,
153
+ required: false
154
+ },
155
+ ariaCurrentValue: {
156
+ type: String,
157
+ default: void 0,
158
+ required: false
159
+ },
160
+ // Edge cases handling
161
+ external: {
162
+ type: Boolean,
163
+ default: void 0,
164
+ required: false
165
+ },
166
+ // Slot API
167
+ custom: {
168
+ type: Boolean,
169
+ default: void 0,
170
+ required: false
171
+ },
172
+ // Behavior
173
+ trailingSlash: {
174
+ type: String,
175
+ default: void 0,
176
+ required: false
177
+ }
178
+ },
179
+ useLink: useNuxtLink,
180
+ setup(props, { slots }) {
181
+ const router = useRouter();
182
+ const { to, href, navigate, isExternal, hasTarget, isAbsoluteUrl } = useNuxtLink(props);
183
+ shallowRef(false);
184
+ const el = void 0;
185
+ const elRef = void 0;
186
+ async function prefetch(nuxtApp = useNuxtApp()) {
187
+ {
188
+ return;
189
+ }
190
+ }
191
+ return () => {
192
+ if (!isExternal.value && !hasTarget.value && !isHashLinkWithoutHashMode(to.value)) {
193
+ const routerLinkProps = {
194
+ ref: elRef,
195
+ to: to.value,
196
+ activeClass: props.activeClass || options.activeClass,
197
+ exactActiveClass: props.exactActiveClass || options.exactActiveClass,
198
+ replace: props.replace,
199
+ ariaCurrentValue: props.ariaCurrentValue,
200
+ custom: props.custom
201
+ };
202
+ if (!props.custom) {
203
+ routerLinkProps.rel = props.rel || void 0;
204
+ }
205
+ return h(
206
+ resolveComponent("RouterLink"),
207
+ routerLinkProps,
208
+ slots.default
209
+ );
210
+ }
211
+ const target = props.target || null;
212
+ const rel = firstNonUndefined(
213
+ // converts `""` to `null` to prevent the attribute from being added as empty (`rel=""`)
214
+ props.noRel ? "" : props.rel,
215
+ options.externalRelAttribute,
216
+ /*
217
+ * A fallback rel of `noopener noreferrer` is applied for external links or links that open in a new tab.
218
+ * This solves a reverse tabnapping security flaw in browsers pre-2021 as well as improving privacy.
219
+ */
220
+ isAbsoluteUrl.value || hasTarget.value ? "noopener noreferrer" : ""
221
+ ) || null;
222
+ if (props.custom) {
223
+ if (!slots.default) {
224
+ return null;
225
+ }
226
+ return slots.default({
227
+ href: href.value,
228
+ navigate,
229
+ prefetch,
230
+ get route() {
231
+ if (!href.value) {
232
+ return void 0;
233
+ }
234
+ const url = new URL(href.value, "http://localhost");
235
+ return {
236
+ path: url.pathname,
237
+ fullPath: url.pathname,
238
+ get query() {
239
+ return parseQuery(url.search);
240
+ },
241
+ hash: url.hash,
242
+ params: {},
243
+ name: void 0,
244
+ matched: [],
245
+ redirectedFrom: void 0,
246
+ meta: {},
247
+ href: href.value
248
+ };
249
+ },
250
+ rel,
251
+ target,
252
+ isExternal: isExternal.value || hasTarget.value,
253
+ isActive: false,
254
+ isExactActive: false
255
+ });
256
+ }
257
+ return h("a", {
258
+ ref: el,
259
+ href: href.value || null,
260
+ // converts `""` to `null` to prevent the attribute from being added as empty (`href=""`)
261
+ rel,
262
+ target,
263
+ onClick: async (event) => {
264
+ if (isExternal.value || hasTarget.value) {
265
+ return;
266
+ }
267
+ event.preventDefault();
268
+ try {
269
+ const encodedHref = encodeRoutePath(href.value);
270
+ return await (props.replace ? router.replace(encodedHref) : router.push(encodedHref));
271
+ } finally {
272
+ }
273
+ }
274
+ }, slots.default?.());
275
+ };
276
+ }
277
+ });
278
+ }
279
+ const __nuxt_component_0 = /* @__PURE__ */ defineNuxtLink(nuxtLinkDefaults);
280
+ function applyTrailingSlashBehavior(to, trailingSlash) {
281
+ const normalizeFn = trailingSlash === "append" ? withTrailingSlash : withoutTrailingSlash;
282
+ const hasProtocolDifferentFromHttp = hasProtocol(to) && !to.startsWith("http");
283
+ if (hasProtocolDifferentFromHttp) {
284
+ return to;
285
+ }
286
+ return normalizeFn(to, true);
287
+ }
288
+
289
+ export { __nuxt_component_0 as _ };
290
+ //# sourceMappingURL=nuxt-link-Ceyd90PQ.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"nuxt-link-Ceyd90PQ.mjs","sources":["../../../../node_modules/.cache/nuxt/.nuxt/dist/server/_nuxt/nuxt-link-Ceyd90PQ.js"],"names":[],"mappings":"","x_google_ignoreList":[0]}