writer 0.8.3rc4__py3-none-any.whl → 1.25.1rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (378)
  1. writer/__init__.py +1 -1
  2. writer/abstract.py +1 -1
  3. writer/{ai.py → ai/__init__.py} +867 -163
  4. writer/app_runner.py +596 -241
  5. writer/app_templates/default/.wf/components-blueprints_blueprint-0-0decp3w5erhvl0nw.jsonl +11 -0
  6. writer/app_templates/default/.wf/components-blueprints_root.jsonl +1 -0
  7. writer/app_templates/default/.wf/components-page-0-c0f99a9e-5004-4e75-a6c6-36f17490b134.jsonl +27 -0
  8. writer/app_templates/default/.wf/components-root.jsonl +1 -0
  9. writer/app_templates/default/.wf/components-workflows_root.jsonl +1 -0
  10. writer/app_templates/default/.wf/components-workflows_workflow-0-lfltcky7l1fsm6j2.jsonl +1 -0
  11. writer/app_templates/default/.wf/metadata.json +3 -0
  12. writer/app_templates/default/README.md +3 -0
  13. writer/app_templates/default/main.py +16 -0
  14. writer/app_templates/default/requirements.txt +1 -0
  15. writer/app_templates/default/static/README.md +8 -0
  16. writer/app_templates/default/static/agent_builder_demo.png +0 -0
  17. writer/app_templates/default/static/favicon.png +0 -0
  18. writer/app_templates/hello/.wf/components-blueprints_blueprint-0-t84xyhxau9ej3823.jsonl +18 -0
  19. writer/app_templates/hello/.wf/components-blueprints_root.jsonl +1 -0
  20. writer/app_templates/hello/.wf/components-page-0-c0f99a9e-5004-4e75-a6c6-36f17490b134.jsonl +15 -0
  21. writer/app_templates/hello/.wf/components-root.jsonl +1 -0
  22. writer/app_templates/hello/.wf/metadata.json +3 -0
  23. writer/app_templates/hello/main.py +16 -0
  24. writer/app_templates/hello/static/README.md +8 -0
  25. writer/app_templates/hello/static/favicon.png +0 -0
  26. writer/app_templates/hello/static/welcome.svg +40 -0
  27. writer/auth.py +7 -2
  28. writer/autogen.py +352 -0
  29. writer/blocks/__init__.py +51 -17
  30. writer/blocks/addtostatelist.py +10 -9
  31. writer/blocks/apitrigger.py +45 -0
  32. writer/blocks/base_block.py +332 -21
  33. writer/blocks/base_trigger.py +14 -0
  34. writer/blocks/calleventhandler.py +39 -35
  35. writer/blocks/changepage.py +48 -0
  36. writer/blocks/code.py +102 -0
  37. writer/blocks/crontrigger.py +49 -0
  38. writer/blocks/foreach.py +70 -53
  39. writer/blocks/httprequest.py +112 -99
  40. writer/blocks/ifelse.py +71 -0
  41. writer/blocks/logmessage.py +34 -39
  42. writer/blocks/parsejson.py +30 -29
  43. writer/blocks/returnvalue.py +7 -7
  44. writer/blocks/runblueprint.py +63 -0
  45. writer/blocks/setstate.py +43 -33
  46. writer/blocks/sharedblueprint.py +86 -0
  47. writer/blocks/uieventtrigger.py +49 -0
  48. writer/blocks/writeraddchatmessage.py +50 -12
  49. writer/blocks/writeraddtokg.py +38 -11
  50. writer/blocks/writeraskkg.py +123 -0
  51. writer/blocks/writerchat.py +80 -61
  52. writer/blocks/writerchatreply.py +279 -0
  53. writer/blocks/writerchatreplywithtoolconfig.py +393 -0
  54. writer/blocks/writerclassification.py +78 -39
  55. writer/blocks/writercompletion.py +49 -44
  56. writer/blocks/writerfileapi.py +85 -0
  57. writer/blocks/writerinitchat.py +24 -12
  58. writer/blocks/writerkeyvaluestorage.py +106 -0
  59. writer/blocks/writernocodeapp.py +35 -37
  60. writer/blocks/writerparsepdf.py +73 -0
  61. writer/blocks/writerstructuredoutput.py +105 -0
  62. writer/blocks/writertoolcalling.py +251 -0
  63. writer/blocks/writervision.py +141 -0
  64. writer/blocks/writerwebsearch.py +175 -0
  65. writer/blueprints.py +839 -0
  66. writer/command_line.py +52 -16
  67. writer/core.py +562 -290
  68. writer/core_ui.py +6 -2
  69. writer/evaluator.py +98 -46
  70. writer/journal.py +227 -0
  71. writer/keyvalue_storage.py +93 -0
  72. writer/logs.py +277 -0
  73. writer/serve.py +625 -327
  74. writer/ss_types.py +101 -12
  75. writer/static/assets/Arrow.dom-GBJpMYQS.js +1 -0
  76. writer/static/assets/BaseMarkdown-Wrvby5J8.js +1 -0
  77. writer/static/assets/BlueprintToolbar-BuXNRxWT.js +1 -0
  78. writer/static/assets/BlueprintToolbar-wpfX0jo_.css +1 -0
  79. writer/static/assets/BuilderApp-PTOI76jZ.js +8 -0
  80. writer/static/assets/BuilderApp-WimUfNZr.css +1 -0
  81. writer/static/assets/BuilderApplicationSelect-DXzy4e_h.js +7 -0
  82. writer/static/assets/BuilderApplicationSelect-XaM1D5fv.css +1 -0
  83. writer/static/assets/BuilderBlueprintLibraryPanel-Ckrhknlh.css +1 -0
  84. writer/static/assets/BuilderBlueprintLibraryPanel-DBDzhTlc.js +1 -0
  85. writer/static/assets/BuilderEmbeddedCodeEditor-B0bcjlhk.css +1 -0
  86. writer/static/assets/BuilderEmbeddedCodeEditor-Dn7eDICN.js +726 -0
  87. writer/static/assets/BuilderGraphSelect-C-LRsO8W.js +7 -0
  88. writer/static/assets/BuilderGraphSelect-D7B61d5s.css +1 -0
  89. writer/static/assets/BuilderInsertionLabel-BhyL9wgn.js +1 -0
  90. writer/static/assets/BuilderInsertionLabel-_YS5WPfq.css +1 -0
  91. writer/static/assets/BuilderInsertionOverlay-D2XS0ij9.css +1 -0
  92. writer/static/assets/BuilderInsertionOverlay-MkAIVruY.js +1 -0
  93. writer/static/assets/BuilderJournal-A0LcEwGI.js +7 -0
  94. writer/static/assets/BuilderJournal-DHv3Pvvm.css +1 -0
  95. writer/static/assets/BuilderModelSelect-CdSo_sih.js +7 -0
  96. writer/static/assets/BuilderModelSelect-Dc4IPLp2.css +1 -0
  97. writer/static/assets/BuilderSettings-BDwZBveu.js +16 -0
  98. writer/static/assets/BuilderSettings-lZkOXEYw.css +1 -0
  99. writer/static/assets/BuilderSettingsArtifactAPITriggerDetails-3O6jKBXD.js +4 -0
  100. writer/static/assets/BuilderSettingsArtifactAPITriggerDetails-DnX66iRg.css +1 -0
  101. writer/static/assets/BuilderSettingsDeploySharedBlueprint-BR_3ptsd.js +1 -0
  102. writer/static/assets/BuilderSettingsDeploySharedBlueprint-KJTl8gxP.css +1 -0
  103. writer/static/assets/BuilderSettingsHandlers-CBtEQFSo.js +1 -0
  104. writer/static/assets/BuilderSettingsHandlers-DJPeASfz.css +1 -0
  105. writer/static/assets/BuilderSidebarComponentTree-DLltgas5.js +1 -0
  106. writer/static/assets/BuilderSidebarComponentTree-DYu1F793.css +1 -0
  107. writer/static/assets/BuilderSidebarToolkit-CApZNTAq.js +7 -0
  108. writer/static/assets/BuilderSidebarToolkit-CwqbjRv8.css +1 -0
  109. writer/static/assets/BuilderTemplateEditor-CYSDeWgV.css +1 -0
  110. writer/static/assets/BuilderTemplateEditor-DnRDRcA0.js +87 -0
  111. writer/static/assets/BuilderVault-2vGoV0sx.js +1 -0
  112. writer/static/assets/BuilderVault-Cx6oQSES.css +1 -0
  113. writer/static/assets/ComponentRenderer-72hqvEvI.css +1 -0
  114. writer/static/assets/ComponentRenderer-D4Pj1i3s.js +1 -0
  115. writer/static/assets/SharedCopyClipboardButton-BipJKGtz.css +1 -0
  116. writer/static/assets/SharedCopyClipboardButton-DNI9kLe6.js +1 -0
  117. writer/static/assets/WdsCheckbox-DKvpPA4D.css +1 -0
  118. writer/static/assets/WdsCheckbox-edQcn1cf.js +1 -0
  119. writer/static/assets/WdsDropdownMenu-CzzPN9Wg.css +1 -0
  120. writer/static/assets/WdsDropdownMenu-DQnrRBNV.js +1 -0
  121. writer/static/assets/WdsFieldWrapper-Cmufx5Nj.js +1 -0
  122. writer/static/assets/WdsFieldWrapper-CsemOh8D.css +1 -0
  123. writer/static/assets/WdsTabs-DKj7BqI0.css +1 -0
  124. writer/static/assets/WdsTabs-DcfY_zn5.js +1 -0
  125. writer/static/assets/abap-D8nrxEjS.js +6 -0
  126. writer/static/assets/apex-BrXDlLUW.js +6 -0
  127. writer/static/assets/art-paper-D70v1WMA.svg +180 -0
  128. writer/static/assets/azcli-CElzELwZ.js +6 -0
  129. writer/static/assets/bat-CUsyEhik.js +6 -0
  130. writer/static/assets/bicep-BtxyJn6H.js +7 -0
  131. writer/static/assets/cameligo-ClBCoF8h.js +6 -0
  132. writer/static/assets/clojure-B9TqLHAk.js +6 -0
  133. writer/static/assets/codicon-BA2IlpFX.ttf +0 -0
  134. writer/static/assets/coffee-DYsfeylR.js +6 -0
  135. writer/static/assets/cpp-VVGvvgir.js +6 -0
  136. writer/static/assets/csharp-Z6z2stHy.js +6 -0
  137. writer/static/assets/csp-DgZoLDI1.js +6 -0
  138. writer/static/assets/css-KqQ96-gC.js +8 -0
  139. writer/static/assets/css.worker-DvNUQFd1.js +84 -0
  140. writer/static/assets/cssMode-BYq4oZGq.js +9 -0
  141. writer/static/assets/cypher-CYoSlgTu.js +6 -0
  142. writer/static/assets/dart-BGDl7St1.js +6 -0
  143. writer/static/assets/dockerfile-CuCtxA7T.js +6 -0
  144. writer/static/assets/ecl-BCTFAUpS.js +6 -0
  145. writer/static/assets/editor.worker-BVwmgLrR.js +11 -0
  146. writer/static/assets/elixir-C7hRTYZ9.js +6 -0
  147. writer/static/assets/flow9-Bi_qi707.js +6 -0
  148. writer/static/assets/freemarker2-CnNourkO.js +8 -0
  149. writer/static/assets/fsharp-CxaaEKKi.js +6 -0
  150. writer/static/assets/go-DUImKuGY.js +6 -0
  151. writer/static/assets/graphql-D5sGVkLV.js +6 -0
  152. writer/static/assets/handlebars-Bm22yapJ.js +6 -0
  153. writer/static/assets/hcl-zD_CCkZ1.js +6 -0
  154. writer/static/assets/html-CAKAfoZF.js +6 -0
  155. writer/static/assets/html.worker-BJMlcbMU.js +458 -0
  156. writer/static/assets/htmlMode-BGZ97n-V.js +9 -0
  157. writer/static/assets/index-5u5REPT4.js +16 -0
  158. writer/static/assets/index-BKNuk68o.css +1 -0
  159. writer/static/assets/index-BQNXU3IR.js +17 -0
  160. writer/static/assets/index-BQr1pfrb.js +1 -0
  161. writer/static/assets/index-DHXAd5Yn.js +4 -0
  162. writer/static/assets/index-Zki-pfO-.js +8525 -0
  163. writer/static/assets/index.esm-B1ZQtduY.js +17 -0
  164. writer/static/assets/ini-8kKHd4ZL.js +6 -0
  165. writer/static/assets/java-De1axCfe.js +6 -0
  166. writer/static/assets/javascript-X1f02eyK.js +6 -0
  167. writer/static/assets/json.worker-BwvX8PuZ.js +42 -0
  168. writer/static/assets/jsonMode-hT0bNgT8.js +11 -0
  169. writer/static/assets/julia-D3ApGBxz.js +6 -0
  170. writer/static/assets/kotlin-GbSrCElU.js +6 -0
  171. writer/static/assets/less-DNUaDNdz.js +7 -0
  172. writer/static/assets/lexon-Bg9QKxBu.js +6 -0
  173. writer/static/assets/liquid-KmCCiJw2.js +6 -0
  174. writer/static/assets/lua-Crkvc3mc.js +6 -0
  175. writer/static/assets/m3-DsrzVyM1.js +6 -0
  176. writer/static/assets/mapbox-gl-C0cyFYYW.js +2329 -0
  177. writer/static/assets/markdown-CY5IOZuu.js +6 -0
  178. writer/static/assets/marked.esm-273vDTCT.js +45 -0
  179. writer/static/assets/mdx-DtRFauUw.js +6 -0
  180. writer/static/assets/mips-BE8RsGBA.js +6 -0
  181. writer/static/assets/msdax-N5ajIiFQ.js +6 -0
  182. writer/static/assets/mysql-DRxbB97D.js +6 -0
  183. writer/static/assets/objective-c-BHUZy23s.js +6 -0
  184. writer/static/assets/pascal-BemVzBTY.js +6 -0
  185. writer/static/assets/pascaligo-BACCcnx_.js +6 -0
  186. writer/static/assets/pdf-B6-yWJ-Y.js +12 -0
  187. writer/static/assets/pdf.worker.min-CyUfim15.mjs +21 -0
  188. writer/static/assets/perl-CuU66Ptk.js +6 -0
  189. writer/static/assets/pgsql-CQ6TMH2r.js +6 -0
  190. writer/static/assets/php-BvyzZa65.js +6 -0
  191. writer/static/assets/pla-DrIuu9u1.js +6 -0
  192. writer/static/assets/plotly.min-DutuuatZ.js +4030 -0
  193. writer/static/assets/poppins-latin-300-italic-4WBEAciR.woff +0 -0
  194. writer/static/assets/poppins-latin-300-italic-EWCPeN2Y.woff2 +0 -0
  195. writer/static/assets/poppins-latin-300-normal-DCNuMXUj.woff +0 -0
  196. writer/static/assets/poppins-latin-300-normal-Dku2WoCh.woff2 +0 -0
  197. writer/static/assets/poppins-latin-400-italic-B4GYq972.woff2 +0 -0
  198. writer/static/assets/poppins-latin-400-italic-BPejoDS-.woff +0 -0
  199. writer/static/assets/poppins-latin-400-normal-BOb3E3N0.woff +0 -0
  200. writer/static/assets/poppins-latin-400-normal-cpxAROuN.woff2 +0 -0
  201. writer/static/assets/poppins-latin-500-italic-Ce_qjtl5.woff +0 -0
  202. writer/static/assets/poppins-latin-500-italic-o28Otv0U.woff2 +0 -0
  203. writer/static/assets/poppins-latin-500-normal-C8OXljZJ.woff2 +0 -0
  204. writer/static/assets/poppins-latin-500-normal-DGXqpDMm.woff +0 -0
  205. writer/static/assets/poppins-latin-600-italic-BhOZippK.woff +0 -0
  206. writer/static/assets/poppins-latin-600-italic-CZ4wqKBi.woff2 +0 -0
  207. writer/static/assets/poppins-latin-600-normal-BJdTmd5m.woff +0 -0
  208. writer/static/assets/poppins-latin-600-normal-zEkxB9Mr.woff2 +0 -0
  209. writer/static/assets/poppins-latin-700-italic-CW91C-LJ.woff +0 -0
  210. writer/static/assets/poppins-latin-700-italic-RKf6esGj.woff2 +0 -0
  211. writer/static/assets/poppins-latin-700-normal-BVuQR_eA.woff +0 -0
  212. writer/static/assets/poppins-latin-700-normal-Qrb0O0WB.woff2 +0 -0
  213. writer/static/assets/poppins-latin-ext-300-italic-CBzyU4Pf.woff +0 -0
  214. writer/static/assets/poppins-latin-ext-300-italic-DdDvTq5-.woff2 +0 -0
  215. writer/static/assets/poppins-latin-ext-300-normal-7Zg2msWE.woff2 +0 -0
  216. writer/static/assets/poppins-latin-ext-300-normal-C9p7gvmA.woff +0 -0
  217. writer/static/assets/poppins-latin-ext-400-italic-BiCGV3eO.woff2 +0 -0
  218. writer/static/assets/poppins-latin-ext-400-italic-gsPYOGqV.woff +0 -0
  219. writer/static/assets/poppins-latin-ext-400-normal-CIpeJEZw.woff2 +0 -0
  220. writer/static/assets/poppins-latin-ext-400-normal-Ce_uWq1Z.woff +0 -0
  221. writer/static/assets/poppins-latin-ext-500-italic-CwrTHwbn.woff2 +0 -0
  222. writer/static/assets/poppins-latin-ext-500-italic-jdc8Bv4M.woff +0 -0
  223. writer/static/assets/poppins-latin-ext-500-normal-Bl1-S02S.woff +0 -0
  224. writer/static/assets/poppins-latin-ext-500-normal-H4Q0z8D2.woff2 +0 -0
  225. writer/static/assets/poppins-latin-ext-600-italic-BqeDa496.woff2 +0 -0
  226. writer/static/assets/poppins-latin-ext-600-italic-C7MQPb_A.woff +0 -0
  227. writer/static/assets/poppins-latin-ext-600-normal-Cn4C8475.woff2 +0 -0
  228. writer/static/assets/poppins-latin-ext-600-normal-DB6FJURc.woff +0 -0
  229. writer/static/assets/poppins-latin-ext-700-italic-BAdhB_WS.woff2 +0 -0
  230. writer/static/assets/poppins-latin-ext-700-italic-WKTwQMp8.woff +0 -0
  231. writer/static/assets/poppins-latin-ext-700-normal-CE2WFKmF.woff +0 -0
  232. writer/static/assets/poppins-latin-ext-700-normal-DDaViAzG.woff2 +0 -0
  233. writer/static/assets/postiats-BR_hrfni.js +6 -0
  234. writer/static/assets/powerquery-CKDUeRmd.js +6 -0
  235. writer/static/assets/powershell-Dsa4rhA_.js +6 -0
  236. writer/static/assets/protobuf-CGsvhooB.js +7 -0
  237. writer/static/assets/pug-D2p3uOX2.js +6 -0
  238. writer/static/assets/python-DVhxg746.js +6 -0
  239. writer/static/assets/qsharp-B7F3HtPF.js +6 -0
  240. writer/static/assets/r-3aLoi2fs.js +6 -0
  241. writer/static/assets/razor-DR5Ns_BC.js +6 -0
  242. writer/static/assets/redis-jqFeRM5s.js +6 -0
  243. writer/static/assets/redshift-BriwQgXR.js +6 -0
  244. writer/static/assets/restructuredtext-hbBFZ0w9.js +6 -0
  245. writer/static/assets/ruby-ByThyB2Q.js +6 -0
  246. writer/static/assets/rust-DIEZMp5R.js +6 -0
  247. writer/static/assets/sb-C6Gjjw_x.js +6 -0
  248. writer/static/assets/scala-DZNw3jJB.js +6 -0
  249. writer/static/assets/scheme-55eqh71t.js +6 -0
  250. writer/static/assets/scss-D-OVkc4F.js +8 -0
  251. writer/static/assets/serialization-DJC7NP0N.js +20 -0
  252. writer/static/assets/shell-DSpi8_qN.js +6 -0
  253. writer/static/assets/solidity-BHddiNFS.js +6 -0
  254. writer/static/assets/sophia-D6taVZFb.js +6 -0
  255. writer/static/assets/sparql-LA0C7mUc.js +6 -0
  256. writer/static/assets/sql-C3-3IcFM.js +6 -0
  257. writer/static/assets/st-C4g7059C.js +6 -0
  258. writer/static/assets/swift-DNI1vH3h.js +8 -0
  259. writer/static/assets/systemverilog-DL_FVbcQ.js +6 -0
  260. writer/static/assets/tcl-DVJXmIwd.js +6 -0
  261. writer/static/assets/ts.worker-CwG1rUES.js +37021 -0
  262. writer/static/assets/tsMode-BNUEZzir.js +16 -0
  263. writer/static/assets/twig-BVWDLtw5.js +6 -0
  264. writer/static/assets/typescript-CRVt7Hx0.js +6 -0
  265. writer/static/assets/useBlueprintRun-C00bCxh-.js +1 -0
  266. writer/static/assets/useKeyValueEditor-nDmI7cTJ.js +1 -0
  267. writer/static/assets/useListResources-DLkZhRSJ.js +1 -0
  268. writer/static/assets/vb-Btz91-7U.js +6 -0
  269. writer/static/assets/vega-embed.module-SNP5iNdJ.js +201 -0
  270. writer/static/assets/wgsl-D8V_buCG.js +303 -0
  271. writer/static/assets/xml-C_6-t1tb.js +6 -0
  272. writer/static/assets/yaml-DIw8G7jk.js +6 -0
  273. writer/static/components/annotatedtext.svg +4 -0
  274. writer/static/components/avatar.svg +4 -0
  275. writer/static/components/blueprints_addtostatelist.svg +4 -0
  276. writer/static/components/blueprints_apitrigger.svg +4 -0
  277. writer/static/components/blueprints_calleventhandler.svg +9 -0
  278. writer/static/components/blueprints_category_Logic.svg +4 -0
  279. writer/static/components/blueprints_category_Other.svg +4 -0
  280. writer/static/components/blueprints_category_Triggers.svg +4 -0
  281. writer/static/components/blueprints_category_Writer.svg +25 -0
  282. writer/static/components/blueprints_code.svg +9 -0
  283. writer/static/components/blueprints_crontrigger.svg +6 -0
  284. writer/static/components/blueprints_foreach.svg +4 -0
  285. writer/static/components/blueprints_httprequest.svg +11 -0
  286. writer/static/components/blueprints_logmessage.svg +11 -0
  287. writer/static/components/blueprints_parsejson.svg +4 -0
  288. writer/static/components/blueprints_returnvalue.svg +4 -0
  289. writer/static/components/blueprints_runblueprint.svg +4 -0
  290. writer/static/components/blueprints_setstate.svg +4 -0
  291. writer/static/components/blueprints_uieventtrigger.svg +4 -0
  292. writer/static/components/blueprints_writeraddchatmessage.svg +19 -0
  293. writer/static/components/blueprints_writeraddtokg.svg +19 -0
  294. writer/static/components/blueprints_writerchat.svg +11 -0
  295. writer/static/components/blueprints_writerchatreply.svg +19 -0
  296. writer/static/components/blueprints_writerclassification.svg +24 -0
  297. writer/static/components/blueprints_writercompletion.svg +14 -0
  298. writer/static/components/blueprints_writerinitchat.svg +11 -0
  299. writer/static/components/blueprints_writernocodeapp.svg +14 -0
  300. writer/static/components/button.svg +4 -0
  301. writer/static/components/category_Content.svg +4 -0
  302. writer/static/components/category_Embed.svg +4 -0
  303. writer/static/components/category_Input.svg +5 -0
  304. writer/static/components/category_Layout.svg +9 -0
  305. writer/static/components/category_Other.svg +4 -0
  306. writer/static/components/chatbot.svg +4 -0
  307. writer/static/components/checkboxinput.svg +4 -0
  308. writer/static/components/colorinput.svg +11 -0
  309. writer/static/components/column.svg +4 -0
  310. writer/static/components/columns.svg +4 -0
  311. writer/static/components/dataframe.svg +4 -0
  312. writer/static/components/dateinput.svg +4 -0
  313. writer/static/components/dropdowninput.svg +5 -0
  314. writer/static/components/fileinput.svg +4 -0
  315. writer/static/components/googlemaps.svg +4 -0
  316. writer/static/components/header.svg +4 -0
  317. writer/static/components/heading.svg +9 -0
  318. writer/static/components/horizontalstack.svg +4 -0
  319. writer/static/components/html.svg +9 -0
  320. writer/static/components/icon.svg +4 -0
  321. writer/static/components/iframe.svg +4 -0
  322. writer/static/components/image.svg +11 -0
  323. writer/static/components/jsonviewer.svg +4 -0
  324. writer/static/components/link.svg +12 -0
  325. writer/static/components/mapbox.svg +4 -0
  326. writer/static/components/message.svg +4 -0
  327. writer/static/components/metric.svg +4 -0
  328. writer/static/components/multiselectinput.svg +4 -0
  329. writer/static/components/numberinput.svg +4 -0
  330. writer/static/components/page.svg +50 -0
  331. writer/static/components/pagination.svg +4 -0
  332. writer/static/components/pdf.svg +4 -0
  333. writer/static/components/plotlygraph.svg +7 -0
  334. writer/static/components/progressbar.svg +5 -0
  335. writer/static/components/radioinput.svg +4 -0
  336. writer/static/components/rangeinput.svg +4 -0
  337. writer/static/components/ratinginput.svg +4 -0
  338. writer/static/components/repeater.svg +4 -0
  339. writer/static/components/reuse.svg +4 -0
  340. writer/static/components/section.svg +4 -0
  341. writer/static/components/selectinput.svg +5 -0
  342. writer/static/components/separator.svg +4 -0
  343. writer/static/components/sidebar.svg +4 -0
  344. writer/static/components/sliderinput.svg +4 -0
  345. writer/static/components/step.svg +4 -0
  346. writer/static/components/steps.svg +4 -0
  347. writer/static/components/switchinput.svg +4 -0
  348. writer/static/components/tab.svg +4 -0
  349. writer/static/components/tabs.svg +4 -0
  350. writer/static/components/tags.svg +11 -0
  351. writer/static/components/text.svg +4 -0
  352. writer/static/components/textareainput.svg +11 -0
  353. writer/static/components/textinput.svg +4 -0
  354. writer/static/components/timeinput.svg +4 -0
  355. writer/static/components/timer.svg +4 -0
  356. writer/static/components/vegalitechart.svg +7 -0
  357. writer/static/components/videoplayer.svg +11 -0
  358. writer/static/components/webcamcapture.svg +4 -0
  359. writer/static/favicon.png +0 -0
  360. writer/static/index.html +84 -0
  361. writer/static/status/cancelled.svg +5 -0
  362. writer/static/status/error.svg +5 -0
  363. writer/static/status/skipped.svg +4 -0
  364. writer/static/status/stopped.svg +4 -0
  365. writer/static/status/success.svg +4 -0
  366. writer/sync.py +431 -0
  367. writer/ui.py +2268 -0
  368. writer/vault.py +48 -0
  369. writer/wf_project.py +90 -66
  370. writer-1.25.1rc1.dist-info/METADATA +92 -0
  371. writer-1.25.1rc1.dist-info/RECORD +382 -0
  372. {writer-0.8.3rc4.dist-info → writer-1.25.1rc1.dist-info}/WHEEL +1 -1
  373. writer/blocks/runworkflow.py +0 -59
  374. writer/workflows.py +0 -183
  375. writer-0.8.3rc4.dist-info/METADATA +0 -117
  376. writer-0.8.3rc4.dist-info/RECORD +0 -44
  377. {writer-0.8.3rc4.dist-info → writer-1.25.1rc1.dist-info}/entry_points.txt +0 -0
  378. {writer-0.8.3rc4.dist-info → writer-1.25.1rc1.dist-info/licenses}/LICENSE.txt +0 -0
@@ -1,6 +1,8 @@
1
1
  import json
2
2
  import logging
3
+ from contextvars import ContextVar
3
4
  from datetime import datetime
5
+ from functools import wraps
4
6
  from typing import (
5
7
  Any,
6
8
  Callable,
@@ -12,40 +14,72 @@ from typing import (
12
14
  Optional,
13
15
  Set,
14
16
  TypedDict,
17
+ TypeVar,
15
18
  Union,
16
19
  cast,
17
20
  )
18
21
  from uuid import uuid4
19
22
 
20
23
  from httpx import Timeout
21
- from writerai import Writer
22
- from writerai._exceptions import WriterError
24
+ from writerai import DefaultHttpxClient, Writer
25
+ from writerai._exceptions import BadRequestError, WriterError
23
26
  from writerai._response import BinaryAPIResponse
24
27
  from writerai._streaming import Stream
25
28
  from writerai._types import Body, Headers, NotGiven, Query
26
29
  from writerai.resources import FilesResource, GraphsResource
27
30
  from writerai.types import (
28
- Chat,
31
+ ApplicationListResponse,
32
+ ApplicationRetrieveResponse,
33
+ ChatCompletion,
29
34
  Completion,
35
+ CompletionChunk,
30
36
  FileDeleteResponse,
31
37
  GraphDeleteResponse,
32
38
  GraphRemoveFileFromGraphResponse,
33
39
  GraphUpdateResponse,
34
- StreamingData,
35
40
  )
36
41
  from writerai.types import File as SDKFile
37
42
  from writerai.types import Graph as SDKGraph
38
43
  from writerai.types.application_generate_content_params import Input
39
- from writerai.types.chat import ChoiceMessage, ChoiceMessageGraphData, ChoiceMessageToolCall
44
+ from writerai.types.applications import (
45
+ ApplicationGenerateAsyncResponse,
46
+ JobCreateResponse,
47
+ JobRetryResponse,
48
+ )
49
+ from writerai.types.applications.application_graphs_response import ApplicationGraphsResponse
50
+ from writerai.types.chat_chat_params import (
51
+ GraphData,
52
+ MessageContentMixedContentImageFragment,
53
+ MessageContentMixedContentImageFragmentImageURL,
54
+ MessageContentMixedContentTextFragment,
55
+ ResponseFormat,
56
+ ToolChoice,
57
+ )
40
58
  from writerai.types.chat_chat_params import Message as WriterAIMessage
41
- from writerai.types.chat_chat_params import MessageGraphData
42
- from writerai.types.chat_chat_params import ToolFunctionTool as SDKFunctionTool
43
- from writerai.types.chat_chat_params import ToolGraphTool as SDKGraphTool
59
+ from writerai.types.chat_completion_message import ChatCompletionMessage
44
60
  from writerai.types.question import Question
45
61
  from writerai.types.question_response_chunk import QuestionResponseChunk
62
+ from writerai.types.shared_params.tool_param import FunctionTool as SDKFunctionTool
63
+ from writerai.types.shared_params.tool_param import GraphTool as SDKGraphTool
64
+ from writerai.types.shared_params.tool_param import LlmTool as SDKLlmTool
65
+ from writerai.types.shared_params.tool_param import WebSearchTool as SDKWebSearchTool
46
66
 
47
67
  from writer.core import get_app_process
48
68
 
69
+ DEFAULT_CHAT_MODEL = "palmyra-x5"
70
+ DEFAULT_COMPLETION_MODEL = "palmyra-x5"
71
+
72
+
73
+ _ai_client: ContextVar[Optional[Writer]] = ContextVar(
74
+ "ai_client", default=None
75
+ )
76
+
77
+
78
+ class ExtendedWebSearchTool(TypedDict, total=False):
79
+ """Extended web search tool that includes all fields supported by the API"""
80
+ type: Literal["web_search"]
81
+ function: Dict[str, Any] # Flexible to support include_raw_content and other fields
82
+
49
83
 
50
84
  class APIOptions(TypedDict, total=False):
51
85
  extra_headers: Optional[Headers]
@@ -56,7 +90,15 @@ class APIOptions(TypedDict, total=False):
56
90
 
57
91
  class ChatOptions(APIOptions, total=False):
58
92
  model: str
59
- tools: Union[Iterable[Union[SDKGraphTool, SDKFunctionTool]], NotGiven]
93
+ tool_choice: ToolChoice
94
+ tools: Union[
95
+ Iterable[
96
+ Union[SDKGraphTool, SDKFunctionTool, SDKLlmTool, SDKWebSearchTool]
97
+ ],
98
+ NotGiven
99
+ ]
100
+ response_format: Union[ResponseFormat, NotGiven]
101
+ logprobs: Union[bool, NotGiven]
60
102
  max_tokens: Union[int, NotGiven]
61
103
  n: Union[int, NotGiven]
62
104
  stop: Union[List[str], str, NotGiven]
@@ -81,6 +123,15 @@ class APIListOptions(APIOptions, total=False):
81
123
  order: Union[Literal["asc", "desc"], NotGiven]
82
124
 
83
125
 
126
+ class APIRetrieveJobsOptions(APIOptions, total=False):
127
+ limit: Union[int, NotGiven]
128
+ offset: Union[int, NotGiven]
129
+ status: Union[
130
+ Literal["completed", "failed", "in_progress"],
131
+ NotGiven
132
+ ]
133
+
134
+
84
135
  class Tool(TypedDict, total=False):
85
136
  type: str
86
137
 
@@ -88,6 +139,7 @@ class Tool(TypedDict, total=False):
88
139
  class GraphTool(Tool):
89
140
  graph_ids: List[str]
90
141
  subqueries: bool
142
+ description: Optional[str]
91
143
 
92
144
 
93
145
  class FunctionToolParameterMeta(TypedDict):
@@ -128,7 +180,18 @@ def create_function_tool(
128
180
  )
129
181
 
130
182
 
131
- def _process_completion_data_chunk(choice: StreamingData) -> str:
183
+ class LLMTool(Tool):
184
+ model: str
185
+ description: str
186
+
187
+
188
+ class WebSearchTool(Tool):
189
+ include_domains: Optional[List[str]]
190
+ exclude_domains: Optional[List[str]]
191
+ include_raw_content: Optional[bool]
192
+
193
+
194
+ def _process_completion_data_chunk(choice: CompletionChunk) -> str:
132
195
  text = choice.value
133
196
  if not text:
134
197
  return ""
@@ -137,7 +200,7 @@ def _process_completion_data_chunk(choice: StreamingData) -> str:
137
200
  raise ValueError("Failed to retrieve text from completion stream")
138
201
 
139
202
 
140
- def _process_chat_data_chunk(chat_data: Chat) -> tuple[dict, dict]:
203
+ def _process_chat_data_chunk(chat_data: ChatCompletion) -> tuple[dict, dict]:
141
204
  choices = chat_data.choices
142
205
  for entry in choices:
143
206
  dict_entry = entry.model_dump()
@@ -173,18 +236,8 @@ class WriterAIManager:
173
236
  :raises RuntimeError: If an API key was not provided to initialize
174
237
  SDK client properly.
175
238
  """
176
- try:
177
- self.client = Writer(
178
- # This is the default and can be omitted
179
- api_key=token,
180
- )
181
- except WriterError:
182
- raise RuntimeError(
183
- "Failed to acquire Writer API key. " +
184
- "Provide it by either setting a WRITER_API_KEY" +
185
- " environment variable, or by initializing the" +
186
- " AI module explicitly: writer.ai.init(\"my-writer-api-key\")"
187
- ) from None
239
+ self.token = token
240
+ from writer.core import get_app_process
188
241
  current_process = get_app_process()
189
242
  setattr(current_process, 'ai_manager', self)
190
243
 
@@ -218,7 +271,7 @@ class WriterAIManager:
218
271
  :param token: The new token to use for authentication.
219
272
  """
220
273
  instance = cls.acquire_instance()
221
- instance.client = Writer(api_key=token)
274
+ instance.token = token
222
275
 
223
276
  @classmethod
224
277
  def use_chat_model(cls) -> str:
@@ -227,7 +280,7 @@ class WriterAIManager:
227
280
 
228
281
  :returns: Name for the chat model.
229
282
  """
230
- return "palmyra-x-004"
283
+ return DEFAULT_CHAT_MODEL
231
284
 
232
285
  @classmethod
233
286
  def use_completion_model(cls) -> str:
@@ -236,12 +289,50 @@ class WriterAIManager:
236
289
 
237
290
  :returns: Name for the completion model.
238
291
  """
239
- return "palmyra-x-004"
292
+ return DEFAULT_COMPLETION_MODEL
240
293
 
241
294
  @classmethod
242
- def acquire_client(cls) -> Writer:
295
+ def acquire_client(
296
+ cls,
297
+ custom_httpx_client: Optional[DefaultHttpxClient] = None,
298
+ force_new_client: Optional[bool] = False
299
+ ) -> Writer:
300
+ from writer.core import get_session
243
301
  instance = cls.acquire_instance()
244
- return instance.client
302
+
303
+ # Acquire header from session
304
+ # and set it to the client
305
+
306
+ current_session = get_session()
307
+ custom_headers = {}
308
+
309
+ if current_session:
310
+ headers = current_session.headers or {}
311
+ agent_token_header = headers.get("x-agent-token")
312
+ if agent_token_header:
313
+ custom_headers = {
314
+ "X-Agent-Token": agent_token_header
315
+ }
316
+
317
+ try:
318
+ context_client = _ai_client.get(None)
319
+ if force_new_client or not context_client:
320
+ client = Writer(
321
+ api_key=instance.token,
322
+ default_headers=custom_headers,
323
+ http_client=custom_httpx_client
324
+ )
325
+ _ai_client.set(client)
326
+ return client
327
+ else:
328
+ return context_client
329
+ except WriterError:
330
+ raise RuntimeError(
331
+ "Failed to acquire Writer API key. " +
332
+ "Provide it by either setting a WRITER_API_KEY" +
333
+ " environment variable, or by initializing the" +
334
+ " AI module explicitly: writer.ai.init(\"my-writer-api-key\")"
335
+ ) from None
245
336
 
246
337
 
247
338
  class SDKWrapper:
@@ -372,7 +463,7 @@ class Graph(SDKWrapper):
372
463
  - `extra_body` (Optional[Body]):
373
464
  Additional body parameters for the request.
374
465
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
375
- Timeout for the request.
466
+ Timeout for the request in seconds.
376
467
  """
377
468
  config = config or {}
378
469
 
@@ -385,7 +476,12 @@ class Graph(SDKWrapper):
385
476
  if description:
386
477
  payload["description"] = description
387
478
  graphs = self._retrieve_graphs_accessor()
388
- response = graphs.update(self.id, **payload, **config)
479
+ response = graphs.update(
480
+ self.id,
481
+ name=payload.get("name", NotGiven()),
482
+ description=payload.get("description", NotGiven()),
483
+ **config
484
+ )
389
485
  Graph.stale_ids.add(self.id)
390
486
  return response
391
487
 
@@ -414,7 +510,7 @@ class Graph(SDKWrapper):
414
510
  - `extra_body` (Optional[Body]):
415
511
  Additional body parameters for the request.
416
512
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
417
- Timeout for the request.
513
+ Timeout for the request in seconds.
418
514
  """
419
515
  config = config or {}
420
516
  file_id = None
@@ -461,7 +557,7 @@ class Graph(SDKWrapper):
461
557
  - `extra_body` (Optional[Body]):
462
558
  Additional body parameters for the request.
463
559
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
464
- Timeout for the request.
560
+ Timeout for the request in seconds.
465
561
  """
466
562
  config = config or {}
467
563
  file_id = None
@@ -516,25 +612,30 @@ class Graph(SDKWrapper):
516
612
  """
517
613
  Streams response for a question posed to the graph.
518
614
 
519
- This method returns incremental chunks of the response, ideal for long
615
+ This method returns incremental chunks of the response, ideal for long
520
616
  responses or when reduced latency is needed.
521
617
 
522
618
  :param question: The query or question to be answered by the graph.
523
- :param subqueries: Enables subquery generation if set to True,
619
+ :param subqueries: Enables subquery generation if set to True,
524
620
  enhancing the result.
525
- :param config: Optional dictionary for additional API
526
- configuration settings.
527
- The configuration can include:
528
- - ``extra_headers`` (Optional[Headers]): Additional headers.
529
- - ``extra_query`` (Optional[Query]): Extra query parameters.
530
- - ``extra_body`` (Optional[Body]): Additional body data.
531
- - ``timeout`` (Union[float, Timeout, None, NotGiven]): Request timeout.
621
+ :param config: Optional dictionary for additional API
622
+ configuration settings.
532
623
 
533
624
  :yields: Incremental chunks of the answer to the question.
534
625
 
535
626
  :raises ValueError: If an invalid graph or graph ID
536
627
  is provided in `graphs_or_graph_ids`.
537
628
 
629
+ The `config` dictionary can include the following keys:
630
+ - `extra_headers` (Optional[Headers]):
631
+ Additional headers for the request.
632
+ - `extra_query` (Optional[Query]):
633
+ Additional query parameters for the request.
634
+ - `extra_body` (Optional[Body]):
635
+ Additional body parameters for the request.
636
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
637
+ Timeout for the request in seconds.
638
+
538
639
  **Example Usage**:
539
640
 
540
641
  >>> for chunk in graph.stream_ask(
@@ -543,7 +644,7 @@ class Graph(SDKWrapper):
543
644
  ... print(chunk)
544
645
  ...
545
646
  """
546
-
647
+
547
648
  response = cast(
548
649
  Stream[QuestionResponseChunk],
549
650
  self._question(
@@ -572,22 +673,27 @@ class Graph(SDKWrapper):
572
673
  config: Optional[APIOptions] = None
573
674
  ):
574
675
  """
575
- Sends a question to the graph and retrieves
676
+ Sends a question to the graph and retrieves
576
677
  a single response.
577
678
 
578
679
  :param question: The query or question to be answered by the graph.
579
- :param subqueries: Enables subquery generation if set to True,
680
+ :param subqueries: Enables subquery generation if set to True,
580
681
  enhancing the result.
581
- :param config: Optional dictionary for additional API
682
+ :param config: Optional dictionary for additional API
582
683
  configuration settings.
583
- The configuration can include:
584
- - ``extra_headers`` (Optional[Headers]): Additional headers.
585
- - ``extra_query`` (Optional[Query]): Extra query parameters.
586
- - ``extra_body`` (Optional[Body]): Additional body data.
587
- - ``timeout`` (Union[float, Timeout, None, NotGiven]): Request timeout.
588
684
 
589
685
  :return: The answer to the question from the graph(s).
590
686
 
687
+ The `config` dictionary can include the following keys:
688
+ - `extra_headers` (Optional[Headers]):
689
+ Additional headers for the request.
690
+ - `extra_query` (Optional[Query]):
691
+ Additional query parameters for the request.
692
+ - `extra_body` (Optional[Body]):
693
+ Additional body parameters for the request.
694
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
695
+ Timeout for the request in seconds.
696
+
591
697
  **Example Usage**:
592
698
 
593
699
  >>> response = graph.ask(
@@ -631,7 +737,7 @@ def create_graph(
631
737
  - `extra_body` (Optional[Body]):
632
738
  Additional body parameters for the request.
633
739
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
634
- Timeout for the request.
740
+ Timeout for the request in seconds.
635
741
  """
636
742
  config = config or {}
637
743
  graphs = Graph._retrieve_graphs_accessor()
@@ -667,7 +773,7 @@ def retrieve_graph(
667
773
  - `extra_body` (Optional[Body]):
668
774
  Additional body parameters for the request.
669
775
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
670
- Timeout for the request.
776
+ Timeout for the request in seconds.
671
777
  """
672
778
  config = config or {}
673
779
  graphs = Graph._retrieve_graphs_accessor()
@@ -693,7 +799,7 @@ def list_graphs(config: Optional[APIListOptions] = None) -> List[Graph]:
693
799
  - `extra_body` (Optional[Body]):
694
800
  Additional body parameters for the request.
695
801
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
696
- Timeout for the request.
802
+ Timeout for the request in seconds.
697
803
  - `after` (Union[str, NotGiven]):
698
804
  Filter to retrieve items created after a specific cursor.
699
805
  - `before` (Union[str, NotGiven]):
@@ -808,7 +914,7 @@ def retrieve_file(file_id: str, config: Optional[APIOptions] = None) -> File:
808
914
  - `extra_body` (Optional[Body]):
809
915
  Additional body parameters for the request.
810
916
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
811
- Timeout for the request.
917
+ Timeout for the request in seconds.
812
918
  """
813
919
  config = config or {}
814
920
  files = File._retrieve_files_accessor()
@@ -834,7 +940,7 @@ def list_files(config: Optional[APIListOptions] = None) -> List[File]:
834
940
  - `extra_body` (Optional[Body]):
835
941
  Additional body parameters for the request.
836
942
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
837
- Timeout for the request.
943
+ Timeout for the request in seconds.
838
944
  - `after` (Union[str, NotGiven]):
839
945
  Filter to retrieve items created after a specific cursor.
840
946
  - `before` (Union[str, NotGiven]):
@@ -878,7 +984,7 @@ def upload_file(
878
984
  - `extra_body` (Optional[Body]):
879
985
  Additional body parameters for the request.
880
986
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
881
- Timeout for the request.
987
+ Timeout for the request in seconds.
882
988
  """
883
989
  config = config or {}
884
990
  files = File._retrieve_files_accessor()
@@ -919,7 +1025,7 @@ def delete_file(
919
1025
  - `extra_body` (Optional[Body]):
920
1026
  Additional body parameters for the request.
921
1027
  - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
922
- Timeout for the request.
1028
+ Timeout for the request in seconds.
923
1029
  """
924
1030
  config = config or {}
925
1031
  file_id = None
@@ -937,6 +1043,39 @@ def delete_file(
937
1043
  return files.delete(file_id, **config)
938
1044
 
939
1045
 
1046
+ class GuardrailError(Exception):
1047
+ def __init__(self, name: str, message: str, *args):
1048
+ super().__init__(f"{message}: {name}", *args)
1049
+
1050
+
1051
+ R = TypeVar("R")
1052
+
1053
+
1054
+ def catch_guardrail_error(func: Callable[..., R]) -> Callable[..., R]:
1055
+ @wraps(func)
1056
+ def wrapper(*args: Any, **kwargs: Any) -> R:
1057
+ try:
1058
+ return func(*args, **kwargs)
1059
+ except BadRequestError as e:
1060
+ parsed = e.response.json()
1061
+ errors = parsed.get("errors")
1062
+ if not errors:
1063
+ raise
1064
+
1065
+ extras = parsed.get("extras")
1066
+ if extras is None:
1067
+ raise
1068
+ guardrail_info = extras.get("guardrail_info")
1069
+ if guardrail_info is None:
1070
+ raise
1071
+
1072
+ raise GuardrailError(
1073
+ name=guardrail_info["guardrail_name"],
1074
+ message=errors[0]["description"],
1075
+ ) from None
1076
+
1077
+ return wrapper
1078
+
940
1079
  class Conversation:
941
1080
  """
942
1081
  Manages messages within a conversation flow with an AI system,
@@ -970,13 +1109,44 @@ class Conversation:
970
1109
  :param prompt_or_history: Initial system prompt as a string, or
971
1110
  history of messages as a list, used to start
972
1111
  a new conversation or to load an existing one.
973
- :param config: Configuration settings for the conversation. These settings
974
- can include parameters such as `max_tokens`, `temperature`,
975
- and `timeout`, which affect the behavior and performance
976
- of the conversation operations. This configuration provides
977
- a default context for all operations, but can be overridden
978
- or extended by additional configurations passed directly
979
- to specific methods.
1112
+ :param config: Configuration settings for the conversation.
1113
+
1114
+ The `config` dictionary can include the following keys:
1115
+ - `extra_headers` (Optional[Headers]):
1116
+ Additional headers for the request.
1117
+ - `extra_query` (Optional[Query]):
1118
+ Additional query parameters for the request.
1119
+ - `extra_body` (Optional[Body]):
1120
+ Additional body parameters for the request.
1121
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
1122
+ Timeout for the request in seconds.
1123
+ - `model` (str):
1124
+ The model to use for completion.
1125
+ - `tool_choice` (ToolChoice):
1126
+ Configure how the model will call functions: `auto` will allow the model
1127
+ to automatically choose the best tool, `none` disables tool calling.
1128
+ You can also pass a specific previously defined function.
1129
+ - `logprobs` (Union[bool, NotGiven]):
1130
+ Specifies whether to return log probabilities of the output tokens.
1131
+ - `tools` (Union[Iterable[Union[SDKGraphTool,
1132
+ SDKFunctionTool, SDKLlmTool]], NotGiven]):
1133
+ Tools available for the model to use.
1134
+ - `max_tokens` (Union[int, NotGiven]):
1135
+ Maximum number of tokens to generate.
1136
+ - `n` (Union[int, NotGiven]):
1137
+ Number of completions to generate.
1138
+ - `stop` (Union[List[str], str, NotGiven]):
1139
+ Sequences where the API will stop generating tokens.
1140
+ - `temperature` (Union[float, NotGiven]):
1141
+ Controls the randomness or creativity of the model's responses.
1142
+ A higher temperature results in more varied and less predictable text,
1143
+ while a lower temperature produces more deterministic
1144
+ and conservative outputs.
1145
+ - `top_p` (Union[float, NotGiven]):
1146
+ Sets the threshold for "nucleus sampling," a technique to focus the model's
1147
+ token generation on the most likely subset of tokens. Only tokens with
1148
+ cumulative probability above this threshold are considered, controlling the
1149
+ trade-off between creativity and coherence.
980
1150
 
981
1151
  **Configuration Example:**
982
1152
 
@@ -990,7 +1160,7 @@ class Conversation:
990
1160
  or extend the initial configuration:
991
1161
 
992
1162
  >>> response = conversation.complete(
993
- ... data={
1163
+ ... config={
994
1164
  ... 'max_tokens': 150,
995
1165
  ... 'temperature': 0.7
996
1166
  ... }
@@ -1000,17 +1170,24 @@ class Conversation:
1000
1170
  the `temperature` to 0.7 for this specific call.
1001
1171
 
1002
1172
  """
1173
+ class ContentFragment(TypedDict, total=False):
1174
+ """Content fragment for messages that can contain text or images."""
1175
+ type: Literal["text", "image_url"]
1176
+ text: Optional[str]
1177
+ image_url: Optional[Dict[str, str]]
1178
+
1003
1179
  class Message(TypedDict, total=False):
1004
1180
  """
1005
1181
  Typed dictionary for conversation messages.
1006
1182
 
1007
1183
  :param role: Specifies the sender role.
1008
- :param content: Text content of the message.
1184
+ :param content: Text content of the message or array of content fragments
1185
+ for multimodal messages.
1009
1186
  :param actions: Optional dictionary containing actions
1010
1187
  related to the message.
1011
1188
  """
1012
1189
  role: Literal["system", "assistant", "user", "tool"]
1013
- content: str
1190
+ content: Union[str, List['Conversation.ContentFragment']]
1014
1191
  actions: Optional[dict]
1015
1192
  name: Optional[str]
1016
1193
  tool_call_id: Optional[str]
@@ -1037,13 +1214,44 @@ class Conversation:
1037
1214
  f"Improper message format to add to Conversation: {message}"
1038
1215
  )
1039
1216
 
1040
- if not (
1041
- isinstance(message["content"], str)
1042
- or
1043
- message["content"] is None
1217
+ content = message["content"]
1218
+ if isinstance(content, list):
1219
+ # Validate multimodal content structure
1220
+ for fragment in content:
1221
+ if not isinstance(fragment, dict):
1222
+ raise ValueError(
1223
+ f"Invalid content fragment in message: {message}. "
1224
+ f"Fragments must be dictionaries."
1225
+ )
1226
+ fragment_type = fragment.get("type")
1227
+ if fragment_type not in ["text", "image_url"]:
1228
+ raise ValueError(
1229
+ f"Invalid fragment type '{fragment_type}' in message: {message}. "
1230
+ f"Type must be 'text' or 'image_url'."
1231
+ )
1232
+ if fragment_type == "text" and "text" not in fragment:
1233
+ raise ValueError(
1234
+ f"Text fragment missing 'text' field in message: {message}"
1235
+ )
1236
+ if fragment_type == "image_url" and "image_url" not in fragment:
1237
+ raise ValueError(
1238
+ f"Image fragment missing 'image_url' field in message: {message}"
1239
+ )
1240
+ if fragment_type == "image_url" and not isinstance(fragment.get("image_url"), dict):
1241
+ raise ValueError(
1242
+ f"Image fragment 'image_url' must be a dict in message: {message}"
1243
+ )
1244
+ if fragment_type == "image_url" and "url" not in fragment.get("image_url", {}):
1245
+ raise ValueError(
1246
+ f"Image fragment missing 'url' in 'image_url' in message: {message}"
1247
+ )
1248
+ elif not (
1249
+ isinstance(content, str)
1250
+ or content is None
1044
1251
  ):
1045
1252
  raise ValueError(
1046
- f"Non-string content in message cannot be added: {message}"
1253
+ f"Invalid content format in message: {message}. "
1254
+ f"Content must be a string, None, or array of content fragments."
1047
1255
  )
1048
1256
 
1049
1257
  if message["role"] not in ["system", "assistant", "user", "tool"]:
@@ -1125,7 +1333,24 @@ class Conversation:
1125
1333
  clear_chunk = _clear_chunk_flag(raw_chunk)
1126
1334
  updated_last_message: 'Conversation.Message' = self.messages[-1]
1127
1335
  if "content" in clear_chunk:
1128
- updated_last_message["content"] += clear_chunk.pop("content") or ""
1336
+ new_content = clear_chunk.pop("content") or ""
1337
+ if isinstance(updated_last_message["content"], list):
1338
+ # Handle list content (multimodal)
1339
+ if isinstance(new_content, str) and new_content:
1340
+ # Find the last text fragment and append to it, or create new one
1341
+ last_text_fragment = None
1342
+ for i in range(len(updated_last_message["content"]) - 1, -1, -1):
1343
+ if updated_last_message["content"][i].get("type") == "text":
1344
+ last_text_fragment = updated_last_message["content"][i]
1345
+ break
1346
+
1347
+ if last_text_fragment:
1348
+ last_text_fragment["text"] = (last_text_fragment.get("text") or "") + new_content
1349
+ else:
1350
+ updated_last_message["content"].append({"type": "text", "text": new_content})
1351
+ else:
1352
+ # Handle string content
1353
+ updated_last_message["content"] = str(updated_last_message["content"]) + str(new_content)
1129
1354
 
1130
1355
  if "tool_calls" in clear_chunk:
1131
1356
  # Ensure 'tool_calls' exists in updated_last_message as list
@@ -1138,12 +1363,13 @@ class Conversation:
1138
1363
  new_tool_calls = clear_chunk.pop("tool_calls", []) or []
1139
1364
 
1140
1365
  for new_call in new_tool_calls:
1141
- index = new_call.get("index", 0)
1142
1366
  last_message_tool_calls = \
1143
1367
  cast(list, updated_last_message["tool_calls"])
1144
- if index < len(last_message_tool_calls):
1145
- # If there's an existing call at this index, update it
1146
- existing_call = last_message_tool_calls[index]
1368
+ if not new_call.get("id"):
1369
+ # Received ID is an indicator
1370
+ # of a new call that needs to be added:
1371
+ # otherwise, modify the latest existing call
1372
+ existing_call = last_message_tool_calls[-1]
1147
1373
  for key, value in new_call.items():
1148
1374
  if key == "function":
1149
1375
  if key not in existing_call:
@@ -1166,12 +1392,12 @@ class Conversation:
1166
1392
  # to existing arguments
1167
1393
  call_function[fkey] += \
1168
1394
  fvalue or ""
1169
- elif fvalue is not None:
1395
+ elif fvalue is not None and fvalue != '':
1170
1396
  call_function[fkey] = fvalue
1171
- elif value is not None:
1397
+ elif value is not None and value != '':
1172
1398
  existing_call[key] = value
1173
1399
  else:
1174
- # If no existing call, append the new call
1400
+ # If ID was received, this is the new call and we add it
1175
1401
  last_message_tool_calls.append(new_call)
1176
1402
 
1177
1403
  updated_last_message |= clear_chunk
@@ -1182,15 +1408,48 @@ class Conversation:
1182
1408
  Converts a message object stored in Conversation to a Writer AI SDK
1183
1409
  `Message` model, suitable for calls to API.
1184
1410
 
1185
- :param raw_chunk: The data to be merged into the last message.
1411
+ :param message: The message to prepare.
1186
1412
  :raises ValueError: If there are no messages in the conversation
1187
1413
  to merge with.
1188
1414
  """
1189
1415
  if not ("role" in message and "content" in message):
1190
1416
  raise ValueError("Improper message format")
1191
- sdk_message = WriterAIMessage(
1192
- content=message.get("content", None) or "",
1193
- role=message["role"]
1417
+
1418
+ content = message.get("content")
1419
+ if isinstance(content, list):
1420
+ # Handle multimodal content (text + images)
1421
+ # Convert our ContentFragment format to SDK format
1422
+ sdk_content: List[Union[MessageContentMixedContentTextFragment, MessageContentMixedContentImageFragment]] = []
1423
+ for fragment in content:
1424
+ if fragment.get("type") == "text":
1425
+ sdk_content.append({
1426
+ "type": "text",
1427
+ "text": fragment.get("text") or ""
1428
+ })
1429
+ elif fragment.get("type") == "image_url":
1430
+ image_url_data = fragment.get("image_url", {})
1431
+ if isinstance(image_url_data, dict):
1432
+ # Extract the URL from the dict structure
1433
+ url = image_url_data.get("url", "")
1434
+ else:
1435
+ # Assume it's already a URL string
1436
+ url = str(image_url_data) if image_url_data else ""
1437
+
1438
+ image_url_obj: MessageContentMixedContentImageFragmentImageURL = {"url": url}
1439
+ sdk_content.append({
1440
+ "type": "image_url",
1441
+ "image_url": image_url_obj
1442
+ })
1443
+
1444
+ sdk_message = WriterAIMessage(
1445
+ content=sdk_content,
1446
+ role=message["role"]
1447
+ )
1448
+ else:
1449
+ # Handle simple text content
1450
+ sdk_message = WriterAIMessage(
1451
+ content=content or "",
1452
+ role=message["role"]
1194
1453
  )
1195
1454
  if msg_name := message.get("name"):
1196
1455
  sdk_message["name"] = cast(str, msg_name)
@@ -1200,7 +1459,7 @@ class Conversation:
1200
1459
  sdk_message["tool_calls"] = cast(list, msg_tool_calls)
1201
1460
  if msg_graph_data := message.get("graph_data"):
1202
1461
  sdk_message["graph_data"] = cast(
1203
- MessageGraphData,
1462
+ GraphData,
1204
1463
  msg_graph_data
1205
1464
  )
1206
1465
  if msg_refusal := message.get("refusal"):
@@ -1275,8 +1534,8 @@ class Conversation:
1275
1534
 
1276
1535
  def _prepare_tool(
1277
1536
  self,
1278
- tool_instance: Union['Graph', GraphTool, FunctionTool]
1279
- ) -> Union[SDKGraphTool, SDKFunctionTool]:
1537
+ tool_instance: Union['Graph', GraphTool, FunctionTool, LLMTool, WebSearchTool, dict]
1538
+ ) -> Union[SDKGraphTool, SDKFunctionTool, SDKLlmTool, SDKWebSearchTool]:
1280
1539
  """
1281
1540
  Internal helper function to process a tool instance
1282
1541
  into the required format.
@@ -1310,15 +1569,18 @@ class Conversation:
1310
1569
  raise ValueError(
1311
1570
  f"'type' for parameter '{param_name}' must be a string"
1312
1571
  )
1313
-
1572
+
1314
1573
  supported_types = {
1315
- "string", "number", "integer", "float",
1574
+ "string", "number", "integer", "float",
1316
1575
  "boolean", "array", "object", "null"
1317
1576
  }
1318
1577
  if param_info["type"] not in supported_types:
1319
- raise ValueError(
1578
+ logging.warning(
1320
1579
  f"Unsupported type '{param_info['type']}' " +
1321
- f"for parameter '{param_name}'"
1580
+ f"for parameter '{param_name}'. " +
1581
+ "Fallback to 'string' type will be used. " +
1582
+ "This may lead to unexpected results. " +
1583
+ f"Supported types are: {supported_types}"
1322
1584
  )
1323
1585
 
1324
1586
  # Optional 'description' validation (if provided)
@@ -1334,7 +1596,9 @@ class Conversation:
1334
1596
 
1335
1597
  return True
1336
1598
 
1337
- def prepare_parameters(parameters: Dict[str, FunctionToolParameterMeta]) -> Dict:
1599
+ def prepare_parameters(
1600
+ parameters: Dict[str, FunctionToolParameterMeta]
1601
+ ) -> Dict:
1338
1602
  """
1339
1603
  Prepares the parameters dictionary for a function tool.
1340
1604
 
@@ -1388,13 +1652,14 @@ class Conversation:
1388
1652
  "type": "graph",
1389
1653
  "function": {
1390
1654
  "graph_ids": [tool_instance.id],
1391
- "subqueries": True
1655
+ "subqueries": True,
1656
+ "description": tool_instance.description
1392
1657
  }
1393
1658
  }
1394
1659
  )
1395
1660
 
1396
1661
  elif isinstance(tool_instance, dict):
1397
- # Handle a dictionary (either a graph or a function)
1662
+ # Handle a dictionary (either a graph, a function or a LLM tool)
1398
1663
  if "type" not in tool_instance:
1399
1664
  raise ValueError(
1400
1665
  "Invalid tool definition: 'type' field is missing"
@@ -1406,6 +1671,13 @@ class Conversation:
  tool_instance = cast(GraphTool, tool_instance)
  if "graph_ids" not in tool_instance:
  raise ValueError("Graph tool must include 'graph_ids'")
+ if "description" not in tool_instance:
+ logging.warning(
+ "No description provided for `graph` tool. " +
+ "This may produce suboptimal results. " +
+ "To increase output quality, provide a description " +
+ "for the tool."
+ )
  # Return graph tool JSON

  graph_ids_valid = \
@@ -1419,6 +1691,9 @@ class Conversation:
  "graph_ids": tool_instance["graph_ids"],
  "subqueries": tool_instance.get(
  "subqueries", False
+ ),
+ "description": tool_instance.get(
+ "description", None
  )
  }
  }
@@ -1441,6 +1716,13 @@ class Conversation:
  raise ValueError(
  "Function tool must include 'name' and 'parameters'"
  )
+ if "description" not in tool_instance:
+ logging.warning(
+ "No description provided for `function` tool. " +
+ "This may produce suboptimal results. " +
+ "To increase output quality, provide a description " +
+ "for the tool."
+ )

  parameters_valid = \
  validate_parameters(tool_instance["parameters"])
@@ -1460,7 +1742,8 @@ class Conversation:
  "parameters":
  prepare_parameters(
  tool_instance["parameters"]
- )
+ ),
+ "description": tool_instance.get("description")
  }
  }
  )
@@ -1470,6 +1753,54 @@ class Conversation:
  f"`{tool_instance['name']}`"
  )

+ elif tool_type == "llm":
+ tool_instance = cast(LLMTool, tool_instance)
+ if "model" not in tool_instance:
+ raise ValueError("LLM tool must include 'model'")
+ if "description" not in tool_instance:
+ logging.warning(
+ "No description provided for `llm` tool. " +
+ "This may produce suboptimal results. " +
+ "To increase output quality, provide a description " +
+ "for the tool."
+ )
+ # Return LLM tool JSON
+ return cast(
+ SDKLlmTool,
+ {
+ "type": "llm",
+ "function": {
+ "model": tool_instance["model"],
+ "description": tool_instance.get("description")
+ }
+ }
+ )
+ elif tool_type == "web_search":
+ # Return web search tool JSON - SDK format
+ tool_instance = cast(WebSearchTool, tool_instance)
+ function_config: Dict[str, Any] = {}
+
+ # Add required parameters - SDK expects these as lists
+ if "include_domains" in tool_instance:
+ include_domains = tool_instance["include_domains"]
+ if include_domains:
+ function_config["include_domains"] = include_domains
+ if "exclude_domains" in tool_instance:
+ exclude_domains = tool_instance["exclude_domains"]
+ if exclude_domains:
+ function_config["exclude_domains"] = exclude_domains
+ if "include_raw_content" in tool_instance:
+ include_raw_content = tool_instance["include_raw_content"]
+ if include_raw_content is not None:
+ function_config["include_raw_content"] = include_raw_content
+
+ # Return as ExtendedWebSearchTool but ensure SDK compatibility
+ result = ExtendedWebSearchTool({
+ "type": "web_search",
+ "function": function_config
+ })
+ # Cast to SDKWebSearchTool for SDK compatibility while preserving extra fields
+ return cast(SDKWebSearchTool, result)
  else:
  raise ValueError(f"Unsupported tool type: {tool_type}")
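As context for the description warnings added above, a hypothetical sketch of how graph and LLM tool dictionaries with explicit descriptions might be passed to `Conversation.complete()`; the graph ID, model name and default-constructed conversation are illustrative placeholders, not values taken from this release:

>>> conversation = Conversation()
>>> graph_tool = {
...     "type": "graph",
...     "graph_ids": ["example-graph-id"],
...     "subqueries": False,
...     "description": "Product documentation knowledge graph"
... }
>>> llm_tool = {
...     "type": "llm",
...     "model": "example-model",
...     "description": "Delegates open-ended questions to a general-purpose model"
... }
>>> message = conversation.complete(tools=[graph_tool, llm_tool])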
 
@@ -1522,12 +1853,43 @@ class Conversation:
  """
  self.__add__({"role": role, "content": message})

+ def add_with_images(
+ self,
+ role: Literal["system", "assistant", "user", "tool"],
+ text: Optional[str] = None,
+ image_urls: Optional[List[str]] = None
+ ):
+ """
+ Adds a new multimodal message with text and/or images.
+
+ :param role: The role of the message sender.
+ :param text: Optional text content.
+ :param image_urls: Optional list of image URLs (web URLs or base64 data URLs).
+ """
+ if not text and not image_urls:
+ raise ValueError("At least one of text or image_urls must be provided")
+
+ content: List['Conversation.ContentFragment'] = []
+
+ if text:
+ content.append({"type": "text", "text": text})
+
+ if image_urls:
+ for image_url in image_urls:
+ content.append({
+ "type": "image_url",
+ "image_url": {"url": image_url}
+ })
+
+ self.__add__({"role": role, "content": content})
+
+ @catch_guardrail_error
  def _send_chat_request(
  self,
  request_model: str,
  request_data: ChatOptions,
  stream: bool = False
- ) -> Union[Stream, Chat]:
+ ) -> Union[Stream, ChatCompletion]:
  """
  Helper function to send a chat request to the LLM.

@@ -1535,7 +1897,7 @@ class Conversation:
  :param request_data: Configuration settings for the chat request.
  :param stream: Whether to use streaming mode.
  :return: The response from the LLM, either as
- a Stream or a Chat object.
+ a Stream or a ChatCompletion object.
  """
  client = WriterAIManager.acquire_client()
  prepared_messages = [
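For orientation, a minimal usage sketch of the newly added `add_with_images` helper, following the doctest style used in this module's docstrings; the default-constructed conversation and image URL are placeholders:

>>> conversation = Conversation()
>>> conversation.add_with_images(
...     role="user",
...     text="What is shown in this picture?",
...     image_urls=["https://example.com/photo.png"]
... )

Passing neither `text` nor `image_urls` raises a ValueError, as implemented above.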
@@ -1547,11 +1909,20 @@ class Conversation:
  f"prepared messages – {prepared_messages}, " +
  f"request_data – {request_data}"
  )
+ tools = request_data.get('tools', NotGiven())
+ tool_choice: Union[ToolChoice, NotGiven]
+ if isinstance(tools, NotGiven):
+ tool_choice = NotGiven()
+ else:
+ tool_choice = request_data.get('tool_choice', cast(ToolChoice, 'auto'))
  return client.chat.chat(
  messages=prepared_messages,
  model=request_model,
  stream=stream,
- tools=request_data.get('tools', NotGiven()),
+ logprobs=request_data.get('logprobs', NotGiven()),
+ tools=tools,
+ tool_choice=tool_choice,
+ response_format=request_data.get('response_format', NotGiven()),
  max_tokens=request_data.get('max_tokens', NotGiven()),
  n=request_data.get('n', NotGiven()),
  stop=request_data.get('stop', NotGiven()),
@@ -1602,7 +1973,11 @@ class Conversation:
  elif target_type == "null":
  return None
  else:
- raise ValueError(f"Unsupported target type: {target_type}")
+ logging.warning(
+ f"Unsupported target type: {target_type}. " +
+ "Falling back to string type. " +
+ "This may lead to unexpected results.")
+ return str(value)

  def _check_if_arguments_are_required(self, function_name: str) -> bool:
  callable_entry = self._callable_registry.get(function_name)
@@ -1654,7 +2029,7 @@ class Conversation:
  value,
  target_type
  )
- else:
+ elif param_info.get("required") is True:
  raise ValueError(
  f"Missing required parameter: {param_name}"
  )
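The change above relaxes argument validation: a missing value now raises only when the parameter is explicitly marked as required. A hypothetical parameter schema illustrating the distinction (the exact FunctionTool field layout is defined elsewhere in this module, so treat the field names as an assumption):

>>> parameters = {
...     "city": {"type": "string", "required": True},
...     "units": {"type": "string", "required": False}
... }

With such a schema, a tool call that omits "units" proceeds, while omitting "city" still raises "Missing required parameter: city".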
@@ -1723,28 +2098,17 @@ class Conversation:
  }

  # Capture `tool_call_id` from the message
- if tool_call_id is not None:
+ if tool_call_id is not None and tool_call_id != '':
  self._ongoing_tool_calls[index]["tool_call_id"] = tool_call_id

  # Capture `name` for function call
- if tool_call_name is not None:
+ if tool_call_name is not None and tool_call_name != '':
  self._ongoing_tool_calls[index]["name"] = tool_call_name

  # Accumulate arguments across chunks
- if tool_call_arguments is not None:
- if (
- tool_call_arguments.startswith("{")
- and tool_call_arguments.endswith("}")
- ):
- # For cases when LLM "bugs" and returns
- # the whole arguments string as a last chunk
- fixed_chunk = tool_call_arguments.rsplit("{")[-1]
- self._ongoing_tool_calls[index]["arguments"] = \
- "{" + fixed_chunk
- else:
- # Process normally
- self._ongoing_tool_calls[index]["arguments"] += \
- tool_call_arguments
+ if tool_call_arguments is not None and tool_call_arguments != '':
+ self._ongoing_tool_calls[index]["arguments"] += \
+ tool_call_arguments

  # Check if we have all necessary data to execute the function
  tool_call_id, tool_call_name, tool_call_arguments = \
@@ -1752,8 +2116,12 @@ class Conversation:
  self._ongoing_tool_calls[index]["name"], \
  self._ongoing_tool_calls[index]["arguments"]

- tool_call_id_ready = tool_call_id is not None
- tool_call_name_ready = tool_call_name is not None
+ tool_call_id_ready = \
+ tool_call_id is not None \
+ and tool_call_id != ''
+ tool_call_name_ready = \
+ tool_call_name is not None \
+ and tool_call_name != ''

  # Check whether the arguments are prepared properly -
  # either present in correct format
@@ -1794,7 +2162,7 @@ class Conversation:
  if follow_up_message:
  self._ongoing_tool_calls[index]["res"] = follow_up_message

- def _process_tool_calls(self, message: ChoiceMessage):
+ def _process_tool_calls(self, message: ChatCompletionMessage):
  if message.tool_calls:
  for helper_index, tool_call in enumerate(message.tool_calls):
  index = tool_call.index or helper_index
@@ -1812,8 +2180,8 @@ class Conversation:
  def _process_streaming_tool_calls(self, chunk: Dict):
  tool_calls = chunk["tool_calls"]
  if isinstance(tool_calls, list):
- for tool_call in tool_calls:
- index = tool_call["index"]
+ for helper_index, tool_call in enumerate(tool_calls):
+ index = tool_call["index"] or helper_index
  tool_call_id = tool_call["id"]
  tool_call_name = tool_call["function"]["name"]
  tool_call_arguments = tool_call["function"]["arguments"]
@@ -1825,7 +2193,10 @@ class Conversation:
  tool_call_arguments
  )

- def _prepare_received_message_for_history(self, message: ChoiceMessage):
+ def _prepare_received_message_for_history(
+ self,
+ message: ChatCompletionMessage
+ ):
  """
  Prepares a received message for adding to the conversation history.

@@ -1839,7 +2210,7 @@ class Conversation:

  def _process_response_data(
  self,
- response_data: Chat,
+ response_data: ChatCompletion,
  request_model: str,
  request_data: ChatOptions,
  depth=1,
@@ -1865,7 +2236,7 @@ class Conversation:
  # Send follow-up call to LLM
  logging.debug("Sending a request to LLM")
  follow_up_response = cast(
- Chat,
+ ChatCompletion,
  self._send_chat_request(
  request_model=request_model,
  request_data=request_data
@@ -1963,10 +2334,12 @@ class Conversation:
  Graph,
  GraphTool,
  FunctionTool,
- List[Union[Graph, GraphTool, FunctionTool]]
+ LLMTool,
+ List[Union[Graph, GraphTool, FunctionTool, LLMTool]]
  ] # can be an instance of tool or a list of instances
  ] = None,
  max_tool_depth: int = 5,
+ response_format: Optional[ResponseFormat] = None
  ) -> 'Conversation.Message':
  """
  Processes the conversation with the current messages and additional
@@ -1974,7 +2347,10 @@ class Conversation:
  Note: this method only produces AI model output and does not attach the
  result to the existing conversation history.

+ :param tools: Optional tools to use for processing.
  :param config: Optional parameters to pass for processing.
+ :param max_tool_depth: Maximum depth for tool calls processing.
+ :param response_format: Optional JSON schema used to format the model's output.
  :return: Generated message.
  :raises RuntimeError: If response data was not properly formatted
  to retrieve model text.
@@ -1991,11 +2367,18 @@ class Conversation:
  request_data: ChatOptions = {**config, **self.config}
  if prepared_tools:
  request_data |= {"tools": prepared_tools}
+ if response_format:
+ if not isinstance(response_format, dict):
+ raise ValueError(
+ "Invalid schema for response_format: "
+ f"dictionary required, got {type(response_format)}"
+ )
+ request_data |= {"response_format": response_format}
  request_model = \
  request_data.get("model") or WriterAIManager.use_chat_model()

- response_data: Chat = cast(
- Chat,
+ response_data: ChatCompletion = cast(
+ ChatCompletion,
  self._send_chat_request(
  request_model=request_model,
  request_data=request_data
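Since `response_format` must be a plain dictionary, here is a hedged sketch of a structured-output call; the schema shape below follows common JSON-schema response formats and is illustrative only — the exact shape accepted is defined by the SDK's `ResponseFormat` type, not by this hunk:

>>> response_format = {
...     "type": "json_schema",
...     "json_schema": {
...         "name": "answer",
...         "schema": {
...             "type": "object",
...             "properties": {"summary": {"type": "string"}}
...         }
...     }
... }
>>> message = conversation.complete(response_format=response_format)

The same keyword is accepted by `stream_complete()` in the hunks that follow.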
@@ -2025,14 +2408,17 @@ class Conversation:
  List[Union[Graph, GraphTool, FunctionTool]]
  ] # can be an instance of tool or a list of instances
  ] = None,
- max_tool_depth: int = 5
+ max_tool_depth: int = 5,
+ response_format: Optional[ResponseFormat] = None
  ) -> Generator[dict, None, None]:
  """
  Initiates a stream to receive chunks of the model's reply.
  Note: this method only produces AI model output and does not attach
  the result to the existing conversation history.

+ :param tools: Optional tools to use for processing.
  :param config: Optional parameters to pass for processing.
+ :param max_tool_depth: Maximum depth for tool calls processing.
  :yields: Model response chunks as they arrive from the stream.
  """
  config = config or {}
@@ -2047,6 +2433,13 @@ class Conversation:
  request_data: ChatOptions = {**config, **self.config}
  if prepared_tools:
  request_data |= {"tools": prepared_tools}
+ if response_format:
+ if not isinstance(response_format, dict):
+ raise ValueError(
+ "Invalid schema for response_format: "
+ f"dictionary required, got {type(response_format)}"
+ )
+ request_data |= {"response_format": response_format}
  request_model = \
  request_data.get("model") or WriterAIManager.use_chat_model()

@@ -2074,7 +2467,7 @@ class Conversation:
  """
  Function to verify whether the message should be serializable.

- :return: Boolean indicating if the message meets
+ :return: Boolean indicating if the message meets
  the criteria for serialization.
  """
  if message["role"] in ["system", "tool"]:
@@ -2114,21 +2507,135 @@ class Conversation:


  class Apps:
+ def list(
+ self,
+ config: Optional[APIOptions] = None
+ ) -> List[ApplicationListResponse]:
+ """
+ Lists all applications available to the user.
+
+ :param config: Optional dictionary containing parameters
+ for the list call.
+ :return: List of applications.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ response = client.applications.list(**config)
+ # Convert the response to list
+ # to collect all the apps available
+ result = list(response)
+
+ return result
+
+ def retrieve(
+ self,
+ application_id: str,
+ config: Optional[APIOptions] = None
+ ) -> ApplicationRetrieveResponse:
+ """
+ Retrieves all information about a specific application by its ID.
+
+ :param application_id: The ID of the application to retrieve data for.
+ :param config: Optional dictionary containing parameters
+ for the retrieve call.
+ :return: The application data.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ response_data = client.applications.retrieve(
+ application_id=application_id,
+ **config
+ )
+ return response_data
+
  def generate_content(
  self,
  application_id: str,
- input_dict: Optional[Dict[str, str]] = None,
- config: Optional[APIOptions] = None) -> str:
+ input_dict: Optional[Dict[str, Optional[Union[List[str], str]]]] = None,
+ async_job: Optional[bool] = False,
+ config: Optional[APIOptions] = None
+ ) -> Union[str, JobCreateResponse]:
  """
  Generates output based on an existing AI Studio no-code application.

  :param application_id: The id for the application, which can be
- obtained on AI Studio.
+ obtained on AI Studio.
  :param input_dict: Optional dictionary containing parameters for
- the generation call.
- :return: The generated text.
+ the generation call.
+ :param async_job: Optional. If True, the function initiates
+ an asynchronous job and returns job details instead of
+ waiting for the immediate output.
+ :return: The generated text
+ or the information about new asynchronous job.
  :raises RuntimeError: If response data was not properly formatted
  to retrieve model text.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+
+ Examples:
+
+ **Synchronous Call (Immediate Output)**
+
+ >>> response = writer.ai.apps.generate_content(
+ ... application_id="app_123",
+ ... input_dict={"topic": "Climate Change"},
+ ... async_job=False
+ ... )
+ >>> print(response)
+ "Climate change refers to long-term shifts in temperatures and weather patterns..."
+
+ **Asynchronous Call (Job Creation)**
+
+ >>> response = writer.ai.apps.generate_content(
+ ... application_id="app_123",
+ ... input_dict={"topic": "Climate Change"},
+ ... async_job=True
+ ... )
+ >>> print(response)
+ JobCreateResponse(
+ id="job_456",
+ created_at=datetime(2025, 2, 24, 12, 30, 45),
+ status="in_progress"
+ )
+ >>> result = writer.ai.apps.retrieve_job(job_id=response.id)
+ >>> if result.status == "completed":
+ ... print(result.data)
+ {
+ "title": "output",
+ "suggestion": "Climate change refers to long-term shifts in "
+ "temperatures and weather patterns..."
+ }
+
  """

  client = WriterAIManager.acquire_client()
@@ -2137,26 +2644,211 @@ class Apps:
  inputs = []

  for k, v in input_dict.items():
+ # Convert None/empty to []
+ # to avoid API 400 errors on optional inputs
+ if v is None or v == "":
+ value = []
+ else:
+ value = v if isinstance(v, list) else [v]
+
  inputs.append(Input({
  "id": k,
- "value": v if isinstance(v, list) else [v]
+ "value": value
  }))

- response_data = client.applications.generate_content(
- application_id=application_id,
- inputs=inputs,
+ if not async_job:
+ response_data = client.applications.generate_content(
+ application_id=application_id,
+ inputs=inputs,
+ **config
+ )
+
+ text = response_data.suggestion
+ if text:
+ return text
+
+ raise RuntimeError(
+ "Failed to acquire proper response " +
+ "for completion from data: " +
+ f"{response_data}"
+ )
+
+ else:
+ async_response_data = client.applications.jobs.create(
+ application_id=application_id,
+ inputs=inputs,
+ **config
+ )
+
+ return async_response_data
+
+ def retry_job(
+ self,
+ job_id: str,
+ config: Optional[APIOptions] = None
+ ) -> JobRetryResponse:
+ """
+ Retries a specific asynchronous job execution.
+
+ :param job_id: The unique identifier of the job to retry.
+ :param config: Optional API configuration options for
+ the retry request.
+ :return: The response data from retrying the job.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ response_data = client.applications.jobs.retry(
+ job_id=job_id,
  **config
  )

- text = response_data.suggestion
- if text:
- return text
+ return response_data

- raise RuntimeError(
- "Failed to acquire proper response for completion from data: " +
- f"{response_data}"
+ def retrieve_jobs(
+ self,
+ application_id: str,
+ config: Optional[APIRetrieveJobsOptions] = None
+ ) -> List[ApplicationGenerateAsyncResponse]:
+ """
+ Retrieves a list of jobs for a specific application.
+
+ :param application_id: The unique identifier of the application.
+ :param config: Optional configuration parameters for the API request.
+ :return: A list of job responses associated with
+ the specified application.
+
+ The `config` dictionary can include:
+ - `limit` (int): The pagination limit for retrieving the jobs.
+ - `offset` (int): The pagination offset for retrieving the jobs.
+ - `status` (Literal['in_progress', 'failed', 'completed']):
+ Filter jobs by the provided status.
+ - `extra_headers` (Optional[Headers]): Additional headers.
+ - `extra_query` (Optional[Query]): Extra query parameters.
+ - `extra_body` (Optional[Body]): Additional body data.
+ - `timeout` (Union[float, Timeout, None, NotGiven]):
+ Request timeout in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ jobs = client.applications.jobs.list(
+ application_id=application_id,
+ **config
+ )
+
+ return list(jobs)
+
+ def retrieve_job(
+ self,
+ job_id: str,
+ config: Optional[APIOptions] = None
+ ) -> ApplicationGenerateAsyncResponse:
+ """
+ Retrieves an asynchronous job from the Writer AI API based on its ID.
+
+ :param job_id: The unique identifier of the job to retrieve.
+ :param config: Additional API configuration options.
+ :returns: The retrieved job object from the API.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ job = client.applications.jobs.retrieve(
+ job_id=job_id,
+ **config
+ )
+
+ return job
+
+ def retrieve_graphs(
+ self,
+ application_id: str,
+ config: Optional[APIOptions] = None
+ ) -> List[str]:
+ """
+ Retrieves a list of graph IDs for a specific application.
+
+ :param application_id: The unique identifier of the application.
+ :param config: Optional configuration parameters for the API request.
+ :return: A list of graph IDs associated with the specified application.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ graphs = client.applications.graphs.list(
+ application_id=application_id,
+ **config
+ )
+
+ return graphs.graph_ids
+
+ def associate_graphs(
+ self,
+ application_id: str,
+ graph_ids: List[str],
+ config: Optional[APIOptions] = None
+ ) -> ApplicationGraphsResponse:
+ """
+ Associates a list of graph IDs with a specific application.
+
+ :param application_id: The unique identifier of the application.
+ :param graph_ids: A list of graph IDs to associate with
+ the application.
+ :param config: Optional configuration parameters for the API request.
+ :return: The response data from associating the graphs.
+
+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+ """
+ client = WriterAIManager.acquire_client()
+ config = config or {}
+
+ response_data = client.applications.graphs.update(
+ application_id=application_id,
+ graph_ids=graph_ids,
+ **config
  )

+ return response_data
+

  class Tools:
  SplittingStrategy = Union[
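Taken together, the new `Apps` helpers cover listing, graph management and asynchronous jobs. A brief, hypothetical walk-through in the same doctest style used by the docstrings above; the application ID and input reuse the placeholder values from those docstrings:

>>> apps = writer.ai.apps.list()
>>> app = writer.ai.apps.retrieve(application_id="app_123")
>>> graph_ids = writer.ai.apps.retrieve_graphs(application_id="app_123")
>>> writer.ai.apps.associate_graphs(
...     application_id="app_123",
...     graph_ids=graph_ids
... )
>>> job = writer.ai.apps.generate_content(
...     application_id="app_123",
...     input_dict={"topic": "Climate Change"},
...     async_job=True
... )
>>> writer.ai.apps.retry_job(job_id=job.id)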
@@ -2262,6 +2954,7 @@ class Tools:
  return result.entities


+ @catch_guardrail_error
  def complete(
  initial_text: str,
  config: Optional['CreateOptions'] = None
@@ -2308,6 +3001,7 @@ def complete(
  f"{response_data}")


+ @catch_guardrail_error
  def stream_complete(
  initial_text: str,
  config: Optional['CreateOptions'] = None
@@ -2373,21 +3067,16 @@ def ask(
  config: Optional[APIOptions] = None
  ):
  """
- Sends a question to the specified graph(s) and retrieves
+ Sends a question to the specified graph(s) and retrieves
  a single response.

  :param question: The query or question to be answered by the graph(s).
- :param graphs_or_graph_ids: A list of `Graph` objects or graph IDs that
+ :param graphs_or_graph_ids: A list of `Graph` objects or graph IDs that
  should be queried.
- :param subqueries: Enables subquery generation if set to True,
+ :param subqueries: Enables subquery generation if set to True,
  enhancing the result.
- :param config: Optional dictionary for additional API
+ :param config: Optional dictionary for additional API
  configuration settings.
- The configuration can include:
- - ``extra_headers`` (Optional[Headers]): Additional headers.
- - ``extra_query`` (Optional[Query]): Extra query parameters.
- - ``extra_body`` (Optional[Body]): Additional body data.
- - ``timeout`` (Union[float, Timeout, None, NotGiven]): Request timeout.

  :return: The answer to the question from the graph(s).

@@ -2396,6 +3085,16 @@ def ask(
  :raises RuntimeError: If the API response is improperly formatted
  or the answer cannot be retrieved.

+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+
  **Example Usage**:

  >>> response = ask(
@@ -2430,27 +3129,32 @@ def stream_ask(
  """
  Streams response for a question posed to the specified graph(s).

- This method returns incremental chunks of the response, ideal for long
+ This method returns incremental chunks of the response, ideal for long
  responses or when reduced latency is needed.

  :param question: The query or question to be answered by the graph(s).
- :param graphs_or_graph_ids: A list of Graph objects or graph IDs that
+ :param graphs_or_graph_ids: A list of Graph objects or graph IDs that
  should be queried.
- :param subqueries: Enables subquery generation if set to True,
+ :param subqueries: Enables subquery generation if set to True,
  enhancing the result.
- :param config: Optional dictionary for additional API
- configuration settings.
- The configuration can include:
- - ``extra_headers`` (Optional[Headers]): Additional headers.
- - ``extra_query`` (Optional[Query]): Extra query parameters.
- - ``extra_body`` (Optional[Body]): Additional body data.
- - ``timeout`` (Union[float, Timeout, None, NotGiven]): Request timeout.
+ :param config: Optional dictionary for additional API
+ configuration settings.

  :yields: Incremental chunks of the answer to the question.

  :raises ValueError: If an invalid graph or graph ID
  is provided in `graphs_or_graph_ids`.

+ The `config` dictionary can include the following keys:
+ - `extra_headers` (Optional[Headers]):
+ Additional headers for the request.
+ - `extra_query` (Optional[Query]):
+ Additional query parameters for the request.
+ - `extra_body` (Optional[Body]):
+ Additional body parameters for the request.
+ - `timeout` (Union[float, httpx.Timeout, None, NotGiven]):
+ Timeout for the request in seconds.
+
  **Example Usage**:

  >>> for chunk in stream_ask(