shotgun-sh 0.1.16.dev2__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic. See the advisory details for more information.

Files changed (55)
  1. shotgun/agents/common.py +4 -5
  2. shotgun/agents/config/constants.py +23 -6
  3. shotgun/agents/config/manager.py +239 -76
  4. shotgun/agents/config/models.py +74 -84
  5. shotgun/agents/config/provider.py +174 -85
  6. shotgun/agents/history/compaction.py +1 -1
  7. shotgun/agents/history/history_processors.py +18 -9
  8. shotgun/agents/history/token_counting/__init__.py +31 -0
  9. shotgun/agents/history/token_counting/anthropic.py +89 -0
  10. shotgun/agents/history/token_counting/base.py +67 -0
  11. shotgun/agents/history/token_counting/openai.py +80 -0
  12. shotgun/agents/history/token_counting/sentencepiece_counter.py +119 -0
  13. shotgun/agents/history/token_counting/tokenizer_cache.py +90 -0
  14. shotgun/agents/history/token_counting/utils.py +147 -0
  15. shotgun/agents/history/token_estimation.py +12 -12
  16. shotgun/agents/llm.py +62 -0
  17. shotgun/agents/models.py +2 -2
  18. shotgun/agents/tools/web_search/__init__.py +42 -15
  19. shotgun/agents/tools/web_search/anthropic.py +54 -50
  20. shotgun/agents/tools/web_search/gemini.py +31 -20
  21. shotgun/agents/tools/web_search/openai.py +4 -4
  22. shotgun/build_constants.py +2 -2
  23. shotgun/cli/config.py +34 -63
  24. shotgun/cli/feedback.py +4 -2
  25. shotgun/cli/models.py +2 -2
  26. shotgun/codebase/core/ingestor.py +47 -8
  27. shotgun/codebase/core/manager.py +7 -3
  28. shotgun/codebase/models.py +4 -4
  29. shotgun/llm_proxy/__init__.py +16 -0
  30. shotgun/llm_proxy/clients.py +39 -0
  31. shotgun/llm_proxy/constants.py +8 -0
  32. shotgun/main.py +6 -0
  33. shotgun/posthog_telemetry.py +15 -11
  34. shotgun/sentry_telemetry.py +3 -3
  35. shotgun/shotgun_web/__init__.py +19 -0
  36. shotgun/shotgun_web/client.py +138 -0
  37. shotgun/shotgun_web/constants.py +17 -0
  38. shotgun/shotgun_web/models.py +47 -0
  39. shotgun/telemetry.py +7 -4
  40. shotgun/tui/app.py +26 -8
  41. shotgun/tui/screens/chat.py +2 -8
  42. shotgun/tui/screens/chat_screen/command_providers.py +118 -11
  43. shotgun/tui/screens/chat_screen/history.py +3 -1
  44. shotgun/tui/screens/feedback.py +2 -2
  45. shotgun/tui/screens/model_picker.py +327 -0
  46. shotgun/tui/screens/provider_config.py +118 -28
  47. shotgun/tui/screens/shotgun_auth.py +295 -0
  48. shotgun/tui/screens/welcome.py +176 -0
  49. shotgun/utils/env_utils.py +12 -0
  50. {shotgun_sh-0.1.16.dev2.dist-info → shotgun_sh-0.2.1.dist-info}/METADATA +2 -2
  51. {shotgun_sh-0.1.16.dev2.dist-info → shotgun_sh-0.2.1.dist-info}/RECORD +54 -37
  52. shotgun/agents/history/token_counting.py +0 -429
  53. {shotgun_sh-0.1.16.dev2.dist-info → shotgun_sh-0.2.1.dist-info}/WHEEL +0 -0
  54. {shotgun_sh-0.1.16.dev2.dist-info → shotgun_sh-0.2.1.dist-info}/entry_points.txt +0 -0
  55. {shotgun_sh-0.1.16.dev2.dist-info → shotgun_sh-0.2.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,38 +1,45 @@
1
1
  shotgun/__init__.py,sha256=P40K0fnIsb7SKcQrFnXZ4aREjpWchVDhvM1HxI4cyIQ,104
2
- shotgun/build_constants.py,sha256=RXNxMz46HaB5jucgMVpw8a2yCJqjbhTOh0PddyEVMN8,713
2
+ shotgun/build_constants.py,sha256=hDFr6eO0lwN0iCqHQ1A5s0D68txR8sYrTJLGa7tSi0o,654
3
3
  shotgun/logging_config.py,sha256=UKenihvgH8OA3W0b8ZFcItYaFJVe9MlsMYlcevyW1HY,7440
4
- shotgun/main.py,sha256=670RwzIwEIz9QRil37IbVoxWuX66YATqXFLSYSqKw-w,4955
5
- shotgun/posthog_telemetry.py,sha256=ZD_BjRej1v4Mxh7VN3AlXGKV4jIU9SC0uBrH94VQa6c,5885
4
+ shotgun/main.py,sha256=RA3q1xPfqxCu43UmgI2ryZpA-IxPhJb_MJrbLqp9c_g,5140
5
+ shotgun/posthog_telemetry.py,sha256=TOiyBtLg21SttHGWKc4-e-PQgpbq6Uz_4OzlvlxMcZ0,6099
6
6
  shotgun/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
- shotgun/sentry_telemetry.py,sha256=L7jFMNAnDIENWVeQYSLpyul2nmIm2w3wnOp2kDP_cic,2902
8
- shotgun/telemetry.py,sha256=Ves6Ih3hshpKVNVAUUmwRdtW8NkTjFPg8hEqvFKZ0t0,3208
7
+ shotgun/sentry_telemetry.py,sha256=VD8es-tREfgtRKhDsEVvqpo0_kM_ab6iVm2lkOEmTlI,2950
8
+ shotgun/telemetry.py,sha256=WfxdHALh5_51nw783ZZvD-LEyC6ypHxSUTMXUioZhTQ,3339
9
9
  shotgun/agents/__init__.py,sha256=8Jzv1YsDuLyNPFJyckSr_qI4ehTVeDyIMDW4omsfPGc,25
10
10
  shotgun/agents/agent_manager.py,sha256=xq8L0oAFgtFCpKVsyUoMtYJqUyz5XxjWLKNnxoe1zo4,26577
11
- shotgun/agents/common.py,sha256=vt7ECq1rT6GR5Rt63t0whH0R0cydrk7Mty2KyPL8mEg,19045
11
+ shotgun/agents/common.py,sha256=Hr9HigsDopkI0Sr3FThGDv1f67NLemOjcYA6LV9v970,18963
12
12
  shotgun/agents/conversation_history.py,sha256=5J8_1yxdZiiWTq22aDio88DkBDZ4_Lh_p5Iy5_ENszc,3898
13
13
  shotgun/agents/conversation_manager.py,sha256=fxAvXbEl3Cl2ugJ4N9aWXaqZtkrnfj3QzwjWC4LFXwI,3514
14
14
  shotgun/agents/export.py,sha256=Zke952DbJ_lOBUmN-TPHw7qmjbfqsFu1uycBRQI_pkg,2969
15
+ shotgun/agents/llm.py,sha256=hs8j1wwTczGtehzahL1Z_5D4qus5QUx4-h9-m5ZPzm4,2209
15
16
  shotgun/agents/messages.py,sha256=wNn0qC5AqASM8LMaSGFOerZEJPn5FsIOmaJs1bdosuU,1036
16
- shotgun/agents/models.py,sha256=ULN7wdOJebYlNxtbKFTm9nJ9uv8g1C8nAG46XOcscTY,8104
17
+ shotgun/agents/models.py,sha256=IvwwjbJYi5wi9S-budg8g1ezi1VaO57Q-XtegkbTrXg,8096
17
18
  shotgun/agents/plan.py,sha256=s-WfILBOW4l8kY59RUOVtX5MJSuSzFm1nGp6b17If78,3030
18
19
  shotgun/agents/research.py,sha256=lYG7Rytcitop8mXs3isMI3XvYzzI3JH9u0VZz6K9zfo,3274
19
20
  shotgun/agents/specify.py,sha256=7MoMxfIn34G27mw6wrp_F0i2O5rid476L3kHFONDCd0,3137
20
21
  shotgun/agents/tasks.py,sha256=nk8zIl24o01hfzOGyWSbeVWeke6OGseO4Ppciurh13U,2999
21
22
  shotgun/agents/usage_manager.py,sha256=5d9JC4_cthXwhTSytMfMExMDAUYp8_nkPepTJZXk13w,5017
22
23
  shotgun/agents/config/__init__.py,sha256=Fl8K_81zBpm-OfOW27M_WWLSFdaHHek6lWz95iDREjQ,318
23
- shotgun/agents/config/constants.py,sha256=MogArrb2r5rFI6BBzc6NhPz1payGeM6K-t5oIFbJgxg,494
24
- shotgun/agents/config/manager.py,sha256=kwMbPjz0kEH_WCQAamESGjHdE8d_P-ztel4NL4FWNUw,10662
25
- shotgun/agents/config/models.py,sha256=vpVXrtiHsDt2D_h7BLyMiiQeT97vAz2L6lYKx2SEMjo,5909
26
- shotgun/agents/config/provider.py,sha256=pVWf_WM3MNWH0v2fU-peBCqx49X-nW81piQ_M-AKWRE,7249
24
+ shotgun/agents/config/constants.py,sha256=JNuLpeBUKikEsxGSjwX3RVWUQpbCKnDKstF2NczuDqk,932
25
+ shotgun/agents/config/manager.py,sha256=e1HjGWKN1l9jDmK5MG8cZ6UMeWq6MntVv0NfETIgSO8,17577
26
+ shotgun/agents/config/models.py,sha256=ohLXt9niCy4uFfFP1E6WSBZtxh7aZ16gTA2S3pHYkmc,5431
27
+ shotgun/agents/config/provider.py,sha256=TwwZC_BtYSOpN2jdX6WZdor29EnAqfMoQK5GmNEYaPI,11012
27
28
  shotgun/agents/history/__init__.py,sha256=XFQj2a6fxDqVg0Q3juvN9RjV_RJbgvFZtQOCOjVJyp4,147
28
- shotgun/agents/history/compaction.py,sha256=Je8-7T2i78gv_QWzgZNe9mvscaHcaTMttQX9xS8M38Q,3509
29
+ shotgun/agents/history/compaction.py,sha256=9RMpG0aY_7L4TecbgwHSOkGtbd9W5XZTg-MbzZmNl00,3515
29
30
  shotgun/agents/history/constants.py,sha256=yWY8rrTZarLA3flCCMB_hS2NMvUDRDTwP4D4j7MIh1w,446
30
31
  shotgun/agents/history/context_extraction.py,sha256=yVka1U6TqNVsORR4JlxpWi9yBt3Quip8g_u3x2Vi9Gs,3564
31
32
  shotgun/agents/history/history_building.py,sha256=6LFDZ60MTPDoGAcmu_mjlnjVYu8YYWdIi-cGbF3jm7A,3532
32
- shotgun/agents/history/history_processors.py,sha256=NbStr6CvszwK3DKO5LCiaVEJI-RAcVnH5dCpHTAHjQo,17731
33
+ shotgun/agents/history/history_processors.py,sha256=D3z-hzrXHxE7OAZaVX4_YAKN_nyxSF5iYMIYO24V_CI,17943
33
34
  shotgun/agents/history/message_utils.py,sha256=aPusAl2RYKbjc7lBxPaNprRHmZEG6fe97q7DQUlhlzU,2918
34
- shotgun/agents/history/token_counting.py,sha256=RasWy84eNjbmqyQDTGAzj1Q1I9ml_G_9R-maWN7gr8s,13839
35
- shotgun/agents/history/token_estimation.py,sha256=iNqhDSqFzG0YYxGijMRzj54GALFglOp0qVMB6G59RhU,4690
35
+ shotgun/agents/history/token_estimation.py,sha256=iRyKq-YDivEpJrULIbQgNpjhOuSC4nHVJYfsWEFV8sQ,4770
36
+ shotgun/agents/history/token_counting/__init__.py,sha256=YZt5Lus--fkF6l1hdkIlp1e_oAIpACNwHOI0FRP4q8s,924
37
+ shotgun/agents/history/token_counting/anthropic.py,sha256=b2LvwKM4dSILGhv_-W4mLMKMUCPLhe1ov9UGW_-iBsw,3011
38
+ shotgun/agents/history/token_counting/base.py,sha256=TN4mzwSyWNQyTuOuCFaU-8AgLdAyquoX3af4qrmkxCs,1904
39
+ shotgun/agents/history/token_counting/openai.py,sha256=XJ2z2HaUG6f3Cw9tCK_yaOsaMJGHpSFF1I30-d3soSI,2350
40
+ shotgun/agents/history/token_counting/sentencepiece_counter.py,sha256=qj1bT7J5nCd5y6Mr42O9K1KTaele0rjdd09FeyyEA70,3987
41
+ shotgun/agents/history/token_counting/tokenizer_cache.py,sha256=Y0V6KMtEwn42M5-zJGAc7YudM8X6m5-j2ekA6YGL5Xk,2868
42
+ shotgun/agents/history/token_counting/utils.py,sha256=d124IDjtd0IYBYrr3gDJGWxSbdP10Vrc7ZistbUosMg,5002
36
43
  shotgun/agents/tools/__init__.py,sha256=QaN80IqWvB5qEcjHqri1-PYvYlO74vdhcwLugoEdblo,772
37
44
  shotgun/agents/tools/file_management.py,sha256=HYNe_QA4T3_bPzSWBYcFZcnWdj8eb4aQ3GB735-G8Nw,7138
38
45
  shotgun/agents/tools/user_interaction.py,sha256=b3ncEpvoD06Cz4hwsS-ppVbQajQj640iWnVfA5WBjAA,1236
@@ -43,16 +50,16 @@ shotgun/agents/tools/codebase/file_read.py,sha256=EGK5yNqiS4cbIEQfDtdKVoJSJYk20N
43
50
  shotgun/agents/tools/codebase/models.py,sha256=8eR3_8DQiBNgB2twu0aC_evIJbugN9KW3gtxMZdGYCE,10087
44
51
  shotgun/agents/tools/codebase/query_graph.py,sha256=vOeyN4-OZj-vpTSk3Z9W5TjraZAepJ-Qjk_zzvum3fU,2115
45
52
  shotgun/agents/tools/codebase/retrieve_code.py,sha256=2VjiqVKJMd9rPV-mGrL4C-N8fqGjYLW6ZInFGbcTxOM,2878
46
- shotgun/agents/tools/web_search/__init__.py,sha256=Sj1tVokrCsJiLRWWTq0zrAolMHEGntRIYnqiyFi8L2E,1840
47
- shotgun/agents/tools/web_search/anthropic.py,sha256=NDhj8MrdxLsmGwHp7uM0IQeJVW2poY58GCUTJEM9dew,4827
48
- shotgun/agents/tools/web_search/gemini.py,sha256=hXjWUF-aTX3B9ViaKe5aF2aHXlaoBA5am40cgilinGE,2981
49
- shotgun/agents/tools/web_search/openai.py,sha256=V8GeqwUAi5wrbRuU41Y38schpXRdyeIfw85-CT5rAhY,3415
53
+ shotgun/agents/tools/web_search/__init__.py,sha256=_9rgs_gv41-wfPvwfWM_Qfq-zvboyQ_srfyneGsxgM4,3182
54
+ shotgun/agents/tools/web_search/anthropic.py,sha256=GelAhAmb-b4o87-3sgxNFfw-G2LXDEjfdZ7XfF0bQD0,4983
55
+ shotgun/agents/tools/web_search/gemini.py,sha256=-fI_deaBT4-_61A7KlKtz8tmKXW50fVx_97WAJTUg4w,3468
56
+ shotgun/agents/tools/web_search/openai.py,sha256=pnIcTV3vwXJQuxPs4I7gQNX18XzM7D7FqeNxnn1E7yw,3437
50
57
  shotgun/agents/tools/web_search/utils.py,sha256=GLJ5QV9bT2ubFMuFN7caMN7tK9OTJ0R3GD57B-tCMF0,532
51
58
  shotgun/cli/__init__.py,sha256=_F1uW2g87y4bGFxz8Gp8u7mq2voHp8vQIUtCmm8Tojo,40
52
- shotgun/cli/config.py,sha256=LbjxDNPdetYJiwlcyOYLnqwzALfgU-m54cfstUshbrs,8715
59
+ shotgun/cli/config.py,sha256=lT_zXwui-Wv3hewjebQeu9eLwK3tYn1wla5vKit6eqs,7931
53
60
  shotgun/cli/export.py,sha256=3hIwK2_OM1MFYSTfzBxsGuuBGm5fo0XdxASfQ5Uqb3Y,2471
54
- shotgun/cli/feedback.py,sha256=Me1dQQgkYwP4AIFwYgfHcPXxFdJ6CzFbCBttKcFd2Q0,1238
55
- shotgun/cli/models.py,sha256=LoajeEK7MEDUSnZXb1Li-dbhXqne812YZglx-LcVpiQ,181
61
+ shotgun/cli/feedback.py,sha256=K8iFDl5051_g95jwDEm9gdKUjDWO8HBVZjlRN8uD7Mk,1300
62
+ shotgun/cli/models.py,sha256=kwZEldQWUheNsqF_ezgDzRBc6h0Y0JxFw1VMQjZlvPE,182
56
63
  shotgun/cli/plan.py,sha256=T-eu-I9z-dSoKqJ-KI8X5i5Mm0VL1BfornxRiUjTgnk,2324
57
64
  shotgun/cli/research.py,sha256=qvBBtX3Wyn6pDZlJpcEvbeK-0iTOXegi71tm8HKVYaE,2490
58
65
  shotgun/cli/specify.py,sha256=ErRQ72Zc75fmxopZbKy0vvnLPuYBLsGynpjj1X6-BwI,2166
@@ -63,17 +70,20 @@ shotgun/cli/codebase/__init__.py,sha256=rKdvx33p0i_BYbNkz5_4DCFgEMwzOOqLi9f5p7XT
63
70
  shotgun/cli/codebase/commands.py,sha256=1N2yOGmok0ZarqXPIpWGcsQrwm_ZJcyWiMxy6tm0j70,8711
64
71
  shotgun/cli/codebase/models.py,sha256=B9vs-d-Bq0aS6FZKebhHT-9tw90Y5f6k_t71VlZpL8k,374
65
72
  shotgun/codebase/__init__.py,sha256=QBgFE2Abd5Vl7_NdYOglF9S6d-vIjkb3C0cpIYoHZEU,309
66
- shotgun/codebase/models.py,sha256=1AAipm6KrGOHmYBBavugnyeOsVkzX-YXAD6dDsSVRWg,5299
73
+ shotgun/codebase/models.py,sha256=5e_7zaPL032n_ghcvs01Uug3BH4jyKiQ3S3U5w21BSM,5296
67
74
  shotgun/codebase/service.py,sha256=nyggapfHKdwkKXyuT9oA0tJ9qf4RNVsOxfY8lC5pHro,8006
68
75
  shotgun/codebase/core/__init__.py,sha256=GWWhJEqChiDXAF4omYCgzgoZmJjwsAf6P1aZ5Bl8OE0,1170
69
76
  shotgun/codebase/core/change_detector.py,sha256=kWCYLWzRzb3IGGOj71KBn7UOCOKMpINJbOBDf98aMxE,12409
70
77
  shotgun/codebase/core/code_retrieval.py,sha256=_JVyyQKHDFm3dxOOua1mw9eIIOHIVz3-I8aZtEsEj1E,7927
71
78
  shotgun/codebase/core/cypher_models.py,sha256=Yfysfa9lLguILftkmtuJCN3kLBFIo7WW7NigM-Zr-W4,1735
72
- shotgun/codebase/core/ingestor.py,sha256=yh6BEIuUUfXU3dVpP0Llk19SrxA-uo3pdGnfcQsDsSo,63368
79
+ shotgun/codebase/core/ingestor.py,sha256=CNYbdoJycnbA2psYCD9uKcUwIe3Ao7I7T6NrPhTQE9k,64613
73
80
  shotgun/codebase/core/language_config.py,sha256=vsqHyuFnumRPRBV1lMOxWKNOIiClO6FyfKQR0fGrtl4,8934
74
- shotgun/codebase/core/manager.py,sha256=USGLBdDUoFtq6fMFWRtUu2HBC_FI8d6lWcAV4l6fcvk,66000
81
+ shotgun/codebase/core/manager.py,sha256=kjxQ9eCs5vVCVDproCN1eYSKuGiqtcxF01reQ18JfOw,66184
75
82
  shotgun/codebase/core/nl_query.py,sha256=kPoSJXBlm5rLhzOofZhqPVMJ_Lj3rV2H6sld6BwtMdg,16115
76
83
  shotgun/codebase/core/parser_loader.py,sha256=LZRrDS8Sp518jIu3tQW-BxdwJ86lnsTteI478ER9Td8,4278
84
+ shotgun/llm_proxy/__init__.py,sha256=BLD9NnVzdD0H7gFb65Ajud-Q7SiCymegLRaGx8UkC-Y,435
85
+ shotgun/llm_proxy/clients.py,sha256=wP4UlgtCdrNwWsZLZ9inE3fEIDa-i1j7gsr9oXQf1o4,1037
86
+ shotgun/llm_proxy/constants.py,sha256=E8sqL-8GZzl989T3OS7E1hImSZPj2vqmp3lbM6zGiQU,309
77
87
  shotgun/prompts/__init__.py,sha256=RswUm0HMdfm2m2YKUwUsEdRIwoczdbI7zlucoEvHYRo,132
78
88
  shotgun/prompts/loader.py,sha256=jy24-E02pCSmz2651aCT2NgHfRrHAGMYvKrD6gs0Er8,4424
79
89
  shotgun/prompts/agents/__init__.py,sha256=YRIJMbzpArojNX1BP5gfxxois334z_GQga8T-xyWMbY,39
@@ -103,8 +113,12 @@ shotgun/sdk/codebase.py,sha256=7doUvwwl27RDJZIbP56LQsAx26GANtAKEBptTUhLT6w,8842
103
113
  shotgun/sdk/exceptions.py,sha256=qBcQv0v7ZTwP7CMcxZST4GqCsfOWtOUjSzGBo0-heqo,412
104
114
  shotgun/sdk/models.py,sha256=X9nOTUHH0cdkQW1NfnMEDu-QgK9oUsEISh1Jtwr5Am4,5496
105
115
  shotgun/sdk/services.py,sha256=J4PJFSxCQ6--u7rb3Ta-9eYtlYcxcbnzrMP6ThyCnw4,705
116
+ shotgun/shotgun_web/__init__.py,sha256=IB-TvK3WvLNrdKH0j9MwMGtIjqi81ASFIVwaZa0ifNg,461
117
+ shotgun/shotgun_web/client.py,sha256=n5DDuVfSa6VPZjhSsfSxQlSFOnhgDHyidRnB8Hv9XF4,4134
118
+ shotgun/shotgun_web/constants.py,sha256=b1pwWr9l__3fVex6EUx8Z0fBO3jkzvr9gDdZK9_0jik,553
119
+ shotgun/shotgun_web/models.py,sha256=Ie9VfqKZM2tIJhIjentU9qLoNaMZvnUJaIu-xg9kQsA,1391
106
120
  shotgun/tui/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
107
- shotgun/tui/app.py,sha256=Ca7pAF2GOR6RvJdK1_P_aqF7OosTm5WRUhxF1M-Ihcs,4994
121
+ shotgun/tui/app.py,sha256=bWs0GVJzXb6ZdANDi0qw1xtOAuVaCX6Xgb-ziNi5kVg,5753
108
122
  shotgun/tui/filtered_codebase_service.py,sha256=lJ8gTMhIveTatmvmGLP299msWWTkVYKwvY_2FhuL2s4,1687
109
123
  shotgun/tui/styles.tcss,sha256=ETyyw1bpMBOqTi5RLcAJUScdPWTvAWEqE9YcT0kVs_E,121
110
124
  shotgun/tui/commands/__init__.py,sha256=8D5lvtpqMW5-fF7Bg3oJtUzU75cKOv6aUaHYYszydU8,2518
@@ -112,25 +126,28 @@ shotgun/tui/components/prompt_input.py,sha256=Ss-htqraHZAPaehGE4x86ij0veMjc4Ugad
112
126
  shotgun/tui/components/spinner.py,sha256=ovTDeaJ6FD6chZx_Aepia6R3UkPOVJ77EKHfRmn39MY,2427
113
127
  shotgun/tui/components/splash.py,sha256=vppy9vEIEvywuUKRXn2y11HwXSRkQZHLYoVjhDVdJeU,1267
114
128
  shotgun/tui/components/vertical_tail.py,sha256=kROwTaRjUwVB7H35dtmNcUVPQqNYvvfq7K2tXBKEb6c,638
115
- shotgun/tui/screens/chat.py,sha256=H3uAE4sQ7iJO33tJhdoW_OkzaNxXgW6BUP-SxoNiohw,30465
129
+ shotgun/tui/screens/chat.py,sha256=CqAv_x6R4zl-MGbtg8KgZWt8OhpBJYpx5gGBQ3oxqgw,30313
116
130
  shotgun/tui/screens/chat.tcss,sha256=2Yq3E23jxsySYsgZf4G1AYrYVcpX0UDW6kNNI0tDmtM,437
117
131
  shotgun/tui/screens/directory_setup.py,sha256=lIZ1J4A6g5Q2ZBX8epW7BhR96Dmdcg22CyiM5S-I5WU,3237
118
- shotgun/tui/screens/feedback.py,sha256=cYtmuM3qqKwevstu8gJ9mmk7lkIKZvfAyDEBUOLh-yI,5660
119
- shotgun/tui/screens/provider_config.py,sha256=KIsI9bCOzk6wf_WIDHoaWOcrAaXgT4gF6hnX-1ArEn8,7487
132
+ shotgun/tui/screens/feedback.py,sha256=VxpW0PVxMp22ZvSfQkTtgixNrpEOlfWtekjqlVfYEjA,5708
133
+ shotgun/tui/screens/model_picker.py,sha256=G-EvalpxgHKk0W3FgHMcxIr817VwZyEgh_ZadSQiRwo,11831
134
+ shotgun/tui/screens/provider_config.py,sha256=s3SA13BJsV_Ge5lXdd4nOtbhCMjBPKqNOGaN20uMRBA,11069
135
+ shotgun/tui/screens/shotgun_auth.py,sha256=Y--7LZewV6gfDkucxymfAO7BCd7eI2C3H1ClDMztVio,10663
120
136
  shotgun/tui/screens/splash.py,sha256=E2MsJihi3c9NY1L28o_MstDxGwrCnnV7zdq00MrGAsw,706
137
+ shotgun/tui/screens/welcome.py,sha256=cpsBK2Gy99Nz7rwZhxVn310G68TjSdGXpIXGRp7DoLY,5329
121
138
  shotgun/tui/screens/chat_screen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
122
- shotgun/tui/screens/chat_screen/command_providers.py,sha256=A3vOs6J9QXg-JVi47knQdYM2_vA_IygsDJst0xpDibg,8923
139
+ shotgun/tui/screens/chat_screen/command_providers.py,sha256=7Xnxd4k30bpLOMZSX32bcugU4IgpqU4Y8f6eHWKXd4o,12694
123
140
  shotgun/tui/screens/chat_screen/hint_message.py,sha256=WOpbk8q7qt7eOHTyyHvh_IQIaublVDeJGaLpsxEk9FA,933
124
- shotgun/tui/screens/chat_screen/history.py,sha256=NVLA3_tERTyB4vkH71w8ef_M5CszfkwbQOuMb100Fzc,12272
141
+ shotgun/tui/screens/chat_screen/history.py,sha256=Go859iEjw0s5aELKpF42MjLXy7UFQ52XnJMTIkV3aLo,12406
125
142
  shotgun/tui/utils/__init__.py,sha256=cFjDfoXTRBq29wgP7TGRWUu1eFfiIG-LLOzjIGfadgI,150
126
143
  shotgun/tui/utils/mode_progress.py,sha256=lseRRo7kMWLkBzI3cU5vqJmS2ZcCjyRYf9Zwtvc-v58,10931
127
144
  shotgun/utils/__init__.py,sha256=WinIEp9oL2iMrWaDkXz2QX4nYVPAm8C9aBSKTeEwLtE,198
128
- shotgun/utils/env_utils.py,sha256=8QK5aw_f_V2AVTleQQlcL0RnD4sPJWXlDG46fsHu0d8,1057
145
+ shotgun/utils/env_utils.py,sha256=5spVCdeqVKtlWoKocPhz_5j_iRN30neqcGUzUuiWmfc,1365
129
146
  shotgun/utils/file_system_utils.py,sha256=l-0p1bEHF34OU19MahnRFdClHufThfGAjQ431teAIp0,1004
130
147
  shotgun/utils/source_detection.py,sha256=Co6Q03R3fT771TF3RzB-70stfjNP2S4F_ArZKibwzm8,454
131
148
  shotgun/utils/update_checker.py,sha256=IgzPHRhS1ETH7PnJR_dIx6lxgr1qHpCkMTgzUxvGjhI,7586
132
- shotgun_sh-0.1.16.dev2.dist-info/METADATA,sha256=593zGac-zT_lBaqzrAXKl_N0WxPigfiXnLSWmY92Z14,11233
133
- shotgun_sh-0.1.16.dev2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
134
- shotgun_sh-0.1.16.dev2.dist-info/entry_points.txt,sha256=asZxLU4QILneq0MWW10saVCZc4VWhZfb0wFZvERnzfA,45
135
- shotgun_sh-0.1.16.dev2.dist-info/licenses/LICENSE,sha256=YebsZl590zCHrF_acCU5pmNt0pnAfD2DmAnevJPB1tY,1065
136
- shotgun_sh-0.1.16.dev2.dist-info/RECORD,,
149
+ shotgun_sh-0.2.1.dist-info/METADATA,sha256=HpEVBqcRqqhsBOk2GkapVPVVzDkVQkb2AcGy8stTq5Y,11221
150
+ shotgun_sh-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
151
+ shotgun_sh-0.2.1.dist-info/entry_points.txt,sha256=asZxLU4QILneq0MWW10saVCZc4VWhZfb0wFZvERnzfA,45
152
+ shotgun_sh-0.2.1.dist-info/licenses/LICENSE,sha256=YebsZl590zCHrF_acCU5pmNt0pnAfD2DmAnevJPB1tY,1065
153
+ shotgun_sh-0.2.1.dist-info/RECORD,,
@@ -1,429 +0,0 @@
1
- """Real token counting for all supported providers.
2
-
3
- This module provides accurate token counting using each provider's official
4
- APIs and libraries, eliminating the need for rough character-based estimation.
5
- """
6
-
7
- from abc import ABC, abstractmethod
8
- from typing import TYPE_CHECKING
9
-
10
- from pydantic_ai.messages import ModelMessage
11
-
12
- from shotgun.agents.config.models import ModelConfig, ProviderType
13
- from shotgun.logging_config import get_logger
14
-
15
- if TYPE_CHECKING:
16
- pass
17
-
18
- logger = get_logger(__name__)
19
-
20
- # Global cache for token counter instances (singleton pattern)
21
- _token_counter_cache: dict[tuple[str, str, str], "TokenCounter"] = {}
22
-
23
-
24
- class TokenCounter(ABC):
25
- """Abstract base class for provider-specific token counting."""
26
-
27
- @abstractmethod
28
- def count_tokens(self, text: str) -> int:
29
- """Count tokens in text using provider-specific method.
30
-
31
- Args:
32
- text: Text to count tokens for
33
-
34
- Returns:
35
- Exact token count as determined by the provider
36
-
37
- Raises:
38
- RuntimeError: If token counting fails
39
- """
40
-
41
- @abstractmethod
42
- def count_message_tokens(self, messages: list[ModelMessage]) -> int:
43
- """Count tokens in PydanticAI message structures.
44
-
45
- Args:
46
- messages: List of messages to count tokens for
47
-
48
- Returns:
49
- Total token count across all messages
50
-
51
- Raises:
52
- RuntimeError: If token counting fails
53
- """
54
-
55
-
56
- class OpenAITokenCounter(TokenCounter):
57
- """Token counter for OpenAI models using tiktoken."""
58
-
59
- # Official encoding mappings for OpenAI models
60
- ENCODING_MAP = {
61
- "gpt-5": "o200k_base",
62
- "gpt-4o": "o200k_base",
63
- "gpt-4": "cl100k_base",
64
- "gpt-3.5-turbo": "cl100k_base",
65
- }
66
-
67
- def __init__(self, model_name: str):
68
- """Initialize OpenAI token counter.
69
-
70
- Args:
71
- model_name: OpenAI model name to get correct encoding for
72
-
73
- Raises:
74
- RuntimeError: If encoding initialization fails
75
- """
76
- self.model_name = model_name
77
-
78
- import tiktoken
79
-
80
- try:
81
- # Get the appropriate encoding for this model
82
- encoding_name = self.ENCODING_MAP.get(model_name, "o200k_base")
83
- self.encoding = tiktoken.get_encoding(encoding_name)
84
- logger.debug(
85
- f"Initialized OpenAI token counter with {encoding_name} encoding"
86
- )
87
- except Exception as e:
88
- raise RuntimeError(
89
- f"Failed to initialize tiktoken encoding for {model_name}"
90
- ) from e
91
-
92
- def count_tokens(self, text: str) -> int:
93
- """Count tokens using tiktoken.
94
-
95
- Args:
96
- text: Text to count tokens for
97
-
98
- Returns:
99
- Exact token count using tiktoken
100
-
101
- Raises:
102
- RuntimeError: If token counting fails
103
- """
104
- try:
105
- return len(self.encoding.encode(text))
106
- except Exception as e:
107
- raise RuntimeError(
108
- f"Failed to count tokens for OpenAI model {self.model_name}"
109
- ) from e
110
-
111
- def count_message_tokens(self, messages: list[ModelMessage]) -> int:
112
- """Count tokens across all messages using tiktoken.
113
-
114
- Args:
115
- messages: List of PydanticAI messages
116
-
117
- Returns:
118
- Total token count for all messages
119
-
120
- Raises:
121
- RuntimeError: If token counting fails
122
- """
123
- total_text = self._extract_text_from_messages(messages)
124
- return self.count_tokens(total_text)
125
-
126
- def _extract_text_from_messages(self, messages: list[ModelMessage]) -> str:
127
- """Extract all text content from messages for token counting."""
128
- text_parts = []
129
-
130
- for message in messages:
131
- if hasattr(message, "parts"):
132
- for part in message.parts:
133
- if hasattr(part, "content") and isinstance(part.content, str):
134
- text_parts.append(part.content)
135
- else:
136
- # Handle non-text parts (tool calls, etc.)
137
- text_parts.append(str(part))
138
- else:
139
- # Handle messages without parts
140
- text_parts.append(str(message))
141
-
142
- return "\n".join(text_parts)
143
-
144
-
145
- class AnthropicTokenCounter(TokenCounter):
146
- """Token counter for Anthropic models using official client."""
147
-
148
- def __init__(self, model_name: str, api_key: str):
149
- """Initialize Anthropic token counter.
150
-
151
- Args:
152
- model_name: Anthropic model name for token counting
153
- api_key: Anthropic API key
154
-
155
- Raises:
156
- RuntimeError: If client initialization fails
157
- """
158
- self.model_name = model_name
159
- import anthropic
160
-
161
- try:
162
- self.client = anthropic.Anthropic(api_key=api_key)
163
- logger.debug(f"Initialized Anthropic token counter for {model_name}")
164
- except Exception as e:
165
- raise RuntimeError("Failed to initialize Anthropic client") from e
166
-
167
- def count_tokens(self, text: str) -> int:
168
- """Count tokens using Anthropic's official API.
169
-
170
- Args:
171
- text: Text to count tokens for
172
-
173
- Returns:
174
- Exact token count from Anthropic API
175
-
176
- Raises:
177
- RuntimeError: If API call fails
178
- """
179
- try:
180
- # Anthropic API expects messages format and model parameter
181
- result = self.client.messages.count_tokens(
182
- messages=[{"role": "user", "content": text}], model=self.model_name
183
- )
184
- return result.input_tokens
185
- except Exception as e:
186
- raise RuntimeError(
187
- f"Anthropic token counting API failed for {self.model_name}"
188
- ) from e
189
-
190
- def count_message_tokens(self, messages: list[ModelMessage]) -> int:
191
- """Count tokens across all messages using Anthropic API.
192
-
193
- Args:
194
- messages: List of PydanticAI messages
195
-
196
- Returns:
197
- Total token count for all messages
198
-
199
- Raises:
200
- RuntimeError: If token counting fails
201
- """
202
- total_text = self._extract_text_from_messages(messages)
203
- return self.count_tokens(total_text)
204
-
205
- def _extract_text_from_messages(self, messages: list[ModelMessage]) -> str:
206
- """Extract all text content from messages for token counting."""
207
- text_parts = []
208
-
209
- for message in messages:
210
- if hasattr(message, "parts"):
211
- for part in message.parts:
212
- if hasattr(part, "content") and isinstance(part.content, str):
213
- text_parts.append(part.content)
214
- else:
215
- # Handle non-text parts (tool calls, etc.)
216
- text_parts.append(str(part))
217
- else:
218
- # Handle messages without parts
219
- text_parts.append(str(message))
220
-
221
- return "\n".join(text_parts)
222
-
223
-
224
- class GoogleTokenCounter(TokenCounter):
225
- """Token counter for Google models using genai API."""
226
-
227
- def __init__(self, model_name: str, api_key: str):
228
- """Initialize Google token counter.
229
-
230
- Args:
231
- model_name: Google model name
232
- api_key: Google API key
233
-
234
- Raises:
235
- RuntimeError: If configuration fails
236
- """
237
- self.model_name = model_name
238
-
239
- import google.generativeai as genai
240
-
241
- try:
242
- genai.configure(api_key=api_key) # type: ignore[attr-defined]
243
- self.model = genai.GenerativeModel(model_name) # type: ignore[attr-defined]
244
- logger.debug(f"Initialized Google token counter for {model_name}")
245
- except Exception as e:
246
- raise RuntimeError(
247
- f"Failed to configure Google genai client for {model_name}"
248
- ) from e
249
-
250
- def count_tokens(self, text: str) -> int:
251
- """Count tokens using Google's genai API.
252
-
253
- Args:
254
- text: Text to count tokens for
255
-
256
- Returns:
257
- Exact token count from Google API
258
-
259
- Raises:
260
- RuntimeError: If API call fails
261
- """
262
- try:
263
- result = self.model.count_tokens(text)
264
- return result.total_tokens
265
- except Exception as e:
266
- raise RuntimeError(
267
- f"Google token counting API failed for {self.model_name}"
268
- ) from e
269
-
270
- def count_message_tokens(self, messages: list[ModelMessage]) -> int:
271
- """Count tokens across all messages using Google API.
272
-
273
- Args:
274
- messages: List of PydanticAI messages
275
-
276
- Returns:
277
- Total token count for all messages
278
-
279
- Raises:
280
- RuntimeError: If token counting fails
281
- """
282
- total_text = self._extract_text_from_messages(messages)
283
- return self.count_tokens(total_text)
284
-
285
- def _extract_text_from_messages(self, messages: list[ModelMessage]) -> str:
286
- """Extract all text content from messages for token counting."""
287
- text_parts = []
288
-
289
- for message in messages:
290
- if hasattr(message, "parts"):
291
- for part in message.parts:
292
- if hasattr(part, "content") and isinstance(part.content, str):
293
- text_parts.append(part.content)
294
- else:
295
- # Handle non-text parts (tool calls, etc.)
296
- text_parts.append(str(part))
297
- else:
298
- # Handle messages without parts
299
- text_parts.append(str(message))
300
-
301
- return "\n".join(text_parts)
302
-
303
-
304
- def get_token_counter(model_config: ModelConfig) -> TokenCounter:
305
- """Get appropriate token counter for the model provider (cached singleton).
306
-
307
- This function ensures that every provider has a proper token counting
308
- implementation without any fallbacks to estimation. Token counters are
309
- cached to avoid repeated initialization overhead.
310
-
311
- Args:
312
- model_config: Model configuration with provider and credentials
313
-
314
- Returns:
315
- Cached provider-specific token counter
316
-
317
- Raises:
318
- ValueError: If provider is not supported for token counting
319
- RuntimeError: If token counter initialization fails
320
- """
321
- # Create cache key from provider, model name, and API key
322
- cache_key = (
323
- model_config.provider.value,
324
- model_config.name,
325
- model_config.api_key[:10]
326
- if model_config.api_key
327
- else "no-key", # Partial key for cache
328
- )
329
-
330
- # Return cached instance if available
331
- if cache_key in _token_counter_cache:
332
- logger.debug(
333
- f"Reusing cached token counter for {model_config.provider.value}:{model_config.name}"
334
- )
335
- return _token_counter_cache[cache_key]
336
-
337
- # Create new instance and cache it
338
- logger.debug(
339
- f"Creating new token counter for {model_config.provider.value}:{model_config.name}"
340
- )
341
-
342
- counter: TokenCounter
343
- if model_config.provider == ProviderType.OPENAI:
344
- counter = OpenAITokenCounter(model_config.name)
345
- elif model_config.provider == ProviderType.ANTHROPIC:
346
- counter = AnthropicTokenCounter(model_config.name, model_config.api_key)
347
- elif model_config.provider == ProviderType.GOOGLE:
348
- counter = GoogleTokenCounter(model_config.name, model_config.api_key)
349
- else:
350
- raise ValueError(
351
- f"Unsupported provider for token counting: {model_config.provider}. "
352
- f"Supported providers: {[p.value for p in ProviderType]}"
353
- )
354
-
355
- # Cache the instance
356
- _token_counter_cache[cache_key] = counter
357
- logger.debug(
358
- f"Cached token counter for {model_config.provider.value}:{model_config.name}"
359
- )
360
-
361
- return counter
362
-
363
-
364
- def count_tokens_from_messages(
365
- messages: list[ModelMessage], model_config: ModelConfig
366
- ) -> int:
367
- """Count actual tokens from messages using provider-specific methods.
368
-
369
- This replaces the old estimation approach with accurate token counting
370
- using each provider's official APIs and libraries.
371
-
372
- Args:
373
- messages: List of messages to count tokens for
374
- model_config: Model configuration with provider info
375
-
376
- Returns:
377
- Exact token count for the messages
378
-
379
- Raises:
380
- ValueError: If provider is not supported
381
- RuntimeError: If token counting fails
382
- """
383
- counter = get_token_counter(model_config)
384
- return counter.count_message_tokens(messages)
385
-
386
-
387
- def count_post_summary_tokens(
388
- messages: list[ModelMessage], summary_index: int, model_config: ModelConfig
389
- ) -> int:
390
- """Count actual tokens from summary onwards for incremental compaction decisions.
391
-
392
- Args:
393
- messages: Full message history
394
- summary_index: Index of the last summary message
395
- model_config: Model configuration with provider info
396
-
397
- Returns:
398
- Exact token count from summary onwards
399
-
400
- Raises:
401
- ValueError: If provider is not supported
402
- RuntimeError: If token counting fails
403
- """
404
- if summary_index >= len(messages):
405
- return 0
406
-
407
- post_summary_messages = messages[summary_index:]
408
- return count_tokens_from_messages(post_summary_messages, model_config)
409
-
410
-
411
- def count_tokens_from_message_parts(
412
- messages: list[ModelMessage], model_config: ModelConfig
413
- ) -> int:
414
- """Count actual tokens from message parts for summarization requests.
415
-
416
- Args:
417
- messages: List of messages to count tokens for
418
- model_config: Model configuration with provider info
419
-
420
- Returns:
421
- Exact token count from message parts
422
-
423
- Raises:
424
- ValueError: If provider is not supported
425
- RuntimeError: If token counting fails
426
- """
427
- # For now, use the same logic as count_tokens_from_messages
428
- # This can be optimized later if needed for different counting strategies
429
- return count_tokens_from_messages(messages, model_config)