npcsh 1.1.1__tar.gz → 1.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. {npcsh-1.1.1 → npcsh-1.1.3}/PKG-INFO +10 -1
  2. {npcsh-1.1.1 → npcsh-1.1.3}/README.md +9 -0
  3. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/_state.py +160 -123
  4. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/alicanto.py +10 -5
  5. npcsh-1.1.3/npcsh/build.py +291 -0
  6. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/corca.py +263 -154
  7. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc.py +127 -46
  8. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/routes.py +229 -21
  9. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh.egg-info/PKG-INFO +10 -1
  10. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh.egg-info/SOURCES.txt +1 -0
  11. {npcsh-1.1.1 → npcsh-1.1.3}/setup.py +1 -1
  12. {npcsh-1.1.1 → npcsh-1.1.3}/LICENSE +0 -0
  13. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/__init__.py +0 -0
  14. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/guac.py +0 -0
  15. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/mcp_helpers.py +0 -0
  16. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/mcp_server.py +0 -0
  17. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/alicanto.npc +0 -0
  18. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/alicanto.png +0 -0
  19. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/corca.npc +0 -0
  20. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/corca.png +0 -0
  21. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/foreman.npc +0 -0
  22. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/frederic.npc +0 -0
  23. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/frederic4.png +0 -0
  24. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/guac.png +0 -0
  25. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
  26. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
  27. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
  28. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
  29. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/kg_search.jinx +0 -0
  30. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/memory_search.jinx +0 -0
  31. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
  32. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
  33. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/kadiefa.npc +0 -0
  34. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/kadiefa.png +0 -0
  35. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/npcsh.ctx +0 -0
  36. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/npcsh_sibiji.png +0 -0
  37. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/plonk.npc +0 -0
  38. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/plonk.png +0 -0
  39. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/plonkjr.npc +0 -0
  40. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/plonkjr.png +0 -0
  41. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/sibiji.npc +0 -0
  42. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/sibiji.png +0 -0
  43. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/spool.png +0 -0
  44. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npc_team/yap.png +0 -0
  45. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/npcsh.py +0 -0
  46. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/plonk.py +0 -0
  47. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/pti.py +0 -0
  48. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/spool.py +0 -0
  49. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/wander.py +0 -0
  50. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh/yap.py +0 -0
  51. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh.egg-info/dependency_links.txt +0 -0
  52. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh.egg-info/entry_points.txt +0 -0
  53. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh.egg-info/requires.txt +0 -0
  54. {npcsh-1.1.1 → npcsh-1.1.3}/npcsh.egg-info/top_level.txt +0 -0
  55. {npcsh-1.1.1 → npcsh-1.1.3}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: npcsh
3
- Version: 1.1.1
3
+ Version: 1.1.3
4
4
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
5
5
  Home-page: https://github.com/NPC-Worldwide/npcsh
6
6
  Author: Christopher Agostino
@@ -194,6 +194,14 @@ and you will enter the NPC shell. Additionally, the pip installation includes th
194
194
  /corca --mcp-server-path /path.to.server.py
195
195
  ```
196
196
 
197
+ - **Build an NPC Team**:
198
+
199
+ ``` bash
200
+ npc build flask --output ./dist --port 5337
201
+ npc build docker --output ./deploy
202
+ npc build cli --output ./bin
203
+ npc build static --api_url https://api.example.com
204
+ ```
197
205
 
198
206
  # NPC Data Layer
199
207
 
@@ -217,6 +225,7 @@ Importantly, users can switch easily between the NPCs they are chatting with by
217
225
  - activated by invoking `/<command> ...` in `npcsh`, macros can be called in bash or through the `npc` CLI. In our examples, we provide both `npcsh` calls as well as bash calls with the `npc` cli where relevant. For converting any `/<command>` in `npcsh` to a bash version, replace the `/` with `npc ` and the macro command will be invoked as a positional argument. Some, like breathe, flush,
218
226
 
219
227
  - `/alicanto` - Conduct deep research with multiple perspectives, identifying gold insights and cliff warnings. Usage: `/alicanto 'query to be researched' --num-npcs <int> --depth <int>`
228
  - `/build` - Builds the current npc team to an executable format. Usage: `/build <output[flask,docker,cli,static]> --options`
220
229
  - `/brainblast` - Execute an advanced chunked search on command history. Usage: `/brainblast 'query' --top_k 10`
221
230
  - `/breathe` - Condense context on a regular cadence. Usage: `/breathe -p <provider: NPCSH_CHAT_PROVIDER> -m <model: NPCSH_CHAT_MODEL>`
222
231
  - `/compile` - Compile NPC profiles. Usage: `/compile <path_to_npc> `
@@ -94,6 +94,14 @@ and you will enter the NPC shell. Additionally, the pip installation includes th
94
94
  /corca --mcp-server-path /path.to.server.py
95
95
  ```
96
96
 
97
+ - **Build an NPC Team**:
98
+
99
+ ``` bash
100
+ npc build flask --output ./dist --port 5337
101
+ npc build docker --output ./deploy
102
+ npc build cli --output ./bin
103
+ npc build static --api_url https://api.example.com
104
+ ```
97
105
 
98
106
  # NPC Data Layer
99
107
 
@@ -117,6 +125,7 @@ Importantly, users can switch easily between the NPCs they are chatting with by
117
125
  - activated by invoking `/<command> ...` in `npcsh`, macros can be called in bash or through the `npc` CLI. In our examples, we provide both `npcsh` calls as well as bash calls with the `npc` cli where relevant. For converting any `/<command>` in `npcsh` to a bash version, replace the `/` with `npc ` and the macro command will be invoked as a positional argument. Some, like breathe, flush,
118
126
 
119
127
  - `/alicanto` - Conduct deep research with multiple perspectives, identifying gold insights and cliff warnings. Usage: `/alicanto 'query to be researched' --num-npcs <int> --depth <int>`
128
  - `/build` - Builds the current npc team to an executable format. Usage: `/build <output[flask,docker,cli,static]> --options`
120
129
  - `/brainblast` - Execute an advanced chunked search on command history. Usage: `/brainblast 'query' --top_k 10`
121
130
  - `/breathe` - Condense context on a regular cadence. Usage: `/breathe -p <provider: NPCSH_CHAT_PROVIDER> -m <model: NPCSH_CHAT_MODEL>`
122
131
  - `/compile` - Compile NPC profiles. Usage: `/compile <path_to_npc> `
@@ -645,7 +645,6 @@ BASH_COMMANDS = [
645
645
  "fg",
646
646
  "getopts",
647
647
  "hash",
648
- "help",
649
648
  "history",
650
649
  "if",
651
650
  "jobs",
@@ -2206,34 +2205,6 @@ def execute_command(
2206
2205
  npc_name = state.npc.name if isinstance(state.npc, NPC) else "__none__"
2207
2206
  team_name = state.team.name if state.team else "__none__"
2208
2207
 
2209
- if command_history:
2210
- relevant_memories = get_relevant_memories(
2211
- command_history=command_history,
2212
- npc_name=npc_name,
2213
- team_name=team_name,
2214
- path=state.current_path,
2215
- query=command,
2216
- max_memories=5,
2217
- state=state
2218
- )
2219
- print('Memory jogged...')
2220
- print(relevant_memories)
2221
-
2222
- if relevant_memories:
2223
- memory_context = "\n".join([
2224
- f"- {m.get('final_memory', '')}"
2225
- for m in relevant_memories
2226
- ])
2227
- memory_msg = {
2228
- "role": "system",
2229
- "content": f"Relevant memories:\n{memory_context}"
2230
- }
2231
- if not state.messages or \
2232
- state.messages[0].get("role") != "system":
2233
- state.messages.insert(0, memory_msg)
2234
- else:
2235
- state.messages[0]["content"] += \
2236
- f"\n\n{memory_msg['content']}"
2237
2208
 
2238
2209
  original_command_for_embedding = command
2239
2210
  commands = split_by_pipes(command)
@@ -2755,106 +2726,155 @@ def process_result(
2755
2726
  team=team_name,
2756
2727
  )
2757
2728
 
2758
- conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
2759
- engine = command_history.engine
2729
+ result_state.turn_count += 1
2760
2730
 
2761
- memory_examples = command_history.get_memory_examples_for_context(
2762
- npc=npc_name,
2763
- team=team_name,
2764
- directory_path=result_state.current_path
2765
- )
2766
-
2767
- memory_context = format_memory_context(memory_examples)
2768
-
2769
- approved_facts = []
2770
- try:
2771
- facts = get_facts(
2772
- conversation_turn_text,
2773
- model=active_npc.model,
2774
- provider=active_npc.provider,
2775
- npc=active_npc,
2776
- context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
2731
+ if result_state.turn_count % 10 == 0:
2732
+ approved_facts = []
2733
+
2734
+ conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
2735
+ engine = command_history.engine
2736
+
2737
+ memory_examples = command_history.get_memory_examples_for_context(
2738
+ npc=npc_name,
2739
+ team=team_name,
2740
+ directory_path=result_state.current_path
2777
2741
  )
2778
2742
 
2779
- if facts:
2780
- memories_for_approval = []
2781
- for i, fact in enumerate(facts):
2782
- memories_for_approval.append({
2783
- "memory_id": f"temp_{i}",
2784
- "content": fact['statement'],
2785
- "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
2786
- "npc": npc_name,
2787
- "fact_data": fact
2788
- })
2789
-
2790
- approvals = memory_approval_ui(memories_for_approval)
2743
+ memory_context = format_memory_context(memory_examples)
2744
+
2745
+ try:
2746
+ facts = get_facts(
2747
+ conversation_turn_text,
2748
+ model=active_npc.model,
2749
+ provider=active_npc.provider,
2750
+ npc=active_npc,
2751
+ context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
2752
+ )
2791
2753
 
2792
- for approval in approvals:
2793
- fact_data = next(m['fact_data'] for m in memories_for_approval
2794
- if m['memory_id'] == approval['memory_id'])
2754
+ if facts:
2755
+ num_memories = len(facts)
2756
+ print(colored(
2757
+ f"\nThere are {num_memories} potential memories. Do you want to review them now?",
2758
+ "cyan"
2759
+ ))
2760
+ review_choice = input("[y/N]: ").strip().lower()
2795
2761
 
2796
- command_history.add_memory_to_database(
2797
- message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
2798
- conversation_id=result_state.conversation_id,
2799
- npc=npc_name,
2800
- team=team_name,
2801
- directory_path=result_state.current_path,
2802
- initial_memory=fact_data['statement'],
2803
- status=approval['decision'],
2804
- model=active_npc.model,
2805
- provider=active_npc.provider,
2806
- final_memory=approval.get('final_memory')
2807
- )
2762
+ if review_choice == 'y':
2763
+ memories_for_approval = []
2764
+ for i, fact in enumerate(facts):
2765
+ memories_for_approval.append({
2766
+ "memory_id": f"temp_{i}",
2767
+ "content": fact['statement'],
2768
+ "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
2769
+ "npc": npc_name,
2770
+ "fact_data": fact
2771
+ })
2772
+
2773
+ approvals = memory_approval_ui(memories_for_approval)
2774
+
2775
+ for approval in approvals:
2776
+ fact_data = next(
2777
+ m['fact_data'] for m in memories_for_approval
2778
+ if m['memory_id'] == approval['memory_id']
2779
+ )
2780
+
2781
+ command_history.add_memory_to_database(
2782
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
2783
+ conversation_id=result_state.conversation_id,
2784
+ npc=npc_name,
2785
+ team=team_name,
2786
+ directory_path=result_state.current_path,
2787
+ initial_memory=fact_data['statement'],
2788
+ status=approval['decision'],
2789
+ model=active_npc.model,
2790
+ provider=active_npc.provider,
2791
+ final_memory=approval.get('final_memory')
2792
+ )
2793
+
2794
+ if approval['decision'] in ['human-approved', 'human-edited']:
2795
+ approved_fact = {
2796
+ 'statement': approval.get('final_memory') or fact_data['statement'],
2797
+ 'source_text': fact_data.get('source_text', ''),
2798
+ 'type': fact_data.get('type', 'explicit'),
2799
+ 'generation': 0
2800
+ }
2801
+ approved_facts.append(approved_fact)
2802
+ else:
2803
+ for i, fact in enumerate(facts):
2804
+ command_history.add_memory_to_database(
2805
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
2806
+ conversation_id=result_state.conversation_id,
2807
+ npc=npc_name,
2808
+ team=team_name,
2809
+ directory_path=result_state.current_path,
2810
+ initial_memory=fact['statement'],
2811
+ status='skipped',
2812
+ model=active_npc.model,
2813
+ provider=active_npc.provider,
2814
+ final_memory=None
2815
+ )
2816
+
2817
+ print(colored(
2818
+ f"Marked {num_memories} memories as skipped.",
2819
+ "yellow"
2820
+ ))
2808
2821
 
2809
- if approval['decision'] in ['human-approved', 'human-edited']:
2810
- approved_fact = {
2811
- 'statement': approval.get('final_memory') or fact_data['statement'],
2812
- 'source_text': fact_data.get('source_text', ''),
2813
- 'type': fact_data.get('type', 'explicit'),
2814
- 'generation': 0
2815
- }
2816
- approved_facts.append(approved_fact)
2817
-
2818
- except Exception as e:
2819
- print(colored(f"Memory generation error: {e}", "yellow"))
2820
-
2821
- if result_state.build_kg and approved_facts:
2822
- try:
2823
- if not should_skip_kg_processing(user_input, final_output_str):
2824
- npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
2825
- evolved_npc_kg, _ = kg_evolve_incremental(
2826
- existing_kg=npc_kg,
2827
- new_facts=approved_facts,
2828
- model=active_npc.model,
2829
- provider=active_npc.provider,
2830
- npc=active_npc,
2831
- get_concepts=True,
2832
- link_concepts_facts=False,
2833
- link_concepts_concepts=False,
2834
- link_facts_facts=False,
2835
- )
2836
- save_kg_to_db(
2837
- engine,
2838
- evolved_npc_kg,
2839
- team_name,
2840
- npc_name,
2841
- result_state.current_path
2842
- )
2843
2822
  except Exception as e:
2844
- print(colored(f"Error during real-time KG evolution: {e}", "red"))
2823
+ print(colored(f"Memory generation error: {e}", "yellow"))
2845
2824
 
2846
- result_state.turn_count += 1
2825
+ if result_state.build_kg and approved_facts:
2826
+ try:
2827
+ if not should_skip_kg_processing(user_input, final_output_str):
2828
+ npc_kg = load_kg_from_db(
2829
+ engine,
2830
+ team_name,
2831
+ npc_name,
2832
+ result_state.current_path
2833
+ )
2834
+ evolved_npc_kg, _ = kg_evolve_incremental(
2835
+ existing_kg=npc_kg,
2836
+ new_facts=approved_facts,
2837
+ model=active_npc.model,
2838
+ provider=active_npc.provider,
2839
+ npc=active_npc,
2840
+ get_concepts=True,
2841
+ link_concepts_facts=False,
2842
+ link_concepts_concepts=False,
2843
+ link_facts_facts=False,
2844
+ )
2845
+ save_kg_to_db(
2846
+ engine,
2847
+ evolved_npc_kg,
2848
+ team_name,
2849
+ npc_name,
2850
+ result_state.current_path
2851
+ )
2852
+ except Exception as e:
2853
+ print(colored(
2854
+ f"Error during real-time KG evolution: {e}",
2855
+ "red"
2856
+ ))
2847
2857
 
2848
- if result_state.turn_count % 10 == 0:
2849
- print(colored("\nChecking for potential team improvements...", "cyan"))
2858
+ print(colored(
2859
+ "\nChecking for potential team improvements...",
2860
+ "cyan"
2861
+ ))
2850
2862
  try:
2851
- summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
2863
+ summary = breathe(
2864
+ messages=result_state.messages[-20:],
2865
+ npc=active_npc
2866
+ )
2852
2867
  characterization = summary.get('output')
2853
2868
 
2854
2869
  if characterization and result_state.team:
2855
- team_ctx_path = get_team_ctx_path(result_state.team.team_path)
2870
+ team_ctx_path = get_team_ctx_path(
2871
+ result_state.team.team_path
2872
+ )
2856
2873
  if not team_ctx_path:
2857
- team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
2874
+ team_ctx_path = os.path.join(
2875
+ result_state.team.team_path,
2876
+ "team.ctx"
2877
+ )
2858
2878
 
2859
2879
  ctx_data = {}
2860
2880
  if os.path.exists(team_ctx_path):
@@ -2879,11 +2899,20 @@ def process_result(
2879
2899
  suggestion = response.get("response", {}).get("suggestion")
2880
2900
 
2881
2901
  if suggestion:
2882
- new_context = (current_context + " " + suggestion).strip()
2883
- print(colored(f"{npc_name} suggests updating team context:", "yellow"))
2884
- print(f" - OLD: {current_context}\n + NEW: {new_context}")
2902
+ new_context = (
2903
+ current_context + " " + suggestion
2904
+ ).strip()
2905
+ print(colored(
2906
+ f"{npc_name} suggests updating team context:",
2907
+ "yellow"
2908
+ ))
2909
+ print(
2910
+ f" - OLD: {current_context}\n + NEW: {new_context}"
2911
+ )
2885
2912
 
2886
- choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
2913
+ choice = input(
2914
+ "Apply? [y/N/e(dit)]: "
2915
+ ).strip().lower()
2887
2916
 
2888
2917
  if choice == 'y':
2889
2918
  ctx_data['context'] = new_context
@@ -2891,21 +2920,29 @@ def process_result(
2891
2920
  yaml.dump(ctx_data, f)
2892
2921
  print(colored("Team context updated.", "green"))
2893
2922
  elif choice == 'e':
2894
- edited_context = input(f"Edit context [{new_context}]: ").strip()
2923
+ edited_context = input(
2924
+ f"Edit context [{new_context}]: "
2925
+ ).strip()
2895
2926
  if edited_context:
2896
2927
  ctx_data['context'] = edited_context
2897
2928
  else:
2898
2929
  ctx_data['context'] = new_context
2899
2930
  with open(team_ctx_path, 'w') as f:
2900
2931
  yaml.dump(ctx_data, f)
2901
- print(colored("Team context updated with edits.", "green"))
2932
+ print(colored(
2933
+ "Team context updated with edits.",
2934
+ "green"
2935
+ ))
2902
2936
  else:
2903
2937
  print("Suggestion declined.")
2904
2938
  except Exception as e:
2905
2939
  import traceback
2906
- print(colored(f"Could not generate team suggestions: {e}", "yellow"))
2940
+ print(colored(
2941
+ f"Could not generate team suggestions: {e}",
2942
+ "yellow"
2943
+ ))
2907
2944
  traceback.print_exc()
2908
-
2945
+
2909
2946
  initial_state = ShellState(
2910
2947
  conversation_id=start_new_conversation(),
2911
2948
  stream_output=NPCSH_STREAM_OUTPUT,
@@ -12,6 +12,16 @@ from dataclasses import dataclass, asdict, field
12
12
  from pathlib import Path
13
13
  from concurrent.futures import ThreadPoolExecutor
14
14
 
15
+
16
+ try:
17
+ from datasets import load_dataset
18
+ except:
19
+ load_dataset = None
20
+ from sklearn.feature_extraction.text import TfidfVectorizer
21
+ from sklearn.metrics.pairwise import cosine_similarity
22
+
23
+
24
+
15
25
  from npcpy.tools import auto_tools
16
26
  from npcpy.llm_funcs import get_llm_response
17
27
  from npcpy.data.web import search_web
@@ -88,11 +98,6 @@ def list_files(directory: str = ".") -> List[str]:
88
98
  return os.listdir(directory)
89
99
 
90
100
 
91
-
92
- from datasets import load_dataset
93
- from sklearn.feature_extraction.text import TfidfVectorizer
94
- from sklearn.metrics.pairwise import cosine_similarity
95
-
96
101
  DATASET_CACHE = None
97
102
  SEARCH_INDEX = None
98
103