npcsh 0.3.29__py3-none-any.whl → 0.3.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. npcsh/cli.py +30 -10
  2. npcsh/image.py +0 -2
  3. npcsh/llm_funcs.py +32 -23
  4. npcsh/npc_compiler.py +35 -4
  5. npcsh/npc_team/npcsh.ctx +8 -2
  6. npcsh/npc_team/tools/bash_executer.tool +32 -0
  7. npcsh/npc_team/tools/code_executor.tool +16 -0
  8. npcsh/npc_team/tools/generic_search.tool +9 -1
  9. npcsh/npc_team/tools/npcsh_executor.tool +9 -0
  10. npcsh/npc_team/tools/sql_executor.tool +2 -2
  11. npcsh/search.py +15 -8
  12. npcsh/shell.py +103 -89
  13. npcsh/shell_helpers.py +75 -52
  14. npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +32 -0
  15. npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +16 -0
  16. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/generic_search.tool +9 -1
  17. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/npcsh.ctx +8 -2
  18. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +9 -0
  19. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/sql_executor.tool +2 -2
  20. {npcsh-0.3.29.dist-info → npcsh-0.3.31.dist-info}/METADATA +12 -21
  21. {npcsh-0.3.29.dist-info → npcsh-0.3.31.dist-info}/RECORD +41 -35
  22. {npcsh-0.3.29.dist-info → npcsh-0.3.31.dist-info}/WHEEL +1 -1
  23. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/calculator.tool +0 -0
  24. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/celona.npc +0 -0
  25. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/corca.npc +0 -0
  26. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/eriane.npc +0 -0
  27. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/foreman.npc +0 -0
  28. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/image_generation.tool +0 -0
  29. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/lineru.npc +0 -0
  30. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/local_search.tool +0 -0
  31. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/maurawa.npc +0 -0
  32. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/raone.npc +0 -0
  33. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
  34. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  35. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/slean.npc +0 -0
  36. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
  37. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/turnic.npc +0 -0
  38. {npcsh-0.3.29.data → npcsh-0.3.31.data}/data/npcsh/npc_team/welxor.npc +0 -0
  39. {npcsh-0.3.29.dist-info → npcsh-0.3.31.dist-info}/entry_points.txt +0 -0
  40. {npcsh-0.3.29.dist-info → npcsh-0.3.31.dist-info}/licenses/LICENSE +0 -0
  41. {npcsh-0.3.29.dist-info → npcsh-0.3.31.dist-info}/top_level.txt +0 -0
npcsh/cli.py CHANGED
@@ -33,6 +33,7 @@ from .llm_funcs import (
     get_stream,
     get_conversation,
 )
+from .plonk import plonk, action_space
 from .search import search_web
 from .shell_helpers import *
 import os
@@ -54,10 +55,12 @@ def main():
         "assemble",
         "build",
         "compile",
+        "chat",
         "init",
         "new",
         "plonk",
         "sample",
+        "search",
         "select",
         "serve",
         "spool",
@@ -165,6 +168,10 @@ def main():
         "directory", nargs="?", default=".", help="Directory to build project in"
     )
 
+    # chat
+    chat_parser = subparsers.add_parser("chat", help="chat with an NPC")
+    chat_parser.add_argument("-n", "--npc_name", help="name of npc")
+
     # Compile command
     compile_parser = subparsers.add_parser("compile", help="Compile an NPC")
     compile_parser.add_argument("path", help="Path to NPC file")
@@ -300,7 +307,7 @@ def main():
 
     # Web search
     search_parser = subparsers.add_parser("search", help="search the web")
-    search_parser.add_argument("query", help="search query")
+    search_parser.add_argument("--query", "-q", help="search query")
     search_parser.add_argument(
         "--search_provider",
         "-sp",
@@ -317,7 +324,7 @@ def main():
 
     # Voice chat
     whisper_parser = subparsers.add_parser("whisper", help="start voice chat")
-    whisper_parser.add_argument("npc_name", help="name of the NPC to chat with")
+    whisper_parser.add_argument("-n", "--npc_name", help="name of the NPC to chat with")
 
     args = parser.parse_args()
 
@@ -367,6 +374,13 @@ def main():
             port=args.port if args.port else 5337,
             cors_origins=cors_origins,
         )
+    elif args.command == "chat":
+        npc_name = args.npc_name
+        npc_path = get_npc_path(npc_name, NPCSH_DB_PATH)
+        current_npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
+        return enter_spool_mode(
+            model=args.model, provider=args.provider, npc=current_npc
+        )
 
     elif args.command == "init":
         if args.templates:
@@ -395,16 +409,18 @@ def main():
         )
 
     elif args.command == "compile":
-        compile_npc(args.path)
+        npc_compiler = NPCCompiler(npc_directory, NPCSH_DB_PATH)
+        compiled = npc_compiler.compile(args.path)
+        print("NPC compiled to:", compiled)
 
     elif args.command == "plonk":
         task = args.task or args.spell
         npc_name = args.name
-        run_plonk_task(
-            task=task,
-            npc_name=npc_name,
-            model=args.model or NPCSH_REASONING_MODEL,
-            provider=args.provider or NPCSH_REASONING_PROVIDER,
+        plonk(
+            task,
+            action_space,
+            model=args.model or NPCSH_CHAT_MODEL,
+            provider=args.provider or NPCSH_CHAT_PROVIDER,
         )
 
     elif args.command == "sample":
@@ -443,10 +459,14 @@ def main():
             model=args.model,
             provider=args.provider,
         )
-        print(result)
+        print(result["output"])
 
     elif args.command == "whisper":
-        start_whisper_chat(args.npc_name)
+        npc_name = args.npc_name
+        npc_path = get_npc_path(npc_name, NPCSH_DB_PATH)
+        current_npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
+
+        enter_whisper_mode(npc=current_npc)
 
     elif args.command == "tool":
         result = invoke_tool(
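
Note: with the changes above, the `search` and `whisper` subcommands take their arguments as optional flags rather than positionals. A self-contained sketch of the resulting parser behaviour follows; it is an illustration, not npcsh source, and the program name `npc` is an assumption not taken from this diff.

    # Illustration only: mirrors the add_argument calls shown in the diff above.
    import argparse

    parser = argparse.ArgumentParser(prog="npc")  # program name is an assumption
    sub = parser.add_subparsers(dest="command")

    search_parser = sub.add_parser("search", help="search the web")
    search_parser.add_argument("--query", "-q", help="search query")

    whisper_parser = sub.add_parser("whisper", help="start voice chat")
    whisper_parser.add_argument("-n", "--npc_name", help="name of the NPC to chat with")

    print(parser.parse_args(["search", "-q", "weather in tokyo"]))
    print(parser.parse_args(["whisper", "-n", "sibiji"]))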
npcsh/image.py CHANGED
@@ -283,8 +283,6 @@ def analyze_image(
             api_key=api_key,
         )
 
-        print(response)
-        # Add to command history *inside* the try block
         return response
 
     except Exception as e:
npcsh/llm_funcs.py CHANGED
@@ -802,15 +802,20 @@ ReAct choices then will enter reasoning flow
 
     prompt = f"""
     A user submitted this query: {command}
+
     Determine the nature of the user's request:
-    1. Is it a specific request for a task that could be accomplished via a bash command or a simple python script that could be executed in a single bash call?
-    2. Should a tool be invoked to fulfill the request?
-    3. Is it a general question that requires an informative answer or a highly specific question that
+
+    1. Should a tool be invoked to fulfill the request?
+
+    2. Is it a general question that requires an informative answer or a highly specific question that
     requires inforrmation on the web?
-    4. Would this question be best answered by an alternative NPC?
-    5. Is it a complex request that actually requires more than one
-    tool to be called, perhaps in a sequence?
-    6. is there a need for the user to provide additional input to fulfill the request?
+
+    3. Would this question be best answered by an alternative NPC?
+
+    4. Is it a complex request that actually requires more than one
+    tool to be called, perhaps in a sequence?
+
+    5. is there a need for the user to provide additional input to fulfill the request?
 
 
 
@@ -877,8 +882,12 @@ ReAct choices then will enter reasoning flow
 
     prompt += f"""
     In considering how to answer this, consider:
-    - Whether it can be answered via a bash command on the user's computer. e.g. if a user is curious about file sizes within a directory or about processes running on their computer, these are likely best handled by a bash command.
-    - Whether more context from the user is required to adequately answer the question. e.g. if a user asks for a joke about their favorite city but they don't include the city , it would be helpful to ask for that information. Similarly, if a user asks to open a browser and to check the weather in a city, it would be helpful to ask for the city and which website or source to use.
+
+    - Whether more context from the user is required to adequately answer the question.
+    e.g. if a user asks for a joke about their favorite city but they don't include the city ,
+    it would be helpful to ask for that information. Similarly, if a user asks to open a browser
+    and to check the weather in a city, it would be helpful to ask for the city and which website
+    or source to use.
     - Whether a tool should be used.
 
 
@@ -887,14 +896,17 @@ ReAct choices then will enter reasoning flow
     extra tools or agent passes.
     Only use tools or pass to other NPCs
     when it is obvious that the answer needs to be as up-to-date as possible. For example,
-    a question about where mount everest is does not necessarily need to be answered by a tool call or an agent pass.
+    a question about where mount everest is does not necessarily need to be answered by a tool call or an agent pass.
     Similarly, if a user asks to explain the plot of the aeneid, this can be answered without a tool call or agent pass.
-    If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is, then a tool call or agent pass may be appropriate. If a user asks about the process using the most ram or the biggest file in a directory, a bash command will be most appropriate.
+
+    If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is,
+    then a tool call or agent pass may be appropriate.
+
     Tools are valuable but their use should be limited and purposeful to
     ensure the best user experience.
 
     Respond with a JSON object containing:
-    - "action": one of ["execute_command", "invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
+    - "action": one of ["invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
     - "tool_name": : if action is "invoke_tool": the name of the tool to use.
         else if action is "execute_sequence", a list of tool names to use.
     - "explanation": a brief explanation of why you chose this action.
@@ -907,7 +919,7 @@ ReAct choices then will enter reasoning flow
 
     The format of the JSON object is:
     {{
-    "action": "execute_command" | "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
+    "action": "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
     "tool_name": "<tool_name(s)_if_applicable>",
     "explanation": "<your_explanation>",
     "npc_name": "<npc_name(s)_if_applicable>"
@@ -915,7 +927,9 @@ ReAct choices then will enter reasoning flow
 
     If you execute a sequence, ensure that you have a specified NPC for each tool use.
 
-    Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING. There should be no prefix 'json'. Start straight with the opening curly brace.
+    Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING.
+    There should be no leading ```json.
+
     """
 
 
@@ -932,11 +946,6 @@ ReAct choices then will enter reasoning flow
         {context}
 
         """
-
-    # print(prompt)
-
-    # For action determination, we don't need to pass the conversation messages to avoid confusion
-    # print(npc, model, provider)
     action_response = get_llm_response(
         prompt,
         model=model,
@@ -965,12 +974,11 @@ ReAct choices then will enter reasoning flow
     else:
         response_content_parsed = response_content
 
-    # Proceed according to the action specified
     action = response_content_parsed.get("action")
     explanation = response_content["explanation"]
-    # Include the user's command in the conversation messages
     print(f"action chosen: {action}")
     print(f"explanation given: {explanation}")
+
     if response_content_parsed.get("tool_name"):
         print(f"tool name: {response_content_parsed.get('tool_name')}")
 
@@ -1316,8 +1324,9 @@ def handle_tool_call(
             stream=stream,
             messages=messages,
         )
-        if "Error" in tool_output:
-            raise Exception(tool_output)
+        if not stream:
+            if "Error" in tool_output:
+                raise Exception(tool_output)
     except Exception as e:
         # diagnose_problem = get_llm_response(
         ## f"""a problem has occurred.
npcsh/npc_compiler.py CHANGED
@@ -501,6 +501,18 @@ class SilentUndefined(Undefined):
         return ""
 
 
+class Context:
+    def __init__(self, context=None, mcp_servers=None, databases=None, files=None):
+        self.context = context
+        self.mcp_servers = mcp_servers
+        self.databases = databases
+        self.files = files
+
+    def load_context_file(self, path):
+        with open(path, "r") as f:
+            self.context = yaml.safe_load(f)
+
+
 class Tool:
     def __init__(self, tool_data: dict):
         if not tool_data or not isinstance(tool_data, dict):
@@ -551,6 +563,7 @@ class Tool:
 
         # Process Steps
         for i, step in enumerate(self.steps):
+
             context = self.execute_step(
                 step,
                 context,
@@ -564,6 +577,7 @@ class Tool:
             # if i is the last step and the user has reuqested a streaming output
             # then we should return the stream
             if i == len(self.steps) - 1 and stream:  # this was causing the big issue X:
+                print("tool successful, passing output to stream")
                 return context
         # Return the final output
         if context.get("output") is not None:
@@ -592,8 +606,14 @@ class Tool:
         except Exception as e:
             print(f"Error rendering template: {e}")
             rendered_code = code
-
-        if engine == "natural":
+        # render engine if necessary
+        try:
+            template = jinja_env.from_string(engine)
+            rendered_engine = template.render(**context)
+        except:
+            print("error rendering engine")
+            rendered_engine = engine
+        if rendered_engine == "natural":
             if len(rendered_code.strip()) > 0:
                 # print(f"Executing natural language step: {rendered_code}")
                 if stream:
@@ -610,7 +630,7 @@ class Tool:
                 context["llm_response"] = response_text
                 context["results"] = response_text
 
-        elif engine == "python":
+        elif rendered_engine == "python":
             exec_globals = {
                 "__builtins__": __builtins__,
                 "npc": npc,
@@ -639,12 +659,23 @@ class Tool:
             exec_env = context.copy()
             try:
                 exec(rendered_code, exec_globals, new_locals)
+                exec_env.update(new_locals)
+
+                context.update(exec_env)
+
                 exec_env.update(new_locals)
                 context.update(exec_env)
-                # If output is set, also set it as results
+
+                # Add this line to explicitly copy the output
+                if "output" in new_locals:
+                    context["output"] = new_locals["output"]
+
+                # Then your existing code
                 if "output" in exec_env:
                     if exec_env["output"] is not None:
                         context["results"] = exec_env["output"]
+                        print("result from code execution: ", exec_env["output"])
+
             except NameError as e:
                 tb_lines = traceback.format_exc().splitlines()
                 limited_tb = (
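
The new Context class above is a small container for .ctx fields (context, mcp_servers, databases, files) whose load_context_file parses a ctx file with yaml.safe_load. A minimal usage sketch follows, assuming the class is exposed at module level in npcsh.npc_compiler as the diff suggests; the file path is hypothetical.

    from npcsh.npc_compiler import Context  # assumes top-level export, per the diff above

    ctx = Context()
    ctx.load_context_file("npc_team/npcsh.ctx")  # hypothetical path
    print(ctx.context)  # the parsed YAML mapping, e.g. context / databases / mcp_servers keys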
npcsh/npc_team/npcsh.ctx CHANGED
@@ -1,5 +1,11 @@
-text: |
+context: |
   The npcsh NPC team is devoted to providing a safe and helpful
   environment for users where they can work and be as successful as possible.
   npcsh is a command-line tool that makes it easy for users to harness
-  the power of LLMs from a command line shell.
+  the power of LLMs from a command line shell.
+databases:
+  - ~/npcsh_history.db
+mcp_servers:
+  - /path/to/mcp/server.py
+  - @npm for server
+
npcsh/npc_team/tools/bash_executer.tool ADDED
@@ -0,0 +1,32 @@
+tool_name: bash_executor
+description: Execute bash queries.
+inputs:
+  - bash_command
+  - user_request
+steps:
+  - engine: python
+    code: |
+      import subprocess
+      import os
+      cmd = '{{bash_command}}' # Properly quote the command input
+      def run_command(cmd):
+          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+          stdout, stderr = process.communicate()
+          if stderr:
+              print(f"Error: {stderr.decode('utf-8')}")
+              return stderr
+          return stdout
+      result = run_command(cmd)
+      output = result.decode('utf-8')
+
+  - engine: natural
+    code: |
+
+      Here is the result of the bash command:
+      ```
+      {{ output }}
+      ```
+      This was the original user request: {{ user_request }}
+
+      Please provide a response accordingly.
+
npcsh/npc_team/tools/code_executor.tool ADDED
@@ -0,0 +1,16 @@
+tool_name: code_executor
+description: Execute scripts with a specified language. choose from python, bash, R, or javascript. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements.
+inputs:
+  - code
+  - language
+steps:
+  - engine: '{{ language }}'
+    code: |
+      {{code}}
+  - engine: natural
+    code: |
+      Here is the result of the code execution that an agent ran.
+      ```
+      {{ output }}
+      ```
+      please provide a response accordingly.
npcsh/npc_team/tools/generic_search.tool CHANGED
@@ -2,13 +2,21 @@ tool_name: "internet_search"
 description: Searches the web for information based on a query in order to verify timiely details (e.g. current events) or to corroborate information in uncertain situations. Should be mainly only used when users specifically request a search, otherwise an LLMs basic knowledge should be sufficient.
 inputs:
   - query
+  - provider: ''
 steps:
   - engine: "python"
     code: |
       from npcsh.search import search_web
+      from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER
       query = "{{ query }}"
+      provider = '{{ provider }}'
+      if provider.strip() != '':
+          results = search_web(query, num_results=5, provider = provider)
+      else:
+          results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
+
       print('QUERY in tool', query)
-      results = search_web(query, num_results=5)
+      results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
       print('RESULTS in tool', results)
   - engine: "natural"
     code: |
npcsh/npc_team/tools/npcsh_executor.tool ADDED
@@ -0,0 +1,9 @@
+tool_name: npcsh_executor
+description: Execute npcsh commands. Use the macro commands.
+inputs:
+  - code
+  - language
+steps:
+  - engine: "{{language}}"
+    code: |
+      {{code}}
npcsh/npc_team/tools/sql_executor.tool CHANGED
@@ -1,5 +1,5 @@
-tool_name: sql_executor
-description: Execute SQL queries on the ~/npcsh_history.db and display the result. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
+tool_name: data_pull
+description: Execute queries on the ~/npcsh_history.db to pull data. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
 inputs:
   - sql_query
   - interpret: false # Note that this is not a boolean, but a string
npcsh/search.py CHANGED
@@ -5,6 +5,7 @@ import os
 
 from bs4 import BeautifulSoup
 from duckduckgo_search import DDGS
+from duckduckgo_search.exceptions import DuckDuckGoSearchException
 
 try:
     from googlesearch import search
@@ -41,7 +42,6 @@ def search_perplexity(
         "max_tokens": max_tokens,
         "temperature": temperature,
         "top_p": top_p,
-        "search_domain_filter": ["perplexity.ai"],
         "return_images": False,
         "return_related_questions": False,
         "search_recency_filter": "month",
@@ -58,6 +58,7 @@ def search_perplexity(
     # Make the POST request to the API
     response = requests.post(url, json=payload, headers=headers)
     response = json.loads(response.text)
+    print(response)
     return [response["choices"][0]["message"]["content"], response["citations"]]
 
 
@@ -89,13 +90,19 @@ def search_web(
 
     if provider == "duckduckgo":
         ddgs = DDGS()
-        search_results = ddgs.text(query, max_results=num_results)
-        print(search_results, type(search_results))
-        urls = [r["href"] for r in search_results]
-        results = [
-            {"title": r["title"], "link": r["href"], "content": r["body"]}
-            for r in search_results
-        ]
+        try:
+            search_results = ddgs.text(query, max_results=num_results)
+            print(search_results, type(search_results))
+            urls = [r["href"] for r in search_results]
+            results = [
+                {"title": r["title"], "link": r["href"], "content": r["body"]}
+                for r in search_results
+            ]
+        except DuckDuckGoSearchException as e:
+            print("DuckDuckGo search failed: ", e)
+            urls = []
+            results = []
+
     else:  # google
         urls = list(search(query, num_results=num_results))
         # google shit doesnt seem to be working anymore, apparently a lbock they made on browsers without js?
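
With the change above, a DuckDuckGoSearchException raised inside the duckduckgo branch no longer propagates out of search_web; the function logs the error and continues with empty urls/results. A minimal call sketch, assuming npcsh 0.3.31 is installed and network access is available; the query string is arbitrary.

    from npcsh.search import search_web

    # If DDGS raises DuckDuckGoSearchException (e.g. rate limiting), 0.3.31 prints the
    # error and falls back to empty url/result lists instead of raising.
    results = search_web("current price of bitcoin", num_results=5, provider="duckduckgo")
    print(results)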