python-obfuscation-framework 1.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147) hide show
  1. pof/__init__.py +21 -0
  2. pof/__main__.py +22 -0
  3. pof/cli.py +187 -0
  4. pof/errors.py +2 -0
  5. pof/evasion/__init__.py +57 -0
  6. pof/evasion/argv.py +44 -0
  7. pof/evasion/base.py +48 -0
  8. pof/evasion/cpu/__init__.py +0 -0
  9. pof/evasion/cpu/cpu_count.py +27 -0
  10. pof/evasion/fs/__init__.py +0 -0
  11. pof/evasion/fs/directory_exist.py +29 -0
  12. pof/evasion/fs/directory_list_exist.py +46 -0
  13. pof/evasion/fs/directory_list_missing.py +45 -0
  14. pof/evasion/fs/directory_missing.py +28 -0
  15. pof/evasion/fs/exec_method.py +51 -0
  16. pof/evasion/fs/executable_path.py +66 -0
  17. pof/evasion/fs/file_exist.py +29 -0
  18. pof/evasion/fs/file_list_exist.py +46 -0
  19. pof/evasion/fs/file_list_missing.py +45 -0
  20. pof/evasion/fs/file_missing.py +31 -0
  21. pof/evasion/fs/tmp.py +112 -0
  22. pof/evasion/hardware/__init__.py +0 -0
  23. pof/evasion/hardware/ram_count.py +50 -0
  24. pof/evasion/hooks/__init__.py +0 -0
  25. pof/evasion/hooks/debugger.py +36 -0
  26. pof/evasion/hooks/tracemalloc.py +23 -0
  27. pof/evasion/human/__init__.py +0 -0
  28. pof/evasion/human/p.py +45 -0
  29. pof/evasion/human/prompt.py +69 -0
  30. pof/evasion/integrity.py +129 -0
  31. pof/evasion/multi.py +41 -0
  32. pof/evasion/os/__init__.py +0 -0
  33. pof/evasion/os/domain.py +27 -0
  34. pof/evasion/os/hostname.py +27 -0
  35. pof/evasion/os/uid.py +28 -0
  36. pof/evasion/os/username.py +27 -0
  37. pof/evasion/processes/__init__.py +0 -0
  38. pof/evasion/processes/proc_count.py +47 -0
  39. pof/evasion/time/__init__.py +0 -0
  40. pof/evasion/time/expire.py +75 -0
  41. pof/evasion/time/uptime.py +48 -0
  42. pof/evasion/time/utc.py +26 -0
  43. pof/evasion/utils.py +198 -0
  44. pof/main.py +369 -0
  45. pof/obfuscator/__init__.py +86 -0
  46. pof/obfuscator/builtins.py +482 -0
  47. pof/obfuscator/cipher/__init__.py +0 -0
  48. pof/obfuscator/cipher/deep_encryption.py +194 -0
  49. pof/obfuscator/cipher/rc4.py +22 -0
  50. pof/obfuscator/cipher/shift.py +19 -0
  51. pof/obfuscator/cipher/xor.py +121 -0
  52. pof/obfuscator/compression/__init__.py +0 -0
  53. pof/obfuscator/compression/bz2.py +22 -0
  54. pof/obfuscator/compression/gzip.py +22 -0
  55. pof/obfuscator/compression/lzma.py +22 -0
  56. pof/obfuscator/compression/zlib.py +22 -0
  57. pof/obfuscator/constants.py +294 -0
  58. pof/obfuscator/definitions.py +341 -0
  59. pof/obfuscator/encoding/__init__.py +0 -0
  60. pof/obfuscator/encoding/a85.py +21 -0
  61. pof/obfuscator/encoding/b16.py +21 -0
  62. pof/obfuscator/encoding/b32.py +21 -0
  63. pof/obfuscator/encoding/b32hex.py +21 -0
  64. pof/obfuscator/encoding/b64.py +21 -0
  65. pof/obfuscator/encoding/b85.py +25 -0
  66. pof/obfuscator/encoding/binascii.py +22 -0
  67. pof/obfuscator/encoding/snt.py +23 -0
  68. pof/obfuscator/esoteric/__init__.py +0 -0
  69. pof/obfuscator/esoteric/call.py +49 -0
  70. pof/obfuscator/esoteric/doc.py +237 -0
  71. pof/obfuscator/esoteric/globals.py +62 -0
  72. pof/obfuscator/esoteric/imports.py +55 -0
  73. pof/obfuscator/extract_variables.py +297 -0
  74. pof/obfuscator/junk/__init__.py +0 -0
  75. pof/obfuscator/junk/add_comments.py +102 -0
  76. pof/obfuscator/junk/add_newlines.py +36 -0
  77. pof/obfuscator/names.py +474 -0
  78. pof/obfuscator/names_rope.py +375 -0
  79. pof/obfuscator/numbers.py +271 -0
  80. pof/obfuscator/other/__init__.py +0 -0
  81. pof/obfuscator/other/tokens.py +47 -0
  82. pof/obfuscator/remove/__init__.py +0 -0
  83. pof/obfuscator/remove/comments.py +36 -0
  84. pof/obfuscator/remove/exceptions.py +75 -0
  85. pof/obfuscator/remove/indents.py +28 -0
  86. pof/obfuscator/remove/loggings.py +120 -0
  87. pof/obfuscator/remove/loggings_old.py +45 -0
  88. pof/obfuscator/remove/newline.py +27 -0
  89. pof/obfuscator/remove/print.py +40 -0
  90. pof/obfuscator/restructure.py +15 -0
  91. pof/obfuscator/stegano/__init__.py +0 -0
  92. pof/obfuscator/stegano/docstrings.py +111 -0
  93. pof/obfuscator/stegano/ipv6encoding.py +21 -0
  94. pof/obfuscator/stegano/macencoding.py +21 -0
  95. pof/obfuscator/stegano/uuidencoding.py +21 -0
  96. pof/obfuscator/strings.py +359 -0
  97. pof/stager/__init__.py +17 -0
  98. pof/stager/cipher/__init__.py +0 -0
  99. pof/stager/cipher/rc4.py +36 -0
  100. pof/stager/download.py +80 -0
  101. pof/stager/image.py +374 -0
  102. pof/stager/lots/__init__.py +1 -0
  103. pof/stager/lots/cl1pnet.py +51 -0
  104. pof/stager/lots/pastebin.py +35 -0
  105. pof/stager/lots/pasters.py +30 -0
  106. pof/stager/quine.py +135 -0
  107. pof/utils/__init__.py +0 -0
  108. pof/utils/cipher/__init__.py +7 -0
  109. pof/utils/cipher/rc4.py +407 -0
  110. pof/utils/cipher/shift.py +41 -0
  111. pof/utils/compression/__init__.py +11 -0
  112. pof/utils/compression/bz2.py +38 -0
  113. pof/utils/compression/gzip.py +38 -0
  114. pof/utils/compression/lzma.py +38 -0
  115. pof/utils/compression/zlib.py +38 -0
  116. pof/utils/encoding/__init__.py +19 -0
  117. pof/utils/encoding/a85.py +35 -0
  118. pof/utils/encoding/b16.py +30 -0
  119. pof/utils/encoding/b3.py +93 -0
  120. pof/utils/encoding/b32.py +30 -0
  121. pof/utils/encoding/b32hex.py +30 -0
  122. pof/utils/encoding/b64.py +30 -0
  123. pof/utils/encoding/b85.py +35 -0
  124. pof/utils/encoding/binascii.py +38 -0
  125. pof/utils/encoding/snt.py +97 -0
  126. pof/utils/entropy.py +24 -0
  127. pof/utils/extract_names.py +204 -0
  128. pof/utils/generator/__init__.py +17 -0
  129. pof/utils/generator/advanced.py +53 -0
  130. pof/utils/generator/base.py +178 -0
  131. pof/utils/generator/basic.py +107 -0
  132. pof/utils/generator/names.txt +37241 -0
  133. pof/utils/generator/unicode.py +171 -0
  134. pof/utils/se/__init__.py +3 -0
  135. pof/utils/se/homoglyphs.py +99 -0
  136. pof/utils/se/homoglyphs.txt +96 -0
  137. pof/utils/stegano/__init__.py +5 -0
  138. pof/utils/stegano/ipv6encoding.py +97 -0
  139. pof/utils/stegano/macencoding.py +96 -0
  140. pof/utils/stegano/uuidencoding.py +102 -0
  141. pof/utils/tokens.py +68 -0
  142. python_obfuscation_framework-1.4.1.dist-info/LICENSE +674 -0
  143. python_obfuscation_framework-1.4.1.dist-info/METADATA +851 -0
  144. python_obfuscation_framework-1.4.1.dist-info/RECORD +147 -0
  145. python_obfuscation_framework-1.4.1.dist-info/WHEEL +5 -0
  146. python_obfuscation_framework-1.4.1.dist-info/entry_points.txt +2 -0
  147. python_obfuscation_framework-1.4.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,36 @@
1
+ from tokenize import COMMENT, DEDENT, ENCODING, INDENT, NEWLINE, NL, STRING
2
+
3
+
4
class CommentsObfuscator:
    """Strip comment tokens and docstrings from a token stream."""

    @staticmethod
    def obfuscate_tokens(tokens):
        kept = []
        previous = None
        at_module_head = True  # only strings/newlines/comments seen so far
        for toknum, tokval, *_ in tokens:
            if toknum == COMMENT:
                drop = True
            elif toknum == STRING:
                # A string right after a line break / indent change, or at the
                # very top of the file, is a docstring rather than data.
                drop = (
                    previous in (NEWLINE, DEDENT, INDENT, ENCODING)
                    or at_module_head
                )
            else:
                drop = False

            if at_module_head and toknum not in (NEWLINE, NL, STRING, COMMENT):
                at_module_head = False

            if not drop:
                kept.append((toknum, tokval))
            previous = toknum
        return kept
@@ -0,0 +1,75 @@
1
+ # Remove exception text, or replace it with a code.
2
+ # Replace text with code to still be able to use exception message on error
3
+ # raise Exception("text...") --> raise Exception("42")
4
+ # and log the code to text: 42: "text..."
5
+ import logging
6
+ from tokenize import NAME, OP, STRING
7
+
8
+
9
class ExceptionObfuscator:
    """Strip exception message arguments, optionally replacing them with codes.

    ``raise Exception("text...")`` becomes ``raise Exception()`` — or, when
    ``add_codes`` is enabled, ``raise Exception("<code>")`` where each code is
    drawn from ``generator`` and logged so messages can be looked up later.
    """

    def __init__(self, add_codes=None, generator=None) -> None:
        """``add_codes`` defaults to True whenever a generator is supplied."""
        if add_codes is None and generator is not None:
            add_codes = True
        self.add_codes = add_codes
        self.generator = generator

    def get_code(self):
        """Return the next replacement code from the generator."""
        return next(self.generator)

    def obfuscate_tokens(self, tokens):  # noqa: C901
        result = []  # obfuscated tokens
        parenthesis_depth = 0  # parenthesis depth
        prev_tokval = None
        prev_toknum = None
        exception_par_depth = 0  # depth just outside the exception call
        inside_exception = False  # currently skipping the message tokens
        is_exception = False  # previous NAME was preceded by `raise`
        for index, (toknum, tokval, *_) in enumerate(tokens):
            new_tokens = [(toknum, tokval)]
            # one-token lookahead (only used for the debug log below)
            next_tokval = None
            if len(tokens) > index + 1:
                _, next_tokval, *__ = tokens[index + 1]

            if toknum == OP and tokval == "(":
                parenthesis_depth += 1
            elif toknum == OP and tokval == ")":
                parenthesis_depth -= 1

            if inside_exception:
                if exception_par_depth == parenthesis_depth:
                    # matching closing parenthesis: keep it, stop skipping
                    inside_exception = False
                else:
                    # drop the original message tokens
                    new_tokens = None

            elif (
                prev_toknum == NAME and is_exception and toknum == OP and tokval == "("
            ):
                inside_exception = True
                exception_par_depth = parenthesis_depth - 1
                is_exception = False

                if self.add_codes:
                    current_code = self.get_code()
                    new_tokens.extend([(STRING, f'"{current_code}"')])
                    # Lazy %-style args: `logging` does not interpolate
                    # str.format-style `{}` placeholders in the message.
                    logging.debug(
                        "Exception code %s --> %s",
                        current_code,
                        next_tokval,
                    )

            elif prev_tokval == "raise" and prev_toknum == NAME:
                is_exception = True
            else:
                # except Exception as e:
                #     raise e
                is_exception = False

            if new_tokens:
                result.extend(new_tokens)
            prev_tokval = tokval
            prev_toknum = toknum
        return result
@@ -0,0 +1,28 @@
1
+ # TODO (deoktr): support multi symbols indents (mix spaces and tabs)
2
+ # this might be extremely difficult because we'll have to keep track of the
3
+ # current function/class declaration; we can't mix within the same function,
4
+ # but between multiple functions we can
5
+ from tokenize import DEDENT, INDENT
6
+
7
+
8
class IndentsObfuscator:
    """Shrink indentation to the smallest unit (one `indent` per level)."""

    def __init__(self, indent=" ") -> None:
        # string emitted once per nesting level (defaults to a single space)
        self.indent = indent

    def obfuscate_tokens(self, tokens):
        rewritten = []
        level = 0  # current indentation level
        for toknum, tokval, *_ in tokens:
            if toknum == INDENT:
                level += 1
                # replace the original indentation text with the minimal one
                rewritten.append((toknum, self.indent * level))
            elif toknum == DEDENT:
                level -= 1
                rewritten.append((toknum, tokval))
            else:
                rewritten.append((toknum, tokval))
        return rewritten
@@ -0,0 +1,120 @@
1
+ from tokenize import NAME, OP, STRING
2
+
3
+
4
# TODO (deoktr): REFACTORING this class
class LoggingObfuscator:
    """Keep logging and change them with a generated code.

    Every ``logging.<level>(...)`` call found in the token stream is rewritten
    to ``logging.<level>('<code>')``: the call itself survives, but its
    original arguments are replaced by a single (optionally generated) string.
    """

    def __init__(self, add_codes=None, generator=None) -> None:
        # add_codes: replace messages with values from `generator`;
        # defaults to True whenever a generator is supplied.
        if add_codes is None and generator is not None:
            add_codes = True
        self.add_codes = add_codes
        self.generator = generator

    def obfuscate_tokens(self, tokens):
        result = []  # obfuscated tokens
        parenthesis_depth = 0  # parenthesis depth
        prev_tokval = None
        logging_par_depth = 0  # depth just inside the logging call's parens
        inside_log = False  # currently consuming a logging call's arguments
        logging_type = None  # level name ("debug", "info", ...) being rewritten
        for index, (toknum, tokval, *_) in enumerate(tokens):
            new_tokens = [(toknum, tokval)]
            # one-token lookahead, used to detect `logging.<level>`
            next_tokval = None
            if len(tokens) > index + 1:
                _, next_tokval, *__ = tokens[index + 1]

            # depth is updated BEFORE the inside_log test below, so the exit
            # branch fires on the call's closing parenthesis token itself
            if toknum == OP and tokval == "(":
                parenthesis_depth += 1
            elif toknum == OP and tokval == ")":
                parenthesis_depth -= 1

            if inside_log:
                if (
                    logging_par_depth is not None
                    and parenthesis_depth < logging_par_depth
                    and tokval not in ["debug", "warning", "info", "critical", "error"]
                ):
                    inside_log = False

                    code = ""
                    if self.add_codes:
                        code = next(self.generator)

                    # emit `<level>('<code>')` in place of the original level
                    # name, arguments and closing parenthesis (the leading
                    # `logging` and the dot were already kept)
                    new_tokens = [
                        (NAME, logging_type),
                        (OP, "("),
                        (STRING, repr(code)),
                        (OP, ")"),
                    ]
                    logging_type = None
                else:
                    # still inside the call: drop the original tokens
                    new_tokens = None

            elif (
                toknum == OP
                and tokval == "."
                and prev_tokval == "logging"
                and next_tokval in ["debug", "warning", "info", "critical", "error"]
            ):
                # found `logging.<level>`; keep the dot, start swallowing
                inside_log = True
                logging_type = next_tokval
                logging_par_depth = parenthesis_depth + 1

            if new_tokens:
                result.extend(new_tokens)
            prev_tokval = tokval
        return result
68
+
69
+
70
class LoggingRemoveObfuscator:
    """Remove logging statements from the code."""

    @staticmethod
    def obfuscate_tokens(tokens):
        """Drop every ``logging.<level>(...)`` call from *tokens*.

        Each removed call is replaced by a ``pass`` statement so that an
        otherwise-empty block stays syntactically valid.
        """
        result = []  # obfuscated tokens
        parenthesis_depth = 0  # parenthesis depth
        prev_tokval = None
        logging_par_depth = 0  # depth at which the logging call started
        inside_log = False  # currently consuming a logging call
        for index, (toknum, tokval, *_) in enumerate(tokens):
            new_tokens = [(toknum, tokval)]
            # one-token lookahead, used to detect `logging.<level>`
            next_tokval = None
            if len(tokens) > index + 1:
                _, next_tokval, *__ = tokens[index + 1]

            if toknum == OP and tokval == "(":
                parenthesis_depth += 1
            elif toknum == OP and tokval == ")":
                parenthesis_depth -= 1

            if inside_log:
                # the call is over once depth is back at (or below) the level
                # it started on AND the current token is past the level name
                # and the closing parenthesis
                if (
                    logging_par_depth is not None
                    and parenthesis_depth <= logging_par_depth
                    and tokval not in ["debug", "warning", "info", "critical", "error"]
                    and tokval != ")"
                ):
                    inside_log = False
                    # replace logging statements with "pass"
                    new_tokens = [(NAME, "pass"), *new_tokens]
                else:
                    new_tokens = None

            elif (
                toknum == OP
                and tokval == "."
                and prev_tokval == "logging"
                and next_tokval in ["debug", "warning", "info", "critical", "error"]
            ):
                inside_log = True
                logging_par_depth = parenthesis_depth

                new_tokens = None
                # remove the last items "logging"
                result.pop()

            if new_tokens:
                result.extend(new_tokens)
            prev_tokval = tokval
        return result
@@ -0,0 +1,45 @@
1
+ from tokenize import NAME, OP
2
+
3
+
4
class LoggingObfuscator:
    """Delete ``logging`` usage, including the ``import logging`` statement."""

    @staticmethod
    def obfuscate_tokens(tokens):
        cleaned = []
        depth = 0  # current parenthesis nesting
        previous_val = None
        log_depth = 0  # nesting level where the logging region started
        in_logging = False
        for pos, (toknum, tokval, *_) in enumerate(tokens):
            emit = True
            lookahead = tokens[pos + 1][1] if len(tokens) > pos + 1 else None

            # a bare `logging` name opens a region to suppress
            if not in_logging and toknum == NAME and tokval == "logging":
                emit = False
                in_logging = True
                log_depth = depth

            # drop the `import` keyword of `import logging` as well
            if tokval == "import" and lookahead == "logging":
                emit = False

            if in_logging:
                # still inside while nested deeper, or while the current /
                # previous token is part of the `logging.<attr>(` prefix
                still_inside = depth != log_depth or (
                    tokval in ("(", "logging")
                    or previous_val in (".", "logging")
                )
                if still_inside:
                    emit = False
                else:
                    in_logging = False

            if toknum == OP:
                if tokval == "(":
                    depth += 1
                elif tokval == ")":
                    depth -= 1

            if emit:
                cleaned.append((toknum, tokval))
            previous_val = tokval
        return cleaned
@@ -0,0 +1,27 @@
1
+ from tokenize import INDENT, NEWLINE, NL
2
+
3
+
4
class NewlineObfuscator:
    """Drop blank lines (NL tokens and redundant consecutive NEWLINEs)."""

    @staticmethod
    def obfuscate_tokens(tokens):
        kept = []
        last_type = None
        for toknum, tokval, *_ in tokens:
            # NL tokens are non-logical line breaks; a NEWLINE directly after
            # another line break or an INDENT closes an empty statement (e.g.
            # the line left behind by a removed docstring).
            is_blank = toknum == NL or (
                toknum == NEWLINE and last_type in (NEWLINE, NL, INDENT)
            )
            if not is_blank:
                kept.append((toknum, tokval))
            last_type = toknum
        return kept
@@ -0,0 +1,40 @@
1
+ # this could cause some problems, if the print statement was the
2
+ # only one present in the file
3
+ from tokenize import NAME, OP
4
+
5
+
6
class PrintObfuscator:
    """Delete every ``print(...)`` call from a token stream."""

    @staticmethod
    def obfuscate_tokens(tokens):
        output = []
        nesting = 0  # current parenthesis nesting
        last_val = None
        print_nesting = 0  # nesting level where the print call started
        skipping = False
        for toknum, tokval, *_ in tokens:
            keep = True

            if not skipping and toknum == NAME and tokval == "print":
                keep = False
                skipping = True
                print_nesting = nesting

            if skipping:
                # the call is over once we are back at its starting depth and
                # past the `print` name and its opening parenthesis
                done = (
                    print_nesting == nesting
                    and tokval not in ("(", "print")
                    and last_val != "print"
                )
                if done:
                    skipping = False
                else:
                    keep = False

            if toknum == OP and tokval == "(":
                nesting += 1
            elif toknum == OP and tokval == ")":
                nesting -= 1

            if keep:
                output.append((toknum, tokval))
            last_val = tokval
        return output
@@ -0,0 +1,15 @@
1
# TODO (deoktr): WORK IN PROGRESS !


class RestructureObfuscator:
    """Obfuscate by restructuring the entire code.

    This is done by moving functions and classes around.
    """

    @staticmethod
    def obfuscate_tokens(tokens):
        # NOTE(review): currently a no-op placeholder — the token stream is
        # returned unchanged.
        # TODO (deoktr): test if ast include decorators into function definition or if
        # it's easy to get, and move stuff using it, it's probably easier than
        # using tokens
        return tokens
File without changes
@@ -0,0 +1,111 @@
1
+ import ast
2
+ import io
3
+ from tokenize import LPAR, NAME, NEWLINE, OP, RPAR, STRING, generate_tokens
4
+
5
+ from pof.utils.encoding import Base64Encoding
6
+ from pof.utils.tokens import untokenize
7
+
8
+
9
class DocstringObfuscator:
    """Hide code inside doc strings.

    The source is encoded (base64 by default) and stored in the docstring of
    a small carrier snippet (``base_code``); a trailing ``exec`` reads the
    docstring back, strips the layout whitespace, decodes it and runs it.
    """

    # TODO (deoktr): add ability to choose entry point (function) name without calling
    # it put the exec code inside this function
    # TODO (deoktr): add ability to choose the base code
    # TODO (deoktr): add ability to split the docstring among multiple class/functions

    def __init__(self, encoding_class=None, base_code=None) -> None:
        """Create the obfuscator.

        encoding_class: provides ``encode_tokens`` / ``decode_tokens`` /
            ``import_tokens`` (defaults to ``Base64Encoding``).
        base_code: innocuous carrier source that receives the docstring
            payload.
        """
        if encoding_class is None:
            encoding_class = Base64Encoding
        self.encoding_class = encoding_class

        if base_code is None:
            base_code = "class Foo:\n pass\n"
        self.base_code = base_code

    def get_exec_tokens(self, name):
        """Return tokens for ``exec(decode(<name>.__doc__, cleaned up))``."""
        # the replace will remove \n and space indents from the docstrings
        # because on some encoding it can break it, it works without problems
        # with base64 but doesn't with base85, base16 and other.
        docstring_tokens = [
            (NAME, name),
            (OP, "."),
            (NAME, "__doc__"),
            (OP, "."),
            (NAME, "replace"),
            (LPAR, "("),
            (STRING, repr(r"\n")),
            (OP, ","),
            (STRING, "''"),
            (RPAR, ")"),
            (OP, "."),
            (NAME, "replace"),
            (LPAR, "("),
            (STRING, repr(" ")),
            (OP, ","),
            (STRING, repr("")),
            (RPAR, ")"),
        ]
        return [
            (NEWLINE, "\n"),
            (NAME, "exec"),
            (LPAR, "("),
            *self.encoding_class.decode_tokens(docstring_tokens),
            (RPAR, ")"),
        ]

    def get_docstring(self, code, indent=" "):
        """Encode *code* and lay it out as an indented triple-quoted docstring."""
        # `encoded_text` is the encoded payload as a plain string: the encode
        # tokens are rendered to source and evaluated back to a literal.
        # (Renamed from `encode_tokens`, which misleadingly suggested a token
        # list.)
        encoded_text = ast.literal_eval(
            untokenize(self.encoding_class.encode_tokens(code.encode())),
        )

        docstring = "\n" + indent
        chunk_size = 74  # wrap the payload into lines of readable width
        for i in range(0, len(encoded_text), chunk_size):
            docstring += encoded_text[i : i + chunk_size] + "\n" + indent
        return f'"""{docstring}"""'

    def get_base_tokens(self):
        """Tokenize the carrier ``base_code`` snippet."""
        io_obj = io.StringIO(self.base_code)
        return list(generate_tokens(io_obj.readline))

    def obfuscate_tokens(self, tokens):
        """Hide *tokens* in the carrier's docstring and append the loader."""
        code = untokenize(tokens)
        docstring = self.get_docstring(code)

        base_tokens = self.get_base_tokens()

        in_declaration = False
        prev_tokval = None
        name = None
        new_tokens = []
        add_next = False
        for toknum, tokval, *_ in base_tokens:
            # `emitted` (not `tokens`): avoid shadowing the parameter
            emitted = [(toknum, tokval)]

            if add_next:
                emitted.extend([(STRING, docstring), (NEWLINE, "\n")])
                add_next = False

            # and name is None : used to add the docstring on only one
            # class/function definition
            # FIXME (deoktr): split it among multiple definitions
            if prev_tokval in ["def", "class"] and name is None:
                name = tokval
                in_declaration = True
            elif prev_tokval == ":" and in_declaration:
                add_next = True
                in_declaration = False

            prev_tokval = tokval
            new_tokens.extend(emitted)

        return [
            *self.encoding_class.import_tokens(),
            (NEWLINE, "\n"),
            *new_tokens,
            (NEWLINE, "\n"),
            *self.get_exec_tokens(name),
            (NEWLINE, "\n"),
        ]
@@ -0,0 +1,21 @@
1
+ from tokenize import LPAR, NAME, NEWLINE, RPAR
2
+
3
+ from pof.utils.stegano import IPv6Encoding
4
+ from pof.utils.tokens import untokenize
5
+
6
+
7
class IPv6Obfuscator(IPv6Encoding):
    """Encode the source code in a list of valid IPv6."""

    @classmethod
    def obfuscate_tokens(cls, tokens):
        # render to source, encode, then wrap the decoder in an exec() call
        source = untokenize(tokens)
        payload = cls.encode_tokens(source.encode())
        return [
            *cls.import_tokens(),
            (NEWLINE, "\n"),
            (NAME, "exec"),
            (LPAR, "("),
            *cls.decode_tokens(payload),
            (RPAR, ")"),
            (NEWLINE, "\n"),
        ]
@@ -0,0 +1,21 @@
1
+ from tokenize import LPAR, NAME, NEWLINE, RPAR
2
+
3
+ from pof.utils.stegano import MACEncoding
4
+ from pof.utils.tokens import untokenize
5
+
6
+
7
class MACObfuscator(MACEncoding):
    """Encode the source code in a list of valid MAC."""

    @classmethod
    def obfuscate_tokens(cls, tokens):
        # render to source, encode as MAC addresses, wrap in exec()
        encoded = cls.encode_tokens(untokenize(tokens).encode())
        prologue = [*cls.import_tokens(), (NEWLINE, "\n")]
        call = [
            (NAME, "exec"),
            (LPAR, "("),
            *cls.decode_tokens(encoded),
            (RPAR, ")"),
            (NEWLINE, "\n"),
        ]
        return prologue + call
@@ -0,0 +1,21 @@
1
+ from tokenize import LPAR, NAME, NEWLINE, RPAR
2
+
3
+ from pof.utils.stegano import UUIDEncoding
4
+ from pof.utils.tokens import untokenize
5
+
6
+
7
class UUIDObfuscator(UUIDEncoding):
    """Encode the source code in a list of valid UUID."""

    @classmethod
    def obfuscate_tokens(cls, tokens):
        # render to source, encode as UUIDs, wrap the decoder in exec()
        blob = untokenize(tokens).encode()
        decoder = cls.decode_tokens(cls.encode_tokens(blob))
        return [
            *cls.import_tokens(),
            (NEWLINE, "\n"),
            (NAME, "exec"),
            (LPAR, "("),
            *decoder,
            (RPAR, ")"),
            (NEWLINE, "\n"),
        ]