http-content-parser 0.0.18__tar.gz → 0.0.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/.gitignore +4 -1
  2. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/PKG-INFO +4 -4
  3. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/README.md +1 -1
  4. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/pyproject.toml +2 -2
  5. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/api_parser.py +4 -1
  6. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/curl_parser.py +2 -0
  7. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/generate_api_file.py +5 -26
  8. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/req_data.py +1 -1
  9. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/tests/test_api_model_parser.py +1 -1
  10. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/tests/test_curl.py +1 -1
  11. http_content_parser-0.0.18/tests/curl +0 -6
  12. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/LICENSE +0 -0
  13. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/requirements.txt +0 -0
  14. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/__init__.py +0 -0
  15. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/openapi_parser.py +0 -0
  16. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/param_util.py +0 -0
  17. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/postman_parser.py +0 -0
  18. {http_content_parser-0.0.18 → http_content_parser-0.0.20}/src/http_content_parser/swagger2_parser.py +0 -0
@@ -158,4 +158,7 @@ cython_debug/
158
158
  # and can be added to the global gitignore or merged into this file. For a more nuclear
159
159
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
160
  #.idea/
161
- .vscode/
161
+ .vscode/
162
+
163
+ #
164
+ tmp
@@ -1,8 +1,8 @@
1
- Metadata-Version: 2.3
1
+ Metadata-Version: 2.4
2
2
  Name: http_content_parser
3
- Version: 0.0.18
3
+ Version: 0.0.20
4
4
  Summary: parse http's payload and response
5
- Author-email: max su <suleiabc@gmail.com>
5
+ Author-email: leo <suleiabc@gmail.com>
6
6
  License-File: LICENSE
7
7
  Classifier: License :: OSI Approved :: MIT License
8
8
  Classifier: Operating System :: OS Independent
@@ -24,5 +24,5 @@ python3 -m build
24
24
 
25
25
  ```bash
26
26
  rm -f dist/*
27
- python3 -m twine upload dist/*
27
+ python3 -m twine upload dist/*
28
28
  ```
@@ -12,5 +12,5 @@ python3 -m build
12
12
 
13
13
  ```bash
14
14
  rm -f dist/*
15
- python3 -m twine upload dist/*
15
+ python3 -m twine upload dist/*
16
16
  ```
@@ -4,8 +4,8 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "http_content_parser"
7
- version = "0.0.18"
8
- authors = [{ name = "max su", email = "suleiabc@gmail.com" }]
7
+ version = "0.0.20"
8
+ authors = [{ name = "leo", email = "suleiabc@gmail.com" }]
9
9
  description = "parse http's payload and response"
10
10
  readme = "README.md"
11
11
  requires-python = ">=3.8"
@@ -57,7 +57,10 @@ class ApiModelParser:
57
57
  + req_model.method
58
58
  )
59
59
  req_model.header = json.dumps(req_model.header)
60
- req_model.query_param = json.dumps(url_content["query_params"])
60
+ if url_content["query_params"]:
61
+ req_model.query_param = json.dumps(url_content["query_params"])
62
+ else:
63
+ req_model.query_param = {}
61
64
  req_model.path = url_content["path"][1:]
62
65
  payload_list.append(req_model)
63
66
  return payload_list
@@ -5,6 +5,8 @@ from urllib.parse import urlparse, parse_qs
5
5
 
6
6
  class CurlParser(object):
7
7
  def parse_url(self, url: str) -> dict:
8
+ if "://" not in url:
9
+ url = "http://" + url
8
10
  # 解析 URL
9
11
  parsed_url = urlparse(url)
10
12
  # 获取各个组成部分
@@ -11,6 +11,7 @@ from http_content_parser.openapi_parser import OpenApiParser
11
11
  from http_content_parser.postman_parser import parse_postman
12
12
 
13
13
 
14
+ # TODO 不直接生成yaml文件,只生成dict,方便后续扩展
14
15
  class GenerateApiFile:
15
16
  def __init__(self) -> None:
16
17
  pass
@@ -79,36 +80,14 @@ class GenerateApiFile:
79
80
  + req_model.method
80
81
  )
81
82
  req_model.header = json.dumps(req_model.header)
82
- req_model.query_param = json.dumps(url_content["query_params"])
83
+ if url_content["query_params"]:
84
+ req_model.query_param = json.dumps(url_content["query_params"])
85
+ else:
86
+ req_model.query_param = {}
83
87
  req_model.path = url_content["path"][1:]
84
88
  payload_list.append(req_model)
85
89
  return payload_list
86
90
 
87
- def convert_curl_data_to_model_old(
88
- self, curl_file_path, url_filter=None
89
- ) -> list[ReqData]:
90
- curl_parser = CurlParser()
91
- payload_list = []
92
- with open(curl_file_path, "rt") as f:
93
- lines = f.readlines()
94
- line_num_array = curl_parser.get_curl_line_num_scope(lines=lines)
95
- for r in line_num_array:
96
- res_dict = curl_parser.split_curl_to_struct(
97
- lines, r[0], r[1], url_filter
98
- )
99
- template = ReqData(dd=res_dict)
100
- split_url = curl_parser.split_url(template.original_url, "_")
101
- template.temp_api_label = (
102
- self.replace_api_label_chars(split_url["url_path"])
103
- + template.method
104
- )
105
- template.header = json.dumps(template.header)
106
- template.query_param = json.dumps(split_url["url_params"])
107
- split_url_origin = curl_parser.split_url(template.original_url, "/")
108
- template.path = split_url_origin["url_path"][:-1]
109
- payload_list.append(template)
110
- return payload_list
111
-
112
91
  def produce_api_yaml_for_swagger2(self, swagger2_dict, yaml_file):
113
92
  swagger_parser = Swagger2Parser(swagger2_dict)
114
93
  api_dict = swagger_parser.get_swagger_api_info()
@@ -1,7 +1,7 @@
1
1
  # -*- coding: UTF-8 -*-
2
2
 
3
3
  """
4
- __author__ = maxsulei
4
+ __author__ = leo
5
5
  """
6
6
 
7
7
 
@@ -5,6 +5,6 @@ api_parser = ApiModelParser()
5
5
 
6
6
 
7
7
  def test_curl_parser():
8
- curl_file = "tests/curl"
8
+ curl_file = "./tmp"
9
9
  api_info = api_parser.get_api_model_for_curl(curl_file=curl_file)
10
10
  print(api_info)
@@ -9,7 +9,7 @@ def test_curl():
9
9
  # json_dict = json.load(f)
10
10
  # gaf.produce_api_yaml_for_postman(json_dict, "./test.yaml")
11
11
  curl_file = (
12
- "/Users/lei.susl/vs_workspace/company/apiTest/testcase/iac/tocex_master/temp"
12
+ "./tmp"
13
13
  )
14
14
  gaf.produce_api_yaml_for_curl(curl_file=curl_file, yaml_file="api.yaml")
15
15
 
@@ -1,6 +0,0 @@
1
- curl --location 'http://tocex-master-sg7-live.cluster.shopeemobile.com/apis/tocex/v1/machineset/node_provision/get' \
2
- --header 'Authorization: ApiToken eyJpZCI6IjNjNzk2MzFkLWJiZmYtNGE0NS1iMTVjLWY5MTFmMWNlZDE4NiIsImFjY291bnQiOiJlY3AuYm90QHNob3BlZS5jb20iLCJncm91cCI6IiIsInVzZXJfcm9sZSI6InJvb3QiLCJjcmVhdGVkX2F0IjoxNjQ2MDI1NTcxLCJleHBpcmVzX2F0IjoxOTU1NjI1NTcxLCJjb21tZW50IjoiKGxpdmUpIGVjcCBib3QgdG9rZW4iLCJzZXJ2aWNlcyI6W119.ffniWLZOLxvieccUbBuJus6lDsm6QzAf1OAf3EfRb3jRfhQ5XItVsWJEZ/cPulHSfn3gxdL7n/hPKYIBdubJpxK2jxFcUcplcHWiCKw93eWfZFj/+JtBKWmFBlxuXF+3tsXPwMIlUzgJRUr5dxqM6nKrB8aYRMmLkmSvUOu/O4lx5JgfMGqU1AqzYgtZy5RYtTcfV61pOE//kEJCsDVFHPVCTTkJYyDunCXR0pVd1BHIt7fd+bsaozdU2Ry9a1FAOFFxI+mQAG1gpg2bc+FqqC+YyMpQZiATI/B6dCkjoA2xLUm5eMmHIBO0QdOVVwAQpDNHbQyzLrqlbkiabERulw==' \
3
- --header 'Content-Type: application/json' \
4
- --data '{
5
- "host_ip": "10.214.82.96"
6
- }'