summarizing-machine 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- summarizing_machine/__init__.py +20 -0
- summarizing_machine/cli.py +96 -0
- summarizing_machine/config.py +23 -0
- summarizing_machine/githf.py +69 -0
- summarizing_machine/machina.yaml +7 -0
- summarizing_machine/machine.py +177 -0
- summarizing_machine/providers/__init__.py +7 -0
- summarizing_machine/providers/camelids.py +87 -0
- summarizing_machine/providers/castor_pollux.py +127 -0
- summarizing_machine/providers/depsek.py +81 -0
- summarizing_machine/providers/electroid.py +78 -0
- summarizing_machine/providers/openai.py +82 -0
- summarizing_machine/providers/qrog.py +82 -0
- summarizing_machine/providers/strangelove.py +81 -0
- summarizing_machine/utilities.py +418 -0
- summarizing_machine-0.0.1.dist-info/METADATA +77 -0
- summarizing_machine-0.0.1.dist-info/RECORD +21 -0
- summarizing_machine-0.0.1.dist-info/WHEEL +5 -0
- summarizing_machine-0.0.1.dist-info/entry_points.txt +2 -0
- summarizing_machine-0.0.1.dist-info/licenses/LICENSE +21 -0
- summarizing_machine-0.0.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
from .config import Config
|
|
9
|
+
from .machine import machine
|
|
10
|
+
from .githf import fetch_instructions
|
|
11
|
+
from .utilities import (plato_text_to_muj,
|
|
12
|
+
plato_text_to_mpuj,
|
|
13
|
+
llm_soup_to_text,
|
|
14
|
+
new_plato_text)
|
|
15
|
+
|
|
16
|
+
__all__ = [
|
|
17
|
+
'machine',
|
|
18
|
+
'fetch_instructions',
|
|
19
|
+
'Config'
|
|
20
|
+
]
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
from os import environ
|
|
9
|
+
import sys
|
|
10
|
+
import click
|
|
11
|
+
import fileinput
|
|
12
|
+
from .config import Config
|
|
13
|
+
from .utilities import new_plato_text
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@click.command()
@click.option('-k', '--provider-api-key',
              envvar='PROVIDER_API_KEY',
              default='no_key', help='Language Model API provider key.')
@click.option('-t', '--github-token', envvar='GITHUB_TOKEN',
              default='no_token', help='GitHub API token for private repo access.')
@click.option('-d', '--debug/--no-debug',
              default=False, help='Print full stack trace on errors.')
@click.option('-i', '--interactive',
              is_flag=True, help='Respond and stay interactive')
@click.argument('filenames', nargs=-1,
                type=click.Path(exists=True))
def run(provider_api_key, github_token, debug, interactive, filenames):
    """
    $ text | summarizing-machine # Accepts text from the pipe
    $ echo "...<text>..." | summarizing-machine #

    $ summarizing-machine multilogue.txt new_turn.txt # ...or files.
    """
    config = Config()

    # Route the key to the right provider based on its well-known prefix.
    # BUG FIX: the original compared against 'no_provider_key', but the
    # option default is 'no_key', so the missing-key branch never fired.
    if provider_api_key == 'no_key':
        # No key given on the command line or via PROVIDER_API_KEY.
        # Only fatal when no provider is configured in the environment.
        if config.provider == '':
            sys.stderr.write('No provider key!\n')
            sys.stderr.flush()
            sys.exit(1)
    elif provider_api_key.startswith('sk-'):
        if provider_api_key.startswith('sk-proj-'):
            config.provider = 'OpenAI'
            environ['OPENAI_API_KEY'] = provider_api_key
        elif provider_api_key.startswith('sk-ant-'):
            config.provider = 'Anthropic'
            environ['ANTHROPIC_API_KEY'] = provider_api_key
        else:
            # Plain 'sk-' with neither OpenAI nor Anthropic sub-prefix.
            config.provider = 'DepSek'
            environ['DEPSEK_API_KEY'] = provider_api_key
    elif provider_api_key.startswith('AIzaSy'):
        config.provider = 'Gemini'
        environ['GEMINI_API_KEY'] = provider_api_key
    elif provider_api_key.startswith('gsk_'):
        config.provider = 'Groq'
        environ['GROQ_API_KEY'] = provider_api_key
    elif provider_api_key.startswith('xai-'):
        config.provider = 'XAI'
        environ['XAI_API_KEY'] = provider_api_key
    elif provider_api_key.startswith('LLM|'):
        config.provider = 'Meta'
        environ['META_API_KEY'] = provider_api_key
    else:
        if config.provider == '':
            raise ValueError("Unrecognized API key prefix and no provider specified.")

    config.provider_api_key = provider_api_key

    # BUG FIX: 'no_token' is the option default, not a real token; the
    # original let it overwrite a genuine GITHUB_TOKEN in the environment.
    if github_token and github_token != 'no_token':
        config.github_token = github_token
        environ['GITHUB_TOKEN'] = github_token

    # NOTE(review): 'interactive' is accepted but not implemented yet.

    # Gather the whole conversation: stdin when no files are given.
    raw_input = ''
    for line in fileinput.input(files=filenames or ('-',), encoding="utf-8"):
        raw_input += line

    # Imported lazily so '--help' stays fast and heavy provider imports
    # only happen when the machine actually runs.
    from .machine import machine

    try:
        thoughts, text = machine(raw_input, config)
        # The machine's answer is appended to the original dialogue.
        output = raw_input + '\n\n' + new_plato_text(thoughts, text, config.name)
        sys.stdout.write(output)
        sys.stdout.flush()
    except Exception as e:
        if debug:
            import traceback
            traceback.print_exc()
        else:
            sys.stderr.write(f'Machine did not work {e}\n')
            sys.stderr.flush()
        sys.exit(1)


if __name__ == '__main__':
    run()
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
from os import environ
|
|
9
|
+
from dataclasses import dataclass, field
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class Config:
    """Runtime configuration, seeded from environment variables.

    Every environment-backed field uses a ``default_factory`` so the
    variable is read when the instance is created, not at import time.
    """
    # --- GitHub access -------------------------------------------------
    github_token: str = field(default_factory=lambda: environ.get("GITHUB_TOKEN", ""))
    github_name: str = field(default_factory=lambda: environ.get("GITHUB_NAME", ""))
    github_email: str = field(default_factory=lambda: environ.get("GITHUB_EMAIL", ""))
    # --- LLM provider --------------------------------------------------
    provider_api_key: str = field(default_factory=lambda: environ.get("PROVIDER_API_KEY", ""))
    provider: str = field(default_factory=lambda: environ.get("PROVIDER", ""))
    # --- Location of the machine's instructions on GitHub --------------
    machine_organization_name: str = field(default_factory=lambda: environ.get("MACHINE_ORGANIZATION_NAME", "summarizing-machine"))
    private_repo_with_text: str = field(default_factory=lambda: environ.get("PRIVATE_REPO_WITH_TEXT", "summarizing_machine"))
    system_prompt_file: str = field(default_factory=lambda: environ.get("SYSTEM_PROMPT_FILE", "machina.yaml"))
    # Filled in later by fetch_instructions().
    name: str = ''
    instructions: str = ''
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
import sys
|
|
9
|
+
from os import path
|
|
10
|
+
import yaml
|
|
11
|
+
import urllib.request
|
|
12
|
+
import urllib.error
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def download_github_file(owner, repo, file_path, token):
    """Download a single file from a GitHub repository.

    Uses the GitHub REST "contents" API; the
    'application/vnd.github.v3.raw' accept header requests the raw file
    bytes instead of the JSON metadata envelope.

    Returns the file content as bytes, or None on any HTTP/network error.
    """
    url = f"https://api.github.com/repos/{owner}/{repo}/contents/{file_path}"

    headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github.v3.raw",
        "User-Agent": "Summarizing-Machine"
    }

    req = urllib.request.Request(url, headers=headers)

    try:
        with urllib.request.urlopen(req, timeout=10) as response:
            return response.read()
    except urllib.error.HTTPError as e:
        # BUG FIX: diagnostics go to stderr -- stdout is reserved for the
        # machine's output, which the CLI writes to sys.stdout.
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        error_info = e.read().decode('utf-8')
        print(f"Details: {error_info}", file=sys.stderr)
        return None
    except urllib.error.URLError as e:
        print(f"URL Error: {e.reason}", file=sys.stderr)
        return None
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def fetch_instructions(config):
    """Retrieve the system prompt, preferring a private GitHub repo.

    Falls back to the bundled machina.yaml when GitHub is unreachable OR
    when the download fails: download_github_file returns None on
    HTTP/network errors instead of raising.

    Side effects: stores the parsed 'name' and 'description' on config.

    Returns:
        (name, instructions): the Machine's dashed name and the
        'description' field from the YAML as the system prompt string.
    """
    raw_yaml = None
    try:
        raw_yaml = download_github_file(
            owner=config.machine_organization_name,
            repo=config.private_repo_with_text,
            file_path=config.system_prompt_file,
            token=config.github_token
        )
    except Exception as e:
        print(f"Warning: could not fetch the instructions from GitHub: {e}",
              file=sys.stderr)

    # BUG FIX: the original only fell back on an exception, but
    # download_github_file signals failure by returning None, which was
    # then passed to yaml.safe_load and crashed on parsed.get().
    if raw_yaml is None:
        local_path = path.join(path.dirname(__file__), 'machina.yaml')
        with open(local_path, 'r') as f:
            raw_yaml = f.read()

    # Parse
    parsed = yaml.safe_load(raw_yaml)
    name = parsed.get('name')
    config.name = name
    instructions = parsed.get('description', 'You are a helpful assistant.')
    config.instructions = instructions
    return name, instructions
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
# Copyright (c) Alexander Fedotov, 2026. All rights reserved.
|
|
2
|
+
name: Summarizing-Machine
|
|
3
|
+
description: The Assistant is Summarizing-Machine. Summarizing-Machine does its best to understand
|
|
4
|
+
  the conversation that it is participating in and answers the questions that it is
  asked in the most thoughtful way possible.
|
|
6
|
+
Summarizing-Machine responds in plain text without any markdown, emphasis or lists. All
|
|
7
|
+
paragraphs except the first should begin with a newline and tab.
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
import sys
|
|
9
|
+
from os import environ, path
|
|
10
|
+
from .githf import fetch_instructions
|
|
11
|
+
from .utilities import (plato_text_to_muj,
|
|
12
|
+
plato_text_to_mpuj,
|
|
13
|
+
plato_text_to_cmj,
|
|
14
|
+
llm_soup_to_text)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def machine(plato_text, config, **kwargs):
    """Core agent logic.

    1. Fetches the system prompt from a private GitHub repo.
    2. Transforms the dialogue into the provider's message format.
    3. Calls the provider and returns a (thoughts, text) tuple.

    Raises:
        ValueError: when config.provider is not a known provider name.
    """
    import importlib

    # Fetch the confidential system prompt, name is for a checkup.
    name, system_prompt = fetch_instructions(config)

    # provider -> (provider module, message transform, API-key env var,
    #              whether the returned thoughts need llm_soup_to_text).
    # BUG FIX: the CLI sets config.provider = 'XAI'; the original
    # dispatcher tested 'Xai', so the xAI branch was unreachable.
    registry = {
        'OpenAI':    ('openai',        plato_text_to_muj,  'OPENAI_API_KEY',    True),
        'Gemini':    ('castor_pollux', plato_text_to_mpuj, 'GEMINI_API_KEY',    True),
        'Anthropic': ('electroid',     plato_text_to_muj,  'ANTHROPIC_API_KEY', False),
        'Groq':      ('qrog',          plato_text_to_muj,  'GROQ_API_KEY',      True),
        'XAI':       ('strangelove',   plato_text_to_muj,  'XAI_API_KEY',       True),
        'DepSek':    ('depsek',        plato_text_to_cmj,  'DEPSEK_API_KEY',    False),
        'Meta':      ('camelids',      plato_text_to_cmj,  'META_API_KEY',      False),
    }

    provider = config.provider
    try:
        module_name, to_messages, env_var, clean_thoughts = registry[provider]
    except KeyError:
        # BUG FIX: the original fell off the end and returned None for an
        # unknown provider, making the CLI fail later with an opaque
        # tuple-unpacking TypeError.
        raise ValueError(f"Unknown provider: {provider!r}") from None

    # Transform plato_text to the provider's message format.
    messages = to_messages(plato_text=plato_text, machine_name=name)
    environ[env_var] = config.provider_api_key

    try:
        provider_module = importlib.import_module(
            f'.providers.{module_name}', __package__)
    except ImportError:
        print(f"{module_name} module is missing.", file=sys.stderr)
        sys.exit(1)

    # NOTE(review): every provider's respond() is assumed to return
    # (thoughts, text).  The original electroid call site unpacked the
    # pair as (text, thoughts) but returned it unchanged, so the tuple
    # order is preserved here as well -- confirm electroid's actual order.
    thoughts, text = provider_module.respond(
        messages=messages,
        instructions=system_prompt,
        **kwargs
    )

    if clean_thoughts:
        thoughts = llm_soup_to_text(thoughts)
    return thoughts, text


if __name__ == '__main__':
    print('You have launched main')
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
import urllib.request
|
|
9
|
+
import urllib.error
|
|
10
|
+
import json
|
|
11
|
+
from os import environ
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def respond(messages=None, instructions=None, **kwargs):
    """One chat completion against the Meta Llama API.

    Args:
        messages: list of {'role': ..., 'content': ...} dicts; may be None.
        instructions: system prompt; prepended as a 'system' message.

    Recognized kwargs:
        model, response_format,
        temperature = 0 to 1.0
        top_p = 0.0 to 1.0
        top_k = The maximum number of tokens to consider when sampling.
        max_tokens = number of tokens
        system_instruction = overrides `instructions`.

    Returns:
        ('', text) on success -- this endpoint exposes no separate
        thoughts channel -- or ('', '') on any HTTP/network error.
    """
    import sys

    api_key = environ.get('META_API_KEY', '')
    api_base = environ.get('META_API_BASE', 'https://api.llama.com/v1')
    content_model = environ.get('META_DEFAULT_CONTENT_MODEL',
                                'Llama-4-Maverick-17B-128E-Instruct-FP8')

    instruction = kwargs.get('system_instruction', instructions)
    first_message = [dict(role='system', content=instruction)] if instruction else []

    # BUG FIX: `messages` defaults to None and the original called
    # first_message.extend(None), a TypeError.  Treat None as "no context".
    first_message.extend(messages or [])
    instruction_and_contents = first_message

    # Define the payload
    payload = {
        'model': kwargs.get('model', content_model),
        'messages': instruction_and_contents,
        'response_format': kwargs.get('response_format', {'type': 'text'}),
        'temperature': kwargs.get('temperature', 1.0),  # 0.0 to 1.0
        # NOTE(review): 4028 looks like a typo for 4096 -- kept as-is to
        # preserve behavior; confirm the intended default.
        'max_completion_tokens': kwargs.get('max_tokens', 4028),
        'top_p': kwargs.get('top_p', 0.9),
        'top_k': kwargs.get('top_k', 10),
        'stream': False
    }

    # Convert data dictionary to JSON and encode it to bytes
    data_bytes = json.dumps(payload).encode('utf-8')

    # Set the mandatory headers
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
        "User-Agent": "Summarizing-Machine"
    }

    # Create the Request object
    req = urllib.request.Request(
        f'{api_base}/chat/completions',
        data=data_bytes,
        headers=headers,
        method="POST")

    try:
        # Execute the request
        with urllib.request.urlopen(req, timeout=300) as response:
            response_data = response.read().decode('utf-8')
            output = json.loads(response_data)
            text = output['completion_message']['content']['text']

        return '', text

    except urllib.error.HTTPError as e:
        # Handle HTTP errors (e.g., 401 Unauthorized, 400 Bad Request).
        # Diagnostics go to stderr so stdout stays clean for the result.
        error_info = e.read().decode('utf-8', errors='ignore')
        print(f"HTTP Error {e.code}: {e.reason}", file=sys.stderr)
        print(f"Error Details: {error_info}", file=sys.stderr)
        return '', ''

    except urllib.error.URLError as e:
        # Handle network/connection errors
        print(f"Failed to reach the server: {e.reason}", file=sys.stderr)
        return '', ''


if __name__ == '__main__':
    ...
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Python
|
|
3
|
+
|
|
4
|
+
"""Copyright (c) Alexander Fedotov.
|
|
5
|
+
This source code is licensed under the license found in the
|
|
6
|
+
LICENSE file in the root directory of this source tree.
|
|
7
|
+
"""
|
|
8
|
+
import urllib.request
|
|
9
|
+
import urllib.error
|
|
10
|
+
import urllib.parse
|
|
11
|
+
import json
|
|
12
|
+
from os import environ
|
|
13
|
+
from ..utilities import messages_to_mpj
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def respond(messages=None, instructions=None, **kwargs):
    """One generateContent call against the Google Gemini REST API.

    Args:
        messages: list of Gemini 'contents' entries
                  ({'role': ..., 'parts': [{'text': ...}]}).
        instructions: system prompt, sent as systemInstruction.

    Recognized kwargs: model, system_instruction, stop_sequences,
        mime_type, modalities, temperature, max_tokens, top_p, top_k,
        include_thoughts, thinking_budget / thinking_level, sources.

    Returns:
        (thoughts, text) on success, ('', '') on HTTP/network errors.

    Raises:
        Exception: when the answer was blocked by the safety filter.
    """
    api_key = environ.get('GEMINI_API_KEY', '')
    api_base = environ.get('GEMINI_API_BASE', 'https://generativelanguage.googleapis.com/v1beta')
    content_model = environ.get('GEMINI_DEFAULT_CONTENT_MODEL', 'gemma-4-31b-it')

    # Disable every safety filter category.
    garbage = [
        {'category': 'HARM_CATEGORY_HATE_SPEECH', 'threshold': 'BLOCK_NONE'},
        {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 'BLOCK_NONE'},
        {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'threshold': 'BLOCK_NONE'},
        {'category': 'HARM_CATEGORY_HARASSMENT', 'threshold': 'BLOCK_NONE'},
        {'category': 'HARM_CATEGORY_CIVIC_INTEGRITY', 'threshold': 'BLOCK_NONE'}
    ]

    instructions = kwargs.get('system_instruction', instructions)
    system_instruction = dict(role='system', parts=[dict(text=instructions)]) if instructions else None

    # Trickery for thinking models: the config shape differs per family.
    thinking_config = None
    model = kwargs.get("model", content_model)
    if model.startswith('gemini-2.5'):
        thinking_config = {
            'includeThoughts': kwargs.get('include_thoughts', True),
            'thinkingBudget': kwargs.get('thinking_budget', 10000)
        }
    elif model.startswith('gemini-3'):
        thinking_config = {
            'includeThoughts': kwargs.get('include_thoughts', True),
            'thinkingLevel': kwargs.get('thinking_level', 'high')
        }
    elif model.startswith('gemma-4'):
        thinking_config = {
            'includeThoughts': kwargs.get('include_thoughts', True)
        }

    # Define the payload
    payload = {
        'systemInstruction': system_instruction,
        'contents': messages,
        'safetySettings': garbage,
        'generationConfig': {
            'stopSequences': kwargs.get('stop_sequences', ['STOP', 'Title']),
            'responseMimeType': kwargs.get('mime_type', 'text/plain'),
            'responseModalities': kwargs.get('modalities', ['TEXT']),
            'temperature': kwargs.get('temperature', 1.0),
            'maxOutputTokens': kwargs.get('max_tokens', 10000),
            'topP': kwargs.get('top_p', 0.9),
            'topK': kwargs.get('top_k', 10),
            'enableEnhancedCivicAnswers': False,
        },
    }
    if thinking_config:
        payload['generationConfig']['thinkingConfig'] = thinking_config
    if kwargs.get('sources'):
        # BUG FIX: the payload never had a 'tools' key, so the original
        # payload['tools'].append(...) raised KeyError whenever the
        # 'sources' kwarg was passed.
        payload.setdefault('tools', []).append(
            {
                "url_context": {}
            }
        )

    # Convert data dictionary to JSON and encode it to bytes
    data_bytes = json.dumps(payload).encode('utf-8')

    # Set the mandatory headers
    headers = {
        "Content-Type": "application/json",
        "User-Agent": "Summarizing-Machine"
    }

    # The API key travels as a query parameter, not a header.
    params = urllib.parse.urlencode({'key': api_key})

    # Create the Request object
    req = urllib.request.Request(
        f'{api_base}/models/{model}:generateContent?{params}',
        data=data_bytes,
        headers=headers,
        method="POST")

    try:
        # Execute the request
        with urllib.request.urlopen(req, timeout=300) as response:
            response_data = response.read().decode('utf-8')
            output = json.loads(response_data)
            text = ''
            thoughts = ''
            if output['candidates'][0]['finishReason'] == 'SAFETY':
                raise Exception('Answer censored by Google.')
            # Thought parts are flagged with 'thought': True.
            for part in output['candidates'][0]['content']['parts']:
                if part.get('thought'):
                    thoughts += part['text']
                else:
                    text += part['text']

            return thoughts, text

    except urllib.error.HTTPError as e:
        # Handle HTTP errors (e.g., 401 Unauthorized, 400 Bad Request)
        error_info = e.read().decode('utf-8', errors='ignore')
        print(f"HTTP Error {e.code}: {e.reason}")
        print(f"Error Details: {error_info}")
        return '', ''

    except urllib.error.URLError as e:
        # Handle network/connection errors
        print(f"Failed to reach the server: {e.reason}")
        return '', ''


if __name__ == '__main__':
    ...
|