agent-framework-devui 1.0.0b251001__py3-none-any.whl → 1.0.0b251007__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of agent-framework-devui has been flagged as potentially problematic; see the registry's advisory page for details.
- agent_framework_devui/_discovery.py +98 -0
- agent_framework_devui/_executor.py +88 -81
- agent_framework_devui/_mapper.py +37 -61
- agent_framework_devui/_server.py +62 -15
- agent_framework_devui/_session.py +3 -3
- agent_framework_devui/_utils.py +421 -0
- agent_framework_devui/models/_discovery_models.py +7 -0
- agent_framework_devui/ui/agentframework.svg +33 -0
- agent_framework_devui/ui/assets/index-D0SfShuZ.js +445 -0
- agent_framework_devui/ui/assets/index-WsCIE0bH.css +1 -0
- agent_framework_devui/ui/index.html +3 -3
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251007.dist-info}/METADATA +3 -3
- agent_framework_devui-1.0.0b251007.dist-info/RECORD +22 -0
- agent_framework_devui/ui/assets/index-D1AmQWga.css +0 -1
- agent_framework_devui/ui/assets/index-DPEaaIdK.js +0 -435
- agent_framework_devui-1.0.0b251001.dist-info/RECORD +0 -20
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251007.dist-info}/WHEEL +0 -0
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251007.dist-info}/entry_points.txt +0 -0
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251007.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
# Copyright (c) Microsoft. All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""Utility functions for DevUI."""
|
|
4
|
+
|
|
5
|
+
import inspect
import json
import logging
from dataclasses import MISSING, fields, is_dataclass
from typing import Any, get_args, get_origin
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
# ============================================================================
|
|
14
|
+
# Type System Utilities
|
|
15
|
+
# ============================================================================
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def is_serialization_mixin(cls: type) -> bool:
    """Determine whether *cls* derives from SerializationMixin.

    Args:
        cls: Candidate object to test (usually a class).

    Returns:
        True when cls is a class object subclassing SerializationMixin;
        False otherwise, including when agent_framework is not importable.
    """
    try:
        from agent_framework._serialization import SerializationMixin
    except ImportError:
        # agent_framework unavailable -> nothing can be a mixin subclass.
        return False
    return isinstance(cls, type) and issubclass(cls, SerializationMixin)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _type_to_schema(type_hint: Any, field_name: str) -> dict[str, Any]:
|
|
36
|
+
"""Convert a type hint to JSON schema.
|
|
37
|
+
|
|
38
|
+
Args:
|
|
39
|
+
type_hint: Type hint to convert
|
|
40
|
+
field_name: Name of the field (for documentation)
|
|
41
|
+
|
|
42
|
+
Returns:
|
|
43
|
+
JSON schema dict
|
|
44
|
+
"""
|
|
45
|
+
type_str = str(type_hint)
|
|
46
|
+
|
|
47
|
+
# Handle None/Optional
|
|
48
|
+
if type_hint is type(None):
|
|
49
|
+
return {"type": "null"}
|
|
50
|
+
|
|
51
|
+
# Handle basic types
|
|
52
|
+
if type_hint is str or "str" in type_str:
|
|
53
|
+
return {"type": "string"}
|
|
54
|
+
if type_hint is int or "int" in type_str:
|
|
55
|
+
return {"type": "integer"}
|
|
56
|
+
if type_hint is float or "float" in type_str:
|
|
57
|
+
return {"type": "number"}
|
|
58
|
+
if type_hint is bool or "bool" in type_str:
|
|
59
|
+
return {"type": "boolean"}
|
|
60
|
+
|
|
61
|
+
# Handle Literal types (for enum-like values)
|
|
62
|
+
if "Literal" in type_str:
|
|
63
|
+
origin = get_origin(type_hint)
|
|
64
|
+
if origin is not None:
|
|
65
|
+
args = get_args(type_hint)
|
|
66
|
+
if args:
|
|
67
|
+
return {"type": "string", "enum": list(args)}
|
|
68
|
+
|
|
69
|
+
# Handle Union/Optional
|
|
70
|
+
if "Union" in type_str or "Optional" in type_str:
|
|
71
|
+
origin = get_origin(type_hint)
|
|
72
|
+
if origin is not None:
|
|
73
|
+
args = get_args(type_hint)
|
|
74
|
+
# Filter out None type
|
|
75
|
+
non_none_args = [arg for arg in args if arg is not type(None)]
|
|
76
|
+
if len(non_none_args) == 1:
|
|
77
|
+
return _type_to_schema(non_none_args[0], field_name)
|
|
78
|
+
# Multiple types - pick first non-None
|
|
79
|
+
if non_none_args:
|
|
80
|
+
return _type_to_schema(non_none_args[0], field_name)
|
|
81
|
+
|
|
82
|
+
# Handle collections
|
|
83
|
+
if "list" in type_str or "List" in type_str or "Sequence" in type_str:
|
|
84
|
+
origin = get_origin(type_hint)
|
|
85
|
+
if origin is not None:
|
|
86
|
+
args = get_args(type_hint)
|
|
87
|
+
if args:
|
|
88
|
+
items_schema = _type_to_schema(args[0], field_name)
|
|
89
|
+
return {"type": "array", "items": items_schema}
|
|
90
|
+
return {"type": "array"}
|
|
91
|
+
|
|
92
|
+
if "dict" in type_str or "Dict" in type_str or "Mapping" in type_str:
|
|
93
|
+
return {"type": "object"}
|
|
94
|
+
|
|
95
|
+
# Default fallback
|
|
96
|
+
return {"type": "string", "description": f"Type: {type_hint}"}
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def generate_schema_from_serialization_mixin(cls: type[Any]) -> dict[str, Any]:
    """Generate JSON schema from SerializationMixin class.

    Introspects the __init__ signature to extract parameter types and defaults.

    Args:
        cls: SerializationMixin subclass

    Returns:
        JSON schema dict
    """
    # Resolve annotations; fall back to an empty mapping on any failure
    # (e.g. unresolvable forward references).
    try:
        from typing import get_type_hints

        hints = get_type_hints(cls)
    except Exception:
        hints = {}

    props: dict[str, Any] = {}
    mandatory: list[str] = []

    for name, parameter in inspect.signature(cls).parameters.items():
        # Skip the receiver and catch-all keyword bags.
        if name in ("self", "kwargs"):
            continue

        # Unannotated parameters default to str.
        props[name] = _type_to_schema(hints.get(name, str), name)

        # A parameter is required when it has no default and is not **kwargs.
        has_default = parameter.default != inspect.Parameter.empty
        if not has_default and parameter.kind != inspect.Parameter.VAR_KEYWORD:
            mandatory.append(name)

    result: dict[str, Any] = {"type": "object", "properties": props}
    if mandatory:
        result["required"] = mandatory
    return result
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def generate_schema_from_dataclass(cls: type[Any]) -> dict[str, Any]:
    """Generate JSON schema from dataclass.

    Args:
        cls: Dataclass type

    Returns:
        JSON schema dict
    """
    if not is_dataclass(cls):
        # Not a dataclass: nothing to introspect, describe as a bare object.
        return {"type": "object"}

    properties: dict[str, Any] = {}
    required: list[str] = []

    for field in fields(cls):
        # Generate schema for field type
        properties[field.name] = _type_to_schema(field.type, field.name)

        # A field is required only when it has neither a default value nor a
        # default_factory. The previous check (`field.default ==
        # field.default_factory`) was only correct by accident — both sides
        # are the MISSING sentinel when unset — and could misfire for a
        # default whose custom __eq__ compares truthy against MISSING.
        if field.default is MISSING and field.default_factory is MISSING:
            required.append(field.name)

    schema: dict[str, Any] = {"type": "object", "properties": properties}

    if required:
        schema["required"] = required

    return schema
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def generate_input_schema(input_type: type) -> dict[str, Any]:
    """Generate JSON schema for workflow input type.

    Supports multiple input types in priority order:
    1. Built-in types (str, dict, int, etc.)
    2. Pydantic models (via model_json_schema)
    3. SerializationMixin classes (via __init__ introspection)
    4. Dataclasses (via fields introspection)
    5. Fallback to string

    Args:
        input_type: Input type to generate schema for

    Returns:
        JSON schema dict
    """
    # 1. Built-in types map directly to a JSON schema type keyword.
    for builtin, schema in (
        (str, {"type": "string"}),
        (dict, {"type": "object"}),
        (int, {"type": "integer"}),
        (float, {"type": "number"}),
        (bool, {"type": "boolean"}),
    ):
        if input_type is builtin:
            return schema

    # 2. Pydantic models (legacy support)
    if hasattr(input_type, "model_json_schema"):
        return input_type.model_json_schema()  # type: ignore

    # 3. SerializationMixin classes (ChatMessage, etc.)
    if is_serialization_mixin(input_type):
        return generate_schema_from_serialization_mixin(input_type)

    # 4. Dataclasses
    if is_dataclass(input_type):
        return generate_schema_from_dataclass(input_type)

    # 5. Fallback: describe the unknown type by name as a string.
    display_name = getattr(input_type, "__name__", str(input_type))
    return {"type": "string", "description": f"Input type: {display_name}"}
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
# ============================================================================
|
|
224
|
+
# Input Parsing Utilities
|
|
225
|
+
# ============================================================================
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def parse_input_for_type(input_data: Any, target_type: type) -> Any:
    """Parse input data to match the target type.

    Handles conversion from raw input (string, dict) to the expected type:
    - Built-in types: direct conversion
    - Pydantic models: use model_validate or model_validate_json
    - SerializationMixin: use from_dict or construct from string
    - Dataclasses: construct from dict

    Args:
        input_data: Raw input data (string, dict, or already correct type)
        target_type: Expected type for the input

    Returns:
        Parsed input matching target_type, or original input if parsing fails
    """
    # Only attempt a conversion when the value is not already of the
    # expected type; strings and dicts each have a dedicated parser.
    if not isinstance(input_data, target_type):
        if isinstance(input_data, str):
            return _parse_string_input(input_data, target_type)
        if isinstance(input_data, dict):
            return _parse_dict_input(input_data, target_type)
    # Correct type already, or no conversion path: pass through unchanged.
    return input_data
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
def _parse_string_input(input_str: str, target_type: type) -> Any:
    """Parse string input to target type.

    Tries, in order: built-in scalar coercion, Pydantic construction
    (JSON or common field names), SerializationMixin construction, and
    dataclass construction. Every failure path falls through, ultimately
    returning the original string unchanged.

    Args:
        input_str: Input string
        target_type: Target type

    Returns:
        Parsed input or original string
    """
    # Built-in types
    if target_type is str:
        return input_str
    if target_type is int:
        try:
            return int(input_str)
        except ValueError:
            # Not a valid integer literal -> hand back the raw string.
            return input_str
    elif target_type is float:
        try:
            return float(input_str)
        except ValueError:
            return input_str
    elif target_type is bool:
        # Anything outside this whitelist (case-insensitive) is False.
        return input_str.lower() in ("true", "1", "yes")

    # Pydantic models
    if hasattr(target_type, "model_validate_json"):
        try:
            # Try parsing as JSON first
            if input_str.strip().startswith("{"):
                return target_type.model_validate_json(input_str)  # type: ignore

            # Try common field names with the string value
            common_fields = ["text", "message", "content", "input", "data"]
            for field in common_fields:
                try:
                    return target_type(**{field: input_str})  # type: ignore
                except Exception as e:
                    logger.debug(f"Failed to parse string input with field '{field}': {e}")
                    continue
        except Exception as e:
            logger.debug(f"Failed to parse string as Pydantic model: {e}")

    # SerializationMixin (like ChatMessage)
    if is_serialization_mixin(target_type):
        try:
            # Try parsing as JSON dict first
            if input_str.strip().startswith("{"):
                data = json.loads(input_str)
                if hasattr(target_type, "from_dict"):
                    return target_type.from_dict(data)  # type: ignore
                return target_type(**data)  # type: ignore

            # For ChatMessage specifically: create from text
            # Try common field patterns
            common_fields = ["text", "message", "content"]
            sig = inspect.signature(target_type)
            params = list(sig.parameters.keys())

            # If it has 'text' param, use it
            if "text" in params:
                try:
                    return target_type(role="user", text=input_str)  # type: ignore
                except Exception as e:
                    logger.debug(f"Failed to create SerializationMixin with text field: {e}")

            # Try other common fields
            # NOTE: "text" is in common_fields too, so a failed role="user"
            # attempt above is retried here without the role argument.
            for field in common_fields:
                if field in params:
                    try:
                        return target_type(**{field: input_str})  # type: ignore
                    except Exception as e:
                        logger.debug(f"Failed to create SerializationMixin with field '{field}': {e}")
                        continue
        except Exception as e:
            logger.debug(f"Failed to parse string as SerializationMixin: {e}")

    # Dataclasses
    if is_dataclass(target_type):
        try:
            # Try parsing as JSON
            if input_str.strip().startswith("{"):
                data = json.loads(input_str)
                return target_type(**data)  # type: ignore

            # Try common field names
            common_fields = ["text", "message", "content", "input", "data"]
            for field in common_fields:
                try:
                    return target_type(**{field: input_str})  # type: ignore
                except Exception as e:
                    logger.debug(f"Failed to create dataclass with field '{field}': {e}")
                    continue
        except Exception as e:
            logger.debug(f"Failed to parse string as dataclass: {e}")

    # Fallback: return original string
    return input_str
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def _parse_dict_input(input_dict: dict[str, Any], target_type: type) -> Any:
|
|
362
|
+
"""Parse dict input to target type.
|
|
363
|
+
|
|
364
|
+
Args:
|
|
365
|
+
input_dict: Input dictionary
|
|
366
|
+
target_type: Target type
|
|
367
|
+
|
|
368
|
+
Returns:
|
|
369
|
+
Parsed input or original dict
|
|
370
|
+
"""
|
|
371
|
+
# Handle primitive types - extract from common field names
|
|
372
|
+
if target_type in (str, int, float, bool):
|
|
373
|
+
try:
|
|
374
|
+
# If it's already the right type, return as-is
|
|
375
|
+
if isinstance(input_dict, target_type):
|
|
376
|
+
return input_dict
|
|
377
|
+
|
|
378
|
+
# Try "input" field first (common for workflow inputs)
|
|
379
|
+
if "input" in input_dict:
|
|
380
|
+
return target_type(input_dict["input"]) # type: ignore
|
|
381
|
+
|
|
382
|
+
# If single-key dict, extract the value
|
|
383
|
+
if len(input_dict) == 1:
|
|
384
|
+
value = next(iter(input_dict.values()))
|
|
385
|
+
return target_type(value) # type: ignore
|
|
386
|
+
|
|
387
|
+
# Otherwise, return as-is
|
|
388
|
+
return input_dict
|
|
389
|
+
except (ValueError, TypeError) as e:
|
|
390
|
+
logger.debug(f"Failed to convert dict to {target_type}: {e}")
|
|
391
|
+
return input_dict
|
|
392
|
+
|
|
393
|
+
# If target is dict, return as-is
|
|
394
|
+
if target_type is dict:
|
|
395
|
+
return input_dict
|
|
396
|
+
|
|
397
|
+
# Pydantic models
|
|
398
|
+
if hasattr(target_type, "model_validate"):
|
|
399
|
+
try:
|
|
400
|
+
return target_type.model_validate(input_dict) # type: ignore
|
|
401
|
+
except Exception as e:
|
|
402
|
+
logger.debug(f"Failed to validate dict as Pydantic model: {e}")
|
|
403
|
+
|
|
404
|
+
# SerializationMixin
|
|
405
|
+
if is_serialization_mixin(target_type):
|
|
406
|
+
try:
|
|
407
|
+
if hasattr(target_type, "from_dict"):
|
|
408
|
+
return target_type.from_dict(input_dict) # type: ignore
|
|
409
|
+
return target_type(**input_dict) # type: ignore
|
|
410
|
+
except Exception as e:
|
|
411
|
+
logger.debug(f"Failed to parse dict as SerializationMixin: {e}")
|
|
412
|
+
|
|
413
|
+
# Dataclasses
|
|
414
|
+
if is_dataclass(target_type):
|
|
415
|
+
try:
|
|
416
|
+
return target_type(**input_dict) # type: ignore
|
|
417
|
+
except Exception as e:
|
|
418
|
+
logger.debug(f"Failed to parse dict as dataclass: {e}")
|
|
419
|
+
|
|
420
|
+
# Fallback: return original dict
|
|
421
|
+
return input_dict
|
|
@@ -37,6 +37,13 @@ class EntityInfo(BaseModel):
|
|
|
37
37
|
# Environment variable requirements
|
|
38
38
|
required_env_vars: list[EnvVarRequirement] | None = None
|
|
39
39
|
|
|
40
|
+
# Agent-specific fields (optional, populated when available)
|
|
41
|
+
instructions: str | None = None
|
|
42
|
+
model_id: str | None = None
|
|
43
|
+
chat_client_type: str | None = None
|
|
44
|
+
context_providers: list[str] | None = None
|
|
45
|
+
middleware: list[str] | None = None
|
|
46
|
+
|
|
40
47
|
# Workflow-specific fields (populated only for detailed info requests)
|
|
41
48
|
executors: list[str] | None = None
|
|
42
49
|
workflow_dump: dict[str, Any] | None = None
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
<svg width="805" height="805" viewBox="0 0 805 805" fill="none" xmlns="http://www.w3.org/2000/svg">
|
|
2
|
+
<g filter="url(#filter0_iii_510_1294)">
|
|
3
|
+
<path d="M402.488 119.713C439.197 119.713 468.955 149.472 468.955 186.18C468.955 192.086 471.708 197.849 476.915 200.635L546.702 237.977C555.862 242.879 566.95 240.96 576.092 236.023C585.476 230.955 596.218 228.078 607.632 228.078C644.341 228.078 674.098 257.836 674.099 294.545C674.099 316.95 663.013 336.765 646.028 348.806C637.861 354.595 631.412 363.24 631.412 373.251V430.818C631.412 440.83 637.861 449.475 646.028 455.264C663.013 467.305 674.099 487.121 674.099 509.526C674.099 546.235 644.341 575.994 607.632 575.994C598.598 575.994 589.985 574.191 582.133 570.926C573.644 567.397 563.91 566.393 555.804 570.731L469.581 616.867C469.193 617.074 468.955 617.479 468.955 617.919C468.955 654.628 439.197 684.386 402.488 684.386C365.779 684.386 336.021 654.628 336.021 617.919C336.021 616.802 335.423 615.765 334.439 615.238L249.895 570C241.61 565.567 231.646 566.713 223.034 570.472C214.898 574.024 205.914 575.994 196.47 575.994C159.761 575.994 130.002 546.235 130.002 509.526C130.002 486.66 141.549 466.49 159.13 454.531C167.604 448.766 174.349 439.975 174.349 429.726V372.538C174.349 362.289 167.604 353.498 159.13 347.734C141.549 335.774 130.002 315.604 130.002 292.738C130.002 256.029 159.761 226.271 196.47 226.271C208.223 226.271 219.263 229.322 228.843 234.674C238.065 239.827 249.351 241.894 258.666 236.91L328.655 199.459C333.448 196.895 336.021 191.616 336.021 186.18C336.021 149.471 365.779 119.713 402.488 119.713ZM475.716 394.444C471.337 396.787 468.955 401.586 468.955 406.552C468.955 429.68 457.142 450.048 439.221 461.954C430.571 467.7 423.653 476.574 423.653 486.959V537.511C423.653 547.896 430.746 556.851 439.379 562.622C449 569.053 461.434 572.052 471.637 566.592L527.264 536.826C536.887 531.677 541.164 520.44 541.164 509.526C541.164 485.968 553.42 465.272 571.904 453.468C580.846 447.757 588.054 438.749 588.054 428.139V371.427C588.054 363.494 582.671 356.676 575.716 352.862C569.342 349.366 561.663 348.454 555.253 351.884L475.716 394.444ZM247.992 349.841C241.997 346.633 
234.806 347.465 228.873 350.785C222.524 354.337 217.706 360.639 217.706 367.915V429.162C217.706 439.537 224.611 448.404 233.248 454.152C251.144 466.062 262.937 486.417 262.937 509.526C262.937 519.654 267.026 529.991 275.955 534.769L334.852 566.284C344.582 571.49 356.362 568.81 365.528 562.667C373.735 557.166 380.296 548.643 380.296 538.764V486.305C380.296 476.067 373.564 467.282 365.103 461.516C347.548 449.552 336.021 429.398 336.021 406.552C336.021 400.967 333.389 395.536 328.465 392.902L247.992 349.841ZM270.019 280.008C265.421 282.469 262.936 287.522 262.937 292.738C262.937 293.308 262.929 293.876 262.915 294.443C262.615 306.354 266.961 318.871 277.466 324.492L334.017 354.751C344.13 360.163 356.442 357.269 366.027 350.969C376.495 344.088 389.024 340.085 402.488 340.085C416.203 340.085 428.947 344.239 439.532 351.357C449.163 357.834 461.63 360.861 471.864 355.385L526.625 326.083C537.106 320.474 541.458 307.999 541.182 296.115C541.17 295.593 541.164 295.069 541.164 294.545C541.164 288.551 538.376 282.696 533.091 279.868L463.562 242.664C454.384 237.753 443.274 239.688 434.123 244.65C424.716 249.75 413.941 252.647 402.488 252.647C390.83 252.647 379.873 249.646 370.348 244.373C361.148 239.281 349.917 237.256 340.646 242.217L270.019 280.008Z" fill="url(#paint0_linear_510_1294)"/>
|
|
4
|
+
</g>
|
|
5
|
+
<defs>
|
|
6
|
+
<filter id="filter0_iii_510_1294" x="103.759" y="93.4694" width="578.735" height="599.314" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
|
|
7
|
+
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
|
|
8
|
+
<feBlend mode="normal" in="SourceGraphic" in2="BackgroundImageFix" result="shape"/>
|
|
9
|
+
<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0" result="hardAlpha"/>
|
|
10
|
+
<feOffset dx="8.39647" dy="8.39647"/>
|
|
11
|
+
<feGaussianBlur stdDeviation="20.9912"/>
|
|
12
|
+
<feComposite in2="hardAlpha" operator="arithmetic" k2="-1" k3="1"/>
|
|
13
|
+
<feColorMatrix type="matrix" values="0 0 0 0 0.835294 0 0 0 0 0.623529 0 0 0 0 1 0 0 0 1 0"/>
|
|
14
|
+
<feBlend mode="normal" in2="shape" result="effect1_innerShadow_510_1294"/>
|
|
15
|
+
<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0" result="hardAlpha"/>
|
|
16
|
+
<feOffset dx="-26.2432" dy="-26.2432"/>
|
|
17
|
+
<feGaussianBlur stdDeviation="20.9912"/>
|
|
18
|
+
<feComposite in2="hardAlpha" operator="arithmetic" k2="-1" k3="1"/>
|
|
19
|
+
<feColorMatrix type="matrix" values="0 0 0 0 0.368627 0 0 0 0 0.262745 0 0 0 0 0.564706 0 0 0 0.3 0"/>
|
|
20
|
+
<feBlend mode="plus-darker" in2="effect1_innerShadow_510_1294" result="effect2_innerShadow_510_1294"/>
|
|
21
|
+
<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0" result="hardAlpha"/>
|
|
22
|
+
<feOffset dx="-26.2432" dy="-26.2432"/>
|
|
23
|
+
<feGaussianBlur stdDeviation="50"/>
|
|
24
|
+
<feComposite in2="hardAlpha" operator="arithmetic" k2="-1" k3="1"/>
|
|
25
|
+
<feColorMatrix type="matrix" values="0 0 0 0 0.368627 0 0 0 0 0.262745 0 0 0 0 0.564706 0 0 0 0.1 0"/>
|
|
26
|
+
<feBlend mode="plus-darker" in2="effect2_innerShadow_510_1294" result="effect3_innerShadow_510_1294"/>
|
|
27
|
+
</filter>
|
|
28
|
+
<linearGradient id="paint0_linear_510_1294" x1="255.628" y1="-34.3245" x2="618.483" y2="632.032" gradientUnits="userSpaceOnUse">
|
|
29
|
+
<stop stop-color="#D59FFF"/>
|
|
30
|
+
<stop offset="1" stop-color="#8562C5"/>
|
|
31
|
+
</linearGradient>
|
|
32
|
+
</defs>
|
|
33
|
+
</svg>
|