flowstate-sdk 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective registries. It is provided for informational purposes only.
flowstate_sdk/context.py CHANGED
@@ -7,3 +7,6 @@ run_stack: ContextVar[List[str]] = ContextVar("run_stack", default=[])
  parent_children_mappings: ContextVar[Dict[str, Set[str]]] = ContextVar(
      "parent_children_mappings", default=defaultdict(set)
  )
+ is_replay: ContextVar[bool] = ContextVar("is_replay", default=False)
+ replay_counter: ContextVar[int] = ContextVar("replay_counter", default=0)
+ replay_step_list: ContextVar[List[str]] = ContextVar("replay_step_list", default=[])
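
Taken together, the three new ContextVars describe a replay mechanism: a flag, a cursor, and an ordered plan of recorded steps. A minimal sketch of how a consumer could walk that plan (next_replay_step is a hypothetical helper, not SDK API; it assumes the counter advances by one per consumed step):

    from flowstate_sdk import context

    def next_replay_step():
        # Hypothetical: return the next recorded step id while a replay is
        # active, else None.
        if not context.is_replay.get():
            return None
        plan = context.replay_step_list.get()
        cursor = context.replay_counter.get()
        if cursor >= len(plan):
            return None
        context.replay_counter.set(cursor + 1)  # advance the replay cursor
        return plan[cursor]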
flowstate_sdk/langchain/__init__.py ADDED
@@ -0,0 +1,15 @@
+ from flowstate_sdk.langchain.callback_handler import FlowstateCallbackHandler
+ from flowstate_sdk.langchain.chat_models import (
+     FlowstateChatAnthropic,
+     FlowstateChatClaude,
+     FlowstateChatGoogle,
+     FlowstateChatOpenAI,
+ )
+
+ __all__ = [
+     "FlowstateCallbackHandler",
+     "FlowstateChatOpenAI",
+     "FlowstateChatAnthropic",
+     "FlowstateChatClaude",
+     "FlowstateChatGoogle",
+ ]
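
With the new subpackage in place, the wrappers and handler are importable from flowstate_sdk.langchain. A usage sketch (the model name is illustrative; per the handler code below, invocations must happen inside an active task step):

    from flowstate_sdk.langchain import FlowstateChatOpenAI

    # Behaves like ChatOpenAI but auto-attaches a FlowstateCallbackHandler.
    llm = FlowstateChatOpenAI(model="gpt-4o-mini")
    # llm.invoke("Hello")  # inside a @sdk_task / run.step() context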
flowstate_sdk/langchain/callback_handler.py CHANGED
@@ -1,481 +1,3 @@
- import os
- import time
- import uuid
- from typing import Any, Dict, Iterable, List, Optional, Tuple
+ from flowstate_sdk.langchain.telemetry import FlowstateCallbackHandler
 
- from flowstate_sdk import context
- from flowstate_sdk.cost_table import COST_TABLE
- from flowstate_sdk.enums import TaskTypes
- from flowstate_sdk.shared_dataclasses import ProviderMetrics
- from flowstate_sdk.task_context import TaskContext
- from langchain_core.callbacks import BaseCallbackHandler
- from langchain_core.outputs import LLMResult
- from langchain_core.messages.human import HumanMessage
- from langchain_core.messages.system import SystemMessage
-
-
- def _get_messages_from_kwargs(kwargs: Dict[str, Any]) -> Optional[List[Any]]:
-     msgs = _unwrap_messages(kwargs.get("messages"))
-     if msgs:
-         return msgs
-
-     inv = kwargs.get("invocation_params")
-     if isinstance(inv, dict):
-         msgs = _unwrap_messages(inv.get("messages"))
-         if msgs:
-             return msgs
-
-     inputs = kwargs.get("inputs")
-     if isinstance(inputs, dict):
-         msgs = _unwrap_messages(inputs.get("messages"))
-         if msgs:
-             return msgs
-
-     inp = kwargs.get("input")
-     if isinstance(inp, dict):
-         msgs = _unwrap_messages(inp.get("messages"))
-         if msgs:
-             return msgs
-
-     return None
-
-
- def _unwrap_messages(messages: Any) -> Optional[List[Any]]:
-     if messages is None:
-         return None
-     if isinstance(messages, (list, tuple)):
-         return list(messages)
-     if hasattr(messages, "messages"):
-         try:
-             inner = getattr(messages, "messages")
-         except Exception:
-             inner = None
-         if isinstance(inner, (list, tuple)):
-             return list(inner)
-     return None
-
-
- def _normalize_messages(messages: Any) -> List[Any]:
-     """
-     LangChain may pass:
-     - List[List[BaseMessage]] (batched)
-     - List[BaseMessage]
-     - List[dict] (OpenAI-style: {"role": "...", "content": ...})
-     This normalizes to a single flat List[Any] of messages for the first batch item.
-     """
-     if not messages:
-         return []
-     if isinstance(messages, (list, tuple)):
-         # If it's a batch: [[msg1, msg2, ...], [msg1, ...]]
-         if messages and isinstance(messages[0], (list, tuple)):
-             return list(messages[0])
-         return list(messages)
-     if hasattr(messages, "messages"):
-         inner = _unwrap_messages(messages)
-         if inner:
-             if inner and isinstance(inner[0], (list, tuple)):
-                 return list(inner[0])
-             return list(inner)
-     return []
-
-
- def _content_to_text(content: Any) -> str:
-     """
-     Convert message content into text.
-     Supports:
-     - str
-     - list parts (OpenAI multimodal / tool-like parts)
-     - dict with "text"
-     - anything else -> str(...)
-     """
-     if content is None:
-         return ""
-     if isinstance(content, str):
-         return content
-     if isinstance(content, list):
-         out: List[str] = []
-         for part in content:
-             if isinstance(part, str):
-                 out.append(part)
-             elif isinstance(part, dict):
-                 if isinstance(part.get("text"), str):
-                     out.append(part["text"])
-                 else:
-                     out.append(str(part))
-             else:
-                 out.append(str(part))
-         return "\n".join(out)
-     if isinstance(content, dict):
-         if isinstance(content.get("text"), str):
-             return content["text"]
-         return str(content)
-     return str(content)
-
-
- def _msg_type_and_content(m: Any) -> Tuple[Optional[str], Any]:
-     """
-     Extract (type/role, content) from either:
-     - LangChain BaseMessage objects (m.type, m.content)
-     - OpenAI-style dicts ({'role': 'user'|'system'|'assistant', 'content': ...})
-     Normalize role names to LangChain-like:
-     user -> human
-     assistant -> ai
-     """
-
-     if isinstance(m, SystemMessage):
-         return "system", m.content
-     if isinstance(m, HumanMessage):
-         return "human", m.content
-
-     # LangChain message objects
-     if hasattr(m, "type") or hasattr(m, "content"):
-         msg_type = getattr(m, "type", None)
-         content = getattr(m, "content", None)
-         role = getattr(m, "role", None) if hasattr(m, "role") else None
-         if msg_type in (None, "chat") and role:
-             msg_type = role
-         if msg_type == "user":
-             msg_type = "human"
-         elif msg_type == "assistant":
-             msg_type = "ai"
-         return msg_type, content
-
-     # Dict messages (OpenAI-style)
-     if isinstance(m, dict):
-         role = m.get("role") or m.get("type")
-         content = m.get("content")
-         if role == "user":
-             role = "human"
-         elif role == "assistant":
-             role = "ai"
-         return role, content
-
-     return None, None
-
-
- def _extract_system_and_user_from_messages(
-     msgs: Optional[List[Any]],
- ) -> Tuple[Optional[str], Optional[str]]:
-     if not msgs:
-         return None, None
-
-     system_parts: List[str] = []
-     user_parts: List[str] = []
-
-     for m in msgs:
-         m_type, content = _msg_type_and_content(m)
-         text = _content_to_text(content).strip()
-         if not text:
-             continue
-
-         if m_type == "system":
-             system_parts.append(text)
-         elif m_type == "human":
-             user_parts.append(text)
-
-     system_prompt = "\n\n".join(system_parts).strip() or None
-     user_prompt = "\n\n".join(user_parts).strip() or None
-     return system_prompt, user_prompt
-
-
- def _coerce_int(value: Any) -> Optional[int]:
-     if value is None:
-         return None
-     try:
-         if isinstance(value, bool):
-             return int(value)
-         if isinstance(value, (int, float)):
-             return int(value)
-         if isinstance(value, str) and value.strip() != "":
-             return int(float(value))
-     except Exception:
-         return None
-     return None
-
-
- def _iter_usage_dicts(candidates: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
-     for data in candidates:
-         if not isinstance(data, dict):
-             continue
-         yield data
-         for key in ("token_usage", "usage", "usage_metadata"):
-             nested = data.get(key)
-             if isinstance(nested, dict):
-                 yield nested
-
-
- def _extract_token_usage(candidates: Iterable[Dict[str, Any]]) -> Tuple[Optional[int], Optional[int]]:
-     input_tokens: Optional[int] = None
-     output_tokens: Optional[int] = None
-     for usage in _iter_usage_dicts(candidates):
-         if input_tokens is None:
-             input_tokens = _coerce_int(usage.get("input_tokens"))
-         if input_tokens is None:
-             input_tokens = _coerce_int(usage.get("prompt_tokens"))
-         if output_tokens is None:
-             output_tokens = _coerce_int(usage.get("output_tokens"))
-         if output_tokens is None:
-             output_tokens = _coerce_int(usage.get("completion_tokens"))
-         if input_tokens is not None and output_tokens is not None:
-             break
-     return input_tokens, output_tokens
-
-
- def _normalize_model_name(model: Optional[str]) -> Optional[str]:
-     if not model or not isinstance(model, str):
-         return model
-     model = model.strip()
-     if model.startswith("gpt") and not model.startswith("gpt-"):
-         if len(model) > 3 and model[3].isdigit():
-             model = "gpt-" + model[3:]
-     return model
-
-
- def _is_tool_call_message(message: Any, gen_info: Optional[Dict[str, Any]]) -> bool:
-     if isinstance(gen_info, dict):
-         finish_reason = gen_info.get("finish_reason")
-         if finish_reason in ("tool_calls", "function_call"):
-             return True
-
-     if message is None:
-         return False
-
-     tool_calls = getattr(message, "tool_calls", None)
-     if tool_calls:
-         return True
-
-     additional_kwargs = getattr(message, "additional_kwargs", None)
-     if isinstance(additional_kwargs, dict):
-         if additional_kwargs.get("tool_calls") or additional_kwargs.get("function_call"):
-             return True
-
-     return False
-
-
- def _estimate_token_count(text: Optional[str], model: Optional[str]) -> Optional[int]:
-     if not text:
-         return 0
-     try:
-         import tiktoken  # type: ignore
-     except Exception:
-         tiktoken = None
-
-     if tiktoken is not None:
-         try:
-             try:
-                 enc = tiktoken.encoding_for_model(model or "")
-             except Exception:
-                 enc = tiktoken.get_encoding("cl100k_base")
-             return len(enc.encode(text))
-         except Exception:
-             return None
-
-     if os.getenv("FLOWSTATE_APPROX_TOKENS"):
-         approx = (len(text) + 3) // 4
-         return approx if approx > 0 else 0
-
-     return None
-
-
- class FlowstateCallbackHandler(BaseCallbackHandler):
-     def __init__(self, provider: str, model: str) -> None:
-         self.provider = provider
-         self.model = model
-         self._start_ts: Optional[float] = None
-         self._input_chars: Optional[int] = None
-         self._input_str: Optional[str] = None
-         self._tool_name: Optional[str] = None
-         self._system_prompt: Optional[str] = None
-         self._user_prompt: Optional[str] = None
-
-     def _get_active_task(self) -> TaskContext:
-         run_stack: List[TaskContext] = context.run_stack.get()
-         if not run_stack:
-             raise RuntimeError(
-                 "No active TaskContext. Wrap your step with @sdk_task or run.step()."
-             )
-         return run_stack[-1]
-
-     # ---- Chat model callbacks (preferred for ChatOpenAI, etc.) ----
-
-     def on_chat_model_start(
-         self, serialized: Dict[str, Any], messages: List[List[Any]], **kwargs: Any
-     ) -> None:
-         self._start_ts = time.time()
-         self._tool_name = serialized.get("name")
-
-         msgs = _normalize_messages(messages)
-         if not msgs:
-             msgs = _get_messages_from_kwargs(kwargs) or []
-
-         self._system_prompt, self._user_prompt = _extract_system_and_user_from_messages(
-             msgs
-         )
-
-         # Build a raw input string by concatenating message contents.
-         input_parts: List[str] = []
-         for m in msgs:
-             _, content = _msg_type_and_content(m)
-             text = _content_to_text(content).strip()
-             if text:
-                 input_parts.append(text)
-
-         self._input_str = "\n\n".join(input_parts).strip() or None
-         self._input_chars = len(self._input_str) if self._input_str else None
-
-     def on_chat_model_end(self, response: LLMResult, **kwargs: Any) -> None:
-         self._log_from_llm_result(response)
-
-     # ---- LLM callbacks (for legacy/non-chat or alternate code paths) ----
-
-     def on_llm_start(
-         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
-     ) -> None:
-         self._start_ts = time.time()
-         self._tool_name = serialized.get("name")
-
-         self._input_str = prompts[0] if prompts else None
-         self._input_chars = len(self._input_str) if self._input_str else None
-
-         msgs = _get_messages_from_kwargs(kwargs)
-         self._system_prompt, self._user_prompt = _extract_system_and_user_from_messages(
-             msgs
-         )
-
-     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
-         self._log_from_llm_result(response)
-
-     # ---- Shared logging logic ----
-
-     def _log_from_llm_result(self, response: LLMResult) -> None:
-         latency = (time.time() - self._start_ts) if self._start_ts else None
-
-         generations = response.generations
-         generation_chunk = None
-         if len(generations) > 0 and len(generations[0]) > 0:
-             generation_chunk = generations[0][0]
-         if not generation_chunk:
-             return
-
-         gen_info = generation_chunk.generation_info or {}
-         finish_reason = gen_info.get("finish_reason")
-         ai_message_chunk = getattr(generation_chunk, "message", None)
-
-         if finish_reason and finish_reason != "stop":
-             return
-         if finish_reason is None and _is_tool_call_message(ai_message_chunk, gen_info):
-             return
-
-         llm_output = getattr(response, "llm_output", None)
-         resolved_model = self.model
-         if isinstance(llm_output, dict):
-             for key in ("model_name", "model", "model_id"):
-                 val = llm_output.get(key)
-                 if isinstance(val, str) and val.strip():
-                     resolved_model = val
-                     break
-         resolved_model = _normalize_model_name(resolved_model)
-
-         # Extract output text robustly (chat message content or plain .text)
-         output_text = ""
-         if response.generations and response.generations[0]:
-             generation = response.generations[0][0]
-             if getattr(generation, "message", None) is not None:
-                 output_text = _content_to_text(generation.message.content)
-             else:
-                 output_text = generation.text or ""
-
-         usage_candidates: List[Dict[str, Any]] = []
-         gen_response_metadata = getattr(generation_chunk, "response_metadata", None)
-         if isinstance(gen_response_metadata, dict):
-             usage_candidates.append(gen_response_metadata)
-         gen_additional_kwargs = getattr(generation_chunk, "additional_kwargs", None)
-         if isinstance(gen_additional_kwargs, dict):
-             usage_candidates.append(gen_additional_kwargs)
-         if ai_message_chunk is not None:
-             usage_metadata = getattr(ai_message_chunk, "usage_metadata", None)
-             if isinstance(usage_metadata, dict):
-                 usage_candidates.append(usage_metadata)
-             response_metadata = getattr(ai_message_chunk, "response_metadata", None)
-             if isinstance(response_metadata, dict):
-                 usage_candidates.append(response_metadata)
-             additional_kwargs = getattr(ai_message_chunk, "additional_kwargs", None)
-             if isinstance(additional_kwargs, dict):
-                 usage_candidates.append(additional_kwargs)
-         if isinstance(gen_info, dict) and gen_info:
-             usage_candidates.append(gen_info)
-         if isinstance(llm_output, dict):
-             usage_candidates.append(llm_output)
-
-         input_tokens, output_tokens = _extract_token_usage(usage_candidates)
-         if input_tokens is None:
-             input_tokens = _estimate_token_count(self._input_str, resolved_model)
-         if output_tokens is None:
-             output_tokens = _estimate_token_count(output_text, resolved_model)
-
-         if os.getenv("FLOWSTATE_DEBUG_LLM_USAGE"):
-             try:
-                 import json
-
-                 debug_payload = {
-                     "finish_reason": finish_reason,
-                     "usage_candidates": usage_candidates,
-                     "llm_output": llm_output,
-                     "resolved_model": resolved_model,
-                     "input_tokens": input_tokens,
-                     "output_tokens": output_tokens,
-                 }
-                 print(
-                     "FlowstateCallbackHandler.usage_debug =>",
-                     json.dumps(debug_payload, default=str)[:4000],
-                 )
-             except Exception:
-                 print("FlowstateCallbackHandler.usage_debug => (failed to serialize)")
-
-         input_cost_per_token_usd = (
-             COST_TABLE.get(self.provider, {}).get(resolved_model, {}).get("input", 0.0)
-         )
-         output_cost_per_token_usd = (
-             COST_TABLE.get(self.provider, {}).get(resolved_model, {}).get("output", 0.0)
-         )
-         cost_usd = None
-         if input_tokens is not None or output_tokens is not None:
-             input_token_count = input_tokens or 0
-             output_token_count = output_tokens or 0
-             cost_value = (
-                 input_cost_per_token_usd * input_token_count
-                 + output_cost_per_token_usd * output_token_count
-             )
-             if cost_value != 0.0:
-                 cost_usd = cost_value
-
-         provider_metrics = ProviderMetrics(
-             run_id=context.current_run.get(),
-             provider=self.provider,
-             model=resolved_model,
-             input_chars=self._input_chars,
-             output_chars=len(output_text) if output_text else None,
-             latency_sec=latency,
-             input_tokens=input_tokens,
-             output_tokens=output_tokens,
-             tool_name=self._tool_name,
-             cost_usd=cost_usd,
-             raw_input=self._input_str,
-             system_prompt=self._system_prompt,
-             user_prompt=self._user_prompt,
-             raw_response=response,
-         )
-
-         task_ctx = self._get_active_task()
-         llm_ctx = TaskContext(
-             task_step_id=str(uuid.uuid4()),
-             client=task_ctx.client,
-             func_name=f"{task_ctx.func_name}.llm",
-             type=TaskTypes.LLM,
-             metadata={"provider": self.provider, "model": resolved_model},
-         )
-         llm_ctx.__enter__()
-         try:
-             llm_ctx.log_llm_usage(provider_metrics)
-         finally:
-             llm_ctx.__exit__(None, None, None)
+ __all__ = ["FlowstateCallbackHandler"]
flowstate_sdk/langchain/chat_models.py ADDED
@@ -0,0 +1,91 @@
+ from __future__ import annotations
+
+ from typing import Any
+
+ from flowstate_sdk.langchain.flowstate_chat_base import FlowstateChatModelMixin
+
+ try:
+     from langchain_openai import ChatOpenAI
+ except Exception:  # pragma: no cover - optional dependency
+     ChatOpenAI = None
+
+ try:
+     from langchain_anthropic import ChatAnthropic
+ except Exception:  # pragma: no cover - optional dependency
+     ChatAnthropic = None
+
+ try:
+     from langchain_google_genai import ChatGoogleGenerativeAI
+ except Exception:  # pragma: no cover - optional dependency
+     ChatGoogleGenerativeAI = None
+
+
+ def _missing_dependency(name: str, package: str) -> None:
+     raise ImportError(
+         f"{name} requires optional dependency '{package}'. "
+         f"Install it to use this Flowstate wrapper."
+     )
+
+
+ if ChatOpenAI is not None:
+
+     class FlowstateChatOpenAI(FlowstateChatModelMixin, ChatOpenAI):
+         provider = "openai"
+
+         def __init__(self, *args: Any, **kwargs: Any) -> None:
+             kwargs = self._flowstate_init_kwargs(kwargs)
+             super().__init__(*args, **kwargs)
+
+ else:
+
+     class FlowstateChatOpenAI(FlowstateChatModelMixin):  # type: ignore[misc]
+         provider = "openai"
+
+         def __init__(self, *args: Any, **kwargs: Any) -> None:
+             _missing_dependency("FlowstateChatOpenAI", "langchain-openai")
+
+
+ if ChatAnthropic is not None:
+
+     class FlowstateChatAnthropic(FlowstateChatModelMixin, ChatAnthropic):
+         provider = "anthropic"
+
+         def __init__(self, *args: Any, **kwargs: Any) -> None:
+             kwargs = self._flowstate_init_kwargs(kwargs)
+             super().__init__(*args, **kwargs)
+
+ else:
+
+     class FlowstateChatAnthropic(FlowstateChatModelMixin):  # type: ignore[misc]
+         provider = "anthropic"
+
+         def __init__(self, *args: Any, **kwargs: Any) -> None:
+             _missing_dependency("FlowstateChatAnthropic", "langchain-anthropic")
+
+
+ if ChatGoogleGenerativeAI is not None:
+
+     class FlowstateChatGoogle(FlowstateChatModelMixin, ChatGoogleGenerativeAI):
+         provider = "google"
+
+         def __init__(self, *args: Any, **kwargs: Any) -> None:
+             kwargs = self._flowstate_init_kwargs(kwargs)
+             super().__init__(*args, **kwargs)
+
+ else:
+
+     class FlowstateChatGoogle(FlowstateChatModelMixin):  # type: ignore[misc]
+         provider = "google"
+
+         def __init__(self, *args: Any, **kwargs: Any) -> None:
+             _missing_dependency("FlowstateChatGoogle", "langchain-google-genai")
+
+
+ FlowstateChatClaude = FlowstateChatAnthropic
+
+ __all__ = [
+     "FlowstateChatOpenAI",
+     "FlowstateChatAnthropic",
+     "FlowstateChatClaude",
+     "FlowstateChatGoogle",
+ ]
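
Each wrapper degrades to a stub when its provider package is absent, so importing the module never fails; instantiating the stub does. A sketch of that behavior, under the assumption that langchain-anthropic is not installed (the model id is illustrative):

    try:
        from flowstate_sdk.langchain.chat_models import FlowstateChatAnthropic

        FlowstateChatAnthropic(model="claude-3-5-haiku")  # raises if dep missing
    except ImportError as exc:
        # "FlowstateChatAnthropic requires optional dependency 'langchain-anthropic'. ..."
        print(exc)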
flowstate_sdk/langchain/flowstate_chat_base.py ADDED
@@ -0,0 +1,51 @@
+ from __future__ import annotations
+
+ from typing import Any, Dict, Iterable, List, Optional
+
+ from flowstate_sdk.langchain.telemetry import FlowstateCallbackHandler
+
+
+ def _extract_model_name(kwargs: Dict[str, Any]) -> Optional[str]:
+     for key in ("model", "model_name", "model_id"):
+         val = kwargs.get(key)
+         if isinstance(val, str) and val.strip():
+             return val
+     return None
+
+
+ def _normalize_callbacks(callbacks: Any) -> List[Any]:
+     if callbacks is None:
+         return []
+     if isinstance(callbacks, list):
+         return list(callbacks)
+     if isinstance(callbacks, tuple):
+         return list(callbacks)
+     return [callbacks]
+
+
+ def _has_flowstate_callback(callbacks: Iterable[Any]) -> bool:
+     for cb in callbacks:
+         if isinstance(cb, FlowstateCallbackHandler):
+             return True
+     return False
+
+
+ def _merge_callbacks(existing: Any, additional: List[Any]) -> List[Any]:
+     existing_list = _normalize_callbacks(existing)
+     if _has_flowstate_callback(existing_list):
+         return existing_list
+     return existing_list + list(additional)
+
+
+ class FlowstateChatModelMixin:
+     provider: str = ""
+
+     @classmethod
+     def _flowstate_build_callbacks(cls, kwargs: Dict[str, Any]) -> List[Any]:
+         model = _extract_model_name(kwargs) or "unknown"
+         handler = FlowstateCallbackHandler(cls.provider, model)
+         return _merge_callbacks(kwargs.pop("callbacks", None), [handler])
+
+     def _flowstate_init_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         kwargs["callbacks"] = self._flowstate_build_callbacks(kwargs)
+         return kwargs
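
_merge_callbacks is what keeps the wrappers idempotent: if the caller already passed a FlowstateCallbackHandler, no second one is appended. A small check that mirrors the code above (handler arguments are illustrative):

    from flowstate_sdk.langchain.flowstate_chat_base import _merge_callbacks
    from flowstate_sdk.langchain.telemetry import FlowstateCallbackHandler

    handler = FlowstateCallbackHandler("openai", "gpt-4o")
    merged = _merge_callbacks([handler], [FlowstateCallbackHandler("openai", "gpt-4o")])
    assert merged == [handler]  # existing handler wins; nothing appended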
flowstate_sdk/langchain/telemetry.py ADDED
@@ -0,0 +1,481 @@
+ import os
+ import time
+ import uuid
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
+
+ from flowstate_sdk import context
+ from flowstate_sdk.cost_table import COST_TABLE
+ from flowstate_sdk.enums import TaskTypes
+ from flowstate_sdk.shared_dataclasses import ProviderMetrics
+ from flowstate_sdk.task_context import TaskContext
+ from langchain_core.callbacks import BaseCallbackHandler
+ from langchain_core.outputs import LLMResult
+ from langchain_core.messages.human import HumanMessage
+ from langchain_core.messages.system import SystemMessage
+
+
+ def _get_messages_from_kwargs(kwargs: Dict[str, Any]) -> Optional[List[Any]]:
+     msgs = _unwrap_messages(kwargs.get("messages"))
+     if msgs:
+         return msgs
+
+     inv = kwargs.get("invocation_params")
+     if isinstance(inv, dict):
+         msgs = _unwrap_messages(inv.get("messages"))
+         if msgs:
+             return msgs
+
+     inputs = kwargs.get("inputs")
+     if isinstance(inputs, dict):
+         msgs = _unwrap_messages(inputs.get("messages"))
+         if msgs:
+             return msgs
+
+     inp = kwargs.get("input")
+     if isinstance(inp, dict):
+         msgs = _unwrap_messages(inp.get("messages"))
+         if msgs:
+             return msgs
+
+     return None
+
+
+ def _unwrap_messages(messages: Any) -> Optional[List[Any]]:
+     if messages is None:
+         return None
+     if isinstance(messages, (list, tuple)):
+         return list(messages)
+     if hasattr(messages, "messages"):
+         try:
+             inner = getattr(messages, "messages")
+         except Exception:
+             inner = None
+         if isinstance(inner, (list, tuple)):
+             return list(inner)
+     return None
+
+
+ def _normalize_messages(messages: Any) -> List[Any]:
+     """
+     LangChain may pass:
+     - List[List[BaseMessage]] (batched)
+     - List[BaseMessage]
+     - List[dict] (OpenAI-style: {"role": "...", "content": ...})
+     This normalizes to a single flat List[Any] of messages for the first batch item.
+     """
+     if not messages:
+         return []
+     if isinstance(messages, (list, tuple)):
+         # If it's a batch: [[msg1, msg2, ...], [msg1, ...]]
+         if messages and isinstance(messages[0], (list, tuple)):
+             return list(messages[0])
+         return list(messages)
+     if hasattr(messages, "messages"):
+         inner = _unwrap_messages(messages)
+         if inner:
+             if inner and isinstance(inner[0], (list, tuple)):
+                 return list(inner[0])
+             return list(inner)
+     return []
+
+
+ def _content_to_text(content: Any) -> str:
+     """
+     Convert message content into text.
+     Supports:
+     - str
+     - list parts (OpenAI multimodal / tool-like parts)
+     - dict with "text"
+     - anything else -> str(...)
+     """
+     if content is None:
+         return ""
+     if isinstance(content, str):
+         return content
+     if isinstance(content, list):
+         out: List[str] = []
+         for part in content:
+             if isinstance(part, str):
+                 out.append(part)
+             elif isinstance(part, dict):
+                 if isinstance(part.get("text"), str):
+                     out.append(part["text"])
+                 else:
+                     out.append(str(part))
+             else:
+                 out.append(str(part))
+         return "\n".join(out)
+     if isinstance(content, dict):
+         if isinstance(content.get("text"), str):
+             return content["text"]
+         return str(content)
+     return str(content)
+
+
+ def _msg_type_and_content(m: Any) -> Tuple[Optional[str], Any]:
+     """
+     Extract (type/role, content) from either:
+     - LangChain BaseMessage objects (m.type, m.content)
+     - OpenAI-style dicts ({'role': 'user'|'system'|'assistant', 'content': ...})
+     Normalize role names to LangChain-like:
+     user -> human
+     assistant -> ai
+     """
+
+     if isinstance(m, SystemMessage):
+         return "system", m.content
+     if isinstance(m, HumanMessage):
+         return "human", m.content
+
+     # LangChain message objects
+     if hasattr(m, "type") or hasattr(m, "content"):
+         msg_type = getattr(m, "type", None)
+         content = getattr(m, "content", None)
+         role = getattr(m, "role", None) if hasattr(m, "role") else None
+         if msg_type in (None, "chat") and role:
+             msg_type = role
+         if msg_type == "user":
+             msg_type = "human"
+         elif msg_type == "assistant":
+             msg_type = "ai"
+         return msg_type, content
+
+     # Dict messages (OpenAI-style)
+     if isinstance(m, dict):
+         role = m.get("role") or m.get("type")
+         content = m.get("content")
+         if role == "user":
+             role = "human"
+         elif role == "assistant":
+             role = "ai"
+         return role, content
+
+     return None, None
+
+
+ def _extract_system_and_user_from_messages(
+     msgs: Optional[List[Any]],
+ ) -> Tuple[Optional[str], Optional[str]]:
+     if not msgs:
+         return None, None
+
+     system_parts: List[str] = []
+     user_parts: List[str] = []
+
+     for m in msgs:
+         m_type, content = _msg_type_and_content(m)
+         text = _content_to_text(content).strip()
+         if not text:
+             continue
+
+         if m_type == "system":
+             system_parts.append(text)
+         elif m_type == "human":
+             user_parts.append(text)
+
+     system_prompt = "\n\n".join(system_parts).strip() or None
+     user_prompt = "\n\n".join(user_parts).strip() or None
+     return system_prompt, user_prompt
+
+
+ def _coerce_int(value: Any) -> Optional[int]:
+     if value is None:
+         return None
+     try:
+         if isinstance(value, bool):
+             return int(value)
+         if isinstance(value, (int, float)):
+             return int(value)
+         if isinstance(value, str) and value.strip() != "":
+             return int(float(value))
+     except Exception:
+         return None
+     return None
+
+
+ def _iter_usage_dicts(candidates: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
+     for data in candidates:
+         if not isinstance(data, dict):
+             continue
+         yield data
+         for key in ("token_usage", "usage", "usage_metadata"):
+             nested = data.get(key)
+             if isinstance(nested, dict):
+                 yield nested
+
+
+ def _extract_token_usage(candidates: Iterable[Dict[str, Any]]) -> Tuple[Optional[int], Optional[int]]:
+     input_tokens: Optional[int] = None
+     output_tokens: Optional[int] = None
+     for usage in _iter_usage_dicts(candidates):
+         if input_tokens is None:
+             input_tokens = _coerce_int(usage.get("input_tokens"))
+         if input_tokens is None:
+             input_tokens = _coerce_int(usage.get("prompt_tokens"))
+         if output_tokens is None:
+             output_tokens = _coerce_int(usage.get("output_tokens"))
+         if output_tokens is None:
+             output_tokens = _coerce_int(usage.get("completion_tokens"))
+         if input_tokens is not None and output_tokens is not None:
+             break
+     return input_tokens, output_tokens
+
+
+ def _normalize_model_name(model: Optional[str]) -> Optional[str]:
+     if not model or not isinstance(model, str):
+         return model
+     model = model.strip()
+     if model.startswith("gpt") and not model.startswith("gpt-"):
+         if len(model) > 3 and model[3].isdigit():
+             model = "gpt-" + model[3:]
+     return model
+
+
+ def _is_tool_call_message(message: Any, gen_info: Optional[Dict[str, Any]]) -> bool:
+     if isinstance(gen_info, dict):
+         finish_reason = gen_info.get("finish_reason")
+         if finish_reason in ("tool_calls", "function_call"):
+             return True
+
+     if message is None:
+         return False
+
+     tool_calls = getattr(message, "tool_calls", None)
+     if tool_calls:
+         return True
+
+     additional_kwargs = getattr(message, "additional_kwargs", None)
+     if isinstance(additional_kwargs, dict):
+         if additional_kwargs.get("tool_calls") or additional_kwargs.get("function_call"):
+             return True
+
+     return False
+
+
+ def _estimate_token_count(text: Optional[str], model: Optional[str]) -> Optional[int]:
+     if not text:
+         return 0
+     try:
+         import tiktoken  # type: ignore
+     except Exception:
+         tiktoken = None
+
+     if tiktoken is not None:
+         try:
+             try:
+                 enc = tiktoken.encoding_for_model(model or "")
+             except Exception:
+                 enc = tiktoken.get_encoding("cl100k_base")
+             return len(enc.encode(text))
+         except Exception:
+             return None
+
+     if os.getenv("FLOWSTATE_APPROX_TOKENS"):
+         approx = (len(text) + 3) // 4
+         return approx if approx > 0 else 0
+
+     return None
+
+
+ class FlowstateCallbackHandler(BaseCallbackHandler):
+     def __init__(self, provider: str, model: str) -> None:
+         self.provider = provider
+         self.model = model
+         self._start_ts: Optional[float] = None
+         self._input_chars: Optional[int] = None
+         self._input_str: Optional[str] = None
+         self._tool_name: Optional[str] = None
+         self._system_prompt: Optional[str] = None
+         self._user_prompt: Optional[str] = None
+
+     def _get_active_task(self) -> TaskContext:
+         run_stack: List[TaskContext] = context.run_stack.get()
+         if not run_stack:
+             raise RuntimeError(
+                 "No active TaskContext. Wrap your step with @sdk_task or run.step()."
+             )
+         return run_stack[-1]
+
+     # ---- Chat model callbacks (preferred for ChatOpenAI, etc.) ----
+
+     def on_chat_model_start(
+         self, serialized: Dict[str, Any], messages: List[List[Any]], **kwargs: Any
+     ) -> None:
+         self._start_ts = time.time()
+         self._tool_name = serialized.get("name")
+
+         msgs = _normalize_messages(messages)
+         if not msgs:
+             msgs = _get_messages_from_kwargs(kwargs) or []
+
+         self._system_prompt, self._user_prompt = _extract_system_and_user_from_messages(
+             msgs
+         )
+
+         # Build a raw input string by concatenating message contents.
+         input_parts: List[str] = []
+         for m in msgs:
+             _, content = _msg_type_and_content(m)
+             text = _content_to_text(content).strip()
+             if text:
+                 input_parts.append(text)
+
+         self._input_str = "\n\n".join(input_parts).strip() or None
+         self._input_chars = len(self._input_str) if self._input_str else None
+
+     def on_chat_model_end(self, response: LLMResult, **kwargs: Any) -> None:
+         self._log_from_llm_result(response)
+
+     # ---- LLM callbacks (for legacy/non-chat or alternate code paths) ----
+
+     def on_llm_start(
+         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+     ) -> None:
+         self._start_ts = time.time()
+         self._tool_name = serialized.get("name")
+
+         self._input_str = prompts[0] if prompts else None
+         self._input_chars = len(self._input_str) if self._input_str else None
+
+         msgs = _get_messages_from_kwargs(kwargs)
+         self._system_prompt, self._user_prompt = _extract_system_and_user_from_messages(
+             msgs
+         )
+
+     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+         self._log_from_llm_result(response)
+
+     # ---- Shared logging logic ----
+
+     def _log_from_llm_result(self, response: LLMResult) -> None:
+         latency = (time.time() - self._start_ts) if self._start_ts else None
+
+         generations = response.generations
+         generation_chunk = None
+         if len(generations) > 0 and len(generations[0]) > 0:
+             generation_chunk = generations[0][0]
+         if not generation_chunk:
+             return
+
+         gen_info = generation_chunk.generation_info or {}
+         finish_reason = gen_info.get("finish_reason")
+         ai_message_chunk = getattr(generation_chunk, "message", None)
+
+         if finish_reason and finish_reason != "stop":
+             return
+         if finish_reason is None and _is_tool_call_message(ai_message_chunk, gen_info):
+             return
+
+         llm_output = getattr(response, "llm_output", None)
+         resolved_model = self.model
+         if isinstance(llm_output, dict):
+             for key in ("model_name", "model", "model_id"):
+                 val = llm_output.get(key)
+                 if isinstance(val, str) and val.strip():
+                     resolved_model = val
+                     break
+         resolved_model = _normalize_model_name(resolved_model)
+
+         # Extract output text robustly (chat message content or plain .text)
+         output_text = ""
+         if response.generations and response.generations[0]:
+             generation = response.generations[0][0]
+             if getattr(generation, "message", None) is not None:
+                 output_text = _content_to_text(generation.message.content)
+             else:
+                 output_text = generation.text or ""
+
+         usage_candidates: List[Dict[str, Any]] = []
+         gen_response_metadata = getattr(generation_chunk, "response_metadata", None)
+         if isinstance(gen_response_metadata, dict):
+             usage_candidates.append(gen_response_metadata)
+         gen_additional_kwargs = getattr(generation_chunk, "additional_kwargs", None)
+         if isinstance(gen_additional_kwargs, dict):
+             usage_candidates.append(gen_additional_kwargs)
+         if ai_message_chunk is not None:
+             usage_metadata = getattr(ai_message_chunk, "usage_metadata", None)
+             if isinstance(usage_metadata, dict):
+                 usage_candidates.append(usage_metadata)
+             response_metadata = getattr(ai_message_chunk, "response_metadata", None)
+             if isinstance(response_metadata, dict):
+                 usage_candidates.append(response_metadata)
+             additional_kwargs = getattr(ai_message_chunk, "additional_kwargs", None)
+             if isinstance(additional_kwargs, dict):
+                 usage_candidates.append(additional_kwargs)
+         if isinstance(gen_info, dict) and gen_info:
+             usage_candidates.append(gen_info)
+         if isinstance(llm_output, dict):
+             usage_candidates.append(llm_output)
+
+         input_tokens, output_tokens = _extract_token_usage(usage_candidates)
+         if input_tokens is None:
+             input_tokens = _estimate_token_count(self._input_str, resolved_model)
+         if output_tokens is None:
+             output_tokens = _estimate_token_count(output_text, resolved_model)
+
+         if os.getenv("FLOWSTATE_DEBUG_LLM_USAGE"):
+             try:
+                 import json
+
+                 debug_payload = {
+                     "finish_reason": finish_reason,
+                     "usage_candidates": usage_candidates,
+                     "llm_output": llm_output,
+                     "resolved_model": resolved_model,
+                     "input_tokens": input_tokens,
+                     "output_tokens": output_tokens,
+                 }
+                 print(
+                     "FlowstateCallbackHandler.usage_debug =>",
+                     json.dumps(debug_payload, default=str)[:4000],
+                 )
+             except Exception:
+                 print("FlowstateCallbackHandler.usage_debug => (failed to serialize)")
+
+         input_cost_per_token_usd = (
+             COST_TABLE.get(self.provider, {}).get(resolved_model, {}).get("input", 0.0)
+         )
+         output_cost_per_token_usd = (
+             COST_TABLE.get(self.provider, {}).get(resolved_model, {}).get("output", 0.0)
+         )
+         cost_usd = None
+         if input_tokens is not None or output_tokens is not None:
+             input_token_count = input_tokens or 0
+             output_token_count = output_tokens or 0
+             cost_value = (
+                 input_cost_per_token_usd * input_token_count
+                 + output_cost_per_token_usd * output_token_count
+             )
+             if cost_value != 0.0:
+                 cost_usd = cost_value
+
+         provider_metrics = ProviderMetrics(
+             run_id=context.current_run.get(),
+             provider=self.provider,
+             model=resolved_model,
+             input_chars=self._input_chars,
+             output_chars=len(output_text) if output_text else None,
+             latency_sec=latency,
+             input_tokens=input_tokens,
+             output_tokens=output_tokens,
+             tool_name=self._tool_name,
+             cost_usd=cost_usd,
+             raw_input=self._input_str,
+             system_prompt=self._system_prompt,
+             user_prompt=self._user_prompt,
+             raw_response=response,
+         )
+
+         task_ctx = self._get_active_task()
+         llm_ctx = TaskContext(
+             task_step_id=str(uuid.uuid4()),
+             client=task_ctx.client,
+             func_name=f"{task_ctx.func_name}.llm",
+             type=TaskTypes.LLM,
+             metadata={"provider": self.provider, "model": resolved_model},
+         )
+         llm_ctx.__enter__()
+         try:
+             llm_ctx.log_llm_usage(provider_metrics)
+         finally:
+             llm_ctx.__exit__(None, None, None)
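
Two behaviors in the handler are worth spelling out with numbers: the tiktoken-free fallback estimates tokens as ceil(len(text) / 4) and only runs when FLOWSTATE_APPROX_TOKENS is set, and cost is a straight per-token product. Worked arithmetic (the per-token prices here are made up, not values from COST_TABLE):

    prompt, completion = "x" * 10, "y" * 30
    in_tokens = (len(prompt) + 3) // 4       # -> 3, i.e. ceil(10 / 4)
    out_tokens = (len(completion) + 3) // 4  # -> 8, i.e. ceil(30 / 4)
    # Illustrative per-token USD rates:
    cost_usd = in_tokens * 2.5e-06 + out_tokens * 1.0e-05
    print(in_tokens, out_tokens, cost_usd)   # 3 8 8.75e-05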
flowstate_sdk/workflow.py CHANGED
@@ -1,6 +1,7 @@
  import uuid
  from collections import defaultdict
  from datetime import datetime
+ import os
 
  from . import context
  from .sdk_client import SDKClient
@@ -40,6 +41,12 @@ class Workflow:
          }
          self.client.emit(workflow_payload)
 
+         if os.getenv("IS_FLOWSTATE_REPLAY"):
+             replay_list = self.client.retrieve_replay_plan(os.getenv("FLOWSTATE_REPLAY_PARENT_ID"))
+             context.replay_counter.set(0)
+             context.replay_step_list.set(replay_list)
+             context.is_replay.set(True)
+
          run_payload = RunRecord(
              run_id=str(self.run_id),
              workflow_id=str(self.workflow_id),
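
Replay is opt-in via two environment variables read when the Workflow starts. A driving sketch (the values are illustrative; the parent run id would come from a previously recorded run):

    import os

    os.environ["IS_FLOWSTATE_REPLAY"] = "1"
    os.environ["FLOWSTATE_REPLAY_PARENT_ID"] = "run-1234"  # hypothetical parent run id

    # Constructing the Workflow now fetches the recorded plan via
    # retrieve_replay_plan and seeds is_replay / replay_counter /
    # replay_step_list before any step executes.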
{flowstate_sdk-0.1.12.dist-info → flowstate_sdk-0.1.13.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: flowstate_sdk
- Version: 0.1.12
+ Version: 0.1.13
  Summary: SDK for Agentic AI workflow management with Flowstate
  Author: Flowstate
  License: Nobody can use this
flowstate_sdk-0.1.13.dist-info/RECORD ADDED
@@ -0,0 +1,21 @@
+ flowstate_sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ flowstate_sdk/constants.py,sha256=4qinJNguVuYn-uyapRV0VeRq0p5lRbVZU8yZRZjCZ3M,849
+ flowstate_sdk/context.py,sha256=ObbwvJRQufC2AQmMdeGC3iVAogfZiE2U6KwlVsCSglU,608
+ flowstate_sdk/cost_table.py,sha256=E9roz61y6YelSQw1YKff6-mg7hiJtK5bKcs3tSAcMak,8019
+ flowstate_sdk/decorators.py,sha256=oCD09GYNbKZ0R4yoiHNpQGQ1Hot9T-15LLTRSNmTlfw,3390
+ flowstate_sdk/enums.py,sha256=7RZD_nYjd_Y19_PpBi-35-bt9Kq_zM1UhpBE2s82r_o,202
+ flowstate_sdk/sdk_client.py,sha256=WGQfuX1EH7s2MQXLSornTwjTtn5WwsGQ5N1aKU_XfQA,3706
+ flowstate_sdk/shared_dataclasses.py,sha256=-hq4NUdBfK02NI1m8cEScPYBtURUcC7ll7q_qB6Spno,783
+ flowstate_sdk/task.py,sha256=3quyOV30H2YY9j7mNNLNJo3IKBvglfqyCfOvLLcksas,702
+ flowstate_sdk/task_context.py,sha256=DtM3o4dsJzMNHwFygRq2474wSwJMPiZOMJDrZDInAS4,5944
+ flowstate_sdk/workflow.py,sha256=7vnOhpMwi21W7OJAcFoU0bWth2scHquG1ch6rR2vveo,2280
+ flowstate_sdk/langchain/__init__.py,sha256=EJJtYjTKgBsP4oJZGqGhNX5rUXoSTNBswO2Xf13F2KQ,391
+ flowstate_sdk/langchain/callback_handler.py,sha256=onJj_m3-6GRd4YUtAcH4JqspmdOFMvvRBe1WjifmASk,111
+ flowstate_sdk/langchain/chat_models.py,sha256=UJQXh2rNQmkIjy9bW4Av1H9dIamANbWKqwBviusVhXo,2643
+ flowstate_sdk/langchain/flowstate_chat_base.py,sha256=gM3Is3wPWxCasv4BlTBObKuVEWuFGlvMGKdIhxKWjIc,1610
+ flowstate_sdk/langchain/telemetry.py,sha256=5f_zOAWquP8XBknPmFxM9Vpat_LrhTW1GGhnKpQzpiQ,16892
+ flowstate_sdk-0.1.13.dist-info/licenses/LICENSE,sha256=gBfhEg0GV1iXXcGHVELsXUX-y_ewaTaFvE2MWfJoNUI,19
+ flowstate_sdk-0.1.13.dist-info/METADATA,sha256=HYhCdLXqC65TAxO1ItZ471TlMHNCU_V-MLJWdhSWfAQ,376
+ flowstate_sdk-0.1.13.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ flowstate_sdk-0.1.13.dist-info/top_level.txt,sha256=dZ-q9vpo98TaUsZA3d8NNe1XxK2RaPmoIbDXuidZ1uk,14
+ flowstate_sdk-0.1.13.dist-info/RECORD,,
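
Note that telemetry.py carries the same hash and size (16892) as the removed 0.1.12 callback_handler.py entry below, confirming the module was moved, not rewritten. RECORD hashes follow the wheel convention: urlsafe base64 of the SHA-256 digest with trailing '=' padding stripped. A sketch that reproduces the entry for the empty flowstate_sdk/__init__.py above:

    import base64
    import hashlib

    def record_hash(data: bytes) -> str:
        # Wheel RECORD style: urlsafe base64 of the SHA-256 digest, unpadded.
        digest = hashlib.sha256(data).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    assert record_hash(b"") == "47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"  # size-0 file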
flowstate_sdk-0.1.12.dist-info/RECORD REMOVED
@@ -1,17 +0,0 @@
- flowstate_sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- flowstate_sdk/constants.py,sha256=4qinJNguVuYn-uyapRV0VeRq0p5lRbVZU8yZRZjCZ3M,849
- flowstate_sdk/context.py,sha256=5vm-4GhZ_FkvTbivKJ80J88GhOStwIhMs5iwqm_YxX0,380
- flowstate_sdk/cost_table.py,sha256=E9roz61y6YelSQw1YKff6-mg7hiJtK5bKcs3tSAcMak,8019
- flowstate_sdk/decorators.py,sha256=oCD09GYNbKZ0R4yoiHNpQGQ1Hot9T-15LLTRSNmTlfw,3390
- flowstate_sdk/enums.py,sha256=7RZD_nYjd_Y19_PpBi-35-bt9Kq_zM1UhpBE2s82r_o,202
- flowstate_sdk/sdk_client.py,sha256=WGQfuX1EH7s2MQXLSornTwjTtn5WwsGQ5N1aKU_XfQA,3706
- flowstate_sdk/shared_dataclasses.py,sha256=-hq4NUdBfK02NI1m8cEScPYBtURUcC7ll7q_qB6Spno,783
- flowstate_sdk/task.py,sha256=3quyOV30H2YY9j7mNNLNJo3IKBvglfqyCfOvLLcksas,702
- flowstate_sdk/task_context.py,sha256=DtM3o4dsJzMNHwFygRq2474wSwJMPiZOMJDrZDInAS4,5944
- flowstate_sdk/workflow.py,sha256=CqKLNJ_1hY93NKXIvanNSYX-fALYbzZs5m0VnRsaNzI,1988
- flowstate_sdk/langchain/callback_handler.py,sha256=5f_zOAWquP8XBknPmFxM9Vpat_LrhTW1GGhnKpQzpiQ,16892
- flowstate_sdk-0.1.12.dist-info/licenses/LICENSE,sha256=gBfhEg0GV1iXXcGHVELsXUX-y_ewaTaFvE2MWfJoNUI,19
- flowstate_sdk-0.1.12.dist-info/METADATA,sha256=A1giTx1fkBdZLPM2KhGhMRof_xww7Wvg10P8Ejs2cPU,376
- flowstate_sdk-0.1.12.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- flowstate_sdk-0.1.12.dist-info/top_level.txt,sha256=dZ-q9vpo98TaUsZA3d8NNe1XxK2RaPmoIbDXuidZ1uk,14
- flowstate_sdk-0.1.12.dist-info/RECORD,,