feat: add langfuse llm node input and output (#17368)

Tonny 2025-04-03 16:05:37 +08:00 committed by GitHub
parent 12faecdf89
commit a83318cf4b
2 changed files with 30 additions and 0 deletions

@@ -213,9 +213,24 @@ class LangFuseDataTrace(BaseTraceInstance):
            if process_data and process_data.get("model_mode") == "chat":
                total_token = metadata.get("total_tokens", 0)
                prompt_tokens = 0
                completion_tokens = 0
                try:
                    if outputs.get("usage"):
                        prompt_tokens = outputs.get("usage", {}).get("prompt_tokens", 0)
                        completion_tokens = outputs.get("usage", {}).get("completion_tokens", 0)
                    else:
                        prompt_tokens = process_data.get("usage", {}).get("prompt_tokens", 0)
                        completion_tokens = process_data.get("usage", {}).get("completion_tokens", 0)
                except Exception:
                    logger.error("Failed to extract usage", exc_info=True)
                # add generation
                generation_usage = GenerationUsage(
                    input=prompt_tokens,
                    output=completion_tokens,
                    total=total_token,
                    unit=UnitEnum.TOKENS,
                )
                node_generation_data = LangfuseGeneration(

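The token-count fallback above is the core of the change: usage reported directly in the node outputs wins, otherwise process_data is consulted, and missing usage degrades to zero counts instead of breaking tracing. A minimal, self-contained sketch of that behavior (extract_usage is a hypothetical helper, not part of the commit; the payload shapes mirror the diff, the values are invented, and the diff's surrounding try/except is omitted):

    # Hypothetical standalone version of the fallback used by both tracers.
    def extract_usage(outputs: dict, process_data: dict) -> tuple[int, int]:
        # Prefer usage reported in the node outputs; fall back to process_data.
        source = outputs if outputs.get("usage") else process_data
        usage = source.get("usage", {})
        return usage.get("prompt_tokens", 0), usage.get("completion_tokens", 0)

    print(extract_usage({"usage": {"prompt_tokens": 12, "completion_tokens": 34}}, {}))  # (12, 34)
    print(extract_usage({}, {"usage": {"prompt_tokens": 5, "completion_tokens": 7}}))    # (5, 7)
    print(extract_usage({}, {}))  # (0, 0) -- missing usage defaults to zero
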
@@ -199,6 +199,7 @@ class LangSmithDataTrace(BaseTraceInstance):
            )
            process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
            if process_data and process_data.get("model_mode") == "chat":
                run_type = LangSmithRunType.llm
                metadata.update(
@@ -212,9 +213,23 @@ class LangSmithDataTrace(BaseTraceInstance):
            else:
                run_type = LangSmithRunType.tool
            prompt_tokens = 0
            completion_tokens = 0
            try:
                if outputs.get("usage"):
                    prompt_tokens = outputs.get("usage", {}).get("prompt_tokens", 0)
                    completion_tokens = outputs.get("usage", {}).get("completion_tokens", 0)
                else:
                    prompt_tokens = process_data.get("usage", {}).get("prompt_tokens", 0)
                    completion_tokens = process_data.get("usage", {}).get("completion_tokens", 0)
            except Exception:
                logger.error("Failed to extract usage", exc_info=True)
            node_dotted_order = generate_dotted_order(node_execution_id, created_at, workflow_dotted_order)
            langsmith_run = LangSmithRunModel(
                total_tokens=node_total_tokens,
                input_tokens=prompt_tokens,
                output_tokens=completion_tokens,
                name=node_type,
                inputs=inputs,
                run_type=run_type,
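
Taken together, the two hunks feed the same pair of counts into each tracer's model. A rough sketch of the field mapping, using plain dicts in place of the real GenerationUsage and LangSmithRunModel classes (values are illustrative; in the diff the totals come from node metadata rather than being summed here):

    # Illustrative mapping only -- plain dicts stand in for the real models.
    prompt_tokens, completion_tokens = 12, 34

    langfuse_usage = {  # fields of GenerationUsage in the first hunk
        "input": prompt_tokens,
        "output": completion_tokens,
        "total": prompt_tokens + completion_tokens,  # the diff reads metadata["total_tokens"]
        "unit": "TOKENS",                            # UnitEnum.TOKENS
    }

    langsmith_run_fields = {  # fields of LangSmithRunModel in the second hunk
        "input_tokens": prompt_tokens,
        "output_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,  # the diff uses node_total_tokens
        "run_type": "llm",  # LangSmithRunType.llm when model_mode == "chat", else .tool
    }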