feat: add langfuse llm node input and output (#16924)
commit 82189e1bc5 (parent 7f8bfb6e4a)
@@ -29,7 +29,7 @@ from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
 )
 from core.ops.utils import filter_none_values
 from extensions.ext_database import db
-from models.model import EndUser
+from models.model import EndUser, Message
 from models.workflow import WorkflowNodeExecution
 
 logger = logging.getLogger(__name__)
@@ -213,9 +213,32 @@ class LangFuseDataTrace(BaseTraceInstance):
 
             if process_data and process_data.get("model_mode") == "chat":
                 total_token = metadata.get("total_tokens", 0)
+
+                # look up the message for this run via workflow_run_id
+                message_data = (
+                    db.session.query(
+                        Message.answer_tokens,  # output tokens
+                        Message.message_tokens,  # input tokens
+                    )
+                    .filter(Message.workflow_run_id == trace_info.workflow_run_id)
+                    .first()
+                )
+
+                if message_data:
+                    # chatflow run: token counts are stored on the Message row
+                    input_tokens = message_data.message_tokens
+                    output_tokens = message_data.answer_tokens
+                else:
+                    # plain workflow run: fall back to the node execution's usage payload
+                    input_tokens = json.loads(node_execution.outputs).get("usage", {}).get("prompt_tokens", 0)
+                    output_tokens = json.loads(node_execution.outputs).get("usage", {}).get("completion_tokens", 0)
+
                 # add generation
                 generation_usage = GenerationUsage(
                     total=total_token,
+                    input=input_tokens,
+                    output=output_tokens,
+                    unit=UnitEnum.TOKENS,
                 )
 
                 node_generation_data = LangfuseGeneration(
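The heart of this hunk is a two-way token lookup: a chatflow run records its token counts on the Message row (found via workflow_run_id), while a plain workflow run only carries them inside the LLM node's serialized outputs under a "usage" key. Below is a minimal standalone sketch of that resolution logic; the helper name resolve_token_usage is hypothetical and a plain dict stands in for the ORM row, so this illustrates the fallback shape rather than the committed code itself.

import json
from typing import Optional, Tuple


def resolve_token_usage(
    message_row: Optional[dict],
    node_outputs: str,
) -> Tuple[int, int]:
    """Return (input_tokens, output_tokens) for an LLM node.

    message_row mimics the Message columns queried in the diff above;
    node_outputs is the JSON string stored on WorkflowNodeExecution.outputs.
    """
    if message_row:
        # chatflow: the Message row already carries both counts
        return message_row["message_tokens"], message_row["answer_tokens"]
    # workflow: fall back to the usage block in the node's serialized outputs
    usage = json.loads(node_outputs).get("usage", {})
    return usage.get("prompt_tokens", 0), usage.get("completion_tokens", 0)


# chatflow-style run: counts come from the message row
print(resolve_token_usage({"message_tokens": 12, "answer_tokens": 34}, "{}"))  # (12, 34)

# workflow-style run: counts come from the serialized node outputs
outputs = json.dumps({"usage": {"prompt_tokens": 5, "completion_tokens": 7}})
print(resolve_token_usage(None, outputs))  # (5, 7)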
@@ -49,6 +49,6 @@ def process_trace_tasks(file_info):
     except Exception:
         failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}"
         redis_client.incr(failed_key)
-        logging.info(f"Processing trace tasks failed, app_id: {app_id}")
+        logging.exception(f"Processing trace tasks failed, app_id: {app_id}")
     finally:
         storage.delete(file_path)
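The second hunk changes one word, but it matters: logging.exception logs at ERROR level and appends the traceback of the exception currently being handled, whereas logging.info recorded only the message and discarded the stack. A minimal illustration using only the standard library (the app id shown is a placeholder):

import logging

logging.basicConfig(level=logging.INFO)

try:
    raise ValueError("boom")
except Exception:
    # emits an ERROR-level record plus the full traceback of the active exception
    logging.exception("Processing trace tasks failed, app_id: %s", "demo-app")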