Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git, synced 2025-04-21 05:09:41 +08:00
Revert "feat: add langfuse llm node input and output" (#16947)
parent 377d11d13b
commit ea1d459423
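Judging from the hunks below, the revert removes the Message-based lookup that split LLM node token usage into input and output counts, leaving GenerationUsage with only the total, and restores the trace-task failure log from logging.exception to logging.info.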
@@ -29,7 +29,7 @@ from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
 )
 from core.ops.utils import filter_none_values
 from extensions.ext_database import db
-from models.model import EndUser, Message
+from models.model import EndUser
 from models.workflow import WorkflowNodeExecution
 
 logger = logging.getLogger(__name__)
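Dropping Message from this import line matches the next hunk, which removes the query that referenced it.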
@@ -213,32 +213,9 @@ class LangFuseDataTrace(BaseTraceInstance):
 
             if process_data and process_data.get("model_mode") == "chat":
                 total_token = metadata.get("total_tokens", 0)
-
-                # through workflow_run_id get message data
-                message_data = (
-                    db.session.query(
-                        Message.answer_tokens,  # input
-                        Message.message_tokens,  # output
-                    )
-                    .filter(Message.workflow_run_id == trace_info.workflow_run_id)
-                    .first()
-                )
-
-                if message_data:
-                    # chatflow data
-                    input_tokens = message_data.message_tokens
-                    output_tokens = message_data.answer_tokens
-                else:
-                    # workflow data
-                    input_tokens = json.loads(node_execution.outputs).get("usage", {}).get("prompt_tokens", 0)
-                    output_tokens = json.loads(node_execution.outputs).get("usage", {}).get("completion_tokens", 0)
-
                 # add generation
                 generation_usage = GenerationUsage(
                     total=total_token,
-                    input=input_tokens,
-                    output=output_tokens,
-                    unit=UnitEnum.TOKENS,
                 )
 
                 node_generation_data = LangfuseGeneration(
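For readability, here is the chat-model branch as it stands after the revert, assembled from the hunk's context lines (a sketch only: the indentation and the surrounding node-execution loop are assumed, and the LangfuseGeneration call continues past the end of the hunk):

if process_data and process_data.get("model_mode") == "chat":
    total_token = metadata.get("total_tokens", 0)
    # add generation: only the total token count is reported now
    generation_usage = GenerationUsage(
        total=total_token,
    )

    node_generation_data = LangfuseGeneration(
        ...  # remaining fields lie outside this hunk
    )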
@@ -49,6 +49,6 @@ def process_trace_tasks(file_info):
     except Exception:
         failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}"
         redis_client.incr(failed_key)
-        logging.exception(f"Processing trace tasks failed, app_id: {app_id}")
+        logging.info(f"Processing trace tasks failed, app_id: {app_id}")
     finally:
         storage.delete(file_path)
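This last hunk restores the failure log to its pre-feature level. In Python's standard library, logging.exception emits an ERROR record with the active traceback appended, while logging.info records only the message. A minimal, self-contained illustration of the difference:

import logging

logging.basicConfig(level=logging.INFO)

try:
    raise ValueError("boom")
except Exception:
    # ERROR record with the ValueError traceback attached
    logging.exception("Processing trace tasks failed")
    # INFO record, message only, no traceback
    logging.info("Processing trace tasks failed")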