feat: disable token counting in large language models for performance testing

Signed-off-by: -LAN- <laipz8200@outlook.com>
This commit is contained in:
-LAN- 2025-03-24 18:54:41 +08:00
parent 307ebbb4ff
commit 94c08462a1
No known key found for this signature in database
GPG Key ID: 6BA0D108DED011FF
2 changed files with 3 additions and 4 deletions

View File

@@ -537,7 +537,6 @@ if you are not sure about the structure.
"""
raise NotImplementedError
@abstractmethod
def get_num_tokens(
self,
model: str,
@@ -554,7 +553,8 @@ if you are not sure about the structure.
:param tools: tools for tool calling
:return:
"""
raise NotImplementedError
# Disable the token count in LLMs for performance testing.
return 0
def enforce_stop_tokens(self, text: str, stop: list[str]) -> str:
"""Cut off the text as soon as any stop words occur."""

View File

@@ -25,8 +25,7 @@ class GPT2Tokenizer:
# future = _executor.submit(GPT2Tokenizer._get_num_tokens_by_gpt2, text)
# result = future.result()
# return cast(int, result)
# return GPT2Tokenizer._get_num_tokens_by_gpt2(text)
return 0
return GPT2Tokenizer._get_num_tokens_by_gpt2(text)
@staticmethod
def get_encoder() -> Any: