diff --git a/api/controllers/console/auth/data_source_oauth.py b/api/controllers/console/auth/data_source_oauth.py
index 3c3f45260a..faca67bb17 100644
--- a/api/controllers/console/auth/data_source_oauth.py
+++ b/api/controllers/console/auth/data_source_oauth.py
@@ -34,7 +34,6 @@ class OAuthDataSource(Resource):
         OAUTH_DATASOURCE_PROVIDERS = get_oauth_providers()
         with current_app.app_context():
             oauth_provider = OAUTH_DATASOURCE_PROVIDERS.get(provider)
-            print(vars(oauth_provider))
         if not oauth_provider:
             return {"error": "Invalid provider"}, 400
         if dify_config.NOTION_INTEGRATION_TYPE == "internal":
diff --git a/api/controllers/console/auth/oauth.py b/api/controllers/console/auth/oauth.py
index f53c28e2ec..5de8c6766d 100644
--- a/api/controllers/console/auth/oauth.py
+++ b/api/controllers/console/auth/oauth.py
@@ -52,7 +52,6 @@ class OAuthLogin(Resource):
         OAUTH_PROVIDERS = get_oauth_providers()
         with current_app.app_context():
             oauth_provider = OAUTH_PROVIDERS.get(provider)
-            print(vars(oauth_provider))
         if not oauth_provider:
             return {"error": "Invalid provider"}, 400
 
diff --git a/api/core/llm_generator/output_parser/suggested_questions_after_answer.py b/api/core/llm_generator/output_parser/suggested_questions_after_answer.py
index 182aeed98f..c451bf514c 100644
--- a/api/core/llm_generator/output_parser/suggested_questions_after_answer.py
+++ b/api/core/llm_generator/output_parser/suggested_questions_after_answer.py
@@ -15,6 +15,5 @@ class SuggestedQuestionsAfterAnswerOutputParser:
             json_obj = json.loads(action_match.group(0).strip())
         else:
             json_obj = []
-            print(f"Could not parse LLM output: {text}")
 
         return json_obj
diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py b/api/core/rag/datasource/vdb/oracle/oraclevector.py
index 4ced5d61e5..71c58c9d0c 100644
--- a/api/core/rag/datasource/vdb/oracle/oraclevector.py
+++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py
@@ -230,7 +230,6 @@ class OracleVector(BaseVector):
                 except LookupError:
                     nltk.download("punkt")
                     nltk.download("stopwords")
-                    print("run download")
                 e_str = re.sub(r"[^\w ]", "", query)
                 all_tokens = nltk.word_tokenize(e_str)
                 stop_words = stopwords.words("english")
diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py
index 60a5901b21..035c34dcf4 100644
--- a/api/core/workflow/graph_engine/graph_engine.py
+++ b/api/core/workflow/graph_engine/graph_engine.py
@@ -64,7 +64,6 @@ class GraphEngineThreadPool(ThreadPoolExecutor):
         self.submit_count -= 1
 
     def check_is_full(self) -> None:
-        print(f"submit_count: {self.submit_count}, max_submit_count: {self.max_submit_count}")
         if self.submit_count > self.max_submit_count:
             raise ValueError(f"Max submit count {self.max_submit_count} of workflow thread pool reached.")