From 97a15dce3610a43dbe0db61802d85e8915b1074d Mon Sep 17 00:00:00 2001 From: DanielWalnut <45447813+hetaoBackend@users.noreply.github.com> Date: Fri, 9 May 2025 15:50:46 +0800 Subject: [PATCH] refactor: refactor the prompt management mechanism (#17) --- src/podcast/graph/script_writer_node.py | 2 +- src/ppt/graph/ppt_composer_node.py | 2 +- src/prompts/{ => podcast}/podcast_script_writer.md | 0 src/prompts/{ => ppt}/ppt_composer.md | 0 src/prompts/prose/prose_continue.md | 4 ++++ src/prompts/prose/prose_fix.md | 4 ++++ src/prompts/prose/prose_improver.md | 3 +++ src/prompts/prose/prose_longer.md | 2 ++ src/prompts/prose/prose_shorter.md | 2 ++ src/prompts/prose/prose_zap.md | 3 +++ src/prose/graph/prose_continue_node.py | 10 ++-------- src/prose/graph/prose_fix_node.py | 10 ++-------- src/prose/graph/prose_improve_node.py | 9 ++------- src/prose/graph/prose_longer_node.py | 8 ++------ src/prose/graph/prose_shorter_node.py | 7 ++----- src/prose/graph/prose_zap_node.py | 8 ++------ 16 files changed, 32 insertions(+), 42 deletions(-) rename src/prompts/{ => podcast}/podcast_script_writer.md (100%) rename src/prompts/{ => ppt}/ppt_composer.md (100%) create mode 100644 src/prompts/prose/prose_continue.md create mode 100644 src/prompts/prose/prose_fix.md create mode 100644 src/prompts/prose/prose_improver.md create mode 100644 src/prompts/prose/prose_longer.md create mode 100644 src/prompts/prose/prose_shorter.md create mode 100644 src/prompts/prose/prose_zap.md diff --git a/src/podcast/graph/script_writer_node.py b/src/podcast/graph/script_writer_node.py index c187678..2b3831b 100644 --- a/src/podcast/graph/script_writer_node.py +++ b/src/podcast/graph/script_writer_node.py @@ -22,7 +22,7 @@ def script_writer_node(state: PodcastState): ).with_structured_output(Script, method="json_mode") script = model.invoke( [ - SystemMessage(content=get_prompt_template("podcast_script_writer")), + SystemMessage(content=get_prompt_template("podcast/podcast_script_writer")), 
HumanMessage(content=state["input"]), ], ) diff --git a/src/ppt/graph/ppt_composer_node.py b/src/ppt/graph/ppt_composer_node.py index fe3207d..df7a0f8 100644 --- a/src/ppt/graph/ppt_composer_node.py +++ b/src/ppt/graph/ppt_composer_node.py @@ -21,7 +21,7 @@ def ppt_composer_node(state: PPTState): model = get_llm_by_type(AGENT_LLM_MAP["ppt_composer"]) ppt_content = model.invoke( [ - SystemMessage(content=get_prompt_template("ppt_composer")), + SystemMessage(content=get_prompt_template("ppt/ppt_composer")), HumanMessage(content=state["input"]), ], ) diff --git a/src/prompts/podcast_script_writer.md b/src/prompts/podcast/podcast_script_writer.md similarity index 100% rename from src/prompts/podcast_script_writer.md rename to src/prompts/podcast/podcast_script_writer.md diff --git a/src/prompts/ppt_composer.md b/src/prompts/ppt/ppt_composer.md similarity index 100% rename from src/prompts/ppt_composer.md rename to src/prompts/ppt/ppt_composer.md diff --git a/src/prompts/prose/prose_continue.md b/src/prompts/prose/prose_continue.md new file mode 100644 index 0000000..0883c43 --- /dev/null +++ b/src/prompts/prose/prose_continue.md @@ -0,0 +1,4 @@ +You are an AI writing assistant that continues existing text based on context from prior text. +- Give more weight/priority to the later characters than the beginning ones. +- Limit your response to no more than 200 characters, but make sure to construct complete sentences. +- Use Markdown formatting when appropriate diff --git a/src/prompts/prose/prose_fix.md b/src/prompts/prose/prose_fix.md new file mode 100644 index 0000000..cd435ab --- /dev/null +++ b/src/prompts/prose/prose_fix.md @@ -0,0 +1,4 @@ +You are an AI writing assistant that fixes grammar and spelling errors in existing text. +- Limit your response to no more than 200 characters, but make sure to construct complete sentences. +- Use Markdown formatting when appropriate. +- If the text is already correct, just return the original text. 
diff --git a/src/prompts/prose/prose_improver.md b/src/prompts/prose/prose_improver.md new file mode 100644 index 0000000..1f644b7 --- /dev/null +++ b/src/prompts/prose/prose_improver.md @@ -0,0 +1,3 @@ +You are an AI writing assistant that improves existing text. +- Limit your response to no more than 200 characters, but make sure to construct complete sentences. +- Use Markdown formatting when appropriate. \ No newline at end of file diff --git a/src/prompts/prose/prose_longer.md b/src/prompts/prose/prose_longer.md new file mode 100644 index 0000000..8982ab3 --- /dev/null +++ b/src/prompts/prose/prose_longer.md @@ -0,0 +1,2 @@ +You are an AI writing assistant that lengthens existing text. +- Use Markdown formatting when appropriate. diff --git a/src/prompts/prose/prose_shorter.md b/src/prompts/prose/prose_shorter.md new file mode 100644 index 0000000..2b2606c --- /dev/null +++ b/src/prompts/prose/prose_shorter.md @@ -0,0 +1,2 @@ +You are an AI writing assistant that shortens existing text. +- Use Markdown formatting when appropriate. diff --git a/src/prompts/prose/prose_zap.md b/src/prompts/prose/prose_zap.md new file mode 100644 index 0000000..2b27d11 --- /dev/null +++ b/src/prompts/prose/prose_zap.md @@ -0,0 +1,3 @@ +You are an AI writing assistant that generates text based on a prompt. +- You take an input from the user and a command for manipulating the text. +- Use Markdown formatting when appropriate. 
diff --git a/src/prose/graph/prose_continue_node.py b/src/prose/graph/prose_continue_node.py index 9364745..b82856a 100644 --- a/src/prose/graph/prose_continue_node.py +++ b/src/prose/graph/prose_continue_node.py @@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage from src.config.agents import AGENT_LLM_MAP from src.llms.llm import get_llm_by_type +from src.prompts.template import get_prompt_template from src.prose.graph.state import ProseState logger = logging.getLogger(__name__) -prompt = """ -You are an AI writing assistant that continues existing text based on context from prior text. -- Give more weight/priority to the later characters than the beginning ones. -- Limit your response to no more than 200 characters, but make sure to construct complete sentences. -- Use Markdown formatting when appropriate -""" - def prose_continue_node(state: ProseState): logger.info("Generating prose continue content...") model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"]) prose_content = model.invoke( [ - SystemMessage(content=prompt), + SystemMessage(content=get_prompt_template("prose/prose_continue")), HumanMessage(content=state["content"]), ], ) diff --git a/src/prose/graph/prose_fix_node.py b/src/prose/graph/prose_fix_node.py index ab42ca7..1f2ab17 100644 --- a/src/prose/graph/prose_fix_node.py +++ b/src/prose/graph/prose_fix_node.py @@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage from src.config.agents import AGENT_LLM_MAP from src.llms.llm import get_llm_by_type +from src.prompts.template import get_prompt_template from src.prose.graph.state import ProseState logger = logging.getLogger(__name__) -prompt = """ -You are an AI writing assistant that fixes grammar and spelling errors in existing text. -- Limit your response to no more than 200 characters, but make sure to construct complete sentences. -- Use Markdown formatting when appropriate. -- If the text is already correct, just return the original text. 
-""" - def prose_fix_node(state: ProseState): logger.info("Generating prose fix content...") model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"]) prose_content = model.invoke( [ - SystemMessage(content=prompt), + SystemMessage(content=get_prompt_template("prose/prose_fix")), HumanMessage(content=f"The existing text is: {state['content']}"), ], ) diff --git a/src/prose/graph/prose_improve_node.py b/src/prose/graph/prose_improve_node.py index 7b1df9d..b9ae166 100644 --- a/src/prose/graph/prose_improve_node.py +++ b/src/prose/graph/prose_improve_node.py @@ -8,22 +8,17 @@ from langchain.schema import HumanMessage, SystemMessage from src.config.agents import AGENT_LLM_MAP from src.llms.llm import get_llm_by_type from src.prose.graph.state import ProseState +from src.prompts.template import get_prompt_template logger = logging.getLogger(__name__) -prompt = """ -You are an AI writing assistant that improves existing text. -- Limit your response to no more than 200 characters, but make sure to construct complete sentences. -- Use Markdown formatting when appropriate. 
-""" - def prose_improve_node(state: ProseState): logger.info("Generating prose improve content...") model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"]) prose_content = model.invoke( [ - SystemMessage(content=prompt), + SystemMessage(content=get_prompt_template("prose/prose_improver")), HumanMessage(content=f"The existing text is: {state['content']}"), ], ) diff --git a/src/prose/graph/prose_longer_node.py b/src/prose/graph/prose_longer_node.py index 83d1450..d7705ab 100644 --- a/src/prose/graph/prose_longer_node.py +++ b/src/prose/graph/prose_longer_node.py @@ -7,22 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage from src.config.agents import AGENT_LLM_MAP from src.llms.llm import get_llm_by_type +from src.prompts.template import get_prompt_template from src.prose.graph.state import ProseState logger = logging.getLogger(__name__) -prompt = """ -You are an AI writing assistant that lengthens existing text. -- Use Markdown formatting when appropriate. -""" - def prose_longer_node(state: ProseState): logger.info("Generating prose longer content...") model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"]) prose_content = model.invoke( [ - SystemMessage(content=prompt), + SystemMessage(content=get_prompt_template("prose/prose_longer")), HumanMessage(content=f"The existing text is: {state['content']}"), ], ) diff --git a/src/prose/graph/prose_shorter_node.py b/src/prose/graph/prose_shorter_node.py index 8e234db..51fa330 100644 --- a/src/prose/graph/prose_shorter_node.py +++ b/src/prose/graph/prose_shorter_node.py @@ -7,13 +7,10 @@ from langchain.schema import HumanMessage, SystemMessage from src.config.agents import AGENT_LLM_MAP from src.llms.llm import get_llm_by_type +from src.prompts.template import get_prompt_template from src.prose.graph.state import ProseState logger = logging.getLogger(__name__) -prompt = """ -You are an AI writing assistant that shortens existing text. -- Use Markdown formatting when appropriate. 
-""" def prose_shorter_node(state: ProseState): @@ -21,7 +18,7 @@ def prose_shorter_node(state: ProseState): model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"]) prose_content = model.invoke( [ - SystemMessage(content=prompt), + SystemMessage(content=get_prompt_template("prose/prose_shorter")), HumanMessage(content=f"The existing text is: {state['content']}"), ], ) diff --git a/src/prose/graph/prose_zap_node.py b/src/prose/graph/prose_zap_node.py index f92bd7d..814567c 100644 --- a/src/prose/graph/prose_zap_node.py +++ b/src/prose/graph/prose_zap_node.py @@ -7,14 +7,10 @@ from langchain.schema import HumanMessage, SystemMessage from src.config.agents import AGENT_LLM_MAP from src.llms.llm import get_llm_by_type +from src.prompts.template import get_prompt_template from src.prose.graph.state import ProseState logger = logging.getLogger(__name__) -prompt = """ -You area an AI writing assistant that generates text based on a prompt. -- You take an input from the user and a command for manipulating the text." -- Use Markdown formatting when appropriate. -""" def prose_zap_node(state: ProseState): @@ -22,7 +18,7 @@ def prose_zap_node(state: ProseState): model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"]) prose_content = model.invoke( [ - SystemMessage(content=prompt), + SystemMessage(content=get_prompt_template("prose/prose_zap")), HumanMessage( content=f"For this text: {state['content']}.\nYou have to respect the command: {state['command']}" ),