refactor: refactor the prompt management mechanism (#17)

DanielWalnut 2025-05-09 15:50:46 +08:00 committed by GitHub
parent 091f437bc5
commit 97a15dce36
16 changed files with 32 additions and 42 deletions
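The refactor replaces the prompt strings that were previously hard-coded inside each graph node with per-feature template files, looked up by a namespaced key such as "prose/prose_continue" through get_prompt_template from src.prompts.template. The loader itself is not part of this diff; the snippet below is only a minimal sketch of what such a helper might look like, assuming the templates are stored as Markdown files under the src/prompts/ package:

import os

# Hypothetical sketch only; the real src/prompts/template.py is not shown in this commit.
# Assumes each template lives as a Markdown file, e.g. src/prompts/prose/prose_fix.md.
_PROMPT_DIR = os.path.dirname(__file__)


def get_prompt_template(name: str) -> str:
    """Return the raw prompt text for a path-like key such as "prose/prose_continue"."""
    path = os.path.join(_PROMPT_DIR, f"{name}.md")
    with open(path, "r", encoding="utf-8") as f:
        return f.read()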

View File

@@ -22,7 +22,7 @@ def script_writer_node(state: PodcastState):
     ).with_structured_output(Script, method="json_mode")
     script = model.invoke(
         [
-            SystemMessage(content=get_prompt_template("podcast_script_writer")),
+            SystemMessage(content=get_prompt_template("podcast/podcast_script_writer")),
             HumanMessage(content=state["input"]),
         ],
     )

View File

@@ -21,7 +21,7 @@ def ppt_composer_node(state: PPTState):
     model = get_llm_by_type(AGENT_LLM_MAP["ppt_composer"])
     ppt_content = model.invoke(
         [
-            SystemMessage(content=get_prompt_template("ppt_composer")),
+            SystemMessage(content=get_prompt_template("ppt/ppt_composer")),
             HumanMessage(content=state["input"]),
         ],
     )

View File

@@ -0,0 +1,4 @@
+You are an AI writing assistant that continues existing text based on context from prior text.
+- Give more weight/priority to the later characters than the beginning ones.
+- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
+- Use Markdown formatting when appropriate

View File

@@ -0,0 +1,4 @@
+You are an AI writing assistant that fixes grammar and spelling errors in existing text.
+- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
+- Use Markdown formatting when appropriate.
+- If the text is already correct, just return the original text.

View File

@@ -0,0 +1,3 @@
+You are an AI writing assistant that improves existing text.
+- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
+- Use Markdown formatting when appropriate.

View File

@@ -0,0 +1,2 @@
+You are an AI writing assistant that lengthens existing text.
+- Use Markdown formatting when appropriate.

View File

@@ -0,0 +1,2 @@
+You are an AI writing assistant that shortens existing text.
+- Use Markdown formatting when appropriate.

View File

@@ -0,0 +1,3 @@
+You are an AI writing assistant that generates text based on a prompt.
+- You take an input from the user and a command for manipulating the text.
+- Use Markdown formatting when appropriate.

View File

@@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
 from src.config.agents import AGENT_LLM_MAP
 from src.llms.llm import get_llm_by_type
+from src.prompts.template import get_prompt_template
 from src.prose.graph.state import ProseState
 logger = logging.getLogger(__name__)
-prompt = """
-You are an AI writing assistant that continues existing text based on context from prior text.
-- Give more weight/priority to the later characters than the beginning ones.
-- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
-- Use Markdown formatting when appropriate
-"""
 def prose_continue_node(state: ProseState):
     logger.info("Generating prose continue content...")
     model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
     prose_content = model.invoke(
         [
-            SystemMessage(content=prompt),
+            SystemMessage(content=get_prompt_template("prose/prose_continue")),
             HumanMessage(content=state["content"]),
         ],
     )

View File

@@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
 from src.config.agents import AGENT_LLM_MAP
 from src.llms.llm import get_llm_by_type
+from src.prompts.template import get_prompt_template
 from src.prose.graph.state import ProseState
 logger = logging.getLogger(__name__)
-prompt = """
-You are an AI writing assistant that fixes grammar and spelling errors in existing text.
-- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
-- Use Markdown formatting when appropriate.
-- If the text is already correct, just return the original text.
-"""
 def prose_fix_node(state: ProseState):
     logger.info("Generating prose fix content...")
     model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
     prose_content = model.invoke(
         [
-            SystemMessage(content=prompt),
+            SystemMessage(content=get_prompt_template("prose/prose_fix")),
             HumanMessage(content=f"The existing text is: {state['content']}"),
         ],
     )

View File

@@ -8,22 +8,17 @@ from langchain.schema import HumanMessage, SystemMessage
 from src.config.agents import AGENT_LLM_MAP
 from src.llms.llm import get_llm_by_type
 from src.prose.graph.state import ProseState
+from src.prompts.template import get_prompt_template
 logger = logging.getLogger(__name__)
-prompt = """
-You are an AI writing assistant that improves existing text.
-- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
-- Use Markdown formatting when appropriate.
-"""
 def prose_improve_node(state: ProseState):
     logger.info("Generating prose improve content...")
     model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
     prose_content = model.invoke(
         [
-            SystemMessage(content=prompt),
+            SystemMessage(content=get_prompt_template("prose/prose_improver")),
             HumanMessage(content=f"The existing text is: {state['content']}"),
         ],
     )

View File

@@ -7,22 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
 from src.config.agents import AGENT_LLM_MAP
 from src.llms.llm import get_llm_by_type
+from src.prompts.template import get_prompt_template
 from src.prose.graph.state import ProseState
 logger = logging.getLogger(__name__)
-prompt = """
-You are an AI writing assistant that lengthens existing text.
-- Use Markdown formatting when appropriate.
-"""
 def prose_longer_node(state: ProseState):
     logger.info("Generating prose longer content...")
     model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
     prose_content = model.invoke(
         [
-            SystemMessage(content=prompt),
+            SystemMessage(content=get_prompt_template("prose/prose_longer")),
             HumanMessage(content=f"The existing text is: {state['content']}"),
         ],
     )

View File

@@ -7,13 +7,10 @@ from langchain.schema import HumanMessage, SystemMessage
 from src.config.agents import AGENT_LLM_MAP
 from src.llms.llm import get_llm_by_type
+from src.prompts.template import get_prompt_template
 from src.prose.graph.state import ProseState
 logger = logging.getLogger(__name__)
-prompt = """
-You are an AI writing assistant that shortens existing text.
-- Use Markdown formatting when appropriate.
-"""
 def prose_shorter_node(state: ProseState):
@@ -21,7 +18,7 @@ def prose_shorter_node(state: ProseState):
     model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
     prose_content = model.invoke(
         [
-            SystemMessage(content=prompt),
+            SystemMessage(content=get_prompt_template("prose/prose_shorter")),
             HumanMessage(content=f"The existing text is: {state['content']}"),
         ],
     )

View File

@@ -7,14 +7,10 @@ from langchain.schema import HumanMessage, SystemMessage
 from src.config.agents import AGENT_LLM_MAP
 from src.llms.llm import get_llm_by_type
+from src.prompts.template import get_prompt_template
 from src.prose.graph.state import ProseState
 logger = logging.getLogger(__name__)
-prompt = """
-You area an AI writing assistant that generates text based on a prompt.
-- You take an input from the user and a command for manipulating the text."
-- Use Markdown formatting when appropriate.
-"""
 def prose_zap_node(state: ProseState):
@@ -22,7 +18,7 @@ def prose_zap_node(state: ProseState):
     model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
     prose_content = model.invoke(
         [
-            SystemMessage(content=prompt),
+            SystemMessage(content=get_prompt_template("prose/prose_zap")),
             HumanMessage(
                 content=f"For this text: {state['content']}.\nYou have to respect the command: {state['command']}"
             ),