wd666/agents/research_agent.py

from typing import Generator
from utils.llm_client import LLMClient
import config
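
# Expected shape of config.RESEARCH_MODEL_ROLES (an assumption inferred from
# the lookups in __init__ below; the real config module may define more fields):
#
#   RESEARCH_MODEL_ROLES = {
#       "expert_a": {"name": "Expert A"},
#       "expert_b": {"name": "Expert B"},
#       "expert_c": {"name": "Expert C"},
#   }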


class ResearchAgent:
    """Agent dedicated to research mode."""

    def __init__(self, role: str, llm_client: LLMClient):
        self.role = role
        self.llm_client = llm_client
        # Role metadata comes from config; fall back to an empty dict so an
        # unknown role still yields a usable agent with a capitalized name.
        self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
        self.name = self.role_config.get("name", role.capitalize())

    @property
    def model_name(self) -> str:
        return self.llm_client.model

    def _get_system_prompt(self, context: str = "") -> str:
        """Return the role-specific system prompt, with optional shared context."""
        if self.role == "expert_a":
            return f"""You are Expert A, a Senior Analyst.
Your goal is to provide a deep, foundational analysis of the user's topic.
Structure your thinking clearly. Propose a solid initial framework or solution.
Context: {context}"""
        elif self.role == "expert_b":
            return f"""You are Expert B, a Critical Reviewer.
Your goal is to find flaws, risks, and missed opportunities in Expert A's analysis.
Be constructive but rigorous. Don't just agree; add value by challenging assumptions.
Context: {context}"""
        elif self.role == "expert_c":
            return f"""You are Expert C, a Senior Strategist and Visual Thinker.
Your goal is to synthesize the final output.
Combine the structural strength of Expert A with the critical insights of Expert B.
Produce a final, polished, comprehensive plan or report.
CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
        else:
            return "You are a helpful assistant."

    def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
        """Yield the model's response as a stream of text chunks."""
        system_prompt = self._get_system_prompt(context)
        yield from self.llm_client.chat_stream(
            system_prompt=system_prompt,
            user_prompt=prompt,
        )