wd666/agents/research_agent.py


from typing import Generator, Optional

from utils.llm_client import LLMClient
import config


class ResearchAgent:
    """Agent dedicated to the research mode."""

    def __init__(self, role: str, llm_client: LLMClient, name: Optional[str] = None, language: str = "Chinese"):
        self.role = role
        self.llm_client = llm_client
        # Role-specific settings; falls back to an empty dict for unknown roles.
        self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
        self.name = name if name else self.role_config.get("name", role.capitalize())
        self.language = language

    @property
    def model_name(self) -> str:
        return self.llm_client.model

    def _get_system_prompt(self, context: str = "") -> str:
        base_prompt = ""
        if self.role == "council_member":
            base_prompt = f"""You are {self.name}, a member of the Multi-Model Decision Council.
Your goal is to participate in a round-table discussion to solve the user's problem.
Be conversational, insightful, and constructive.
Build upon others' ideas or respectfully disagree with valid reasoning.
Context: {context}"""
        elif self.role == "expert_a":
            base_prompt = f"""You are Expert A, a Senior Analyst.
You are participating in a round-table discussion.
Your goal is to analyze the topic and propose solutions.
Be conversational, direct, and responsive to other experts.
Do not write a full final report; focus on the current discussion turn.
Context: {context}"""
        elif self.role == "expert_b":
            base_prompt = f"""You are Expert B, a Critical Reviewer.
You are participating in a round-table discussion.
Your goal is to critique Expert A's points and offer alternative perspectives.
Be conversational and constructive. Challenge assumptions directly.
Context: {context}"""
        elif self.role == "expert_c":
            base_prompt = f"""You are Expert C, a Senior Strategist and Visual Thinker.
Your goal is to synthesize the final output.
Combine the structural strength of Expert A with the critical insights of Expert B.
Produce a final, polished, comprehensive plan or report.
CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
        else:
            base_prompt = "You are a helpful assistant."
        return f"{base_prompt}\n\nIMPORTANT: You MUST output your response in {self.language}."

    def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
        """Generate a response stream for the given prompt and context."""
        system_prompt = self._get_system_prompt(context)
        yield from self.llm_client.chat_stream(
            system_prompt=system_prompt,
            user_prompt=prompt
        )
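

# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module).
# The stub below only mimics the interface ResearchAgent actually uses on its
# client -- a `.model` attribute and a `.chat_stream(system_prompt=...,
# user_prompt=...)` generator -- so the demo does not need a real LLM backend.
# It still assumes the project's `config.RESEARCH_MODEL_ROLES` mapping is
# importable, since __init__ reads it; the real LLMClient constructor may
# differ from this duck-typed stand-in.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _EchoClient:
        """Minimal duck-typed stand-in for LLMClient, used only in this demo."""

        model = "demo-model"

        def chat_stream(self, system_prompt: str, user_prompt: str) -> Generator[str, None, None]:
            # Pretend to stream a short reply in two chunks.
            yield f"[{self.model}] received a {len(system_prompt)}-char system prompt. "
            yield f"Discussing: {user_prompt}"

    agent = ResearchAgent(role="expert_a", llm_client=_EchoClient(), language="English")
    for chunk in agent.generate("Outline a research plan for the topic.", context="Round 1"):
        print(chunk, end="")
    print()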