# (file metadata from extraction: 62 lines, 2.6 KiB, Python)
from typing import Generator, Optional

from utils.llm_client import LLMClient

import config
|
|
|
|
class ResearchAgent:
    """Agent specialized for research mode.

    Wraps an LLM client and selects a role-specific system prompt so the
    agent can take part in a multi-expert round-table discussion
    ("council_member", "expert_a", "expert_b", "expert_c"); any other role
    falls back to a generic assistant prompt.
    """

    def __init__(
        self,
        role: str,
        llm_client: "LLMClient",
        name: Optional[str] = None,
        language: str = "Chinese",
    ):
        """Create an agent for the given research role.

        Args:
            role: Role key; looked up in ``config.RESEARCH_MODEL_ROLES``.
            llm_client: Client used to stream chat completions.
            name: Display name; when omitted, falls back to the configured
                role name, then to the capitalized role key.
            language: Language the agent is instructed to respond in.
        """
        self.role = role
        self.llm_client = llm_client
        # Missing roles resolve to an empty mapping so the fallbacks below work.
        self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
        self.name = name if name else self.role_config.get("name", role.capitalize())
        self.language = language

    @property
    def model_name(self) -> str:
        """Name of the underlying LLM model."""
        return self.llm_client.model

    def _get_system_prompt(self, context: str = "") -> str:
        """Build the role-specific system prompt.

        Args:
            context: Extra discussion context interpolated into most prompts
                (``expert_c`` ignores it — it synthesizes the final output).

        Returns:
            The system prompt, always ending with an instruction to answer
            in ``self.language``.
        """
        base_prompt = ""
        if self.role == "council_member":
            base_prompt = f"""You are {self.name}, a member of the Multi-Model Decision Council.
Your goal is to participate in a round-table discussion to solve the user's problem.
Be conversational, insightful, and constructive.
Build upon others' ideas or respectfully disagree with valid reasoning.
Context: {context}"""
        elif self.role == "expert_a":
            base_prompt = f"""You are Expert A, a Senior Analyst.
You are participating in a round-table discussion.
Your goal is to analyze the topic and propose solutions.
Be conversational, direct, and responsive to other experts.
Do not write a full final report; focus on the current discussion turn.
Context: {context}"""
        elif self.role == "expert_b":
            base_prompt = f"""You are Expert B, a Critical Reviewer.
You are participating in a round-table discussion.
Your goal is to critique Expert A's points and offer alternative perspectives.
Be conversational and constructive. Challenge assumptions directly.
Context: {context}"""
        elif self.role == "expert_c":
            base_prompt = f"""You are Expert C, a Senior Strategist and Visual Thinker.
Your goal is to synthesize the final output.
Combine the structural strength of Expert A with the critical insights of Expert B.
Produce a final, polished, comprehensive plan or report.
CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
        else:
            base_prompt = "You are a helpful assistant."

        return f"{base_prompt}\n\nIMPORTANT: You MUST output your response in {self.language}."

    def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
        """Generate response stream.

        Args:
            prompt: The user's message for this discussion turn.
            context: Extra context forwarded into the system prompt.

        Yields:
            Streamed response chunks from the LLM client.
        """
        system_prompt = self._get_system_prompt(context)
        yield from self.llm_client.chat_stream(
            system_prompt=system_prompt,
            user_prompt=prompt
        )
|