This commit is contained in:
xyz 2026-01-07 14:04:52 +08:00
parent 8734b15be6
commit 1b7d45fc34
6 changed files with 34 additions and 20 deletions

View File

@@ -19,16 +19,18 @@ class AgentMessage:
class BaseAgent:
"""Agent 基类"""
def __init__(self, agent_id: str, llm_client):
def __init__(self, agent_id: str, llm_client, language: str = "Chinese"):
"""
初始化 Agent
Args:
agent_id: Agent 标识符 ( 'ceo', 'cto')
llm_client: LLM 客户端实例
language: 输出语言
"""
self.agent_id = agent_id
self.llm_client = llm_client
self.language = language
profile = get_agent_profile(agent_id)
if not profile:
@@ -38,7 +40,7 @@ class BaseAgent:
self.emoji = profile["emoji"]
self.perspective = profile["perspective"]
self.focus_areas = profile["focus_areas"]
self.system_prompt = profile["system_prompt"]
self.system_prompt = f"{profile['system_prompt']}\n\nIMPORTANT: You MUST output your response in {self.language}."
# 存储对话历史
self.conversation_history = []

View File

@@ -5,26 +5,28 @@ import config
class ResearchAgent:
"""研究模式专用 Agent"""
def __init__(self, role: str, llm_client: LLMClient, name: str = None):
def __init__(self, role: str, llm_client: LLMClient, name: str = None, language: str = "Chinese"):
self.role = role
self.llm_client = llm_client
self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
self.name = name if name else self.role_config.get("name", role.capitalize())
self.language = language
@property
def model_name(self) -> str:
return self.llm_client.model
def _get_system_prompt(self, context: str = "") -> str:
base_prompt = ""
if self.role == "council_member":
return f"""You are {self.name}, a member of the Multi-Model Decision Council.
base_prompt = f"""You are {self.name}, a member of the Multi-Model Decision Council.
Your goal is to participate in a round-table discussion to solve the user's problem.
Be conversational, insightful, and constructive.
Build upon others' ideas or respectfully disagree with valid reasoning.
Context: {context}"""
elif self.role == "expert_a":
return f"""You are Expert A, a Senior Analyst.
base_prompt = f"""You are Expert A, a Senior Analyst.
You are participating in a round-table discussion.
Your goal is to analyze the topic and propose solutions.
Be conversational, direct, and responsive to other experts.
@@ -32,21 +34,23 @@ Do not write a full final report; focus on the current discussion turn.
Context: {context}"""
elif self.role == "expert_b":
return f"""You are Expert B, a Critical Reviewer.
base_prompt = f"""You are Expert B, a Critical Reviewer.
You are participating in a round-table discussion.
Your goal is to critique Expert A's points and offer alternative perspectives.
Be conversational and constructive. Challenge assumptions directly.
Context: {context}"""
elif self.role == "expert_c":
return f"""You are Expert C, a Senior Strategist and Visual Thinker.
base_prompt = f"""You are Expert C, a Senior Strategist and Visual Thinker.
Your goal is to synthesize the final output.
Combine the structural strength of Expert A with the critical insights of Expert B.
Produce a final, polished, comprehensive plan or report.
CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
else:
return "You are a helpful assistant."
base_prompt = "You are a helpful assistant."
return f"{base_prompt}\n\nIMPORTANT: You MUST output your response in {self.language}."
def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
"""Generate response stream"""

14
app.py
View File

@@ -137,6 +137,14 @@ with st.sidebar:
if not api_key:
st.warning("请配置 API Key 以继续")
# Output Language Selection
output_language = st.sidebar.selectbox(
"🌐 输出语言",
options=config.SUPPORTED_LANGUAGES,
index=0,
help="所有 AI Agent 将使用此语言进行回复"
)
st.divider()
# 模式选择
@@ -261,7 +269,8 @@ if mode == "Deep Research":
config_obj = ResearchConfig(
topic=research_topic,
context=research_context,
experts=experts_config
experts=experts_config,
language=output_language
)
manager.create_agents(config_obj)
@@ -481,7 +490,8 @@ elif mode == "Debate Workshop":
context=context,
agent_ids=selected_agents,
max_rounds=max_rounds,
agent_clients=agent_clients
agent_clients=agent_clients,
language=output_language
)
debate_manager.setup_debate(debate_config)

View File

@@ -93,6 +93,9 @@ AVAILABLE_MODELS = {
MAX_DEBATE_ROUNDS = 3 # 最大辩论轮数
MAX_AGENTS = 6 # 最大参与 Agent 数量
# 支持的输出语言
SUPPORTED_LANGUAGES = ["Chinese", "English", "Japanese", "Spanish", "French", "German"]
# 研究模式模型角色配置
RESEARCH_MODEL_ROLES = {
"expert_a": {

View File

@@ -18,6 +18,7 @@ class DebateConfig:
agent_ids: List[str] = None
max_rounds: int = 2
agent_clients: dict = None # Map[agent_id, LLMClient]
language: str = "Chinese"
@dataclass
@@ -64,7 +65,7 @@ class DebateManager:
if hasattr(debate_config, 'agent_clients') and debate_config.agent_clients and agent_id in debate_config.agent_clients:
client = debate_config.agent_clients[agent_id]
agent = BaseAgent(agent_id, client)
agent = BaseAgent(agent_id, client, language=debate_config.language)
self.agents.append(agent)
def run_debate_stream(

View File

@@ -10,6 +10,7 @@ class ResearchConfig:
context: str = ""
# Dynamic list of experts: [{"name": "Expert 1", "model": "gpt-4o", "role": "analyst"}, ...]
experts: List[Dict[str, str]] = None
language: str = "Chinese"
class ResearchManager:
"""Manages the Multi-Model Council workflow"""
@@ -33,19 +34,12 @@ class ResearchManager:
self.agents = []
if config.experts:
for idx, expert_conf in enumerate(config.experts):
# Assign role based on position or config
# First agents are discussion members, last one is Synthesizer usually,
# but for equality we treat them all as members until the end.
# We'll assign a generic "member" role or specific if provided.
role_type = "council_member"
# If it's the last one, maybe give them synthesizer duty?
# For now, all are members, and we explicitly pick one for synthesis.
agent = ResearchAgent(
role=role_type,
llm_client=self._get_client(expert_conf["model"]),
name=expert_conf.get("name", f"Expert {idx+1}")
name=expert_conf.get("name", f"Expert {idx+1}"),
language=config.language
)
self.agents.append(agent)