feat: implement council v3 round-table mode

xyz 2026-01-07 13:44:46 +08:00
parent d26a7a36be
commit da7ccd2d26
10 changed files with 215 additions and 108 deletions

@@ -5,27 +5,37 @@ import config
 class ResearchAgent:
     """Dedicated agent for research mode"""

-    def __init__(self, role: str, llm_client: LLMClient):
+    def __init__(self, role: str, llm_client: LLMClient, name: str = None):
         self.role = role
         self.llm_client = llm_client
         self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
-        self.name = self.role_config.get("name", role.capitalize())
+        self.name = name if name else self.role_config.get("name", role.capitalize())

     @property
     def model_name(self) -> str:
         return self.llm_client.model

     def _get_system_prompt(self, context: str = "") -> str:
-        if self.role == "expert_a":
+        if self.role == "council_member":
+            return f"""You are {self.name}, a member of the Multi-Model Decision Council.
+Your goal is to participate in a round-table discussion to solve the user's problem.
+Be conversational, insightful, and constructive.
+Build upon others' ideas or respectfully disagree with valid reasoning.
+Context: {context}"""
+        elif self.role == "expert_a":
             return f"""You are Expert A, a Senior Analyst.
-Your goal is to provide a deep, foundational analysis of the user's topic.
-Structure your thinking clearly. Propose a solid initial framework or solution.
+You are participating in a round-table discussion.
+Your goal is to analyze the topic and propose solutions.
+Be conversational, direct, and responsive to other experts.
+Do not write a full final report; focus on the current discussion turn.
 Context: {context}"""
         elif self.role == "expert_b":
             return f"""You are Expert B, a Critical Reviewer.
-Your goal is to find flaws, risks, and missed opportunities in Expert A's analysis.
-Be constructive but rigorous. Don't just agree; add value by challenging assumptions.
+You are participating in a round-table discussion.
+Your goal is to critique Expert A's points and offer alternative perspectives.
+Be conversational and constructive. Challenge assumptions directly.
 Context: {context}"""
         elif self.role == "expert_c":
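For reference, a minimal sketch of how the new constructor and "council_member" role might be exercised. This is illustrative only: the import path and the API key are assumptions, not shown in this diff.

# Hypothetical usage sketch (not part of the commit): a named council member.
from llm_client import LLMClient  # assumed module name; the diff does not show the import

client = LLMClient(provider="aihubmix", api_key="sk-...", model="gpt-4o")
agent = ResearchAgent(role="council_member", llm_client=client, name="Dr. Chen")
print(agent.name)        # "Dr. Chen" — an explicit name overrides RESEARCH_MODEL_ROLES
print(agent.model_name)  # "gpt-4o", proxied from the underlying client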

app.py

@@ -131,20 +131,8 @@ with st.sidebar:
     st.divider()

-    if mode == "Deep Research":
-        st.subheader("🧪 研究模型配置")
-        # Model configuration for the 3 roles
-        roles_config = {}
-        for role_key, role_info in config.RESEARCH_MODEL_ROLES.items():
-            roles_config[role_key] = st.selectbox(
-                f"{role_info['name']} ({role_info['description']})",
-                options=list(AVAILABLE_MODELS.keys()),
-                index=list(AVAILABLE_MODELS.keys()).index(role_info['default_model']) if role_info['default_model'] in AVAILABLE_MODELS else 0,
-                key=f"model_{role_key}"
-            )
-    else:  # Debate Workshop
+    if mode == "Debate Workshop":  # Debate Workshop settings
         # Model selection
         model = st.selectbox(
             "🤖 选择通用模型",
@@ -205,11 +193,39 @@ with st.sidebar:
 # ==================== Main UI Logic ====================
 if mode == "Deep Research":
-    st.title("🧪 Multi-Model Council V2")
-    st.markdown("*多模型智囊团:分析 (Expert A) -> 批判 (Expert B) -> 改进 (Expert A) -> 决策 (Expert C)*")
+    st.title("🧪 Multi-Model Council V4")
+    st.markdown("*多模型智囊团:自定义 N 个专家进行多轮对话讨论,最后由最后一位专家决策*")
+
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        research_topic = st.text_area("研究/决策主题", placeholder="请输入你想深入研究或决策的主题...", height=100)
+    with col2:
+        max_rounds = st.number_input("讨论轮数", min_value=1, max_value=5, value=2, help="专家们进行对话的轮数")
+
+    # Expert configuration
+    st.subheader("👥 专家配置")
+    num_experts = st.number_input("专家数量", min_value=2, max_value=5, value=3)
+    experts_config = []
+    cols = st.columns(num_experts)
+    for i in range(num_experts):
+        with cols[i]:
+            default_model_key = list(AVAILABLE_MODELS.keys())[i % len(AVAILABLE_MODELS)]
+            st.markdown(f"**Expert {i+1}**")
+            # Default names: the last expert doubles as the synthesizer
+            default_name = f"Expert {i+1}"
+            if i == num_experts - 1:
+                default_name = f"Expert {i+1} (Synthesizer)"
+            expert_name = st.text_input(f"名称 #{i+1}", value=default_name, key=f"expert_name_{i}")
+            expert_model = st.selectbox(f"模型 #{i+1}", options=list(AVAILABLE_MODELS.keys()), index=list(AVAILABLE_MODELS.keys()).index(default_model_key), key=f"expert_model_{i}")
+            experts_config.append({
+                "name": expert_name,
+                "model": expert_model
+            })
-    # Input
-    research_topic = st.text_area("研究/决策主题", placeholder="请输入你想深入研究或决策的主题...", height=100)
     research_context = st.text_area("补充背景 (可选)", placeholder="任何额外的背景信息...", height=80)
     start_research_btn = st.button("🚀 开始多模型协作", type="primary", disabled=not research_topic)
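With the default settings above (num_experts = 3, names untouched), the sidebar would produce an experts_config list shaped like the sketch below; the models follow the i % len(AVAILABLE_MODELS) rotation over the keys defined in config.py.

# Illustrative value of experts_config, assuming untouched defaults:
experts_config = [
    {"name": "Expert 1", "model": "gpt-4o"},
    {"name": "Expert 2", "model": "gpt-4o-mini"},
    {"name": "Expert 3 (Synthesizer)", "model": "gpt-4-turbo"},
]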
@@ -219,49 +235,48 @@ if mode == "Deep Research":
         st.session_state.research_output = ""
         st.session_state.research_steps_output = []

-        manager = ResearchManager(api_key=api_key)
+        manager = ResearchManager(
+            api_key=api_key,
+            base_url=base_url,
+            provider=provider_id
+        )
         config_obj = ResearchConfig(
             topic=research_topic,
             context=research_context,
-            expert_a_model=roles_config['expert_a'],
-            expert_b_model=roles_config['expert_b'],
-            expert_c_model=roles_config['expert_c']
+            experts=experts_config
         )
         manager.create_agents(config_obj)

         st.divider()
-        st.subheader("🧠 智囊团思考中...")
+        st.subheader("🗣️ 智囊团讨论中...")

-        # Collaborative Execution
-        current_step_name = ""
-        current_step_content = ""
-        step_placeholder = st.empty()
-        status_container = st.status("正在初始化...", expanded=True)
+        chat_container = st.container()

         try:
-            for event in manager.collaborate(research_topic, research_context):
+            for event in manager.collaborate(research_topic, research_context, max_rounds=max_rounds):
                 if event["type"] == "step_start":
                     current_step_name = event["step"]
                     current_agent = event["agent"]
                     current_model = event["model"]
-                    status_container.update(label=f"🔄 {current_step_name} [{current_agent}] ({current_model})", state="running")
-                    step_placeholder = st.empty()
-                    current_step_content = ""
+                    # Create a chat message block
+                    with chat_container:
+                        st.markdown(f"#### {current_step_name}")
+                        st.caption(f"🤖 {current_agent} ({current_model})")
+                        message_placeholder = st.empty()
+                    current_content = ""
                 elif event["type"] == "content":
-                    current_step_content += event["content"]
-                    step_placeholder.markdown(f"**Thinking...**\n\n{current_step_content}")
+                    current_content += event["content"]
+                    message_placeholder.markdown(current_content)
                 elif event["type"] == "step_end":
-                    # Save step result
+                    # Save step result for history
                     st.session_state.research_steps_output.append({
                         "step": current_step_name,
                         "output": event["output"]
                     })
-                    status_container.write(f"### {current_step_name}\n{event['output']}")
-                    status_container.update(label=f"{current_step_name} 完成", state="running")
-            status_container.update(label="✅ 所有步骤完成", state="complete", expanded=False)
+                    st.divider()  # Separator between turns

             # The last step output is the final plan
             if st.session_state.research_steps_output:
@@ -423,9 +438,9 @@ elif mode == "Debate Workshop":
     try:
         # Initialize the default client
         llm_client = LLMClient(
-            provider="aihubmix",
+            provider=provider_id,
             api_key=api_key,
-            base_url="https://aihubmix.com/v1",
+            base_url=base_url,
             model=model
         )
@@ -434,9 +449,9 @@ elif mode == "Debate Workshop":
         for ag_id, ag_model in agent_model_map.items():
             if ag_model != model:  # Only create a new client if different from the default
                 agent_clients[ag_id] = LLMClient(
-                    provider="aihubmix",
+                    provider=provider_id,
                     api_key=api_key,
-                    base_url="https://aihubmix.com/v1",
+                    base_url=base_url,
                     model=ag_model
                 )

@@ -10,8 +10,39 @@ load_dotenv()
 ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
 AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
+DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")
+SILICONFLOW_API_KEY = os.getenv("SILICONFLOW_API_KEY", "")

-# AIHubMix configuration
+# LLM provider configuration
+LLM_PROVIDERS = {
+    "AIHubMix": {
+        "base_url": "https://aihubmix.com/v1",
+        "api_key_var": "AIHUBMIX_API_KEY",
+        "default_model": "gpt-4o"
+    },
+    "DeepSeek": {
+        "base_url": "https://api.deepseek.com",
+        "api_key_var": "DEEPSEEK_API_KEY",
+        "default_model": "deepseek-chat"
+    },
+    "SiliconFlow": {
+        "base_url": "https://api.siliconflow.cn/v1",
+        "api_key_var": "SILICONFLOW_API_KEY",
+        "default_model": "deepseek-ai/DeepSeek-V3"  # SiliconFlow often uses the full model path
+    },
+    "OpenAI": {
+        "base_url": "https://api.openai.com/v1",
+        "api_key_var": "OPENAI_API_KEY",
+        "default_model": "gpt-4o"
+    },
+    "Custom": {
+        "base_url": "http://localhost:8000/v1",
+        "api_key_var": "CUSTOM_API_KEY",
+        "default_model": "local-model"
+    }
+}
+
+# AIHubMix configuration (legacy; kept for backward compatibility, main logic uses LLM_PROVIDERS)
 AIHUBMIX_BASE_URL = "https://aihubmix.com/v1"

 # Model configuration
@@ -20,17 +51,42 @@ LLM_PROVIDER = "aihubmix"  # Default: AIHubMix

 # Supported models
 AVAILABLE_MODELS = {
     # OpenAI
     "gpt-4o": "GPT-4o (OpenAI)",
     "gpt-4o-mini": "GPT-4o Mini (OpenAI)",
     "gpt-4-turbo": "GPT-4 Turbo (OpenAI)",
     # Anthropic
     "claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet (Anthropic)",
     "claude-3-opus-20240229": "Claude 3 Opus (Anthropic)",
     "claude-3-haiku-20240307": "Claude 3 Haiku (Anthropic)",
     # Google
     "gemini-1.5-pro": "Gemini 1.5 Pro (Google)",
     "gemini-1.5-flash": "Gemini 1.5 Flash (Google)",
+    "gemini-2.0-flash-exp": "Gemini 2.0 Flash Exp (Google)",
     # DeepSeek
+    "deepseek-chat": "DeepSeek V3 (DeepSeek)",
+    "deepseek-reasoner": "DeepSeek R1 (DeepSeek)",
+    "deepseek-coder": "DeepSeek Coder (DeepSeek)",
     "deepseek-v2.5": "DeepSeek V2.5 (DeepSeek)",
     "deepseek-coder-v2": "DeepSeek Coder V2 (DeepSeek)",
+    # Meta
+    "llama-3.3-70b-instruct": "Llama 3.3 70B (Meta)",
+    "llama-3.1-405b-instruct": "Llama 3.1 405B (Meta)",
+    # Alibaba
+    "qwen-2.5-72b-instruct": "Qwen 2.5 72B (Alibaba)",
+    "qwen-plus": "Qwen Plus (Alibaba)",
+    "qwen-turbo": "Qwen Turbo (Alibaba)",
+    # Mistral
+    "mistral-large-latest": "Mistral Large (Mistral)",
+    # Perplexity
+    "llama-3.1-sonar-huge-128k-online": "Sonar Huge Online (Perplexity)",
 }

 # Debate configuration
@@ -8,9 +8,8 @@ import config
 class ResearchConfig:
     topic: str
     context: str = ""
-    expert_a_model: str = "gpt-4o"
-    expert_b_model: str = "gemini-1.5-pro"
-    expert_c_model: str = "claude-3-5-sonnet-20241022"
+    # Dynamic list of experts: [{"name": "Expert 1", "model": "gpt-4o", "role": "analyst"}, ...]
+    experts: List[Dict[str, str]] = None

 class ResearchManager:
     """Manages the Multi-Model Council workflow"""
@@ -19,62 +18,88 @@ class ResearchManager:
         self.api_key = api_key
         self.base_url = base_url
         self.provider = provider
-        self.agents = {}
+        self.agents = []

     def _get_client(self, model: str) -> LLMClient:
         return LLMClient(
-            provider=self.provider,
+            provider=self.provider,  # respect the configured provider; the client falls back to aihubmix defaults
             api_key=self.api_key,
             base_url=self.base_url,
             model=model
         )

     def create_agents(self, config: ResearchConfig):
-        """Initialize agents with specific models"""
-        self.agents["expert_a"] = ResearchAgent("expert_a", self._get_client(config.expert_a_model))
-        self.agents["expert_b"] = ResearchAgent("expert_b", self._get_client(config.expert_b_model))
-        self.agents["expert_c"] = ResearchAgent("expert_c", self._get_client(config.expert_c_model))
+        """Initialize agents with specific models from config"""
+        self.agents = []
+        if config.experts:
+            for idx, expert_conf in enumerate(config.experts):
+                # All experts share the generic "council_member" role; the last
+                # agent in the list is additionally used for the final synthesis.
+                role_type = "council_member"
+                agent = ResearchAgent(
+                    role=role_type,
+                    llm_client=self._get_client(expert_conf["model"]),
+                    name=expert_conf.get("name", f"Expert {idx+1}")
+                )
+                self.agents.append(agent)

-    def collaborate(self, topic: str, context: str) -> Generator[Dict[str, str], None, None]:
+    def collaborate(self, topic: str, context: str, max_rounds: int = 3) -> Generator[Dict[str, str], None, None]:
         """
-        Execute the collaborative research process:
-        1. Expert A: Propose Analysis
-        2. Expert B: Critique
-        3. Expert C: Synthesis & Final Plan
+        Execute the collaborative research process with multi-round discussion:
+        1. Conversation Loop (All Experts, Round Robin)
+        2. Final Synthesis (Last Expert)
         """
-        # Step 1: Expert A Analysis
-        findings_a = ""
-        yield {"type": "step_start", "step": "Expert A Analysis", "agent": self.agents["expert_a"].name, "model": self.agents["expert_a"].model_name}
-        prompt_a = f"Please provide a comprehensive analysis and initial proposal for the topic: '{topic}'.\nContext: {context}"
-        for chunk in self.agents["expert_a"].generate(prompt_a, context):
-            findings_a += chunk
-            yield {"type": "content", "content": chunk}
-        yield {"type": "step_end", "output": findings_a}
-
-        # Step 2: Expert B Critique
-        findings_b = ""
-        yield {"type": "step_start", "step": "Expert B Critique", "agent": self.agents["expert_b"].name, "model": self.agents["expert_b"].model_name}
-        prompt_b = f"Review Expert A's proposal on '{topic}'. Critique it, find gaps, and suggest improvements.\nExpert A's Proposal:\n{findings_a}"
-        for chunk in self.agents["expert_b"].generate(prompt_b, context):
-            findings_b += chunk
-            yield {"type": "content", "content": chunk}
-        yield {"type": "step_end", "output": findings_b}
-
-        # Step 3: Expert A Refinement (Innovative Optimization)
-        findings_a_refined = ""
-        yield {"type": "step_start", "step": "Expert A Refinement", "agent": self.agents["expert_a"].name, "model": self.agents["expert_a"].model_name}
-        prompt_a_refine = f"Expert B has critiqued your initial proposal.\nCritique: {findings_b}\n\nPlease refine your proposal to address these points. Strengthen your arguments and fix the gaps."
-        for chunk in self.agents["expert_a"].generate(prompt_a_refine, context):
-            findings_a_refined += chunk
-            yield {"type": "content", "content": chunk}
-        yield {"type": "step_end", "output": findings_a_refined}
-
-        # Step 4: Expert C Synthesis & Visualization
-        findings_c = ""
-        yield {"type": "step_start", "step": "Expert C Synthesis & Visualization", "agent": self.agents["expert_c"].name, "model": self.agents["expert_c"].model_name}
-        prompt_c = f"Synthesize a final comprehensive plan for '{topic}' based on the refined proposal.\nRefined Proposal:\n{findings_a_refined}\nCritique Reference:\n{findings_b}\n\nIMPORTANT: Include a Mermaid.js diagram (sequenceDiagram, gantt, or flowchart) to visualize the roadmap or process at the end of your report."
-        for chunk in self.agents["expert_c"].generate(prompt_c, context):
-            findings_c += chunk
-            yield {"type": "content", "content": chunk}
-        yield {"type": "step_end", "output": findings_c}
+        conversation_history = []
+        discussion_context = f"Topic: '{topic}'\nBackground Context: {context}\n\n"
+
+        # Round-robin discussion
+        for round_num in range(1, max_rounds + 1):
+            for agent in self.agents:
+                yield {"type": "step_start", "step": f"Round {round_num}: {agent.name}", "agent": agent.name, "model": agent.model_name}
+
+                # Construct the prompt
+                if round_num == 1 and not conversation_history:
+                    prompt = f"You are {agent.name}. You are starting the discussion on '{topic}'. Provide your initial analysis and key points. Be conversational but substantive."
+                else:
+                    prompt = f"You are {agent.name}. Review the discussion so far. Respond to previous points. Defend your views or refine them. Keep the discussion moving towards a solution.\n\nDiscussion History:\n{_format_history(conversation_history)}"
+
+                response = ""
+                for chunk in agent.generate(prompt, context=discussion_context):
+                    response += chunk
+                    yield {"type": "content", "content": chunk}
+                conversation_history.append({"agent": agent.name, "content": response})
+                yield {"type": "step_end", "output": response}
+
+        # Final synthesis by the LAST agent
+        synthesizer = self.agents[-1]
+        yield {"type": "step_start", "step": f"Final Synthesis ({synthesizer.name})", "agent": synthesizer.name, "model": synthesizer.model_name}
+        prompt_syn = f"""Synthesize the entire discussion into a final comprehensive plan for '{topic}'.
+Discussion History:
+{_format_history(conversation_history)}
+IMPORTANT:
+1. Reconcile the different viewpoints from all experts.
+2. Provide a concrete action plan.
+3. You MUST include a Mermaid.js diagram (using a ```mermaid code block) to visualize the roadmap or process."""
+        findings_syn = ""
+        for chunk in synthesizer.generate(prompt_syn, context=discussion_context):
+            findings_syn += chunk
+            yield {"type": "content", "content": chunk}
+        yield {"type": "step_end", "output": findings_syn}
+
+
+def _format_history(history: List[Dict[str, str]]) -> str:
+    formatted = ""
+    for turn in history:
+        formatted += f"[{turn['agent']}]: {turn['content']}\n\n"
+    return formatted
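Callers consume collaborate() as a stream of step_start / content / step_end events, as app.py does above. A stripped-down, print-based sketch of that contract (the API key and topic strings are placeholders):

# Hypothetical headless consumer of the event stream.
manager = ResearchManager(api_key="sk-...", provider="aihubmix")
manager.create_agents(ResearchConfig(
    topic="example topic",
    experts=[{"name": "A", "model": "gpt-4o"}, {"name": "B", "model": "deepseek-chat"}],
))
for event in manager.collaborate("example topic", "background", max_rounds=2):
    if event["type"] == "step_start":
        print(f"\n== {event['step']} [{event['agent']} / {event['model']}] ==")
    elif event["type"] == "content":
        print(event["content"], end="", flush=True)
    elif event["type"] == "step_end":
        pass  # event["output"] holds the full text of the finished turn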

@@ -36,24 +36,25 @@ class LLMClient:
             self.client = OpenAI(api_key=api_key)
             self.model = model or "gpt-4o"
-        elif self.provider == "aihubmix":
-            # AIHubMix is compatible with the OpenAI API format
+        elif self.provider in ["aihubmix", "deepseek", "siliconflow", "custom"]:
+            # Providers with OpenAI-compatible APIs
             from openai import OpenAI
+            default_urls = {
+                "aihubmix": "https://aihubmix.com/v1",
+                "deepseek": "https://api.deepseek.com",
+                "siliconflow": "https://api.siliconflow.cn/v1",
+                "custom": "http://localhost:8000/v1"
+            }
+            final_base_url = base_url or default_urls.get(self.provider)
             self.client = OpenAI(
                 api_key=api_key,
-                base_url=base_url or "https://aihubmix.com/v1"
+                base_url=final_base_url
             )
             self.model = model or "gpt-4o"
-        elif self.provider == "custom":
-            # Custom OpenAI-compatible endpoint (vLLM, Ollama, TGI, etc.)
-            from openai import OpenAI
-            self.client = OpenAI(
-                api_key=api_key or "not-needed",
-                base_url=base_url or "http://localhost:8000/v1"
-            )
-            self.model = model or "local-model"
         else:
             raise ValueError(f"不支持的 provider: {self.provider}")
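Under the consolidated branch, DeepSeek and SiliconFlow clients no longer need an explicit base_url. A minimal sketch, with placeholder API keys:

# base_url falls back to default_urls["deepseek"] = "https://api.deepseek.com"
client = LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-chat")

# An explicit base_url still wins, e.g. for a self-hosted OpenAI-compatible server:
local = LLMClient(provider="custom", api_key="not-needed", model="local-model",
                  base_url="http://localhost:8000/v1")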