109 lines
4.0 KiB
Python
109 lines
4.0 KiB
Python
|
|
"""
|
||
|
|
Auto Agent Generator - 根据主题自动生成专家配置
|
||
|
|
Uses LLM to analyze the topic and suggest appropriate expert agents.
|
||
|
|
"""
|
||
|
|
import json
|
||
|
|
import re
|
||
|
|
from typing import List, Dict
|
||
|
|
from utils.llm_client import LLMClient
|
||
|
|
|
||
|
|
|
||
|
|
EXPERT_GENERATION_PROMPT = """You are an expert team composition advisor. Given a research/decision topic, you need to suggest the most appropriate team of experts to analyze it.
|
||
|
|
|
||
|
|
Instructions:
|
||
|
|
1. Analyze the topic carefully to understand its domain and key aspects
|
||
|
|
2. Generate {num_experts} distinct expert roles that would provide the most valuable perspectives
|
||
|
|
3. Each expert should have a unique focus area relevant to the topic
|
||
|
|
4. The LAST expert should always be a "Synthesizer" role who can integrate all perspectives
|
||
|
|
|
||
|
|
Output Format (MUST be valid JSON array):
|
||
|
|
[
|
||
|
|
{{"name": "Expert Name", "perspective": "Brief description of their viewpoint", "focus": "Key areas they analyze"}},
|
||
|
|
...
|
||
|
|
]
|
||
|
|
|
||
|
|
Examples of good expert names based on topic:
|
||
|
|
- For "Should we launch an e-commerce platform?": "市场渠道分析师", "电商运营专家", "供应链顾问", "数字化转型综合师"
|
||
|
|
- For "Career transition to AI field": "职业发展顾问", "AI行业专家", "技能评估分析师", "综合规划师"
|
||
|
|
|
||
|
|
IMPORTANT:
|
||
|
|
- Use {language} for all names and descriptions
|
||
|
|
- Make names specific to the topic, not generic like "Expert 1"
|
||
|
|
- The last expert MUST be a synthesizer/integrator type
|
||
|
|
|
||
|
|
Topic: {topic}
|
||
|
|
|
||
|
|
Generate exactly {num_experts} experts as a JSON array:"""
|
||
|
|
|
||
|
|
|
||
|
|
def generate_experts_for_topic(
    topic: str,
    num_experts: int,
    llm_client: LLMClient,
    language: str = "Chinese"
) -> List[Dict[str, str]]:
    """
    Use LLM to generate appropriate expert configurations based on the topic.

    Args:
        topic: The research/decision topic
        num_experts: Number of experts to generate (2-5)
        llm_client: LLM client instance for API calls
        language: Output language (Chinese/English)

    Returns:
        List of expert dicts: [{"name": "...", "perspective": "...", "focus": "..."}, ...].
        Returns [] for a blank topic; returns generic fallback experts when
        the LLM call fails or its response yields no usable expert entries.
    """
    if not topic.strip():
        return []

    try:
        # Prompt construction is inside the try so a formatting failure
        # degrades to the fallback roster instead of crashing the caller.
        prompt = EXPERT_GENERATION_PROMPT.format(
            topic=topic,
            num_experts=num_experts,
            language=language
        )

        response = llm_client.chat(
            system_prompt="You are a helpful assistant that generates JSON output only. No markdown, no explanation.",
            user_prompt=prompt,
            max_tokens=800
        )

        # Extract the JSON array from the response (handles potential
        # markdown code-fence wrapping around the payload).
        json_match = re.search(r'\[[\s\S]*\]', response)
        if json_match:
            experts = json.loads(json_match.group())
            if isinstance(experts, list):
                validated = [
                    {
                        "name": exp.get("name", "Expert"),
                        "perspective": exp.get("perspective", ""),
                        "focus": exp.get("focus", ""),
                    }
                    for exp in experts[:num_experts]
                    if isinstance(exp, dict) and "name" in exp
                ]
                # BUGFIX: only trust a non-empty result. The original
                # returned an empty `validated` list (silently producing
                # zero experts) when the parsed array held no valid dicts.
                if validated:
                    return validated
    except Exception as e:
        # BUGFIX: was `except (json.JSONDecodeError, Exception)` — the tuple
        # is redundant since Exception already subsumes JSONDecodeError.
        # Deliberate best-effort: any failure falls through to the fallback.
        print(f"[AutoAgentGenerator] Error parsing LLM response: {e}")

    return _fallback_experts(num_experts)


def _fallback_experts(num_experts: int) -> List[Dict[str, str]]:
    """Build generic placeholder experts; the last one is always a synthesizer."""
    fallback = []
    for i in range(num_experts):
        if i == num_experts - 1:
            # Final slot is the integrator role, mirroring the prompt contract.
            fallback.append({"name": "综合分析师", "perspective": "整合视角", "focus": "综合决策"})
        else:
            fallback.append({"name": f"专家 {i+1}", "perspective": "分析视角", "focus": "专业分析"})
    return fallback
|
||
|
|
|
||
|
|
|
||
|
|
def get_default_model_for_expert(expert_index: int, total_experts: int, available_models: list) -> str:
    """
    Pick a default model for the expert at the given position.

    Cycles round-robin through *available_models* so successive experts are
    spread across different backends for diversity; falls back to "gpt-4o"
    when no models are configured.
    """
    if available_models:
        slot = expert_index % len(available_models)
        return available_models[slot]
    return "gpt-4o"
|