from typing import Dict, Generator, Optional
from dataclasses import dataclass

from agents.research_agent import ResearchAgent
from utils.llm_client import LLMClient
import config


@dataclass
class ResearchConfig:
    """Per-run settings: the research topic, optional context, and the
    model assigned to each expert seat."""

    topic: str
    context: str = ""
    expert_a_model: str = "gpt-4o"
    expert_b_model: str = "gemini-1.5-pro"
    expert_c_model: str = "claude-3-5-sonnet-20241022"


class ResearchManager:
    """Manages the Multi-Model Council workflow."""

    def __init__(self, api_key: str, base_url: Optional[str] = None, provider: str = "aihubmix"):
        self.api_key = api_key
        self.base_url = base_url
        self.provider = provider
        self.agents: Dict[str, ResearchAgent] = {}
    def _get_client(self, model: str) -> LLMClient:
        return LLMClient(
            provider=self.provider,
            api_key=self.api_key,
            base_url=self.base_url,
            model=model,
        )

    def create_agents(self, config: ResearchConfig):
        """Initialize agents with specific models."""
        self.agents["expert_a"] = ResearchAgent("expert_a", self._get_client(config.expert_a_model))
        self.agents["expert_b"] = ResearchAgent("expert_b", self._get_client(config.expert_b_model))
        self.agents["expert_c"] = ResearchAgent("expert_c", self._get_client(config.expert_c_model))
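
    # Note: collaborate() below assumes each ResearchAgent exposes
    # `.name` (role label), `.model_name` (model identifier), and
    # `.generate(prompt, context)`, which yields response text chunks
    # for streaming. These attributes are inferred from their usage in
    # this file; see agents/research_agent.py for the actual interface.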

    def collaborate(self, topic: str, context: str) -> Generator[Dict[str, str], None, None]:
        """
        Execute the collaborative research process:
        1. Expert A: propose analysis
        2. Expert B: critique
        3. Expert A: refine the proposal to address the critique
        4. Expert C: synthesis & final plan, with a Mermaid.js diagram

        Yields "step_start", "content", and "step_end" event dicts so
        callers can stream each step's output as it is generated.
        """

        # Step 1: Expert A Analysis
        findings_a = ""
        yield {"type": "step_start", "step": "Expert A Analysis", "agent": self.agents["expert_a"].name, "model": self.agents["expert_a"].model_name}
        prompt_a = f"Please provide a comprehensive analysis and initial proposal for the topic: '{topic}'.\nContext: {context}"
        for chunk in self.agents["expert_a"].generate(prompt_a, context):
            findings_a += chunk
            yield {"type": "content", "content": chunk}
        yield {"type": "step_end", "output": findings_a}

        # Step 2: Expert B Critique
        findings_b = ""
        yield {"type": "step_start", "step": "Expert B Critique", "agent": self.agents["expert_b"].name, "model": self.agents["expert_b"].model_name}
        prompt_b = f"Review Expert A's proposal on '{topic}'. Critique it, find gaps, and suggest improvements.\nExpert A's Proposal:\n{findings_a}"
        for chunk in self.agents["expert_b"].generate(prompt_b, context):
            findings_b += chunk
            yield {"type": "content", "content": chunk}
        yield {"type": "step_end", "output": findings_b}

        # Step 3: Expert A refines the proposal to address the critique
        findings_a_refined = ""
        yield {"type": "step_start", "step": "Expert A Refinement", "agent": self.agents["expert_a"].name, "model": self.agents["expert_a"].model_name}
        prompt_a_refine = f"Expert B has critiqued your initial proposal.\nCritique: {findings_b}\n\nPlease refine your proposal to address these points. Strengthen your arguments and fix the gaps."
        for chunk in self.agents["expert_a"].generate(prompt_a_refine, context):
            findings_a_refined += chunk
            yield {"type": "content", "content": chunk}
        yield {"type": "step_end", "output": findings_a_refined}

        # Step 4: Expert C Synthesis & Visualization
        findings_c = ""
        yield {"type": "step_start", "step": "Expert C Synthesis & Visualization", "agent": self.agents["expert_c"].name, "model": self.agents["expert_c"].model_name}
        prompt_c = f"Synthesize a final comprehensive plan for '{topic}' based on the refined proposal.\nRefined Proposal:\n{findings_a_refined}\nCritique Reference:\n{findings_b}\n\nIMPORTANT: Include a Mermaid.js diagram (sequenceDiagram, gantt, or flowchart) to visualize the roadmap or process at the end of your report."
        for chunk in self.agents["expert_c"].generate(prompt_c, context):
            findings_c += chunk
            yield {"type": "content", "content": chunk}
        yield {"type": "step_end", "output": findings_c}
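

# Minimal usage sketch (hypothetical driver, not part of the original
# module): shows how a caller might consume the event stream from
# collaborate(). The topic, context, and API-key values below are
# illustrative placeholders only.
if __name__ == "__main__":
    manager = ResearchManager(api_key="YOUR_API_KEY", provider="aihubmix")
    cfg = ResearchConfig(
        topic="Example research topic",
        context="Example background context",
    )
    manager.create_agents(cfg)
    for event in manager.collaborate(cfg.topic, cfg.context):
        if event["type"] == "step_start":
            # Announce each council step with the model behind it.
            print(f"\n=== {event['step']} ({event['model']}) ===")
        elif event["type"] == "content":
            # Stream response chunks as they arrive.
            print(event["content"], end="", flush=True)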