Compare commits

..

6 Commits

Author SHA1 Message Date
52f5c91528 Update README.md 2026-01-09 09:47:00 +08:00
77fc360789 Update README.md 2026-01-09 09:39:30 +08:00
ea687d57f9 Update README.md 2026-01-09 09:38:29 +08:00
e2999717d5 Update README.md 2026-01-09 09:38:14 +08:00
51b33f97bd Update README.md 2026-01-09 09:37:40 +08:00
a45c58ad54 Update README.md 2026-01-09 09:33:16 +08:00
28 changed files with 216 additions and 2589 deletions

10
.gitignore vendored

@@ -1,10 +0,0 @@
# Python bytecode cache
__pycache__/
*.py[cod]
*$py.class
# Project-specific storage/cache folders
.storage/
# Environment variable file (usually contains sensitive information)
.env


@@ -1 +0,0 @@
3.12


@@ -63,7 +63,6 @@ uv sync
## 🚀 Quick Start
```bash
cd wd666
uv run streamlit run app.py
```

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -19,18 +19,16 @@ class AgentMessage:
class BaseAgent:
"""Agent base class"""
def __init__(self, agent_id: str, llm_client, language: str = "Chinese"):
def __init__(self, agent_id: str, llm_client):
"""
Initialize the Agent
Args:
agent_id: Agent identifier (e.g. 'ceo', 'cto')
llm_client: LLM client instance
language: Output language
"""
self.agent_id = agent_id
self.llm_client = llm_client
self.language = language
profile = get_agent_profile(agent_id)
if not profile:
@@ -40,18 +38,11 @@ class BaseAgent:
self.emoji = profile["emoji"]
self.perspective = profile["perspective"]
self.focus_areas = profile["focus_areas"]
self.system_prompt = f"{profile['system_prompt']}\n\nIMPORTANT: You MUST output your response in {self.language}."
self.system_prompt = profile["system_prompt"]
# Store conversation history
self.conversation_history = []
@property
def model_name(self) -> str:
"""Get the name of the model currently in use"""
if hasattr(self.llm_client, "model"):
return self.llm_client.model
return "Unknown Model"
def generate_response(
self,
topic: str,

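For context on the signature change above: the removed constructor injected an output-language directive into the profile's system prompt, while the kept version uses the profile prompt unchanged. A minimal standalone sketch of that difference (the helper name and prompts are illustrative, not the project's exact code):

```python
# Illustrative sketch only: append a language directive (old behaviour) or
# return the profile's system prompt as-is (new behaviour).
def build_system_prompt(profile_prompt: str, language: str | None = None) -> str:
    if language:
        # Old constructor: add an explicit output-language instruction.
        return f"{profile_prompt}\n\nIMPORTANT: You MUST output your response in {language}."
    # New constructor: use the profile prompt unchanged.
    return profile_prompt

print(build_system_prompt("You are the CEO agent.", "Chinese"))
print(build_system_prompt("You are the CEO agent."))
```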

@@ -1,56 +1,39 @@
from typing import Generator
from typing import Generator, List, Dict
from utils.llm_client import LLMClient
import config
class ResearchAgent:
"""Dedicated agent for research mode"""
def __init__(self, role: str, llm_client: LLMClient, name: str = None, language: str = "Chinese"):
def __init__(self, role: str, llm_client: LLMClient):
self.role = role
self.llm_client = llm_client
self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
self.name = name if name else self.role_config.get("name", role.capitalize())
self.language = language
@property
def model_name(self) -> str:
return self.llm_client.model
self.name = self.role_config.get("name", role.capitalize())
def _get_system_prompt(self, context: str = "") -> str:
base_prompt = ""
if self.role == "council_member":
base_prompt = f"""You are {self.name}, a member of the Multi-Model Decision Council.
Your goal is to participate in a round-table discussion to solve the user's problem.
Be conversational, insightful, and constructive.
Build upon others' ideas or respectfully disagree with valid reasoning.
if self.role == "planner":
return f"""You are a Senior Research Planner.
Your goal is to break down a complex user topic into a structured research plan.
You must create a clear, step-by-step plan that covers different angles of the topic.
Format your output as a Markdown list of steps.
Context: {context}"""
elif self.role == "expert_a":
base_prompt = f"""You are Expert A, a Senior Analyst.
You are participating in a round-table discussion.
Your goal is to analyze the topic and propose solutions.
Be conversational, direct, and responsive to other experts.
Do not write a full final report; focus on the current discussion turn.
elif self.role == "researcher":
return f"""You are a Deep Researcher.
Your goal is to execute a specific research step and provide detailed, in-depth analysis.
Use your vast knowledge to provide specific facts, figures, and logical reasoning.
Do not be superficial. Go deep.
Context: {context}"""
elif self.role == "expert_b":
base_prompt = f"""You are Expert B, a Critical Reviewer.
You are participating in a round-table discussion.
Your goal is to critique Expert A's points and offer alternative perspectives.
Be conversational and constructive. Challenge assumptions directly.
elif self.role == "writer":
return f"""You are a Senior Report Writer.
Your goal is to synthesize multiple research findings into a cohesive, high-quality report.
The report should be well-structured, easy to read, and provide actionable insights.
Context: {context}"""
elif self.role == "expert_c":
base_prompt = f"""You are Expert C, a Senior Strategist and Visual Thinker.
Your goal is to synthesize the final output.
Combine the structural strength of Expert A with the critical insights of Expert B.
Produce a final, polished, comprehensive plan or report.
CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
else:
base_prompt = "You are a helpful assistant."
return f"{base_prompt}\n\nIMPORTANT: You MUST output your response in {self.language}."
return "You are a helpful assistant."
def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
"""Generate response stream"""

1030
app.py

File diff suppressed because it is too large


@@ -9,110 +9,35 @@ load_dotenv()
# API configuration
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")
SILICONFLOW_API_KEY = os.getenv("SILICONFLOW_API_KEY", "")
AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "sk-yd8Tik0nFW5emKYcBdFc433b7c8b4dC182848f76819bBe73")
# LLM Providers Configuration
LLM_PROVIDERS = {
"AIHubMix": {
"base_url": "https://aihubmix.com/v1",
"api_key_var": "AIHUBMIX_API_KEY",
"default_model": "gpt-4o"
},
"DeepSeek": {
"base_url": "https://api.deepseek.com",
"api_key_var": "DEEPSEEK_API_KEY",
"default_model": "deepseek-chat"
},
"SiliconFlow": {
"base_url": "https://api.siliconflow.cn/v1",
"api_key_var": "SILICONFLOW_API_KEY",
"default_model": "deepseek-ai/DeepSeek-V3" # SiliconFlow often uses full path
},
"OpenAI": {
"base_url": "https://api.openai.com/v1",
"api_key_var": "OPENAI_API_KEY",
"default_model": "gpt-4o"
},
"Custom": {
"base_url": "http://localhost:8000/v1",
"api_key_var": "CUSTOM_API_KEY",
"default_model": "local-model"
}
}
# AIHubMix configuration (Legacy, keeping for backward compatibility if needed, but main logic will use LLM_PROVIDERS)
# AIHubMix configuration
AIHUBMIX_BASE_URL = "https://aihubmix.com/v1"
# Model configuration
DEFAULT_MODEL = "gpt-4o" # Model supported by AIHubMix
LLM_PROVIDER = "aihubmix" # Use AIHubMix by default
# List of supported models
AVAILABLE_MODELS = {
# OpenAI
"gpt-4o": "GPT-4o (OpenAI)",
"gpt-4o-mini": "GPT-4o Mini (OpenAI)",
"gpt-4-turbo": "GPT-4 Turbo (OpenAI)",
# Anthropic
"claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet (Anthropic)",
"claude-3-opus-20240229": "Claude 3 Opus (Anthropic)",
"claude-3-haiku-20240307": "Claude 3 Haiku (Anthropic)",
# Google
"gemini-1.5-pro": "Gemini 1.5 Pro (Google)",
"gemini-1.5-flash": "Gemini 1.5 Flash (Google)",
"gemini-2.0-flash-exp": "Gemini 2.0 Flash Exp (Google)",
# DeepSeek (official API: https://api.deepseek.com)
"deepseek-chat": "DeepSeek V3 General Chat (DeepSeek)",
"deepseek-reasoner": "DeepSeek R1 Deep Reasoning (DeepSeek)",
"deepseek-coder": "DeepSeek Coder Code Model (DeepSeek)",
# Meta
"llama-3.3-70b-instruct": "Llama 3.3 70B (Meta)",
"llama-3.1-405b-instruct": "Llama 3.1 405B (Meta)",
# Alibaba
"qwen-2.5-72b-instruct": "Qwen 2.5 72B (Alibaba)",
"qwen-plus": "Qwen Plus (Alibaba)",
"qwen-turbo": "Qwen Turbo (Alibaba)",
# Mistral
"mistral-large-latest": "Mistral Large (Mistral)",
# Perplexity
"llama-3.1-sonar-huge-128k-online": "Sonar Huge Online (Perplexity)",
}
# Debate configuration
MAX_DEBATE_ROUNDS = 3 # Maximum number of debate rounds
MAX_AGENTS = 6 # Maximum number of participating agents
# Supported output languages
SUPPORTED_LANGUAGES = ["Chinese", "English", "Japanese", "Spanish", "French", "German"]
# Generation configuration
MAX_OUTPUT_TOKENS = 300 # Cap the length of a single reply to keep it concise
# Model role configuration for research mode
RESEARCH_MODEL_ROLES = {
"expert_a": {
"name": "Expert A (Analyst)",
"planner": {
"name": "Planner",
"default_model": "gpt-4o",
"description": "负责初步分析,提出核心观点和方案"
"description": "负责拆解问题,制定研究计划"
},
"expert_b": {
"name": "Expert B (Critique)",
"researcher": {
"name": "Researcher",
"default_model": "gemini-1.5-pro",
"description": "负责批判性分析,指出潜在问题和漏洞"
"description": "负责执行具体的研究步骤,深度分析"
},
"expert_c": {
"name": "Expert C (Synthesizer)",
"writer": {
"name": "Writer",
"default_model": "claude-3-5-sonnet-20241022",
"description": "负责综合各方观点,生成最终决策方案"
"description": "负责汇总信息,撰写最终报告"
}
}

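For reference, an LLM_PROVIDERS entry is meant to be resolved at runtime into a base URL, an API key read from the environment, and a default model. A minimal sketch of that lookup (the resolve_provider helper is assumed, not part of the repository):

```python
import os

# Copy of one entry from LLM_PROVIDERS above; the helper below is illustrative.
LLM_PROVIDERS = {
    "DeepSeek": {
        "base_url": "https://api.deepseek.com",
        "api_key_var": "DEEPSEEK_API_KEY",
        "default_model": "deepseek-chat",
    },
}

def resolve_provider(name: str) -> dict:
    cfg = LLM_PROVIDERS[name]
    return {
        "base_url": cfg["base_url"],
        "api_key": os.getenv(cfg["api_key_var"], ""),  # key is read from the environment
        "model": cfg["default_model"],
    }

print(resolve_provider("DeepSeek"))
```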

@@ -1,6 +0,0 @@
def main():
print("Hello from multi-agent!")
if __name__ == "__main__":
main()

Binary file not shown.


@@ -17,8 +17,6 @@ class DebateConfig:
context: str = ""
agent_ids: List[str] = None
max_rounds: int = 2
agent_clients: dict = None # Map[agent_id, LLMClient]
language: str = "Chinese"
@dataclass
@@ -60,12 +58,7 @@ class DebateManager:
# Create the participating agents
for agent_id in debate_config.agent_ids:
# Check if specific client is provided in config, else use default
client = self.llm_client
if hasattr(debate_config, 'agent_clients') and debate_config.agent_clients and agent_id in debate_config.agent_clients:
client = debate_config.agent_clients[agent_id]
agent = BaseAgent(agent_id, client, language=debate_config.language)
agent = BaseAgent(agent_id, self.llm_client)
self.agents.append(agent)
def run_debate_stream(
@@ -113,7 +106,6 @@ class DebateManager:
"agent_id": agent.agent_id,
"agent_name": agent.name,
"emoji": agent.emoji,
"model_name": agent.model_name,
"round": round_num
}

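The removed agent_clients field let each debater use its own LLM client, falling back to the shared default. A standalone sketch of that lookup (types simplified; only DebateConfig and agent_clients come from the diff, the rest is illustrative):

```python
from dataclasses import dataclass, field
from typing import Dict, List, Optional

@dataclass
class DebateConfig:
    topic: str
    agent_ids: List[str] = field(default_factory=list)
    agent_clients: Optional[Dict[str, object]] = None  # agent_id -> client

def pick_client(cfg: DebateConfig, agent_id: str, default_client: object) -> object:
    # Prefer a per-agent client when one is configured; otherwise share the default.
    if cfg.agent_clients and agent_id in cfg.agent_clients:
        return cfg.agent_clients[agent_id]
    return default_client

cfg = DebateConfig(topic="Pricing", agent_ids=["ceo", "cto"], agent_clients={"cto": "client-B"})
print(pick_client(cfg, "cto", "default-client"))  # client-B
print(pick_client(cfg, "ceo", "default-client"))  # default-client
```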

@@ -8,92 +8,44 @@ import config
class ResearchConfig:
topic: str
context: str = ""
# Dynamic list of experts: [{"name": "Expert 1", "model": "gpt-4o", "role": "analyst"}, ...]
experts: List[Dict[str, str]] = None
language: str = "Chinese"
planner_model: str = "gpt-4o"
researcher_model: str = "gemini-1.5-pro"
writer_model: str = "claude-3-5-sonnet-20241022"
class ResearchManager:
"""Manages the Multi-Model Council workflow"""
"""Manages the Deep Research workflow"""
def __init__(self, api_key: str, base_url: str = None, provider: str = "aihubmix"):
self.api_key = api_key
self.base_url = base_url
self.provider = provider
self.agents = []
self.agents = {}
def _get_client(self, model: str) -> LLMClient:
return LLMClient(
provider=self.provider, # Configured to respect provider or default to aihubmix logic inside client
provider=self.provider,
api_key=self.api_key,
base_url=self.base_url,
model=model
)
def create_agents(self, config: ResearchConfig):
"""Initialize agents with specific models from config"""
self.agents = []
if config.experts:
for idx, expert_conf in enumerate(config.experts):
role_type = "council_member"
agent = ResearchAgent(
role=role_type,
llm_client=self._get_client(expert_conf["model"]),
name=expert_conf.get("name", f"Expert {idx+1}"),
language=config.language
)
self.agents.append(agent)
"""Initialize agents with specific models"""
self.agents["planner"] = ResearchAgent("planner", self._get_client(config.planner_model))
self.agents["researcher"] = ResearchAgent("researcher", self._get_client(config.researcher_model))
self.agents["writer"] = ResearchAgent("writer", self._get_client(config.writer_model))
def collaborate(self, topic: str, context: str, max_rounds: int = 3) -> Generator[Dict[str, str], None, None]:
"""
Execute the collaborative research process with multi-round discussion:
1. Conversation Loop (All Experts Round Robin)
2. Final Synthesis (Last Expert)
"""
def generate_plan(self, topic: str, context: str) -> Generator[str, None, None]:
"""Step 1: Generate Research Plan"""
prompt = f"Please create a comprehensive research plan for the topic: '{topic}'.\nBreak it down into 3-5 distinct, actionable steps."
yield from self.agents["planner"].generate(prompt, context)
conversation_history = []
discussion_context = f"Topic: '{topic}'\nBackground Context: {context}\n\n"
def execute_step(self, step: str, previous_findings: str) -> Generator[str, None, None]:
"""Step 2: Execute a single research step"""
prompt = f"Execute this research step: '{step}'.\nPrevious findings: {previous_findings}"
yield from self.agents["researcher"].generate(prompt)
# Round-Robin Discussion
for round_num in range(1, max_rounds + 1):
for agent in self.agents:
yield {"type": "step_start", "step": f"Round {round_num}: {agent.name}", "agent": agent.name, "model": agent.model_name}
# Construct prompt
if round_num == 1 and not conversation_history:
prompt = f"You are {agent.name}. You are starting the discussion on '{topic}'. Provide your initial analysis and key points. Be conversational but substantive."
else:
prompt = f"You are {agent.name}. Review the discussion so far. Respond to previous points. Defend your views or refine them. Keep the discussion moving towards a solution.\n\nDiscussion History:\n{_format_history(conversation_history)}"
response = ""
for chunk in agent.generate(prompt, context=discussion_context):
response += chunk
yield {"type": "content", "content": chunk}
conversation_history.append({"agent": agent.name, "content": response})
yield {"type": "step_end", "output": response}
# Final Synthesis by the LAST agent (or a specific designated one)
synthesizer = self.agents[-1]
yield {"type": "step_start", "step": f"Final Synthesis ({synthesizer.name})", "agent": synthesizer.name, "model": synthesizer.model_name}
prompt_syn = f"""Synthesize the entire discussion into a final comprehensive plan for '{topic}'.
Discussion History:
{_format_history(conversation_history)}
IMPORTANT:
1. Reconcile the different viewpoints from all experts.
2. Provide a concrete action plan.
3. You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the roadmap or process."""
findings_syn = ""
for chunk in synthesizer.generate(prompt_syn, context=discussion_context):
findings_syn += chunk
yield {"type": "content", "content": chunk}
yield {"type": "step_end", "output": findings_syn}
def _format_history(history: List[Dict[str, str]]) -> str:
formatted = ""
for turn in history:
formatted += f"[{turn['agent']}]: {turn['content']}\n\n"
return formatted
def generate_report(self, topic: str, all_findings: str) -> Generator[str, None, None]:
"""Step 3: Generate Final Report"""
prompt = f"Write a final comprehensive report on '{topic}' based on these findings:\n{all_findings}"
yield from self.agents["writer"].generate(prompt)

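The simplified manager runs a three-stage pipeline: plan the research, execute each step, then write the report. A standalone sketch of that flow, with a stub generator standing in for the streaming LLM calls (all names here are illustrative):

```python
from typing import Generator

def stub_llm(prompt: str) -> Generator[str, None, None]:
    # Stand-in for a streaming LLM client; yields a single chunk.
    yield f"[response to: {prompt[:48]}...]"

def run_research(topic: str, context: str = "") -> str:
    plan = "".join(stub_llm(f"Create a research plan for '{topic}'. Context: {context}"))
    findings = "".join(stub_llm(f"Execute this research step: {plan}"))
    report = "".join(stub_llm(f"Write a final report on '{topic}' based on: {findings}"))
    return report

print(run_research("Open-source LLM adoption"))
```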

@@ -1,13 +0,0 @@
[project]
name = "multi-agent"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"anthropic>=0.75.0",
"openai>=2.14.0",
"pydantic>=2.12.5",
"python-dotenv>=1.2.1",
"streamlit>=1.52.2",
]

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -1,108 +0,0 @@
"""
Auto Agent Generator - automatically generates an expert configuration for a given topic
Uses LLM to analyze the topic and suggest appropriate expert agents.
"""
import json
import re
from typing import List, Dict
from utils.llm_client import LLMClient
EXPERT_GENERATION_PROMPT = """You are an expert team composition advisor. Given a research/decision topic, you need to suggest the most appropriate team of experts to analyze it.
Instructions:
1. Analyze the topic carefully to understand its domain and key aspects
2. Generate {num_experts} distinct expert roles that would provide the most valuable perspectives
3. Each expert should have a unique focus area relevant to the topic
4. The LAST expert should always be a "Synthesizer" role who can integrate all perspectives
Output Format (MUST be valid JSON array):
[
{{"name": "Expert Name", "perspective": "Brief description of their viewpoint", "focus": "Key areas they analyze"}},
...
]
Examples of good expert names based on topic:
- For "Should we launch an e-commerce platform?": "市场渠道分析师", "电商运营专家", "供应链顾问", "数字化转型综合师"
- For "Career transition to AI field": "职业发展顾问", "AI行业专家", "技能评估分析师", "综合规划师"
IMPORTANT:
- Use {language} for all names and descriptions
- Make names specific to the topic, not generic like "Expert 1"
- The last expert MUST be a synthesizer/integrator type
Topic: {topic}
Generate exactly {num_experts} experts as a JSON array:"""
def generate_experts_for_topic(
topic: str,
num_experts: int,
llm_client: LLMClient,
language: str = "Chinese"
) -> List[Dict[str, str]]:
"""
Use LLM to generate appropriate expert configurations based on the topic.
Args:
topic: The research/decision topic
num_experts: Number of experts to generate (2-5)
llm_client: LLM client instance for API calls
language: Output language (Chinese/English)
Returns:
List of expert dicts: [{"name": "...", "perspective": "...", "focus": "..."}, ...]
"""
if not topic.strip():
return []
prompt = EXPERT_GENERATION_PROMPT.format(
topic=topic,
num_experts=num_experts,
language=language
)
try:
response = llm_client.chat(
system_prompt="You are a helpful assistant that generates JSON output only. No markdown, no explanation.",
user_prompt=prompt,
max_tokens=800
)
# Extract JSON from response (handle potential markdown wrapping)
json_match = re.search(r'\[[\s\S]*\]', response)
if json_match:
experts = json.loads(json_match.group())
# Validate structure
if isinstance(experts, list) and len(experts) >= 1:
validated = []
for exp in experts[:num_experts]:
if isinstance(exp, dict) and "name" in exp:
validated.append({
"name": exp.get("name", "Expert"),
"perspective": exp.get("perspective", ""),
"focus": exp.get("focus", "")
})
return validated
except (json.JSONDecodeError, Exception) as e:
print(f"[AutoAgentGenerator] Error parsing LLM response: {e}")
# Fallback: return generic experts
fallback = []
for i in range(num_experts):
if i == num_experts - 1:
fallback.append({"name": f"综合分析师", "perspective": "整合视角", "focus": "综合决策"})
else:
fallback.append({"name": f"专家 {i+1}", "perspective": "分析视角", "focus": "专业分析"})
return fallback
def get_default_model_for_expert(expert_index: int, total_experts: int, available_models: list) -> str:
"""
Assign a default model to an expert based on their position.
Spreads experts across available models for diversity.
"""
if not available_models:
return "gpt-4o"
return available_models[expert_index % len(available_models)]

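Model assignment for generated experts is a simple round-robin over the available models. A usage sketch of get_default_model_for_expert as defined above (the model list is just an example):

```python
def get_default_model_for_expert(expert_index: int, total_experts: int, available_models: list) -> str:
    # Same logic as above: cycle through the available models for diversity.
    if not available_models:
        return "gpt-4o"
    return available_models[expert_index % len(available_models)]

models = ["gpt-4o", "gemini-1.5-pro", "claude-3-5-sonnet-20241022"]
for i in range(4):
    print(i, get_default_model_for_expert(i, 4, models))
# 0 gpt-4o, 1 gemini-1.5-pro, 2 claude-3-5-sonnet-20241022, 3 gpt-4o
```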

@@ -5,8 +5,6 @@ from typing import Generator
import os
import config
class LLMClient:
"""Unified LLM API client"""
@@ -38,25 +36,24 @@ class LLMClient:
self.client = OpenAI(api_key=api_key)
self.model = model or "gpt-4o"
elif self.provider in ["aihubmix", "deepseek", "siliconflow", "custom"]:
# Providers with OpenAI-compatible APIs
elif self.provider == "aihubmix":
# AIHubMix is compatible with the OpenAI API format
from openai import OpenAI
default_urls = {
"aihubmix": "https://aihubmix.com/v1",
"deepseek": "https://api.deepseek.com",
"siliconflow": "https://api.siliconflow.cn/v1",
"custom": "http://localhost:8000/v1"
}
final_base_url = base_url or default_urls.get(self.provider)
self.client = OpenAI(
api_key=api_key,
base_url=final_base_url
base_url=base_url or "https://aihubmix.com/v1"
)
self.model = model or "gpt-4o"
elif self.provider == "custom":
# Custom OpenAI-compatible endpoint (vLLM, Ollama, TGI, etc.)
from openai import OpenAI
self.client = OpenAI(
api_key=api_key or "not-needed",
base_url=base_url or "http://localhost:8000/v1"
)
self.model = model or "local-model"
else:
raise ValueError(f"Unsupported provider: {self.provider}")
@@ -64,7 +61,7 @@
self,
system_prompt: str,
user_prompt: str,
max_tokens: int = config.MAX_OUTPUT_TOKENS
max_tokens: int = 1024
) -> Generator[str, None, None]:
"""
Streaming chat

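The removed multi-provider branch mapped each provider name to a default OpenAI-compatible base URL, with an explicitly supplied base_url taking precedence. A standalone sketch of that resolution (the resolve_base_url helper is assumed, not the repository's API):

```python
# Default endpoints copied from the removed branch above.
DEFAULT_URLS = {
    "aihubmix": "https://aihubmix.com/v1",
    "deepseek": "https://api.deepseek.com",
    "siliconflow": "https://api.siliconflow.cn/v1",
    "custom": "http://localhost:8000/v1",
}

def resolve_base_url(provider: str, base_url: str | None = None) -> str:
    if base_url:
        return base_url  # an explicit base_url always wins
    if provider not in DEFAULT_URLS:
        raise ValueError(f"Unsupported provider: {provider}")
    return DEFAULT_URLS[provider]

print(resolve_base_url("deepseek"))
print(resolve_base_url("custom", "http://127.0.0.1:11434/v1"))
```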

@@ -1,184 +0,0 @@
"""
Storage Manager - Handle local persistence of configuration, history/reports, and assets.
"""
import os
import json
import time
from typing import List, Dict, Any
from pathlib import Path
# Constants
STORAGE_DIR = ".storage"
CONFIG_FILE = "config.json"
HISTORY_DIR = "history"
ASSETS_DIR = "assets"
class StorageManager:
def __init__(self):
self.root_dir = Path(STORAGE_DIR)
self.config_path = self.root_dir / CONFIG_FILE
self.history_dir = self.root_dir / HISTORY_DIR
self.assets_dir = self.root_dir / ASSETS_DIR
# Ensure directories exist
self.root_dir.mkdir(exist_ok=True)
self.history_dir.mkdir(exist_ok=True)
self.assets_dir.mkdir(exist_ok=True)
def save_config(self, config_data: Dict[str, Any]):
"""Save UI configuration to file"""
try:
with open(self.config_path, 'w', encoding='utf-8') as f:
json.dump(config_data, f, indent=2, ensure_ascii=False)
except Exception as e:
print(f"Error saving config: {e}")
def load_config(self) -> Dict[str, Any]:
"""Load UI configuration from file"""
if not self.config_path.exists():
return {}
try:
with open(self.config_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
print(f"Error loading config: {e}")
return {}
def save_asset(self, uploaded_file) -> str:
"""Save an uploaded file (e.g., background image) into assets directory.
Args:
uploaded_file: a file-like object (Streamlit UploadedFile) or bytes-like
Returns:
The saved file path as string, or None on failure.
"""
try:
# Determine filename
if hasattr(uploaded_file, 'name'):
filename = uploaded_file.name
else:
filename = f"asset_{int(time.time())}"
# sanitize
safe_name = "".join([c for c in filename if c.isalnum() or c in (' ', '.', '_', '-')]).strip().replace(' ', '_')
dest = self.assets_dir / f"{int(time.time())}_{safe_name}"
# Write bytes
with open(dest, 'wb') as out:
# Streamlit UploadedFile has getbuffer()
if hasattr(uploaded_file, 'getbuffer'):
out.write(uploaded_file.getbuffer())
else:
# try reading
data = uploaded_file.read()
if isinstance(data, str):
data = data.encode('utf-8')
out.write(data)
return str(dest)
except Exception as e:
print(f"Error saving asset: {e}")
return None
def save_history(self, session_type: str, topic: str, content: str, metadata: Dict[str, Any] = None):
"""
Save a session report/history
Args:
session_type: 'council' or 'debate'
topic: The main topic
content: The full markdown report or content
metadata: Additional info (model used, date, etc)
"""
timestamp = int(time.time())
date_str = time.strftime("%Y-%m-%d %H:%M:%S")
# Create a safe filename
safe_topic = "".join([c for c in topic[:20] if c.isalnum() or c in (' ', '_', '-')]).strip().replace(' ', '_')
filename = f"{timestamp}_{session_type}_{safe_topic}.json"
data = {
"id": str(timestamp),
"timestamp": timestamp,
"date": date_str,
"type": session_type,
"topic": topic,
"content": content,
"metadata": metadata or {}
}
try:
with open(self.history_dir / filename, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
return True
except Exception as e:
print(f"Error saving history: {e}")
return False
def list_history(self) -> List[Dict[str, Any]]:
"""List all history items (metadata only)"""
items = []
if not self.history_dir.exists():
return []
for file in self.history_dir.glob("*.json"):
try:
with open(file, 'r', encoding='utf-8') as f:
data = json.load(f)
# Return summary info
items.append({
"id": data.get("id"),
"date": data.get("date"),
"type": data.get("type"),
"topic": data.get("topic"),
"filename": file.name
})
except Exception:
continue
# Sort by timestamp desc
return sorted(items, key=lambda x: x.get("date", ""), reverse=True)
def load_history_item(self, filename: str) -> Dict[str, Any]:
"""Load full content of a history item"""
path = self.history_dir / filename
if not path.exists():
return None
try:
with open(path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
return None
# ==================== Session Cache (Resume Functionality) ====================
def save_session_state(self, key: str, data: Dict[str, Any]):
"""Save temporary session state for recovery"""
try:
# We use a dedicated cache file per key
cache_file = self.root_dir / f"{key}_cache.json"
data["_timestamp"] = int(time.time())
with open(cache_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
except Exception as e:
print(f"Error saving session cache: {e}")
def load_session_state(self, key: str) -> Dict[str, Any]:
"""Load temporary session state"""
cache_file = self.root_dir / f"{key}_cache.json"
if not cache_file.exists():
return None
try:
with open(cache_file, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
return None
def clear_session_state(self, key: str):
"""Clear temporary session state"""
cache_file = self.root_dir / f"{key}_cache.json"
if cache_file.exists():
try:
os.remove(cache_file)
except Exception:
pass

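History files above follow a timestamp_type_topic naming scheme, with the topic sanitized for the filesystem. A standalone sketch of just that naming logic (the helper name is illustrative):

```python
import time

def history_filename(session_type: str, topic: str) -> str:
    # Mirror of the sanitization above: keep alphanumerics, spaces, '_' and '-',
    # truncate the topic to 20 characters, and replace spaces with underscores.
    safe_topic = "".join(
        c for c in topic[:20] if c.isalnum() or c in (" ", "_", "-")
    ).strip().replace(" ", "_")
    return f"{int(time.time())}_{session_type}_{safe_topic}.json"

print(history_filename("council", "Should we launch an e-commerce platform"))
```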
1126
uv.lock generated

File diff suppressed because it is too large