# config.py — application configuration module.
import os
from dotenv import load_dotenv
# Load variables from a local .env file into os.environ before Config reads them.
load_dotenv()
class Config:
    """Central application settings, sourced from environment variables.

    Values are read once at class-definition time via ``os.environ.get``;
    call sites access them as class attributes (e.g. ``Config.APP_PORT``).
    """

    # SiliconFlow API settings (primary LLM provider).
    SILICONFLOW_API_KEY = os.environ.get("SILICONFLOW_API_KEY")
    SILICONFLOW_BASE_URL = os.environ.get("SILICONFLOW_BASE_URL", "https://api.siliconflow.cn/v1")
    SILICONFLOW_MODEL = os.environ.get("SILICONFLOW_MODEL", "deepseek-ai/DeepSeek-V2.5")

    # OpenAI-compatible settings (kept available, but SiliconFlow is the default).
    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
    OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "gpt-4")

    # Application server bind address and port.
    APP_HOST = os.environ.get("APP_HOST", "0.0.0.0")
    APP_PORT = int(os.environ.get("APP_PORT", "8000"))

    # Programming languages the explainer accepts.
    SUPPORTED_LANGUAGES = [
        "python",
        "javascript",
        "typescript",
        "java",
        "cpp",
        "c",
        "go",
        "rust",
        "ruby",
        "php",
    ]

    # Depth level -> prompt text describing how detailed the explanation should be.
    EXPLANATION_DEPTH = {
        "basic": "简单解释每行代码的作用",
        "detailed": "详细解释,包括语法、原理和最佳实践",
        "comprehensive": "全面深入的分析,包括复杂度、性能考虑等",
    }

    # Model identifiers known to be served by SiliconFlow.
    SILICONFLOW_MODELS = [
        "deepseek-ai/DeepSeek-V2.5",
        "deepseek-ai/DeepSeek-V2",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "01-ai/Yi-1.5-34B-Chat",
        "01-ai/Yi-1.5-9B-Chat",
        "meta-llama/Llama-3.1-405B-Instruct",
        "meta-llama/Llama-3.1-70B-Instruct",
    ]
# Module-level shared instance; other modules do `from config import config`.
# (All settings are class attributes, so the instance reads the same values.)
config = Config()