import requests
import json
from typing import List, Dict, Optional, Generator

from config import Config, SYSTEM_PROMPTS, get_api_key


class DeepSeekService:
    """Thin wrapper around the DeepSeek chat-completions HTTP API."""

    def __init__(self):
        self.api_key = get_api_key()
        self.api_base = Config.DEEPSEEK_API_BASE
        self.model = Config.MODEL_NAME
        self.max_tokens = Config.MAX_TOKENS
        self.temperature = Config.TEMPERATURE

    def _get_headers(self) -> Dict[str, str]:
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def _build_messages(self, system_prompt: str, conversation_history: List[Dict[str, str]],
                        user_input: str) -> List[Dict[str, str]]:
        messages = [{"role": "system", "content": system_prompt}]
        messages.extend(conversation_history)
        messages.append({"role": "user", "content": user_input})
        return messages

    def _call_api(self, messages: List[Dict[str, str]], stream: bool = False) -> Dict:
        payload = {
            "model": self.model,
            "messages": messages,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "stream": stream
        }

        # Create a session and send the request with a 120-second timeout.
        # (requests.Session has no session-level timeout attribute, so the
        # timeout must be passed to the request call itself.)
        session = requests.Session()

        response = session.post(
            f"{self.api_base}/chat/completions",
            headers=self._get_headers(),
            json=payload,
            timeout=120
        )

        if response.status_code != 200:
            error_msg = f"API call failed: status code {response.status_code}"
            try:
                error_detail = response.json().get("error", {})
                error_msg += f", error message: {error_detail.get('message', 'unknown error')}"
            except ValueError:
                # Response body was not valid JSON; fall back to the raw text.
                error_msg += f", response body: {response.text}"
            raise Exception(error_msg)

        return response.json()

    def chat(self, user_input: str, conversation_history: List[Dict[str, str]] = None,
             system_type: str = "general_assistant") -> Dict[str, str]:
        if conversation_history is None:
            conversation_history = []

        system_prompt = SYSTEM_PROMPTS.get(system_type, SYSTEM_PROMPTS["general_assistant"])
        messages = self._build_messages(system_prompt, conversation_history, user_input)

        response = self._call_api(messages)
        assistant_message = response["choices"][0]["message"]["content"]

        return {"role": "assistant", "content": assistant_message}

    def chat_with_feedback(self, user_input: str, user_answer: str,
                           conversation_history: List[Dict[str, str]] = None) -> Dict[str, str]:
        if conversation_history is None:
            conversation_history = []

        feedback_prompt = SYSTEM_PROMPTS["answer_feedback"]

        context = f"Interview question: {user_input}\n\nCandidate's answer: {user_answer}"
        messages = self._build_messages(feedback_prompt, conversation_history, context)

        response = self._call_api(messages)
        feedback_content = response["choices"][0]["message"]["content"]

        return {"role": "assistant", "content": feedback_content}

    def generate_interview_question(self, job_position: str, difficulty: str,
                                    conversation_history: List[Dict[str, str]] = None,
                                    phase: str = "intro") -> str:
        if conversation_history is None:
            conversation_history = []

        system_prompt = SYSTEM_PROMPTS["interview_simulation"]

        context = f"""
Target position: {job_position}
Difficulty level: {difficulty}
Interview phase: {phase}

Based on the information above, ask one targeted interview question.
"""
        messages = self._build_messages(system_prompt, conversation_history, context)

        response = self._call_api(messages)
        question = response["choices"][0]["message"]["content"]

        return question

    def optimize_resume(self, resume_content: str, target_position: str = None) -> Dict[str, str]:
        system_prompt = SYSTEM_PROMPTS["resume_optimization"]

        if target_position:
            user_input = f"Target position: {target_position}\n\nResume content:\n{resume_content}"
        else:
            user_input = f"Please analyze the following resume and provide optimization suggestions:\n\n{resume_content}"

        messages = [{"role": "system", "content": system_prompt}]
        messages.append({"role": "user", "content": user_input})

        response = self._call_api(messages)
        optimization_suggestions = response["choices"][0]["message"]["content"]

        return {
            "resume_content": resume_content,
            "target_position": target_position,
            "suggestions": optimization_suggestions
        }


# Module-level singleton used by the rest of the application.
deepseek_service = DeepSeekService()
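

# A minimal usage sketch, assuming config.py supplies a valid DeepSeek API key
# and that SYSTEM_PROMPTS defines the "general_assistant" and
# "interview_simulation" entries referenced above; illustrative only.
if __name__ == "__main__":
    history: List[Dict[str, str]] = []

    # Single-turn chat using the default "general_assistant" system prompt.
    user_text = "Please introduce yourself briefly."
    reply = deepseek_service.chat(user_text, history)
    history.append({"role": "user", "content": user_text})
    history.append(reply)
    print(reply["content"])

    # Generate one interview question; the position and difficulty values
    # below are hypothetical example inputs, not values from the codebase.
    question = deepseek_service.generate_interview_question(
        job_position="Backend Engineer",
        difficulty="junior",
        phase="intro",
    )
    print(question)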