diff --git a/backend/ai_services/bailian_service.py b/backend/ai_services/bailian_service.py
index 62dc8f3..2c55725 100644
--- a/backend/ai_services/bailian_service.py
+++ b/backend/ai_services/bailian_service.py
@@ -95,12 +95,33 @@ class BailianService:
             {'role': 'user', 'content': f"{prompt}\n\n以下是需要评估的内容:\n{content}{chapter_context}"}
         ]
 
-        completion = self.client.chat.completions.create(
-            model=evaluation.model_selection,
-            messages=messages,
-            response_format={"type": "json_object"}
-        )
+        # Retry the model call up to 3 times before giving up.
+        import time
+
+        completion = None
+        last_error = None
+        for attempt in range(3):
+            try:
+                completion = self.client.chat.completions.create(
+                    model=evaluation.model_selection,
+                    messages=messages,
+                    response_format={"type": "json_object"}
+                )
+                break  # success, stop retrying
+            except Exception as e:
+                last_error = e
+                logger.warning(
+                    "AI Evaluation attempt %d/3 failed for eval %s: %s",
+                    attempt + 1, evaluation.id, e,
+                )
+                if attempt < 2:
+                    time.sleep(2 * (attempt + 1))  # linear backoff: 2s, then 4s
+
+        if completion is None:
+            # last_error is always set on this path; keep a fallback message anyway.
+            raise last_error or Exception("AI Service call failed after retries")
+
         response_content = completion.choices[0].message.content
 
         # Convert to dict for storage
         raw_response = completion.model_dump()
diff --git a/backend/ai_services/views.py b/backend/ai_services/views.py
index 51a4c8d..cdadb9e 100644
--- a/backend/ai_services/views.py
+++ b/backend/ai_services/views.py
@@ -225,9 +225,56 @@ class TranscriptionTaskViewSet(viewsets.ModelViewSet):
                 eval.save()
                 evaluations_to_process.append(eval)
 
-        # 执行评估
-        for eval_obj in evaluations_to_process:
-            service.evaluate_task(eval_obj)
+        # Run the evaluations asynchronously so the HTTP request is not blocked.
+        # Pass plain IDs instead of model objects to avoid sharing ORM objects
+        # across threads.
+        eval_ids = [e.id for e in evaluations_to_process]
+
+        if eval_ids:
+            import threading
+            from concurrent.futures import ThreadPoolExecutor
+
+            def run_evaluations_background(ids):
+                # Executed in a background thread: re-import dependencies and
+                # build a dedicated service instance for this thread.
+                import logging
+                from django.db import close_old_connections
+                from .models import AIEvaluation
+                from .bailian_service import BailianService
+
+                bg_logger = logging.getLogger(__name__)
+                local_service = BailianService()
+
+                def evaluate_one(eval_obj):
+                    # One evaluation per worker: a single failure must not kill
+                    # the whole batch, and each worker thread has to release its
+                    # own DB connection (Django does not do this for raw threads).
+                    try:
+                        local_service.evaluate_task(eval_obj)
+                    except Exception:
+                        bg_logger.exception(
+                            "Background evaluation %s failed", eval_obj.id
+                        )
+                    finally:
+                        close_old_connections()
+
+                try:
+                    # Re-fetch fresh objects inside this thread.
+                    target_evals = list(AIEvaluation.objects.filter(id__in=ids))
+                    # max_workers=4: up to 4 evaluations run concurrently.
+                    with ThreadPoolExecutor(max_workers=4) as executor:
+                        # Drain the iterator so any error surfaces here instead
+                        # of being dropped silently.
+                        list(executor.map(evaluate_one, target_evals))
+                finally:
+                    close_old_connections()
+
+            # Start the background thread; do not block the current request.
+            thread = threading.Thread(
+                target=run_evaluations_background, args=(eval_ids,)
+            )
+            thread.daemon = True  # don't prevent process shutdown
+            thread.start()
 
         # 返回该任务的所有评估结果
         all_evals = AIEvaluation.objects.filter(task=task)