import logging
import json
import os

from django.conf import settings

from openai import OpenAI

from .models import AIEvaluation

logger = logging.getLogger(__name__)
class BailianService:
    """Thin wrapper around the DashScope (Bailian) OpenAI-compatible API.

    Provides AI-based evaluation of a task's transcription (expects a JSON
    reply from the model) and AI-generated Markdown summaries of a task's
    summary/chapter data.
    """

    # Truncate transcripts beyond this many characters to avoid exceeding
    # the model's token limit (simple character heuristic, not token-accurate).
    MAX_CONTENT_CHARS = 10000

    def __init__(self):
        # Prefer the Django setting; fall back to the environment variable.
        self.api_key = getattr(settings, 'DASHSCOPE_API_KEY', None)
        if not self.api_key:
            self.api_key = os.environ.get("DASHSCOPE_API_KEY")

        if self.api_key:
            self.client = OpenAI(
                api_key=self.api_key,
                base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
            )
        else:
            # Leave the client unset so callers fail fast with a clear error.
            self.client = None
            logger.warning("DASHSCOPE_API_KEY not configured.")

    def _fail(self, evaluation: AIEvaluation, message: str) -> AIEvaluation:
        """Mark *evaluation* as FAILED with *message*, persist and return it."""
        evaluation.status = AIEvaluation.Status.FAILED
        evaluation.error_message = message
        evaluation.save()
        return evaluation

    def evaluate_task(self, evaluation: AIEvaluation):
        """Run the AI evaluation described by *evaluation*.

        Sends the associated task's transcription together with the stored
        prompt to the selected model, requesting a JSON response, then parses
        score/evaluation/reasoning out of it.

        Returns the (saved) evaluation in every path for consistency —
        previously the two early-failure paths returned None while the
        success and exception paths returned the evaluation.
        """
        if not self.client:
            return self._fail(evaluation, "服务未配置 (DASHSCOPE_API_KEY missing)")

        task = evaluation.task
        if not task.transcription:
            return self._fail(evaluation, "关联任务无逐字稿内容")

        evaluation.status = AIEvaluation.Status.PROCESSING
        evaluation.save()

        try:
            prompt = evaluation.prompt
            content = task.transcription

            # Truncate overly long content to stay within token limits.
            if len(content) > self.MAX_CONTENT_CHARS:
                content = content[:self.MAX_CONTENT_CHARS] + "...(内容过长已截断)"

            messages = [
                {'role': 'system', 'content': 'You are a helpful assistant designed to output JSON.'},
                {'role': 'user', 'content': f"{prompt}\n\n以下是需要评估的内容:\n{content}"}
            ]

            completion = self.client.chat.completions.create(
                model=evaluation.model_selection,
                messages=messages,
                response_format={"type": "json_object"}
            )

            response_content = completion.choices[0].message.content
            # Store the full API response (as a plain dict) for auditing.
            evaluation.raw_response = completion.model_dump()

            self._apply_result(evaluation, response_content)

            evaluation.save()
            return evaluation

        except Exception as e:
            # Boundary handler: record the failure on the evaluation instead
            # of propagating. logger.exception keeps the stack trace, which
            # the previous logger.error(f"...") call dropped.
            logger.exception("AI Evaluation failed: %s", e)
            evaluation.status = AIEvaluation.Status.FAILED
            evaluation.error_message = str(e)
            evaluation.save()
            return evaluation

    def _apply_result(self, evaluation: AIEvaluation, response_content: str) -> None:
        """Parse the model's JSON reply into *evaluation* fields (no save)."""
        try:
            result = json.loads(response_content)
        except json.JSONDecodeError:
            evaluation.status = AIEvaluation.Status.FAILED
            evaluation.error_message = f"无法解析JSON响应: {response_content}"
            evaluation.reasoning = response_content
            return

        evaluation.score = result.get('score')
        evaluation.evaluation = result.get('evaluation') or result.get('comment')

        # The model may expose its reasoning under either of these keys.
        evaluation.reasoning = result.get('reasoning') or result.get('analysis')
        if not evaluation.reasoning:
            # No explicit reasoning: keep the whole JSON as a reference.
            evaluation.reasoning = json.dumps(result, ensure_ascii=False, indent=2)

        evaluation.status = AIEvaluation.Status.COMPLETED

    def summarize_task(self, task, model="qwen-turbo"):
        """Summarize a task's transcription/chapter data into Markdown.

        *model* defaults to the previously hard-coded "qwen-turbo" and is now
        configurable (backward compatible). On success, stores the generated
        Markdown on ``task.summary`` and saves the task; failures are logged
        and swallowed (best-effort behavior preserved).
        """
        if not self.client:
            logger.error("DashScope client not initialized")
            return

        try:
            summary_data = json.dumps(task.summary_data or {}, ensure_ascii=False)
            chapters_data = json.dumps(task.auto_chapters_data or {}, ensure_ascii=False)

            prompt = f"""
你是一个专业的会议摘要和内容分析助手。请根据以下提供的“总结原始数据”和“章节原始数据”,生成一个结构清晰、专业且易于阅读的 Markdown 格式总结。

要求:
1. 包含一个总体的“核心摘要”。
2. 包含一个详细的“内容大纲”。
3. 如果有问答或对话信息,请包含“关键问答”或“发言人观点”。
4. 包含一个带有时间戳的“章节回顾”,格式为 [HH:MM:SS] 标题。
5. 语言简练,重点突出。

总结原始数据:
{summary_data}

章节原始数据:
{chapters_data}
"""
            messages = [
                {'role': 'system', 'content': '你是一个专业的文档总结助手。请直接返回 Markdown 格式的内容,不要包含任何引导性文字。'},
                {'role': 'user', 'content': prompt}
            ]

            completion = self.client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=0.7
            )

            ai_summary = completion.choices[0].message.content
            if ai_summary:
                task.summary = ai_summary
                task.save()
                logger.info("AI summary generated for task %s", task.id)

        except Exception as e:
            # Best-effort: summarization failure must not break the caller.
            logger.exception("Failed to generate AI summary: %s", e)