tingwu_new
All checks were successful
Deploy to Server / deploy (push) Successful in 17s

This commit is contained in:
jeremygan2021
2026-03-11 21:01:28 +08:00
parent 2c17e3bcd7
commit 852bc74bc1
7 changed files with 273 additions and 63 deletions

View File

@@ -1,16 +1,16 @@
from django.contrib import admin
from unfold.admin import ModelAdmin as UnfoldModelAdmin
from unfold.admin import StackedInline as UnfoldStackedInline
from .models import TranscriptionTask, AIEvaluation
from .models import TranscriptionTask, AIEvaluation, AIEvaluationTemplate
class AIEvaluationInline(UnfoldStackedInline):
model = AIEvaluation
extra = 0
can_delete = False
verbose_name = "AI评估"
verbose_name_plural = "AI评估"
readonly_fields = ['created_at', 'updated_at', 'raw_response', 'reasoning']
fields = ('score', 'evaluation', 'model_selection', 'prompt', 'reasoning', 'status', 'error_message')
can_delete = True
verbose_name = "AI评估结果"
verbose_name_plural = "AI评估结果"
readonly_fields = ['created_at', 'updated_at', 'raw_response', 'reasoning', 'template']
fields = ('template', 'score', 'evaluation', 'model_selection', 'prompt', 'reasoning', 'status', 'error_message')
@admin.register(TranscriptionTask)
class TranscriptionTaskAdmin(UnfoldModelAdmin):
@@ -20,17 +20,23 @@ class TranscriptionTaskAdmin(UnfoldModelAdmin):
readonly_fields = ['id', 'created_at', 'updated_at', 'task_id']
inlines = [AIEvaluationInline]
@admin.register(AIEvaluationTemplate)
class AIEvaluationTemplateAdmin(UnfoldModelAdmin):
    """Admin for reusable AI evaluation templates (model + prompt presets).

    Active templates are the ones automatically applied to newly completed
    transcription tasks, so `is_active` is surfaced both as a column and a filter.
    """
    # Changelist columns: template name, which LLM it targets, whether it
    # auto-runs on new tasks, and when it was created.
    list_display = ['name', 'model_selection', 'is_active', 'created_at']
    # Sidebar filters for quickly narrowing to active templates or a model.
    list_filter = ['is_active', 'model_selection', 'created_at']
    # Free-text search over the template name and the prompt body.
    search_fields = ['name', 'prompt']
@admin.register(AIEvaluation)
class AIEvaluationAdmin(UnfoldModelAdmin):
list_display = ['id', 'task', 'score', 'status', 'model_selection', 'created_at']
list_filter = ['status', 'model_selection', 'created_at']
list_display = ['id', 'task', 'template', 'score', 'status', 'model_selection', 'created_at']
list_filter = ['status', 'model_selection', 'created_at', 'template']
search_fields = ['task__id', 'evaluation', 'reasoning']
readonly_fields = ['id', 'created_at', 'updated_at', 'raw_response']
fieldsets = (
(None, {
'fields': ('task', 'status', 'score', 'evaluation')
'fields': ('task', 'template', 'status', 'score', 'evaluation')
}),
('配置', {
('配置快照', {
'fields': ('model_selection', 'prompt'),
'classes': ('collapse',),
}),

View File

@@ -0,0 +1,55 @@
# Generated by Django 6.0.1 on 2026-03-11 13:00
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduce AIEvaluationTemplate and allow multiple evaluations per task.

    Auto-generated (Django 6.0.1). Converts AIEvaluation.task from a
    one-to-one into a plain ForeignKey (related_name='ai_evaluations') and
    links each evaluation to the template that produced it via a nullable
    SET_NULL ForeignKey, so deleting a template keeps historical results.
    """

    dependencies = [
        ('ai_services', '0004_remove_transcriptiontask_evaluation_and_more'),
    ]

    operations = [
        # New reusable template: a named model/prompt preset that can be
        # toggled on to auto-evaluate finished transcription tasks.
        migrations.CreateModel(
            name='AIEvaluationTemplate',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='例如:销售话术评分、逻辑性分析', max_length=100, verbose_name='模板名称')),
                ('model_selection', models.CharField(default='qwen-plus', help_text='例如: qwen-plus, qwen-turbo, qwen-max', max_length=50, verbose_name='模型选择')),
                ('prompt', models.TextField(default='你是一个专业的评分助手。请根据提供的转写内容对内容质量、逻辑清晰度、语言表达等方面进行综合评分0-100分并给出详细的评语。请以JSON格式返回包含"score""evaluation"字段。', help_text='用于指导AI评分的提示词', verbose_name='评分提示词')),
                ('is_active', models.BooleanField(default=True, help_text='启用后,新的转写任务完成后将自动使用此模板进行评估', verbose_name='是否启用')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
            ],
            options={
                'verbose_name': 'AI评估模板',
                'verbose_name_plural': 'AI评估模板',
                'ordering': ['-created_at'],
            },
        ),
        # Rename the evaluation's admin labels to "AI评估结果" (result).
        migrations.AlterModelOptions(
            name='aievaluation',
            options={'ordering': ['-created_at'], 'verbose_name': 'AI评估结果', 'verbose_name_plural': 'AI评估结果'},
        ),
        migrations.AlterField(
            model_name='aievaluation',
            name='model_selection',
            field=models.CharField(default='qwen-plus', max_length=50, verbose_name='模型选择'),
        ),
        migrations.AlterField(
            model_name='aievaluation',
            name='prompt',
            field=models.TextField(verbose_name='评分提示词'),
        ),
        # One-to-one -> ForeignKey: a task may now hold several evaluations
        # (one per template), exposed as task.ai_evaluations.
        migrations.AlterField(
            model_name='aievaluation',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ai_evaluations', to='ai_services.transcriptiontask', verbose_name='关联任务'),
        ),
        # Nullable link back to the originating template; SET_NULL preserves
        # past results (with their config snapshot) if the template is deleted.
        migrations.AddField(
            model_name='aievaluation',
            name='template',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='evaluations', to='ai_services.aievaluationtemplate', verbose_name='使用的模板'),
        ),
    ]

View File

@@ -43,25 +43,8 @@ class TranscriptionTask(models.Model):
return f"{self.id} - {self.get_status_display()}"
class AIEvaluation(models.Model):
class Status(models.TextChoices):
PENDING = 'PENDING', _('等待中')
PROCESSING = 'PROCESSING', _('生成中')
COMPLETED = 'COMPLETED', _('已完成')
FAILED = 'FAILED', _('失败')
task = models.OneToOneField(
TranscriptionTask,
on_delete=models.CASCADE,
related_name='ai_evaluation',
verbose_name=_('关联任务')
)
# 评分与评语
score = models.IntegerField(verbose_name=_('AI评分'), blank=True, null=True, help_text=_('0-100分'))
evaluation = models.TextField(verbose_name=_('AI评语'), blank=True, null=True)
# 配置选项 (可在Admin中设置)
class AIEvaluationTemplate(models.Model):
name = models.CharField(verbose_name=_('模板名称'), max_length=100, help_text=_('例如:销售话术评分、逻辑性分析'))
model_selection = models.CharField(
verbose_name=_('模型选择'),
max_length=50,
@@ -73,6 +56,53 @@ class AIEvaluation(models.Model):
default='你是一个专业的评分助手。请根据提供的转写内容对内容质量、逻辑清晰度、语言表达等方面进行综合评分0-100分并给出详细的评语。请以JSON格式返回包含"score""evaluation"字段。',
help_text=_('用于指导AI评分的提示词')
)
is_active = models.BooleanField(verbose_name=_('是否启用'), default=True, help_text=_('启用后,新的转写任务完成后将自动使用此模板进行评估'))
created_at = models.DateTimeField(verbose_name=_('创建时间'), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_('更新时间'), auto_now=True)
class Meta:
verbose_name = _('AI评估模板')
verbose_name_plural = _('AI评估模板')
ordering = ['-created_at']
def __str__(self):
return self.name
class AIEvaluation(models.Model):
class Status(models.TextChoices):
PENDING = 'PENDING', _('等待中')
PROCESSING = 'PROCESSING', _('生成中')
COMPLETED = 'COMPLETED', _('已完成')
FAILED = 'FAILED', _('失败')
task = models.ForeignKey(
TranscriptionTask,
on_delete=models.CASCADE,
related_name='ai_evaluations',
verbose_name=_('关联任务')
)
template = models.ForeignKey(
AIEvaluationTemplate,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name='evaluations',
verbose_name=_('使用的模板')
)
# 评分与评语
score = models.IntegerField(verbose_name=_('AI评分'), blank=True, null=True, help_text=_('0-100分'))
evaluation = models.TextField(verbose_name=_('AI评语'), blank=True, null=True)
# 记录当时的配置 (快照)
model_selection = models.CharField(
verbose_name=_('模型选择'),
max_length=50,
default='qwen-plus'
)
prompt = models.TextField(verbose_name=_('评分提示词'))
# 原始数据与推理
raw_response = models.JSONField(verbose_name=_('原始响应'), blank=True, null=True, help_text=_('大模型返回的完整JSON'))
@@ -90,9 +120,9 @@ class AIEvaluation(models.Model):
updated_at = models.DateTimeField(verbose_name=_('更新时间'), auto_now=True)
class Meta:
verbose_name = _('AI智能评估')
verbose_name_plural = _('AI智能评估')
verbose_name = _('AI评估结果')
verbose_name_plural = _('AI评估结果')
ordering = ['-created_at']
def __str__(self):
return f"Evaluation for Task {self.task.id}"
return f"Evaluation for Task {self.task.id} ({self.template.name if self.template else 'Custom'})"

View File

@@ -1,18 +1,25 @@
from rest_framework import serializers
from .models import TranscriptionTask, AIEvaluation
from .models import TranscriptionTask, AIEvaluation, AIEvaluationTemplate
class AIEvaluationTemplateSerializer(serializers.ModelSerializer):
    """Serializes an AI evaluation template's configuration for the API.

    Exposes the template identity plus its model/prompt preset and the
    `is_active` flag that controls automatic evaluation of new tasks.
    """
    class Meta:
        model = AIEvaluationTemplate
        fields = ['id', 'name', 'model_selection', 'prompt', 'is_active', 'created_at']
class AIEvaluationSerializer(serializers.ModelSerializer):
template = AIEvaluationTemplateSerializer(read_only=True)
class Meta:
model = AIEvaluation
fields = ['id', 'score', 'evaluation', 'model_selection', 'prompt', 'reasoning', 'status', 'error_message', 'created_at', 'updated_at']
fields = ['id', 'template', 'score', 'evaluation', 'model_selection', 'prompt', 'reasoning', 'status', 'error_message', 'created_at', 'updated_at']
class TranscriptionTaskSerializer(serializers.ModelSerializer):
ai_evaluation = AIEvaluationSerializer(read_only=True)
ai_evaluations = AIEvaluationSerializer(many=True, read_only=True)
class Meta:
model = TranscriptionTask
fields = ['id', 'file_url', 'task_id', 'status', 'transcription', 'summary', 'error_message', 'created_at', 'updated_at', 'transcription_data', 'summary_data', 'auto_chapters_data', 'ai_evaluation']
read_only_fields = ['id', 'file_url', 'task_id', 'status', 'transcription', 'summary', 'error_message', 'created_at', 'updated_at', 'transcription_data', 'summary_data', 'auto_chapters_data', 'ai_evaluation']
fields = ['id', 'file_url', 'task_id', 'status', 'transcription', 'summary', 'error_message', 'created_at', 'updated_at', 'transcription_data', 'summary_data', 'auto_chapters_data', 'ai_evaluations']
read_only_fields = ['id', 'file_url', 'task_id', 'status', 'transcription', 'summary', 'error_message', 'created_at', 'updated_at', 'transcription_data', 'summary_data', 'auto_chapters_data', 'ai_evaluations']
class TranscriptionUploadSerializer(serializers.Serializer):
file = serializers.FileField(help_text="上传的音频文件")

View File

@@ -16,6 +16,8 @@ from django.conf import settings
logger = logging.getLogger(__name__)
from .models import TranscriptionTask, AIEvaluation, AIEvaluationTemplate
class AliyunTingwuService:
def __init__(self):
self.access_key_id = settings.ALIYUN_ACCESS_KEY_ID
@@ -157,6 +159,9 @@ class AliyunTingwuService:
:param task: TranscriptionTask 实例
:param result: get_task_info 返回的完整 JSON (或 Data 部分)
"""
# 记录之前的状态,用于判断是否是首次完成
previous_status = task.status
# 1. 提取 Data 对象
if isinstance(result, dict):
data_obj = result.get('Data', result)
@@ -269,6 +274,14 @@ class AliyunTingwuService:
# 结构: {"MindMapSummary": [{"Title": "...", "Topic": [...]}]}
summary_text = []
# 1. 优先提取段落标题和摘要
if 'ParagraphTitle' in summarization:
summary_text.append(f"### {summarization['ParagraphTitle']}")
if 'ParagraphSummary' in summarization:
summary_text.append(summarization['ParagraphSummary'])
summary_text.append("") # 空行分隔
# 2. 提取思维导图作为大纲
def parse_mindmap_topic(topic_list, level=0):
indent = " " * level
for topic in topic_list:
@@ -281,13 +294,37 @@ class AliyunTingwuService:
parse_mindmap_topic(sub_topics, level + 1)
if 'MindMapSummary' in summarization:
summary_text.append("### 内容大纲")
parse_mindmap_topic(summarization['MindMapSummary'])
elif 'Text' in summarization:
summary_text.append(summarization['Text'])
elif 'Headline' in summarization:
summary_text.append(summarization['Headline'])
elif 'ParagraphSummary' in summarization:
summary_text.append(summarization['ParagraphSummary'])
summary_text.append("")
# 3. 提取对话总结 (ConversationalSummary)
if 'ConversationalSummary' in summarization and isinstance(summarization['ConversationalSummary'], list):
summary_text.append("### 对话总结")
for conv in summarization['ConversationalSummary']:
speaker = conv.get('SpeakerName', '发言人')
summary = conv.get('Summary', '')
if summary:
summary_text.append(f"- **{speaker}**: {summary}")
summary_text.append("")
# 4. 提取问答总结 (QuestionsAnsweringSummary)
if 'QuestionsAnsweringSummary' in summarization and isinstance(summarization['QuestionsAnsweringSummary'], list):
summary_text.append("### 问答回顾")
for qa in summarization['QuestionsAnsweringSummary']:
question = qa.get('Question', '')
answer = qa.get('Answer', '')
if question and answer:
summary_text.append(f"**Q: {question}**")
summary_text.append(f"A: {answer}")
summary_text.append("")
# 兼容旧逻辑:如果上述都为空,尝试 Text 或 Headline
if not summary_text:
if 'Text' in summarization:
summary_text.append(summarization['Text'])
elif 'Headline' in summarization:
summary_text.append(summarization['Headline'])
if summary_text:
task.summary = "\n".join(summary_text)
@@ -336,3 +373,37 @@ class AliyunTingwuService:
task.summary = "\n".join(summary_text)
task.save()
# 4. 自动触发 AI 评估 (如果任务首次成功且有启用的模板)
if previous_status != 'SUCCEEDED' and task.status == 'SUCCEEDED' and task.transcription:
self.trigger_ai_evaluations(task)
def trigger_ai_evaluations(self, task):
    """Run one AI evaluation per active template for a finished task.

    For every AIEvaluationTemplate with `is_active=True`, creates an
    AIEvaluation row that snapshots the template's current model and prompt
    (so later template edits don't rewrite history), then evaluates it
    synchronously via BailianService. A failure on one template is logged
    and does not stop the remaining templates.

    :param task: TranscriptionTask instance (expected to have a completed
                 transcription — the caller checks this).
    """
    active_templates = AIEvaluationTemplate.objects.filter(is_active=True)
    if not active_templates.exists():
        logger.info("No active AI evaluation templates found.")
        return
    # Local import — presumably to avoid a circular import at module load;
    # TODO(review): confirm against bailian_service's imports.
    from .bailian_service import BailianService
    service = BailianService()
    for template in active_templates:
        # Snapshot the template's configuration onto the evaluation record.
        evaluation = AIEvaluation.objects.create(
            task=task,
            template=template,
            model_selection=template.model_selection,
            prompt=template.prompt,
            status=AIEvaluation.Status.PENDING,
        )
        # Synchronous for simplicity; move to a Celery task in production.
        try:
            service.evaluate_task(evaluation)
            # Lazy %-args: no string formatting when INFO logging is disabled.
            logger.info("Triggered evaluation %s for template %s",
                        evaluation.id, template.name)
        except Exception:
            # logger.exception records the full traceback, unlike
            # logger.error with just the message text.
            logger.exception("Failed to trigger evaluation %s", evaluation.id)

View File

@@ -154,7 +154,7 @@ class TranscriptionTaskViewSet(viewsets.ModelViewSet):
}
}
},
responses={200: AIEvaluationSerializer}
responses={200: AIEvaluationSerializer(many=True)}
)
def evaluate(self, request, pk=None):
"""
@@ -162,30 +162,63 @@ class TranscriptionTaskViewSet(viewsets.ModelViewSet):
"""
task = self.get_object()
# 1. 检查或创建 Evaluation 对象
evaluation, created = AIEvaluation.objects.get_or_create(task=task)
# 1. 如果有 active template触发所有 active template
# 2. 如果请求体提供了 custom prompt则创建一个 custom evaluation (no template)
# 2. 如果请求中有配置,更新配置
from .models import AIEvaluationTemplate
from .bailian_service import BailianService
service = BailianService()
evaluations_to_process = []
# A. 如果指定了 Prompt/Model视为手动单次评估
model_selection = request.data.get('model_selection')
prompt = request.data.get('prompt')
updated = False
if model_selection:
evaluation.model_selection = model_selection
updated = True
if prompt:
evaluation.prompt = prompt
updated = True
# 创建一个不关联 Template 的评估
eval, _ = AIEvaluation.objects.get_or_create(
task=task,
template=None,
defaults={
'model_selection': model_selection or 'qwen-plus',
'prompt': prompt
}
)
# 更新配置
eval.model_selection = model_selection or eval.model_selection
eval.prompt = prompt
eval.save()
evaluations_to_process.append(eval)
else:
# B. 否则触发所有 Active Templates
active_templates = AIEvaluationTemplate.objects.filter(is_active=True)
if not active_templates.exists():
return Response({'message': 'No active templates and no custom prompt provided'}, status=status.HTTP_400_BAD_REQUEST)
for t in active_templates:
eval, _ = AIEvaluation.objects.get_or_create(
task=task,
template=t,
defaults={
'model_selection': t.model_selection,
'prompt': t.prompt
}
)
# 始终更新为模板最新配置? 或者保留历史? 用户意图似乎是"模版搭好...启用...生成几份"
# 这里假设触发时应用模板当前配置
eval.model_selection = t.model_selection
eval.prompt = t.prompt
eval.save()
evaluations_to_process.append(eval)
# 执行评估
for eval_obj in evaluations_to_process:
service.evaluate_task(eval_obj)
if updated:
evaluation.save()
# 3. 调用 Service 执行评估
from .bailian_service import BailianService
service = BailianService()
service.evaluate_task(evaluation)
serializer = AIEvaluationSerializer(evaluation)
# 返回该任务的所有评估结果
all_evals = AIEvaluation.objects.filter(task=task)
serializer = AIEvaluationSerializer(all_evals, many=True)
return Response(serializer.data)
@action(detail=True, methods=['get'])
@@ -240,6 +273,9 @@ class TranscriptionTaskViewSet(viewsets.ModelViewSet):
# 调用 Service 进行解析和更新
service.parse_and_update_task(task, result)
# 重新获取 task 以包含更新后的关联字段 (如 ai_evaluations)
task.refresh_from_db()
serializer = self.get_serializer(task)
return Response(serializer.data)

View File

@@ -363,7 +363,12 @@ UNFOLD = {
"link": reverse_lazy("admin:ai_services_transcriptiontask_changelist"),
},
{
"title": "AI 智能评估",
"title": "AI 评估模板",
"icon": "rule",
"link": reverse_lazy("admin:ai_services_aievaluationtemplate_changelist"),
},
{
"title": "AI 评估结果",
"icon": "psychology",
"link": reverse_lazy("admin:ai_services_aievaluation_changelist"),
},