diff --git a/.autograde/aggregate_final_grade.py b/.autograde/aggregate_final_grade.py
deleted file mode 100644
index 2f19bd5..0000000
--- a/.autograde/aggregate_final_grade.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python3
-"""
-期末大作业成绩汇总脚本
-
-汇总编程测试分数 + REPORT.md 分数 + FRONTEND.md 分数
-"""
-
-import argparse
-import json
-import os
-import sys
-
-
-def load_json(filepath, default=None):
- """安全加载 JSON 文件"""
- if not os.path.exists(filepath):
- return default or {}
- try:
- with open(filepath, "r", encoding="utf-8") as f:
- return json.load(f)
- except Exception as e:
- print(f"Error loading {filepath}: {e}", file=sys.stderr)
- return default or {}
-
-
-def main():
- parser = argparse.ArgumentParser(description="Aggregate final project grades")
- parser.add_argument("--programming", required=True, help="Programming test grade JSON")
- parser.add_argument("--report", required=True, help="REPORT.md LLM grade JSON")
- parser.add_argument("--frontend", required=True, help="FRONTEND.md LLM grade JSON")
- parser.add_argument("--out", default="final_grade.json", help="Output JSON file")
- parser.add_argument("--summary", default="final_summary.md", help="Output summary markdown")
- args = parser.parse_args()
-
-    # Load each component's grade
- prog_grade = load_json(args.programming, {"total_score": 0, "max_score": 80})
- report_grade = load_json(args.report, {"total": 0})
- frontend_grade = load_json(args.frontend, {"total": 0})
-
-    # Extract the scores
- prog_score = prog_grade.get("total_score", 0)
- prog_max = prog_grade.get("max_score", 80)
-
- report_score = report_grade.get("total", 0)
-    report_max = 10  # REPORT.md is worth 10 points
-
- frontend_score = frontend_grade.get("total", 0)
-    frontend_max = 10  # FRONTEND.md is worth 10 points
-
-    # Compute the overall total
- total_score = prog_score + report_score + frontend_score
- total_max = prog_max + report_max + frontend_max
-
-    # Build the final grade payload (keeping the detailed LLM rubric output)
- final_grade = {
- "total_score": round(total_score, 2),
- "max_score": total_max,
- "breakdown": {
- "programming": {
- "score": round(prog_score, 2),
- "max_score": prog_max,
- "groups": prog_grade.get("groups", {})
- },
- "report": {
- "score": round(report_score, 2),
- "max_score": report_max,
- "flags": report_grade.get("flags", []),
- "confidence": report_grade.get("confidence"),
- "criteria": report_grade.get("criteria", []) # LLM 评分详情
- },
- "frontend": {
- "score": round(frontend_score, 2),
- "max_score": frontend_max,
- "flags": frontend_grade.get("flags", []),
- "confidence": frontend_grade.get("confidence"),
- "criteria": frontend_grade.get("criteria", []) # LLM 评分详情
- }
- }
- }
-
-    # Save final_grade.json
- with open(args.out, "w", encoding="utf-8") as f:
- json.dump(final_grade, f, ensure_ascii=False, indent=2)
-
-    # Write the summary markdown
- with open(args.summary, "w", encoding="utf-8") as f:
- f.write("# 期末大作业成绩报告\n\n")
- f.write(f"## 总分:{total_score:.2f} / {total_max}\n\n")
-
- f.write("## 分项成绩\n\n")
- f.write("| 项目 | 得分 | 满分 | 备注 |\n")
- f.write("|------|------|------|------|\n")
- f.write(f"| 编程测试 | {prog_score:.2f} | {prog_max} | Core + Advanced + Challenge |\n")
- f.write(f"| REPORT.md | {report_score:.2f} | {report_max} | 后端与系统设计报告 |\n")
- f.write(f"| FRONTEND.md | {frontend_score:.2f} | {frontend_max} | 前端界面与交互设计报告 |\n")
-
-    # Programming-test details
- if prog_grade.get("groups"):
- f.write("\n### 编程测试详情\n\n")
- f.write("| 分组 | 通过 | 总数 | 得分 | 满分 |\n")
- f.write("|------|------|------|------|------|\n")
- for group_name, group_info in prog_grade["groups"].items():
- f.write(f"| {group_name} | {group_info.get('passed', 0)} | "
- f"{group_info.get('total', 0)} | {group_info.get('score', 0):.2f} | "
- f"{group_info.get('max_score', 0)} |\n")
-
-    # Flags
- all_flags = report_grade.get("flags", []) + frontend_grade.get("flags", [])
- if all_flags:
- f.write("\n### 标记\n\n")
- for flag in set(all_flags):
- f.write(f"- {flag}\n")
-
- print(f"Final grade: {total_score:.2f}/{total_max}")
-
-
-if __name__ == "__main__":
- main()
-
diff --git a/.autograde/aggregate_llm_grades.py b/.autograde/aggregate_llm_grades.py
deleted file mode 100644
index 44f2fce..0000000
--- a/.autograde/aggregate_llm_grades.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python3
-"""
-聚合多个 LLM 评分结果
-"""
-import json
-import argparse
-from pathlib import Path
-
-
-def load_grade(filepath):
- """加载单个评分文件"""
- try:
- with open(filepath, 'r', encoding='utf-8') as f:
- return json.load(f)
- except FileNotFoundError:
- print(f"Warning: {filepath} not found")
- return None
- except json.JSONDecodeError as e:
- print(f"Error parsing {filepath}: {e}")
- return None
-
-
-def aggregate_grades(input_files, output_file, summary_file):
- """聚合多个评分文件"""
- grades = []
- total_score = 0
- max_score = 0
- need_review_count = 0
-
- for input_file in input_files:
- grade = load_grade(input_file)
- if grade:
- grades.append(grade)
-            # Support both formats: 'total' (llm_grade.py) or 'score' (legacy)
-            score = grade.get('total', grade.get('score', 0))
-            total_score += score
-            # Default to 10 points per question
-            max_score += grade.get('max_score', 10)
-            # Check whether manual review is needed
-            if 'need_review' in grade.get('flags', []) or grade.get('need_review', False):
- need_review_count += 1
-
-    # Compute the total
- final_score = total_score if max_score > 0 else 0
- final_max_score = max_score
-
-    # Build the aggregated result
- result = {
- 'total_score': final_score,
- 'max_score': final_max_score,
- 'questions': len(grades),
- 'need_review': need_review_count > 0,
- 'details': grades
- }
-
-    # Save the JSON output
- with open(output_file, 'w', encoding='utf-8') as f:
- json.dump(result, f, indent=2, ensure_ascii=False)
-
-    # Generate the Markdown summary
- summary_lines = [
- '# LLM 简答题评分汇总',
- '',
- f'**总分**: {final_score:.1f} / {final_max_score:.1f}',
- f'**题目数**: {len(grades)}',
- f'**需要人工审核**: {"是" if result["need_review"] else "否"}',
- '',
- '## 各题详情',
- ''
- ]
-
- for i, grade in enumerate(grades, 1):
- q_name = grade.get('question', f'Q{i}')
-        # Support both formats: 'total' (llm_grade.py) or 'score' (legacy)
-        score = grade.get('total', grade.get('score', 0))
-        max_q_score = grade.get('max_score', 10)
-        # Check whether manual review is needed
- need_review = 'need_review' in grade.get('flags', []) or grade.get('need_review', False)
- confidence = grade.get('confidence', 1.0)
-
- summary_lines.append(f'### SA{i}')
- summary_lines.append(f'- **得分**: {score:.2f} / {max_q_score:.1f}')
- summary_lines.append(f'- **置信度**: {confidence:.2f}')
- if need_review:
- summary_lines.append('- ⚠️ **需要人工审核**')
-
-        # Show per-criterion scores
- if 'criteria' in grade:
- summary_lines.append('- **分项**:')
- for criterion in grade['criteria']:
- crit_id = criterion.get('id', '')
- crit_score = criterion.get('score', 0)
- crit_reason = criterion.get('reason', '')
- summary_lines.append(f' - {crit_id}: {crit_score:.1f} - {crit_reason}')
-
- summary_lines.append('')
-
- with open(summary_file, 'w', encoding='utf-8') as f:
- f.write('\n'.join(summary_lines))
-
- print(f"✅ Aggregated {len(grades)} grades")
- print(f" Total: {final_score:.1f} / {final_max_score:.1f}")
- print(f" Output: {output_file}")
-
-
-def main():
- parser = argparse.ArgumentParser(description='Aggregate LLM grading results')
- parser.add_argument('--inputs', nargs='+', required=True,
- help='Input grade JSON files')
- parser.add_argument('--out', required=True,
- help='Output aggregated JSON file')
- parser.add_argument('--summary', required=True,
- help='Output summary Markdown file')
-
- args = parser.parse_args()
-
- aggregate_grades(args.inputs, args.out, args.summary)
-
-
-if __name__ == '__main__':
- main()
diff --git a/.autograde/create_minimal_metadata.py b/.autograde/create_minimal_metadata.py
deleted file mode 100644
index 75b3875..0000000
--- a/.autograde/create_minimal_metadata.py
+++ /dev/null
@@ -1,372 +0,0 @@
-#!/usr/bin/env python3
-"""
-创建完整的成绩元数据文件
-
-从 grade.json / final_grade.json / llm_grade.json 生成 metadata.json
-包含所有详细信息:未通过的测试、各题详情等
-"""
-
-import json
-import os
-import sys
-import re
-from datetime import datetime
-
-
-def extract_student_id():
- """从环境变量或仓库名中提取学生 ID"""
-    # Prefer the environment variable
- student_id = os.getenv("STUDENT_ID")
- if student_id:
- return student_id
-
-    # Fall back to the repository name
-    # Supported formats: org/assignment-stu_xxx or org/assignment-stu-xxx
-    repo = os.getenv("REPO", "")
-    if repo:
-        # Match the xxx-stu_yyy or xxx-stu-yyy form
-        match = re.search(r'-stu[_-]([a-zA-Z0-9_]+)$', repo)
-        if match:
-            return match.group(1)
-        # Also try matching stu_xxx anywhere in the path
- match = re.search(r'stu[_-]([a-zA-Z0-9_]+)', repo)
- if match:
- return match.group(1)
-
- return None
-
-
-def extract_assignment_id():
- """从环境变量或仓库名中提取作业 ID"""
-    # Prefer the environment variable
- assignment_id = os.getenv("ASSIGNMENT_ID")
- if assignment_id:
- return assignment_id
-
-    # Fall back to the repository name
-    # Supported formats: org/assignment-stu_xxx or org/assignment-template
-    repo = os.getenv("REPO", "")
-    if repo:
-        # Take the repo-name part (drop the organization)
-        repo_name = repo.split("/")[-1] if "/" in repo else repo
-
-        # Strip the -stu_xxx or -template suffix
- assignment = re.sub(r'-stu[_-][a-zA-Z0-9_]+$', '', repo_name)
- assignment = re.sub(r'-template$', '', assignment)
-
- if assignment:
- return assignment
-
- return "unknown"
-
-
-def create_final_metadata(final_grade_file='final_grade.json'):
- """从 final_grade.json 创建元数据(期末大作业专用)"""
- try:
- with open(final_grade_file, 'r', encoding='utf-8') as f:
- final_data = json.load(f)
-
- assignment_id = extract_assignment_id()
- student_id = extract_student_id()
-
- total_score = final_data.get("total_score", 0)
- max_score = final_data.get("max_score", 100)
- breakdown = final_data.get("breakdown", {})
-
-        # Build the component list
- components = []
-
-        # Programming-test component
- prog = breakdown.get("programming", {})
- if prog:
- prog_component = {
- "type": "programming_java",
- "score": prog.get("score", 0),
- "max_score": prog.get("max_score", 80),
- "details": {
- "groups": prog.get("groups", {})
- }
- }
- components.append(prog_component)
-
-        # REPORT.md component
- report = breakdown.get("report", {})
- if report:
- report_component = {
- "type": "llm_report",
- "score": report.get("score", 0),
- "max_score": report.get("max_score", 10),
- "details": {
- "flags": report.get("flags", []),
- "confidence": report.get("confidence"),
- "criteria": report.get("criteria", []) # LLM 各评分项及理由
- }
- }
- components.append(report_component)
-
-        # FRONTEND.md component
- frontend = breakdown.get("frontend", {})
- if frontend:
- frontend_component = {
- "type": "llm_frontend",
- "score": frontend.get("score", 0),
- "max_score": frontend.get("max_score", 10),
- "details": {
- "flags": frontend.get("flags", []),
- "confidence": frontend.get("confidence"),
- "criteria": frontend.get("criteria", []) # LLM 各评分项及理由
- }
- }
- components.append(frontend_component)
-
- metadata = {
- "version": "1.0",
- "assignment": assignment_id,
- "student_id": student_id,
- "components": components,
- "total_score": round(total_score, 2),
- "total_max_score": max_score,
- "timestamp": datetime.now().isoformat(),
- "generator": "gitea-autograde"
- }
-
- return metadata
- except Exception as e:
- print(f"Error creating final metadata: {e}", file=sys.stderr)
- return {}
-
-
-def create_grade_metadata(grade_file='grade.json'):
- """从 grade.json 创建元数据,包含所有详细信息"""
- try:
- with open(grade_file, 'r') as f:
- grade_data = json.load(f)
-
- assignment_id = extract_assignment_id()
- student_id = extract_student_id()
- language = os.getenv("LANGUAGE", "java")
-
-        # Extract all relevant fields
- final_score = grade_data.get("final_score", grade_data.get("total_score", grade_data.get("score", 0)))
- base_score = grade_data.get("base_score", final_score)
- penalty = grade_data.get("penalty", 0)
- passed = grade_data.get("passed", 0)
- total = grade_data.get("total", 0)
- fails = grade_data.get("fails", [])
- max_score = grade_data.get("max_score", 100)
- test_framework = grade_data.get("test_framework", "junit")
- coverage = grade_data.get("coverage")
- raw_score = grade_data.get("raw_score")
- groups = grade_data.get("groups", {})
-
-        # Derive the component type from the language
- type_map = {
- "python": "programming_python",
- "java": "programming_java",
- "r": "programming_r"
- }
- component_type = type_map.get(language, f"programming_{language}")
-
- component = {
- "type": component_type,
- "language": language,
- "score": round(final_score, 2),
- "max_score": max_score,
- "details": {
- "passed": passed,
- "total": total,
- "base_score": round(base_score, 2),
- "penalty": round(penalty, 2),
- "coverage": round(coverage, 2) if coverage else None,
- "raw_score": round(raw_score, 2) if raw_score else None,
- "failed_tests": fails,
- "test_framework": test_framework,
- "groups": groups
- }
- }
-
- metadata = {
- "version": "1.0",
- "assignment": assignment_id,
- "student_id": student_id,
- "components": [component],
- "total_score": round(final_score, 2),
- "total_max_score": max_score,
- "timestamp": datetime.now().isoformat(),
- "generator": "gitea-autograde"
- }
-
- return metadata
- except Exception as e:
- print(f"Error creating grade metadata: {e}", file=sys.stderr)
- return {}
-
-
-def create_llm_metadata(llm_grade_file='artifacts/llm_grade.json'):
- """从 llm_grade.json 创建元数据,包含所有详细信息"""
- try:
- with open(llm_grade_file, 'r') as f:
- llm_data = json.load(f)
-
- assignment_id = extract_assignment_id()
- student_id = extract_student_id()
-
-    # Extract the aggregated fields
- total_score = llm_data.get("total_score", llm_data.get("total", 0))
- max_score = llm_data.get("max_score", 30)
- need_review = llm_data.get("need_review", False)
- questions_data = llm_data.get("details", llm_data.get("questions", []))
-
-    # Build per-question details
- question_details = []
- for i, q_data in enumerate(questions_data, 1):
- q_score = q_data.get("total", q_data.get("score", 0))
- q_max = q_data.get("max_score", 10)
- q_confidence = q_data.get("confidence", 1.0)
- q_flags = q_data.get("flags", [])
- q_need_review = "need_review" in q_flags or q_data.get("need_review", False)
- q_criteria = q_data.get("criteria", [])
-
-        # Normalize the criteria format
- formatted_criteria = []
- for crit in q_criteria:
- formatted_criteria.append({
- "id": crit.get("id", ""),
- "score": round(float(crit.get("score", 0)), 2),
- "reason": crit.get("reason", "")
- })
-
- question_detail = {
- "question_id": f"SA{i}",
- "question_name": q_data.get("question", f"SA{i}"),
- "score": round(float(q_score), 2),
- "max_score": q_max,
- "confidence": round(float(q_confidence), 2),
- "need_review": q_need_review,
- "flags": q_flags,
- "criteria": formatted_criteria
- }
- question_details.append(question_detail)
-
- component = {
- "type": "llm_essay",
- "score": round(float(total_score), 2),
- "max_score": max_score,
- "details": {
- "questions": len(question_details),
- "need_review": need_review,
- "question_details": question_details
- }
- }
-
- metadata = {
- "version": "1.0",
- "assignment": assignment_id,
- "student_id": student_id,
- "components": [component],
- "total_score": round(float(total_score), 2),
- "total_max_score": max_score,
- "timestamp": datetime.now().isoformat(),
- "generator": "gitea-autograde"
- }
-
- return metadata
- except Exception as e:
- print(f"Error creating LLM metadata: {e}", file=sys.stderr)
- return {}
-
-
-def create_objective_metadata(objective_file='objective_grade.json'):
- """从 objective_grade.json 创建元数据"""
- try:
- with open(objective_file, 'r', encoding='utf-8') as f:
- objective_data = json.load(f)
-
- assignment_id = extract_assignment_id()
- student_id = extract_student_id()
-
- total_score = objective_data.get("score", 0)
- max_score = objective_data.get("max_score", 0)
- components = objective_data.get("components", [])
-
- formatted_components = []
- for comp in components:
- comp_type = comp.get("type", "objective")
- formatted_components.append({
- "type": f"objective_{comp_type}",
- "score": comp.get("score", 0),
- "max_score": comp.get("max_score", 0),
- "details": comp.get("details", {})
- })
-
- if not formatted_components:
- formatted_components.append({
- "type": "objective_total",
- "score": total_score,
- "max_score": max_score,
- "details": {}
- })
-
- metadata = {
- "version": "1.0",
- "assignment": assignment_id,
- "student_id": student_id,
- "components": formatted_components,
- "total_score": total_score,
- "total_max_score": max_score,
- "timestamp": datetime.now().isoformat(),
- "generator": "gitea-autograde"
- }
-
- return metadata
- except Exception as e:
- print(f"Error creating objective metadata: {e}", file=sys.stderr)
- return {}
-
-
-def main():
- """主函数"""
- # 检查命令行参数或环境变量
- grade_type = os.getenv("GRADE_TYPE", "programming").lower()
- grade_file_override = os.getenv("GRADE_FILE")
-
- if grade_type == "final":
-        # Final-project grade (programming + reports)
- final_file = grade_file_override or "final_grade.json"
- if os.path.exists(final_file):
- metadata = create_final_metadata(final_file)
- else:
- print(f"Error: {final_file} not found", file=sys.stderr)
- metadata = {}
- elif grade_type == "llm":
-        # LLM grade
- llm_file = grade_file_override or "artifacts/llm_grade.json"
- if os.path.exists(llm_file):
- metadata = create_llm_metadata(llm_file)
- elif os.path.exists("llm_grade.json"):
- metadata = create_llm_metadata("llm_grade.json")
- else:
- print(f"Error: {llm_file} not found", file=sys.stderr)
- metadata = {}
- elif grade_type == "objective":
- objective_file = grade_file_override or "objective_grade.json"
- if os.path.exists(objective_file):
- metadata = create_objective_metadata(objective_file)
- else:
- print(f"Error: {objective_file} not found", file=sys.stderr)
- metadata = {}
- else:
-        # Programming grade
- grade_file = grade_file_override or "grade.json"
- if os.path.exists(grade_file):
- metadata = create_grade_metadata(grade_file)
- else:
- print(f"Error: {grade_file} not found", file=sys.stderr)
- metadata = {}
-
-    # Write to stdout
- print(json.dumps(metadata, ensure_ascii=False, indent=2))
-
-
-if __name__ == "__main__":
- main()
diff --git a/.autograde/generate_pdf_report.py b/.autograde/generate_pdf_report.py
deleted file mode 100644
index 885836c..0000000
--- a/.autograde/generate_pdf_report.py
+++ /dev/null
@@ -1,815 +0,0 @@
-#!/usr/bin/env python3
-"""
-生成专业的 PDF 成绩报告
-
-适用于打印归档,包含:
-- 封面页(课程信息、学生信息)
-- 后端开发反思报告
-- 前端开发反思报告
-- 评分详情页
-- 防伪水印
-"""
-
-import argparse
-import hashlib
-import json
-import os
-import re
-import sys
-from datetime import datetime
-from pathlib import Path
-
-try:
- import markdown
- from weasyprint import HTML, CSS
- from weasyprint.text.fonts import FontConfiguration
- HAS_PDF_SUPPORT = True
-except ImportError:
- HAS_PDF_SUPPORT = False
-
-
-def load_json(filepath, default=None):
- """安全加载 JSON 文件"""
- if not os.path.exists(filepath):
- return default or {}
- try:
- with open(filepath, "r", encoding="utf-8") as f:
- return json.load(f)
- except Exception as e:
- print(f"Error loading {filepath}: {e}", file=sys.stderr)
- return default or {}
-
-
-def read_file(filepath):
- """读取文件内容"""
- if os.path.exists(filepath):
- with open(filepath, "r", encoding="utf-8") as f:
- return f.read()
- return ""
-
-
-def fix_image_paths(content, images_dir):
- """修复图片路径为绝对路径"""
- if not images_dir or not os.path.isdir(images_dir):
- return content
-
- abs_images_dir = os.path.abspath(images_dir)
-
- def replace_img(match):
- alt = match.group(1)
- src = match.group(2)
- if not src.startswith(('http://', 'https://', 'file://', '/')):
- abs_src = os.path.join(abs_images_dir, os.path.basename(src))
- if os.path.exists(abs_src):
-                return f'<img src="file://{abs_src}" alt="{alt}" />'
- return match.group(0)
-
- content = re.sub(r'!\[([^\]]*)\]\(([^)]+)\)', replace_img, content)
- return content
-
-
-def markdown_to_html(md_content):
- """将 Markdown 转换为 HTML(仅内容部分)"""
- extensions = ['tables', 'fenced_code', 'nl2br']
- return markdown.markdown(md_content, extensions=extensions)
-
-
-def generate_watermark_id(student_id, commit_sha):
- """生成唯一的水印标识"""
- raw = f"{student_id}-{commit_sha}-{datetime.now().isoformat()}"
- return hashlib.sha256(raw.encode()).hexdigest()[:16].upper()
-
-
-def generate_cover_page(student_id, student_name="", class_name="",
- assignment_name="VibeVault 期末大作业"):
- """生成封面页 HTML"""
- current_date = datetime.now().strftime('%Y年%m月%d日')
- current_semester = "2025年秋季学期"
-
- # 如果有学生姓名,直接显示;否则留空供手写
- name_value = student_name if student_name else ' ' * 8
- class_value = class_name if class_name else ' ' * 8
- id_value = student_id if student_id else ' ' * 8
-
-    return f'''
-    <div class="cover-page">
-        <div class="cover-header">
-            <div class="university-name"></div>
-        </div>
-        <div class="cover-title">
-            <h1>《Java 程序设计》</h1>
-            <h2>期末大作业</h2>
-            <h3>{assignment_name}</h3>
-        </div>
-        <div class="cover-info">
-            <table class="info-table">
-                <tr>
-                    <td class="label">学 号:</td>
-                    <td class="value underline">{id_value}</td>
-                </tr>
-                <tr>
-                    <td class="label">姓 名:</td>
-                    <td class="value underline">{name_value}</td>
-                </tr>
-                <tr>
-                    <td class="label">班 级:</td>
-                    <td class="value underline">{class_value}</td>
-                </tr>
-                <tr>
-                    <td class="label">提交日期:</td>
-                    <td class="value underline">{current_date}</td>
-                </tr>
-            </table>
-        </div>
-        <div class="cover-footer">
-            <p>{current_semester}</p>
-        </div>
-    </div>
-    '''
-
-
-def generate_report_section(title, content, icon="📝"):
- """生成报告章节 HTML"""
- if not content or content.strip() in ['', '*(未提交)*']:
-        html_content = '<p class="empty-notice">(未提交)</p>'
- else:
- html_content = markdown_to_html(content)
-
-    return f'''
-    <div class="report-section">
-        <h1 class="section-title">{icon} {title}</h1>
-        <div class="section-content">
-            {html_content}
-        </div>
-    </div>
-    '''
-
-
-def generate_grade_page(final_grade):
- """生成评分详情页 HTML"""
- total = final_grade.get("total_score", 0)
- max_score = final_grade.get("max_score", 100)
- breakdown = final_grade.get("breakdown", {})
-
-    # Programming-test details
- prog = breakdown.get("programming", {})
- prog_rows = ""
- if prog.get("groups"):
- for group_name, group_info in prog["groups"].items():
- prog_rows += f'''
-
- | {group_name} |
- {group_info.get('passed', 0)} / {group_info.get('total', 0)} |
- {group_info.get('score', 0):.1f} |
- {group_info.get('max_score', 0)} |
-
- '''
-
-    # LLM grading details
- def format_llm_details(section_data):
- criteria = section_data.get("criteria", [])
- if not criteria:
-            return f'<p class="no-detail">无详细评分</p>'
-
- rows = ""
- for c in criteria:
- reason = c.get("reason", "").replace("<", "<").replace(">", ">")
- rows += f'''
-
- | {c.get('id', '')} |
- {c.get('score', 0)} |
- {reason} |
-
- '''
-
- confidence = section_data.get("confidence")
- flags = section_data.get("flags", [])
- footer = ""
-        if confidence:
-            footer += f'<span class="confidence">置信度: {confidence:.2f}</span>'
-        if flags:
-            footer += f'<span class="flags">标记: {", ".join(flags)}</span>'
-
-        return f'''
-        <table class="detail-table">
-            {rows}
-        </table>
-        <div class="detail-footer">{footer}</div>
-        '''
-
- report = breakdown.get("report", {})
- frontend = breakdown.get("frontend", {})
-
-    return f'''
-    <div class="grade-page">
-        <h1 class="page-title">📊 评分详情</h1>
-
-        <div class="total-score">
-            <div class="score-circle">
-                <span class="score-value">{total:.1f}</span>
-                <span class="score-max">/ {max_score}</span>
-            </div>
-            <div class="score-label">总分</div>
-        </div>
-
-        <div class="grade-summary">
-            <h2>成绩汇总</h2>
-            <table class="summary-table">
-                <thead>
-                    <tr><th>项目</th><th>得分</th><th>满分</th><th>占比</th></tr>
-                </thead>
-                <tbody>
-                    <tr>
-                        <td>编程测试</td>
-                        <td class="score-cell">{prog.get('score', 0):.1f}</td>
-                        <td>{prog.get('max_score', 80)}</td>
-                        <td>{prog.get('max_score', 80)}%</td>
-                    </tr>
-                    <tr>
-                        <td>后端反思报告</td>
-                        <td class="score-cell">{report.get('score', 0):.1f}</td>
-                        <td>{report.get('max_score', 10)}</td>
-                        <td>{report.get('max_score', 10)}%</td>
-                    </tr>
-                    <tr>
-                        <td>前端反思报告</td>
-                        <td class="score-cell">{frontend.get('score', 0):.1f}</td>
-                        <td>{frontend.get('max_score', 10)}</td>
-                        <td>{frontend.get('max_score', 10)}%</td>
-                    </tr>
-                </tbody>
-            </table>
-        </div>
-
-        <div class="grade-details">
-            <h2>编程测试详情</h2>
-            <table class="detail-table">
-                <thead>
-                    <tr><th>测试组</th><th>通过数</th><th>得分</th><th>满分</th></tr>
-                </thead>
-                <tbody>
-                    {prog_rows or '<tr><td colspan="4">无测试数据</td></tr>'}
-                </tbody>
-            </table>
-        </div>
-
-        <div class="grade-details">
-            <h2>后端反思报告评分</h2>
-            {format_llm_details(report)}
-        </div>
-
-        <div class="grade-details">
-            <h2>前端反思报告评分</h2>
-            {format_llm_details(frontend)}
-        </div>
-    </div>
-    '''
-
-
-def get_css_styles(watermark_text="", commit_sha=""):
- """获取 PDF 样式,包含水印和版本标记"""
-
-    # Watermark styles
- watermark_css = ""
- if watermark_text:
- watermark_css = f'''
- /* 水印 */
- body::after {{
- content: "{watermark_text}";
- position: fixed;
- top: 50%;
- left: 50%;
- transform: translate(-50%, -50%) rotate(-45deg);
- font-size: 60pt;
- color: rgba(200, 200, 200, 0.15);
- white-space: nowrap;
- pointer-events: none;
- z-index: 9999;
- }}
-
- .report-section::before,
- .grade-page::before {{
- content: "{watermark_text}";
- position: absolute;
- top: 50%;
- left: 50%;
- transform: translate(-50%, -50%) rotate(-45deg);
- font-size: 48pt;
- color: rgba(200, 200, 200, 0.12);
- white-space: nowrap;
- pointer-events: none;
- z-index: -1;
- }}
- '''
-
-    # Version marker (top-right corner)
- commit_marker = ""
- if commit_sha:
- short_sha = commit_sha[:7] if len(commit_sha) > 7 else commit_sha
- commit_marker = f'''
- @top-right {{
- content: "{short_sha}";
- font-size: 8pt;
- color: #999;
- font-family: 'Consolas', 'Monaco', monospace;
- }}
- '''
-
- return f'''
- @page {{
- size: A4;
- margin: 2cm 2.5cm;
- {commit_marker}
- @bottom-center {{
- content: counter(page);
- font-size: 10pt;
- color: #666;
- }}
- }}
-
- @page cover {{
- margin: 0;
- @bottom-center {{ content: none; }}
- }}
-
- @font-face {{
- font-family: 'Noto Sans CJK SC';
- src: local('Noto Sans CJK SC'), local('Noto Sans SC'),
- local('Source Han Sans SC'), local('Source Han Sans CN'),
- local('PingFang SC'), local('Microsoft YaHei'),
- local('SimHei'), local('WenQuanYi Micro Hei');
- }}
-
- * {{
- margin: 0;
- padding: 0;
- box-sizing: border-box;
- }}
-
- body {{
- font-family: 'Noto Sans CJK SC', 'Source Han Sans SC', 'PingFang SC',
- 'Microsoft YaHei', 'SimHei', 'WenQuanYi Micro Hei', sans-serif;
- font-size: 11pt;
- line-height: 1.8;
- color: #333;
- }}
-
- {watermark_css}
-
-    /* Cover-page styles */
- .cover-page {{
- page: cover;
- height: 100vh;
- display: flex;
- flex-direction: column;
- justify-content: center;
- align-items: center;
- text-align: center;
- padding: 3cm;
- page-break-after: always;
- }}
-
- .cover-header {{
- margin-bottom: 4cm;
- }}
-
- .university-name {{
- font-size: 18pt;
- color: #1a5490;
- letter-spacing: 0.5em;
- font-weight: bold;
- }}
-
- .cover-title h1 {{
- font-size: 26pt;
- color: #1a5490;
- margin-bottom: 0.5cm;
- font-weight: bold;
- }}
-
- .cover-title h2 {{
- font-size: 20pt;
- color: #333;
- margin-bottom: 0.3cm;
- font-weight: normal;
- }}
-
- .cover-title h3 {{
- font-size: 14pt;
- color: #666;
- font-weight: normal;
- }}
-
- .cover-info {{
- margin-top: 3cm;
- }}
-
- .info-table {{
- margin: 0 auto;
- border-collapse: collapse;
- }}
-
- .info-table td {{
- padding: 0.4cm 0.5cm;
- font-size: 12pt;
- }}
-
- .info-table .label {{
- text-align: right;
- color: #333;
- }}
-
- .info-table .value {{
- text-align: left;
- min-width: 6cm;
- }}
-
- .info-table .underline {{
- border-bottom: 1px solid #333;
- }}
-
- .cover-footer {{
- margin-top: 4cm;
- color: #666;
- font-size: 11pt;
- }}
-
-    /* Report-section styles */
- .report-section {{
- page-break-before: always;
- position: relative;
- }}
-
- .section-title {{
- font-size: 18pt;
- color: #1a5490;
- border-bottom: 2px solid #1a5490;
- padding-bottom: 0.3cm;
- margin-bottom: 0.8cm;
- }}
-
- .section-content {{
- text-align: justify;
- }}
-
- .section-content h1 {{
- font-size: 16pt;
- color: #1a5490;
- margin: 1cm 0 0.5cm 0;
- }}
-
- .section-content h2 {{
- font-size: 14pt;
- color: #333;
- margin: 0.8cm 0 0.4cm 0;
- }}
-
- .section-content h3 {{
- font-size: 12pt;
- color: #555;
- margin: 0.6cm 0 0.3cm 0;
- }}
-
- .section-content p {{
- margin: 0.4cm 0;
- text-indent: 2em;
- }}
-
- .section-content ul, .section-content ol {{
- margin: 0.4cm 0 0.4cm 1.5cm;
- }}
-
- .section-content li {{
- margin: 0.2cm 0;
- }}
-
- .section-content img {{
- max-width: 100%;
- height: auto;
- margin: 0.5cm auto;
- display: block;
- border: 1px solid #ddd;
- }}
-
- .section-content code {{
- font-family: 'Consolas', 'Monaco', monospace;
- background: #f5f5f5;
- padding: 0.1cm 0.2cm;
- border-radius: 3px;
- font-size: 10pt;
- }}
-
- .section-content pre {{
- background: #f5f5f5;
- padding: 0.5cm;
- border-radius: 5px;
- overflow-x: auto;
- font-size: 9pt;
- margin: 0.5cm 0;
- }}
-
- .section-content blockquote {{
- border-left: 4px solid #1a5490;
- padding-left: 0.5cm;
- margin: 0.5cm 0;
- color: #555;
- background: #f9f9f9;
- padding: 0.3cm 0.5cm;
- }}
-
- .section-content table {{
- width: 100%;
- border-collapse: collapse;
- margin: 0.5cm 0;
- font-size: 10pt;
- }}
-
- .section-content th, .section-content td {{
- border: 1px solid #ddd;
- padding: 0.3cm;
- text-align: left;
- }}
-
- .section-content th {{
- background: #1a5490;
- color: white;
- }}
-
- .section-content tr:nth-child(even) {{
- background: #f9f9f9;
- }}
-
- .empty-notice {{
- color: #999;
- font-style: italic;
- text-align: center;
- padding: 2cm;
- }}
-
-    /* Grade-page styles */
- .grade-page {{
- page-break-before: always;
- position: relative;
- }}
-
- .page-title {{
- font-size: 18pt;
- color: #1a5490;
- text-align: center;
- margin-bottom: 1cm;
- }}
-
- .total-score {{
- text-align: center;
- margin: 1cm 0;
- }}
-
- .score-circle {{
- display: inline-block;
- width: 4cm;
- height: 4cm;
- border: 4px solid #1a5490;
- border-radius: 50%;
- line-height: 4cm;
- text-align: center;
- }}
-
- .score-value {{
- font-size: 28pt;
- font-weight: bold;
- color: #1a5490;
- }}
-
- .score-max {{
- font-size: 14pt;
- color: #666;
- }}
-
- .score-label {{
- font-size: 12pt;
- color: #666;
- margin-top: 0.3cm;
- }}
-
- .grade-summary, .grade-details {{
- margin: 0.8cm 0;
- }}
-
- .grade-summary h2, .grade-details h2 {{
- font-size: 14pt;
- color: #333;
- border-bottom: 1px solid #ddd;
- padding-bottom: 0.2cm;
- margin-bottom: 0.4cm;
- }}
-
- .summary-table, .detail-table {{
- width: 100%;
- border-collapse: collapse;
- font-size: 10pt;
- }}
-
- .summary-table th, .summary-table td,
- .detail-table th, .detail-table td {{
- border: 1px solid #ddd;
- padding: 0.25cm 0.4cm;
- text-align: left;
- }}
-
- .summary-table th, .detail-table th {{
- background: #1a5490;
- color: white;
- font-weight: normal;
- }}
-
- .summary-table tr:nth-child(even),
- .detail-table tr:nth-child(even) {{
- background: #f9f9f9;
- }}
-
- .score-cell {{
- text-align: center;
- font-weight: bold;
- color: #1a5490;
- }}
-
- .reason-cell {{
- font-size: 9pt;
- color: #555;
- max-width: 10cm;
- }}
-
- .detail-footer {{
- font-size: 9pt;
- color: #666;
- margin-top: 0.2cm;
- }}
-
- .detail-footer .confidence {{
- margin-right: 1cm;
- }}
-
- .detail-footer .flags {{
- color: #c00;
- }}
-
- .no-detail {{
- color: #999;
- font-style: italic;
- padding: 0.5cm;
- text-align: center;
- }}
-
- .grade-footer {{
- margin-top: 1cm;
- padding-top: 0.5cm;
- border-top: 1px solid #ddd;
- font-size: 9pt;
- color: #999;
- text-align: center;
- }}
-
- .grade-footer p {{
- margin: 0.1cm 0;
- text-indent: 0;
- }}
- '''
-
-
-def create_full_html(args, final_grade, student_info):
- """创建完整的 HTML 文档"""
-
-    # Read the report contents
- report_content = read_file(args.report)
- frontend_content = read_file(args.frontend)
-
-    # Fix image paths
- frontend_content = fix_image_paths(frontend_content, args.images)
-
-    # Strip each report's own top-level title line (to avoid duplication)
- report_content = re.sub(r'^#\s*后端开发反思报告.*\n', '', report_content, flags=re.MULTILINE)
- frontend_content = re.sub(r'^#\s*前端开发反思报告.*\n', '', frontend_content, flags=re.MULTILINE)
-
-    # Extract student information
- student_id = student_info.get("student_id", "")
- student_name = student_info.get("name", "")
- class_name = student_info.get("class_name", "")
- commit_sha = student_info.get("commit_sha", "")
-
-    # Build the watermark text
- watermark_text = ""
- if student_id:
- watermark_id = generate_watermark_id(student_id, commit_sha)
- watermark_text = f"{student_id} · {watermark_id}"
-
-    # Build the HTML
-    html = f'''<!DOCTYPE html>
-<html lang="zh-CN">
-<head>
-    <meta charset="utf-8">
-    <title>Java程序设计 - 期末大作业报告</title>
-    <style>
-        {get_css_styles(watermark_text, commit_sha)}
-    </style>
-</head>
-<body>
-    {generate_cover_page(student_id, student_name, class_name)}
-    {generate_report_section("后端开发反思报告", report_content)}
-    {generate_report_section("前端开发反思报告", frontend_content, "🎨")}
-    {generate_grade_page(final_grade)}
-</body>
-</html>'''
-
- return html
-
-
-def convert_to_pdf(html_content, pdf_file, images_dir=None):
- """使用 weasyprint 生成 PDF"""
- if not HAS_PDF_SUPPORT:
- print("weasyprint not available", file=sys.stderr)
- return False
-
- try:
- font_config = FontConfiguration()
- base_url = os.path.abspath(images_dir) if images_dir else os.getcwd()
-
- HTML(string=html_content, base_url=base_url).write_pdf(
- pdf_file,
- font_config=font_config
- )
- return True
- except Exception as e:
- print(f"PDF generation error: {e}", file=sys.stderr)
- return False
-
-
-def main():
- parser = argparse.ArgumentParser(description="Generate professional PDF grade report")
- parser.add_argument("--report", default="REPORT.md", help="REPORT.md file path")
- parser.add_argument("--frontend", default="FRONTEND.md", help="FRONTEND.md file path")
- parser.add_argument("--grade", default="final_grade.json", help="Final grade JSON file")
- parser.add_argument("--images", default="images", help="Images directory")
- parser.add_argument("--out", default="grade_report.pdf", help="Output PDF file")
- parser.add_argument("--student-id", default="", help="Student ID")
- parser.add_argument("--student-name", default="", help="Student name")
- parser.add_argument("--class-name", default="", help="Class name")
- parser.add_argument("--commit-sha", default="", help="Commit SHA for watermark")
- args = parser.parse_args()
-
-    # Read student info from environment variables
- student_id = args.student_id or os.getenv("STUDENT_ID", "")
- student_name = args.student_name or os.getenv("STUDENT_NAME", "")
- class_name = args.class_name or os.getenv("CLASS_NAME", "")
- commit_sha = args.commit_sha or os.getenv("COMMIT_SHA", "")
-
-    # Extract the student ID from the repository name
- if not student_id:
- repo = os.getenv("REPO", "")
- match = re.search(r'-stu[_-]?st?(\d+)$', repo)
- if match:
- student_id = match.group(1)
- else:
- match = re.search(r'-stu[_-]([a-zA-Z0-9_]+)$', repo)
- if match:
- student_id = match.group(1)
-
- student_info = {
- "student_id": student_id,
- "name": student_name,
- "class_name": class_name,
- "commit_sha": commit_sha
- }
-
-    # Load the grade data
- final_grade = load_json(args.grade, {"total_score": 0, "max_score": 100, "breakdown": {}})
-
-    # Create the HTML
- html_content = create_full_html(args, final_grade, student_info)
-
-    # Save the HTML (useful for debugging)
- html_out = args.out.replace(".pdf", ".html")
- with open(html_out, "w", encoding="utf-8") as f:
- f.write(html_content)
-
-    # Generate the PDF
- if HAS_PDF_SUPPORT:
- if convert_to_pdf(html_content, args.out, args.images):
- print(f"✅ PDF report generated: {args.out}")
- return 0
- else:
- print(f"⚠️ PDF generation failed", file=sys.stderr)
- return 1
- else:
- print(f"ℹ️ weasyprint not installed, HTML saved: {html_out}")
- return 0
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/.autograde/grade.py b/.autograde/grade.py
deleted file mode 100644
index c95734b..0000000
--- a/.autograde/grade.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python3
-"""
-编程题评分脚本
-
-解析 JUnit XML 报告,计算分数,考虑迟交扣分,生成 grade.json 和 summary.md
-"""
-
-import argparse
-import xml.etree.ElementTree as ET
-import json
-import subprocess
-import os
-import time
-import sys
-from pathlib import Path
-from dotenv import load_dotenv
-
-# Load environment variables (from a .env file or the process environment)
-load_dotenv()
-
-
-def commit_ts():
- """获取最后一次提交的时间戳(Unix 时间戳)"""
- try:
- out = subprocess.check_output(
- ["git", "log", "-1", "--format=%ct"],
- stderr=subprocess.DEVNULL
- ).decode().strip()
- return int(out)
- except Exception:
- return int(time.time())
-
-
-def parse_junit(junit_path):
- """
- 解析 JUnit XML 报告
-
- Returns
- -------
- passed : int
- 通过的测试数
- total : int
- 总测试数
- fails : list
- 失败的测试名称列表
- """
- if not os.path.exists(junit_path):
- return (0, 0, [])
-
- try:
- root = ET.parse(junit_path).getroot()
- total = 0
- passed = 0
- fails = []
-
- for testsuite in root.iter("testsuite"):
- for testcase in testsuite.iter("testcase"):
- total += 1
-                # A failure, error, or skipped child element means the test did not pass
- if list(testcase):
- classname = testcase.get("classname", "")
- name = testcase.get("name", "")
- full_name = f"{classname}.{name}" if classname else name
- fails.append(full_name)
- else:
- passed += 1
-
- return (passed, total, fails)
- except Exception as e:
- print(f"Error parsing JUnit XML: {e}", file=sys.stderr)
- return (0, 0, [])
-
-
-def calculate_late_penalty(deadline_str):
- """
- 计算迟交扣分
-
- Parameters
- ----------
- deadline_str : str
- ISO 格式的截止时间(如 "2025-03-15T23:59:59+08:00")
-
- Returns
- -------
- penalty : float
- 扣分数(0-30)
- """
- if not deadline_str:
- return 0.0
-
- try:
-        # Parse the deadline (several formats supported)
-        deadline_str = deadline_str.strip()
-        # Drop timezone information (simplified handling)
- if '+' in deadline_str:
- deadline_str = deadline_str.split('+')[0]
- elif 'Z' in deadline_str:
- deadline_str = deadline_str.replace('Z', '')
-
-        # Parse the timestamp
- if 'T' in deadline_str:
- dl = time.mktime(time.strptime(deadline_str[:19], "%Y-%m-%dT%H:%M:%S"))
- else:
- dl = time.mktime(time.strptime(deadline_str[:19], "%Y-%m-%d %H:%M:%S"))
-
- commit_time = commit_ts()
- late_sec = max(0, commit_time - dl)
- days = late_sec / 86400
-
-        # Penalty rule: 10 points for the first day, 5 points per day thereafter, capped at 30
- if days > 0:
- penalty = min(30.0, 10.0 + 5.0 * days)
- else:
- penalty = 0.0
-
- return round(penalty, 2)
- except Exception as e:
- print(f"Error calculating late penalty: {e}", file=sys.stderr)
- return 0.0
-
-
-def main():
- parser = argparse.ArgumentParser(description="Grade programming assignments from JUnit XML")
- parser.add_argument("--junit", required=True, help="Path to JUnit XML file")
- parser.add_argument("--out", default="grade.json", help="Output JSON file")
- parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
- parser.add_argument("--bonus", default=None, help="Optional bonus file (e.g., lintr.rds)")
- args = parser.parse_args()
-
-    # Parse the JUnit XML
- passed, total, fails = parse_junit(args.junit)
-
-    # Compute the base score
- if total > 0:
- base_score = 100.0 * (passed / total)
- else:
- base_score = 0.0
-
-    # Compute the late penalty
- deadline = os.getenv("DEADLINE", "")
- penalty = calculate_late_penalty(deadline)
-
-    # Final score
- final_score = max(0.0, round(base_score - penalty, 2))
-
-    # Write grade.json
- grade_data = {
- "score": final_score,
- "base_score": round(base_score, 2),
- "penalty": penalty,
- "passed": passed,
- "total": total,
- "fails": fails,
- "timestamp": int(time.time())
- }
-
- with open(args.out, "w", encoding="utf-8") as f:
- json.dump(grade_data, f, ensure_ascii=False, indent=2)
-
-    # Write summary.md
- with open(args.summary, "w", encoding="utf-8") as f:
- f.write("# 成绩报告\n\n")
- f.write(f"- **通过用例**:{passed}/{total}\n")
- f.write(f"- **原始分**:{base_score:.2f}/100\n")
- if penalty > 0:
- f.write(f"- **迟交扣分**:-{penalty:.2f}\n")
- f.write(f"- **最终分**:**{final_score:.2f}/100**\n\n")
-
- if fails:
- f.write("## 未通过的测试\n\n")
- for fail in fails:
- f.write(f"- {fail}\n")
- f.write("\n")
-
- if deadline:
- f.write(f"## 截止时间\n\n")
- f.write(f"- 截止时间:{deadline}\n")
- commit_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(commit_ts()))
- f.write(f"- 提交时间:{commit_time_str}\n")
-
- print(f"Grading complete: {final_score:.2f}/100 ({passed}/{total} tests passed)")
-
-
-if __name__ == "__main__":
- main()
-
-
diff --git a/.autograde/grade_grouped.py b/.autograde/grade_grouped.py
deleted file mode 100644
index 9c7da3c..0000000
--- a/.autograde/grade_grouped.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python3
-"""
-分组编程题评分脚本
-
-解析 JUnit XML 报告,按测试分组(Core/Advanced/Challenge)计算加权分数
-"""
-
-import argparse
-import xml.etree.ElementTree as ET
-import json
-import os
-import re
-import sys
-from pathlib import Path
-from glob import glob
-
-
-def parse_junit_files(junit_dir):
- """
- 解析目录下所有 JUnit XML 报告
-
- Returns
- -------
- results : list of dict
- 每个测试的结果,包含 classname, name, passed
- """
- results = []
-
- xml_files = glob(os.path.join(junit_dir, "TEST-*.xml"))
- if not xml_files:
- xml_files = glob(os.path.join(junit_dir, "*.xml"))
-
- for xml_file in xml_files:
- try:
- root = ET.parse(xml_file).getroot()
-
- for testsuite in root.iter("testsuite"):
- for testcase in testsuite.iter("testcase"):
- classname = testcase.get("classname", "")
- name = testcase.get("name", "")
-
-                    # Check for failure, error, or skipped child elements
- failed = any(testcase.iter("failure")) or any(testcase.iter("error"))
- skipped = any(testcase.iter("skipped"))
-
- results.append({
- "classname": classname,
- "name": name,
- "passed": not failed and not skipped,
- "skipped": skipped
- })
- except Exception as e:
- print(f"Error parsing {xml_file}: {e}", file=sys.stderr)
-
- return results
-
-
-def load_groups_config(groups_file):
- """加载测试分组配置"""
- if not os.path.exists(groups_file):
-        # Default configuration
- return {
- "groups": {
- "core": {"pattern": ".*core.*", "weight": 0.75, "max_score": 60},
- "advanced": {"pattern": ".*advanced.*", "weight": 0.125, "max_score": 10},
- "challenge": {"pattern": ".*challenge.*", "weight": 0.125, "max_score": 10}
- },
- "fallback_group": "core"
- }
-
- with open(groups_file, "r", encoding="utf-8") as f:
- return json.load(f)
-
-
-def categorize_test(classname, groups_config):
- """根据 classname 将测试分类到对应的组"""
- for group_name, group_info in groups_config.get("groups", {}).items():
- pattern = group_info.get("pattern", "")
- if re.search(pattern, classname, re.IGNORECASE):
- return group_name
-
- return groups_config.get("fallback_group", "core")
-
-
-def calculate_grouped_score(test_results, groups_config):
- """
- 按分组计算加权分数
-
- Returns
- -------
- dict
- 包含各组得分和总分的字典
- """
- groups = groups_config.get("groups", {})
-
-    # Initialize per-group statistics
- group_stats = {}
- for group_name, group_info in groups.items():
- group_stats[group_name] = {
- "passed": 0,
- "total": 0,
- "max_score": group_info.get("max_score", 10),
- "weight": group_info.get("weight", 0.1),
- "tests": []
- }
-
-    # Categorize each test result and tally it
- for test in test_results:
- group = categorize_test(test["classname"], groups_config)
- if group not in group_stats:
- group = groups_config.get("fallback_group", "core")
-
- group_stats[group]["total"] += 1
- if test["passed"]:
- group_stats[group]["passed"] += 1
- else:
- group_stats[group]["tests"].append(f"{test['classname']}.{test['name']}")
-
-    # Compute each group's score
- total_score = 0
- group_scores = {}
-
- for group_name, stats in group_stats.items():
- if stats["total"] > 0:
- pass_rate = stats["passed"] / stats["total"]
- group_score = pass_rate * stats["max_score"]
- else:
- group_score = 0
-
- group_scores[group_name] = {
- "passed": stats["passed"],
- "total": stats["total"],
- "max_score": stats["max_score"],
- "score": round(group_score, 2),
- "failed_tests": stats["tests"][:10] # 只保留前 10 个失败测试
- }
-
- total_score += group_score
-
- return {
- "total_score": round(total_score, 2),
- "max_score": 80, # 编程测试总分 80 分(Core 60 + Advanced 10 + Challenge 10)
- "groups": group_scores
- }
-
-
-def main():
- parser = argparse.ArgumentParser(description="Grade programming assignments with test groups")
- parser.add_argument("--junit-dir", required=True, help="Directory containing JUnit XML files")
- parser.add_argument("--groups", default="test_groups.json", help="Test groups configuration file")
- parser.add_argument("--out", default="grade.json", help="Output JSON file")
- parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
- args = parser.parse_args()
-
-    # Parse the test results
- test_results = parse_junit_files(args.junit_dir)
-
- if not test_results:
- print("Warning: No test results found", file=sys.stderr)
- grade_data = {
- "total_score": 0,
- "max_score": 80,
- "groups": {},
- "error": "No test results found"
- }
- else:
-        # Load the group configuration
- groups_config = load_groups_config(args.groups)
-
-        # Compute the grouped score
- grade_data = calculate_grouped_score(test_results, groups_config)
-
-    # Save grade.json
- with open(args.out, "w", encoding="utf-8") as f:
- json.dump(grade_data, f, ensure_ascii=False, indent=2)
-
-    # Write summary.md
- with open(args.summary, "w", encoding="utf-8") as f:
- f.write("# 编程测试成绩报告\n\n")
- f.write(f"**总分:{grade_data['total_score']:.2f} / {grade_data['max_score']}**\n\n")
-
- f.write("## 分组得分\n\n")
- f.write("| 分组 | 通过 | 总数 | 得分 | 满分 |\n")
- f.write("|------|------|------|------|------|\n")
-
- for group_name, group_info in grade_data.get("groups", {}).items():
- f.write(f"| {group_name} | {group_info['passed']} | {group_info['total']} | "
- f"{group_info['score']:.2f} | {group_info['max_score']} |\n")
-
-        # List the failed tests
- all_failed = []
- for group_name, group_info in grade_data.get("groups", {}).items():
- all_failed.extend(group_info.get("failed_tests", []))
-
- if all_failed:
- f.write("\n## 未通过的测试\n\n")
-            for test in all_failed[:20]:  # show at most 20
- f.write(f"- {test}\n")
- if len(all_failed) > 20:
- f.write(f"\n... 还有 {len(all_failed) - 20} 个未通过的测试\n")
-
- print(f"Grading complete: {grade_data['total_score']:.2f}/{grade_data['max_score']}")
-
-
-if __name__ == "__main__":
- main()
-
diff --git a/.autograde/llm_grade.py b/.autograde/llm_grade.py
deleted file mode 100644
index 7d22602..0000000
--- a/.autograde/llm_grade.py
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/env python3
-"""
-LLM 简答题评分脚本
-
-调用 LLM API,按评分量表对简答题进行评分,输出 JSON 格式结果
-"""
-
-import os
-import json
-import argparse
-import requests
-import sys
-from pathlib import Path
-from dotenv import load_dotenv
-
-# Load environment variables (from a .env file or the process environment)
-load_dotenv()
-
-
-def read_file(path):
- """读取文件内容"""
- if os.path.exists(path):
- return open(path, 'r', encoding='utf-8').read()
- return ""
-
-
-def read_file_or_string(value):
- """
- 如果 value 是一个存在的文件路径,读取文件内容;
- 否则直接返回 value 作为字符串。
- """
- if os.path.exists(value):
- return open(value, 'r', encoding='utf-8').read()
-    return value  # treat it as a literal string
-
-
-PROMPT_TEMPLATE = """你是严格且一致的助教,按提供的评分量表为学生的简答题评分。
-
-评分规则:
-- 严格依据量表中各评分项的 scoring_guide 进行评分
-- 每个评分项只能给出 scoring_guide 中定义的整数分值(如 0, 1, 2, 3, 4)
-- 不输出任何解释性文本;只输出 JSON
-
-输出格式:
-{{
- "total": number (各项分数之和),
- "criteria": [
- {{"id": "评分项id", "score": 整数(必须是scoring_guide中定义的分值), "reason": "简短评语"}},
- ...
- ],
- "flags": [],
- "confidence": number(0-1, 评分置信度)
-}}
-
-重要:
-- score 必须是整数,只能是 scoring_guide 中定义的分值(如 0/1/2/3/4)
-- 不要给出 2.5, 3.5 这样的中间值
-- total 必须等于所有 criteria 的 score 之和
-- 如果答案与题目无关或为空,total=0,并加 flag "need_review"
-
-【题目】
-<<<{question}>>>
-
-【评分量表】
-<<<{rubric}>>>
-
-【学生答案】
-<<<{answer}>>>
-"""
-
-
-def call_llm(url, key, model, prompt):
- """
-    Call the LLM API.
-
-    Parameters
-    ----------
-    url : str
-        API endpoint
-    key : str
-        API key
-    model : str
-        Model name
-    prompt : str
-        Prompt text
-
-    Returns
-    -------
-    dict
-        The JSON object returned by the LLM
- """
- headers = {
- "Authorization": f"Bearer {key}",
- "Content-Type": "application/json"
- }
-
- data = {
- "model": model,
- "temperature": 0,
- "top_p": 1,
- "messages": [{"role": "user", "content": prompt}],
- "response_format": {"type": "json_object"}
- }
-
- try:
-        # Timeouts: 10 seconds to connect, 60 seconds to read
- response = requests.post(
- url,
- headers=headers,
- json=data,
- timeout=(10, 60)
- )
- response.raise_for_status()
- result = response.json()
- content = result.get("choices", [{}])[0].get("message", {}).get("content", "{}")
- return json.loads(content)
- except requests.exceptions.Timeout as e:
- print(f"LLM API request timeout: {e}", file=sys.stderr)
- raise
- except requests.exceptions.HTTPError as e:
- print(f"LLM API HTTP error: {e} (status: {response.status_code})", file=sys.stderr)
- raise
- except requests.exceptions.RequestException as e:
- print(f"LLM API request failed: {e}", file=sys.stderr)
- raise
- except json.JSONDecodeError as e:
- print(f"Failed to parse LLM response as JSON: {e}", file=sys.stderr)
- raise
-
-
-def main():
- parser = argparse.ArgumentParser(description="Grade short answer questions using LLM")
- parser.add_argument("--question", required=True, help="Path to question file")
- parser.add_argument("--answer", required=True, help="Path to answer file")
- parser.add_argument("--rubric", required=True, help="Path to rubric JSON file")
- parser.add_argument("--out", default="grade.json", help="Output JSON file")
- parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
- parser.add_argument("--model", default=os.getenv("LLM_MODEL", "deepseek-chat"))
- parser.add_argument("--api_url", default=os.getenv("LLM_API_URL", "https://api.deepseek.com/chat/completions"))
- parser.add_argument("--api_key", default=os.getenv("LLM_API_KEY", ""))
- args = parser.parse_args()
-
-    # Validate required configuration
- if not args.api_key:
- print("Warning: LLM_API_KEY not set. LLM grading may fail.", file=sys.stderr)
-
-    # Read files (or literal strings)
-    # question may be a file path or the question text itself
-    question = read_file_or_string(args.question).strip()
-    # answer and rubric must be file paths
- answer = read_file(args.answer).strip()
- rubric_text = read_file(args.rubric).strip()
-
- if not question or not answer:
- print(f"Warning: Empty question or answer file", file=sys.stderr)
- resp = {
- "total": 0,
- "criteria": [],
- "flags": ["need_review", "empty_answer"],
- "confidence": 0.0
- }
- else:
-        # Call the LLM
- try:
- prompt = PROMPT_TEMPLATE.format(
- question=question,
- rubric=rubric_text,
- answer=answer
- )
- resp = call_llm(args.api_url, args.api_key, args.model, prompt)
- except Exception as e:
- print(f"LLM grading failed: {e}", file=sys.stderr)
- resp = {
- "total": 0,
- "criteria": [],
- "flags": ["need_review", "llm_error"],
- "confidence": 0.0
- }
-
-    # Force per-criterion scores to integers and recompute the total
- criteria = resp.get("criteria", [])
- if criteria:
- for c in criteria:
-            # Round to an integer (the LLM may return fractions)
- score = c.get("score", 0)
- c["score"] = round(float(score))
-
-        # Recompute total as the sum of criterion scores
- calculated_total = sum(c.get("score", 0) for c in criteria)
- resp["total"] = calculated_total
-
-    # Auto-flag borderline totals for manual review
- try:
- rubric_data = json.loads(rubric_text)
- lo, hi = rubric_data.get("borderline_band", [None, None])
- total = float(resp.get("total", 0))
- flags = set(resp.get("flags", []))
-
- if lo is not None and hi is not None and lo <= total <= hi:
- flags.add("need_review")
-
-        # Low confidence also triggers review
- confidence = resp.get("confidence", 1.0)
- if confidence < 0.7:
- flags.add("need_review")
-
- resp["flags"] = sorted(list(flags))
- except Exception:
- pass
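-    # e.g. a rubric containing "borderline_band": [5, 7] routes any total between
-    # 5 and 7 (inclusive) to manual review (illustrative band values).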
-
-    # Save grade.json
- with open(args.out, "w", encoding="utf-8") as f:
- json.dump(resp, f, ensure_ascii=False, indent=2)
-
-    # Write summary.md
- try:
- rubric_data = json.loads(rubric_text)
- max_score = rubric_data.get("max_score", 10)
- except Exception:
- max_score = 10
-
- lines = [
- f"# 简答题评分",
- f"",
- f"- **总分**:**{resp.get('total', 0):.2f} / {max_score}**",
- f"- **置信度**:{resp.get('confidence', 0):.2f}",
- f"- **标记**:{', '.join(resp.get('flags', [])) or '无'}",
- f"",
- f"## 分项评分"
- ]
-
- for criterion in resp.get("criteria", []):
- criterion_id = criterion.get("id", "")
- score = criterion.get("score", 0)
- reason = criterion.get("reason", "")
- lines.append(f"- **{criterion_id}**: {score} 分")
- if reason:
- lines.append(f" - {reason}")
-
- with open(args.summary, "w", encoding="utf-8") as f:
- f.write("\n".join(lines))
-
- print(f"LLM grading complete: {resp.get('total', 0):.2f}/{max_score}")
-
-
-if __name__ == "__main__":
- main()
-
-
diff --git a/.autograde/objective_grade.py b/.autograde/objective_grade.py
deleted file mode 100644
index dec7a41..0000000
--- a/.autograde/objective_grade.py
+++ /dev/null
@@ -1,444 +0,0 @@
-#!/usr/bin/env python3
-"""
-选择题/判断题评分脚本
-
-读取学生答案和标准答案,生成成绩 JSON 文件
-"""
-
-import json
-import argparse
-import sys
-import time
-from datetime import datetime
-from pathlib import Path
-
-
-def load_answers(answer_file):
- """
-    Load the student answer file (JSON or simple text format).
-
-    JSON example:
-    {
-        "MC1": "A",
-        "MC2": "B",
-        "TF1": true,
-        "TF2": false
-    }
-
-    Text example (one answer per line):
-    A
-    B
-    true
-    false
- """
- try:
- with open(answer_file, 'r', encoding='utf-8') as f:
- content = f.read().strip()
-
-        # Try parsing as JSON first
- if content.startswith('{'):
- return json.loads(content)
-
-        # Otherwise read line by line, skipping blanks and comments
- lines = [line.strip() for line in content.split('\n') if line.strip() and not line.strip().startswith('#')]
-
-        # Convert to dict form: {"MC1": answer, "MC2": answer, ...}
- answers = {}
- for i, line in enumerate(lines, 1):
-            # Infer the question type from the answer
- if line.lower() in ('true', 'false', 't', 'f'):
- question_id = f"TF{len([k for k in answers if k.startswith('TF')])+1}"
- answers[question_id] = line.lower() in ('true', 't')
- else:
- question_id = f"MC{len([k for k in answers if k.startswith('MC')])+1}"
- answers[question_id] = line.upper()
-
- return answers
- except Exception as e:
- print(f"Error loading answers: {e}", file=sys.stderr)
- return {}
-
-
-def load_standard_answers(std_file):
- """加载标准答案文件(JSON 格式)"""
- try:
- with open(std_file, 'r', encoding='utf-8') as f:
- return json.load(f)
- except Exception as e:
- print(f"Error loading standard answers: {e}", file=sys.stderr)
- return {}
-
-
-def grade_multiple_choice(student_answers, standard_answers, question_texts=None):
- """
-    Grade multiple-choice questions.
-
-    Parameters
-    ----------
-    student_answers : dict
-        Student answers, e.g. {"MC1": "A", "MC2": "B", ...}
-    standard_answers : dict
-        Answer key, e.g. {"MC1": "A", "MC2": "B", ...}
-    question_texts : dict, optional
-        Question texts, e.g. {"MC1": "question text", ...}
-
-    Returns
-    -------
-    dict
-        Grade data
- """
- questions = []
- correct_count = 0
-
- for question_id, std_answer in standard_answers.items():
- if not question_id.startswith('MC'):
- continue
-
- student_answer = student_answers.get(question_id, "")
- is_correct = str(student_answer).upper() == str(std_answer).upper()
-
- if is_correct:
- correct_count += 1
- score = 1
- else:
- score = 0
-
- questions.append({
- "question_id": question_id,
- "question_text": question_texts.get(question_id, "") if question_texts else "",
- "correct_answer": str(std_answer).upper(),
- "student_answer": str(student_answer).upper(),
- "correct": is_correct,
- "score": score,
- "max_score": 1
- })
-
- total_count = len(questions)
-
- return {
- "type": "multiple_choice",
- "score": correct_count,
- "max_score": total_count,
- "details": {
- "correct": correct_count,
- "total": total_count,
- "questions": questions
- }
- }
-
-
-def grade_true_false(student_answers, standard_answers, question_texts=None):
- """
-    Grade true/false questions.
-
-    Parameters
-    ----------
-    student_answers : dict
-        Student answers, e.g. {"TF1": true, "TF2": false, ...}
-    standard_answers : dict
-        Answer key, e.g. {"TF1": true, "TF2": false, ...}
-    question_texts : dict, optional
-        Question texts
-
-    Returns
-    -------
-    dict
-        Grade data
- """
- questions = []
- correct_count = 0
-
- for question_id, std_answer in standard_answers.items():
- if not question_id.startswith('TF'):
- continue
-
- student_answer = student_answers.get(question_id, None)
-
-        # Normalize boolean values
- if isinstance(student_answer, str):
- student_answer = student_answer.lower() in ('true', 't', '1', 'yes')
-
- is_correct = bool(student_answer) == bool(std_answer)
-
- if is_correct:
- correct_count += 1
- score = 1
- else:
- score = 0
-
- questions.append({
- "question_id": question_id,
- "question_text": question_texts.get(question_id, "") if question_texts else "",
- "correct_answer": bool(std_answer),
- "student_answer": bool(student_answer) if student_answer is not None else None,
- "correct": is_correct,
- "score": score,
- "max_score": 1
- })
-
- total_count = len(questions)
-
- return {
- "type": "true_false",
- "score": correct_count,
- "max_score": total_count,
- "details": {
- "correct": correct_count,
- "total": total_count,
- "questions": questions
- }
- }
-
-
-def grade_multiple_select(student_answers, standard_answers, question_texts=None):
- """
-    Grade multiple-select questions.
-
-    Parameters
-    ----------
-    student_answers : dict
-        Student answers, e.g. {"MS1": ["A", "B"], "MS2": ["C"], ...}
-    standard_answers : dict
-        Answer key, e.g. {"MS1": ["A", "B"], "MS2": ["C"], ...}
-    question_texts : dict, optional
-        Question texts
-
-    Returns
-    -------
-    dict
-        Grade data
- """
- questions = []
- correct_count = 0
-
- for question_id, std_answer in standard_answers.items():
- if not question_id.startswith('MS'):
- continue
-
- student_answer = student_answers.get(question_id, [])
-
-        # Normalize answers (uppercase, compared as unordered sets)
- if isinstance(student_answer, str):
- student_answer = [student_answer]
- if not isinstance(student_answer, list):
- student_answer = []
-
- std_set = set([str(a).upper() for a in std_answer])
- stu_set = set([str(a).upper() for a in student_answer])
-
- is_correct = std_set == stu_set
-
- if is_correct:
- correct_count += 1
- score = 1
- else:
- score = 0
-
- questions.append({
- "question_id": question_id,
- "question_text": question_texts.get(question_id, "") if question_texts else "",
- "correct_answer": sorted(list(std_set)),
- "student_answer": sorted(list(stu_set)) if stu_set else [],
- "correct": is_correct,
- "score": score,
- "max_score": 1
- })
-
- total_count = len(questions)
-
- return {
- "type": "multiple_select",
- "score": correct_count,
- "max_score": total_count,
- "details": {
- "correct": correct_count,
- "total": total_count,
- "questions": questions
- }
- }
-
-
-def grade_fill_blank(student_answers, standard_answers, question_texts=None):
- """
-    Grade fill-in-the-blank questions.
-
-    Parameters
-    ----------
-    student_answers : dict
-        Student answers, e.g. {"FB1": "answer", "FB2": ["answer1", "answer2"], ...}
-    standard_answers : dict
-        Answer key, in the same format
-    question_texts : dict, optional
-        Question texts
-
-    Returns
-    -------
-    dict
-        Grade data
- """
- questions = []
- correct_count = 0
-
- def normalize_answer(ans):
- """规范化答案:去除空格、转小写"""
- if isinstance(ans, str):
- return ans.strip().lower()
- elif isinstance(ans, list):
- return [a.strip().lower() for a in ans]
- return ans
-
- def compare_answers(student, standard):
- """比较答案是否相等"""
- student_norm = normalize_answer(student)
- standard_norm = normalize_answer(standard)
-
- if isinstance(standard_norm, list) and isinstance(student_norm, list):
- return student_norm == standard_norm
- elif isinstance(standard_norm, str) and isinstance(student_norm, str):
- return student_norm == standard_norm
- return False
-
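-    # Illustrative matches: " Polymorphism " equals "polymorphism" after cleanup;
-    # list answers must match element-wise, in order, after the same normalization.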
- for question_id, std_answer in standard_answers.items():
- if not question_id.startswith('FB'):
- continue
-
- student_answer = student_answers.get(question_id, "")
-
- is_correct = compare_answers(student_answer, std_answer)
-
- if is_correct:
- correct_count += 1
- score = 1
- else:
- score = 0
-
- questions.append({
- "question_id": question_id,
- "question_text": question_texts.get(question_id, "") if question_texts else "",
- "correct_answer": std_answer,
- "student_answer": student_answer,
- "correct": is_correct,
- "score": score,
- "max_score": 1
- })
-
- total_count = len(questions)
-
- return {
- "type": "fill_blank",
- "score": correct_count,
- "max_score": total_count,
- "details": {
- "correct": correct_count,
- "total": total_count,
- "questions": questions
- }
- }
-
-
-def main():
- parser = argparse.ArgumentParser(description="Grade objective questions")
- parser.add_argument("--answers", required=True, help="Student answers file (JSON or text)")
- parser.add_argument("--standard", required=True, help="Standard answers file (JSON)")
- parser.add_argument("--questions", help="Question texts file (JSON, optional)")
- parser.add_argument("--out", default="grade.json", help="Output grade JSON file")
- parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
- parser.add_argument("--type", choices=['mc', 'tf', 'ms', 'fb', 'all'], default='all',
- help="Question type to grade")
-
- args = parser.parse_args()
-
- # Load input files
- student_answers = load_answers(args.answers)
- standard_answers = load_standard_answers(args.standard)
- question_texts = None
-
- if args.questions:
- try:
- with open(args.questions, 'r', encoding='utf-8') as f:
- question_texts = json.load(f)
- except Exception as e:
- print(f"Warning: Could not load question texts: {e}", file=sys.stderr)
-
- if not student_answers or not standard_answers:
- print("Error: Could not load answers", file=sys.stderr)
- sys.exit(1)
-
- # Grade each requested component
- components = []
- total_score = 0
- total_max_score = 0
-
- if args.type in ('mc', 'all'):
- mc_grade = grade_multiple_choice(student_answers, standard_answers, question_texts)
- if mc_grade['details']['total'] > 0:
- components.append(mc_grade)
- total_score += mc_grade['score']
- total_max_score += mc_grade['max_score']
-
- if args.type in ('tf', 'all'):
- tf_grade = grade_true_false(student_answers, standard_answers, question_texts)
- if tf_grade['details']['total'] > 0:
- components.append(tf_grade)
- total_score += tf_grade['score']
- total_max_score += tf_grade['max_score']
-
- if args.type in ('ms', 'all'):
- ms_grade = grade_multiple_select(student_answers, standard_answers, question_texts)
- if ms_grade['details']['total'] > 0:
- components.append(ms_grade)
- total_score += ms_grade['score']
- total_max_score += ms_grade['max_score']
-
- if args.type in ('fb', 'all'):
- fb_grade = grade_fill_blank(student_answers, standard_answers, question_texts)
- if fb_grade['details']['total'] > 0:
- components.append(fb_grade)
- total_score += fb_grade['score']
- total_max_score += fb_grade['max_score']
-
- # Write grade.json
- grade_data = {
- "score": total_score,
- "max_score": total_max_score,
- "components": components,
- "timestamp": int(__import__('time').time())
- }
-
- with open(args.out, 'w', encoding='utf-8') as f:
- json.dump(grade_data, f, ensure_ascii=False, indent=2)
-
- # Write summary.md
- summary_lines = [
- "# 客观题评分\n",
- f"- **总分**:{total_score} / {total_max_score}\n",
- f"- **组件数**:{len(components)}\n",
- ""
- ]
-
- for comp in components:
- comp_type = comp['type']
- correct = comp['details']['correct']
- total = comp['details']['total']
-
- type_names = {
- 'multiple_choice': '选择题',
- 'true_false': '判断题',
- 'multiple_select': '多选题',
- 'fill_blank': '填空题'
- }
-
- type_name = type_names.get(comp_type, comp_type)
- summary_lines.append(f"## {type_name}\n")
- summary_lines.append(f"- **正确**:{correct} / {total}\n")
- summary_lines.append("")
-
- with open(args.summary, 'w', encoding='utf-8') as f:
- f.write("\n".join(summary_lines))
-
- print(f"Grading complete: {total_score}/{total_max_score}")
- return 0
-
-
-if __name__ == "__main__":
- sys.exit(main())
-
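
To make the shape of the grader's output concrete, here is a minimal sketch of a consumer of the `grade.json` written above (the layout follows the `grade_data` dict in `main()`; the snippet itself is illustrative and not part of the repo):

```python
import json

# Read the grade file produced by: objective_grade.py --out grade.json
with open("grade.json", encoding="utf-8") as f:
    grade = json.load(f)

print(f"total: {grade['score']}/{grade['max_score']}")
for comp in grade["components"]:
    details = comp["details"]
    print(f"  {comp['type']}: {details['correct']}/{details['total']} correct")
    # Each question entry records the per-question comparison result
    for q in details["questions"]:
        mark = "ok" if q["correct"] else "X"
        print(f"    [{mark}] {q['question_id']}")
```
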
diff --git a/.autograde/post_comment.py b/.autograde/post_comment.py
deleted file mode 100644
index 4040b74..0000000
--- a/.autograde/post_comment.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python3
-"""
-Post a comment to a Gitea PR.
-
-Reads its configuration from environment variables and posts a comment to the
-given PR. Supports embedding JSON data in the Markdown comment so grades can
-be extracted in structured form later.
-"""
-
-import os
-import sys
-import json
-import requests
-from datetime import datetime
-
-
-def create_comment_with_metadata(summary, commit_sha, comment_type='grade', metadata=None):
- """
- Create the comment content with embedded metadata.
-
- Parameters
- ----------
- summary : str
- Human-readable Markdown summary
- commit_sha : str
- Commit SHA
- comment_type : str
- Comment type ('grade', 'llm', 'combined')
- metadata : dict, optional
- Structured grade data, embedded as JSON
-
- Returns
- -------
- str
- Full comment body (Markdown + JSON)
- """
- commit_short = commit_sha[:7] if commit_sha else 'unknown'
-
- # Pick the title and footer for this comment type
- if comment_type == 'llm':
- title = "🤖 LLM 简答题评分结果"
- footer = "*此评论由 Gitea Actions 自动生成(使用 DeepSeek API) | Commit: `{}`*"
- elif comment_type == 'combined':
- title = "📊 综合评分结果"
- footer = "*此评论由 Gitea Actions 自动生成 | Commit: `{}`*"
- else:
- title = "🤖 自动评分结果"
- footer = "*此评论由 Gitea Actions 自动生成 | Commit: `{}`*"
-
- # Assemble the comment
- parts = [
- f"## {title}",
- "",
- summary,
- ""
- ]
-
- # Embed the metadata as JSON, if provided
- if metadata:
- # Make sure the metadata carries a version and a timestamp
- if 'version' not in metadata:
- metadata['version'] = '1.0'
- if 'timestamp' not in metadata:
- metadata['timestamp'] = datetime.now().isoformat()
-
- # Embed the JSON in a fenced Markdown code block (reliable; Gitea keeps it verbatim)
- # and place it at the end of the comment, where students are unlikely to notice it
- json_str = json.dumps(metadata, ensure_ascii=False, indent=2)
- parts.extend([
- "",
- "---",
- "",
- "",
- "```json",
- json_str,
- "```",
- ""
- ])
-
- parts.append(footer.format(commit_short))
-
- return "\n".join(parts)
-
-
-def main():
- # Read configuration from environment variables
- api_url = os.environ.get('API_URL', '')
- repo = os.environ.get('REPO', '')
- pr_number = os.environ.get('PR_NUMBER', '')
- token = os.environ.get('GITEA_TOKEN', '')
- summary = os.environ.get('SUMMARY', '')
- commit_sha = os.environ.get('COMMIT_SHA', '')
- comment_type = os.environ.get('COMMENT_TYPE', 'grade')
-
- # Optional: read JSON metadata from the environment
- metadata_str = os.environ.get('GRADE_METADATA', '')
- metadata = None
- if metadata_str:
- try:
- metadata = json.loads(metadata_str)
- except json.JSONDecodeError as e:
- print(f"Warning: Failed to parse GRADE_METADATA: {e}", file=sys.stderr)
-
- # Validate required parameters
- if not all([api_url, repo, pr_number, token, summary]):
- print("Error: Missing required environment variables", file=sys.stderr)
- print(f"API_URL: {api_url}", file=sys.stderr)
- print(f"REPO: {repo}", file=sys.stderr)
- print(f"PR_NUMBER: {pr_number}", file=sys.stderr)
- print(f"GITEA_TOKEN: {'set' if token else 'not set'}", file=sys.stderr)
- print(f"SUMMARY: {'set' if summary else 'not set'}", file=sys.stderr)
- sys.exit(1)
-
- # Build the comment body (including metadata)
- comment_body = create_comment_with_metadata(
- summary=summary,
- commit_sha=commit_sha,
- comment_type=comment_type,
- metadata=metadata
- )
-
- # Build the API URL
- comment_url = f"{api_url}/repos/{repo}/issues/{pr_number}/comments"
-
- # Send the request
- headers = {
- "Authorization": f"token {token}",
- "Content-Type": "application/json"
- }
-
- data = {"body": comment_body}
-
- try:
- print(f"Posting comment to: {comment_url}")
- if metadata:
- print("✓ Comment includes structured metadata")
- response = requests.post(comment_url, headers=headers, json=data, timeout=30)
- response.raise_for_status()
- print("✅ Comment posted successfully to PR")
- return 0
- except requests.exceptions.Timeout:
- print("⚠️ Request timeout", file=sys.stderr)
- return 1
- except requests.exceptions.HTTPError as e:
- print(f"⚠️ HTTP error: {e}", file=sys.stderr)
- if e.response is not None:
- print(f"Response: {e.response.text}", file=sys.stderr)
- return 1
- except Exception as e:
- print(f"⚠️ Failed to post comment: {e}", file=sys.stderr)
- return 1
-
-
-if __name__ == "__main__":
- sys.exit(main())
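
Because the JSON rides in a fenced `json` code block at the end of the comment body, a downstream collector can recover it with a small regex. A minimal sketch (fetching the comment text from the Gitea API is out of scope here):

```python
import json
import re

def extract_metadata(comment_body: str) -> dict | None:
    """Pull the embedded JSON metadata back out of a grading comment."""
    blocks = re.findall(r"```json\s*\n(.*?)\n```", comment_body, re.DOTALL)
    if not blocks:
        return None
    try:
        # create_comment_with_metadata appends the metadata block last
        return json.loads(blocks[-1])
    except json.JSONDecodeError:
        return None
```
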
diff --git a/.autograde/run_tests.py b/.autograde/run_tests.py
deleted file mode 100644
index 9726f92..0000000
--- a/.autograde/run_tests.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env python3
-"""
-Generic test runner - runs the configured language's tests and emits JUnit XML.
-
-Supported languages:
-- python: pytest
-- java: maven (mvn test)
-- r: testthat (via JunitReporter)
-
-Command-line arguments (the script is driven by argparse, not env vars):
-- --language: programming language (python/java/r)
-- --test-dir: test directory path
-- --source-dir: source directory path (optional, enables coverage)
-"""
-
-import argparse
-import subprocess
-import sys
-import os
-from pathlib import Path
-
-
-def run_python_tests(test_dir, output_xml, **kwargs):
- """运行 Python pytest 测试"""
- cmd = [
- "pytest", test_dir,
- f"--junit-xml={output_xml}",
- "-v", "--tb=short"
- ]
-
- # Add coverage options if a source dir was given
- source_dir = kwargs.get('source_dir')
- if source_dir:
- cmd.extend([
- f"--cov={source_dir}",
- "--cov-report=term-missing",
- "--cov-report=json:coverage.json"
- ])
-
- print(f"Running: {' '.join(cmd)}")
- result = subprocess.run(cmd, capture_output=False)
- return result
-
-
-def run_java_tests(test_dir, output_xml, **kwargs):
- """运行 Java Maven 测试"""
- cmd = ["mvn", "test", "-B"]
-
- print(f"Running: {' '.join(cmd)}")
- result = subprocess.run(cmd, capture_output=False)
-
- # Maven writes its XML reports to target/surefire-reports/;
- # copy them to the requested output location
- surefire_dir = Path("target/surefire-reports")
- if surefire_dir.exists():
- # Collect all TEST-*.xml files
- import xml.etree.ElementTree as ET
-
- xml_files = list(surefire_dir.glob("TEST-*.xml"))
- if xml_files:
- # Single report: copy it as-is; otherwise merge
- import shutil
- if len(xml_files) == 1:
- shutil.copy(xml_files[0], output_xml)
- else:
- # Merge multiple XML files (simplified)
- root = ET.Element("testsuites")
- for xml_file in xml_files:
- tree = ET.parse(xml_file)
- root.append(tree.getroot())
-
- tree = ET.ElementTree(root)
- tree.write(output_xml, encoding='utf-8', xml_declaration=True)
-
- return result
-
-
-def run_r_tests(test_dir, output_xml, **kwargs):
- """运行 R testthat 测试"""
- # R script: use testthat's JunitReporter
- # note: requires testthat (>= 3.0.0)
-
- r_script = f"""
-library(testthat)
-
-# Configure the JUnit reporter
-reporter <- JunitReporter$new(file = '{output_xml}')
-
-# Run the tests
-test_dir(
- path = '{test_dir}',
- reporter = reporter,
- stop_on_failure = FALSE
-)
-"""
-
- # Write the script to a temporary file
- import tempfile
- with tempfile.NamedTemporaryFile(mode='w', suffix='.R', delete=False) as f:
- f.write(r_script)
- script_path = f.name
-
- try:
- cmd = ["Rscript", script_path]
- print(f"Running: {' '.join(cmd)}")
- result = subprocess.run(cmd, capture_output=False)
- return result
- finally:
- # Clean up the temporary file
- if os.path.exists(script_path):
- os.remove(script_path)
-
-
-def main():
- parser = argparse.ArgumentParser(
- description="通用测试运行器 - 支持 Python/Java/R"
- )
- parser.add_argument(
- "--language",
- required=True,
- choices=["python", "java", "r"],
- help="编程语言"
- )
- parser.add_argument(
- "--test-dir",
- required=True,
- help="测试目录路径"
- )
- parser.add_argument(
- "--output-xml",
- default="test-results.xml",
- help="JUnit XML 输出文件路径"
- )
- parser.add_argument(
- "--source-dir",
- help="源代码目录(用于覆盖率)"
- )
-
- args = parser.parse_args()
-
- # Runner for each language
- runners = {
- "python": run_python_tests,
- "java": run_java_tests,
- "r": run_r_tests,
- }
-
- if args.language not in runners:
- print(f"❌ Unsupported language: {args.language}", file=sys.stderr)
- sys.exit(1)
-
- # Run the tests
- result = runners[args.language](
- args.test_dir,
- args.output_xml,
- source_dir=args.source_dir
- )
-
- sys.exit(result.returncode)
-
-
-if __name__ == "__main__":
- main()
-
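
All three runners converge on a single JUnit XML report. For reference, tallying such a report comes down to counting `testcase` elements without `failure`/`error` children; a minimal sketch (the real `grade.py` is not shown in this diff):

```python
import xml.etree.ElementTree as ET

def tally_junit(path: str) -> tuple[int, int]:
    """Count (passed, total) test cases in a JUnit XML report."""
    root = ET.parse(path).getroot()
    passed = total = 0
    # Works whether the root is <testsuites> or a single <testsuite>
    for case in root.iter("testcase"):
        total += 1
        # A case passes when it has neither a <failure> nor an <error> child
        # (skipped tests are not special-cased in this sketch)
        if case.find("failure") is None and case.find("error") is None:
            passed += 1
    return passed, total

if __name__ == "__main__":
    passed, total = tally_junit("test-results.xml")
    print(f"{passed}/{total} tests passed")
```
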
diff --git a/.autograde/test_objective_grade.sh b/.autograde/test_objective_grade.sh
deleted file mode 100644
index f233eaa..0000000
--- a/.autograde/test_objective_grade.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-# Tests for the objective-question grading script
-
-set -e
-
-echo "=== 测试客观题评分脚本 ==="
-
-cd "$(dirname "$0")/.."
-
-# Test 1: JSON-format answers (all correct)
-echo ""
-echo "Test 1: JSON answers (all correct)"
-python3 ./.autograde/objective_grade.py \
- --answers objective_questions/standard_answers.json \
- --standard objective_questions/standard_answers.json \
- --questions objective_questions/question_texts.json \
- --out test_grade1.json \
- --summary test_summary1.md \
- --type all
-
-echo "分数:"
-python3 -c "import json; data=json.load(open('test_grade1.json')); print(f\"{data['score']}/{data['max_score']}\")"
-
-echo ""
-echo "摘要:"
-cat test_summary1.md
-
-# Test 2: partially wrong answers
-echo ""
-echo "Test 2: partially wrong answers"
-cat > test_answers2.json << 'EOF'
-{
- "MC1": "A",
- "MC2": "A",
- "MC3": "C",
- "MC4": "B",
- "MC5": "C",
- "TF1": true,
- "TF2": false,
- "TF3": true,
- "TF4": true,
- "TF5": false
-}
-EOF
-
-python3 ./.autograde/objective_grade.py \
- --answers test_answers2.json \
- --standard objective_questions/standard_answers.json \
- --questions objective_questions/question_texts.json \
- --out test_grade2.json \
- --summary test_summary2.md \
- --type all
-
-echo "分数:"
-python3 -c "import json; data=json.load(open('test_grade2.json')); print(f\"{data['score']}/{data['max_score']}\")"
-
-echo ""
-echo "摘要:"
-cat test_summary2.md
-
-# Test 3: multiple-choice questions only
-echo ""
-echo "Test 3: multiple-choice only"
-python3 ./.autograde/objective_grade.py \
- --answers objective_questions/standard_answers.json \
- --standard objective_questions/standard_answers.json \
- --questions objective_questions/question_texts.json \
- --out test_grade3.json \
- --summary test_summary3.md \
- --type mc
-
-echo "分数:"
-python3 -c "import json; data=json.load(open('test_grade3.json')); print(f\"{data['score']}/{data['max_score']}\")"
-
-# Test 4: true/false questions only
-echo ""
-echo "Test 4: true/false only"
-python3 ./.autograde/objective_grade.py \
- --answers objective_questions/standard_answers.json \
- --standard objective_questions/standard_answers.json \
- --questions objective_questions/question_texts.json \
- --out test_grade4.json \
- --summary test_summary4.md \
- --type tf
-
-echo "分数:"
-python3 -c "import json; data=json.load(open('test_grade4.json')); print(f\"{data['score']}/{data['max_score']}\")"
-
-# Clean up test artifacts
-rm -f test_grade*.json test_summary*.md test_answers*.json
-
-echo ""
-echo "✅ 所有测试通过!"
-
-
diff --git a/.autograde/upload_metadata.py b/.autograde/upload_metadata.py
deleted file mode 100644
index 8341ac5..0000000
--- a/.autograde/upload_metadata.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python3
-"""
-Upload metadata.json to teacher-only repository via Gitea API.
-"""
-
-import argparse
-import base64
-import json
-import os
-import sys
-import urllib.error
-import urllib.request
-from pathlib import Path
-from urllib.parse import urlparse
-
-
-def detect_host(server_url: str, external_host: str | None) -> str:
- """Detect the Gitea host to use for API calls.
-
- If server_url uses internal name (like 'gitea'), use external_host instead.
- """
- parsed = urlparse(server_url)
- raw_host = parsed.netloc or parsed.path.split("/")[0]
- host = raw_host
- if raw_host.lower().startswith("gitea"):
- if not external_host:
- raise ValueError(
- f"Server URL uses internal name '{raw_host}' but EXTERNAL_GITEA_HOST is not set. "
- "Please configure EXTERNAL_GITEA_HOST in .env and run sync_runner_config.sh"
- )
- host = external_host
- return host
-
-
-def main() -> int:
- parser = argparse.ArgumentParser(description="Upload metadata.json to course metadata repo")
- parser.add_argument("--metadata-file", required=True)
- parser.add_argument("--metadata-repo", required=True, help="owner/repo of metadata store")
- parser.add_argument("--branch", default="main")
- parser.add_argument("--student-repo", required=True)
- parser.add_argument("--run-id", required=True)
- parser.add_argument("--commit-sha", required=True)
- parser.add_argument("--workflow", required=True, choices=["grade", "objective", "llm"])
- parser.add_argument("--server-url", required=True)
- parser.add_argument("--external-host")
- parser.add_argument("--assignment-id", help="Assignment ID (e.g., hw1)")
- args = parser.parse_args()
-
- token = os.environ.get("METADATA_TOKEN")
- if not token:
- print("METADATA_TOKEN is not set", file=sys.stderr)
- return 1
-
- path = Path(args.metadata_file)
- if not path.is_file():
- print(f"metadata file not found: {path}", file=sys.stderr)
- return 0
-
- try:
- owner, repo_name = args.metadata_repo.split("/", 1)
- except ValueError:
- print(f"Invalid metadata repo: {args.metadata_repo}", file=sys.stderr)
- return 1
-
- # Use the student repo name as the per-student key
- # (format: hw1-stu_20250001 or hw1-stu_student1)
- student_id = args.student_repo.split("/")[-1] # e.g. "hw1-stu_20250001"
-
- # Auto-detect assignment ID from student repo if not provided
- assignment_id = args.assignment_id
- if not assignment_id:
- # Try to extract from student_repo format: hw1-stu_xxx
- repo_name_part = args.student_repo.split("/")[-1]
- if "-stu_" in repo_name_part:
- assignment_id = repo_name_part.split("-stu_")[0]
- elif "-template" in repo_name_part:
- assignment_id = repo_name_part.split("-template")[0]
- elif "-tests" in repo_name_part:
- assignment_id = repo_name_part.split("-tests")[0]
- else:
- assignment_id = "unknown"
-
- # New path structure: {assignment_id}/{student_id}/{workflow}_{run_id}_{sha}.json
- target_path = f"{assignment_id}/{student_id}/{args.workflow}_{args.run_id}_{args.commit_sha[:7]}.json"
-
- host = detect_host(args.server_url, args.external_host)
- api_url = f"http://{host}/api/v1/repos/{owner}/{repo_name}/contents/{target_path}"
- message = f"Upload {args.workflow} metadata for {args.student_repo} {args.commit_sha}"
-
- # Check if file exists to determine if we need to update (PUT) or create (POST)
- get_req = urllib.request.Request(
- api_url,
- headers={"Authorization": f"token {token}"},
- method="GET"
- )
-
- sha = None
- try:
- with urllib.request.urlopen(get_req) as resp:
- existing_file = json.loads(resp.read().decode())
- # API may return a list (directory contents) or dict (single file)
- if isinstance(existing_file, dict):
- sha = existing_file.get("sha")
- print(f"File exists, updating (sha: {sha})")
- elif isinstance(existing_file, list):
- # Response is a directory listing, file doesn't exist at this exact path
- print(f"Path is a directory or file not found in expected format")
- else:
- print(f"Unexpected response type: {type(existing_file)}")
- except urllib.error.HTTPError as e:
- if e.code != 404:
- print(f"Error checking file existence: {e}", file=sys.stderr)
- return 1
- # File doesn't exist, proceed with creation
-
- content = base64.b64encode(path.read_bytes()).decode()
- payload = {
- "content": content,
- "message": message,
- "branch": args.branch
- }
-
- if sha:
- payload["sha"] = sha
-
- data = json.dumps(payload).encode()
-
- req = urllib.request.Request(
- api_url,
- data=data,
- headers={
- "Authorization": f"token {token}",
- "Content-Type": "application/json",
- },
- method="PUT" if sha else "POST",
- )
-
- try:
- with urllib.request.urlopen(req, timeout=30) as resp:
- resp_body = resp.read().decode()
- print(resp_body)
- except urllib.error.HTTPError as exc:
- print(f"Metadata upload failed: {exc.status} {exc.reason}", file=sys.stderr)
- print(exc.read().decode(), file=sys.stderr)
- return 1
- except urllib.error.URLError as exc:
- print(f"Metadata upload failed: {exc}", file=sys.stderr)
- return 1
-
- print(f"✅ Metadata stored at {args.metadata_repo}:{target_path}")
- return 0
-
-
-if __name__ == "__main__":
- raise SystemExit(main())
-
-
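
For the reverse direction (a teacher pulling a stored metadata file back down), the same contents API returns the file base64-encoded. A minimal sketch mirroring the upload code above, with placeholder host, repo, path, and token values:

```python
import base64
import json
import urllib.request

def fetch_metadata(host: str, repo: str, path: str, token: str) -> dict:
    """Download and decode one metadata JSON file via the Gitea contents API."""
    url = f"http://{host}/api/v1/repos/{repo}/contents/{path}"
    req = urllib.request.Request(url, headers={"Authorization": f"token {token}"})
    with urllib.request.urlopen(req, timeout=30) as resp:
        payload = json.loads(resp.read().decode())
    # File bodies come back base64-encoded, matching what the upload sent
    return json.loads(base64.b64decode(payload["content"]).decode("utf-8"))

# Example (placeholder values):
# meta = fetch_metadata("gitea.example.com", "teacher/metadata-store",
#                       "hw1/hw1-stu_20250001/grade_42_abc1234.json", "TOKEN")
```
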
diff --git a/.autograde/workflow_templates/README.md b/.autograde/workflow_templates/README.md
deleted file mode 100644
index 4f42a13..0000000
--- a/.autograde/workflow_templates/README.md
+++ /dev/null
@@ -1,216 +0,0 @@
-# Workflow Templates
-
-This directory contains Gitea Actions workflow templates for different programming languages.
-
-## Available templates
-
-| File | Language | Container | Test framework |
-|------|----------|-----------|----------------|
-| `python.yml` | Python | python:3.11 | pytest |
-| `java.yml` | Java | gradle:9.0-jdk21 | JUnit 5 |
-| `r.yml` | R | r-base:4.3.1 | testthat |
-
-## Usage
-
-### 1. Pick a template
-
-Choose the template that matches your language:
-
-```bash
-# Python assignments
-cp .autograde/workflow_templates/python.yml .gitea/workflows/grade.yml
-
-# Java assignments
-cp .autograde/workflow_templates/java.yml .gitea/workflows/grade.yml
-
-# R assignments
-cp .autograde/workflow_templates/r.yml .gitea/workflows/grade.yml
-```
-
-### 2. Customize
-
-Edit `.gitea/workflows/grade.yml` as needed:
-
-- **Container version**: change the `container:` field
-- **Timeout**: change `timeout-minutes:`
-- **Dependency installation**: adjust the "Install dependencies" step
-- **Test command**: adjust the test-run step
-
-### 3. Configure secrets
-
-Make sure the following secrets are configured in the Gitea repository settings:
-
-- `TESTS_TOKEN`: token for accessing the hidden tests repository (optional)
-- `EXTERNAL_GITEA_HOST`: externally reachable Gitea address (optional)
-
-## Python template (python.yml)
-
-### Highlights
-- Uses the `python:3.11` container
-- Installs dependencies from `requirements.txt` automatically
-- Runs pytest with JUnit XML output
-- Supports code coverage
-
-### Customization
-```yaml
-# Change the Python version
-container: python:3.10 # or python:3.9
-
-# Add extra dependencies
-- name: Install dependencies
- run: |
- pip install -r requirements.txt
- pip install numpy pandas # extra packages
-```
-
-## Java template (java.yml)
-
-### Highlights
-- Uses the `gradle:9.0-jdk21` container
-- Gradle resolves dependencies via the project build file
-- JUnit 5 test framework
-- Extracts JUnit XML reports from `build/test-results/test/`
-
-### Customization
-```yaml
-# Change the JDK via the container tag
-container: gradle:9.0-jdk21
-
-# Customize the Gradle command
-run: |
- gradle clean test --no-daemon
-```
-
-### Maven note
-
-If your assignment builds with Maven instead (`run_tests.py` also supports `mvn test`), make sure `pom.xml` configures the Surefire plugin so XML reports land where the runner expects them:
-
-```xml
-<plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <version>3.2.2</version>
- <configuration>
- <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
- </configuration>
-</plugin>
-```
-
-## R template (r.yml)
-
-### Highlights
-- Uses the `r-base:4.3.1` container
-- Installs `testthat`, `covr`, and `xml2` from a CRAN mirror
-- testthat test framework
-- JUnit XML output via `JunitReporter`
-
-### Customization
-```yaml
-# Change the R version
-container: r-base:4.2 # or another version
-
-# Change the CRAN mirror
-run: |
- Rscript -e "install.packages('testthat', repos='https://cran.r-project.org/')"
-```
-
-### Expected R project layout
-
-```
-project/
-├── DESCRIPTION # package dependency definitions
-├── R/ # R source code
-└── tests/
- └── testthat/ # testthat tests
-```
-
-## Common workflow structure
-
-All templates follow the same sequence:
-
-1. **Install system dependencies** (git, rsync, etc.)
-2. **Check out code** - clone the student repository
-3. **Install language dependencies** - per-language packages
-4. **Fetch hidden tests** (optional) - pulled from a private repository
-5. **Run tests** - produce JUnit XML
-6. **Grade** - parse the XML and compute the score
-7. **Generate metadata** - create JSON metadata
-8. **Post a comment** - publish the results on the PR
-
-## Advanced configuration
-
-### Add code-quality checks
-
-```yaml
-- name: Run linter
- run: |
- # Python: pylint, flake8
- pip install pylint
- pylint src/
-
- # Java: checkstyle
- mvn checkstyle:check
-
- # R: lintr
- Rscript -e "lintr::lint_package()"
-```
-
-### Custom grading rules
-
-Adjust the arguments passed to `grade.py`:
-
-```yaml
-- name: Grade
- run: |
- python3 ./.autograde/grade.py \
- --junit junit.xml \
- --out grade.json \
- --summary summary.md \
- --bonus bonus.json # optional bonus items
-```
-
-### Multiple test suites
-
-Run the suites separately, then merge the reports before grading (a sketch of the merge idea follows this block):
-
-```yaml
-- name: Run public tests
- run: |
- pytest tests_public/ --junit-xml=public.xml
-
-- name: Run hidden tests
- run: |
- pytest tests_hidden/ --junit-xml=hidden.xml
-
-- name: Merge test results
- run: |
- python3 ./.autograde/merge_junit.py public.xml hidden.xml -o junit.xml
-```
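-
-`merge_junit.py` is referenced but not shown in this diff; a minimal sketch of the
-merge idea (the same pattern the Java runner uses for Surefire reports) could be:
-
-```python
-import sys
-import xml.etree.ElementTree as ET
-
-# Illustrative only: wrap each input <testsuite> under one <testsuites> root
-# and write the combined report (output name hardcoded for brevity).
-root = ET.Element("testsuites")
-for path in sys.argv[1:]:
- root.append(ET.parse(path).getroot())
-ET.ElementTree(root).write("junit.xml", encoding="utf-8", xml_declaration=True)
-```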
-
-## Troubleshooting
-
-### Tests do not run
-- Check that the test directory path is correct
-- Confirm dependencies installed correctly
-- Inspect the error messages in the Actions log
-
-### No JUnit XML generated
-- Python: make sure the pytest command includes `--junit-xml`
-- Java: check the Surefire plugin configuration (Maven) or `build/test-results/` (Gradle)
-- R: confirm testthat >= 3.0.0
-
-### Empty metadata
-- Check whether `grade.json` was generated
-- Confirm the `LANGUAGE` environment variable is set correctly
-- Inspect the output of `create_minimal_metadata.py`
-
-## Related documents
-
-- [Test runner](../run_tests.py) - generic test runner
-- [Grading script](../grade.py) - JUnit XML parsing and scoring
-- [Metadata generation](../create_minimal_metadata.py) - JSON metadata
-- [Examples](../../examples/) - complete examples per language
-
----
-
-Last updated: 2025-11-13
-
diff --git a/.autograde/workflow_templates/java.yml b/.autograde/workflow_templates/java.yml
deleted file mode 100644
index 3890e01..0000000
--- a/.autograde/workflow_templates/java.yml
+++ /dev/null
@@ -1,200 +0,0 @@
-name: autograde-java
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-permissions:
- contents: read
- pull-requests: write
-
-jobs:
- java:
- runs-on: docker
- container:
- image: gradle:9.0-jdk21
- options: --user root
- timeout-minutes: 20
-
- steps:
- - name: Install dependencies (CN mirror)
- run: |
- set -e
- # Switch Debian/Ubuntu apt sources to the Tencent Cloud mirror
- for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
- [ -f "$f" ] || continue
- sed -i -E 's|https?://deb.debian.org|http://mirrors.cloud.tencent.com|g' "$f" || true
- sed -i -E 's|https?://security.debian.org|http://mirrors.cloud.tencent.com/debian-security|g' "$f" || true
- sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
- sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
- done
- apt-get -o Acquire::Check-Valid-Until=false -o Acquire::AllowInsecureRepositories=true update -y
- DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip nodejs rsync
- pip3 install --break-system-packages python-dotenv requests -i https://mirrors.cloud.tencent.com/pypi/simple
- rm -rf /var/lib/apt/lists/*
-
- - name: Checkout code
- uses: actions/checkout@v3
- with:
- fetch-depth: 1
-
- - name: Fix permissions
- run: |
- # Ensure workspace is owned by current user
- chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
-
- - name: Fetch hidden tests (if available)
- working-directory: ${{ github.workspace }}
- env:
- EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
- run: |
- set -e
-
- TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
- TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
-
- if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
- echo "Warning: RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN not set, skipping private tests"
- exit 0
- fi
-
- # Resolve Gitea Host
- if [ -n "$EXTERNAL_GITEA_HOST" ]; then
- HOST="$EXTERNAL_GITEA_HOST"
- elif [ -n "$GITEA_ROOT_URL" ]; then
- HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
- else
- HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
- fi
-
- echo "📥 Fetching private tests repository..."
- echo " Gitea host: $HOST"
-
- # Infer organization and assignment ID from repository name
- ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
- REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
-
- # Extract assignment ID from repo name (e.g., hw2-stu_xxx -> hw2, hw2-template -> hw2)
- if echo "$REPO_NAME" | grep -q -- '-stu_'; then
- ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
- elif echo "$REPO_NAME" | grep -q -- '-template'; then
- ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
- else
- ASSIGNMENT_ID="hw1" # fallback
- fi
-
- echo " Organization: $ORG"
- echo " Assignment ID: $ASSIGNMENT_ID"
-
- # Clone private test repository
- AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
-
- if ! git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests 2>&1; then
- echo "❌ Failed to clone ${ASSIGNMENT_ID}-tests repository!"
- exit 1
- fi
-
- # Verify test directory exists
- if [ ! -d "_priv_tests/java" ]; then
- echo "❌ java/ directory not found in ${ASSIGNMENT_ID}-tests!"
- exit 1
- fi
-
- if [ ! -d "_priv_tests/java/src/test" ]; then
- echo "❌ java/src/test/ not found in ${ASSIGNMENT_ID}-tests!"
- exit 1
- fi
-
- # Copy tests to src/test/
- rsync -a _priv_tests/java/src/test/ src/test/
- echo "✅ Private tests copied: _priv_tests/java/src/test/ → src/test/"
-
- - name: Run tests using Gradle
- working-directory: ${{ github.workspace }}
- env:
- LANGUAGE: java
- run: |
- gradle test --no-daemon || true
-
- - name: Extract test results
- working-directory: ${{ github.workspace }}
- run: |
- # Find JUnit XML report
- XML_REPORT=$(find build/test-results/test -name "TEST-*.xml" | head -n 1)
- if [ -n "$XML_REPORT" ]; then
- cp "$XML_REPORT" junit.xml
- echo "✅ Found JUnit report: $XML_REPORT"
- else
- echo "⚠️ No JUnit report found!"
- touch junit.xml
- fi
-
- - name: Grade
- working-directory: ${{ github.workspace }}
- env:
- LANGUAGE: java
- run: |
- python3 ./.autograde/grade.py --junit junit.xml --out grade.json --summary summary.md
-
- - name: Prepare artifacts
- working-directory: ${{ github.workspace }}
- run: |
- mkdir -p artifacts
- cp junit.xml summary.md grade.json artifacts/ 2>/dev/null || true
-
- - name: Create grade metadata
- working-directory: ${{ github.workspace }}
- env:
- LANGUAGE: java
- REPO: ${{ github.repository }}
- run: |
- if [ ! -f grade.json ]; then
- echo "⚠️ grade.json not found, skipping metadata creation"
- exit 0
- fi
-
- # Generate JSON metadata
- if [ -f ./.autograde/create_grade_metadata.py ]; then
- python3 ./.autograde/create_grade_metadata.py > metadata.json || echo "{}" > metadata.json
- echo "✅ Grade metadata created (using create_grade_metadata.py)"
- elif [ -f ./.autograde/create_minimal_metadata.py ]; then
- export GRADE_TYPE=programming
- python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
- echo "✅ Grade metadata created (using create_minimal_metadata.py)"
- else
- echo "⚠️ No metadata creation script found, skipping"
- echo "{}" > metadata.json
- fi
-
- - name: Upload metadata (teacher only)
- if: env.RUNNER_METADATA_REPO != '' && env.RUNNER_METADATA_TOKEN != ''
- working-directory: ${{ github.workspace }}
- shell: bash
- env:
- METADATA_REPO: ${{ env.RUNNER_METADATA_REPO }}
- METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
- METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
- STUDENT_REPO: ${{ github.repository }}
- RUN_ID: ${{ github.run_id }}
- COMMIT_SHA: ${{ github.sha }}
- SERVER_URL: ${{ github.server_url }}
- run: |
- set -e
- if [ ! -f metadata.json ]; then
- echo "No metadata.json found, skip uploading."
- exit 0
- fi
-
- python3 ./.autograde/upload_metadata.py \
- --metadata-file metadata.json \
- --metadata-repo "${METADATA_REPO}" \
- --branch "${METADATA_BRANCH:-main}" \
- --student-repo "${STUDENT_REPO}" \
- --run-id "${RUN_ID}" \
- --commit-sha "${COMMIT_SHA}" \
- --workflow grade \
- --server-url "${SERVER_URL}" \
- --external-host "${EXTERNAL_GITEA_HOST}"
- rm -f metadata.json
diff --git a/.autograde/workflow_templates/python.yml b/.autograde/workflow_templates/python.yml
deleted file mode 100644
index a92d546..0000000
--- a/.autograde/workflow_templates/python.yml
+++ /dev/null
@@ -1,206 +0,0 @@
-name: autograde-python
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-permissions:
- contents: read
- pull-requests: write
-
-jobs:
- python:
- runs-on: docker
- container: python:3.11
- timeout-minutes: 15
-
- steps:
- - name: Install dependencies (CN mirror)
- run: |
- set -e
- # Switch Debian/Ubuntu apt sources to the Tencent Cloud mirror
- for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
- [ -f "$f" ] || continue
- sed -i -E 's|https?://deb.debian.org|http://mirrors.cloud.tencent.com|g' "$f" || true
- sed -i -E 's|https?://security.debian.org|http://mirrors.cloud.tencent.com/debian-security|g' "$f" || true
- sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
- sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
- done
- apt-get -o Acquire::Check-Valid-Until=false -o Acquire::AllowInsecureRepositories=true update -y
- DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip nodejs rsync
- rm -rf /var/lib/apt/lists/*
-
- - name: Checkout code
- uses: actions/checkout@v3
- with:
- fetch-depth: 1
-
- - name: Fix permissions
- run: |
- # Ensure workspace is owned by current user
- chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
-
- - name: Install Python deps
- working-directory: ${{ github.workspace }}
- run: |
- # Use the Tencent Cloud mirror to speed up installs
- python -m pip install -U pip -i https://mirrors.cloud.tencent.com/pypi/simple
- if [ -f requirements.txt ]; then pip install -r requirements.txt -i https://mirrors.cloud.tencent.com/pypi/simple; fi
- if [ -f pyproject.toml ]; then pip install . -i https://mirrors.cloud.tencent.com/pypi/simple; fi
- pip install pytest pytest-cov junit-xml python-dotenv requests -i https://mirrors.cloud.tencent.com/pypi/simple
-
- - name: Fetch private tests
- working-directory: ${{ github.workspace }}
- env:
- EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
- run: |
- set -e
-
- TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
- TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
-
- if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
- echo "❌ RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN 未配置!"
- echo "测试必须从私有的 tests 仓库获取"
- exit 1
- fi
-
- # Resolve Gitea Host
- if [ -n "$EXTERNAL_GITEA_HOST" ]; then
- HOST="$EXTERNAL_GITEA_HOST"
- elif [ -n "$GITEA_ROOT_URL" ]; then
- HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
- else
- HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
- fi
-
- echo "📥 Fetching private tests repository..."
- echo " Gitea host: $HOST"
-
- # Infer organization and assignment ID from repository name
- ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
- REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
-
- # Extract assignment ID from repo name (e.g., hw1-stu_xxx -> hw1, hw1-template -> hw1)
- if echo "$REPO_NAME" | grep -q -- '-stu_'; then
- ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
- elif echo "$REPO_NAME" | grep -q -- '-template'; then
- ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
- else
- ASSIGNMENT_ID="hw1" # fallback
- fi
-
- echo " Organization: $ORG"
- echo " Assignment ID: $ASSIGNMENT_ID"
-
- AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
- if ! git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests 2>&1; then
- echo "❌ Failed to clone ${ASSIGNMENT_ID}-tests repository!"
- exit 1
- fi
-
- # Verify the test directories exist
- if [ ! -d "_priv_tests/python" ]; then
- echo "❌ python/ directory not found in ${ASSIGNMENT_ID}-tests!"
- exit 1
- fi
-
- if [ ! -d "_priv_tests/python/tests" ]; then
- echo "❌ python/tests/ not found in ${ASSIGNMENT_ID}-tests!"
- exit 1
- fi
-
- # Copy tests into tests/
- mkdir -p tests
- rsync -a _priv_tests/python/tests/ tests/
- echo "✅ Tests copied: _priv_tests/python/tests/ → tests/"
-
- # Copy data files (if present)
- if [ -d "_priv_tests/python/data" ]; then
- mkdir -p tests/data
- rsync -a _priv_tests/python/data/ tests/data/
- echo "✅ Data files copied: _priv_tests/python/data/ → tests/data/"
- fi
-
- # Verify test files are present
- if [ -z "$(find tests -name 'test_*.py' 2>/dev/null)" ]; then
- echo "❌ No test files found in tests/ directory!"
- exit 1
- fi
-
- echo "✅ Test suite ready:"
- find tests -name 'test_*.py'
-
- - name: Run tests
- working-directory: ${{ github.workspace }}
- run: |
- # Pin the hash seed for reproducible runs
- export PYTHONHASHSEED=2025
- pytest -q --maxfail=0 --junitxml=junit.xml --tb=short || true
-
- - name: Grade
- working-directory: ${{ github.workspace }}
- run: |
- python ./.autograde/grade.py --junit junit.xml --out grade.json --summary summary.md
-
- - name: Prepare artifacts
- working-directory: ${{ github.workspace }}
- run: |
- mkdir -p artifacts
- cp junit.xml summary.md grade.json artifacts/ 2>/dev/null || true
-
- - name: Create grade metadata
- working-directory: ${{ github.workspace }}
- env:
- REPO: ${{ github.repository }}
- run: |
- if [ ! -f grade.json ]; then
- echo "⚠️ grade.json not found, skipping metadata creation"
- exit 0
- fi
-
- # Generate JSON metadata
- if [ -f ./.autograde/create_grade_metadata.py ]; then
- python3 ./.autograde/create_grade_metadata.py > metadata.json || echo "{}" > metadata.json
- echo "✅ Grade metadata created (using create_grade_metadata.py)"
- elif [ -f ./.autograde/create_minimal_metadata.py ]; then
- export GRADE_TYPE=programming
- python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
- echo "✅ Grade metadata created (using create_minimal_metadata.py)"
- else
- echo "⚠️ No metadata creation script found, skipping"
- echo "{}" > metadata.json
- fi
-
- - name: Upload metadata (teacher only)
- if: env.RUNNER_METADATA_REPO != '' && env.RUNNER_METADATA_TOKEN != ''
- working-directory: ${{ github.workspace }}
- shell: bash
- env:
- METADATA_REPO: ${{ env.RUNNER_METADATA_REPO }}
- METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
- METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
- STUDENT_REPO: ${{ github.repository }}
- RUN_ID: ${{ github.run_id }}
- COMMIT_SHA: ${{ github.sha }}
- SERVER_URL: ${{ github.server_url }}
- run: |
- set -e
- if [ ! -f metadata.json ]; then
- echo "No metadata.json found, skip uploading."
- exit 0
- fi
-
- python ./.autograde/upload_metadata.py \
- --metadata-file metadata.json \
- --metadata-repo "${METADATA_REPO}" \
- --branch "${METADATA_BRANCH:-main}" \
- --student-repo "${STUDENT_REPO}" \
- --run-id "${RUN_ID}" \
- --commit-sha "${COMMIT_SHA}" \
- --workflow grade \
- --server-url "${SERVER_URL}" \
- --external-host "${EXTERNAL_GITEA_HOST}"
- rm -f metadata.json
diff --git a/.autograde/workflow_templates/r.yml b/.autograde/workflow_templates/r.yml
deleted file mode 100644
index 52db86d..0000000
--- a/.autograde/workflow_templates/r.yml
+++ /dev/null
@@ -1,200 +0,0 @@
-name: autograde-r
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-permissions:
- contents: read
- pull-requests: write
-
-jobs:
- r:
- runs-on: docker
- container: r-base:4.3.1
- timeout-minutes: 15
-
- steps:
- - name: Install dependencies (CN mirror)
- run: |
- set -e
- # Switch Debian/Ubuntu apt sources to the Tencent Cloud mirror
- for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
- [ -f "$f" ] || continue
- sed -i -E 's|https?://deb.debian.org|http://mirrors.cloud.tencent.com|g' "$f" || true
- sed -i -E 's|https?://security.debian.org|http://mirrors.cloud.tencent.com/debian-security|g' "$f" || true
- sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
- sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
- done
- apt-get -o Acquire::Check-Valid-Until=false -o Acquire::AllowInsecureRepositories=true update -y
- DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip nodejs rsync libcurl4-openssl-dev libssl-dev libxml2-dev
- pip3 install --break-system-packages python-dotenv requests -i https://mirrors.cloud.tencent.com/pypi/simple
- rm -rf /var/lib/apt/lists/*
-
- - name: Configure CRAN Mirror
- run: |
- echo 'options(repos = c(CRAN = "https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))' >> ~/.Rprofile
-
- - name: Install R packages
- run: |
- Rscript -e 'install.packages(c("testthat", "covr", "xml2"))'
-
- - name: Checkout code
- uses: actions/checkout@v3
- with:
- fetch-depth: 1
-
- - name: Fix permissions
- run: |
- # Ensure workspace is owned by current user
- chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
-
- - name: Fetch private tests
- working-directory: ${{ github.workspace }}
- env:
- EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
- run: |
- set -e
-
- TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
- TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
-
- if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
- echo "❌ RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN 未配置!"
- echo "测试必须从私有的 tests 仓库获取"
- exit 1
- fi
-
- # Resolve Gitea Host
- if [ -n "$EXTERNAL_GITEA_HOST" ]; then
- HOST="$EXTERNAL_GITEA_HOST"
- elif [ -n "$GITEA_ROOT_URL" ]; then
- HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
- else
- HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
- fi
-
- echo "📥 Fetching private tests repository..."
- echo " Gitea host: $HOST"
-
- # Infer organization and assignment ID from repository name
- ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
- REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
-
- # Extract assignment ID from repo name (e.g., hw1-stu_xxx -> hw1, hw1-template -> hw1)
- if echo "$REPO_NAME" | grep -q -- '-stu_'; then
- ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
- elif echo "$REPO_NAME" | grep -q -- '-template'; then
- ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
- else
- ASSIGNMENT_ID="hw1" # fallback
- fi
-
- echo " Organization: $ORG"
- echo " Assignment ID: $ASSIGNMENT_ID"
-
- AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
- if ! git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests 2>&1; then
- echo "❌ Failed to clone ${ASSIGNMENT_ID}-tests repository!"
- exit 1
- fi
-
- # Verify the test directories exist
- if [ ! -d "_priv_tests/r" ]; then
- echo "❌ r/ directory not found in ${ASSIGNMENT_ID}-tests!"
- exit 1
- fi
-
- if [ ! -d "_priv_tests/r/tests" ]; then
- echo "❌ r/tests/ not found in ${ASSIGNMENT_ID}-tests!"
- exit 1
- fi
-
- # Copy tests into tests/
- mkdir -p tests
- rsync -a _priv_tests/r/tests/ tests/
- echo "✅ Tests copied: _priv_tests/r/tests/ → tests/"
-
- # Verify test files are present
- if [ -z "$(find tests -name 'test_*.R' 2>/dev/null)" ]; then
- echo "❌ No test files found in tests/ directory!"
- exit 1
- fi
-
- echo "✅ Test suite ready:"
- find tests -name 'test_*.R'
-
- - name: Run tests
- working-directory: ${{ github.workspace }}
- run: |
- Rscript -e 'library(testthat); test_dir("tests", reporter = JunitReporter$new(file = "junit.xml"))' || true
-
- - name: Grade
- working-directory: ${{ github.workspace }}
- env:
- LANGUAGE: r
- run: |
- python3 ./.autograde/grade.py --junit junit.xml --out grade.json --summary summary.md
-
- - name: Prepare artifacts
- working-directory: ${{ github.workspace }}
- run: |
- mkdir -p artifacts
- cp junit.xml summary.md grade.json artifacts/ 2>/dev/null || true
-
- - name: Create grade metadata
- working-directory: ${{ github.workspace }}
- env:
- LANGUAGE: r
- REPO: ${{ github.repository }}
- run: |
- if [ ! -f grade.json ]; then
- echo "⚠️ grade.json not found, skipping metadata creation"
- exit 0
- fi
-
- # Generate JSON metadata
- if [ -f ./.autograde/create_grade_metadata.py ]; then
- python3 ./.autograde/create_grade_metadata.py > metadata.json || echo "{}" > metadata.json
- echo "✅ Grade metadata created (using create_grade_metadata.py)"
- elif [ -f ./.autograde/create_minimal_metadata.py ]; then
- export GRADE_TYPE=programming
- python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
- echo "✅ Grade metadata created (using create_minimal_metadata.py)"
- else
- echo "⚠️ No metadata creation script found, skipping"
- echo "{}" > metadata.json
- fi
-
- - name: Upload metadata (teacher only)
- if: env.RUNNER_METADATA_REPO != '' && env.RUNNER_METADATA_TOKEN != ''
- working-directory: ${{ github.workspace }}
- shell: bash
- env:
- METADATA_REPO: ${{ env.RUNNER_METADATA_REPO }}
- METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
- METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
- STUDENT_REPO: ${{ github.repository }}
- RUN_ID: ${{ github.run_id }}
- COMMIT_SHA: ${{ github.sha }}
- SERVER_URL: ${{ github.server_url }}
- run: |
- set -e
- if [ ! -f metadata.json ]; then
- echo "No metadata.json found, skip uploading."
- exit 0
- fi
-
- python3 ./.autograde/upload_metadata.py \
- --metadata-file metadata.json \
- --metadata-repo "${METADATA_REPO}" \
- --branch "${METADATA_BRANCH:-main}" \
- --student-repo "${STUDENT_REPO}" \
- --run-id "${RUN_ID}" \
- --commit-sha "${COMMIT_SHA}" \
- --workflow grade \
- --server-url "${SERVER_URL}" \
- --external-host "${EXTERNAL_GITEA_HOST}"
- rm -f metadata.json
diff --git a/.gitea/workflows/autograde.yml b/.gitea/workflows/autograde.yml
index 69b6ce3..66d6b42 100644
--- a/.gitea/workflows/autograde.yml
+++ b/.gitea/workflows/autograde.yml
@@ -72,7 +72,7 @@ jobs:
- name: Fix permissions
run: chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
- - name: Fetch hidden tests
+ - name: Fetch hidden tests and grading scripts
working-directory: ${{ github.workspace }}
env:
EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
@@ -83,8 +83,9 @@ jobs:
TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
- echo "Warning: RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN not set, skipping private tests"
- exit 0
+ echo "❌ RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN not set!"
+ echo "Cannot fetch grading scripts - aborting."
+ exit 1
fi
# Resolve Gitea Host
@@ -108,7 +109,7 @@ jobs:
ASSIGNMENT_ID="final-vibevault"
fi
- echo "📥 Fetching private tests from ${ORG}/${ASSIGNMENT_ID}-tests..."
+ echo "📥 Fetching tests and grading scripts from ${ORG}/${ASSIGNMENT_ID}-tests..."
AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
@@ -117,6 +118,19 @@ jobs:
exit 1
fi
+ # ===== Copy grading scripts (from tests repo, cannot be modified by students) =====
+ if [ -d "_priv_tests/autograde" ]; then
+ # Remove any local .autograde (prevent student tampering)
+ rm -rf .autograde
+ mkdir -p .autograde
+ cp _priv_tests/autograde/*.py .autograde/
+ cp _priv_tests/autograde/*.sh .autograde/ 2>/dev/null || true
+ echo "✅ Grading scripts copied from tests repo"
+ else
+ echo "❌ No autograde directory in tests repo!"
+ exit 1
+ fi
+
# Copy Java tests
if [ -d "_priv_tests/java/src/test" ]; then
rsync -a _priv_tests/java/src/test/ src/test/
@@ -135,6 +149,9 @@ jobs:
cp _priv_tests/llm/*.json .llm_rubrics/ 2>/dev/null || true
echo "✅ LLM rubrics copied"
fi
+
+ # Cleanup
+ rm -rf _priv_tests
- name: Run tests
working-directory: ${{ github.workspace }}