diff --git a/.autograde/create_minimal_metadata.py b/.autograde/create_minimal_metadata.py
deleted file mode 100644
index 2d74083..0000000
--- a/.autograde/create_minimal_metadata.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import json
-import os
-
-grade_file = "final_grade.json"
-if os.path.exists(grade_file):
-    with open(grade_file) as f:
-        data = json.load(f)
-    print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
-else:
-    print(json.dumps({"grade": 0, "status": "error"}))
diff --git a/.autograde/generate_pdf_report.py b/.autograde/generate_pdf_report.py
deleted file mode 100644
index 6ce6be8..0000000
--- a/.autograde/generate_pdf_report.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/env python3
-"""Generate PDF report from REPORT.md and FRONTEND.md"""
-
-import os
-import sys
-import argparse
-import json
-
-def read_file_content(path):
-    if not path or not os.path.exists(path):
-        return ""
-    with open(path, 'r', encoding='utf-8') as f:
-        return f.read()
-
-def main():
-    parser = argparse.ArgumentParser(description='Generate PDF grade report')
-    parser.add_argument('--report', default='REPORT.md', help='Path to REPORT.md')
-    parser.add_argument('--frontend', default='FRONTEND.md', help='Path to FRONTEND.md')
-    parser.add_argument('--grade', default='final_grade.json', help='Path to grade JSON')
-    parser.add_argument('--images', default='images', help='Path to images directory')
-    parser.add_argument('--out', default='grade_report.pdf', help='Output PDF path')
-    parser.add_argument('--student-id', default='', help='Student ID')
-    parser.add_argument('--student-name', default='', help='Student name')
-    parser.add_argument('--class-name', default='', help='Class name')
-    parser.add_argument('--commit-sha', default='', help='Commit SHA')
-    args = parser.parse_args()
-
-    # Read grade data
-    grade_data = {}
-    if os.path.exists(args.grade):
-        with open(args.grade, 'r', encoding='utf-8') as f:
-            grade_data = json.load(f)
-
-    # Read markdown content
-    report_content = read_file_content(args.report)
-    frontend_content = read_file_content(args.frontend)
-
-    if not report_content and not frontend_content:
-        print("No report content found, skipping PDF generation")
-        return
-
-    try:
-        import markdown
-        from weasyprint import HTML
-
-        # Build HTML content
-        html_parts = []
-
-        # Header with student info
-        header = f"""
-        <div>
-        <h1>VibeVault 项目评分报告</h1>
-        <p>学号: {args.student_id or 'N/A'}</p>
-        <p>姓名: {args.student_name or 'N/A'}</p>
-        <p>班级: {args.class_name or 'N/A'}</p>
-        <p>Commit: {args.commit_sha[:7] if args.commit_sha else 'N/A'}</p>
-        </div>
-        """
-        html_parts.append(header)
-
-        # Grade summary
-        if grade_data:
-            grade_html = f"""
-            <div>
-            <h2>成绩汇总</h2>
-            <table>
-            <tr><th>项目</th><th>得分</th></tr>
-            <tr><td>编程测试</td><td>{grade_data.get('programming', 0)}/60</td></tr>
-            <tr><td>报告</td><td>{grade_data.get('report', 0)}/10</td></tr>
-            <tr><td>前端</td><td>{grade_data.get('frontend', 0)}/10</td></tr>
-            <tr><td>总分</td><td>{grade_data.get('total', 0)}/80</td></tr>
-            </table>
-            </div>
-            """
-            html_parts.append(grade_html)
-
-        # Report content
-        if report_content:
-            report_html = markdown.markdown(report_content, extensions=['fenced_code', 'tables'])
-            html_parts.append(f'<div>{report_html}</div>')
-
-        # Frontend content
-        if frontend_content:
-            frontend_html = markdown.markdown(frontend_content, extensions=['fenced_code', 'tables'])
-            html_parts.append(f'<div>{frontend_html}</div>')
-
-        # Full HTML document
-        full_html = f"""
-        <!DOCTYPE html>
-        <html>
-        <head>
-        <meta charset="utf-8">
-        <style>
-        body {{ font-family: sans-serif; }}
-        </style>
-        </head>
-        <body>
-        {''.join(html_parts)}
-        </body>
-        </html>
-        """
-
-        # Generate PDF
-        HTML(string=full_html, base_url=os.getcwd()).write_pdf(args.out)
-        print(f"✅ PDF generated: {args.out}")
-
-    except ImportError as e:
-        print(f"Cannot generate PDF: {e}")
-        print("Required: pip install markdown weasyprint")
-        sys.exit(1)
-    except Exception as e:
-        print(f"PDF generation failed: {e}")
-        sys.exit(1)
-
-if __name__ == "__main__":
-    main()
diff --git a/.autograde/upload_metadata.py b/.autograde/upload_metadata.py
deleted file mode 100644
index 4455716..0000000
--- a/.autograde/upload_metadata.py
+++ /dev/null
@@ -1 +0,0 @@
-print("Metadata upload skipped")
diff --git a/.gitea/workflows/autograde.yml b/.gitea/workflows/autograde.yml
index f1f1384..35a72d1 100644
--- a/.gitea/workflows/autograde.yml
+++ b/.gitea/workflows/autograde.yml
@@ -14,7 +14,7 @@ permissions:
   pull-requests: write
 
 jobs:
-# Check whether CI should trigger (runs only when the commit message contains “完成作业”)
+  # Check whether CI should trigger (runs only when the commit message contains “完成作业”)
   check-trigger:
     runs-on: docker
     container:
@@ -33,7 +33,7 @@ jobs:
           else
             echo "trigger=false" >> $GITHUB_OUTPUT
             echo "⛔ 只有包含“完成作业”的提交才会执行自动评分" >&2
-fi
+          fi
 
   grade:
     needs: check-trigger
@@ -63,11 +63,7 @@ fi
           pip3 install --break-system-packages python-dotenv requests markdown weasyprint -i https://mirrors.aliyun.com/pypi/simple --trusted-host mirrors.aliyun.com
           # Refresh the font cache
           fc-cache -f -v > /dev/null 2>&1 || true
-<<<<<<< HEAD
           rm -rf /var/lib/apt/lists/*
-=======
-          rm -rf /var/lib/apt/lists/*
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Configure Gradle mirror (Aliyun)
         run: |
@@ -84,11 +80,7 @@ fi
             }
           }
           EOF
-<<<<<<< HEAD
          echo "✅ Gradle configured to use Aliyun mirror"
-=======
-          echo "✅ Gradle configured to use Aliyun mirror"
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Checkout code
         env:
@@ -101,16 +93,11 @@ fi
           AUTH_URL=$(echo "$REPO_URL" | sed "s|://|://${GITHUB_TOKEN}@|")
          git remote add origin "$AUTH_URL"
          git fetch --depth=1 origin ${{ github.sha }}
-<<<<<<< HEAD
          git checkout ${{ github.sha }}
-=======
-          git checkout ${{ github.sha }}
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Fix permissions
         run: chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
 
-<<<<<<< HEAD
       - name: Fetch hidden tests and grading scripts
         working-directory: ${{ github.workspace }}
         env:
@@ -191,209 +178,6 @@ fi
           # Cleanup
           rm -rf _priv_tests
-=======
-      - name: Setup local grading scripts
-        working-directory: ${{ github.workspace }}
-        run: |
-          echo "📦 Setting up local grading environment..."
-
-          # Create autograde directory with simple grading scripts
-          mkdir -p .autograde
-
-          # Create grade_grouped.py
-          cat > .autograde/grade_grouped.py << 'GRADE_SCRIPT'
-          import json
-          import os
-          import sys
-          import xml.etree.ElementTree as ET
-          from pathlib import Path
-
-          def parse_junit_results(junit_dir):
-              results = {"passed": 0, "failed": 0, "total": 0, "tests": []}
-              junit_path = Path(junit_dir)
-              if not junit_path.exists():
-                  return results
-              for xml_file in junit_path.glob("TEST-*.xml"):
-                  try:
-                      tree = ET.parse(xml_file)
-                      root = tree.getroot()
-                      for testcase in root.findall(".//testcase"):
-                          name = testcase.get("name", "unknown")
-                          classname = testcase.get("classname", "")
-                          failure = testcase.find("failure")
-                          error = testcase.find("error")
-                          status = "passed" if failure is None and error is None else "failed"
-                          results["tests"].append({"name": f"{classname}.{name}", "status": status})
-                          results["total"] += 1
-                          if status == "passed":
-                              results["passed"] += 1
-                          else:
-                              results["failed"] += 1
-                  except Exception as e:
-                      print(f"Error parsing {xml_file}: {e}")
-              return results
-
-          def main():
-              import argparse
-              parser = argparse.ArgumentParser()
-              parser.add_argument("--junit-dir", required=True)
-              parser.add_argument("--groups", default="test_groups.json")
-              parser.add_argument("--out", default="grade.json")
-              parser.add_argument("--summary", default="summary.md")
-              args = parser.parse_args()
-
-              results = parse_junit_results(args.junit_dir)
-              score = int((results["passed"] / max(results["total"], 1)) * 60)
-
-              grade = {
-                  "programming_score": score,
-                  "max_score": 60,
-                  "passed": results["passed"],
-                  "failed": results["failed"],
-                  "total": results["total"],
-                  "tests": results["tests"]
-              }
-
-              with open(args.out, "w") as f:
-                  json.dump(grade, f, indent=2)
-
-              with open(args.summary, "w") as f:
-                  f.write(f"# Programming Test Results\n\n")
-                  f.write(f"- **Score**: {score}/60\n")
-                  f.write(f"- **Passed**: {results['passed']}/{results['total']}\n\n")
-                  for t in results["tests"]:
-                      icon = "✅" if t["status"] == "passed" else "❌"
-                      f.write(f"- {icon} {t['name']}\n")
-
-              print(f"✅ Grade: {score}/60 ({results['passed']}/{results['total']} tests passed)")
-
-          if __name__ == "__main__":
-              main()
-          GRADE_SCRIPT
-
-          # Create llm_grade.py (simplified - just gives points for having the file)
-          cat > .autograde/llm_grade.py << 'LLM_SCRIPT'
-          import json
-          import argparse
-          import os
-
-          def main():
-              parser = argparse.ArgumentParser()
-              parser.add_argument("--question", default="")
-              parser.add_argument("--answer", required=True)
-              parser.add_argument("--rubric", default="")
-              parser.add_argument("--out", default="grade.json")
-              parser.add_argument("--summary", default="summary.md")
-              args = parser.parse_args()
-
-              score = 0
-              feedback = ""
-
-              if os.path.exists(args.answer):
-                  with open(args.answer, "r", encoding="utf-8") as f:
-                      content = f.read()
-                  word_count = len(content)
-                  if word_count > 500:
-                      score = 10
-                      feedback = "Report submitted with sufficient content."
-                  elif word_count > 200:
-                      score = 7
-                      feedback = "Report submitted but could use more detail."
-                  else:
-                      score = 3
-                      feedback = "Report is too short."
-              else:
-                  feedback = "Report file not found."
-
-              with open(args.out, "w") as f:
-                  json.dump({"total": score, "feedback": feedback}, f, indent=2)
-
-              with open(args.summary, "w") as f:
-                  f.write(f"# Report Grade\n\n- **Score**: {score}/10\n- **Feedback**: {feedback}\n")
-
-              print(f"✅ Report grade: {score}/10")
-
-          if __name__ == "__main__":
-              main()
-          LLM_SCRIPT
-
-          # Create aggregate_final_grade.py
-          cat > .autograde/aggregate_final_grade.py << 'AGG_SCRIPT'
-          import json
-          import argparse
-
-          def load_json(path):
-              try:
-                  with open(path) as f:
-                      return json.load(f)
-              except:
-                  return {}
-
-          def main():
-              parser = argparse.ArgumentParser()
-              parser.add_argument("--programming", default="grade.json")
-              parser.add_argument("--report", default="report_grade.json")
-              parser.add_argument("--frontend", default="frontend_grade.json")
-              parser.add_argument("--out", default="final_grade.json")
-              parser.add_argument("--summary", default="final_summary.md")
-              args = parser.parse_args()
-
-              prog = load_json(args.programming)
-              report = load_json(args.report)
-              frontend = load_json(args.frontend)
-
-              prog_score = prog.get("programming_score", 0)
-              report_score = report.get("total", 0)
-              frontend_score = frontend.get("total", 0)
-              total = prog_score + report_score + frontend_score
-
-              final = {
-                  "total": total,
-                  "programming": prog_score,
-                  "report": report_score,
-                  "frontend": frontend_score
-              }
-
-              with open(args.out, "w") as f:
-                  json.dump(final, f, indent=2)
-
-              with open(args.summary, "w") as f:
-                  f.write(f"# Final Grade Summary\n\n")
-                  f.write(f"| Category | Score |\n|----------|-------|\n")
-                  f.write(f"| Programming | {prog_score}/60 |\n")
-                  f.write(f"| Report | {report_score}/10 |\n")
-                  f.write(f"| Frontend | {frontend_score}/10 |\n")
-                  f.write(f"| **Total** | **{total}/80** |\n")
-
-              print(f"🎯 Final grade: {total}/80")
-
-          if __name__ == "__main__":
-              main()
-          AGG_SCRIPT
-
-          # Create minimal metadata script
-          cat > .autograde/create_minimal_metadata.py << 'META_SCRIPT'
-          import json
-          import os
-
-          grade_file = "final_grade.json"
-          if os.path.exists(grade_file):
-              with open(grade_file) as f:
-                  data = json.load(f)
-              print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
-          else:
-              print(json.dumps({"grade": 0, "status": "error"}))
-          META_SCRIPT
-
-          # Create upload_metadata.py (stub)
-          cat > .autograde/upload_metadata.py << 'UPLOAD_SCRIPT'
-          print("Metadata upload skipped (local mode)")
-          UPLOAD_SCRIPT
-
-          # Note: generate_pdf_report.py is NOT overwritten - use the one from repo
-
-          echo "✅ Local grading scripts created"
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Run tests
         working-directory: ${{ github.workspace }}
         run: |
@@ -406,11 +190,7 @@ fi
           # Also try to get a single combined report
           if [ -f build/test-results/test/TEST-*.xml ]; then
             cp build/test-results/test/TEST-*.xml junit.xml 2>/dev/null || true
-<<<<<<< HEAD
           fi
-=======
-          fi
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Grade programming tests
         working-directory: ${{ github.workspace }}
         run: |
@@ -420,69 +200,41 @@ fi
           python3 ./.autograde/grade_grouped.py \
             --junit-dir build/test-results/test \
             --groups test_groups.json \
             --out grade.json \
-<<<<<<< HEAD
             --summary summary.md
-=======
-            --summary summary.md
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Grade REPORT.md
         working-directory: ${{ github.workspace }}
         run: |
-<<<<<<< HEAD
           # LLM env vars are injected by Runner config (LLM_API_KEY, LLM_API_URL, LLM_MODEL)
           if [ -f REPORT.md ] && [ -f .llm_rubrics/rubric_report.json ]; then
             python3 ./.autograde/llm_grade.py \
               --question "请评估这份后端与系统设计报告" \
               --answer REPORT.md \
               --rubric .llm_rubrics/rubric_report.json \
-=======
-          if [ -f REPORT.md ]; then
-            python3 ./.autograde/llm_grade.py \
-              --question "请评估这份后端与系统设计报告" \
-              --answer REPORT.md \
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
               --out report_grade.json \
               --summary report_summary.md
             echo "✅ REPORT.md graded"
           else
             echo '{"total": 0, "flags": ["missing_file"]}' > report_grade.json
-<<<<<<< HEAD
             echo "⚠️ REPORT.md or rubric not found"
           fi
-=======
-            echo "⚠️ REPORT.md not found"
-          fi
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Grade FRONTEND.md
         working-directory: ${{ github.workspace }}
         run: |
-<<<<<<< HEAD
           # LLM env vars are injected by Runner config (LLM_API_KEY, LLM_API_URL, LLM_MODEL)
          if [ -f FRONTEND.md ] && [ -f .llm_rubrics/rubric_frontend.json ]; then
            python3 ./.autograde/llm_grade.py \
              --question "请评估这份前端界面与交互设计报告" \
              --answer FRONTEND.md \
              --rubric .llm_rubrics/rubric_frontend.json \
-=======
-          if [ -f FRONTEND.md ]; then
-            python3 ./.autograde/llm_grade.py \
-              --question "请评估这份前端界面与交互设计报告" \
-              --answer FRONTEND.md \
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
              --out frontend_grade.json \
              --summary frontend_summary.md
            echo "✅ FRONTEND.md graded"
          else
            echo '{"total": 0, "flags": ["missing_file"]}' > frontend_grade.json
-<<<<<<< HEAD
            echo "⚠️ FRONTEND.md or rubric not found"
          fi
-=======
-            echo "⚠️ FRONTEND.md not found"
-          fi
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Aggregate grades
         working-directory: ${{ github.workspace }}
         run: |
@@ -492,11 +244,7 @@ fi
             --report report_grade.json \
             --frontend frontend_grade.json \
             --out final_grade.json \
-<<<<<<< HEAD
             --summary final_summary.md
-=======
-            --summary final_summary.md
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Generate PDF report
         working-directory: ${{ github.workspace }}
         run: |
@@ -531,11 +279,7 @@ fi
             --student-name "$STUDENT_NAME" \
             --class-name "$CLASS_NAME" \
             --commit-sha "$COMMIT_SHA"
-<<<<<<< HEAD
           fi
-=======
-          fi
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Upload report to student repo
         if: env.RUNNER_METADATA_TOKEN != ''
@@ -606,11 +350,7 @@ fi
           # Clean up temporary files
           rm -f /tmp/upload_request.json
-<<<<<<< HEAD
           fi
-=======
-          fi
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Create metadata
         working-directory: ${{ github.workspace }}
         run: |
@@ -620,11 +360,7 @@ fi
           if [ -f final_grade.json ]; then
             export GRADE_TYPE=final
             python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
-<<<<<<< HEAD
           fi
-=======
-          fi
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
 
       - name: Upload metadata
         if: env.RUNNER_METADATA_TOKEN != ''
@@ -650,9 +386,4 @@ fi
             --workflow grade \
             --server-url "${SERVER_URL}" \
             --external-host "${EXTERNAL_GITEA_HOST}"
-<<<<<<< HEAD
-          fi
-=======
-          fi
-
->>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
+          fi
diff --git a/reports/grade_report_5629c49.pdf b/reports/grade_report_5629c49.pdf
deleted file mode 100644
index 43d7f28..0000000
Binary files a/reports/grade_report_5629c49.pdf and /dev/null differ