name: autograde-final-vibevault

on:
  push:
    branches:
      - main
    tags:
      - 'submit'    # tag-triggered runs are still allowed
      - 'submit-*'
  workflow_dispatch:

permissions:
  contents: read
  pull-requests: write

jobs:
  # Decide whether CI should run (grading only executes when the commit message contains "完成作业")
  check-trigger:
    runs-on: docker
    container:
      image: alpine:latest
    outputs:
      should_run: ${{ steps.check.outputs.trigger }}
    steps:
      - name: Check commit message for trigger keyword
        id: check
        run: |
          COMMIT_MSG="${{ github.event.head_commit.message || '' }}"
          echo "Commit message: $COMMIT_MSG"
          if echo "$COMMIT_MSG" | grep -q "完成作业"; then
            echo "trigger=true" >> $GITHUB_OUTPUT
            echo "✅ Commit contains \"完成作业\"; grading will run"
          else
            echo "trigger=false" >> $GITHUB_OUTPUT
            echo "⛔ Only commits whose message contains \"完成作业\" trigger auto-grading" >&2
          fi
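  # For example, any push whose message contains the keyword triggers grading:
  #   git commit -m "完成作业" && git push origin main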
  grade:
    needs: check-trigger
    if: needs.check-trigger.outputs.should_run == 'true'
    runs-on: docker
    container:
      image: gradle:9.0-jdk21
      options: --user root
    timeout-minutes: 30

    steps:
      - name: Install dependencies (CN mirror)
        run: |
          set -e
          # Switch the Debian/Ubuntu apt sources to the Aliyun mirror
          for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
            [ -f "$f" ] || continue
            sed -i -E 's|https?://deb.debian.org|http://mirrors.aliyun.com|g' "$f" || true
            sed -i -E 's|https?://security.debian.org|http://mirrors.aliyun.com/debian-security|g' "$f" || true
            sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.aliyun.com|g' "$f" || true
            sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.aliyun.com|g' "$f" || true
          done
          apt-get -o Acquire::Check-Valid-Until=false update -y
          DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip rsync \
            libpango-1.0-0 libpangocairo-1.0-0 libgdk-pixbuf2.0-0 libffi-dev shared-mime-info \
            fonts-noto-cjk fonts-wqy-microhei
          pip3 install --break-system-packages python-dotenv requests markdown weasyprint -i https://mirrors.aliyun.com/pypi/simple --trusted-host mirrors.aliyun.com
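          # The CJK fonts installed above allow weasyprint to render Chinese text in the generated PDF report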
          # Refresh the font cache
          fc-cache -f -v > /dev/null 2>&1 || true
          rm -rf /var/lib/apt/lists/*

      - name: Configure Gradle mirror (Aliyun)
        run: |
          mkdir -p ~/.gradle
          cat > ~/.gradle/init.gradle << 'EOF'
          allprojects {
              repositories {
                  mavenLocal()
                  maven { url 'https://maven.aliyun.com/repository/public' }
                  maven { url 'https://maven.aliyun.com/repository/spring' }
                  maven { url 'https://maven.aliyun.com/repository/spring-plugin' }
                  maven { url 'https://maven.aliyun.com/repository/gradle-plugin' }
                  mavenCentral()
              }
          }
          EOF
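          # Init scripts under ~/.gradle apply to every Gradle invocation by this user, so no per-project repository config is needed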
          echo "✅ Gradle configured to use Aliyun mirror"

      - name: Checkout code
        env:
          GITHUB_TOKEN: ${{ github.token }}
        run: |
          git config --global --add safe.directory ${{ github.workspace }}
          git init
          # Use token for authentication (required for private repos)
          REPO_URL="${{ github.server_url }}/${{ github.repository }}.git"
          AUTH_URL=$(echo "$REPO_URL" | sed "s|://|://${GITHUB_TOKEN}@|")
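          # e.g. https://gitea.example.edu/org/repo.git -> https://<token>@gitea.example.edu/org/repo.git (hostname illustrative)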
          git remote add origin "$AUTH_URL"
          git fetch --depth=1 origin ${{ github.sha }}
          git checkout ${{ github.sha }}

      - name: Fix permissions
        run: chown -R $(whoami):$(whoami) ${{ github.workspace }} || true

      - name: Setup local grading scripts
        working-directory: ${{ github.workspace }}
        run: |
          echo "📦 Setting up local grading environment..."

          # Create autograde directory with simple grading scripts
          mkdir -p .autograde

          # Create grade_grouped.py
          cat > .autograde/grade_grouped.py << 'GRADE_SCRIPT'
          import json
          import os
          import sys
          import xml.etree.ElementTree as ET
          from pathlib import Path

          def parse_junit_results(junit_dir):
              results = {"passed": 0, "failed": 0, "total": 0, "tests": []}
              junit_path = Path(junit_dir)
              if not junit_path.exists():
                  return results
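              # Gradle emits one TEST-<ClassName>.xml per test class; each <testcase> element
              # carries name/classname attributes plus a <failure> or <error> child when it did not pass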
              for xml_file in junit_path.glob("TEST-*.xml"):
                  try:
                      tree = ET.parse(xml_file)
                      root = tree.getroot()
                      for testcase in root.findall(".//testcase"):
                          name = testcase.get("name", "unknown")
                          classname = testcase.get("classname", "")
                          failure = testcase.find("failure")
                          error = testcase.find("error")
                          status = "passed" if failure is None and error is None else "failed"
                          results["tests"].append({"name": f"{classname}.{name}", "status": status})
                          results["total"] += 1
                          if status == "passed":
                              results["passed"] += 1
                          else:
                              results["failed"] += 1
                  except Exception as e:
                      print(f"Error parsing {xml_file}: {e}")
              return results

          def main():
              import argparse
              parser = argparse.ArgumentParser()
              parser.add_argument("--junit-dir", required=True)
              parser.add_argument("--groups", default="test_groups.json")
              parser.add_argument("--out", default="grade.json")
              parser.add_argument("--summary", default="summary.md")
              args = parser.parse_args()

              results = parse_junit_results(args.junit_dir)
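              # Scale the pass rate onto the 60-point programming cap; max(..., 1) guards against
              # division by zero when no tests ran, e.g. 15/20 passed -> int(0.75 * 60) = 45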
              score = int((results["passed"] / max(results["total"], 1)) * 60)

              grade = {
                  "programming_score": score,
                  "max_score": 60,
                  "passed": results["passed"],
                  "failed": results["failed"],
                  "total": results["total"],
                  "tests": results["tests"]
              }

              with open(args.out, "w") as f:
                  json.dump(grade, f, indent=2)

              with open(args.summary, "w") as f:
                  f.write("# Programming Test Results\n\n")
                  f.write(f"- **Score**: {score}/60\n")
                  f.write(f"- **Passed**: {results['passed']}/{results['total']}\n\n")
                  for t in results["tests"]:
                      icon = "✅" if t["status"] == "passed" else "❌"
                      f.write(f"- {icon} {t['name']}\n")

              print(f"✅ Grade: {score}/60 ({results['passed']}/{results['total']} tests passed)")

          if __name__ == "__main__":
              main()
          GRADE_SCRIPT

          # Create llm_grade.py (simplified - just gives points for having the file)
          cat > .autograde/llm_grade.py << 'LLM_SCRIPT'
          import json
          import argparse
          import os

          def main():
              parser = argparse.ArgumentParser()
              parser.add_argument("--question", default="")
              parser.add_argument("--answer", required=True)
              parser.add_argument("--rubric", default="")
              parser.add_argument("--out", default="grade.json")
              parser.add_argument("--summary", default="summary.md")
              args = parser.parse_args()

              score = 0
              feedback = ""

              if os.path.exists(args.answer):
                  with open(args.answer, "r", encoding="utf-8") as f:
                      content = f.read()
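                  # Note: len() counts characters, not words, so the thresholds below are
                  # effectively character counts (which also covers CJK text sensibly)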
                  word_count = len(content)
                  if word_count > 500:
                      score = 10
                      feedback = "Report submitted with sufficient content."
                  elif word_count > 200:
                      score = 7
                      feedback = "Report submitted but could use more detail."
                  else:
                      score = 3
                      feedback = "Report is too short."
              else:
                  feedback = "Report file not found."

              with open(args.out, "w") as f:
                  json.dump({"total": score, "feedback": feedback}, f, indent=2)

              with open(args.summary, "w") as f:
                  f.write(f"# Report Grade\n\n- **Score**: {score}/10\n- **Feedback**: {feedback}\n")

              print(f"✅ Report grade: {score}/10")

          if __name__ == "__main__":
              main()
          LLM_SCRIPT

          # Create aggregate_final_grade.py
          cat > .autograde/aggregate_final_grade.py << 'AGG_SCRIPT'
          import json
          import argparse

          def load_json(path):
              try:
                  with open(path) as f:
                      return json.load(f)
              except (OSError, json.JSONDecodeError):
                  # Missing or malformed inputs simply contribute zero points
                  return {}

          def main():
              parser = argparse.ArgumentParser()
              parser.add_argument("--programming", default="grade.json")
              parser.add_argument("--report", default="report_grade.json")
              parser.add_argument("--frontend", default="frontend_grade.json")
              parser.add_argument("--out", default="final_grade.json")
              parser.add_argument("--summary", default="final_summary.md")
              args = parser.parse_args()

              prog = load_json(args.programming)
              report = load_json(args.report)
              frontend = load_json(args.frontend)

              prog_score = prog.get("programming_score", 0)
              report_score = report.get("total", 0)
              frontend_score = frontend.get("total", 0)
              total = prog_score + report_score + frontend_score
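              # Category caps: 60 (programming) + 10 (report) + 10 (frontend) = 80 total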

              final = {
                  "total": total,
                  "programming": prog_score,
                  "report": report_score,
                  "frontend": frontend_score
              }

              with open(args.out, "w") as f:
                  json.dump(final, f, indent=2)

              with open(args.summary, "w") as f:
                  f.write("# Final Grade Summary\n\n")
                  f.write("| Category | Score |\n|----------|-------|\n")
                  f.write(f"| Programming | {prog_score}/60 |\n")
                  f.write(f"| Report | {report_score}/10 |\n")
                  f.write(f"| Frontend | {frontend_score}/10 |\n")
                  f.write(f"| **Total** | **{total}/80** |\n")

              print(f"🎯 Final grade: {total}/80")

          if __name__ == "__main__":
              main()
          AGG_SCRIPT

          # Create minimal metadata script
          cat > .autograde/create_minimal_metadata.py << 'META_SCRIPT'
          import json
          import os

          grade_file = "final_grade.json"
          if os.path.exists(grade_file):
              with open(grade_file) as f:
                  data = json.load(f)
              print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
          else:
              print(json.dumps({"grade": 0, "status": "error"}))
          META_SCRIPT

          # Create upload_metadata.py (stub)
          cat > .autograde/upload_metadata.py << 'UPLOAD_SCRIPT'
          print("Metadata upload skipped (local mode)")
          UPLOAD_SCRIPT

          # Create generate_pdf_report.py (stub)
          cat > .autograde/generate_pdf_report.py << 'PDF_SCRIPT'
          print("PDF generation skipped (local mode)")
          PDF_SCRIPT

          echo "✅ Local grading scripts created"

      - name: Run tests
        working-directory: ${{ github.workspace }}
        run: |
          gradle test --no-daemon || true
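          # '|| true' keeps the job alive when tests fail; failures lower the score instead of aborting grading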

          # Collect all JUnit XML reports
          find build/test-results/test -name "TEST-*.xml" -exec cat {} \; > all_tests.xml 2>/dev/null || true

          # Also try to get a single combined report (pick the first match; a bare
          # '[ -f glob ]' would break if the glob expanded to more than one file)
          FIRST_XML=$(ls build/test-results/test/TEST-*.xml 2>/dev/null | head -n 1 || true)
          if [ -n "$FIRST_XML" ]; then
            cp "$FIRST_XML" junit.xml 2>/dev/null || true
          fi

      - name: Grade programming tests
        working-directory: ${{ github.workspace }}
        run: |
          # Use extended grading script with group support
          python3 ./.autograde/grade_grouped.py \
            --junit-dir build/test-results/test \
            --groups test_groups.json \
            --out grade.json \
            --summary summary.md
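          # test_groups.json is accepted for compatibility with the full grader; the local script parses the flag but does not use it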

      - name: Grade REPORT.md
        working-directory: ${{ github.workspace }}
        run: |
          if [ -f REPORT.md ]; then
            python3 ./.autograde/llm_grade.py \
              --question "请评估这份后端与系统设计报告" \
              --answer REPORT.md \
              --out report_grade.json \
              --summary report_summary.md
            echo "✅ REPORT.md graded"
          else
            echo '{"total": 0, "flags": ["missing_file"]}' > report_grade.json
            echo "⚠️ REPORT.md not found"
          fi

      - name: Grade FRONTEND.md
        working-directory: ${{ github.workspace }}
        run: |
          if [ -f FRONTEND.md ]; then
            python3 ./.autograde/llm_grade.py \
              --question "请评估这份前端界面与交互设计报告" \
              --answer FRONTEND.md \
              --out frontend_grade.json \
              --summary frontend_summary.md
            echo "✅ FRONTEND.md graded"
          else
            echo '{"total": 0, "flags": ["missing_file"]}' > frontend_grade.json
            echo "⚠️ FRONTEND.md not found"
          fi

      - name: Aggregate grades
        working-directory: ${{ github.workspace }}
        run: |
          python3 ./.autograde/aggregate_final_grade.py \
            --programming grade.json \
            --report report_grade.json \
            --frontend frontend_grade.json \
            --out final_grade.json \
            --summary final_summary.md

      - name: Generate PDF report
        working-directory: ${{ github.workspace }}
        env:
          REPO: ${{ github.repository }}
          COMMIT_SHA: ${{ github.sha }}
        run: |
          if [ -f final_grade.json ]; then
            # Read the student info file (if present)
            STUDENT_ID=""
            STUDENT_NAME=""
            CLASS_NAME=""
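            # Expected shape of .student_info.json (values illustrative):
            #   {"student_id": "20230001", "name": "张三", "class_name": "软件2101"}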
            if [ -f .student_info.json ]; then
              STUDENT_ID=$(python3 -c "import json; d=json.load(open('.student_info.json')); print(d.get('student_id',''))" 2>/dev/null || echo "")
              STUDENT_NAME=$(python3 -c "import json; d=json.load(open('.student_info.json')); print(d.get('name',''))" 2>/dev/null || echo "")
              CLASS_NAME=$(python3 -c "import json; d=json.load(open('.student_info.json')); print(d.get('class_name',''))" 2>/dev/null || echo "")
            fi

            # Without a student info file, fall back to extracting the student ID from the repository name
            if [ -z "$STUDENT_ID" ]; then
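              # e.g. org/vibevault-stu_st20230001 -> 20230001 (repository name illustrative)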
              STUDENT_ID=$(echo "$REPO" | sed -n 's/.*-stu[_-]\?\(st\)\?\([0-9]*\)$/\2/p')
            fi

            python3 ./.autograde/generate_pdf_report.py \
              --report REPORT.md \
              --frontend FRONTEND.md \
              --grade final_grade.json \
              --images images \
              --out grade_report.pdf \
              --student-id "$STUDENT_ID" \
              --student-name "$STUDENT_NAME" \
              --class-name "$CLASS_NAME" \
              --commit-sha "$COMMIT_SHA"
          fi
      - name: Upload report to student repo
        if: env.RUNNER_METADATA_TOKEN != ''
        working-directory: ${{ github.workspace }}
        env:
          TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
          REPO: ${{ github.repository }}
          SERVER_URL: ${{ github.server_url }}
          COMMIT_SHA: ${{ github.sha }}
        run: |
          # Upload the PDF (or Markdown) report into the student repo
          REPORT_FILE=""
          if [ -f grade_report.pdf ]; then
            REPORT_FILE="grade_report.pdf"
          elif [ -f grade_report.md ]; then
            REPORT_FILE="grade_report.md"
          fi

          if [ -n "$REPORT_FILE" ]; then
            # Use the internal Gitea address
            API_URL="http://gitea:3000/api/v1"
            SHORT_SHA=$(echo "$COMMIT_SHA" | cut -c1-7)
            DEST_PATH="reports/grade_report_${SHORT_SHA}.${REPORT_FILE##*.}"
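            # e.g. reports/grade_report_ab12cd3.pdf (short SHA illustrative)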
            # Base64-encode the report and write the request to a temp file (avoids overly long command-line arguments)
            CONTENT=$(base64 -w 0 "$REPORT_FILE")

            # Create the request JSON file (the heredoc terminator must sit at the script's left margin)
            cat > /tmp/upload_request.json << EOF
          {"message": "Add grade report for $SHORT_SHA", "content": "$CONTENT"}
          EOF

            # First try POST to create a new file
            RESULT=$(curl -s -X POST -H "Authorization: token $TOKEN" \
              -H "Content-Type: application/json" \
              "$API_URL/repos/$REPO/contents/$DEST_PATH" \
              -d @/tmp/upload_request.json)

            if echo "$RESULT" | grep -q '"content"'; then
              echo "✅ Report uploaded to $DEST_PATH"
            else
              # POST failed; the file probably already exists, so fetch its SHA and PUT an update
              echo "POST failed, trying PUT with SHA..."
              SHA=$(curl -s -H "Authorization: token $TOKEN" \
                "$API_URL/repos/$REPO/contents/$DEST_PATH" \
                | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('sha','') if isinstance(d,dict) and 'sha' in d else '')" 2>/dev/null || echo "")

              if [ -n "$SHA" ]; then
                # Create the update-request JSON file
                cat > /tmp/upload_request.json << EOF
          {"message": "Update grade report for $SHORT_SHA", "content": "$CONTENT", "sha": "$SHA"}
          EOF

                RESULT=$(curl -s -X PUT -H "Authorization: token $TOKEN" \
                  -H "Content-Type: application/json" \
                  "$API_URL/repos/$REPO/contents/$DEST_PATH" \
                  -d @/tmp/upload_request.json)

                if echo "$RESULT" | grep -q '"content"'; then
                  echo "✅ Report updated at $DEST_PATH"
                else
                  echo "⚠️ Failed to update report: $RESULT"
                fi
              else
                echo "⚠️ Could not get file SHA, upload failed"
              fi
            fi

            # Clean up the temp file
            rm -f /tmp/upload_request.json
          fi

      - name: Create metadata
        working-directory: ${{ github.workspace }}
        env:
          REPO: ${{ github.repository }}
        run: |
          if [ -f final_grade.json ]; then
            export GRADE_TYPE=final
            python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
          fi
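          # metadata.json then looks like {"grade": 72, "status": "graded"} (score illustrative)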

      - name: Upload metadata
        if: env.RUNNER_METADATA_TOKEN != ''
        working-directory: ${{ github.workspace }}
        env:
          # Use the current organization's course-metadata repo, not the value hard-coded in the Runner config
          METADATA_REPO: ${{ github.repository_owner }}/course-metadata
          METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
          METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
          STUDENT_REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          COMMIT_SHA: ${{ github.sha }}
          SERVER_URL: ${{ github.server_url }}
        run: |
          if [ -f metadata.json ]; then
            python3 ./.autograde/upload_metadata.py \
              --metadata-file metadata.json \
              --metadata-repo "${METADATA_REPO}" \
              --branch "${METADATA_BRANCH:-main}" \
              --student-repo "${STUDENT_REPO}" \
              --run-id "${RUN_ID}" \
              --commit-sha "${COMMIT_SHA}" \
              --workflow grade \
              --server-url "${SERVER_URL}" \
              --external-host "${EXTERNAL_GITEA_HOST}"
          fi