Update content

This commit is contained in:
hblu 2025-12-03 12:46:38 +08:00
parent f6a83a1c80
commit 8dfb1bba8a
32 changed files with 0 additions and 2766 deletions

View File

@ -1,121 +0,0 @@
#!/usr/bin/env python3
"""
Aggregate multiple LLM grading results
"""
import json
import argparse
from pathlib import Path
def load_grade(filepath):
"""加载单个评分文件"""
try:
with open(filepath, 'r', encoding='utf-8') as f:
return json.load(f)
except FileNotFoundError:
print(f"Warning: {filepath} not found")
return None
except json.JSONDecodeError as e:
print(f"Error parsing {filepath}: {e}")
return None
def aggregate_grades(input_files, output_file, summary_file):
"""聚合多个评分文件"""
grades = []
total_score = 0
max_score = 0
need_review_count = 0
for input_file in input_files:
grade = load_grade(input_file)
if grade:
grades.append(grade)
# Support both formats: 'total' (llm_grade.py) or 'score' (legacy)
score = grade.get('total', grade.get('score', 0))
total_score += score
# Default to 10 points per question
max_score += grade.get('max_score', 10)
# Check whether manual review is needed
if 'need_review' in grade.get('flags', []) or grade.get('need_review', False):
need_review_count += 1
# Compute the total score
final_score = total_score if max_score > 0 else 0
final_max_score = max_score
# Build the aggregated result
result = {
'total_score': final_score,
'max_score': final_max_score,
'questions': len(grades),
'need_review': need_review_count > 0,
'details': grades
}
# Save the JSON output
with open(output_file, 'w', encoding='utf-8') as f:
json.dump(result, f, indent=2, ensure_ascii=False)
# Generate the Markdown summary
summary_lines = [
'# LLM 简答题评分汇总',
'',
f'**总分**: {final_score:.1f} / {final_max_score:.1f}',
f'**题目数**: {len(grades)}',
f'**需要人工审核**: {"是" if result["need_review"] else "否"}',
'',
'## 各题详情',
''
]
for i, grade in enumerate(grades, 1):
q_name = grade.get('question', f'Q{i}')
# Support both formats: 'total' (llm_grade.py) or 'score' (legacy)
score = grade.get('total', grade.get('score', 0))
max_q_score = grade.get('max_score', 10)
# Check whether manual review is needed
need_review = 'need_review' in grade.get('flags', []) or grade.get('need_review', False)
confidence = grade.get('confidence', 1.0)
summary_lines.append(f'### SA{i}')
summary_lines.append(f'- **得分**: {score:.2f} / {max_q_score:.1f}')
summary_lines.append(f'- **置信度**: {confidence:.2f}')
if need_review:
summary_lines.append('- ⚠️ **需要人工审核**')
# Show per-criterion scores
if 'criteria' in grade:
summary_lines.append('- **分项**:')
for criterion in grade['criteria']:
crit_id = criterion.get('id', '')
crit_score = criterion.get('score', 0)
crit_reason = criterion.get('reason', '')
summary_lines.append(f' - {crit_id}: {crit_score:.1f} - {crit_reason}')
summary_lines.append('')
with open(summary_file, 'w', encoding='utf-8') as f:
f.write('\n'.join(summary_lines))
print(f"✅ Aggregated {len(grades)} grades")
print(f" Total: {final_score:.1f} / {final_max_score:.1f}")
print(f" Output: {output_file}")
def main():
parser = argparse.ArgumentParser(description='Aggregate LLM grading results')
parser.add_argument('--inputs', nargs='+', required=True,
help='Input grade JSON files')
parser.add_argument('--out', required=True,
help='Output aggregated JSON file')
parser.add_argument('--summary', required=True,
help='Output summary Markdown file')
args = parser.parse_args()
aggregate_grades(args.inputs, args.out, args.summary)
if __name__ == '__main__':
main()
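
For reference, a minimal sketch of how this aggregator can be exercised; the module name `aggregate_llm` and all sample values are assumptions for illustration only.

```python
# Illustrative only: feed two hypothetical per-question grade files (in the
# 'total'/'criteria' format produced by llm_grade.py) through aggregate_grades().
import json
import tempfile
from pathlib import Path

from aggregate_llm import aggregate_grades  # assumed module name

tmp = Path(tempfile.mkdtemp())
samples = [
    {"question": "SA1", "total": 8, "max_score": 10, "confidence": 0.9,
     "flags": [], "criteria": [{"id": "C1", "score": 4, "reason": "ok"}]},
    {"question": "SA2", "total": 5, "max_score": 10, "confidence": 0.6,
     "flags": ["need_review"], "criteria": []},
]
inputs = []
for i, sample in enumerate(samples, 1):
    p = tmp / f"sa{i}_grade.json"
    p.write_text(json.dumps(sample, ensure_ascii=False), encoding="utf-8")
    inputs.append(str(p))

aggregate_grades(inputs, str(tmp / "llm_grade.json"), str(tmp / "llm_summary.md"))
# llm_grade.json now contains total_score=13, max_score=20, need_review=True
```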

View File

@ -1,372 +0,0 @@
#!/usr/bin/env python3
"""
Create the full grade metadata file.
Generates metadata.json from grade.json / final_grade.json / llm_grade.json,
including all details: failed tests, per-question breakdowns, etc.
"""
import json
import os
import sys
import re
from datetime import datetime
def extract_student_id():
"""从环境变量或仓库名中提取学生 ID"""
# Prefer the environment variable
student_id = os.getenv("STUDENT_ID")
if student_id:
return student_id
# Extract from the repository name
# Supported formats: org/assignment-stu_xxx or org/assignment-stu-xxx
repo = os.getenv("REPO", "")
if repo:
# Match the xxx-stu_yyy or xxx-stu-yyy pattern
match = re.search(r'-stu[_-]([a-zA-Z0-9_]+)$', repo)
if match:
return match.group(1)
# Also try to match stu_xxx anywhere in the path
match = re.search(r'stu[_-]([a-zA-Z0-9_]+)', repo)
if match:
return match.group(1)
return None
def extract_assignment_id():
"""从环境变量或仓库名中提取作业 ID"""
# Prefer the environment variable
assignment_id = os.getenv("ASSIGNMENT_ID")
if assignment_id:
return assignment_id
# Extract from the repository name
# Supported formats: org/assignment-stu_xxx or org/assignment-template
repo = os.getenv("REPO", "")
if repo:
# Take the repository-name part (drop the organization)
repo_name = repo.split("/")[-1] if "/" in repo else repo
# Strip the -stu_xxx or -template suffix
assignment = re.sub(r'-stu[_-][a-zA-Z0-9_]+$', '', repo_name)
assignment = re.sub(r'-template$', '', assignment)
if assignment:
return assignment
return "unknown"
def create_final_metadata(final_grade_file='final_grade.json'):
"""从 final_grade.json 创建元数据(期末大作业专用)"""
try:
with open(final_grade_file, 'r', encoding='utf-8') as f:
final_data = json.load(f)
assignment_id = extract_assignment_id()
student_id = extract_student_id()
total_score = final_data.get("total_score", 0)
max_score = final_data.get("max_score", 100)
breakdown = final_data.get("breakdown", {})
# Build the individual components
components = []
# Programming-test component
prog = breakdown.get("programming", {})
if prog:
prog_component = {
"type": "programming_java",
"score": prog.get("score", 0),
"max_score": prog.get("max_score", 80),
"details": {
"groups": prog.get("groups", {})
}
}
components.append(prog_component)
# REPORT.md component
report = breakdown.get("report", {})
if report:
report_component = {
"type": "llm_report",
"score": report.get("score", 0),
"max_score": report.get("max_score", 10),
"details": {
"flags": report.get("flags", []),
"confidence": report.get("confidence"),
"criteria": report.get("criteria", []) # LLM 各评分项及理由
}
}
components.append(report_component)
# FRONTEND.md component
frontend = breakdown.get("frontend", {})
if frontend:
frontend_component = {
"type": "llm_frontend",
"score": frontend.get("score", 0),
"max_score": frontend.get("max_score", 10),
"details": {
"flags": frontend.get("flags", []),
"confidence": frontend.get("confidence"),
"criteria": frontend.get("criteria", []) # LLM 各评分项及理由
}
}
components.append(frontend_component)
metadata = {
"version": "1.0",
"assignment": assignment_id,
"student_id": student_id,
"components": components,
"total_score": round(total_score, 2),
"total_max_score": max_score,
"timestamp": datetime.now().isoformat(),
"generator": "gitea-autograde"
}
return metadata
except Exception as e:
print(f"Error creating final metadata: {e}", file=sys.stderr)
return {}
def create_grade_metadata(grade_file='grade.json'):
"""从 grade.json 创建元数据,包含所有详细信息"""
try:
with open(grade_file, 'r') as f:
grade_data = json.load(f)
assignment_id = extract_assignment_id()
student_id = extract_student_id()
language = os.getenv("LANGUAGE", "java")
# Extract all relevant fields
final_score = grade_data.get("final_score", grade_data.get("total_score", grade_data.get("score", 0)))
base_score = grade_data.get("base_score", final_score)
penalty = grade_data.get("penalty", 0)
passed = grade_data.get("passed", 0)
total = grade_data.get("total", 0)
fails = grade_data.get("fails", [])
max_score = grade_data.get("max_score", 100)
test_framework = grade_data.get("test_framework", "junit")
coverage = grade_data.get("coverage")
raw_score = grade_data.get("raw_score")
groups = grade_data.get("groups", {})
# Derive the type field from the language
type_map = {
"python": "programming_python",
"java": "programming_java",
"r": "programming_r"
}
component_type = type_map.get(language, f"programming_{language}")
component = {
"type": component_type,
"language": language,
"score": round(final_score, 2),
"max_score": max_score,
"details": {
"passed": passed,
"total": total,
"base_score": round(base_score, 2),
"penalty": round(penalty, 2),
"coverage": round(coverage, 2) if coverage else None,
"raw_score": round(raw_score, 2) if raw_score else None,
"failed_tests": fails,
"test_framework": test_framework,
"groups": groups
}
}
metadata = {
"version": "1.0",
"assignment": assignment_id,
"student_id": student_id,
"components": [component],
"total_score": round(final_score, 2),
"total_max_score": max_score,
"timestamp": datetime.now().isoformat(),
"generator": "gitea-autograde"
}
return metadata
except Exception as e:
print(f"Error creating grade metadata: {e}", file=sys.stderr)
return {}
def create_llm_metadata(llm_grade_file='artifacts/llm_grade.json'):
"""从 llm_grade.json 创建元数据,包含所有详细信息"""
try:
with open(llm_grade_file, 'r') as f:
llm_data = json.load(f)
assignment_id = extract_assignment_id()
student_id = extract_student_id()
# Extract the aggregated fields
total_score = llm_data.get("total_score", llm_data.get("total", 0))
max_score = llm_data.get("max_score", 30)
need_review = llm_data.get("need_review", False)
questions_data = llm_data.get("details", llm_data.get("questions", []))
# Build per-question details
question_details = []
for i, q_data in enumerate(questions_data, 1):
q_score = q_data.get("total", q_data.get("score", 0))
q_max = q_data.get("max_score", 10)
q_confidence = q_data.get("confidence", 1.0)
q_flags = q_data.get("flags", [])
q_need_review = "need_review" in q_flags or q_data.get("need_review", False)
q_criteria = q_data.get("criteria", [])
# Normalize the criteria format
formatted_criteria = []
for crit in q_criteria:
formatted_criteria.append({
"id": crit.get("id", ""),
"score": round(float(crit.get("score", 0)), 2),
"reason": crit.get("reason", "")
})
question_detail = {
"question_id": f"SA{i}",
"question_name": q_data.get("question", f"SA{i}"),
"score": round(float(q_score), 2),
"max_score": q_max,
"confidence": round(float(q_confidence), 2),
"need_review": q_need_review,
"flags": q_flags,
"criteria": formatted_criteria
}
question_details.append(question_detail)
component = {
"type": "llm_essay",
"score": round(float(total_score), 2),
"max_score": max_score,
"details": {
"questions": len(question_details),
"need_review": need_review,
"question_details": question_details
}
}
metadata = {
"version": "1.0",
"assignment": assignment_id,
"student_id": student_id,
"components": [component],
"total_score": round(float(total_score), 2),
"total_max_score": max_score,
"timestamp": datetime.now().isoformat(),
"generator": "gitea-autograde"
}
return metadata
except Exception as e:
print(f"Error creating LLM metadata: {e}", file=sys.stderr)
return {}
def create_objective_metadata(objective_file='objective_grade.json'):
"""从 objective_grade.json 创建元数据"""
try:
with open(objective_file, 'r', encoding='utf-8') as f:
objective_data = json.load(f)
assignment_id = extract_assignment_id()
student_id = extract_student_id()
total_score = objective_data.get("score", 0)
max_score = objective_data.get("max_score", 0)
components = objective_data.get("components", [])
formatted_components = []
for comp in components:
comp_type = comp.get("type", "objective")
formatted_components.append({
"type": f"objective_{comp_type}",
"score": comp.get("score", 0),
"max_score": comp.get("max_score", 0),
"details": comp.get("details", {})
})
if not formatted_components:
formatted_components.append({
"type": "objective_total",
"score": total_score,
"max_score": max_score,
"details": {}
})
metadata = {
"version": "1.0",
"assignment": assignment_id,
"student_id": student_id,
"components": formatted_components,
"total_score": total_score,
"total_max_score": max_score,
"timestamp": datetime.now().isoformat(),
"generator": "gitea-autograde"
}
return metadata
except Exception as e:
print(f"Error creating objective metadata: {e}", file=sys.stderr)
return {}
def main():
"""主函数"""
# 检查命令行参数或环境变量
grade_type = os.getenv("GRADE_TYPE", "programming").lower()
grade_file_override = os.getenv("GRADE_FILE")
if grade_type == "final":
# Final-project grade (programming + reports)
final_file = grade_file_override or "final_grade.json"
if os.path.exists(final_file):
metadata = create_final_metadata(final_file)
else:
print(f"Error: {final_file} not found", file=sys.stderr)
metadata = {}
elif grade_type == "llm":
# LLM grade
llm_file = grade_file_override or "artifacts/llm_grade.json"
if os.path.exists(llm_file):
metadata = create_llm_metadata(llm_file)
elif os.path.exists("llm_grade.json"):
metadata = create_llm_metadata("llm_grade.json")
else:
print(f"Error: {llm_file} not found", file=sys.stderr)
metadata = {}
elif grade_type == "objective":
objective_file = grade_file_override or "objective_grade.json"
if os.path.exists(objective_file):
metadata = create_objective_metadata(objective_file)
else:
print(f"Error: {objective_file} not found", file=sys.stderr)
metadata = {}
else:
# Programming grade
grade_file = grade_file_override or "grade.json"
if os.path.exists(grade_file):
metadata = create_grade_metadata(grade_file)
else:
print(f"Error: {grade_file} not found", file=sys.stderr)
metadata = {}
# Print to stdout
print(json.dumps(metadata, ensure_ascii=False, indent=2))
if __name__ == "__main__":
main()
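
For reference, the approximate shape of the metadata this script emits for GRADE_TYPE=programming; every concrete value below is invented for illustration.

```python
# Illustrative only: approximate shape of the emitted metadata for
# GRADE_TYPE=programming. All values are made up.
example_metadata = {
    "version": "1.0",
    "assignment": "hw1",        # from ASSIGNMENT_ID or parsed from REPO
    "student_id": "20250001",   # from STUDENT_ID or parsed from REPO
    "components": [{
        "type": "programming_java",
        "language": "java",
        "score": 85.0,
        "max_score": 100,
        "details": {
            "passed": 17, "total": 20,
            "base_score": 85.0, "penalty": 0.0,
            "coverage": None, "raw_score": None,
            "failed_tests": ["com.example.FooTest.testBar"],
            "test_framework": "junit",
            "groups": {},
        },
    }],
    "total_score": 85.0,
    "total_max_score": 100,
    "timestamp": "2025-12-03T12:00:00",
    "generator": "gitea-autograde",
}
```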

View File

@ -1,187 +0,0 @@
#!/usr/bin/env python3
"""
Programming-assignment grading script.
Parses the JUnit XML report, computes the score, applies any late penalty, and generates grade.json and summary.md.
"""
import argparse
import xml.etree.ElementTree as ET
import json
import subprocess
import os
import time
import sys
from pathlib import Path
from dotenv import load_dotenv
# Load environment variables (from a .env file or the environment)
load_dotenv()
def commit_ts():
"""获取最后一次提交的时间戳Unix 时间戳)"""
try:
out = subprocess.check_output(
["git", "log", "-1", "--format=%ct"],
stderr=subprocess.DEVNULL
).decode().strip()
return int(out)
except Exception:
return int(time.time())
def parse_junit(junit_path):
"""
Parse a JUnit XML report.
Returns
-------
passed : int
Number of passed tests
total : int
Total number of tests
fails : list
Names of the failed tests
"""
if not os.path.exists(junit_path):
return (0, 0, [])
try:
root = ET.parse(junit_path).getroot()
total = 0
passed = 0
fails = []
for testsuite in root.iter("testsuite"):
for testcase in testsuite.iter("testcase"):
total += 1
# Check for failure, error, or skipped child elements
if list(testcase):
classname = testcase.get("classname", "")
name = testcase.get("name", "")
full_name = f"{classname}.{name}" if classname else name
fails.append(full_name)
else:
passed += 1
return (passed, total, fails)
except Exception as e:
print(f"Error parsing JUnit XML: {e}", file=sys.stderr)
return (0, 0, [])
def calculate_late_penalty(deadline_str):
"""
Compute the late-submission penalty.
Parameters
----------
deadline_str : str
Deadline in ISO format, e.g. "2025-03-15T23:59:59+08:00"
Returns
-------
penalty : float
Points deducted (0-30)
"""
if not deadline_str:
return 0.0
try:
# Parse the deadline (multiple formats supported)
deadline_str = deadline_str.strip()
# Strip timezone info (simplified handling)
if '+' in deadline_str:
deadline_str = deadline_str.split('+')[0]
elif 'Z' in deadline_str:
deadline_str = deadline_str.replace('Z', '')
# Parse the timestamp
if 'T' in deadline_str:
dl = time.mktime(time.strptime(deadline_str[:19], "%Y-%m-%dT%H:%M:%S"))
else:
dl = time.mktime(time.strptime(deadline_str[:19], "%Y-%m-%d %H:%M:%S"))
commit_time = commit_ts()
late_sec = max(0, commit_time - dl)
days = late_sec / 86400
# Penalty rule: 10 points on the first day, 5 more per additional day, capped at 30
if days > 0:
penalty = min(30.0, 10.0 + 5.0 * days)
else:
penalty = 0.0
return round(penalty, 2)
except Exception as e:
print(f"Error calculating late penalty: {e}", file=sys.stderr)
return 0.0
def main():
parser = argparse.ArgumentParser(description="Grade programming assignments from JUnit XML")
parser.add_argument("--junit", required=True, help="Path to JUnit XML file")
parser.add_argument("--out", default="grade.json", help="Output JSON file")
parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
parser.add_argument("--bonus", default=None, help="Optional bonus file (e.g., lintr.rds)")
args = parser.parse_args()
# Parse the JUnit XML
passed, total, fails = parse_junit(args.junit)
# Compute the base score
if total > 0:
base_score = 100.0 * (passed / total)
else:
base_score = 0.0
# Compute the late penalty
deadline = os.getenv("DEADLINE", "")
penalty = calculate_late_penalty(deadline)
# Final score
final_score = max(0.0, round(base_score - penalty, 2))
# Generate grade.json
grade_data = {
"score": final_score,
"base_score": round(base_score, 2),
"penalty": penalty,
"passed": passed,
"total": total,
"fails": fails,
"timestamp": int(time.time())
}
with open(args.out, "w", encoding="utf-8") as f:
json.dump(grade_data, f, ensure_ascii=False, indent=2)
# Generate summary.md
with open(args.summary, "w", encoding="utf-8") as f:
f.write("# 成绩报告\n\n")
f.write(f"- **通过用例**{passed}/{total}\n")
f.write(f"- **原始分**{base_score:.2f}/100\n")
if penalty > 0:
f.write(f"- **迟交扣分**-{penalty:.2f}\n")
f.write(f"- **最终分****{final_score:.2f}/100**\n\n")
if fails:
f.write("## 未通过的测试\n\n")
for fail in fails:
f.write(f"- {fail}\n")
f.write("\n")
if deadline:
f.write(f"## 截止时间\n\n")
f.write(f"- 截止时间:{deadline}\n")
commit_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(commit_ts()))
f.write(f"- 提交时间:{commit_time_str}\n")
print(f"Grading complete: {final_score:.2f}/100 ({passed}/{total} tests passed)")
if __name__ == "__main__":
main()
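
As a worked example of the arithmetic above (all values are hypothetical):

```python
# Illustrative only: the scoring arithmetic used above.
passed, total = 17, 20
base_score = 100.0 * passed / total          # 85.0
days_late = 1.5                              # committed 1.5 days after DEADLINE
penalty = min(30.0, 10.0 + 5.0 * days_late)  # 17.5
final_score = max(0.0, round(base_score - penalty, 2))
assert final_score == 67.5
```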

View File

@ -1,249 +0,0 @@
#!/usr/bin/env python3
"""
LLM short-answer grading script.
Calls an LLM API, grades short answers against the given rubric, and writes the result as JSON.
"""
import os
import json
import argparse
import requests
import sys
from pathlib import Path
from dotenv import load_dotenv
# Load environment variables (from a .env file or the environment)
load_dotenv()
def read_file(path):
"""读取文件内容"""
if os.path.exists(path):
return open(path, 'r', encoding='utf-8').read()
return ""
def read_file_or_string(value):
"""
If value is an existing file path, read and return the file's contents;
otherwise return value itself as a string.
"""
if os.path.exists(value):
return open(value, 'r', encoding='utf-8').read()
return value  # treat as a literal string
PROMPT_TEMPLATE = """你是严格且一致的助教,按提供的评分量表为学生的简答题评分。
评分规则
- 严格依据量表中各评分项的 scoring_guide 进行评分
- 每个评分项只能给出 scoring_guide 中定义的整数分值 0, 1, 2, 3, 4
- 不输出任何解释性文本只输出 JSON
输出格式
{{
"total": number (各项分数之和),
"criteria": [
{{"id": "评分项id", "score": 整数(必须是scoring_guide中定义的分值), "reason": "简短评语"}},
...
],
"flags": [],
"confidence": number(0-1, 评分置信度)
}}
重要
- score 必须是整数只能是 scoring_guide 中定义的分值 0/1/2/3/4
- 不要给出 2.5, 3.5 这样的中间值
- total 必须等于所有 criteria score 之和
- 如果答案与题目无关或为空total=0并加 flag "need_review"
题目
<<<{question}>>>
评分量表
<<<{rubric}>>>
学生答案
<<<{answer}>>>
"""
def call_llm(url, key, model, prompt):
"""
Call the LLM API.
Parameters
----------
url : str
API endpoint URL
key : str
API key
model : str
Model name
prompt : str
Prompt text
Returns
-------
dict
JSON result returned by the LLM
"""
headers = {
"Authorization": f"Bearer {key}",
"Content-Type": "application/json"
}
data = {
"model": model,
"temperature": 0,
"top_p": 1,
"messages": [{"role": "user", "content": prompt}],
"response_format": {"type": "json_object"}
}
try:
# Timeouts: 10 s to connect, 60 s to read
response = requests.post(
url,
headers=headers,
json=data,
timeout=(10, 60)
)
response.raise_for_status()
result = response.json()
content = result.get("choices", [{}])[0].get("message", {}).get("content", "{}")
return json.loads(content)
except requests.exceptions.Timeout as e:
print(f"LLM API request timeout: {e}", file=sys.stderr)
raise
except requests.exceptions.HTTPError as e:
print(f"LLM API HTTP error: {e} (status: {response.status_code})", file=sys.stderr)
raise
except requests.exceptions.RequestException as e:
print(f"LLM API request failed: {e}", file=sys.stderr)
raise
except json.JSONDecodeError as e:
print(f"Failed to parse LLM response as JSON: {e}", file=sys.stderr)
raise
def main():
parser = argparse.ArgumentParser(description="Grade short answer questions using LLM")
parser.add_argument("--question", required=True, help="Path to question file")
parser.add_argument("--answer", required=True, help="Path to answer file")
parser.add_argument("--rubric", required=True, help="Path to rubric JSON file")
parser.add_argument("--out", default="grade.json", help="Output JSON file")
parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
parser.add_argument("--model", default=os.getenv("LLM_MODEL", "deepseek-chat"))
parser.add_argument("--api_url", default=os.getenv("LLM_API_URL", "https://api.deepseek.com/chat/completions"))
parser.add_argument("--api_key", default=os.getenv("LLM_API_KEY", ""))
args = parser.parse_args()
# Validate required configuration
if not args.api_key:
print("Warning: LLM_API_KEY not set. LLM grading may fail.", file=sys.stderr)
# Read files or literal strings
# question may be a file path or the question text itself
question = read_file_or_string(args.question).strip()
# answer and rubric must be file paths
answer = read_file(args.answer).strip()
rubric_text = read_file(args.rubric).strip()
if not question or not answer:
print(f"Warning: Empty question or answer file", file=sys.stderr)
resp = {
"total": 0,
"criteria": [],
"flags": ["need_review", "empty_answer"],
"confidence": 0.0
}
else:
# Call the LLM
try:
prompt = PROMPT_TEMPLATE.format(
question=question,
rubric=rubric_text,
answer=answer
)
resp = call_llm(args.api_url, args.api_key, args.model, prompt)
except Exception as e:
print(f"LLM grading failed: {e}", file=sys.stderr)
resp = {
"total": 0,
"criteria": [],
"flags": ["need_review", "llm_error"],
"confidence": 0.0
}
# Ensure per-criterion scores are integers, then recompute total
criteria = resp.get("criteria", [])
if criteria:
for c in criteria:
# Round to an integer (the LLM may return fractional scores)
score = c.get("score", 0)
c["score"] = round(float(score))
# Recompute total as the sum of per-criterion scores
calculated_total = sum(c.get("score", 0) for c in criteria)
resp["total"] = calculated_total
# Borderline band: automatically flag for manual review
try:
rubric_data = json.loads(rubric_text)
lo, hi = rubric_data.get("borderline_band", [None, None])
total = float(resp.get("total", 0))
flags = set(resp.get("flags", []))
if lo is not None and hi is not None and lo <= total <= hi:
flags.add("need_review")
# Low confidence: flag for manual review
confidence = resp.get("confidence", 1.0)
if confidence < 0.7:
flags.add("need_review")
resp["flags"] = sorted(list(flags))
except Exception:
pass
# Save grade.json
with open(args.out, "w", encoding="utf-8") as f:
json.dump(resp, f, ensure_ascii=False, indent=2)
# Generate summary.md
try:
rubric_data = json.loads(rubric_text)
max_score = rubric_data.get("max_score", 10)
except Exception:
max_score = 10
lines = [
f"# 简答题评分",
f"",
f"- **总分****{resp.get('total', 0):.2f} / {max_score}**",
f"- **置信度**{resp.get('confidence', 0):.2f}",
f"- **标记**{', '.join(resp.get('flags', [])) or ''}",
f"",
f"## 分项评分"
]
for criterion in resp.get("criteria", []):
criterion_id = criterion.get("id", "")
score = criterion.get("score", 0)
reason = criterion.get("reason", "")
lines.append(f"- **{criterion_id}**: {score}")
if reason:
lines.append(f" - {reason}")
with open(args.summary, "w", encoding="utf-8") as f:
f.write("\n".join(lines))
print(f"LLM grading complete: {resp.get('total', 0):.2f}/{max_score}")
if __name__ == "__main__":
main()
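
For reference, a sketch of the rubric file this script reads; only max_score and borderline_band are consumed directly here, while the criteria layout below is an assumed shape that is passed to the LLM verbatim inside the prompt.

```python
# Illustrative only: an assumed rubric shape. The script itself reads just
# max_score and borderline_band; criteria/scoring_guide go to the LLM as text.
example_rubric = {
    "max_score": 10,
    "borderline_band": [5, 7],  # totals in [5, 7] are auto-flagged need_review
    "criteria": [
        {"id": "C1", "desc": "Correct definition",
         "scoring_guide": {"0": "missing", "2": "partial", "4": "complete"}},
        {"id": "C2", "desc": "Worked example given",
         "scoring_guide": {"0": "none", "1": "weak", "3": "clear"}},
    ],
}
# A returned total of 6 falls inside borderline_band, so "need_review" is added;
# a confidence below 0.7 adds the same flag.
```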

View File

@ -1,444 +0,0 @@
#!/usr/bin/env python3
"""
Multiple-choice / true-false grading script.
Reads student answers and the standard answers, and generates a grade JSON file.
"""
import json
import argparse
import sys
from datetime import datetime
from pathlib import Path
def load_answers(answer_file):
"""
Load the student answer file; both JSON and plain-text formats are supported.
JSON format example:
{
"MC1": "A",
"MC2": "B",
"TF1": true,
"TF2": false
}
Text format example (one answer per line):
A
B
true
false
"""
try:
with open(answer_file, 'r', encoding='utf-8') as f:
content = f.read().strip()
# Try to parse as JSON
if content.startswith('{'):
return json.loads(content)
# Otherwise read line by line, skipping blank lines and comments
lines = [line.strip() for line in content.split('\n') if line.strip() and not line.strip().startswith('#')]
# Convert to dict form: {"MC1": answer, "MC2": answer, ...}
answers = {}
for i, line in enumerate(lines, 1):
# Try to infer the question type
if line.lower() in ('true', 'false', 't', 'f'):
question_id = f"TF{len([k for k in answers if k.startswith('TF')])+1}"
answers[question_id] = line.lower() in ('true', 't')
else:
question_id = f"MC{len([k for k in answers if k.startswith('MC')])+1}"
answers[question_id] = line.upper()
return answers
except Exception as e:
print(f"Error loading answers: {e}", file=sys.stderr)
return {}
def load_standard_answers(std_file):
"""加载标准答案文件JSON 格式)"""
try:
with open(std_file, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
print(f"Error loading standard answers: {e}", file=sys.stderr)
return {}
def grade_multiple_choice(student_answers, standard_answers, question_texts=None):
"""
Grade multiple-choice questions.
Parameters
----------
student_answers : dict
学生答案格式 {"MC1": "A", "MC2": "B", ...}
standard_answers : dict
标准答案格式 {"MC1": "A", "MC2": "B", ...}
question_texts : dict, optional
题目文本格式 {"MC1": "题目文本", ...}
Returns
-------
dict
Grade data
"""
questions = []
correct_count = 0
for question_id, std_answer in standard_answers.items():
if not question_id.startswith('MC'):
continue
student_answer = student_answers.get(question_id, "")
is_correct = str(student_answer).upper() == str(std_answer).upper()
if is_correct:
correct_count += 1
score = 1
else:
score = 0
questions.append({
"question_id": question_id,
"question_text": question_texts.get(question_id, "") if question_texts else "",
"correct_answer": str(std_answer).upper(),
"student_answer": str(student_answer).upper(),
"correct": is_correct,
"score": score,
"max_score": 1
})
total_count = len(questions)
return {
"type": "multiple_choice",
"score": correct_count,
"max_score": total_count,
"details": {
"correct": correct_count,
"total": total_count,
"questions": questions
}
}
def grade_true_false(student_answers, standard_answers, question_texts=None):
"""
Grade true/false questions.
Parameters
----------
student_answers : dict
学生答案格式 {"TF1": true, "TF2": false, ...}
standard_answers : dict
标准答案格式 {"TF1": true, "TF2": false, ...}
question_texts : dict, optional
Question texts
Returns
-------
dict
Grade data
"""
questions = []
correct_count = 0
for question_id, std_answer in standard_answers.items():
if not question_id.startswith('TF'):
continue
student_answer = student_answers.get(question_id, None)
# Normalize boolean values
if isinstance(student_answer, str):
student_answer = student_answer.lower() in ('true', 't', '1', 'yes')
is_correct = bool(student_answer) == bool(std_answer)
if is_correct:
correct_count += 1
score = 1
else:
score = 0
questions.append({
"question_id": question_id,
"question_text": question_texts.get(question_id, "") if question_texts else "",
"correct_answer": bool(std_answer),
"student_answer": bool(student_answer) if student_answer is not None else None,
"correct": is_correct,
"score": score,
"max_score": 1
})
total_count = len(questions)
return {
"type": "true_false",
"score": correct_count,
"max_score": total_count,
"details": {
"correct": correct_count,
"total": total_count,
"questions": questions
}
}
def grade_multiple_select(student_answers, standard_answers, question_texts=None):
"""
Grade multiple-select questions.
Parameters
----------
student_answers : dict
学生答案格式 {"MS1": ["A", "B"], "MS2": ["C"], ...}
standard_answers : dict
标准答案格式 {"MS1": ["A", "B"], "MS2": ["C"], ...}
question_texts : dict, optional
Question texts
Returns
-------
dict
Grade data
"""
questions = []
correct_count = 0
for question_id, std_answer in standard_answers.items():
if not question_id.startswith('MS'):
continue
student_answer = student_answers.get(question_id, [])
# Normalize answers (uppercase, compared as sets)
if isinstance(student_answer, str):
student_answer = [student_answer]
if not isinstance(student_answer, list):
student_answer = []
std_set = set([str(a).upper() for a in std_answer])
stu_set = set([str(a).upper() for a in student_answer])
is_correct = std_set == stu_set
if is_correct:
correct_count += 1
score = 1
else:
score = 0
questions.append({
"question_id": question_id,
"question_text": question_texts.get(question_id, "") if question_texts else "",
"correct_answer": sorted(list(std_set)),
"student_answer": sorted(list(stu_set)) if stu_set else [],
"correct": is_correct,
"score": score,
"max_score": 1
})
total_count = len(questions)
return {
"type": "multiple_select",
"score": correct_count,
"max_score": total_count,
"details": {
"correct": correct_count,
"total": total_count,
"questions": questions
}
}
def grade_fill_blank(student_answers, standard_answers, question_texts=None):
"""
Grade fill-in-the-blank questions.
Parameters
----------
student_answers : dict
学生答案格式 {"FB1": "答案", "FB2": ["答案1", "答案2"], ...}
standard_answers : dict
Standard answers, same format as above
question_texts : dict, optional
Question texts
Returns
-------
dict
Grade data
"""
questions = []
correct_count = 0
def normalize_answer(ans):
"""规范化答案:去除空格、转小写"""
if isinstance(ans, str):
return ans.strip().lower()
elif isinstance(ans, list):
return [a.strip().lower() for a in ans]
return ans
def compare_answers(student, standard):
"""比较答案是否相等"""
student_norm = normalize_answer(student)
standard_norm = normalize_answer(standard)
if isinstance(standard_norm, list) and isinstance(student_norm, list):
return student_norm == standard_norm
elif isinstance(standard_norm, str) and isinstance(student_norm, str):
return student_norm == standard_norm
return False
for question_id, std_answer in standard_answers.items():
if not question_id.startswith('FB'):
continue
student_answer = student_answers.get(question_id, "")
is_correct = compare_answers(student_answer, std_answer)
if is_correct:
correct_count += 1
score = 1
else:
score = 0
questions.append({
"question_id": question_id,
"question_text": question_texts.get(question_id, "") if question_texts else "",
"correct_answer": std_answer,
"student_answer": student_answer,
"correct": is_correct,
"score": score,
"max_score": 1
})
total_count = len(questions)
return {
"type": "fill_blank",
"score": correct_count,
"max_score": total_count,
"details": {
"correct": correct_count,
"total": total_count,
"questions": questions
}
}
def main():
parser = argparse.ArgumentParser(description="Grade objective questions")
parser.add_argument("--answers", required=True, help="Student answers file (JSON or text)")
parser.add_argument("--standard", required=True, help="Standard answers file (JSON)")
parser.add_argument("--questions", help="Question texts file (JSON, optional)")
parser.add_argument("--out", default="grade.json", help="Output grade JSON file")
parser.add_argument("--summary", default="summary.md", help="Output summary markdown file")
parser.add_argument("--type", choices=['mc', 'tf', 'ms', 'fb', 'all'], default='all',
help="Question type to grade")
args = parser.parse_args()
# Load input files
student_answers = load_answers(args.answers)
standard_answers = load_standard_answers(args.standard)
question_texts = None
if args.questions:
try:
with open(args.questions, 'r', encoding='utf-8') as f:
question_texts = json.load(f)
except Exception as e:
print(f"Warning: Could not load question texts: {e}", file=sys.stderr)
if not student_answers or not standard_answers:
print("Error: Could not load answers", file=sys.stderr)
sys.exit(1)
# Grade each question type
components = []
total_score = 0
total_max_score = 0
if args.type in ('mc', 'all'):
mc_grade = grade_multiple_choice(student_answers, standard_answers, question_texts)
if mc_grade['details']['total'] > 0:
components.append(mc_grade)
total_score += mc_grade['score']
total_max_score += mc_grade['max_score']
if args.type in ('tf', 'all'):
tf_grade = grade_true_false(student_answers, standard_answers, question_texts)
if tf_grade['details']['total'] > 0:
components.append(tf_grade)
total_score += tf_grade['score']
total_max_score += tf_grade['max_score']
if args.type in ('ms', 'all'):
ms_grade = grade_multiple_select(student_answers, standard_answers, question_texts)
if ms_grade['details']['total'] > 0:
components.append(ms_grade)
total_score += ms_grade['score']
total_max_score += ms_grade['max_score']
if args.type in ('fb', 'all'):
fb_grade = grade_fill_blank(student_answers, standard_answers, question_texts)
if fb_grade['details']['total'] > 0:
components.append(fb_grade)
total_score += fb_grade['score']
total_max_score += fb_grade['max_score']
# Generate grade.json
grade_data = {
"score": total_score,
"max_score": total_max_score,
"components": components,
"timestamp": int(__import__('time').time())
}
with open(args.out, 'w', encoding='utf-8') as f:
json.dump(grade_data, f, ensure_ascii=False, indent=2)
# Generate summary.md
summary_lines = [
"# 客观题评分\n",
f"- **总分**{total_score} / {total_max_score}\n",
f"- **组件数**{len(components)}\n",
""
]
for comp in components:
comp_type = comp['type']
correct = comp['details']['correct']
total = comp['details']['total']
type_names = {
'multiple_choice': '选择题',
'true_false': '判断题',
'multiple_select': '多选题',
'fill_blank': '填空题'
}
type_name = type_names.get(comp_type, comp_type)
summary_lines.append(f"## {type_name}\n")
summary_lines.append(f"- **正确**{correct} / {total}\n")
summary_lines.append("")
with open(args.summary, 'w', encoding='utf-8') as f:
f.write("\n".join(summary_lines))
print(f"Grading complete: {total_score}/{total_max_score}")
return 0
if __name__ == "__main__":
sys.exit(main())
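
A minimal sketch of calling the multiple-choice grader directly; the module name `objective_grade` and the answers are assumptions for illustration.

```python
# Illustrative only: grade two multiple-choice questions in memory.
from objective_grade import grade_multiple_choice  # assumed module name

student = {"MC1": "a", "MC2": "C"}
standard = {"MC1": "A", "MC2": "B"}
result = grade_multiple_choice(student, standard)
# Comparison is case-insensitive, so MC1 is correct and MC2 is not.
assert result["score"] == 1 and result["max_score"] == 2
assert result["details"]["questions"][1]["correct"] is False
```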

View File

@ -1,155 +0,0 @@
#!/usr/bin/env python3
"""
Post a comment to a Gitea PR.
Reads configuration from environment variables and posts a comment to the given PR.
Supports embedding JSON data in the Markdown comment for later structured extraction.
"""
import os
import sys
import json
import requests
from datetime import datetime
def create_comment_with_metadata(summary, commit_sha, comment_type='grade', metadata=None):
"""
Build the comment body, optionally embedding structured metadata.
Parameters
----------
summary : str
Human-readable Markdown summary
commit_sha : str
Commit SHA
comment_type : str
Comment type ('grade', 'llm', 'combined')
metadata : dict, optional
Structured grade data, embedded as JSON
Returns
-------
str
Full comment body (Markdown + embedded JSON)
"""
commit_short = commit_sha[:7] if commit_sha else 'unknown'
# Choose the title and icon based on the comment type
if comment_type == 'llm':
title = "🤖 LLM 简答题评分结果"
footer = "*此评论由 Gitea Actions 自动生成(使用 DeepSeek API | Commit: `{}`*"
elif comment_type == 'combined':
title = "📊 综合评分结果"
footer = "*此评论由 Gitea Actions 自动生成 | Commit: `{}`*"
else:
title = "🤖 自动评分结果"
footer = "*此评论由 Gitea Actions 自动生成 | Commit: `{}`*"
# Build the comment
parts = [
f"## {title}",
"",
summary,
""
]
# If metadata was provided, embed it as JSON
if metadata:
# Ensure the metadata has a version and timestamp
if 'version' not in metadata:
metadata['version'] = '1.0'
if 'timestamp' not in metadata:
metadata['timestamp'] = datetime.now().isoformat()
# Embed the JSON in a Markdown code block (more reliable; Gitea preserves it)
# Placed at the end of the comment, where it is less conspicuous to students
json_str = json.dumps(metadata, ensure_ascii=False, indent=2)
parts.extend([
"",
"---",
"",
"<!-- GRADE_METADATA -->",
"```json",
json_str,
"```",
""
])
parts.extend([
footer.format(commit_short)
])
return "\n".join(parts)
def main():
# Read configuration from environment variables
api_url = os.environ.get('API_URL', '')
repo = os.environ.get('REPO', '')
pr_number = os.environ.get('PR_NUMBER', '')
token = os.environ.get('GITEA_TOKEN', '')
summary = os.environ.get('SUMMARY', '')
commit_sha = os.environ.get('COMMIT_SHA', '')
comment_type = os.environ.get('COMMENT_TYPE', 'grade')
# Optional: read JSON metadata from an environment variable
metadata_str = os.environ.get('GRADE_METADATA', '')
metadata = None
if metadata_str:
try:
metadata = json.loads(metadata_str)
except json.JSONDecodeError as e:
print(f"Warning: Failed to parse GRADE_METADATA: {e}", file=sys.stderr)
# Validate required parameters
if not all([api_url, repo, pr_number, token, summary]):
print("Error: Missing required environment variables", file=sys.stderr)
print(f"API_URL: {api_url}", file=sys.stderr)
print(f"REPO: {repo}", file=sys.stderr)
print(f"PR_NUMBER: {pr_number}", file=sys.stderr)
print(f"GITEA_TOKEN: {'set' if token else 'not set'}", file=sys.stderr)
print(f"SUMMARY: {'set' if summary else 'not set'}", file=sys.stderr)
sys.exit(1)
# Build the comment body (with metadata)
comment_body = create_comment_with_metadata(
summary=summary,
commit_sha=commit_sha,
comment_type=comment_type,
metadata=metadata
)
# Build the API URL
comment_url = f"{api_url}/repos/{repo}/issues/{pr_number}/comments"
# Send the request
headers = {
"Authorization": f"token {token}",
"Content-Type": "application/json"
}
data = {"body": comment_body}
try:
print(f"Posting comment to: {comment_url}")
if metadata:
print("✓ Comment includes structured metadata")
response = requests.post(comment_url, headers=headers, json=data, timeout=30)
response.raise_for_status()
print("✅ Comment posted successfully to PR")
return 0
except requests.exceptions.Timeout:
print("⚠️ Request timeout", file=sys.stderr)
return 1
except requests.exceptions.HTTPError as e:
print(f"⚠️ HTTP error: {e}", file=sys.stderr)
print(f"Response: {response.text}", file=sys.stderr)
return 1
except Exception as e:
print(f"⚠️ Failed to post comment: {e}", file=sys.stderr)
return 1
if __name__ == "__main__":
sys.exit(main())
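
Because the metadata is embedded after the GRADE_METADATA marker in a fenced json block, it can later be recovered from the comment body. A minimal sketch (the helper name is hypothetical):

```python
# Illustrative only: recover the embedded metadata from a posted comment body.
import json
import re

# Matches the marker, the opening json code fence, the payload, and the closing fence.
_PATTERN = re.compile(r"<!-- GRADE_METADATA -->\s*`{3}json\s*(.*?)`{3}", re.S)

def extract_grade_metadata(comment_body: str):
    """Return the metadata dict embedded in a grading comment, or None."""
    m = _PATTERN.search(comment_body)
    return json.loads(m.group(1)) if m else None
```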

View File

@ -1,164 +0,0 @@
#!/usr/bin/env python3
"""
Generic test runner - runs tests according to the language configuration and produces JUnit XML.
Supported languages:
- python: pytest
- java: maven (mvn test)
- r: testthat (via the JUnit reporter)
Environment variables:
- LANGUAGE: programming language (python/java/r)
- TEST_DIR: test directory path
- SOURCE_DIR: source directory path
"""
import argparse
import subprocess
import sys
import os
from pathlib import Path
def run_python_tests(test_dir, output_xml, **kwargs):
"""运行 Python pytest 测试"""
cmd = [
"pytest", test_dir,
f"--junit-xml={output_xml}",
"-v", "--tb=short"
]
# Add coverage options (if a source dir is given)
source_dir = kwargs.get('source_dir')
if source_dir:
cmd.extend([
f"--cov={source_dir}",
"--cov-report=term-missing",
"--cov-report=json:coverage.json"
])
print(f"Running: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=False)
return result
def run_java_tests(test_dir, output_xml, **kwargs):
"""运行 Java Maven 测试"""
cmd = ["mvn", "test", "-B"]
print(f"Running: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=False)
# Maven writes XML reports to target/surefire-reports/
# They must be copied to the requested output path
surefire_dir = Path("target/surefire-reports")
if surefire_dir.exists():
# Merge all TEST-*.xml files
import xml.etree.ElementTree as ET
xml_files = list(surefire_dir.glob("TEST-*.xml"))
if xml_files:
# Simple case: copy the single file (otherwise merge)
import shutil
if len(xml_files) == 1:
shutil.copy(xml_files[0], output_xml)
else:
# Merge multiple XML files (simplified)
root = ET.Element("testsuites")
for xml_file in xml_files:
tree = ET.parse(xml_file)
root.append(tree.getroot())
tree = ET.ElementTree(root)
tree.write(output_xml, encoding='utf-8', xml_declaration=True)
return result
def run_r_tests(test_dir, output_xml, **kwargs):
"""运行 R testthat 测试"""
# R 脚本:使用 testthat 的 JUnitReporter
# 注意:需要安装 testthat (>= 3.0.0)
r_script = f"""
library(testthat)
# Configure the JUnit reporter
reporter <- JunitReporter$new(file = '{output_xml}')
# Run the tests
test_dir(
path = '{test_dir}',
reporter = reporter,
stop_on_failure = FALSE
)
"""
# Write the script to a temporary file
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.R', delete=False) as f:
f.write(r_script)
script_path = f.name
try:
cmd = ["Rscript", script_path]
print(f"Running: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=False)
return result
finally:
# Clean up the temporary file
if os.path.exists(script_path):
os.remove(script_path)
def main():
parser = argparse.ArgumentParser(
description="通用测试运行器 - 支持 Python/Java/R"
)
parser.add_argument(
"--language",
required=True,
choices=["python", "java", "r"],
help="编程语言"
)
parser.add_argument(
"--test-dir",
required=True,
help="测试目录路径"
)
parser.add_argument(
"--output-xml",
default="test-results.xml",
help="JUnit XML 输出文件路径"
)
parser.add_argument(
"--source-dir",
help="源代码目录(用于覆盖率)"
)
args = parser.parse_args()
# Map each language to its runner
runners = {
"python": run_python_tests,
"java": run_java_tests,
"r": run_r_tests,
}
if args.language not in runners:
print(f"❌ Unsupported language: {args.language}", file=sys.stderr)
sys.exit(1)
# Run the tests
result = runners[args.language](
args.test_dir,
args.output_xml,
source_dir=args.source_dir
)
sys.exit(result.returncode)
if __name__ == "__main__":
main()

View File

@ -1,95 +0,0 @@
#!/bin/bash
# Test the objective-question grading script
set -e
echo "=== 测试客观题评分脚本 ==="
cd "$(dirname "$0")/.."
# Test 1: JSON-format answers
echo ""
echo "测试 1: JSON 格式答案(全对)"
python3 ./.autograde/objective_grade.py \
--answers objective_questions/standard_answers.json \
--standard objective_questions/standard_answers.json \
--questions objective_questions/question_texts.json \
--out test_grade1.json \
--summary test_summary1.md \
--type all
echo "分数:"
python3 -c "import json; data=json.load(open('test_grade1.json')); print(f\"{data['score']}/{data['max_score']}\")"
echo ""
echo "摘要:"
cat test_summary1.md
# Test 2: partially incorrect answers
echo ""
echo "测试 2: 部分错误答案"
cat > test_answers2.json << 'EOF'
{
"MC1": "A",
"MC2": "A",
"MC3": "C",
"MC4": "B",
"MC5": "C",
"TF1": true,
"TF2": false,
"TF3": true,
"TF4": true,
"TF5": false
}
EOF
python3 ./.autograde/objective_grade.py \
--answers test_answers2.json \
--standard objective_questions/standard_answers.json \
--questions objective_questions/question_texts.json \
--out test_grade2.json \
--summary test_summary2.md \
--type all
echo "分数:"
python3 -c "import json; data=json.load(open('test_grade2.json')); print(f\"{data['score']}/{data['max_score']}\")"
echo ""
echo "摘要:"
cat test_summary2.md
# Test 3: multiple-choice only
echo ""
echo "测试 3: 只评选择题"
python3 ./.autograde/objective_grade.py \
--answers objective_questions/standard_answers.json \
--standard objective_questions/standard_answers.json \
--questions objective_questions/question_texts.json \
--out test_grade3.json \
--summary test_summary3.md \
--type mc
echo "分数:"
python3 -c "import json; data=json.load(open('test_grade3.json')); print(f\"{data['score']}/{data['max_score']}\")"
# Test 4: true/false only
echo ""
echo "测试 4: 只评判断题"
python3 ./.autograde/objective_grade.py \
--answers objective_questions/standard_answers.json \
--standard objective_questions/standard_answers.json \
--questions objective_questions/question_texts.json \
--out test_grade4.json \
--summary test_summary4.md \
--type tf
echo "分数:"
python3 -c "import json; data=json.load(open('test_grade4.json')); print(f\"{data['score']}/{data['max_score']}\")"
# Clean up test files
rm -f test_grade*.json test_summary*.md test_answers*.json
echo ""
echo "✅ 所有测试通过!"

View File

@ -1,157 +0,0 @@
#!/usr/bin/env python3
"""
Upload metadata.json to teacher-only repository via Gitea API.
"""
import argparse
import base64
import json
import os
import sys
import urllib.error
import urllib.request
from pathlib import Path
from urllib.parse import urlparse
def detect_host(server_url: str, external_host: str | None) -> str:
"""Detect the Gitea host to use for API calls.
If server_url uses internal name (like 'gitea'), use external_host instead.
"""
parsed = urlparse(server_url)
raw_host = parsed.netloc or parsed.path.split("/")[0]
host = raw_host
if raw_host.lower().startswith("gitea"):
if not external_host:
raise ValueError(
f"Server URL uses internal name '{raw_host}' but EXTERNAL_GITEA_HOST is not set. "
"Please configure EXTERNAL_GITEA_HOST in .env and run sync_runner_config.sh"
)
host = external_host
return host
def main() -> int:
parser = argparse.ArgumentParser(description="Upload metadata.json to course metadata repo")
parser.add_argument("--metadata-file", required=True)
parser.add_argument("--metadata-repo", required=True, help="owner/repo of metadata store")
parser.add_argument("--branch", default="main")
parser.add_argument("--student-repo", required=True)
parser.add_argument("--run-id", required=True)
parser.add_argument("--commit-sha", required=True)
parser.add_argument("--workflow", required=True, choices=["grade", "objective", "llm"])
parser.add_argument("--server-url", required=True)
parser.add_argument("--external-host")
parser.add_argument("--assignment-id", help="Assignment ID (e.g., hw1)")
args = parser.parse_args()
token = os.environ.get("METADATA_TOKEN")
if not token:
print("METADATA_TOKEN is not set", file=sys.stderr)
return 1
path = Path(args.metadata_file)
if not path.is_file():
print(f"metadata file not found: {path}", file=sys.stderr)
return 0
try:
owner, repo_name = args.metadata_repo.split("/", 1)
except ValueError:
print(f"Invalid metadata repo: {args.metadata_repo}", file=sys.stderr)
return 1
# Extract student ID from student repo name
# student repo format: hw1-stu_20250001 or hw1-stu_student1
student_id = args.student_repo.split("/")[-1] # Get repo name
# Auto-detect assignment ID from student repo if not provided
assignment_id = args.assignment_id
if not assignment_id:
# Try to extract from student_repo format: hw1-stu_xxx
repo_name_part = args.student_repo.split("/")[-1]
if "-stu_" in repo_name_part:
assignment_id = repo_name_part.split("-stu_")[0]
elif "-template" in repo_name_part:
assignment_id = repo_name_part.split("-template")[0]
elif "-tests" in repo_name_part:
assignment_id = repo_name_part.split("-tests")[0]
else:
assignment_id = "unknown"
# New path structure: {assignment_id}/{student_id}/{workflow}_{run_id}_{sha}.json
target_path = f"{assignment_id}/{student_id}/{args.workflow}_{args.run_id}_{args.commit_sha[:7]}.json"
host = detect_host(args.server_url, args.external_host)
api_url = f"http://{host}/api/v1/repos/{owner}/{repo_name}/contents/{target_path}"
message = f"Upload {args.workflow} metadata for {args.student_repo} {args.commit_sha}"
# Check if file exists to determine if we need to update (PUT) or create (POST)
get_req = urllib.request.Request(
api_url,
headers={"Authorization": f"token {token}"},
method="GET"
)
sha = None
try:
with urllib.request.urlopen(get_req) as resp:
existing_file = json.loads(resp.read().decode())
# API may return a list (directory contents) or dict (single file)
if isinstance(existing_file, dict):
sha = existing_file.get("sha")
print(f"File exists, updating (sha: {sha})")
elif isinstance(existing_file, list):
# Response is a directory listing, file doesn't exist at this exact path
print(f"Path is a directory or file not found in expected format")
else:
print(f"Unexpected response type: {type(existing_file)}")
except urllib.error.HTTPError as e:
if e.code != 404:
print(f"Error checking file existence: {e}", file=sys.stderr)
return 1
# File doesn't exist, proceed with creation
content = base64.b64encode(path.read_bytes()).decode()
payload = {
"content": content,
"message": message,
"branch": args.branch
}
if sha:
payload["sha"] = sha
data = json.dumps(payload).encode()
req = urllib.request.Request(
api_url,
data=data,
headers={
"Authorization": f"token {token}",
"Content-Type": "application/json",
},
method="PUT" if sha else "POST",
)
try:
with urllib.request.urlopen(req, timeout=30) as resp:
resp_body = resp.read().decode()
print(resp_body)
except urllib.error.HTTPError as exc:
print(f"Metadata upload failed: {exc.status} {exc.reason}", file=sys.stderr)
print(exc.read().decode(), file=sys.stderr)
return 1
except urllib.error.URLError as exc:
print(f"Metadata upload failed: {exc}", file=sys.stderr)
return 1
print(f"✅ Metadata stored at {args.metadata_repo}:{target_path}")
return 0
if __name__ == "__main__":
raise SystemExit(main())
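
A minimal sketch of the host-resolution behaviour and the resulting storage path; the module name `upload_metadata`, the host names, IDs, and commit SHA below are made up for illustration.

```python
# Illustrative only: host resolution and the resulting storage path.
from upload_metadata import detect_host  # assumed module name

# Internal container name -> the external host must be configured and is used.
assert detect_host("http://gitea:3000", "git.example.com") == "git.example.com"
# Public URL -> used as-is; the external host is not needed.
assert detect_host("https://git.example.com", None) == "git.example.com"

# With --student-repo course/hw1-stu_20250001 --workflow grade --run-id 42 and a
# commit SHA starting with abcdef1, the metadata file would be stored under:
#   hw1/hw1-stu_20250001/grade_42_abcdef1.json
```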

View File

@ -1,216 +0,0 @@
# Workflow Templates
This directory contains Gitea Actions workflow templates for different programming languages.
## Available templates
| File | Language | Container | Test framework |
|------|------|------|----------|
| `python.yml` | Python | python:3.11 | pytest |
| `java.yml` | Java | maven:3.9-eclipse-temurin-17 | JUnit 5 |
| `r.yml` | R | r-base:4.3 | testthat |
## Usage
### 1. Choose a template
Pick the template that matches your programming language:
```bash
# For Python assignments
cp .autograde/workflow_templates/python.yml .gitea/workflows/grade.yml
# For Java assignments
cp .autograde/workflow_templates/java.yml .gitea/workflows/grade.yml
# For R assignments
cp .autograde/workflow_templates/r.yml .gitea/workflows/grade.yml
```
### 2. Customize the configuration
Edit `.gitea/workflows/grade.yml` and adjust as needed:
- **Container version**: change the `container:` field
- **Timeout**: change `timeout-minutes:`
- **Dependency installation**: change the "Install dependencies" step
- **Test command**: change the test-run step
### 3. Configure secrets
Make sure the following secrets are configured in the Gitea repository settings:
- `TESTS_TOKEN`: token for accessing the hidden tests repository (optional)
- `EXTERNAL_GITEA_HOST`: externally reachable Gitea address (optional)
## Python template (python.yml)
### Features
- Uses the `python:3.11` container
- Automatically installs dependencies from `requirements.txt`
- Runs pytest via `run_tests.py`
- Supports code coverage
### Customization
```yaml
# Change the Python version
container: python:3.10  # or python:3.9
# Add extra dependencies
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install numpy pandas  # extra packages
```
## Java template (java.yml)
### Features
- Uses the `maven:3.9-eclipse-temurin-17` container
- Maven manages dependencies automatically (via `pom.xml`)
- JUnit 5 test framework
- Automatically extracts Surefire reports
### Customization
```yaml
# Change the JDK version
container: maven:3.9-eclipse-temurin-11 # Java 11
container: maven:3.9-eclipse-temurin-21 # Java 21
# Customize the Maven command
run: |
mvn clean test -B -DskipTests=false
```
### Maven configuration tips
Make sure the Surefire plugin is configured in `pom.xml`:
```xml
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>3.2.2</version>
<configuration>
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
</configuration>
</plugin>
```
## R template (r.yml)
### Features
- Uses the `r-base:4.3` container
- Automatically installs dependencies from `DESCRIPTION`
- testthat test framework
- JUnitReporter XML output
### Customization
```yaml
# Change the R version
container: r-base:4.2  # or another version
# Change the CRAN mirror
run: |
Rscript -e "install.packages('testthat', repos='https://cran.r-project.org/')"
```
### Required R project layout
```
project/
├── DESCRIPTION        # package dependency definitions
├── R/                 # R source code
└── tests/
    └── testthat/      # testthat tests
```
## Common workflow stages
All templates follow the same stages (a minimal local sketch follows the list):
1. **Install system dependencies** (git, rsync, etc.)
2. **Check out code** - clone the student repository
3. **Install language dependencies** - install packages for the language
4. **Fetch hidden tests** (optional) - pull from the private tests repository
5. **Run tests** - produce JUnit XML
6. **Grade** - parse the XML and compute the score
7. **Generate metadata** - create the JSON metadata
8. **Post a comment** - publish the result on the PR
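A minimal Python sketch of stages 5-7 run locally against the `.autograde` scripts (paths are taken from the templates above; real runs execute these as separate Actions steps):
```python
# Illustrative only: drive stages 5-7 locally with the .autograde scripts.
import subprocess

def run(cmd):
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=False)

# 5. Run tests and produce JUnit XML
run(["python3", ".autograde/run_tests.py", "--language", "python",
     "--test-dir", "tests", "--output-xml", "junit.xml"])
# 6. Grade from the JUnit XML
run(["python3", ".autograde/grade.py", "--junit", "junit.xml",
     "--out", "grade.json", "--summary", "summary.md"])
# 7. Generate JSON metadata (the script writes it to stdout)
with open("metadata.json", "w") as fh:
    subprocess.run(["python3", ".autograde/create_grade_metadata.py"],
                   stdout=fh, check=False)
```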
## Advanced configuration
### Add code-quality checks
```yaml
- name: Run linter
run: |
# Python: pylint, flake8
pip install pylint
pylint src/
# Java: checkstyle
mvn checkstyle:check
# R: lintr
Rscript -e "lintr::lint_package()"
```
### Custom grading rules
Adjust the arguments passed to `grade.py`:
```yaml
- name: Grade
run: |
python3 ./.autograde/grade.py \
--junit junit.xml \
--out grade.json \
--summary summary.md \
--bonus bonus.json # optional bonus items
```
### Multiple test suites
```yaml
- name: Run public tests
run: |
pytest tests_public/ --junit-xml=public.xml
- name: Run hidden tests
run: |
pytest tests_hidden/ --junit-xml=hidden.xml
- name: Merge test results
run: |
python3 ./.autograde/merge_junit.py public.xml hidden.xml -o junit.xml
```
## Troubleshooting
### Tests won't run
- Check that the test directory path is correct
- Confirm that dependencies were installed correctly
- Check the error messages in the Actions log
### JUnit XML not generated
- Python: make sure the pytest command includes `--junit-xml`
- Java: check the Surefire plugin configuration
- R: confirm testthat >= 3.0.0
### Metadata is empty
- Check that `grade.json` was generated
- Confirm the `LANGUAGE` environment variable is set correctly
- Inspect the output of `create_minimal_metadata.py`
## Related documents
- [Test runner](../run_tests.py) - generic test runner
- [Grading script](../grade.py) - JUnit XML parsing and scoring
- [Metadata generation](../create_minimal_metadata.py) - JSON metadata
- [Examples](../../examples/) - complete examples for each language
---
Last updated: 2025-11-13

View File

@ -1,200 +0,0 @@
name: autograde-java
on:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: read
pull-requests: write
jobs:
java:
runs-on: docker
container:
image: gradle:9.0-jdk21
options: --user root
timeout-minutes: 20
steps:
- name: Install dependencies (CN mirror)
run: |
set -e
# Switch Debian/Ubuntu apt sources to the Tencent Cloud mirror
for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
[ -f "$f" ] || continue
sed -i -E 's|https?://deb.debian.org|http://mirrors.cloud.tencent.com|g' "$f" || true
sed -i -E 's|https?://security.debian.org|http://mirrors.cloud.tencent.com/debian-security|g' "$f" || true
sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
done
apt-get -o Acquire::Check-Valid-Until=false -o Acquire::AllowInsecureRepositories=true update -y
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip nodejs rsync
pip3 install --break-system-packages python-dotenv requests -i https://mirrors.cloud.tencent.com/pypi/simple
rm -rf /var/lib/apt/lists/*
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Fix permissions
run: |
# Ensure workspace is owned by current user
chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
- name: Fetch hidden tests (if available)
working-directory: ${{ github.workspace }}
env:
EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
run: |
set -e
TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
echo "Warning: RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN not set, skipping private tests"
exit 0
fi
# Resolve Gitea Host
if [ -n "$EXTERNAL_GITEA_HOST" ]; then
HOST="$EXTERNAL_GITEA_HOST"
elif [ -n "$GITEA_ROOT_URL" ]; then
HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
else
HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
fi
echo "📥 Fetching private tests repository..."
echo " Gitea host: $HOST"
# Infer organization and assignment ID from repository name
ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
# Extract assignment ID from repo name (e.g., hw2-stu_xxx -> hw2, hw2-template -> hw2)
if echo "$REPO_NAME" | grep -q -- '-stu_'; then
ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
elif echo "$REPO_NAME" | grep -q -- '-template'; then
ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
else
ASSIGNMENT_ID="hw1" # fallback
fi
echo " Organization: $ORG"
echo " Assignment ID: $ASSIGNMENT_ID"
# Clone private test repository
AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
if ! git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests 2>&1; then
echo "❌ Failed to clone ${ASSIGNMENT_ID}-tests repository!"
exit 1
fi
# Verify test directory exists
if [ ! -d "_priv_tests/java" ]; then
echo "❌ java/ directory not found in ${ASSIGNMENT_ID}-tests!"
exit 1
fi
if [ ! -d "_priv_tests/java/src/test" ]; then
echo "❌ java/src/test/ not found in ${ASSIGNMENT_ID}-tests!"
exit 1
fi
# Copy tests to src/test/
rsync -a _priv_tests/java/src/test/ src/test/
echo "✅ Private tests copied: _priv_tests/java/src/test/ → src/test/"
- name: Run tests using Gradle
working-directory: ${{ github.workspace }}
env:
LANGUAGE: java
run: |
gradle test --no-daemon || true
- name: Extract test results
working-directory: ${{ github.workspace }}
run: |
# Find JUnit XML report
XML_REPORT=$(find build/test-results/test -name "TEST-*.xml" | head -n 1)
if [ -n "$XML_REPORT" ]; then
cp "$XML_REPORT" junit.xml
echo "✅ Found JUnit report: $XML_REPORT"
else
echo "⚠️ No JUnit report found!"
touch junit.xml
fi
- name: Grade
working-directory: ${{ github.workspace }}
env:
LANGUAGE: java
run: |
python3 ./.autograde/grade.py --junit junit.xml --out grade.json --summary summary.md
- name: Prepare artifacts
working-directory: ${{ github.workspace }}
run: |
mkdir -p artifacts
cp junit.xml summary.md grade.json artifacts/ 2>/dev/null || true
- name: Create grade metadata
working-directory: ${{ github.workspace }}
env:
LANGUAGE: java
REPO: ${{ github.repository }}
run: |
if [ ! -f grade.json ]; then
echo "⚠️ grade.json not found, skipping metadata creation"
exit 0
fi
# Generate JSON metadata
if [ -f ./.autograde/create_grade_metadata.py ]; then
python3 ./.autograde/create_grade_metadata.py > metadata.json || echo "{}" > metadata.json
echo "✅ Grade metadata created (using create_grade_metadata.py)"
elif [ -f ./.autograde/create_minimal_metadata.py ]; then
export GRADE_TYPE=programming
python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
echo "✅ Grade metadata created (using create_minimal_metadata.py)"
else
echo "⚠️ No metadata creation script found, skipping"
echo "{}" > metadata.json
fi
- name: Upload metadata (teacher only)
if: env.RUNNER_METADATA_REPO != '' && env.RUNNER_METADATA_TOKEN != ''
working-directory: ${{ github.workspace }}
shell: bash
env:
METADATA_REPO: ${{ env.RUNNER_METADATA_REPO }}
METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
STUDENT_REPO: ${{ github.repository }}
RUN_ID: ${{ github.run_id }}
COMMIT_SHA: ${{ github.sha }}
SERVER_URL: ${{ github.server_url }}
run: |
set -e
if [ ! -f metadata.json ]; then
echo "No metadata.json found, skip uploading."
exit 0
fi
python3 ./.autograde/upload_metadata.py \
--metadata-file metadata.json \
--metadata-repo "${METADATA_REPO}" \
--branch "${METADATA_BRANCH:-main}" \
--student-repo "${STUDENT_REPO}" \
--run-id "${RUN_ID}" \
--commit-sha "${COMMIT_SHA}" \
--workflow grade \
--server-url "${SERVER_URL}" \
--external-host "${EXTERNAL_GITEA_HOST}"
rm -f metadata.json

View File

@ -1,206 +0,0 @@
name: autograde-python
on:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: read
pull-requests: write
jobs:
python:
runs-on: docker
container: python:3.11
timeout-minutes: 15
steps:
- name: Install dependencies (CN mirror)
run: |
set -e
# Switch Debian/Ubuntu apt sources to the Tencent Cloud mirror
for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
[ -f "$f" ] || continue
sed -i -E 's|https?://deb.debian.org|http://mirrors.cloud.tencent.com|g' "$f" || true
sed -i -E 's|https?://security.debian.org|http://mirrors.cloud.tencent.com/debian-security|g' "$f" || true
sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
done
apt-get -o Acquire::Check-Valid-Until=false -o Acquire::AllowInsecureRepositories=true update -y
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip nodejs rsync
rm -rf /var/lib/apt/lists/*
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Fix permissions
run: |
# Ensure workspace is owned by current user
chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
- name: Install Python deps
working-directory: ${{ github.workspace }}
run: |
# Use the Tencent Cloud mirror to speed up installs
python -m pip install -U pip -i https://mirrors.cloud.tencent.com/pypi/simple
if [ -f requirements.txt ]; then pip install -r requirements.txt -i https://mirrors.cloud.tencent.com/pypi/simple; fi
if [ -f pyproject.toml ]; then pip install . -i https://mirrors.cloud.tencent.com/pypi/simple; fi
pip install pytest pytest-cov junit-xml python-dotenv requests -i https://mirrors.cloud.tencent.com/pypi/simple
- name: Fetch private tests
working-directory: ${{ github.workspace }}
env:
EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
run: |
set -e
TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
echo "❌ RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN 未配置!"
echo "测试必须从私有的 tests 仓库获取"
exit 1
fi
# Resolve Gitea Host
if [ -n "$EXTERNAL_GITEA_HOST" ]; then
HOST="$EXTERNAL_GITEA_HOST"
elif [ -n "$GITEA_ROOT_URL" ]; then
HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
else
HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
fi
echo "📥 Fetching private tests repository..."
echo " Gitea host: $HOST"
# Infer organization and assignment ID from repository name
ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
# Extract assignment ID from repo name (e.g., hw1-stu_xxx -> hw1, hw1-template -> hw1)
if echo "$REPO_NAME" | grep -q -- '-stu_'; then
ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
elif echo "$REPO_NAME" | grep -q -- '-template'; then
ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
else
ASSIGNMENT_ID="hw1" # fallback
fi
echo " Organization: $ORG"
echo " Assignment ID: $ASSIGNMENT_ID"
AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
if ! git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests 2>&1; then
echo "❌ Failed to clone ${ASSIGNMENT_ID}-tests repository!"
exit 1
fi
# 验证测试目录存在
if [ ! -d "_priv_tests/python" ]; then
echo "❌ python/ directory not found in ${ASSIGNMENT_ID}-tests!"
exit 1
fi
if [ ! -d "_priv_tests/python/tests" ]; then
echo "❌ python/tests/ not found in ${ASSIGNMENT_ID}-tests!"
exit 1
fi
# 复制测试到 tests/
mkdir -p tests
rsync -a _priv_tests/python/tests/ tests/
echo "✅ Tests copied: _priv_tests/python/tests/ → tests/"
# 复制数据文件(如果存在)
if [ -d "_priv_tests/python/data" ]; then
mkdir -p tests/data
rsync -a _priv_tests/python/data/ tests/data/
echo "✅ Data files copied: _priv_tests/python/data/ → tests/data/"
fi
# 验证测试文件
if [ -z "$(find tests -name 'test_*.py' 2>/dev/null)" ]; then
echo "❌ No test files found in tests/ directory!"
exit 1
fi
echo "✅ Test suite ready:"
find tests -name 'test_*.py'
- name: Run tests
working-directory: ${{ github.workspace }}
run: |
# 设置随机种子
export PYTHONHASHSEED=2025
pytest -q --maxfail=0 --junitxml=junit.xml --tb=short || true
- name: Grade
working-directory: ${{ github.workspace }}
run: |
python ./.autograde/grade.py --junit junit.xml --out grade.json --summary summary.md
- name: Prepare artifacts
working-directory: ${{ github.workspace }}
run: |
mkdir -p artifacts
cp junit.xml summary.md grade.json artifacts/ 2>/dev/null || true
- name: Create grade metadata
working-directory: ${{ github.workspace }}
env:
REPO: ${{ github.repository }}
run: |
if [ ! -f grade.json ]; then
echo "⚠️ grade.json not found, skipping metadata creation"
exit 0
fi
# 生成 JSON 元数据
if [ -f ./.autograde/create_grade_metadata.py ]; then
python3 ./.autograde/create_grade_metadata.py > metadata.json || echo "{}" > metadata.json
echo "✅ Grade metadata created (using create_grade_metadata.py)"
elif [ -f ./.autograde/create_minimal_metadata.py ]; then
export GRADE_TYPE=programming
python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
echo "✅ Grade metadata created (using create_minimal_metadata.py)"
else
echo "⚠️ No metadata creation script found, skipping"
echo "{}" > metadata.json
fi
- name: Upload metadata (teacher only)
if: env.RUNNER_METADATA_REPO != '' && env.RUNNER_METADATA_TOKEN != ''
working-directory: ${{ github.workspace }}
shell: bash
env:
METADATA_REPO: ${{ env.RUNNER_METADATA_REPO }}
METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
STUDENT_REPO: ${{ github.repository }}
RUN_ID: ${{ github.run_id }}
COMMIT_SHA: ${{ github.sha }}
SERVER_URL: ${{ github.server_url }}
run: |
set -e
if [ ! -f metadata.json ]; then
echo "No metadata.json found, skip uploading."
exit 0
fi
python ./.autograde/upload_metadata.py \
--metadata-file metadata.json \
--metadata-repo "${METADATA_REPO}" \
--branch "${METADATA_BRANCH:-main}" \
--student-repo "${STUDENT_REPO}" \
--run-id "${RUN_ID}" \
--commit-sha "${COMMIT_SHA}" \
--workflow grade \
--server-url "${SERVER_URL}" \
--external-host "${EXTERNAL_GITEA_HOST}"
rm -f metadata.json


@ -1,200 +0,0 @@
name: autograde-r
on:
  push:
    branches:
      - main
  workflow_dispatch:
permissions:
  contents: read
  pull-requests: write
jobs:
  r:
    runs-on: docker
    container: r-base:4.3.1
    timeout-minutes: 15
    steps:
      - name: Install dependencies (CN mirror)
        run: |
          set -e
          # Replace Debian/Ubuntu apt sources with the Tencent Cloud mirror
          for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
            [ -f "$f" ] || continue
            sed -i -E 's|https?://deb.debian.org|http://mirrors.cloud.tencent.com|g' "$f" || true
            sed -i -E 's|https?://security.debian.org|http://mirrors.cloud.tencent.com/debian-security|g' "$f" || true
            sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
            sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.cloud.tencent.com|g' "$f" || true
          done
          apt-get -o Acquire::Check-Valid-Until=false -o Acquire::AllowInsecureRepositories=true update -y
          DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3 python3-pip nodejs rsync libcurl4-openssl-dev libssl-dev libxml2-dev
          pip3 install --break-system-packages python-dotenv requests -i https://mirrors.cloud.tencent.com/pypi/simple
          rm -rf /var/lib/apt/lists/*
      - name: Configure CRAN Mirror
        run: |
          echo 'options(repos = c(CRAN = "https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))' >> ~/.Rprofile
      - name: Install R packages
        run: |
          Rscript -e 'install.packages(c("testthat", "covr", "xml2"))'
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 1
      - name: Fix permissions
        run: |
          # Ensure workspace is owned by current user
          chown -R $(whoami):$(whoami) ${{ github.workspace }} || true
      - name: Fetch private tests
        working-directory: ${{ github.workspace }}
        env:
          EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
        run: |
          set -e
          TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
          TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
          if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
            echo "❌ RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN are not configured!"
            echo "Tests must be fetched from the private tests repository"
            exit 1
          fi
          # Resolve Gitea Host
          if [ -n "$EXTERNAL_GITEA_HOST" ]; then
            HOST="$EXTERNAL_GITEA_HOST"
          elif [ -n "$GITEA_ROOT_URL" ]; then
            HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
          else
            HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
          fi
          echo "📥 Fetching private tests repository..."
          echo " Gitea host: $HOST"
          # Infer organization and assignment ID from repository name
          ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
          REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)
          # Extract assignment ID from repo name (e.g., hw1-stu_xxx -> hw1, hw1-template -> hw1)
          if echo "$REPO_NAME" | grep -q -- '-stu_'; then
            ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
          elif echo "$REPO_NAME" | grep -q -- '-template'; then
            ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
          else
            ASSIGNMENT_ID="hw1" # fallback
          fi
          echo " Organization: $ORG"
          echo " Assignment ID: $ASSIGNMENT_ID"
          AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
          if ! git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests 2>&1; then
            echo "❌ Failed to clone ${ASSIGNMENT_ID}-tests repository!"
            exit 1
          fi
          # Verify that the test directories exist
          if [ ! -d "_priv_tests/r" ]; then
            echo "❌ r/ directory not found in ${ASSIGNMENT_ID}-tests!"
            exit 1
          fi
          if [ ! -d "_priv_tests/r/tests" ]; then
            echo "❌ r/tests/ not found in ${ASSIGNMENT_ID}-tests!"
            exit 1
          fi
          # Copy tests into tests/
          mkdir -p tests
          rsync -a _priv_tests/r/tests/ tests/
          echo "✅ Tests copied: _priv_tests/r/tests/ → tests/"
          # Verify that test files exist
          if [ -z "$(find tests -name 'test_*.R' 2>/dev/null)" ]; then
            echo "❌ No test files found in tests/ directory!"
            exit 1
          fi
          echo "✅ Test suite ready:"
          find tests -name 'test_*.R'
      - name: Run tests
        working-directory: ${{ github.workspace }}
        run: |
          Rscript -e 'library(testthat); test_dir("tests", reporter = JunitReporter$new(file = "junit.xml"))' || true
      - name: Grade
        working-directory: ${{ github.workspace }}
        env:
          LANGUAGE: r
        run: |
          python3 ./.autograde/grade.py --junit junit.xml --out grade.json --summary summary.md
      - name: Prepare artifacts
        working-directory: ${{ github.workspace }}
        run: |
          mkdir -p artifacts
          cp junit.xml summary.md grade.json artifacts/ 2>/dev/null || true
      - name: Create grade metadata
        working-directory: ${{ github.workspace }}
        env:
          LANGUAGE: r
          REPO: ${{ github.repository }}
        run: |
          if [ ! -f grade.json ]; then
            echo "⚠️ grade.json not found, skipping metadata creation"
            exit 0
          fi
          # Generate JSON metadata
          if [ -f ./.autograde/create_grade_metadata.py ]; then
            python3 ./.autograde/create_grade_metadata.py > metadata.json || echo "{}" > metadata.json
            echo "✅ Grade metadata created (using create_grade_metadata.py)"
          elif [ -f ./.autograde/create_minimal_metadata.py ]; then
            export GRADE_TYPE=programming
            python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
            echo "✅ Grade metadata created (using create_minimal_metadata.py)"
          else
            echo "⚠️ No metadata creation script found, skipping"
            echo "{}" > metadata.json
          fi
      - name: Upload metadata (teacher only)
        if: env.RUNNER_METADATA_REPO != '' && env.RUNNER_METADATA_TOKEN != ''
        working-directory: ${{ github.workspace }}
        shell: bash
        env:
          METADATA_REPO: ${{ env.RUNNER_METADATA_REPO }}
          METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
          METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
          STUDENT_REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          COMMIT_SHA: ${{ github.sha }}
          SERVER_URL: ${{ github.server_url }}
        run: |
          set -e
          if [ ! -f metadata.json ]; then
            echo "No metadata.json found, skipping upload."
            exit 0
          fi
          python3 ./.autograde/upload_metadata.py \
            --metadata-file metadata.json \
            --metadata-repo "${METADATA_REPO}" \
            --branch "${METADATA_BRANCH:-main}" \
            --student-repo "${STUDENT_REPO}" \
            --run-id "${RUN_ID}" \
            --commit-sha "${COMMIT_SHA}" \
            --workflow grade \
            --server-url "${SERVER_URL}" \
            --external-host "${EXTERNAL_GITEA_HOST}"
          rm -f metadata.json

19 binary files changed (contents not shown)