完成作业
This commit is contained in:
parent
5cc51bc2a1
commit
45330280a1
@ -102,206 +102,201 @@ jobs:
|
||||
working-directory: ${{ github.workspace }}
|
||||
run: |
|
||||
echo "📦 Setting up local grading environment..."
|
||||
|
||||
# Create autograde directory with simple grading scripts
|
||||
mkdir -p .autograde
|
||||
|
||||
# Create grade_grouped.py
|
||||
cat > .autograde/grade_grouped.py << 'GRADE_SCRIPT'
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import xml.etree.ElementTree as ET
|
||||
from pathlib import Path
|
||||
cat > .autograde/grade_grouped.py << 'EOF'
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import xml.etree.ElementTree as ET
|
||||
from pathlib import Path
|
||||
|
||||
def parse_junit_results(junit_dir):
|
||||
results = {"passed": 0, "failed": 0, "total": 0, "tests": []}
|
||||
junit_path = Path(junit_dir)
|
||||
if not junit_path.exists():
|
||||
return results
|
||||
for xml_file in junit_path.glob("TEST-*.xml"):
|
||||
try:
|
||||
tree = ET.parse(xml_file)
|
||||
root = tree.getroot()
|
||||
for testcase in root.findall(".//testcase"):
|
||||
name = testcase.get("name", "unknown")
|
||||
classname = testcase.get("classname", "")
|
||||
failure = testcase.find("failure")
|
||||
error = testcase.find("error")
|
||||
status = "passed" if failure is None and error is None else "failed"
|
||||
results["tests"].append({"name": f"{classname}.{name}", "status": status})
|
||||
results["total"] += 1
|
||||
if status == "passed":
|
||||
results["passed"] += 1
|
||||
else:
|
||||
results["failed"] += 1
|
||||
except Exception as e:
|
||||
print(f"Error parsing {xml_file}: {e}")
|
||||
return results
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--junit-dir", required=True)
|
||||
parser.add_argument("--groups", default="test_groups.json")
|
||||
parser.add_argument("--out", default="grade.json")
|
||||
parser.add_argument("--summary", default="summary.md")
|
||||
args = parser.parse_args()
|
||||
|
||||
results = parse_junit_results(args.junit_dir)
|
||||
score = int((results["passed"] / max(results["total"], 1)) * 60)
|
||||
|
||||
grade = {
|
||||
"programming_score": score,
|
||||
"max_score": 60,
|
||||
"passed": results["passed"],
|
||||
"failed": results["failed"],
|
||||
"total": results["total"],
|
||||
"tests": results["tests"]
|
||||
}
|
||||
|
||||
with open(args.out, "w") as f:
|
||||
json.dump(grade, f, indent=2)
|
||||
|
||||
with open(args.summary, "w") as f:
|
||||
f.write(f"# Programming Test Results\n\n")
|
||||
f.write(f"- **Score**: {score}/60\n")
|
||||
f.write(f"- **Passed**: {results['passed']}/{results['total']}\n\n")
|
||||
for t in results["tests"]:
|
||||
icon = "PASS" if t["status"] == "passed" else "FAIL"
|
||||
f.write(f"- {icon} {t['name']}\n")
|
||||
|
||||
print(f"Grade: {score}/60 ({results['passed']}/{results['total']} tests passed)")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
EOF
|
||||
|
||||
def parse_junit_results(junit_dir):
|
||||
results = {"passed": 0, "failed": 0, "total": 0, "tests": []}
|
||||
junit_path = Path(junit_dir)
|
||||
if not junit_path.exists():
|
||||
return results
|
||||
for xml_file in junit_path.glob("TEST-*.xml"):
|
||||
try:
|
||||
tree = ET.parse(xml_file)
|
||||
root = tree.getroot()
|
||||
for testcase in root.findall(".//testcase"):
|
||||
name = testcase.get("name", "unknown")
|
||||
classname = testcase.get("classname", "")
|
||||
failure = testcase.find("failure")
|
||||
error = testcase.find("error")
|
||||
status = "passed" if failure is None and error is None else "failed"
|
||||
results["tests"].append({"name": f"{classname}.{name}", "status": status})
|
||||
results["total"] += 1
|
||||
if status == "passed":
|
||||
results["passed"] += 1
|
||||
else:
|
||||
results["failed"] += 1
|
||||
except Exception as e:
|
||||
print(f"Error parsing {xml_file}: {e}")
|
||||
return results
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--junit-dir", required=True)
|
||||
parser.add_argument("--groups", default="test_groups.json")
|
||||
parser.add_argument("--out", default="grade.json")
|
||||
parser.add_argument("--summary", default="summary.md")
|
||||
args = parser.parse_args()
|
||||
|
||||
results = parse_junit_results(args.junit_dir)
|
||||
score = int((results["passed"] / max(results["total"], 1)) * 60)
|
||||
|
||||
grade = {
|
||||
"programming_score": score,
|
||||
"max_score": 60,
|
||||
"passed": results["passed"],
|
||||
"failed": results["failed"],
|
||||
"total": results["total"],
|
||||
"tests": results["tests"]
|
||||
}
|
||||
|
||||
with open(args.out, "w") as f:
|
||||
json.dump(grade, f, indent=2)
|
||||
|
||||
with open(args.summary, "w") as f:
|
||||
f.write(f"# Programming Test Results\n\n")
|
||||
f.write(f"- **Score**: {score}/60\n")
|
||||
f.write(f"- **Passed**: {results['passed']}/{results['total']}\n\n")
|
||||
for t in results["tests"]:
|
||||
icon = "✅" if t["status"] == "passed" else "❌"
|
||||
f.write(f"- {icon} {t['name']}\n")
|
||||
|
||||
print(f"✅ Grade: {score}/60 ({results['passed']}/{results['total']} tests passed)")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
GRADE_SCRIPT
|
||||
|
||||
# Create llm_grade.py (simplified - just gives points for having the file)
|
||||
cat > .autograde/llm_grade.py << 'LLM_SCRIPT'
|
||||
import json
|
||||
import argparse
|
||||
import os
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--question", default="")
|
||||
parser.add_argument("--answer", required=True)
|
||||
parser.add_argument("--rubric", default="")
|
||||
parser.add_argument("--out", default="grade.json")
|
||||
parser.add_argument("--summary", default="summary.md")
|
||||
args = parser.parse_args()
|
||||
|
||||
score = 0
|
||||
feedback = ""
|
||||
|
||||
if os.path.exists(args.answer):
|
||||
with open(args.answer, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
word_count = len(content)
|
||||
if word_count > 500:
|
||||
score = 10
|
||||
feedback = "Report submitted with sufficient content."
|
||||
elif word_count > 200:
|
||||
score = 7
|
||||
feedback = "Report submitted but could use more detail."
|
||||
else:
|
||||
score = 3
|
||||
feedback = "Report is too short."
|
||||
else:
|
||||
feedback = "Report file not found."
|
||||
|
||||
with open(args.out, "w") as f:
|
||||
json.dump({"total": score, "feedback": feedback}, f, indent=2)
|
||||
|
||||
with open(args.summary, "w") as f:
|
||||
f.write(f"# Report Grade\n\n- **Score**: {score}/10\n- **Feedback**: {feedback}\n")
|
||||
|
||||
print(f"✅ Report grade: {score}/10")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
LLM_SCRIPT
|
||||
# Create llm_grade.py
|
||||
cat > .autograde/llm_grade.py << 'EOF'
|
||||
import json
|
||||
import argparse
|
||||
import os
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--question", default="")
|
||||
parser.add_argument("--answer", required=True)
|
||||
parser.add_argument("--rubric", default="")
|
||||
parser.add_argument("--out", default="grade.json")
|
||||
parser.add_argument("--summary", default="summary.md")
|
||||
args = parser.parse_args()
|
||||
|
||||
score = 0
|
||||
feedback = ""
|
||||
|
||||
if os.path.exists(args.answer):
|
||||
with open(args.answer, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
word_count = len(content)
|
||||
if word_count > 500:
|
||||
score = 10
|
||||
feedback = "Report submitted with sufficient content."
|
||||
elif word_count > 200:
|
||||
score = 7
|
||||
feedback = "Report submitted but could use more detail."
|
||||
else:
|
||||
score = 3
|
||||
feedback = "Report is too short."
|
||||
else:
|
||||
feedback = "Report file not found."
|
||||
|
||||
with open(args.out, "w") as f:
|
||||
json.dump({"total": score, "feedback": feedback}, f, indent=2)
|
||||
|
||||
with open(args.summary, "w") as f:
|
||||
f.write(f"# Report Grade\n\n- **Score**: {score}/10\n- **Feedback**: {feedback}\n")
|
||||
|
||||
print(f"Report grade: {score}/10")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
EOF
|
||||
|
||||
# Create aggregate_final_grade.py
|
||||
cat > .autograde/aggregate_final_grade.py << 'AGG_SCRIPT'
|
||||
import json
|
||||
import argparse
|
||||
cat > .autograde/aggregate_final_grade.py << 'EOF'
|
||||
import json
|
||||
import argparse
|
||||
|
||||
def load_json(path):
|
||||
try:
|
||||
with open(path) as f:
|
||||
return json.load(f)
|
||||
except:
|
||||
return {}
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--programming", default="grade.json")
|
||||
parser.add_argument("--report", default="report_grade.json")
|
||||
parser.add_argument("--frontend", default="frontend_grade.json")
|
||||
parser.add_argument("--out", default="final_grade.json")
|
||||
parser.add_argument("--summary", default="final_summary.md")
|
||||
args = parser.parse_args()
|
||||
|
||||
prog = load_json(args.programming)
|
||||
report = load_json(args.report)
|
||||
frontend = load_json(args.frontend)
|
||||
|
||||
prog_score = prog.get("programming_score", 0)
|
||||
report_score = report.get("total", 0)
|
||||
frontend_score = frontend.get("total", 0)
|
||||
total = prog_score + report_score + frontend_score
|
||||
|
||||
final = {
|
||||
"total": total,
|
||||
"programming": prog_score,
|
||||
"report": report_score,
|
||||
"frontend": frontend_score
|
||||
}
|
||||
|
||||
with open(args.out, "w") as f:
|
||||
json.dump(final, f, indent=2)
|
||||
|
||||
with open(args.summary, "w") as f:
|
||||
f.write(f"# Final Grade Summary\n\n")
|
||||
f.write(f"| Category | Score |\n|----------|-------|\n")
|
||||
f.write(f"| Programming | {prog_score}/60 |\n")
|
||||
f.write(f"| Report | {report_score}/10 |\n")
|
||||
f.write(f"| Frontend | {frontend_score}/10 |\n")
|
||||
f.write(f"| **Total** | **{total}/80** |\n")
|
||||
|
||||
print(f"Final grade: {total}/80")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
EOF
|
||||
|
||||
def load_json(path):
|
||||
try:
|
||||
with open(path) as f:
|
||||
return json.load(f)
|
||||
except:
|
||||
return {}
|
||||
# Create stub scripts
|
||||
cat > .autograde/create_minimal_metadata.py << 'EOF'
|
||||
import json
|
||||
import os
|
||||
grade_file = "final_grade.json"
|
||||
if os.path.exists(grade_file):
|
||||
with open(grade_file) as f:
|
||||
data = json.load(f)
|
||||
print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
|
||||
else:
|
||||
print(json.dumps({"grade": 0, "status": "error"}))
|
||||
EOF
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--programming", default="grade.json")
|
||||
parser.add_argument("--report", default="report_grade.json")
|
||||
parser.add_argument("--frontend", default="frontend_grade.json")
|
||||
parser.add_argument("--out", default="final_grade.json")
|
||||
parser.add_argument("--summary", default="final_summary.md")
|
||||
args = parser.parse_args()
|
||||
|
||||
prog = load_json(args.programming)
|
||||
report = load_json(args.report)
|
||||
frontend = load_json(args.frontend)
|
||||
|
||||
prog_score = prog.get("programming_score", 0)
|
||||
report_score = report.get("total", 0)
|
||||
frontend_score = frontend.get("total", 0)
|
||||
total = prog_score + report_score + frontend_score
|
||||
|
||||
final = {
|
||||
"total": total,
|
||||
"programming": prog_score,
|
||||
"report": report_score,
|
||||
"frontend": frontend_score
|
||||
}
|
||||
|
||||
with open(args.out, "w") as f:
|
||||
json.dump(final, f, indent=2)
|
||||
|
||||
with open(args.summary, "w") as f:
|
||||
f.write(f"# Final Grade Summary\n\n")
|
||||
f.write(f"| Category | Score |\n|----------|-------|\n")
|
||||
f.write(f"| Programming | {prog_score}/60 |\n")
|
||||
f.write(f"| Report | {report_score}/10 |\n")
|
||||
f.write(f"| Frontend | {frontend_score}/10 |\n")
|
||||
f.write(f"| **Total** | **{total}/80** |\n")
|
||||
|
||||
print(f"🎯 Final grade: {total}/80")
|
||||
cat > .autograde/upload_metadata.py << 'EOF'
|
||||
print("Metadata upload skipped")
|
||||
EOF
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
AGG_SCRIPT
|
||||
cat > .autograde/generate_pdf_report.py << 'EOF'
|
||||
print("PDF generation skipped")
|
||||
EOF
|
||||
|
||||
# Create minimal metadata script
|
||||
cat > .autograde/create_minimal_metadata.py << 'META_SCRIPT'
|
||||
import json
|
||||
import os
|
||||
|
||||
grade_file = "final_grade.json"
|
||||
if os.path.exists(grade_file):
|
||||
with open(grade_file) as f:
|
||||
data = json.load(f)
|
||||
print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
|
||||
else:
|
||||
print(json.dumps({"grade": 0, "status": "error"}))
|
||||
META_SCRIPT
|
||||
|
||||
# Create upload_metadata.py (stub)
|
||||
cat > .autograde/upload_metadata.py << 'UPLOAD_SCRIPT'
|
||||
print("Metadata upload skipped (local mode)")
|
||||
UPLOAD_SCRIPT
|
||||
|
||||
# Create generate_pdf_report.py (stub)
|
||||
cat > .autograde/generate_pdf_report.py << 'PDF_SCRIPT'
|
||||
print("PDF generation skipped (local mode)")
|
||||
PDF_SCRIPT
|
||||
|
||||
echo "✅ Local grading scripts created"
|
||||
echo "Local grading scripts created"
|
||||
|
||||
- name: Run tests
|
||||
working-directory: ${{ github.workspace }}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user