完成作业 (complete the assignment)
This commit is contained in:
parent 53b844b9d9
commit 5f2c7af0de
@ -1,10 +0,0 @@
import json
import os

grade_file = "final_grade.json"
if os.path.exists(grade_file):
    with open(grade_file) as f:
        data = json.load(f)
    print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
else:
    print(json.dumps({"grade": 0, "status": "error"}))
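For reference, the deleted script above (it reappears below as .autograde/create_minimal_metadata.py) emits a single JSON line; a run against a populated final_grade.json would look roughly like this (values illustrative):

    $ python3 .autograde/create_minimal_metadata.py
    {"grade": 54, "status": "graded"}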
@ -1,156 +0,0 @@
#!/usr/bin/env python3
"""Generate PDF report from REPORT.md and FRONTEND.md"""

import os
import sys
import argparse
import json

def read_file_content(path):
    if not path or not os.path.exists(path):
        return ""
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()

def main():
    parser = argparse.ArgumentParser(description='Generate PDF grade report')
    parser.add_argument('--report', default='REPORT.md', help='Path to REPORT.md')
    parser.add_argument('--frontend', default='FRONTEND.md', help='Path to FRONTEND.md')
    parser.add_argument('--grade', default='final_grade.json', help='Path to grade JSON')
    parser.add_argument('--images', default='images', help='Path to images directory')
    parser.add_argument('--out', default='grade_report.pdf', help='Output PDF path')
    parser.add_argument('--student-id', default='', help='Student ID')
    parser.add_argument('--student-name', default='', help='Student name')
    parser.add_argument('--class-name', default='', help='Class name')
    parser.add_argument('--commit-sha', default='', help='Commit SHA')
    args = parser.parse_args()

    # Read grade data
    grade_data = {}
    if os.path.exists(args.grade):
        with open(args.grade, 'r', encoding='utf-8') as f:
            grade_data = json.load(f)

    # Read markdown content
    report_content = read_file_content(args.report)
    frontend_content = read_file_content(args.frontend)

    if not report_content and not frontend_content:
        print("No report content found, skipping PDF generation")
        return

    try:
        import markdown
        from weasyprint import HTML

        # Build HTML content
        html_parts = []

        # Header with student info
        header = f"""
        <div class="header">
            <h1>VibeVault Project Grade Report</h1>
            <p><strong>Student ID</strong>: {args.student_id or 'N/A'}</p>
            <p><strong>Name</strong>: {args.student_name or 'N/A'}</p>
            <p><strong>Class</strong>: {args.class_name or 'N/A'}</p>
            <p><strong>Commit</strong>: {args.commit_sha[:7] if args.commit_sha else 'N/A'}</p>
        </div>
        """
        html_parts.append(header)

        # Grade summary
        if grade_data:
            grade_html = f"""
            <div class="grade-summary">
                <h2>Grade Summary</h2>
                <table>
                    <tr><th>Category</th><th>Score</th></tr>
                    <tr><td>Programming tests</td><td>{grade_data.get('programming', 0)}/60</td></tr>
                    <tr><td>Report</td><td>{grade_data.get('report', 0)}/10</td></tr>
                    <tr><td>Frontend</td><td>{grade_data.get('frontend', 0)}/10</td></tr>
                    <tr><td><strong>Total</strong></td><td><strong>{grade_data.get('total', 0)}/80</strong></td></tr>
                </table>
            </div>
            """
            html_parts.append(grade_html)

        # Report content
        if report_content:
            report_html = markdown.markdown(report_content, extensions=['fenced_code', 'tables'])
            html_parts.append(f'<div class="report-section">{report_html}</div>')

        # Frontend content
        if frontend_content:
            frontend_html = markdown.markdown(frontend_content, extensions=['fenced_code', 'tables'])
            html_parts.append(f'<div class="frontend-section">{frontend_html}</div>')

        # Full HTML document
        full_html = f"""
        <!DOCTYPE html>
        <html>
        <head>
        <meta charset="utf-8">
        <style>
        @page {{ size: A4; margin: 2cm; }}
        body {{
            font-family: "Noto Sans CJK SC", "WenQuanYi Micro Hei", "Microsoft YaHei", sans-serif;
            line-height: 1.6;
            color: #333;
        }}
        .header {{
            border-bottom: 2px solid #333;
            padding-bottom: 20px;
            margin-bottom: 20px;
        }}
        .header h1 {{ color: #2c3e50; margin-bottom: 10px; }}
        .grade-summary {{
            background: #f8f9fa;
            padding: 20px;
            border-radius: 8px;
            margin-bottom: 30px;
        }}
        .grade-summary table {{
            width: 100%;
            border-collapse: collapse;
        }}
        .grade-summary th, .grade-summary td {{
            padding: 10px;
            text-align: left;
            border-bottom: 1px solid #ddd;
        }}
        h1, h2, h3 {{ color: #2c3e50; }}
        code {{
            background: #f4f4f4;
            padding: 2px 6px;
            border-radius: 3px;
            font-family: monospace;
        }}
        pre {{
            background: #f4f4f4;
            padding: 15px;
            overflow-x: auto;
            border-radius: 5px;
        }}
        img {{ max-width: 100%; height: auto; }}
        </style>
        </head>
        <body>
        {''.join(html_parts)}
        </body>
        </html>
        """

        # Generate PDF
        HTML(string=full_html, base_url=os.getcwd()).write_pdf(args.out)
        print(f"✅ PDF generated: {args.out}")

    except ImportError as e:
        print(f"Cannot generate PDF: {e}")
        print("Required: pip install markdown weasyprint")
        sys.exit(1)
    except Exception as e:
        print(f"PDF generation failed: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()
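The script above was a small CLI; a representative invocation, mirroring the flags it defines (all values illustrative), would be:

    python3 generate_pdf_report.py \
      --report REPORT.md --frontend FRONTEND.md \
      --grade final_grade.json --out grade_report.pdf \
      --student-id "20230001" --student-name "San Zhang" \
      --class-name "SE-101" --commit-sha "$GITHUB_SHA"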
@ -1 +0,0 @@
print("Metadata upload skipped")
@ -14,7 +14,7 @@ permissions:
  pull-requests: write

jobs:
  # Check whether CI should run (grading executes only when the commit message contains "完成作业")
  check-trigger:
    runs-on: docker
    container:
@ -33,7 +33,7 @@ jobs:
          else
            echo "trigger=false" >> $GITHUB_OUTPUT
            echo "⛔ Only commits containing “完成作业” trigger automatic grading" >&2
          fi
        fi
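The hunk above shows only the else branch of the trigger test; a minimal sketch of the full check, assuming the commit message has been captured in a COMMIT_MSG variable (that name is not shown in this diff):

    if echo "$COMMIT_MSG" | grep -q "完成作业"; then
      echo "trigger=true" >> $GITHUB_OUTPUT
    else
      echo "trigger=false" >> $GITHUB_OUTPUT
      echo "⛔ Only commits containing “完成作业” trigger automatic grading" >&2
    fi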

  grade:
    needs: check-trigger
@ -63,11 +63,7 @@ fi
          pip3 install --break-system-packages python-dotenv requests markdown weasyprint -i https://mirrors.aliyun.com/pypi/simple --trusted-host mirrors.aliyun.com
          # Refresh the font cache
          fc-cache -f -v > /dev/null 2>&1 || true
<<<<<<< HEAD
          rm -rf /var/lib/apt/lists/*
=======
          rm -rf /var/lib/apt/lists/*
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Configure Gradle mirror (Aliyun)
        run: |
@ -84,11 +80,7 @@ fi
            }
          }
          EOF
<<<<<<< HEAD
          echo "✅ Gradle configured to use Aliyun mirror"
=======
          echo "✅ Gradle configured to use Aliyun mirror"
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Checkout code
        env:
@ -101,16 +93,11 @@ fi
          AUTH_URL=$(echo "$REPO_URL" | sed "s|://|://${GITHUB_TOKEN}@|")
          git remote add origin "$AUTH_URL"
          git fetch --depth=1 origin ${{ github.sha }}
<<<<<<< HEAD
          git checkout ${{ github.sha }}
=======
          git checkout ${{ github.sha }}
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Fix permissions
        run: chown -R $(whoami):$(whoami) ${{ github.workspace }} || true

<<<<<<< HEAD
      - name: Fetch hidden tests and grading scripts
        working-directory: ${{ github.workspace }}
        env:
@ -191,209 +178,6 @@ fi

          # Cleanup
          rm -rf _priv_tests
=======
      - name: Setup local grading scripts
        working-directory: ${{ github.workspace }}
        run: |
          echo "📦 Setting up local grading environment..."

          # Create the autograde directory with simple grading scripts
          mkdir -p .autograde

          # Create grade_grouped.py
          cat > .autograde/grade_grouped.py << 'GRADE_SCRIPT'
          import argparse
          import json
          import os
          import sys
          import xml.etree.ElementTree as ET
          from pathlib import Path

          def parse_junit_results(junit_dir):
              results = {"passed": 0, "failed": 0, "total": 0, "tests": []}
              junit_path = Path(junit_dir)
              if not junit_path.exists():
                  return results
              for xml_file in junit_path.glob("TEST-*.xml"):
                  try:
                      tree = ET.parse(xml_file)
                      root = tree.getroot()
                      for testcase in root.findall(".//testcase"):
                          name = testcase.get("name", "unknown")
                          classname = testcase.get("classname", "")
                          failure = testcase.find("failure")
                          error = testcase.find("error")
                          status = "passed" if failure is None and error is None else "failed"
                          results["tests"].append({"name": f"{classname}.{name}", "status": status})
                          results["total"] += 1
                          if status == "passed":
                              results["passed"] += 1
                          else:
                              results["failed"] += 1
                  except Exception as e:
                      print(f"Error parsing {xml_file}: {e}")
              return results

          def main():
              parser = argparse.ArgumentParser()
              parser.add_argument("--junit-dir", required=True)
              parser.add_argument("--groups", default="test_groups.json")
              parser.add_argument("--out", default="grade.json")
              parser.add_argument("--summary", default="summary.md")
              args = parser.parse_args()

              results = parse_junit_results(args.junit_dir)
              score = int((results["passed"] / max(results["total"], 1)) * 60)

              grade = {
                  "programming_score": score,
                  "max_score": 60,
                  "passed": results["passed"],
                  "failed": results["failed"],
                  "total": results["total"],
                  "tests": results["tests"]
              }

              with open(args.out, "w") as f:
                  json.dump(grade, f, indent=2)

              with open(args.summary, "w") as f:
                  f.write("# Programming Test Results\n\n")
                  f.write(f"- **Score**: {score}/60\n")
                  f.write(f"- **Passed**: {results['passed']}/{results['total']}\n\n")
                  for t in results["tests"]:
                      icon = "✅" if t["status"] == "passed" else "❌"
                      f.write(f"- {icon} {t['name']}\n")

              print(f"✅ Grade: {score}/60 ({results['passed']}/{results['total']} tests passed)")

          if __name__ == "__main__":
              main()
          GRADE_SCRIPT
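          # Illustrative scoring math: 45 of 50 tests passed gives int(45/50 * 60) = 54 of 60 points.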

          # Create llm_grade.py (simplified - it scores by answer length alone)
          cat > .autograde/llm_grade.py << 'LLM_SCRIPT'
          import argparse
          import json
          import os

          def main():
              parser = argparse.ArgumentParser()
              parser.add_argument("--question", default="")
              parser.add_argument("--answer", required=True)
              parser.add_argument("--rubric", default="")
              parser.add_argument("--out", default="grade.json")
              parser.add_argument("--summary", default="summary.md")
              args = parser.parse_args()

              score = 0
              feedback = ""

              if os.path.exists(args.answer):
                  with open(args.answer, "r", encoding="utf-8") as f:
                      content = f.read()
                  # len() counts characters, not words
                  char_count = len(content)
                  if char_count > 500:
                      score = 10
                      feedback = "Report submitted with sufficient content."
                  elif char_count > 200:
                      score = 7
                      feedback = "Report submitted but could use more detail."
                  else:
                      score = 3
                      feedback = "Report is too short."
              else:
                  feedback = "Report file not found."

              with open(args.out, "w") as f:
                  json.dump({"total": score, "feedback": feedback}, f, indent=2)

              with open(args.summary, "w") as f:
                  f.write(f"# Report Grade\n\n- **Score**: {score}/10\n- **Feedback**: {feedback}\n")

              print(f"✅ Report grade: {score}/10")

          if __name__ == "__main__":
              main()
          LLM_SCRIPT
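          # Note: the thresholds above are character counts; e.g. a 350-character REPORT.md would score 7/10.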

          # Create aggregate_final_grade.py
          cat > .autograde/aggregate_final_grade.py << 'AGG_SCRIPT'
          import argparse
          import json

          def load_json(path):
              try:
                  with open(path) as f:
                      return json.load(f)
              except Exception:
                  return {}

          def main():
              parser = argparse.ArgumentParser()
              parser.add_argument("--programming", default="grade.json")
              parser.add_argument("--report", default="report_grade.json")
              parser.add_argument("--frontend", default="frontend_grade.json")
              parser.add_argument("--out", default="final_grade.json")
              parser.add_argument("--summary", default="final_summary.md")
              args = parser.parse_args()

              prog = load_json(args.programming)
              report = load_json(args.report)
              frontend = load_json(args.frontend)

              prog_score = prog.get("programming_score", 0)
              report_score = report.get("total", 0)
              frontend_score = frontend.get("total", 0)
              total = prog_score + report_score + frontend_score

              final = {
                  "total": total,
                  "programming": prog_score,
                  "report": report_score,
                  "frontend": frontend_score
              }

              with open(args.out, "w") as f:
                  json.dump(final, f, indent=2)

              with open(args.summary, "w") as f:
                  f.write("# Final Grade Summary\n\n")
                  f.write("| Category | Score |\n|----------|-------|\n")
                  f.write(f"| Programming | {prog_score}/60 |\n")
                  f.write(f"| Report | {report_score}/10 |\n")
                  f.write(f"| Frontend | {frontend_score}/10 |\n")
                  f.write(f"| **Total** | **{total}/80** |\n")

              print(f"🎯 Final grade: {total}/80")

          if __name__ == "__main__":
              main()
          AGG_SCRIPT
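          # Category weights, as encoded in the scripts above: programming 60 + report 10 + frontend 10 = 80 total.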

          # Create the minimal metadata script
          cat > .autograde/create_minimal_metadata.py << 'META_SCRIPT'
          import json
          import os

          grade_file = "final_grade.json"
          if os.path.exists(grade_file):
              with open(grade_file) as f:
                  data = json.load(f)
              print(json.dumps({"grade": data.get("total", 0), "status": "graded"}))
          else:
              print(json.dumps({"grade": 0, "status": "error"}))
          META_SCRIPT

          # Create upload_metadata.py (stub)
          cat > .autograde/upload_metadata.py << 'UPLOAD_SCRIPT'
          print("Metadata upload skipped (local mode)")
          UPLOAD_SCRIPT

          # Note: generate_pdf_report.py is NOT overwritten - the one from the repo is used

          echo "✅ Local grading scripts created"
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Run tests
        working-directory: ${{ github.workspace }}
@ -406,11 +190,7 @@ fi
          # Also try to get a single combined report
          if ls build/test-results/test/TEST-*.xml > /dev/null 2>&1; then
            cp build/test-results/test/TEST-*.xml junit.xml 2>/dev/null || true
<<<<<<< HEAD
          fi
=======
          fi
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Grade programming tests
        working-directory: ${{ github.workspace }}
@ -420,69 +200,41 @@ fi
            --junit-dir build/test-results/test \
            --groups test_groups.json \
            --out grade.json \
<<<<<<< HEAD
            --summary summary.md
=======
            --summary summary.md
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Grade REPORT.md
        working-directory: ${{ github.workspace }}
        run: |
<<<<<<< HEAD
          # LLM env vars are injected by Runner config (LLM_API_KEY, LLM_API_URL, LLM_MODEL)
          if [ -f REPORT.md ] && [ -f .llm_rubrics/rubric_report.json ]; then
            python3 ./.autograde/llm_grade.py \
              --question "Please evaluate this backend and system design report" \
              --answer REPORT.md \
              --rubric .llm_rubrics/rubric_report.json \
=======
          if [ -f REPORT.md ]; then
            python3 ./.autograde/llm_grade.py \
              --question "Please evaluate this backend and system design report" \
              --answer REPORT.md \
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
              --out report_grade.json \
              --summary report_summary.md
            echo "✅ REPORT.md graded"
          else
            echo '{"total": 0, "flags": ["missing_file"]}' > report_grade.json
<<<<<<< HEAD
            echo "⚠️ REPORT.md or rubric not found"
          fi
=======
            echo "⚠️ REPORT.md not found"
          fi
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Grade FRONTEND.md
        working-directory: ${{ github.workspace }}
        run: |
<<<<<<< HEAD
          # LLM env vars are injected by Runner config (LLM_API_KEY, LLM_API_URL, LLM_MODEL)
          if [ -f FRONTEND.md ] && [ -f .llm_rubrics/rubric_frontend.json ]; then
            python3 ./.autograde/llm_grade.py \
              --question "Please evaluate this frontend UI and interaction design report" \
              --answer FRONTEND.md \
              --rubric .llm_rubrics/rubric_frontend.json \
=======
          if [ -f FRONTEND.md ]; then
            python3 ./.autograde/llm_grade.py \
              --question "Please evaluate this frontend UI and interaction design report" \
              --answer FRONTEND.md \
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
              --out frontend_grade.json \
              --summary frontend_summary.md
            echo "✅ FRONTEND.md graded"
          else
            echo '{"total": 0, "flags": ["missing_file"]}' > frontend_grade.json
<<<<<<< HEAD
            echo "⚠️ FRONTEND.md or rubric not found"
          fi
=======
            echo "⚠️ FRONTEND.md not found"
          fi
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Aggregate grades
        working-directory: ${{ github.workspace }}
@ -492,11 +244,7 @@ fi
            --report report_grade.json \
            --frontend frontend_grade.json \
            --out final_grade.json \
<<<<<<< HEAD
            --summary final_summary.md
=======
            --summary final_summary.md
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Generate PDF report
        working-directory: ${{ github.workspace }}
@ -531,11 +279,7 @@ fi
            --student-name "$STUDENT_NAME" \
            --class-name "$CLASS_NAME" \
            --commit-sha "$COMMIT_SHA"
<<<<<<< HEAD
          fi
=======
          fi
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Upload report to student repo
        if: env.RUNNER_METADATA_TOKEN != ''
@ -606,11 +350,7 @@ fi

          # Clean up temporary files
          rm -f /tmp/upload_request.json
<<<<<<< HEAD
          fi
=======
          fi
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Create metadata
        working-directory: ${{ github.workspace }}
@ -620,11 +360,7 @@ fi
          if [ -f final_grade.json ]; then
            export GRADE_TYPE=final
            python3 ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
<<<<<<< HEAD
          fi
=======
          fi
>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174

      - name: Upload metadata
        if: env.RUNNER_METADATA_TOKEN != ''
@ -650,9 +386,4 @@ fi
            --workflow grade \
            --server-url "${SERVER_URL}" \
            --external-host "${EXTERNAL_GITEA_HOST}"
<<<<<<< HEAD
          fi
=======
          fi

>>>>>>> f52713277609e6ae5f53d33eebe8556ff9233174
          fi

Binary file not shown.