Initial commit
Commit 1e10a311a8

.env.example (Normal file, 6 lines)
@@ -0,0 +1,6 @@
# DeepSeek API Key
# Get your key at: https://platform.deepseek.com/
DEEPSEEK_API_KEY=sk-your-api-key-here

# If you use other APIs, add their keys here
# OTHER_API_KEY=your-key-here
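
For reference, a minimal sketch of how project code might consume this key, using the python-dotenv and openai packages already listed in requirements.txt. The DeepSeek base URL and model name are assumptions based on DeepSeek's OpenAI-compatible API and should be checked against the current documentation.

```python
# llm_client.py (illustrative sketch, not part of the template)
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # read DEEPSEEK_API_KEY from .env into the environment

api_key = os.getenv("DEEPSEEK_API_KEY")
if not api_key:
    raise SystemExit("DEEPSEEK_API_KEY is not set; copy .env.example to .env first")

# DeepSeek exposes an OpenAI-compatible endpoint; base_url and model are assumptions to verify.
client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")

response = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)
```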

.gitea/workflows/autograde.yml (Normal file, 226 lines)
@@ -0,0 +1,226 @@
name: autograde-assignment-05-final-project

on:
  push:
    branches:
      - main
    tags:
      - 'submit'
      - 'submit-*'
  workflow_dispatch:

permissions:
  contents: read
  pull-requests: write

jobs:
  check-trigger:
    runs-on: docker
    container:
      image: alpine:latest
    outputs:
      should_run: ${{ steps.check.outputs.trigger }}
    steps:
      - name: Check commit message for trigger keyword
        id: check
        run: |
          COMMIT_MSG="${{ github.event.head_commit.message || '' }}"
          echo "Commit message: $COMMIT_MSG"
          if echo "$COMMIT_MSG" | grep -q "完成作业"; then
            echo "trigger=true" >> $GITHUB_OUTPUT
            echo "✅ Commit message contains \"完成作业\"; grading will run"
          else
            echo "trigger=false" >> $GITHUB_OUTPUT
            echo "⛔ Only commits whose message contains \"完成作业\" trigger autograding" >&2
          fi

  grade:
    needs: check-trigger
    if: needs.check-trigger.outputs.should_run == 'true'
    runs-on: docker
    container:
      image: python:3.11
      options: --user root
    timeout-minutes: 30

    steps:
      - name: Configure APT mirror (Aliyun)
        run: |
          set -e
          for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do
            [ -f "$f" ] || continue
            sed -i -E 's|https?://deb.debian.org|http://mirrors.aliyun.com|g' "$f" || true
            sed -i -E 's|https?://security.debian.org|http://mirrors.aliyun.com/debian-security|g' "$f" || true
            sed -i -E 's|https?://archive.ubuntu.com|http://mirrors.aliyun.com|g' "$f" || true
            sed -i -E 's|https?://ports.ubuntu.com|http://mirrors.aliyun.com|g' "$f" || true
          done
          apt-get -o Acquire::Check-Valid-Until=false update -y
          DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends git ca-certificates python3-pip rsync
          rm -rf /var/lib/apt/lists/*

      - name: Checkout code
        env:
          GITHUB_TOKEN: ${{ github.token }}
        run: |
          git config --global --add safe.directory ${{ github.workspace }}
          git init
          REPO_URL="${{ github.server_url }}/${{ github.repository }}.git"
          AUTH_URL=$(echo "$REPO_URL" | sed "s|://|://${GITHUB_TOKEN}@|")
          git remote add origin "$AUTH_URL"
          git fetch --depth=1 origin ${{ github.sha }}
          git checkout ${{ github.sha }}

      - name: Fix permissions
        run: chown -R $(whoami):$(whoami) ${{ github.workspace }} || true

      - name: Fetch grading scripts
        working-directory: ${{ github.workspace }}
        env:
          EXTERNAL_GITEA_HOST: ${{ secrets.EXTERNAL_GITEA_HOST }}
        run: |
          set -e
          TESTS_USERNAME="${RUNNER_TESTS_USERNAME:-}"
          TESTS_TOKEN="${RUNNER_TESTS_TOKEN:-}"
          if [ -z "$TESTS_TOKEN" ] || [ -z "$TESTS_USERNAME" ]; then
            echo "❌ RUNNER_TESTS_USERNAME / RUNNER_TESTS_TOKEN not set!"
            exit 1
          fi

          # Resolve host
          if [ -n "$EXTERNAL_GITEA_HOST" ]; then
            HOST="$EXTERNAL_GITEA_HOST"
          elif [ -n "$GITEA_ROOT_URL" ]; then
            HOST=$(echo "$GITEA_ROOT_URL" | sed 's|https\?://||' | sed 's|/$||')
          else
            HOST=$(echo "${{ github.server_url }}" | sed 's|https\?://||' | cut -d'/' -f1)
          fi

          ORG=$(echo "${{ github.repository }}" | cut -d'/' -f1)
          REPO_NAME=$(echo "${{ github.repository }}" | cut -d'/' -f2)

          if echo "$REPO_NAME" | grep -q -- '-stu_'; then
            ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-stu_.*//')
          elif echo "$REPO_NAME" | grep -q -- '-template'; then
            ASSIGNMENT_ID=$(echo "$REPO_NAME" | sed 's/-template.*//')
          else
            ASSIGNMENT_ID="assignment-05-final-project"
          fi

          echo "📥 Fetching grading scripts from ${ORG}/${ASSIGNMENT_ID}-tests..."
          AUTH_URL="http://${TESTS_USERNAME}:${TESTS_TOKEN}@${HOST}/${ORG}/${ASSIGNMENT_ID}-tests.git"
          git -c http.sslVerify=false clone --depth=1 "$AUTH_URL" _priv_tests

          rm -rf .autograde
          mkdir -p .autograde
          cp _priv_tests/autograde/*.py .autograde/
          cp _priv_tests/autograde/*.sh .autograde/ 2>/dev/null || true

          # Copy LLM rubrics
          if [ -d "_priv_tests/llm" ]; then
            mkdir -p .llm_rubrics
            cp _priv_tests/llm/*.json .llm_rubrics/ 2>/dev/null || true
          fi

          rm -rf _priv_tests

      - name: Install Python dependencies
        run: |
          pip config set global.index-url https://mirrors.aliyun.com/pypi/simple
          if [ -f requirements.txt ]; then
            pip install --no-cache-dir -r requirements.txt
          fi
          # Install dependencies for the grading scripts
          pip install --no-cache-dir requests python-dotenv pyyaml

      - name: Validate manifest.yaml
        run: |
          if [ ! -f manifest.yaml ]; then
            echo "❌ manifest.yaml not found!"
            echo '{"error": "missing_manifest"}' > run_results.json
          else
            echo "✅ manifest.yaml found"
          fi

      - name: Run project commands
        working-directory: ${{ github.workspace }}
        run: |
          if [ -f manifest.yaml ]; then
            python ./.autograde/run_project.py . --out run_results.json --timeout 60
          fi

      - name: Grade documentation (LLM)
        run: |
          python ./.autograde/llm_evaluate.py \
            --run-results run_results.json \
            --rubric .llm_rubrics/rubric_documentation.json \
            --dimension documentation \
            --out doc_grade.json

      - name: Grade functionality (LLM)
        run: |
          python ./.autograde/llm_evaluate.py \
            --run-results run_results.json \
            --rubric .llm_rubrics/rubric_functionality.json \
            --dimension functionality \
            --out func_grade.json

      - name: Grade code quality (LLM)
        run: |
          python ./.autograde/llm_evaluate.py \
            --run-results run_results.json \
            --rubric .llm_rubrics/rubric_code_quality.json \
            --dimension code_quality \
            --out code_grade.json

      - name: Aggregate grades
        run: |
          python ./.autograde/aggregate_grade.py \
            --documentation doc_grade.json \
            --functionality func_grade.json \
            --code-quality code_grade.json \
            --out final_grade.json \
            --summary final_summary.md

      - name: Create metadata
        working-directory: ${{ github.workspace }}
        env:
          REPO: ${{ github.repository }}
          LANGUAGE: python
        run: |
          if [ -f final_grade.json ]; then
            export GRADE_TYPE=final
            export GRADE_FILE=final_grade.json
            if [ -f .autograde/create_minimal_metadata.py ]; then
              python ./.autograde/create_minimal_metadata.py > metadata.json || echo "{}" > metadata.json
            else
              echo "{}" > metadata.json
            fi
          fi

      - name: Upload metadata
        if: env.RUNNER_METADATA_TOKEN != ''
        working-directory: ${{ github.workspace }}
        env:
          METADATA_REPO: ${{ github.repository_owner }}/course-metadata
          METADATA_TOKEN: ${{ env.RUNNER_METADATA_TOKEN }}
          METADATA_BRANCH: ${{ env.RUNNER_METADATA_BRANCH }}
          STUDENT_REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          COMMIT_SHA: ${{ github.sha }}
          SERVER_URL: ${{ github.server_url }}
        run: |
          if [ -f metadata.json ] && [ -f .autograde/upload_metadata.py ]; then
            python ./.autograde/upload_metadata.py \
              --metadata-file metadata.json \
              --metadata-repo "${METADATA_REPO}" \
              --branch "${METADATA_BRANCH:-main}" \
              --student-repo "${STUDENT_REPO}" \
              --run-id "${RUN_ID}" \
              --commit-sha "${COMMIT_SHA}" \
              --workflow grade \
              --server-url "${SERVER_URL}" \
              --external-host "${EXTERNAL_GITEA_HOST}"
          else
            echo "⚠️ metadata.json or upload_metadata.py not found, skipping upload"
          fi
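
The grading scripts invoked above (run_project.py, llm_evaluate.py, aggregate_grade.py) live in the private `*-tests` repository and are not part of this commit. Purely as an illustration of the interface the workflow calls, a grader with this command line might look roughly like the sketch below; the argument names match the workflow invocation, while the rubric format, prompt, and output schema are assumptions, not the actual script.

```python
# Hypothetical sketch of an llm_evaluate.py-style grader; the real script is not in this repo.
import argparse
import json
import os

from openai import OpenAI  # assumes an OpenAI-compatible endpoint such as DeepSeek


def main() -> None:
    parser = argparse.ArgumentParser(description="LLM-based grading for one rubric dimension")
    parser.add_argument("--run-results", required=True, help="JSON produced by run_project.py")
    parser.add_argument("--rubric", required=True, help="rubric JSON for this dimension")
    parser.add_argument("--dimension", required=True, help="e.g. documentation, functionality")
    parser.add_argument("--out", required=True, help="where to write the grade JSON")
    args = parser.parse_args()

    with open(args.run_results, encoding="utf-8") as f:
        run_results = json.load(f)
    with open(args.rubric, encoding="utf-8") as f:
        rubric = json.load(f)

    client = OpenAI(api_key=os.environ["DEEPSEEK_API_KEY"], base_url="https://api.deepseek.com")
    prompt = (
        "Grade the following project run results against the rubric. "
        'Reply with JSON only: {"score": <0-100>, "feedback": "..."}.\n\n'
        f"Rubric:\n{json.dumps(rubric, ensure_ascii=False)}\n\n"
        f"Run results:\n{json.dumps(run_results, ensure_ascii=False)}"
    )
    reply = client.chat.completions.create(
        model="deepseek-chat",
        messages=[{"role": "user", "content": prompt}],
    )
    graded = json.loads(reply.choices[0].message.content)

    # Output schema is an assumption; aggregate_grade.py defines the real contract.
    with open(args.out, "w", encoding="utf-8") as f:
        json.dump({"dimension": args.dimension, **graded}, f, ensure_ascii=False, indent=2)


if __name__ == "__main__":
    main()
```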

CHANGELOG.md (Normal file, 48 lines)
@@ -0,0 +1,48 @@
# Changelog

## v1.0.0 (YYYY-MM-DD) - Final submission

### Added

- [Feature] Completed the XXX feature
- [Feature] Added the XXX capability

### Fixed

- [Bug] Fixed the XXX issue
  - Cause: describe what caused the problem
  - Change: describe how it was fixed

### Improved

- [Optimization] Improved the performance of XXX
- [Refactor] Refactored the XXX module for better readability

---

## v0.2.0 (YYYY-MM-DD) - Feature completion

### Added

- Added the XXX feature

### Fixed

- Fixed the XXX edge case
- Fixed XXX error handling

---

## v0.1.0 (YYYY-MM-DD) - Initial AI-generated version

### Added

- Basic functionality implemented
- Initial code skeleton generated by AI

### Known Issues

- XXX edge case not handled
- XXX error handling incomplete
- XXX feature not yet implemented

README.md (Normal file, 73 lines)
@@ -0,0 +1,73 @@
# Project Name

One-sentence description: what does this project do?

## Features

- ✅ Feature 1: description
- ✅ Feature 2: description
- ✅ Feature 3: LLM feature description

## Quick Start

### Requirements

- Python 3.10+
- DeepSeek API Key

### Installation

```bash
pip install -r requirements.txt
```

### Configuration

1. Copy `.env.example` to `.env`
2. Fill in your DeepSeek API Key

```bash
cp .env.example .env
# Edit the .env file and fill in your API Key
```

### Running

```bash
# CLI mode
python src/main.py --help
python src/main.py [command] [args]

# Or web mode (if applicable)
# streamlit run app.py
```

## Usage Examples

```bash
# Example command 1
python src/main.py example1

# Example command 2
python src/main.py example2
```

## Project Structure

```
project/
├── src/
│   ├── __init__.py
│   ├── main.py          # main entry point
│   └── ...              # other modules
├── data/                # data files
├── output/              # output files
├── manifest.yaml        # project run manifest
├── requirements.txt     # dependencies
└── README.md            # this file
```

## Author

[Name] - [Student ID]

REPORT.md (Normal file, 80 lines)
@@ -0,0 +1,80 @@
# Final Project Reflection Report

## 1. Project Positioning (Why build this?)

### Project Name

[Fill in the project name here]

### The Problem It Solves

What problem does this project solve? Who would use it? Why is it worth building?

> [Answer here]

### Why You Chose This Direction

You had other options; why did you pick this one?

> [Answer here]

## 2. Technical Decisions (How did you think them through?)

### Key Technology Choices

| Decision Point | Your Choice | Alternatives Considered | Rationale |
|----------------|-------------|-------------------------|-----------|
| Data storage | [e.g. JSON] | [e.g. SQLite, CSV] | [Rationale] |
| User interface | [e.g. CLI] | [e.g. Streamlit] | [Rationale] |
| LLM provider | [e.g. DeepSeek] | [e.g. OpenAI] | [Rationale] |
| ... | ... | ... | ... |

### The Hardest Technical Decision

Describe a technical decision you struggled with, and how you finally made the call.

> [Answer here]

## 3. Working with AI

### Your Most Effective Prompt

Show a prompt you think was well written:

```
[Your prompt]
```

Why was this prompt effective?

> [Answer here]

### Where AI Could Not Help

What could AI not help with, that you had to think through yourself?

> [Answer here]

## 4. Iteration and Growth

### The Biggest Challenge

Describe the biggest challenge you ran into and how you overcame it.

- **Challenge**: [describe the challenge]
- **Attempted solutions**: [what you tried]
- **Final solution**: [how it was ultimately resolved]
- **Lessons learned**: [what you learned from it]

### If You Did It Again

If you were to redo this project, what would you do differently?

> [Answer here]

## 5. Self-Assessment

How complete do you think the project is? Any regrets?

> [Answer here]

manifest.yaml (Normal file, 35 lines)
@@ -0,0 +1,35 @@
# manifest.yaml - project run manifest (required)
# The grading system runs your project automatically based on this file

project:
  name: "Your project name"
  description: "One-sentence description of what the project does"

# Commands to run (the grading system executes these in order)
commands:
  # Main feature demos (required, at least 3 commands)
  demo:
    - command: "python src/main.py --help"
      description: "Show help information"
    # TODO: add your own feature demo commands
    # - command: "python src/main.py <args>"
    #   description: "Describe the demoed feature"

  # Error-handling demos (required, at least 2 commands)
  error_handling:
    - command: "python src/main.py invalid_command"
      description: "Test handling of an invalid command"
    # TODO: add more error-handling tests
    # - command: "python src/main.py --invalid-param"
    #   description: "Test handling of an invalid parameter"

# Web UI (optional, if you have a Streamlit app)
# web:
#   command: "streamlit run app.py --server.headless true"
#   port: 8501

# Environment variables required (injected at grading time)
env_vars:
  - DEEPSEEK_API_KEY
  # List any other API keys you need here
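
The workflow's "Run project commands" step feeds this manifest to run_project.py, which is kept in the private tests repository. As a rough, hypothetical reading of how such a runner could consume this file (the real script may differ in every detail), consider:

```python
# Hypothetical sketch of a manifest-driven runner; the real run_project.py is not in this repo.
import json
import shlex
import subprocess

import yaml  # pyyaml, installed by the workflow


def run_manifest(manifest_path: str = "manifest.yaml", out_path: str = "run_results.json",
                 timeout: int = 60) -> None:
    with open(manifest_path, encoding="utf-8") as f:
        manifest = yaml.safe_load(f)

    results = []
    commands = manifest.get("commands", {})
    for section in ("demo", "error_handling"):
        for entry in commands.get(section, []) or []:
            cmd = entry["command"]
            try:
                proc = subprocess.run(shlex.split(cmd), capture_output=True, text=True,
                                      timeout=timeout)
                results.append({"section": section, "command": cmd,
                                "exit_code": proc.returncode,
                                "stdout": proc.stdout[-2000:], "stderr": proc.stderr[-2000:]})
            except subprocess.TimeoutExpired:
                results.append({"section": section, "command": cmd, "error": "timeout"})

    # Result schema is an assumption; the grading scripts define the real format.
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump({"project": manifest.get("project", {}), "results": results}, f,
                  ensure_ascii=False, indent=2)


if __name__ == "__main__":
    run_manifest()
```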

requirements.txt (Normal file, 10 lines)
@@ -0,0 +1,10 @@
# Core dependencies
openai>=1.0.0
python-dotenv>=1.0.0

# Add other dependencies as your project needs them
# requests>=2.31.0
# pandas>=2.0.0
# matplotlib>=3.7.0
# streamlit>=1.28.0

src/__init__.py (Normal file, 2 lines)
@@ -0,0 +1,2 @@
# Project source code package

src/main.py (Normal file, 40 lines)
@@ -0,0 +1,40 @@
#!/usr/bin/env python3
"""
Project main entry point.
TODO: implement your project's functionality
"""

import argparse
import sys
from dotenv import load_dotenv

# Load environment variables from .env
load_dotenv()


def main():
    """Main function."""
    parser = argparse.ArgumentParser(
        description="Your project description",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python src/main.py --help              show help
  python src/main.py [command] [args]    run a feature
"""
    )

    # TODO: add your command-line arguments
    # parser.add_argument("command", help="command to run")
    # parser.add_argument("--option", help="an option")

    args = parser.parse_args()

    # TODO: implement your main logic
    print("🚧 Project not yet implemented")
    print("Edit src/main.py to implement your features")


if __name__ == "__main__":
    main()
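
One way the TODOs above could be filled in is with argparse subcommands, which also lines up with the manifest's error-handling demo: an unknown command makes argparse print usage and exit with a non-zero status. A minimal illustrative sketch follows; the subcommand names are placeholders, not part of the template.

```python
# Illustrative only: one way to wire subcommands into the skeleton above.
import argparse

from dotenv import load_dotenv

load_dotenv()


def main():
    parser = argparse.ArgumentParser(description="Your project description")
    subparsers = parser.add_subparsers(dest="command", required=True)

    greet = subparsers.add_parser("greet", help="print a greeting")
    greet.add_argument("name", help="who to greet")

    ask = subparsers.add_parser("ask", help="send a prompt to the LLM")
    ask.add_argument("prompt", help="the question to ask")

    args = parser.parse_args()

    if args.command == "greet":
        print(f"Hello, {args.name}!")
    elif args.command == "ask":
        # Call your LLM client here (see the .env.example sketch earlier).
        print(f"(would send to the LLM) {args.prompt}")


if __name__ == "__main__":
    main()
```

Running `python src/main.py invalid_command` against this sketch exits with status 2 and a usage message, which is the behavior the manifest's error_handling section exercises.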