feat: implement training and deployment pipeline for LightGBM model on Mac to LXC

- Added comprehensive plans for training a LightGBM model on M4 Mac Mini and deploying it to an LXC container.
- Created scripts for model training, deployment, and a full pipeline execution.
- Enhanced model transfer with error handling and logging for better tracking.
- Introduced profiling for training time analysis and dataset generation optimization.

Made-with: Cursor
This commit is contained in:
21in7
2026-03-01 18:30:01 +09:00
parent 298d4ad95e
commit 8f834a1890
6 changed files with 1054 additions and 1 deletion

59
scripts/deploy_model.sh Executable file
View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Deploy the model trained on the Mac mini to the LXC container's volume path.
#
# Usage: bash scripts/deploy_model.sh [LXC_HOST] [LXC_MODELS_PATH]
#
# Examples:
#   bash scripts/deploy_model.sh 10.1.10.24 /path/to/cointrader/models
#   bash scripts/deploy_model.sh root@10.1.10.24 /root/cointrader/models
set -euo pipefail

LXC_HOST="${1:-root@10.1.10.24}"
LXC_MODELS_PATH="${2:-/root/cointrader/models}"
readonly LOCAL_MODEL="models/lgbm_filter.pkl"
readonly LOCAL_LOG="models/training_log.json"

if [[ ! -f "$LOCAL_MODEL" ]]; then
  # Diagnostics go to stderr so progress output on stdout stays clean.
  echo "[오류] 모델 파일 없음: $LOCAL_MODEL" >&2
  echo "먼저 python scripts/train_model.py 를 실행하세요." >&2
  exit 1
fi

echo "=== 모델 전송 시작 ==="
echo " 대상: ${LXC_HOST}:${LXC_MODELS_PATH}"
echo " 파일: $LOCAL_MODEL"

# Ensure the target directory exists, then back up the current model as
# *_prev.pkl on the remote host so a bad deploy can be rolled back.
ssh "${LXC_HOST}" "
  mkdir -p '${LXC_MODELS_PATH}'
  if [ -f '${LXC_MODELS_PATH}/lgbm_filter.pkl' ]; then
    cp '${LXC_MODELS_PATH}/lgbm_filter.pkl' '${LXC_MODELS_PATH}/lgbm_filter_prev.pkl'
    echo ' 기존 모델 백업 완료'
  fi
"

# Probe rsync availability once (local AND remote) instead of once per file;
# each remote probe costs an extra ssh round-trip.
USE_RSYNC=0
if command -v rsync &>/dev/null && ssh "${LXC_HOST}" "command -v rsync" &>/dev/null; then
  USE_RSYNC=1
fi

# Transfer the model (rsync preferred for resume/compression, scp fallback).
if (( USE_RSYNC )); then
  rsync -avz --progress \
    "$LOCAL_MODEL" \
    "${LXC_HOST}:${LXC_MODELS_PATH}/lgbm_filter.pkl"
else
  echo " rsync 없음 → scp 사용"
  scp "$LOCAL_MODEL" "${LXC_HOST}:${LXC_MODELS_PATH}/lgbm_filter.pkl"
fi

# Ship the training log alongside the model when it exists (optional file).
if [[ -f "$LOCAL_LOG" ]]; then
  if (( USE_RSYNC )); then
    rsync -avz "$LOCAL_LOG" "${LXC_HOST}:${LXC_MODELS_PATH}/training_log.json"
  else
    scp "$LOCAL_LOG" "${LXC_HOST}:${LXC_MODELS_PATH}/training_log.json"
  fi
  echo " 학습 로그 전송 완료"
fi

echo "=== 전송 완료 ==="
echo ""
echo "봇이 실행 중이라면 아래 명령으로 모델을 즉시 리로드할 수 있습니다:"
echo " docker exec cointrader python -c \\"
echo " \"from src.ml_filter import MLFilter; f=MLFilter(); f.reload_model(); print('리로드 완료')\""

34
scripts/train_and_deploy.sh Executable file
View File

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Run the full training pipeline on the Mac mini and deploy the result to LXC:
# fetch history -> train the model -> push artifacts to the container.
#
# Usage: bash scripts/train_and_deploy.sh [LXC_HOST] [LXC_MODELS_PATH]
#
# Examples:
#   bash scripts/train_and_deploy.sh
#   bash scripts/train_and_deploy.sh root@10.1.10.24 /root/cointrader/models
set -euo pipefail

lxc_host="${1:-root@10.1.10.24}"
lxc_models_path="${2:-/root/cointrader/models}"

# Resolve the repository root from this script's own location so the
# pipeline works regardless of the directory it is invoked from.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${script_dir}/.."

echo "=== [1/3] 데이터 수집 ==="
python scripts/fetch_history.py --symbol XRPUSDT --interval 1m --days 90 --output data/xrpusdt_1m.parquet
echo ""

echo "=== [2/3] 모델 학습 ==="
python scripts/train_model.py --data data/xrpusdt_1m.parquet
echo ""

echo "=== [3/3] LXC 배포 ==="
bash scripts/deploy_model.sh "$lxc_host" "$lxc_models_path"
echo ""

echo "=== 전체 파이프라인 완료 ==="
echo ""
echo "봇 재시작이 필요하면:"
echo " ssh ${lxc_host} 'cd /root/cointrader && docker compose restart cointrader'"

View File

@@ -69,7 +69,7 @@ def _process_index(args: tuple) -> dict | None:
ind = Indicators(window)
df_ind = ind.calculate_all()
if df_ind.isna().any().any():
if df_ind.iloc[-1].isna().any():
return None
signal = ind.get_signal(df_ind)