feat: enhance model training and deployment scripts with time-weighted sampling

- Updated `train_model.py` and `train_mlx_model.py` to include a time weight decay parameter for improved sample weighting during training.
- Modified dataset generation to incorporate sample weights based on time decay, enhancing model performance.
- Adjusted deployment scripts to support new backend options and improved error handling for model file transfers.
- Added new entries to the training log for better tracking of model performance metrics over time.
- Included ONNX model export functionality in the MLX filter for compatibility with Linux servers.
This commit is contained in:
21in7
2026-03-01 21:25:06 +09:00
parent 301457ce57
commit db144750a3
10 changed files with 324 additions and 97 deletions

View File

@@ -275,28 +275,26 @@ def _calc_labels_vectorized(
fut_high = highs[idx + 1 : end]
fut_low = lows[idx + 1 : end]
label = None
label = 0 # 미도달(타임아웃) 시 실패로 간주
for h, l in zip(fut_high, fut_low):
if signal == "LONG":
if h >= tp:
label = 1
break
if l <= sl:
label = 0
break
else:
if l <= tp:
if h >= tp:
label = 1
break
else: # SHORT
if h >= sl:
label = 0
break
if l <= tp:
label = 1
break
if label is None:
valid_mask.append(False)
else:
labels.append(label)
valid_mask.append(True)
labels.append(label)
valid_mask.append(True)
return np.array(labels, dtype=np.int8), np.array(valid_mask, dtype=bool)
@@ -305,11 +303,17 @@ def generate_dataset_vectorized(
df: pd.DataFrame,
btc_df: pd.DataFrame | None = None,
eth_df: pd.DataFrame | None = None,
time_weight_decay: float = 0.0,
) -> pd.DataFrame:
"""
전체 시계열을 1회 계산해 학습 데이터셋을 생성한다.
기존 generate_dataset()의 drop-in 대체제.
btc_df, eth_df가 제공되면 21개 피처로 확장한다.
time_weight_decay: 지수 감쇠 강도. 0이면 균등 가중치.
양수일수록 최신 샘플에 더 높은 가중치를 부여한다.
예) 2.0 → 최신 샘플이 가장 오래된 샘플보다 e^2 ≈ 7.4배 높은 가중치.
결과 DataFrame에 'sample_weight' 컬럼으로 포함된다.
"""
print(" [1/3] 전체 시계열 지표 계산 (1회)...")
d = _calc_indicators(df)
@@ -338,4 +342,17 @@ def generate_dataset_vectorized(
feat_final = feat_all.iloc[final_idx][available_feature_cols].copy()
feat_final["label"] = labels
return feat_final.reset_index(drop=True)
# 시간 가중치: 오래된 샘플 → 낮은 가중치, 최신 샘플 → 높은 가중치
n = len(feat_final)
if time_weight_decay > 0 and n > 1:
weights = np.exp(time_weight_decay * np.linspace(0.0, 1.0, n)).astype(np.float32)
weights /= weights.mean() # 평균 1로 정규화해 학습률 스케일 유지
print(f" 시간 가중치 적용 (decay={time_weight_decay}): "
f"min={weights.min():.3f}, max={weights.max():.3f}")
else:
weights = np.ones(n, dtype=np.float32)
feat_final = feat_final.reset_index(drop=True)
feat_final["sample_weight"] = weights
return feat_final

View File

@@ -9,21 +9,17 @@ def build_labels(
stop_loss: float,
side: str,
) -> Optional[int]:
"""
진입 이후 미래 캔들을 순서대로 확인해 TP/SL 도달 여부를 판단한다.
LONG: high >= TP → 1, low <= SL → 0
SHORT: low <= TP → 1, high >= SL → 0
둘 다 미도달 → None (학습 데이터에서 제외)
"""
for high, low in zip(future_highs, future_lows):
if side == "LONG":
if high >= take_profit:
return 1
# 보수적 접근: 손절(SL)을 먼저 체크
if low <= stop_loss:
return 0
else: # SHORT
if low <= take_profit:
if high >= take_profit:
return 1
else: # SHORT
# 보수적 접근: 손절(SL)을 먼저 체크
if high >= stop_loss:
return 0
if low <= take_profit:
return 1
return None

View File

@@ -1,32 +1,63 @@
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
from loguru import logger
from src.ml_features import FEATURE_COLS
ONNX_MODEL_PATH = Path("models/mlx_filter.weights.onnx")
LGBM_MODEL_PATH = Path("models/lgbm_filter.pkl")
class MLFilter:
"""
LightGBM 모델을 로드하고 진입 여부를 판단한다.
모델 파일이 없으면 항상 진입을 허용한다 (폴백).
ML 필터. ONNX(MLX 신경망) 우선 로드, 없으면 LightGBM으로 폴백한다.
둘 다 없으면 항상 진입을 허용한다.
우선순위: ONNX > LightGBM > 폴백(항상 허용)
"""
def __init__(self, model_path: str = "models/lgbm_filter.pkl", threshold: float = 0.60):
self._model_path = Path(model_path)
def __init__(
self,
onnx_path: str = str(ONNX_MODEL_PATH),
lgbm_path: str = str(LGBM_MODEL_PATH),
threshold: float = 0.60,
):
self._onnx_path = Path(onnx_path)
self._lgbm_path = Path(lgbm_path)
self._threshold = threshold
self._model = None
self._onnx_session = None
self._lgbm_model = None
self._try_load()
def _try_load(self):
if self._model_path.exists():
# ONNX 우선 시도
if self._onnx_path.exists():
try:
self._model = joblib.load(self._model_path)
logger.info(f"ML 필터 모델 로드 완료: {self._model_path}")
import onnxruntime as ort
self._onnx_session = ort.InferenceSession(
str(self._onnx_path),
providers=["CPUExecutionProvider"],
)
self._lgbm_model = None
logger.info(f"ML 필터 ONNX 모델 로드 완료: {self._onnx_path}")
return
except Exception as e:
logger.warning(f"ML 필터 모델 로드 실패: {e}")
self._model = None
logger.warning(f"ONNX 모델 로드 실패: {e}")
self._onnx_session = None
# LightGBM 폴백
if self._lgbm_path.exists():
try:
self._lgbm_model = joblib.load(self._lgbm_path)
logger.info(f"ML 필터 LightGBM 모델 로드 완료: {self._lgbm_path}")
except Exception as e:
logger.warning(f"LightGBM 모델 로드 실패: {e}")
self._lgbm_model = None
def is_model_loaded(self) -> bool:
return self._model is not None
return self._onnx_session is not None or self._lgbm_model is not None
def should_enter(self, features: pd.Series) -> bool:
"""
@@ -36,8 +67,13 @@ class MLFilter:
if not self.is_model_loaded():
return True
try:
X = features.to_frame().T
proba = self._model.predict_proba(X)[0][1]
if self._onnx_session is not None:
input_name = self._onnx_session.get_inputs()[0].name
X = features[FEATURE_COLS].values.astype(np.float32).reshape(1, -1)
proba = float(self._onnx_session.run(None, {input_name: X})[0][0])
else:
X = features.to_frame().T
proba = float(self._lgbm_model.predict_proba(X)[0][1])
logger.debug(f"ML 필터 확률: {proba:.3f} (임계값: {self._threshold})")
return bool(proba >= self._threshold)
except Exception as e:
@@ -46,5 +82,7 @@ class MLFilter:
def reload_model(self):
"""재학습 후 모델을 핫 리로드한다."""
self._onnx_session = None
self._lgbm_model = None
self._try_load()
logger.info("ML 필터 모델 리로드 완료")

View File

@@ -1,6 +1,7 @@
"""
Apple MLX 기반 경량 신경망 필터.
M4의 통합 GPU를 자동으로 활용한다.
학습 후 ONNX로 export해 Linux 서버에서 onnxruntime으로 추론한다.
"""
import numpy as np
import pandas as pd
@@ -12,6 +13,83 @@ from pathlib import Path
from src.ml_features import FEATURE_COLS
def _export_onnx(
    weights_npz: Path,
    meta_npz: Path,
    onnx_path: Path,
) -> None:
    """
    Convert MLX weights (.npz) into an ONNX graph.

    Network structure: normalize -> fc1(ReLU) -> dropout (inactive at
    inference, so omitted) -> fc2(ReLU) -> fc3 -> sigmoid.

    Args:
        weights_npz: .npz produced by MLX ``save_weights``
            (key pattern: ``fc1.weight``, ``fc1.bias``, ...).
        meta_npz: .npz holding the normalization stats (``mean``, ``std``)
            and the layer sizes (``input_dim``, ``hidden_dim``).
        onnx_path: destination path for the exported .onnx model.

    Raises:
        ImportError: if the ``onnx`` package is not installed.
        KeyError: if an expected array is missing from either .npz file.
    """
    import onnx
    from onnx import helper, TensorProto, numpy_helper
    meta = np.load(meta_npz)
    mean: np.ndarray = meta["mean"].astype(np.float32)
    std: np.ndarray = meta["std"].astype(np.float32)
    input_dim = int(meta["input_dim"])
    hidden_dim = int(meta["hidden_dim"])
    w = np.load(weights_npz)
    # MLX save_weights key pattern: fc1.weight, fc1.bias, ...
    fc1_w = w["fc1.weight"].astype(np.float32)  # (hidden, input)
    fc1_b = w["fc1.bias"].astype(np.float32)
    fc2_w = w["fc2.weight"].astype(np.float32)  # (hidden//2, hidden)
    fc2_b = w["fc2.bias"].astype(np.float32)
    fc3_w = w["fc3.weight"].astype(np.float32)  # (1, hidden//2)
    fc3_b = w["fc3.bias"].astype(np.float32)
    def _t(name: str, arr: np.ndarray) -> onnx.TensorProto:
        return numpy_helper.from_array(arr, name=name)
    initializers = [
        _t("mean", mean),
        _t("std", std),
        _t("fc1_w", fc1_w),
        _t("fc1_b", fc1_b),
        _t("fc2_w", fc2_w),
        _t("fc2_b", fc2_b),
        _t("fc3_w", fc3_w),
        _t("fc3_b", fc3_b),
        # Target shape for the final Reshape: collapse (N, 1) -> (N,).
        _t("out_shape", np.array([-1], dtype=np.int64)),
    ]
    nodes = [
        # Normalization: (x - mean) / std
        helper.make_node("Sub", ["X", "mean"], ["x_sub"]),
        helper.make_node("Div", ["x_sub", "std"], ["x_norm"]),
        # fc1: x_norm @ fc1_w.T + fc1_b
        helper.make_node("Gemm", ["x_norm", "fc1_w", "fc1_b"], ["fc1_out"],
                         transB=1),
        helper.make_node("Relu", ["fc1_out"], ["relu1"]),
        # fc2: relu1 @ fc2_w.T + fc2_b
        helper.make_node("Gemm", ["relu1", "fc2_w", "fc2_b"], ["fc2_out"],
                         transB=1),
        helper.make_node("Relu", ["fc2_out"], ["relu2"]),
        # fc3: relu2 @ fc3_w.T + fc3_b -> (N, 1)
        helper.make_node("Gemm", ["relu2", "fc3_w", "fc3_b"], ["logits"],
                         transB=1),
        # sigmoid -> (N, 1)
        helper.make_node("Sigmoid", ["logits"], ["proba_2d"]),
        # BUGFIX: the previous Flatten(axis=0) produces shape (1, N) per the
        # ONNX spec (Flatten always emits a rank-2 tensor), contradicting the
        # declared rank-1 graph output. Reshape to [-1] truly squeezes
        # (N, 1) -> (N,).
        helper.make_node("Reshape", ["proba_2d", "out_shape"], ["proba"]),
    ]
    graph = helper.make_graph(
        nodes,
        "mlx_filter",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, input_dim])],
        outputs=[helper.make_tensor_value_info("proba", TensorProto.FLOAT, [None])],
        initializer=initializers,
    )
    model_proto = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
    model_proto.ir_version = 8
    onnx.checker.check_model(model_proto)
    # parents=True so export does not fail when intermediate dirs are missing.
    onnx_path.parent.mkdir(parents=True, exist_ok=True)
    onnx.save(model_proto, str(onnx_path))
    print(f" ONNX export 완료: {onnx_path}")
class _Net(nn.Module):
"""3층 MLP 이진 분류기."""
@@ -53,7 +131,12 @@ class MLXFilter:
self._std: np.ndarray | None = None
self._trained = False
def fit(self, X: pd.DataFrame, y: pd.Series) -> "MLXFilter":
def fit(
self,
X: pd.DataFrame,
y: pd.Series,
sample_weight: np.ndarray | None = None,
) -> "MLXFilter":
X_np = X[FEATURE_COLS].values.astype(np.float32)
y_np = y.values.astype(np.float32)
@@ -61,11 +144,20 @@ class MLXFilter:
self._std = X_np.std(axis=0) + 1e-8
X_np = (X_np - self._mean) / self._std
w_np = sample_weight.astype(np.float32) if sample_weight is not None else None
optimizer = optim.Adam(learning_rate=self.lr)
def loss_fn(model: _Net, x: mx.array, y: mx.array) -> mx.array:
def loss_fn(
model: _Net, x: mx.array, y: mx.array, w: mx.array | None
) -> mx.array:
logits = model(x)
return nn.losses.binary_cross_entropy(logits, y, with_logits=True)
per_sample = nn.losses.binary_cross_entropy(
logits, y, with_logits=True, reduction="none"
)
if w is not None:
return (per_sample * w).sum() / w.sum()
return per_sample.mean()
loss_and_grad = nn.value_and_grad(self._model, loss_fn)
@@ -78,7 +170,8 @@ class MLXFilter:
batch_idx = idx[start : start + self.batch_size]
x_batch = mx.array(X_np[batch_idx])
y_batch = mx.array(y_np[batch_idx])
loss, grads = loss_and_grad(self._model, x_batch, y_batch)
w_batch = mx.array(w_np[batch_idx]) if w_np is not None else None
loss, grads = loss_and_grad(self._model, x_batch, y_batch, w_batch)
optimizer.update(self._model, grads)
mx.eval(self._model.parameters(), optimizer.state)
epoch_loss += loss.item()
@@ -114,6 +207,12 @@ class MLXFilter:
input_dim=np.array(self.input_dim),
hidden_dim=np.array(self.hidden_dim),
)
# ONNX export: Linux 서버에서 onnxruntime으로 추론하기 위해 변환
try:
onnx_path = path.with_suffix(".onnx")
_export_onnx(weights_path, meta_path, onnx_path)
except ImportError:
print(" [경고] onnx 패키지 없음 → ONNX export 생략 (pip install onnx)")
@classmethod
def load(cls, path: str | Path) -> "MLXFilter":