diff --git a/ai_planner.py b/ai_planner.py index e51e5ab..64f9be2 100644 --- a/ai_planner.py +++ b/ai_planner.py @@ -15,7 +15,9 @@ LLM 백엔드: 로컬 Ollama (structured output으로 JSON 스키마 강제) import json import os import re +import sys import time +import threading import traceback import ollama @@ -141,7 +143,7 @@ class AIPlanner: "⚠️ 제작은 재료가 있어야 합니다. 인벤토리를 확인하세요." ) - print(f"\n[AI] 생각 중... (model={OLLAMA_MODEL}, host={OLLAMA_HOST})") + print(f"\n[AI] 요청 시작 (model={OLLAMA_MODEL})") try: plan = self._call_ollama(user_message) @@ -177,36 +179,53 @@ class AIPlanner: def _call_ollama(self, user_message: str) -> dict: t0 = time.perf_counter() - client = ollama.Client(host=OLLAMA_HOST) - response = client.chat( - model=OLLAMA_MODEL, - messages=[ - {"role": "system", "content": SYSTEM_PROMPT}, - {"role": "user", "content": user_message}, - ], - format={ - "type": "object", - "properties": { - "thinking": {"type": "string"}, - "current_goal": {"type": "string"}, - "actions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "action": {"type": "string"}, - "params": {"type": "object"}, - "reason": {"type": "string"}, + stop_event = threading.Event() + + def _spinner(): + while not stop_event.is_set(): + elapsed = time.perf_counter() - t0 + print(f"\r[AI] 생각 중... {elapsed:.0f}s", end="", flush=True)
+ stop_event.wait(1.0) + print("\r", end="") + + spinner = threading.Thread(target=_spinner, daemon=True) + spinner.start() + + try: + client = ollama.Client(host=OLLAMA_HOST) + response = client.chat( + model=OLLAMA_MODEL, + messages=[ + {"role": "system", "content": SYSTEM_PROMPT}, + {"role": "user", "content": user_message}, + ], + format={ + "type": "object", + "properties": { + "thinking": {"type": "string"}, + "current_goal": {"type": "string"}, + "actions": { + "type": "array", + "items": { + "type": "object", + "properties": { + "action": {"type": "string"}, + "params": {"type": "object"}, + "reason": {"type": "string"}, + }, + "required": ["action", "params"], }, - "required": ["action", "params"], }, + "after_this": {"type": "string"}, }, - "after_this": {"type": "string"}, + "required": ["actions"], }, - "required": ["actions"], - }, - options={"temperature": 0.3}, - ) + options={"temperature": 0.3}, + ) + finally: + stop_event.set() + spinner.join() + dt = time.perf_counter() - t0 content = response.message.content print(f"[AI] 응답 수신 ({dt:.2f}s, {len(content)}자)") diff --git a/main.py b/main.py index 406a1c0..9c1c82d 100644 --- a/main.py +++ b/main.py @@ -15,7 +15,7 @@ import json from factorio_rcon import FactorioRCON from state_reader import StateReader from context_compressor import ContextCompressor -from ai_planner import AIPlanner +from ai_planner import AIPlanner, OLLAMA_MODEL, OLLAMA_HOST from action_executor import ActionExecutor from agent_last_action_memory import save_last_action_memory, load_last_action_memory from ore_patch_memory import load_ore_patch_memory, compute_distance_sq @@ -137,8 +137,8 @@ def run(): print("=" * 60) print(" 팩토리오 순수 AI 에이전트 (치트 없음)") print(" - 실제 걷기 / 실제 채굴 / 실제 제작 / 건설 거리 제한") - print(f" - LLM: Ollama {os.environ.get('OLLAMA_MODEL', 'qwen3:14b')}") - print(f" - Ollama host: {os.environ.get('OLLAMA_HOST', 'http://192.168.50.67:11434')}") + print(f" - LLM: Ollama {OLLAMA_MODEL}")
+ print(f" - Ollama host: {OLLAMA_HOST}") print("=" * 60) with FactorioRCON(RCON_HOST, RCON_PORT, RCON_PASSWORD) as rcon: