feat: 인벤토리 캐시 및 JSON 인코더 추가

- 인벤토리 캐시 기능을 추가하여, RCON으로 인벤토리를 읽지 못할 경우 이전에 성공적으로 읽은 데이터를 활용
- Lua에서 JSON 인코딩을 위한 간단한 함수 추가, 일부 Factorio 버전에서 `game.table_to_json`이 없을 경우 대체
- `README.md`에 인벤토리 캐시 및 JSON 인코더 사용에 대한 설명 추가
- `scan_resources()`와 `mine_resource()`의 반경을 확장하여 자원 탐색 실패를 줄임
This commit is contained in:
21in7
2026-03-25 23:03:08 +09:00
parent e98d08bb44
commit 8c90e80582
14 changed files with 573 additions and 123 deletions

View File

@@ -15,6 +15,7 @@ JSON 파싱 강화:
import json
import os
import re
import time
import urllib.request
import urllib.error
@@ -145,7 +146,7 @@ class AIPlanner:
for attempt in range(3):
try:
raw = self._call_glm(user_message)
raw = self._call_glm(user_message, attempt=attempt)
plan = self._parse_json(raw)
break
except (ValueError, json.JSONDecodeError) as e:
@@ -164,13 +165,13 @@ class AIPlanner:
thinking = plan.get("thinking", "")
if thinking:
print(f"\n🧠 AI 판단:\n{thinking}\n")
print(f"\n[AI] 판단:\n{thinking}\n")
print(f"🎯 현재 목표: {plan.get('current_goal', '')}")
print(f"📋 완료 후: {plan.get('after_this', '')}")
print(f"[AI] 현재 목표: {plan.get('current_goal', '')}")
print(f"[AI] 완료 후: {plan.get('after_this', '')}")
actions = plan.get("actions", [])
print(f" {len(actions)}개 행동 계획됨")
print(f"[AI] {len(actions)}개 행동 계획됨")
return actions
def record_feedback(self, action: dict, success: bool, message: str = ""):
@@ -188,7 +189,7 @@ class AIPlanner:
return ""
lines = ["### 이전 행동 결과 (성공/실패)\n"]
for fb in self.feedback_log[-8:]:
status = "" if fb["success"] else ""
status = "OK" if fb["success"] else "FAIL"
msg = f"{fb['message']}" if fb["message"] else ""
lines.append(
f" {status} {fb['action']} "
@@ -196,7 +197,7 @@ class AIPlanner:
)
return "\n".join(lines) + "\n\n"
def _call_glm(self, user_message: str) -> str:
def _call_glm(self, user_message: str, attempt: int) -> str:
payload = json.dumps({
"model": GLM_MODEL,
"messages": [
@@ -207,6 +208,10 @@ class AIPlanner:
"max_tokens": 2000,
}).encode("utf-8")
prompt_chars = len(user_message)
system_chars = len(SYSTEM_PROMPT)
max_tokens = 2000
req = urllib.request.Request(
GLM_API_URL,
data = payload,
@@ -217,9 +222,36 @@ class AIPlanner:
method = "POST",
)
try:
t_total0 = time.perf_counter()
t_payload0 = time.perf_counter()
# payload 직렬화 직후(대략)부터 타임라인 측정
_t0 = time.perf_counter()
with urllib.request.urlopen(req, timeout=90) as resp:
data = json.loads(resp.read().decode("utf-8"))
return data["choices"][0]["message"]["content"].strip()
raw_text = resp.read().decode("utf-8")
t_read_done = time.perf_counter()
t_json0 = time.perf_counter()
data = json.loads(raw_text)
content = data["choices"][0]["message"]["content"].strip()
t_json_done = time.perf_counter()
dt_total = time.perf_counter() - t_total0
dt_payload = t_payload0 - t_total0
dt_read = t_read_done - _t0
dt_json = t_json_done - t_json0
print(
"[GLM] 타이밍 | "
f"attempt {attempt+1}/3 | "
f"total {dt_total:.2f}s | "
f"http_read {dt_read:.2f}s | "
f"json_parse {dt_json:.2f}s | "
f"prompt_chars {prompt_chars} | "
f"system_chars {system_chars} | "
f"max_tokens {max_tokens} | "
f"resp_chars {len(raw_text)}"
)
return content
except urllib.error.HTTPError as e:
raise ConnectionError(f"GLM API 오류 {e.code}: {e.read().decode()}")