feat: enhance AIPlanner payload structure for LM Studio compatibility by including 'input' field and improve response content extraction methods
This commit is contained in:
@@ -196,17 +196,7 @@ class AIPlanner:
|
||||
spinner.start()
|
||||
|
||||
try:
|
||||
payload = {
|
||||
"model": OLLAMA_MODEL,
|
||||
"messages": [
|
||||
{"role": "system", "content": SYSTEM_PROMPT},
|
||||
{"role": "user", "content": user_message},
|
||||
],
|
||||
"format": "json",
|
||||
"think": False,
|
||||
"stream": False,
|
||||
"options": {"temperature": 0.3, "num_ctx": 8192},
|
||||
}
|
||||
payload = self._build_chat_payload(user_message)
|
||||
resp = httpx.post(
|
||||
f"{OLLAMA_HOST}/api/v1/chat",
|
||||
json=payload,
|
||||
@@ -219,12 +209,68 @@ class AIPlanner:
|
||||
spinner.join()
|
||||
|
||||
dt = time.perf_counter() - t0
|
||||
content = data["message"]["content"]
|
||||
content = self._extract_response_content(data)
|
||||
print(f"[AI] 응답 수신 ({dt:.2f}s, {len(content)}자)")
|
||||
if _debug_enabled():
|
||||
print(f"[AI][디버그] raw={content[:300]}")
|
||||
return json.loads(content)
|
||||
|
||||
@staticmethod
def _build_chat_payload(user_message: str) -> dict:
    """Assemble the non-streaming chat request body for the planner model.

    Args:
        user_message: The user-turn text appended after the system prompt.

    Returns:
        A request dict carrying the model name, the two-turn message list,
        JSON-format/no-think/no-stream flags, and sampling options.
    """
    return {
        "model": OLLAMA_MODEL,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_message},
        ],
        # LM Studio (OpenAI Responses-compatible) servers may require an
        # "input" field, so it is sent alongside "messages".
        "input": user_message,
        "format": "json",
        "think": False,
        "stream": False,
        "options": {"temperature": 0.3, "num_ctx": 8192},
    }
|
||||
|
||||
@staticmethod
|
||||
def _extract_response_content(data: dict) -> str:
|
||||
message = data.get("message")
|
||||
if isinstance(message, dict):
|
||||
content = message.get("content")
|
||||
if isinstance(content, str) and content.strip():
|
||||
return content
|
||||
|
||||
output_text = data.get("output_text")
|
||||
if isinstance(output_text, str) and output_text.strip():
|
||||
return output_text
|
||||
|
||||
choices = data.get("choices")
|
||||
if isinstance(choices, list) and choices:
|
||||
first = choices[0]
|
||||
if isinstance(first, dict):
|
||||
msg = first.get("message")
|
||||
if isinstance(msg, dict):
|
||||
content = msg.get("content")
|
||||
if isinstance(content, str) and content.strip():
|
||||
return content
|
||||
|
||||
output = data.get("output")
|
||||
if isinstance(output, list):
|
||||
for item in output:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
content_items = item.get("content")
|
||||
if not isinstance(content_items, list):
|
||||
continue
|
||||
for c in content_items:
|
||||
if not isinstance(c, dict):
|
||||
continue
|
||||
text = c.get("text")
|
||||
if isinstance(text, str) and text.strip():
|
||||
return text
|
||||
|
||||
raise ValueError(f"응답에서 텍스트 콘텐츠를 찾지 못했습니다. keys={list(data.keys())}")
|
||||
|
||||
@staticmethod
|
||||
def _ensure_move_before_build_actions(actions: list[dict]) -> list[dict]:
|
||||
"""
|
||||
|
||||
Reference in New Issue
Block a user